gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
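/* For example, on a host with a 64-bit HOST_WIDE_INT, a low word whose
   value is negative when viewed as signed (say the CONST_INT -5) extends
   to a high word of -1, while a nonnegative low word such as 5 extends
   to a high word of 0.  */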
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 rtx, int);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 \f
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, rtx i)
68 {
69 return gen_int_mode (- INTVAL (i), mode);
70 }
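/* For instance, in SImode negating the most negative value wraps around:
   neg_const_int (SImode, GEN_INT (-2147483648)) again yields
   (const_int -2147483648), because gen_int_mode truncates the result
   to the width of MODE.  */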
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (enum machine_mode mode, rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_BITSIZE (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && GET_CODE (x) == CONST_INT)
90 val = INTVAL (x);
91 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_DOUBLE
93 && CONST_DOUBLE_LOW (x) == 0)
94 {
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
97 }
98 else
99 return false;
100
101 if (width < HOST_BITS_PER_WIDE_INT)
102 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
103 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
104 }
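/* As an example, in SImode the only accepted value is the 32-bit pattern
   0x80000000: mode_signbit_p (SImode, GEN_INT (-2147483648)) is true,
   while it is false for any other constant, such as 0x40000000 or -1.  */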
105 \f
106 /* Make a binary operation by properly ordering the operands and
107 seeing if the expression folds. */
108
109 rtx
110 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
111 rtx op1)
112 {
113 rtx tem;
114
115 /* Put complex operands first and constants second if commutative. */
116 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
117 && swap_commutative_operands_p (op0, op1))
118 tem = op0, op0 = op1, op1 = tem;
119
120 /* If this simplifies, do it. */
121 tem = simplify_binary_operation (code, mode, op0, op1);
122 if (tem)
123 return tem;
124
125 /* Handle addition and subtraction specially. Otherwise, just form
126 the operation. */
127
128 if (code == PLUS || code == MINUS)
129 {
130 tem = simplify_plus_minus (code, mode, op0, op1, 1);
131 if (tem)
132 return tem;
133 }
134
135 return gen_rtx_fmt_ee (code, mode, op0, op1);
136 }
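/* For example, given a non-constant X, simplify_gen_binary (PLUS, SImode,
   const1_rtx, X) canonicalizes the constant into the second position and
   produces (plus:SI X (const_int 1)), while simplify_gen_binary (PLUS,
   SImode, X, const0_rtx) folds to X itself.  */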
137 \f
138 /* If X is a MEM referencing the constant pool, return the real value.
139 Otherwise return X. */
140 rtx
141 avoid_constant_pool_reference (rtx x)
142 {
143 rtx c, tmp, addr;
144 enum machine_mode cmode;
145
146 switch (GET_CODE (x))
147 {
148 case MEM:
149 break;
150
151 case FLOAT_EXTEND:
152 /* Handle float extensions of constant pool references. */
153 tmp = XEXP (x, 0);
154 c = avoid_constant_pool_reference (tmp);
155 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
156 {
157 REAL_VALUE_TYPE d;
158
159 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
160 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
161 }
162 return x;
163
164 default:
165 return x;
166 }
167
168 addr = XEXP (x, 0);
169
170 /* Call target hook to avoid the effects of -fpic etc.... */
171 addr = targetm.delegitimize_address (addr);
172
173 if (GET_CODE (addr) == LO_SUM)
174 addr = XEXP (addr, 1);
175
176 if (GET_CODE (addr) != SYMBOL_REF
177 || ! CONSTANT_POOL_ADDRESS_P (addr))
178 return x;
179
180 c = get_pool_constant (addr);
181 cmode = get_pool_mode (addr);
182
183 /* If we're accessing the constant in a different mode than it was
184 originally stored, attempt to fix that up via subreg simplifications.
185 If that fails we have no choice but to return the original memory. */
186 if (cmode != GET_MODE (x))
187 {
188 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
189 return c ? c : x;
190 }
191
192 return c;
193 }
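/* For instance, if X is (mem:DF (symbol_ref)) and the SYMBOL_REF is a
   constant pool address whose entry is a DFmode CONST_DOUBLE, that
   CONST_DOUBLE is returned directly; if the pool entry was stored in a
   different mode, the value is first run through simplify_subreg, and X
   is returned unchanged when even that fails.  */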
194 \f
195 /* Make a unary operation by first seeing if it folds and otherwise making
196 the specified operation. */
197
198 rtx
199 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
200 enum machine_mode op_mode)
201 {
202 rtx tem;
203
204 /* If this simplifies, use it. */
205 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
206 return tem;
207
208 return gen_rtx_fmt_e (code, mode, op);
209 }
210
211 /* Likewise for ternary operations. */
212
213 rtx
214 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
215 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
216 {
217 rtx tem;
218
219 /* If this simplifies, use it. */
220 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
221 op0, op1, op2)))
222 return tem;
223
224 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
225 }
226
227 /* Likewise, for relational operations.
228 	   CMP_MODE specifies the mode in which the comparison is done.  */
229
230 rtx
231 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
232 enum machine_mode cmp_mode, rtx op0, rtx op1)
233 {
234 rtx tem;
235
236 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
237 op0, op1)))
238 return tem;
239
240 return gen_rtx_fmt_ee (code, mode, op0, op1);
241 }
242 \f
243 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
244 resulting RTX. Return a new RTX which is as simplified as possible. */
245
246 rtx
247 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
248 {
249 enum rtx_code code = GET_CODE (x);
250 enum machine_mode mode = GET_MODE (x);
251 enum machine_mode op_mode;
252 rtx op0, op1, op2;
253
254 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
255 to build a new expression substituting recursively. If we can't do
256 anything, return our input. */
257
258 if (x == old_rtx)
259 return new_rtx;
260
261 switch (GET_RTX_CLASS (code))
262 {
263 case RTX_UNARY:
264 op0 = XEXP (x, 0);
265 op_mode = GET_MODE (op0);
266 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
267 if (op0 == XEXP (x, 0))
268 return x;
269 return simplify_gen_unary (code, mode, op0, op_mode);
270
271 case RTX_BIN_ARITH:
272 case RTX_COMM_ARITH:
273 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
274 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
275 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
276 return x;
277 return simplify_gen_binary (code, mode, op0, op1);
278
279 case RTX_COMPARE:
280 case RTX_COMM_COMPARE:
281 op0 = XEXP (x, 0);
282 op1 = XEXP (x, 1);
283 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
284 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
285 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
287 return x;
288 return simplify_gen_relational (code, mode, op_mode, op0, op1);
289
290 case RTX_TERNARY:
291 case RTX_BITFIELD_OPS:
292 op0 = XEXP (x, 0);
293 op_mode = GET_MODE (op0);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
296 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
297 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
298 return x;
299 if (op_mode == VOIDmode)
300 op_mode = GET_MODE (op0);
301 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
302
303 case RTX_EXTRA:
304 /* The only case we try to handle is a SUBREG. */
305 if (code == SUBREG)
306 {
307 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
308 if (op0 == SUBREG_REG (x))
309 return x;
310 op0 = simplify_gen_subreg (GET_MODE (x), op0,
311 GET_MODE (SUBREG_REG (x)),
312 SUBREG_BYTE (x));
313 return op0 ? op0 : x;
314 }
315 break;
316
317 case RTX_OBJ:
318 if (code == MEM)
319 {
320 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
321 if (op0 == XEXP (x, 0))
322 return x;
323 return replace_equiv_address_nv (x, op0);
324 }
325 else if (code == LO_SUM)
326 {
327 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
328 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
329
330 /* (lo_sum (high x) x) -> x */
331 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
332 return op1;
333
334 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
335 return x;
336 return gen_rtx_LO_SUM (mode, op0, op1);
337 }
338 else if (code == REG)
339 {
340 if (rtx_equal_p (x, old_rtx))
341 return new_rtx;
342 }
343 break;
344
345 default:
346 break;
347 }
348 return x;
349 }
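/* For example, substituting (const_int 4) for a pseudo register R inside
   (plus:SI R (const_int 3)) does not merely splice in the constant: the
   rebuilt PLUS is folded on the way out, so the result is (const_int 7).  */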
350 \f
351 /* Try to simplify a unary operation CODE whose output mode is to be
352 MODE with input operand OP whose mode was originally OP_MODE.
353 Return zero if no simplification can be made. */
354 rtx
355 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
356 rtx op, enum machine_mode op_mode)
357 {
358 unsigned int width = GET_MODE_BITSIZE (mode);
359 rtx trueop = avoid_constant_pool_reference (op);
360
361 if (code == VEC_DUPLICATE)
362 {
363 gcc_assert (VECTOR_MODE_P (mode));
364 if (GET_MODE (trueop) != VOIDmode)
365 {
366 if (!VECTOR_MODE_P (GET_MODE (trueop)))
367 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
368 else
369 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
370 (GET_MODE (trueop)));
371 }
372 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
373 || GET_CODE (trueop) == CONST_VECTOR)
374 {
375 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
376 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
377 rtvec v = rtvec_alloc (n_elts);
378 unsigned int i;
379
380 if (GET_CODE (trueop) != CONST_VECTOR)
381 for (i = 0; i < n_elts; i++)
382 RTVEC_ELT (v, i) = trueop;
383 else
384 {
385 enum machine_mode inmode = GET_MODE (trueop);
386 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
387 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
388
389 gcc_assert (in_n_elts < n_elts);
390 gcc_assert ((n_elts % in_n_elts) == 0);
391 for (i = 0; i < n_elts; i++)
392 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
393 }
394 return gen_rtx_CONST_VECTOR (mode, v);
395 }
396 }
397 else if (GET_CODE (op) == CONST)
398 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
399
400 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
401 {
402 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
403 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
404 enum machine_mode opmode = GET_MODE (trueop);
405 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
406 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
407 rtvec v = rtvec_alloc (n_elts);
408 unsigned int i;
409
410 gcc_assert (op_n_elts == n_elts);
411 for (i = 0; i < n_elts; i++)
412 {
413 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
414 CONST_VECTOR_ELT (trueop, i),
415 GET_MODE_INNER (opmode));
416 if (!x)
417 return 0;
418 RTVEC_ELT (v, i) = x;
419 }
420 return gen_rtx_CONST_VECTOR (mode, v);
421 }
422
423 /* The order of these tests is critical so that, for example, we don't
424 check the wrong mode (input vs. output) for a conversion operation,
425 such as FIX. At some point, this should be simplified. */
426
427 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
428 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
429 {
430 HOST_WIDE_INT hv, lv;
431 REAL_VALUE_TYPE d;
432
433 if (GET_CODE (trueop) == CONST_INT)
434 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
435 else
436 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
437
438 REAL_VALUE_FROM_INT (d, lv, hv, mode);
439 d = real_value_truncate (mode, d);
440 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
441 }
442 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
443 && (GET_CODE (trueop) == CONST_DOUBLE
444 || GET_CODE (trueop) == CONST_INT))
445 {
446 HOST_WIDE_INT hv, lv;
447 REAL_VALUE_TYPE d;
448
449 if (GET_CODE (trueop) == CONST_INT)
450 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
451 else
452 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
453
454 if (op_mode == VOIDmode)
455 {
456 /* We don't know how to interpret negative-looking numbers in
457 this case, so don't try to fold those. */
458 if (hv < 0)
459 return 0;
460 }
461 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
462 ;
463 else
464 hv = 0, lv &= GET_MODE_MASK (op_mode);
465
466 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
467 d = real_value_truncate (mode, d);
468 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
469 }
470
471 if (GET_CODE (trueop) == CONST_INT
472 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
473 {
474 HOST_WIDE_INT arg0 = INTVAL (trueop);
475 HOST_WIDE_INT val;
476
477 switch (code)
478 {
479 case NOT:
480 val = ~ arg0;
481 break;
482
483 case NEG:
484 val = - arg0;
485 break;
486
487 case ABS:
488 val = (arg0 >= 0 ? arg0 : - arg0);
489 break;
490
491 case FFS:
492 /* Don't use ffs here. Instead, get low order bit and then its
493 number. If arg0 is zero, this will return 0, as desired. */
494 arg0 &= GET_MODE_MASK (mode);
495 val = exact_log2 (arg0 & (- arg0)) + 1;
496 break;
497
498 case CLZ:
499 arg0 &= GET_MODE_MASK (mode);
500 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
501 ;
502 else
503 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
504 break;
505
506 case CTZ:
507 arg0 &= GET_MODE_MASK (mode);
508 if (arg0 == 0)
509 {
510 /* Even if the value at zero is undefined, we have to come
511 up with some replacement. Seems good enough. */
512 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
513 val = GET_MODE_BITSIZE (mode);
514 }
515 else
516 val = exact_log2 (arg0 & -arg0);
517 break;
518
519 case POPCOUNT:
520 arg0 &= GET_MODE_MASK (mode);
521 val = 0;
522 while (arg0)
523 val++, arg0 &= arg0 - 1;
524 break;
525
526 case PARITY:
527 arg0 &= GET_MODE_MASK (mode);
528 val = 0;
529 while (arg0)
530 val++, arg0 &= arg0 - 1;
531 val &= 1;
532 break;
533
534 case TRUNCATE:
535 val = arg0;
536 break;
537
538 case ZERO_EXTEND:
539 /* When zero-extending a CONST_INT, we need to know its
540 original mode. */
541 gcc_assert (op_mode != VOIDmode);
542 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
543 {
544 /* If we were really extending the mode,
545 we would have to distinguish between zero-extension
546 and sign-extension. */
547 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
548 val = arg0;
549 }
550 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
551 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
552 else
553 return 0;
554 break;
555
556 case SIGN_EXTEND:
557 if (op_mode == VOIDmode)
558 op_mode = mode;
559 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
560 {
561 /* If we were really extending the mode,
562 we would have to distinguish between zero-extension
563 and sign-extension. */
564 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
565 val = arg0;
566 }
567 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
568 {
569 val
570 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
571 if (val
572 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
573 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
574 }
575 else
576 return 0;
577 break;
578
579 case SQRT:
580 case FLOAT_EXTEND:
581 case FLOAT_TRUNCATE:
582 case SS_TRUNCATE:
583 case US_TRUNCATE:
584 return 0;
585
586 default:
587 gcc_unreachable ();
588 }
589
590 val = trunc_int_for_mode (val, mode);
591
592 return GEN_INT (val);
593 }
594
595 /* We can do some operations on integer CONST_DOUBLEs. Also allow
596 for a DImode operation on a CONST_INT. */
597 else if (GET_MODE (trueop) == VOIDmode
598 && width <= HOST_BITS_PER_WIDE_INT * 2
599 && (GET_CODE (trueop) == CONST_DOUBLE
600 || GET_CODE (trueop) == CONST_INT))
601 {
602 unsigned HOST_WIDE_INT l1, lv;
603 HOST_WIDE_INT h1, hv;
604
605 if (GET_CODE (trueop) == CONST_DOUBLE)
606 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
607 else
608 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
609
610 switch (code)
611 {
612 case NOT:
613 lv = ~ l1;
614 hv = ~ h1;
615 break;
616
617 case NEG:
618 neg_double (l1, h1, &lv, &hv);
619 break;
620
621 case ABS:
622 if (h1 < 0)
623 neg_double (l1, h1, &lv, &hv);
624 else
625 lv = l1, hv = h1;
626 break;
627
628 case FFS:
629 hv = 0;
630 if (l1 == 0)
631 {
632 if (h1 == 0)
633 lv = 0;
634 else
635 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
636 }
637 else
638 lv = exact_log2 (l1 & -l1) + 1;
639 break;
640
641 case CLZ:
642 hv = 0;
643 if (h1 != 0)
644 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
645 - HOST_BITS_PER_WIDE_INT;
646 else if (l1 != 0)
647 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
648 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
649 lv = GET_MODE_BITSIZE (mode);
650 break;
651
652 case CTZ:
653 hv = 0;
654 if (l1 != 0)
655 lv = exact_log2 (l1 & -l1);
656 else if (h1 != 0)
657 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
658 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
659 lv = GET_MODE_BITSIZE (mode);
660 break;
661
662 case POPCOUNT:
663 hv = 0;
664 lv = 0;
665 while (l1)
666 lv++, l1 &= l1 - 1;
667 while (h1)
668 lv++, h1 &= h1 - 1;
669 break;
670
671 case PARITY:
672 hv = 0;
673 lv = 0;
674 while (l1)
675 lv++, l1 &= l1 - 1;
676 while (h1)
677 lv++, h1 &= h1 - 1;
678 lv &= 1;
679 break;
680
681 case TRUNCATE:
682 /* This is just a change-of-mode, so do nothing. */
683 lv = l1, hv = h1;
684 break;
685
686 case ZERO_EXTEND:
687 gcc_assert (op_mode != VOIDmode);
688
689 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
690 return 0;
691
692 hv = 0;
693 lv = l1 & GET_MODE_MASK (op_mode);
694 break;
695
696 case SIGN_EXTEND:
697 if (op_mode == VOIDmode
698 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
699 return 0;
700 else
701 {
702 lv = l1 & GET_MODE_MASK (op_mode);
703 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
704 && (lv & ((HOST_WIDE_INT) 1
705 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
706 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
707
708 hv = HWI_SIGN_EXTEND (lv);
709 }
710 break;
711
712 case SQRT:
713 return 0;
714
715 default:
716 return 0;
717 }
718
719 return immed_double_const (lv, hv, mode);
720 }
721
722 else if (GET_CODE (trueop) == CONST_DOUBLE
723 && GET_MODE_CLASS (mode) == MODE_FLOAT)
724 {
725 REAL_VALUE_TYPE d, t;
726 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
727
728 switch (code)
729 {
730 case SQRT:
731 if (HONOR_SNANS (mode) && real_isnan (&d))
732 return 0;
733 real_sqrt (&t, mode, &d);
734 d = t;
735 break;
736 case ABS:
737 d = REAL_VALUE_ABS (d);
738 break;
739 case NEG:
740 d = REAL_VALUE_NEGATE (d);
741 break;
742 case FLOAT_TRUNCATE:
743 d = real_value_truncate (mode, d);
744 break;
745 case FLOAT_EXTEND:
746 /* All this does is change the mode. */
747 break;
748 case FIX:
749 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
750 break;
751 case NOT:
752 {
753 long tmp[4];
754 int i;
755
756 real_to_target (tmp, &d, GET_MODE (trueop));
757 for (i = 0; i < 4; i++)
758 tmp[i] = ~tmp[i];
759 real_from_target (&d, tmp, mode);
 760 	  }
 	  break;
761 default:
762 gcc_unreachable ();
763 }
764 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
765 }
766
767 else if (GET_CODE (trueop) == CONST_DOUBLE
768 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
769 && GET_MODE_CLASS (mode) == MODE_INT
770 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
771 {
772 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
773 operators are intentionally left unspecified (to ease implementation
774 by target backends), for consistency, this routine implements the
775 same semantics for constant folding as used by the middle-end. */
776
777 HOST_WIDE_INT xh, xl, th, tl;
778 REAL_VALUE_TYPE x, t;
779 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
780 switch (code)
781 {
782 case FIX:
783 if (REAL_VALUE_ISNAN (x))
784 return const0_rtx;
785
786 /* Test against the signed upper bound. */
787 if (width > HOST_BITS_PER_WIDE_INT)
788 {
789 th = ((unsigned HOST_WIDE_INT) 1
790 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
791 tl = -1;
792 }
793 else
794 {
795 th = 0;
796 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
797 }
798 real_from_integer (&t, VOIDmode, tl, th, 0);
799 if (REAL_VALUES_LESS (t, x))
800 {
801 xh = th;
802 xl = tl;
803 break;
804 }
805
806 /* Test against the signed lower bound. */
807 if (width > HOST_BITS_PER_WIDE_INT)
808 {
809 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
810 tl = 0;
811 }
812 else
813 {
814 th = -1;
815 tl = (HOST_WIDE_INT) -1 << (width - 1);
816 }
817 real_from_integer (&t, VOIDmode, tl, th, 0);
818 if (REAL_VALUES_LESS (x, t))
819 {
820 xh = th;
821 xl = tl;
822 break;
823 }
824 REAL_VALUE_TO_INT (&xl, &xh, x);
825 break;
826
827 case UNSIGNED_FIX:
828 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
829 return const0_rtx;
830
831 /* Test against the unsigned upper bound. */
832 if (width == 2*HOST_BITS_PER_WIDE_INT)
833 {
834 th = -1;
835 tl = -1;
836 }
837 else if (width >= HOST_BITS_PER_WIDE_INT)
838 {
839 th = ((unsigned HOST_WIDE_INT) 1
840 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
841 tl = -1;
842 }
843 else
844 {
845 th = 0;
846 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
847 }
848 real_from_integer (&t, VOIDmode, tl, th, 1);
849 if (REAL_VALUES_LESS (t, x))
850 {
851 xh = th;
852 xl = tl;
853 break;
854 }
855
856 REAL_VALUE_TO_INT (&xl, &xh, x);
857 break;
858
859 default:
860 gcc_unreachable ();
861 }
862 return immed_double_const (xl, xh, mode);
863 }
864
865 /* This was formerly used only for non-IEEE float.
866 eggert@twinsun.com says it is safe for IEEE also. */
867 else
868 {
869 enum rtx_code reversed;
870 rtx temp;
871
872 /* There are some simplifications we can do even if the operands
873 aren't constant. */
874 switch (code)
875 {
876 case NOT:
877 /* (not (not X)) == X. */
878 if (GET_CODE (op) == NOT)
879 return XEXP (op, 0);
880
881 /* (not (eq X Y)) == (ne X Y), etc. */
882 if (COMPARISON_P (op)
883 && (mode == BImode || STORE_FLAG_VALUE == -1)
884 && ((reversed = reversed_comparison_code (op, NULL_RTX))
885 != UNKNOWN))
886 return simplify_gen_relational (reversed, mode, VOIDmode,
887 XEXP (op, 0), XEXP (op, 1));
888
889 /* (not (plus X -1)) can become (neg X). */
890 if (GET_CODE (op) == PLUS
891 && XEXP (op, 1) == constm1_rtx)
892 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
893
894 /* Similarly, (not (neg X)) is (plus X -1). */
895 if (GET_CODE (op) == NEG)
896 return plus_constant (XEXP (op, 0), -1);
897
898 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
899 if (GET_CODE (op) == XOR
900 && GET_CODE (XEXP (op, 1)) == CONST_INT
901 && (temp = simplify_unary_operation (NOT, mode,
902 XEXP (op, 1),
903 mode)) != 0)
904 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
905
906 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
907 if (GET_CODE (op) == PLUS
908 && GET_CODE (XEXP (op, 1)) == CONST_INT
909 && mode_signbit_p (mode, XEXP (op, 1))
910 && (temp = simplify_unary_operation (NOT, mode,
911 XEXP (op, 1),
912 mode)) != 0)
913 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
914
915
916
917 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
918 operands other than 1, but that is not valid. We could do a
919 similar simplification for (not (lshiftrt C X)) where C is
920 just the sign bit, but this doesn't seem common enough to
921 bother with. */
922 if (GET_CODE (op) == ASHIFT
923 && XEXP (op, 0) == const1_rtx)
924 {
925 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
926 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
927 }
928
929 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
930 by reversing the comparison code if valid. */
931 if (STORE_FLAG_VALUE == -1
932 && COMPARISON_P (op)
933 && (reversed = reversed_comparison_code (op, NULL_RTX))
934 != UNKNOWN)
935 return simplify_gen_relational (reversed, mode, VOIDmode,
936 XEXP (op, 0), XEXP (op, 1));
937
938 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
939 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
940 so we can perform the above simplification. */
941
942 if (STORE_FLAG_VALUE == -1
943 && GET_CODE (op) == ASHIFTRT
944 && GET_CODE (XEXP (op, 1)) == CONST_INT
945 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
946 return simplify_gen_relational (GE, mode, VOIDmode,
947 XEXP (op, 0), const0_rtx);
948
949 break;
950
951 case NEG:
952 /* (neg (neg X)) == X. */
953 if (GET_CODE (op) == NEG)
954 return XEXP (op, 0);
955
956 /* (neg (plus X 1)) can become (not X). */
957 if (GET_CODE (op) == PLUS
958 && XEXP (op, 1) == const1_rtx)
959 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
960
961 /* Similarly, (neg (not X)) is (plus X 1). */
962 if (GET_CODE (op) == NOT)
963 return plus_constant (XEXP (op, 0), 1);
964
965 /* (neg (minus X Y)) can become (minus Y X). This transformation
966 isn't safe for modes with signed zeros, since if X and Y are
967 both +0, (minus Y X) is the same as (minus X Y). If the
968 rounding mode is towards +infinity (or -infinity) then the two
969 expressions will be rounded differently. */
970 if (GET_CODE (op) == MINUS
971 && !HONOR_SIGNED_ZEROS (mode)
972 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
973 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
974 XEXP (op, 0));
975
976 if (GET_CODE (op) == PLUS
977 && !HONOR_SIGNED_ZEROS (mode)
978 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
979 {
980 /* (neg (plus A C)) is simplified to (minus -C A). */
981 if (GET_CODE (XEXP (op, 1)) == CONST_INT
982 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
983 {
984 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
985 mode);
986 if (temp)
987 return simplify_gen_binary (MINUS, mode, temp,
988 XEXP (op, 0));
989 }
990
991 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
992 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
993 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
994 }
995
996 /* (neg (mult A B)) becomes (mult (neg A) B).
997 This works even for floating-point values. */
998 if (GET_CODE (op) == MULT
999 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1000 {
1001 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1002 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1003 }
1004
1005 /* NEG commutes with ASHIFT since it is multiplication. Only do
1006 this if we can then eliminate the NEG (e.g., if the operand
1007 is a constant). */
1008 if (GET_CODE (op) == ASHIFT)
1009 {
1010 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1011 mode);
1012 if (temp)
1013 return simplify_gen_binary (ASHIFT, mode, temp,
1014 XEXP (op, 1));
1015 }
1016
1017 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op) == ASHIFTRT
1020 && GET_CODE (XEXP (op, 1)) == CONST_INT
1021 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1022 return simplify_gen_binary (LSHIFTRT, mode,
1023 XEXP (op, 0), XEXP (op, 1));
1024
1025 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1026 C is equal to the width of MODE minus 1. */
1027 if (GET_CODE (op) == LSHIFTRT
1028 && GET_CODE (XEXP (op, 1)) == CONST_INT
1029 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1030 return simplify_gen_binary (ASHIFTRT, mode,
1031 XEXP (op, 0), XEXP (op, 1));
1032
1033 break;
1034
1035 case SIGN_EXTEND:
1036 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1037 becomes just the MINUS if its mode is MODE. This allows
1038 folding switch statements on machines using casesi (such as
1039 the VAX). */
1040 if (GET_CODE (op) == TRUNCATE
1041 && GET_MODE (XEXP (op, 0)) == mode
1042 && GET_CODE (XEXP (op, 0)) == MINUS
1043 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1044 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1045 return XEXP (op, 0);
1046
1047 /* Check for a sign extension of a subreg of a promoted
1048 variable, where the promotion is sign-extended, and the
1049 target mode is the same as the variable's promotion. */
1050 if (GET_CODE (op) == SUBREG
1051 && SUBREG_PROMOTED_VAR_P (op)
1052 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1053 && GET_MODE (XEXP (op, 0)) == mode)
1054 return XEXP (op, 0);
1055
1056 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1057 if (! POINTERS_EXTEND_UNSIGNED
1058 && mode == Pmode && GET_MODE (op) == ptr_mode
1059 && (CONSTANT_P (op)
1060 || (GET_CODE (op) == SUBREG
1061 && REG_P (SUBREG_REG (op))
1062 && REG_POINTER (SUBREG_REG (op))
1063 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1064 return convert_memory_address (Pmode, op);
1065 #endif
1066 break;
1067
1068 case ZERO_EXTEND:
1069 /* Check for a zero extension of a subreg of a promoted
1070 variable, where the promotion is zero-extended, and the
1071 target mode is the same as the variable's promotion. */
1072 if (GET_CODE (op) == SUBREG
1073 && SUBREG_PROMOTED_VAR_P (op)
1074 && SUBREG_PROMOTED_UNSIGNED_P (op)
1075 && GET_MODE (XEXP (op, 0)) == mode)
1076 return XEXP (op, 0);
1077
1078 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1079 if (POINTERS_EXTEND_UNSIGNED > 0
1080 && mode == Pmode && GET_MODE (op) == ptr_mode
1081 && (CONSTANT_P (op)
1082 || (GET_CODE (op) == SUBREG
1083 && REG_P (SUBREG_REG (op))
1084 && REG_POINTER (SUBREG_REG (op))
1085 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1086 return convert_memory_address (Pmode, op);
1087 #endif
1088 break;
1089
1090 default:
1091 break;
1092 }
1093
1094 return 0;
1095 }
1096 }
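/* For example, simplify_unary_operation (NOT, SImode, GEN_INT (5), SImode)
   folds to (const_int -6), and (neg:SI (neg:SI X)) collapses to X even
   when X is not a constant.  */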
1097 \f
1098 /* Subroutine of simplify_binary_operation to simplify a commutative,
1099 associative binary operation CODE with result mode MODE, operating
1100 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1101 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1102 canonicalization is possible. */
1103
1104 static rtx
1105 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1106 rtx op0, rtx op1)
1107 {
1108 rtx tem;
1109
1110 /* Linearize the operator to the left. */
1111 if (GET_CODE (op1) == code)
1112 {
1113 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1114 if (GET_CODE (op0) == code)
1115 {
1116 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1117 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1118 }
1119
1120 /* "a op (b op c)" becomes "(b op c) op a". */
1121 if (! swap_commutative_operands_p (op1, op0))
1122 return simplify_gen_binary (code, mode, op1, op0);
1123
1124 tem = op0;
1125 op0 = op1;
1126 op1 = tem;
1127 }
1128
1129 if (GET_CODE (op0) == code)
1130 {
1131 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1132 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1133 {
1134 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1135 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1136 }
1137
1138 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1139 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1140 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1141 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1142 if (tem != 0)
1143 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1144
1145 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1146 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1147 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1148 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1149 if (tem != 0)
1150 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1151 }
1152
1153 return 0;
1154 }
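/* As an illustration, simplify_associative_operation (AND, SImode,
   (and:SI X (const_int 12)), (const_int 10)) folds the two constants
   together and returns (and:SI X (const_int 8)).  */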
1155
1156 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1157 and OP1. Return 0 if no simplification is possible.
1158
1159 Don't use this for relational operations such as EQ or LT.
1160 Use simplify_relational_operation instead. */
1161 rtx
1162 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1163 rtx op0, rtx op1)
1164 {
1165 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1166 HOST_WIDE_INT val;
1167 unsigned int width = GET_MODE_BITSIZE (mode);
1168 rtx trueop0, trueop1;
1169 rtx tem;
1170
1171 /* Relational operations don't work here. We must know the mode
1172 of the operands in order to do the comparison correctly.
1173 Assuming a full word can give incorrect results.
1174 Consider comparing 128 with -128 in QImode. */
1175 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1176 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1177
1178 /* Make sure the constant is second. */
1179 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1180 && swap_commutative_operands_p (op0, op1))
1181 {
1182 tem = op0, op0 = op1, op1 = tem;
1183 }
1184
1185 trueop0 = avoid_constant_pool_reference (op0);
1186 trueop1 = avoid_constant_pool_reference (op1);
1187
1188 if (VECTOR_MODE_P (mode)
1189 && code != VEC_CONCAT
1190 && GET_CODE (trueop0) == CONST_VECTOR
1191 && GET_CODE (trueop1) == CONST_VECTOR)
1192 {
1193 unsigned n_elts = GET_MODE_NUNITS (mode);
1194 enum machine_mode op0mode = GET_MODE (trueop0);
1195 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
1196 enum machine_mode op1mode = GET_MODE (trueop1);
1197 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
1198 rtvec v = rtvec_alloc (n_elts);
1199 unsigned int i;
1200
1201 gcc_assert (op0_n_elts == n_elts);
1202 gcc_assert (op1_n_elts == n_elts);
1203 for (i = 0; i < n_elts; i++)
1204 {
1205 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1206 CONST_VECTOR_ELT (trueop0, i),
1207 CONST_VECTOR_ELT (trueop1, i));
1208 if (!x)
1209 return 0;
1210 RTVEC_ELT (v, i) = x;
1211 }
1212
1213 return gen_rtx_CONST_VECTOR (mode, v);
1214 }
1215
1216 if (VECTOR_MODE_P (mode)
1217 && code == VEC_CONCAT
1218 && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
1219 {
1220 unsigned n_elts = GET_MODE_NUNITS (mode);
1221 rtvec v = rtvec_alloc (n_elts);
1222
1223 gcc_assert (n_elts >= 2);
1224 if (n_elts == 2)
1225 {
1226 gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
1227 gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);
1228
1229 RTVEC_ELT (v, 0) = trueop0;
1230 RTVEC_ELT (v, 1) = trueop1;
1231 }
1232 else
1233 {
1234 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
1235 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
1236 unsigned i;
1237
1238 gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
1239 gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
1240 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
1241
1242 for (i = 0; i < op0_n_elts; ++i)
1243 RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
1244 for (i = 0; i < op1_n_elts; ++i)
1245 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
1246 }
1247
1248 return gen_rtx_CONST_VECTOR (mode, v);
1249 }
1250
1251 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1252 && GET_CODE (trueop0) == CONST_DOUBLE
1253 && GET_CODE (trueop1) == CONST_DOUBLE
1254 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1255 {
1256 if (code == AND
1257 || code == IOR
1258 || code == XOR)
1259 {
1260 long tmp0[4];
1261 long tmp1[4];
1262 REAL_VALUE_TYPE r;
1263 int i;
1264
1265 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1266 GET_MODE (op0));
1267 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1268 GET_MODE (op1));
1269 for (i = 0; i < 4; i++)
1270 {
1271 switch (code)
1272 {
1273 case AND:
1274 tmp0[i] &= tmp1[i];
1275 break;
1276 case IOR:
1277 tmp0[i] |= tmp1[i];
1278 break;
1279 case XOR:
1280 tmp0[i] ^= tmp1[i];
1281 break;
1282 default:
1283 gcc_unreachable ();
1284 }
1285 }
1286 real_from_target (&r, tmp0, mode);
1287 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1288 }
1289 else
1290 {
1291 REAL_VALUE_TYPE f0, f1, value, result;
1292 bool inexact;
1293
1294 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1295 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1296 real_convert (&f0, mode, &f0);
1297 real_convert (&f1, mode, &f1);
1298
1299 if (HONOR_SNANS (mode)
1300 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1301 return 0;
1302
1303 if (code == DIV
1304 && REAL_VALUES_EQUAL (f1, dconst0)
1305 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1306 return 0;
1307
1308 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1309 && flag_trapping_math
1310 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
1311 {
1312 int s0 = REAL_VALUE_NEGATIVE (f0);
1313 int s1 = REAL_VALUE_NEGATIVE (f1);
1314
1315 switch (code)
1316 {
1317 case PLUS:
1318 /* Inf + -Inf = NaN plus exception. */
1319 if (s0 != s1)
1320 return 0;
1321 break;
1322 case MINUS:
1323 /* Inf - Inf = NaN plus exception. */
1324 if (s0 == s1)
1325 return 0;
1326 break;
1327 case DIV:
1328 /* Inf / Inf = NaN plus exception. */
1329 return 0;
1330 default:
1331 break;
1332 }
1333 }
1334
1335 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1336 && flag_trapping_math
1337 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
1338 || (REAL_VALUE_ISINF (f1)
1339 && REAL_VALUES_EQUAL (f0, dconst0))))
1340 /* Inf * 0 = NaN plus exception. */
1341 return 0;
1342
1343 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
1344 &f0, &f1);
1345 real_convert (&result, mode, &value);
1346
1347 /* Don't constant fold this floating point operation if the
1348 	     result may depend upon the run-time rounding mode and
1349 flag_rounding_math is set, or if GCC's software emulation
1350 is unable to accurately represent the result. */
1351
1352 if ((flag_rounding_math
1353 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
1354 && !flag_unsafe_math_optimizations))
1355 && (inexact || !real_identical (&result, &value)))
1356 return NULL_RTX;
1357
1358 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
1359 }
1360 }
1361
1362 /* We can fold some multi-word operations. */
1363 if (GET_MODE_CLASS (mode) == MODE_INT
1364 && width == HOST_BITS_PER_WIDE_INT * 2
1365 && (GET_CODE (trueop0) == CONST_DOUBLE
1366 || GET_CODE (trueop0) == CONST_INT)
1367 && (GET_CODE (trueop1) == CONST_DOUBLE
1368 || GET_CODE (trueop1) == CONST_INT))
1369 {
1370 unsigned HOST_WIDE_INT l1, l2, lv, lt;
1371 HOST_WIDE_INT h1, h2, hv, ht;
1372
1373 if (GET_CODE (trueop0) == CONST_DOUBLE)
1374 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1375 else
1376 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1377
1378 if (GET_CODE (trueop1) == CONST_DOUBLE)
1379 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1380 else
1381 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1382
1383 switch (code)
1384 {
1385 case MINUS:
1386 /* A - B == A + (-B). */
1387 neg_double (l2, h2, &lv, &hv);
1388 l2 = lv, h2 = hv;
1389
1390 /* Fall through.... */
1391
1392 case PLUS:
1393 add_double (l1, h1, l2, h2, &lv, &hv);
1394 break;
1395
1396 case MULT:
1397 mul_double (l1, h1, l2, h2, &lv, &hv);
1398 break;
1399
1400 case DIV:
1401 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1402 &lv, &hv, &lt, &ht))
1403 return 0;
1404 break;
1405
1406 case MOD:
1407 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1408 &lt, &ht, &lv, &hv))
1409 return 0;
1410 break;
1411
1412 case UDIV:
1413 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1414 &lv, &hv, &lt, &ht))
1415 return 0;
1416 break;
1417
1418 case UMOD:
1419 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1420 &lt, &ht, &lv, &hv))
1421 return 0;
1422 break;
1423
1424 case AND:
1425 lv = l1 & l2, hv = h1 & h2;
1426 break;
1427
1428 case IOR:
1429 lv = l1 | l2, hv = h1 | h2;
1430 break;
1431
1432 case XOR:
1433 lv = l1 ^ l2, hv = h1 ^ h2;
1434 break;
1435
1436 case SMIN:
1437 if (h1 < h2
1438 || (h1 == h2
1439 && ((unsigned HOST_WIDE_INT) l1
1440 < (unsigned HOST_WIDE_INT) l2)))
1441 lv = l1, hv = h1;
1442 else
1443 lv = l2, hv = h2;
1444 break;
1445
1446 case SMAX:
1447 if (h1 > h2
1448 || (h1 == h2
1449 && ((unsigned HOST_WIDE_INT) l1
1450 > (unsigned HOST_WIDE_INT) l2)))
1451 lv = l1, hv = h1;
1452 else
1453 lv = l2, hv = h2;
1454 break;
1455
1456 case UMIN:
1457 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1458 || (h1 == h2
1459 && ((unsigned HOST_WIDE_INT) l1
1460 < (unsigned HOST_WIDE_INT) l2)))
1461 lv = l1, hv = h1;
1462 else
1463 lv = l2, hv = h2;
1464 break;
1465
1466 case UMAX:
1467 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1468 || (h1 == h2
1469 && ((unsigned HOST_WIDE_INT) l1
1470 > (unsigned HOST_WIDE_INT) l2)))
1471 lv = l1, hv = h1;
1472 else
1473 lv = l2, hv = h2;
1474 break;
1475
1476 case LSHIFTRT: case ASHIFTRT:
1477 case ASHIFT:
1478 case ROTATE: case ROTATERT:
1479 if (SHIFT_COUNT_TRUNCATED)
1480 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1481
1482 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1483 return 0;
1484
1485 if (code == LSHIFTRT || code == ASHIFTRT)
1486 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1487 code == ASHIFTRT);
1488 else if (code == ASHIFT)
1489 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1490 else if (code == ROTATE)
1491 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1492 else /* code == ROTATERT */
1493 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1494 break;
1495
1496 default:
1497 return 0;
1498 }
1499
1500 return immed_double_const (lv, hv, mode);
1501 }
1502
1503 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1504 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1505 {
1506 /* Even if we can't compute a constant result,
1507 there are some cases worth simplifying. */
1508
1509 switch (code)
1510 {
1511 case PLUS:
1512 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1513 when x is NaN, infinite, or finite and nonzero. They aren't
1514 when x is -0 and the rounding mode is not towards -infinity,
1515 since (-0) + 0 is then 0. */
1516 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1517 return op0;
1518
1519 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1520 transformations are safe even for IEEE. */
1521 if (GET_CODE (op0) == NEG)
1522 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1523 else if (GET_CODE (op1) == NEG)
1524 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1525
1526 /* (~a) + 1 -> -a */
1527 if (INTEGRAL_MODE_P (mode)
1528 && GET_CODE (op0) == NOT
1529 && trueop1 == const1_rtx)
1530 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1531
1532 /* Handle both-operands-constant cases. We can only add
1533 CONST_INTs to constants since the sum of relocatable symbols
1534 can't be handled by most assemblers. Don't add CONST_INT
1535 to CONST_INT since overflow won't be computed properly if wider
1536 than HOST_BITS_PER_WIDE_INT. */
1537
1538 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1539 && GET_CODE (op1) == CONST_INT)
1540 return plus_constant (op0, INTVAL (op1));
1541 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1542 && GET_CODE (op0) == CONST_INT)
1543 return plus_constant (op1, INTVAL (op0));
1544
1545 /* See if this is something like X * C - X or vice versa or
1546 if the multiplication is written as a shift. If so, we can
1547 distribute and make a new multiply, shift, or maybe just
1548 have X (if C is 2 in the example above). But don't make
1549 something more expensive than we had before. */
1550
1551 if (! FLOAT_MODE_P (mode))
1552 {
1553 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1554 rtx lhs = op0, rhs = op1;
1555
1556 if (GET_CODE (lhs) == NEG)
1557 coeff0 = -1, lhs = XEXP (lhs, 0);
1558 else if (GET_CODE (lhs) == MULT
1559 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1560 {
1561 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1562 }
1563 else if (GET_CODE (lhs) == ASHIFT
1564 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1565 && INTVAL (XEXP (lhs, 1)) >= 0
1566 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1567 {
1568 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1569 lhs = XEXP (lhs, 0);
1570 }
1571
1572 if (GET_CODE (rhs) == NEG)
1573 coeff1 = -1, rhs = XEXP (rhs, 0);
1574 else if (GET_CODE (rhs) == MULT
1575 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1576 {
1577 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1578 }
1579 else if (GET_CODE (rhs) == ASHIFT
1580 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1581 && INTVAL (XEXP (rhs, 1)) >= 0
1582 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1583 {
1584 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1585 rhs = XEXP (rhs, 0);
1586 }
1587
1588 if (rtx_equal_p (lhs, rhs))
1589 {
1590 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1591 tem = simplify_gen_binary (MULT, mode, lhs,
1592 GEN_INT (coeff0 + coeff1));
1593 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1594 ? tem : 0;
1595 }
1596 }
1597
1598 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1599 if ((GET_CODE (op1) == CONST_INT
1600 || GET_CODE (op1) == CONST_DOUBLE)
1601 && GET_CODE (op0) == XOR
1602 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1603 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1604 && mode_signbit_p (mode, op1))
1605 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1606 simplify_gen_binary (XOR, mode, op1,
1607 XEXP (op0, 1)));
1608
1609 /* If one of the operands is a PLUS or a MINUS, see if we can
1610 simplify this by the associative law.
1611 Don't use the associative law for floating point.
1612 The inaccuracy makes it nonassociative,
1613 and subtle programs can break if operations are associated. */
1614
1615 if (INTEGRAL_MODE_P (mode)
1616 && (plus_minus_operand_p (op0)
1617 || plus_minus_operand_p (op1))
1618 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1619 return tem;
1620
1621 /* Reassociate floating point addition only when the user
1622 specifies unsafe math optimizations. */
1623 if (FLOAT_MODE_P (mode)
1624 && flag_unsafe_math_optimizations)
1625 {
1626 tem = simplify_associative_operation (code, mode, op0, op1);
1627 if (tem)
1628 return tem;
1629 }
1630 break;
1631
1632 case COMPARE:
1633 #ifdef HAVE_cc0
1634 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1635 using cc0, in which case we want to leave it as a COMPARE
1636 so we can distinguish it from a register-register-copy.
1637
1638 In IEEE floating point, x-0 is not the same as x. */
1639
1640 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1641 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1642 && trueop1 == CONST0_RTX (mode))
1643 return op0;
1644 #endif
1645
1646 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1647 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1648 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1649 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1650 {
1651 rtx xop00 = XEXP (op0, 0);
1652 rtx xop10 = XEXP (op1, 0);
1653
1654 #ifdef HAVE_cc0
1655 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1656 #else
1657 if (REG_P (xop00) && REG_P (xop10)
1658 && GET_MODE (xop00) == GET_MODE (xop10)
1659 && REGNO (xop00) == REGNO (xop10)
1660 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1661 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1662 #endif
1663 return xop00;
1664 }
1665 break;
1666
1667 case MINUS:
1668 /* We can't assume x-x is 0 even with non-IEEE floating point,
1669 but since it is zero except in very strange circumstances, we
1670 will treat it as zero with -funsafe-math-optimizations. */
1671 if (rtx_equal_p (trueop0, trueop1)
1672 && ! side_effects_p (op0)
1673 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1674 return CONST0_RTX (mode);
1675
1676 /* Change subtraction from zero into negation. (0 - x) is the
1677 same as -x when x is NaN, infinite, or finite and nonzero.
1678 But if the mode has signed zeros, and does not round towards
1679 -infinity, then 0 - 0 is 0, not -0. */
1680 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1681 return simplify_gen_unary (NEG, mode, op1, mode);
1682
1683 /* (-1 - a) is ~a. */
1684 if (trueop0 == constm1_rtx)
1685 return simplify_gen_unary (NOT, mode, op1, mode);
1686
1687 /* Subtracting 0 has no effect unless the mode has signed zeros
1688 and supports rounding towards -infinity. In such a case,
1689 0 - 0 is -0. */
1690 if (!(HONOR_SIGNED_ZEROS (mode)
1691 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1692 && trueop1 == CONST0_RTX (mode))
1693 return op0;
1694
1695 /* See if this is something like X * C - X or vice versa or
1696 if the multiplication is written as a shift. If so, we can
1697 distribute and make a new multiply, shift, or maybe just
1698 have X (if C is 2 in the example above). But don't make
1699 something more expensive than we had before. */
1700
1701 if (! FLOAT_MODE_P (mode))
1702 {
1703 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1704 rtx lhs = op0, rhs = op1;
1705
1706 if (GET_CODE (lhs) == NEG)
1707 coeff0 = -1, lhs = XEXP (lhs, 0);
1708 else if (GET_CODE (lhs) == MULT
1709 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1710 {
1711 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1712 }
1713 else if (GET_CODE (lhs) == ASHIFT
1714 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1715 && INTVAL (XEXP (lhs, 1)) >= 0
1716 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1717 {
1718 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1719 lhs = XEXP (lhs, 0);
1720 }
1721
1722 if (GET_CODE (rhs) == NEG)
1723 coeff1 = - 1, rhs = XEXP (rhs, 0);
1724 else if (GET_CODE (rhs) == MULT
1725 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1726 {
1727 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1728 }
1729 else if (GET_CODE (rhs) == ASHIFT
1730 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1731 && INTVAL (XEXP (rhs, 1)) >= 0
1732 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1733 {
1734 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1735 rhs = XEXP (rhs, 0);
1736 }
1737
1738 if (rtx_equal_p (lhs, rhs))
1739 {
1740 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1741 tem = simplify_gen_binary (MULT, mode, lhs,
1742 GEN_INT (coeff0 - coeff1));
1743 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1744 ? tem : 0;
1745 }
1746 }
1747
1748 /* (a - (-b)) -> (a + b). True even for IEEE. */
1749 if (GET_CODE (op1) == NEG)
1750 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1751
1752 /* (-x - c) may be simplified as (-c - x). */
1753 if (GET_CODE (op0) == NEG
1754 && (GET_CODE (op1) == CONST_INT
1755 || GET_CODE (op1) == CONST_DOUBLE))
1756 {
1757 tem = simplify_unary_operation (NEG, mode, op1, mode);
1758 if (tem)
1759 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1760 }
1761
1762 /* If one of the operands is a PLUS or a MINUS, see if we can
1763 simplify this by the associative law.
1764 Don't use the associative law for floating point.
1765 The inaccuracy makes it nonassociative,
1766 and subtle programs can break if operations are associated. */
1767
1768 if (INTEGRAL_MODE_P (mode)
1769 && (plus_minus_operand_p (op0)
1770 || plus_minus_operand_p (op1))
1771 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1772 return tem;
1773
1774 /* Don't let a relocatable value get a negative coeff. */
1775 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1776 return simplify_gen_binary (PLUS, mode,
1777 op0,
1778 neg_const_int (mode, op1));
1779
1780 /* (x - (x & y)) -> (x & ~y) */
1781 if (GET_CODE (op1) == AND)
1782 {
1783 if (rtx_equal_p (op0, XEXP (op1, 0)))
1784 {
1785 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1786 GET_MODE (XEXP (op1, 1)));
1787 return simplify_gen_binary (AND, mode, op0, tem);
1788 }
1789 if (rtx_equal_p (op0, XEXP (op1, 1)))
1790 {
1791 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1792 GET_MODE (XEXP (op1, 0)));
1793 return simplify_gen_binary (AND, mode, op0, tem);
1794 }
1795 }
1796 break;
1797
1798 case MULT:
1799 if (trueop1 == constm1_rtx)
1800 return simplify_gen_unary (NEG, mode, op0, mode);
1801
1802 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1803 x is NaN, since x * 0 is then also NaN. Nor is it valid
1804 when the mode has signed zeros, since multiplying a negative
1805 number by 0 will give -0, not 0. */
1806 if (!HONOR_NANS (mode)
1807 && !HONOR_SIGNED_ZEROS (mode)
1808 && trueop1 == CONST0_RTX (mode)
1809 && ! side_effects_p (op0))
1810 return op1;
1811
1812 /* In IEEE floating point, x*1 is not equivalent to x for
1813 signalling NaNs. */
1814 if (!HONOR_SNANS (mode)
1815 && trueop1 == CONST1_RTX (mode))
1816 return op0;
1817
1818 /* Convert multiply by constant power of two into shift unless
1819 we are still generating RTL. This test is a kludge. */
1820 if (GET_CODE (trueop1) == CONST_INT
1821 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1822 /* If the mode is larger than the host word size, and the
1823 uppermost bit is set, then this isn't a power of two due
1824 to implicit sign extension. */
1825 && (width <= HOST_BITS_PER_WIDE_INT
1826 || val != HOST_BITS_PER_WIDE_INT - 1))
1827 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
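	  /* For example, the conversion just above turns
	     (mult:SI X (const_int 8)) into (ashift:SI X (const_int 3)).  */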
1828
1829 /* x*2 is x+x and x*(-1) is -x */
1830 if (GET_CODE (trueop1) == CONST_DOUBLE
1831 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1832 && GET_MODE (op0) == mode)
1833 {
1834 REAL_VALUE_TYPE d;
1835 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1836
1837 if (REAL_VALUES_EQUAL (d, dconst2))
1838 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1839
1840 if (REAL_VALUES_EQUAL (d, dconstm1))
1841 return simplify_gen_unary (NEG, mode, op0, mode);
1842 }
1843
1844 /* Reassociate multiplication, but for floating point MULTs
1845 only when the user specifies unsafe math optimizations. */
1846 if (! FLOAT_MODE_P (mode)
1847 || flag_unsafe_math_optimizations)
1848 {
1849 tem = simplify_associative_operation (code, mode, op0, op1);
1850 if (tem)
1851 return tem;
1852 }
1853 break;
1854
1855 case IOR:
1856 if (trueop1 == const0_rtx)
1857 return op0;
1858 if (GET_CODE (trueop1) == CONST_INT
1859 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1860 == GET_MODE_MASK (mode)))
1861 return op1;
1862 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1863 return op0;
1864 /* A | (~A) -> -1 */
1865 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1866 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1867 && ! side_effects_p (op0)
1868 && GET_MODE_CLASS (mode) != MODE_CC)
1869 return constm1_rtx;
1870 tem = simplify_associative_operation (code, mode, op0, op1);
1871 if (tem)
1872 return tem;
1873 break;
1874
1875 case XOR:
1876 if (trueop1 == const0_rtx)
1877 return op0;
1878 if (GET_CODE (trueop1) == CONST_INT
1879 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1880 == GET_MODE_MASK (mode)))
1881 return simplify_gen_unary (NOT, mode, op0, mode);
1882 if (trueop0 == trueop1
1883 && ! side_effects_p (op0)
1884 && GET_MODE_CLASS (mode) != MODE_CC)
1885 return const0_rtx;
1886
1887 /* Canonicalize XOR of the most significant bit to PLUS. */
1888 if ((GET_CODE (op1) == CONST_INT
1889 || GET_CODE (op1) == CONST_DOUBLE)
1890 && mode_signbit_p (mode, op1))
1891 return simplify_gen_binary (PLUS, mode, op0, op1);
1892 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1893 if ((GET_CODE (op1) == CONST_INT
1894 || GET_CODE (op1) == CONST_DOUBLE)
1895 && GET_CODE (op0) == PLUS
1896 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1897 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1898 && mode_signbit_p (mode, XEXP (op0, 1)))
1899 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1900 simplify_gen_binary (XOR, mode, op1,
1901 XEXP (op0, 1)));
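 /* Editorial example: in SImode the sign bit is 0x80000000, and since
 x ^ 0x80000000 equals x + 0x80000000 modulo 2^32 (the carry out of the
 top bit is discarded), a hypothetical (xor:SI (reg:SI 61) (const_int
 -2147483648)) is canonicalized to the corresponding PLUS. In the
 (xor (plus X C1) C2) form the two constants are folded together first. */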
1902
1903 tem = simplify_associative_operation (code, mode, op0, op1);
1904 if (tem)
1905 return tem;
1906 break;
1907
1908 case AND:
1909 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1910 return const0_rtx;
1911 /* If we are turning off bits already known off in OP0, we need
1912 not do an AND. */
1913 if (GET_CODE (trueop1) == CONST_INT
1914 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1915 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1916 return op0;
1917 if (trueop0 == trueop1 && ! side_effects_p (op0)
1918 && GET_MODE_CLASS (mode) != MODE_CC)
1919 return op0;
1920 /* A & (~A) -> 0 */
1921 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1922 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1923 && ! side_effects_p (op0)
1924 && GET_MODE_CLASS (mode) != MODE_CC)
1925 return const0_rtx;
1926
1927 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1928 there are no nonzero bits of C outside of X's mode. */
1929 if ((GET_CODE (op0) == SIGN_EXTEND
1930 || GET_CODE (op0) == ZERO_EXTEND)
1931 && GET_CODE (trueop1) == CONST_INT
1932 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1933 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
1934 & INTVAL (trueop1)) == 0)
1935 {
1936 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
1937 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
1938 gen_int_mode (INTVAL (trueop1),
1939 imode));
1940 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
1941 }
1942
1943 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1944 ((A & N) + B) & M -> (A + B) & M
1945 Similarly if (N & M) == 0,
1946 ((A | N) + B) & M -> (A + B) & M
1947 and for - instead of + and/or ^ instead of |. */
1948 if (GET_CODE (trueop1) == CONST_INT
1949 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1950 && ~INTVAL (trueop1)
1951 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1952 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1953 {
1954 rtx pmop[2];
1955 int which;
1956
1957 pmop[0] = XEXP (op0, 0);
1958 pmop[1] = XEXP (op0, 1);
1959
1960 for (which = 0; which < 2; which++)
1961 {
1962 tem = pmop[which];
1963 switch (GET_CODE (tem))
1964 {
1965 case AND:
1966 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1967 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1968 == INTVAL (trueop1))
1969 pmop[which] = XEXP (tem, 0);
1970 break;
1971 case IOR:
1972 case XOR:
1973 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1974 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1975 pmop[which] = XEXP (tem, 0);
1976 break;
1977 default:
1978 break;
1979 }
1980 }
1981
1982 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1983 {
1984 tem = simplify_gen_binary (GET_CODE (op0), mode,
1985 pmop[0], pmop[1]);
1986 return simplify_gen_binary (code, mode, tem, op1);
1987 }
1988 }
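 /* Editorial worked example: take M == 0xff, so M == (1 << 8) - 1, and
 op0 == (plus (and A (const_int 0x1ff)) B). Since (0x1ff & 0xff) == 0xff,
 the low eight bits of (A & 0x1ff) equal the low eight bits of A, and
 carries in the addition only move towards higher bits, so
 ((A & 0x1ff) + B) & 0xff simplifies to (A + B) & 0xff. A and B are
 placeholder operands for illustration. */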
1989 tem = simplify_associative_operation (code, mode, op0, op1);
1990 if (tem)
1991 return tem;
1992 break;
1993
1994 case UDIV:
1995 /* 0/x is 0 (or x&0 if x has side-effects). */
1996 if (trueop0 == const0_rtx)
1997 return side_effects_p (op1)
1998 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1999 : const0_rtx;
2000 /* x/1 is x. */
2001 if (trueop1 == const1_rtx)
2002 {
2003 /* Handle narrowing UDIV. */
2004 rtx x = gen_lowpart_common (mode, op0);
2005 if (x)
2006 return x;
2007 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2008 return gen_lowpart_SUBREG (mode, op0);
2009 return op0;
2010 }
2011 /* Convert divide by power of two into shift. */
2012 if (GET_CODE (trueop1) == CONST_INT
2013 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
2014 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
2015 break;
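 /* Editorial example: (udiv:SI (reg:SI 62) (const_int 16)) becomes
 (lshiftrt:SI (reg:SI 62) (const_int 4)), since exact_log2 (16) == 4.
 The register number is made up for the example. */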
2016
2017 case DIV:
2018 /* Handle floating point and integers separately. */
2019 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2020 {
2021 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2022 safe for modes with NaNs, since 0.0 / 0.0 will then be
2023 NaN rather than 0.0. Nor is it safe for modes with signed
2024 zeros, since dividing 0 by a negative number gives -0.0. */
2025 if (trueop0 == CONST0_RTX (mode)
2026 && !HONOR_NANS (mode)
2027 && !HONOR_SIGNED_ZEROS (mode)
2028 && ! side_effects_p (op1))
2029 return op0;
2030 /* x/1.0 is x. */
2031 if (trueop1 == CONST1_RTX (mode)
2032 && !HONOR_SNANS (mode))
2033 return op0;
2034
2035 if (GET_CODE (trueop1) == CONST_DOUBLE
2036 && trueop1 != CONST0_RTX (mode))
2037 {
2038 REAL_VALUE_TYPE d;
2039 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2040
2041 /* x/-1.0 is -x. */
2042 if (REAL_VALUES_EQUAL (d, dconstm1)
2043 && !HONOR_SNANS (mode))
2044 return simplify_gen_unary (NEG, mode, op0, mode);
2045
2046 /* Change FP division by a constant into multiplication.
2047 Only do this with -funsafe-math-optimizations. */
2048 if (flag_unsafe_math_optimizations
2049 && !REAL_VALUES_EQUAL (d, dconst0))
2050 {
2051 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2052 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2053 return simplify_gen_binary (MULT, mode, op0, tem);
2054 }
2055 }
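 /* Editorial example, valid only under -funsafe-math-optimizations:
 dividing a DFmode register by the constant 4.0 would be turned into
 multiplying it by 0.25, since the reciprocal is computed with
 REAL_ARITHMETIC and rebuilt as a CONST_DOUBLE. */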
2056 }
2057 else
2058 {
2059 /* 0/x is 0 (or x&0 if x has side-effects). */
2060 if (trueop0 == const0_rtx)
2061 return side_effects_p (op1)
2062 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2063 : const0_rtx;
2064 /* x/1 is x. */
2065 if (trueop1 == const1_rtx)
2066 {
2067 /* Handle narrowing DIV. */
2068 rtx x = gen_lowpart_common (mode, op0);
2069 if (x)
2070 return x;
2071 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2072 return gen_lowpart_SUBREG (mode, op0);
2073 return op0;
2074 }
2075 /* x/-1 is -x. */
2076 if (trueop1 == constm1_rtx)
2077 {
2078 rtx x = gen_lowpart_common (mode, op0);
2079 if (!x)
2080 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2081 ? gen_lowpart_SUBREG (mode, op0) : op0;
2082 return simplify_gen_unary (NEG, mode, x, mode);
2083 }
2084 }
2085 break;
2086
2087 case UMOD:
2088 /* 0%x is 0 (or x&0 if x has side-effects). */
2089 if (trueop0 == const0_rtx)
2090 return side_effects_p (op1)
2091 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2092 : const0_rtx;
2093 /* x%1 is 0 (or x&0 if x has side-effects). */
2094 if (trueop1 == const1_rtx)
2095 return side_effects_p (op0)
2096 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2097 : const0_rtx;
2098 /* Implement modulus by power of two as AND. */
2099 if (GET_CODE (trueop1) == CONST_INT
2100 && exact_log2 (INTVAL (trueop1)) > 0)
2101 return simplify_gen_binary (AND, mode, op0,
2102 GEN_INT (INTVAL (op1) - 1));
2103 break;
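 /* Editorial example: (umod:SI (reg:SI 64) (const_int 8)) becomes
 (and:SI (reg:SI 64) (const_int 7)), because an unsigned remainder by a
 power of two only keeps the low-order bits. */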
2104
2105 case MOD:
2106 /* 0%x is 0 (or x&0 if x has side-effects). */
2107 if (trueop0 == const0_rtx)
2108 return side_effects_p (op1)
2109 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2110 : const0_rtx;
2111 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2112 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2113 return side_effects_p (op0)
2114 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2115 : const0_rtx;
2116 break;
2117
2118 case ROTATERT:
2119 case ROTATE:
2120 case ASHIFTRT:
2121 /* Rotating ~0 always results in ~0. */
2122 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2123 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2124 && ! side_effects_p (op1))
2125 return op0;
2126
2127 /* Fall through.... */
2128
2129 case ASHIFT:
2130 case LSHIFTRT:
2131 if (trueop1 == const0_rtx)
2132 return op0;
2133 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2134 return op0;
2135 break;
2136
2137 case SMIN:
2138 if (width <= HOST_BITS_PER_WIDE_INT
2139 && GET_CODE (trueop1) == CONST_INT
2140 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2141 && ! side_effects_p (op0))
2142 return op1;
2143 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2144 return op0;
2145 tem = simplify_associative_operation (code, mode, op0, op1);
2146 if (tem)
2147 return tem;
2148 break;
2149
2150 case SMAX:
2151 if (width <= HOST_BITS_PER_WIDE_INT
2152 && GET_CODE (trueop1) == CONST_INT
2153 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2154 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2155 && ! side_effects_p (op0))
2156 return op1;
2157 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2158 return op0;
2159 tem = simplify_associative_operation (code, mode, op0, op1);
2160 if (tem)
2161 return tem;
2162 break;
2163
2164 case UMIN:
2165 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2166 return op1;
2167 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2168 return op0;
2169 tem = simplify_associative_operation (code, mode, op0, op1);
2170 if (tem)
2171 return tem;
2172 break;
2173
2174 case UMAX:
2175 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2176 return op1;
2177 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2178 return op0;
2179 tem = simplify_associative_operation (code, mode, op0, op1);
2180 if (tem)
2181 return tem;
2182 break;
2183
2184 case SS_PLUS:
2185 case US_PLUS:
2186 case SS_MINUS:
2187 case US_MINUS:
2188 /* ??? There are simplifications that can be done. */
2189 return 0;
2190
2191 case VEC_SELECT:
2192 if (!VECTOR_MODE_P (mode))
2193 {
2194 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2195 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2196 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2197 gcc_assert (XVECLEN (trueop1, 0) == 1);
2198 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2199
2200 if (GET_CODE (trueop0) == CONST_VECTOR)
2201 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2202 (trueop1, 0, 0)));
2203 }
2204 else
2205 {
2206 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2207 gcc_assert (GET_MODE_INNER (mode)
2208 == GET_MODE_INNER (GET_MODE (trueop0)));
2209 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2210
2211 if (GET_CODE (trueop0) == CONST_VECTOR)
2212 {
2213 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2214 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2215 rtvec v = rtvec_alloc (n_elts);
2216 unsigned int i;
2217
2218 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2219 for (i = 0; i < n_elts; i++)
2220 {
2221 rtx x = XVECEXP (trueop1, 0, i);
2222
2223 gcc_assert (GET_CODE (x) == CONST_INT);
2224 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2225 INTVAL (x));
2226 }
2227
2228 return gen_rtx_CONST_VECTOR (mode, v);
2229 }
2230 }
2231 return 0;
2232 case VEC_CONCAT:
2233 {
2234 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2235 ? GET_MODE (trueop0)
2236 : GET_MODE_INNER (mode));
2237 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2238 ? GET_MODE (trueop1)
2239 : GET_MODE_INNER (mode));
2240
2241 gcc_assert (VECTOR_MODE_P (mode));
2242 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2243 == GET_MODE_SIZE (mode));
2244
2245 if (VECTOR_MODE_P (op0_mode))
2246 gcc_assert (GET_MODE_INNER (mode)
2247 == GET_MODE_INNER (op0_mode));
2248 else
2249 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2250
2251 if (VECTOR_MODE_P (op1_mode))
2252 gcc_assert (GET_MODE_INNER (mode)
2253 == GET_MODE_INNER (op1_mode));
2254 else
2255 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2256
2257 if ((GET_CODE (trueop0) == CONST_VECTOR
2258 || GET_CODE (trueop0) == CONST_INT
2259 || GET_CODE (trueop0) == CONST_DOUBLE)
2260 && (GET_CODE (trueop1) == CONST_VECTOR
2261 || GET_CODE (trueop1) == CONST_INT
2262 || GET_CODE (trueop1) == CONST_DOUBLE))
2263 {
2264 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2265 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2266 rtvec v = rtvec_alloc (n_elts);
2267 unsigned int i;
2268 unsigned in_n_elts = 1;
2269
2270 if (VECTOR_MODE_P (op0_mode))
2271 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2272 for (i = 0; i < n_elts; i++)
2273 {
2274 if (i < in_n_elts)
2275 {
2276 if (!VECTOR_MODE_P (op0_mode))
2277 RTVEC_ELT (v, i) = trueop0;
2278 else
2279 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2280 }
2281 else
2282 {
2283 if (!VECTOR_MODE_P (op1_mode))
2284 RTVEC_ELT (v, i) = trueop1;
2285 else
2286 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2287 i - in_n_elts);
2288 }
2289 }
2290
2291 return gen_rtx_CONST_VECTOR (mode, v);
2292 }
2293 }
2294 return 0;
2295
2296 default:
2297 gcc_unreachable ();
2298 }
2299
2300 return 0;
2301 }
2302
2303 /* Get the integer argument values in two forms:
2304 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2305
2306 arg0 = INTVAL (trueop0);
2307 arg1 = INTVAL (trueop1);
2308
2309 if (width < HOST_BITS_PER_WIDE_INT)
2310 {
2311 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2312 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2313
2314 arg0s = arg0;
2315 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2316 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2317
2318 arg1s = arg1;
2319 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2320 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2321 }
2322 else
2323 {
2324 arg0s = arg0;
2325 arg1s = arg1;
2326 }
2327
2328 /* Compute the value of the arithmetic. */
2329
2330 switch (code)
2331 {
2332 case PLUS:
2333 val = arg0s + arg1s;
2334 break;
2335
2336 case MINUS:
2337 val = arg0s - arg1s;
2338 break;
2339
2340 case MULT:
2341 val = arg0s * arg1s;
2342 break;
2343
2344 case DIV:
2345 if (arg1s == 0
2346 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2347 && arg1s == -1))
2348 return 0;
2349 val = arg0s / arg1s;
2350 break;
2351
2352 case MOD:
2353 if (arg1s == 0
2354 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2355 && arg1s == -1))
2356 return 0;
2357 val = arg0s % arg1s;
2358 break;
2359
2360 case UDIV:
2361 if (arg1 == 0
2362 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2363 && arg1s == -1))
2364 return 0;
2365 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2366 break;
2367
2368 case UMOD:
2369 if (arg1 == 0
2370 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2371 && arg1s == -1))
2372 return 0;
2373 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2374 break;
2375
2376 case AND:
2377 val = arg0 & arg1;
2378 break;
2379
2380 case IOR:
2381 val = arg0 | arg1;
2382 break;
2383
2384 case XOR:
2385 val = arg0 ^ arg1;
2386 break;
2387
2388 case LSHIFTRT:
2389 case ASHIFT:
2390 case ASHIFTRT:
2391 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2392 value is in range. We can't return any old value for out-of-range
2393 arguments because either the middle-end (via shift_truncation_mask)
2394 or the back-end might be relying on target-specific knowledge.
2395 Nor can we rely on shift_truncation_mask, since the shift might
2396 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
2397 if (SHIFT_COUNT_TRUNCATED)
2398 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2399 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2400 return 0;
2401
2402 val = (code == ASHIFT
2403 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2404 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2405
2406 /* Sign-extend the result for arithmetic right shifts. */
2407 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2408 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2409 break;
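 /* Editorial worked example for an 8-bit mode: arg0 == -4 is held as 0xfc,
 so the logical shift right by 1 gives 0x7e; because arg0s < 0, the final
 OR with (-1 << 7) fills in the sign bits, and truncation to the mode
 leaves 0xfe, i.e. -2, the correct arithmetic-shift result. */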
2410
2411 case ROTATERT:
2412 if (arg1 < 0)
2413 return 0;
2414
2415 arg1 %= width;
2416 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2417 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2418 break;
2419
2420 case ROTATE:
2421 if (arg1 < 0)
2422 return 0;
2423
2424 arg1 %= width;
2425 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2426 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2427 break;
2428
2429 case COMPARE:
2430 /* Do nothing here. */
2431 return 0;
2432
2433 case SMIN:
2434 val = arg0s <= arg1s ? arg0s : arg1s;
2435 break;
2436
2437 case UMIN:
2438 val = ((unsigned HOST_WIDE_INT) arg0
2439 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2440 break;
2441
2442 case SMAX:
2443 val = arg0s > arg1s ? arg0s : arg1s;
2444 break;
2445
2446 case UMAX:
2447 val = ((unsigned HOST_WIDE_INT) arg0
2448 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2449 break;
2450
2451 case SS_PLUS:
2452 case US_PLUS:
2453 case SS_MINUS:
2454 case US_MINUS:
2455 /* ??? There are simplifications that can be done. */
2456 return 0;
2457
2458 default:
2459 gcc_unreachable ();
2460 }
2461
2462 val = trunc_int_for_mode (val, mode);
2463
2464 return GEN_INT (val);
2465 }
2466 \f
2467 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2468 PLUS or MINUS.
2469
2470 Rather than test for specific cases, we do this by a brute-force method
2471 and do all possible simplifications until no more changes occur. Then
2472 we rebuild the operation.
2473
2474 If FORCE is true, then always generate the rtx. This is used to
2475 canonicalize stuff emitted from simplify_gen_binary. Note that this
2476 can still fail if the rtx is too complex. It won't fail just because
2477 the result is not 'simpler' than the input, however. */
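 /* Editorial sketch of the flattening step below (the operands are
 placeholders): for a hypothetical input (minus (plus x c1) (neg y)),
 the expansion loop fills the ops[] array with x, c1 and y, all with a
 positive sign, since the MINUS of a NEG cancels into an addition; the
 second loop then simplifies pairs of entries against each other. */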
2478
2479 struct simplify_plus_minus_op_data
2480 {
2481 rtx op;
2482 int neg;
2483 };
2484
2485 static int
2486 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2487 {
2488 const struct simplify_plus_minus_op_data *d1 = p1;
2489 const struct simplify_plus_minus_op_data *d2 = p2;
2490
2491 return (commutative_operand_precedence (d2->op)
2492 - commutative_operand_precedence (d1->op));
2493 }
2494
2495 static rtx
2496 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2497 rtx op1, int force)
2498 {
2499 struct simplify_plus_minus_op_data ops[8];
2500 rtx result, tem;
2501 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2502 int first, changed;
2503 int i, j;
2504
2505 memset (ops, 0, sizeof ops);
2506
2507 /* Set up the two operands and then expand them until nothing has been
2508 changed. If we run out of room in our array, give up; this should
2509 almost never happen. */
2510
2511 ops[0].op = op0;
2512 ops[0].neg = 0;
2513 ops[1].op = op1;
2514 ops[1].neg = (code == MINUS);
2515
2516 do
2517 {
2518 changed = 0;
2519
2520 for (i = 0; i < n_ops; i++)
2521 {
2522 rtx this_op = ops[i].op;
2523 int this_neg = ops[i].neg;
2524 enum rtx_code this_code = GET_CODE (this_op);
2525
2526 switch (this_code)
2527 {
2528 case PLUS:
2529 case MINUS:
2530 if (n_ops == 7)
2531 return NULL_RTX;
2532
2533 ops[n_ops].op = XEXP (this_op, 1);
2534 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2535 n_ops++;
2536
2537 ops[i].op = XEXP (this_op, 0);
2538 input_ops++;
2539 changed = 1;
2540 break;
2541
2542 case NEG:
2543 ops[i].op = XEXP (this_op, 0);
2544 ops[i].neg = ! this_neg;
2545 changed = 1;
2546 break;
2547
2548 case CONST:
2549 if (n_ops < 7
2550 && GET_CODE (XEXP (this_op, 0)) == PLUS
2551 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2552 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2553 {
2554 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2555 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2556 ops[n_ops].neg = this_neg;
2557 n_ops++;
2558 input_consts++;
2559 changed = 1;
2560 }
2561 break;
2562
2563 case NOT:
2564 /* ~a -> (-a - 1) */
2565 if (n_ops != 7)
2566 {
2567 ops[n_ops].op = constm1_rtx;
2568 ops[n_ops++].neg = this_neg;
2569 ops[i].op = XEXP (this_op, 0);
2570 ops[i].neg = !this_neg;
2571 changed = 1;
2572 }
2573 break;
2574
2575 case CONST_INT:
2576 if (this_neg)
2577 {
2578 ops[i].op = neg_const_int (mode, this_op);
2579 ops[i].neg = 0;
2580 changed = 1;
2581 }
2582 break;
2583
2584 default:
2585 break;
2586 }
2587 }
2588 }
2589 while (changed);
2590
2591 /* If we only have two operands, we can't do anything. */
2592 if (n_ops <= 2 && !force)
2593 return NULL_RTX;
2594
2595 /* Count the number of CONSTs we didn't split above. */
2596 for (i = 0; i < n_ops; i++)
2597 if (GET_CODE (ops[i].op) == CONST)
2598 input_consts++;
2599
2600 /* Now simplify each pair of operands until nothing changes. The first
2601 time through just simplify constants against each other. */
2602
2603 first = 1;
2604 do
2605 {
2606 changed = first;
2607
2608 for (i = 0; i < n_ops - 1; i++)
2609 for (j = i + 1; j < n_ops; j++)
2610 {
2611 rtx lhs = ops[i].op, rhs = ops[j].op;
2612 int lneg = ops[i].neg, rneg = ops[j].neg;
2613
2614 if (lhs != 0 && rhs != 0
2615 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2616 {
2617 enum rtx_code ncode = PLUS;
2618
2619 if (lneg != rneg)
2620 {
2621 ncode = MINUS;
2622 if (lneg)
2623 tem = lhs, lhs = rhs, rhs = tem;
2624 }
2625 else if (swap_commutative_operands_p (lhs, rhs))
2626 tem = lhs, lhs = rhs, rhs = tem;
2627
2628 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2629
2630 /* Reject "simplifications" that just wrap the two
2631 arguments in a CONST. Failure to do so can result
2632 in infinite recursion with simplify_binary_operation
2633 when it calls us to simplify CONST operations. */
2634 if (tem
2635 && ! (GET_CODE (tem) == CONST
2636 && GET_CODE (XEXP (tem, 0)) == ncode
2637 && XEXP (XEXP (tem, 0), 0) == lhs
2638 && XEXP (XEXP (tem, 0), 1) == rhs)
2639 /* Don't allow -x + -1 -> ~x simplifications in the
2640 first pass. This allows us the chance to combine
2641 the -1 with other constants. */
2642 && ! (first
2643 && GET_CODE (tem) == NOT
2644 && XEXP (tem, 0) == rhs))
2645 {
2646 lneg &= rneg;
2647 if (GET_CODE (tem) == NEG)
2648 tem = XEXP (tem, 0), lneg = !lneg;
2649 if (GET_CODE (tem) == CONST_INT && lneg)
2650 tem = neg_const_int (mode, tem), lneg = 0;
2651
2652 ops[i].op = tem;
2653 ops[i].neg = lneg;
2654 ops[j].op = NULL_RTX;
2655 changed = 1;
2656 }
2657 }
2658 }
2659
2660 first = 0;
2661 }
2662 while (changed);
2663
2664 /* Pack all the operands to the lower-numbered entries. */
2665 for (i = 0, j = 0; j < n_ops; j++)
2666 if (ops[j].op)
2667 ops[i++] = ops[j];
2668 n_ops = i;
2669
2670 /* Sort the operations based on swap_commutative_operands_p. */
2671 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2672
2673 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2674 if (n_ops == 2
2675 && GET_CODE (ops[1].op) == CONST_INT
2676 && CONSTANT_P (ops[0].op)
2677 && ops[0].neg)
2678 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2679
2680 /* We suppressed creation of trivial CONST expressions in the
2681 combination loop to avoid recursion. Create one manually now.
2682 The combination loop should have ensured that there is exactly
2683 one CONST_INT, and the sort will have ensured that it is last
2684 in the array and that any other constant will be next-to-last. */
2685
2686 if (n_ops > 1
2687 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2688 && CONSTANT_P (ops[n_ops - 2].op))
2689 {
2690 rtx value = ops[n_ops - 1].op;
2691 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2692 value = neg_const_int (mode, value);
2693 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2694 n_ops--;
2695 }
2696
2697 /* Count the number of CONSTs that we generated. */
2698 n_consts = 0;
2699 for (i = 0; i < n_ops; i++)
2700 if (GET_CODE (ops[i].op) == CONST)
2701 n_consts++;
2702
2703 /* Give up if we didn't reduce the number of operands we had. Make
2704 sure we count a CONST as two operands. If we have the same
2705 number of operands, but have made more CONSTs than before, this
2706 is also an improvement, so accept it. */
2707 if (!force
2708 && (n_ops + n_consts > input_ops
2709 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2710 return NULL_RTX;
2711
2712 /* Put a non-negated operand first, if possible. */
2713
2714 for (i = 0; i < n_ops && ops[i].neg; i++)
2715 continue;
2716 if (i == n_ops)
2717 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2718 else if (i != 0)
2719 {
2720 tem = ops[0].op;
2721 ops[0] = ops[i];
2722 ops[i].op = tem;
2723 ops[i].neg = 1;
2724 }
2725
2726 /* Now make the result by performing the requested operations. */
2727 result = ops[0].op;
2728 for (i = 1; i < n_ops; i++)
2729 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2730 mode, result, ops[i].op);
2731
2732 return result;
2733 }
2734
2735 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2736 static bool
2737 plus_minus_operand_p (rtx x)
2738 {
2739 return GET_CODE (x) == PLUS
2740 || GET_CODE (x) == MINUS
2741 || (GET_CODE (x) == CONST
2742 && GET_CODE (XEXP (x, 0)) == PLUS
2743 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2744 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2745 }
2746
2747 /* Like simplify_binary_operation except used for relational operators.
2748 MODE is the mode of the result. If MODE is VOIDmode, the operands must
2749 not both be VOIDmode as well.
2750
2751 CMP_MODE specifies the mode in which the comparison is done, so it is
2752 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2753 the operands or, if both are VOIDmode, the operands are compared in
2754 "infinite precision". */
2755 rtx
2756 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2757 enum machine_mode cmp_mode, rtx op0, rtx op1)
2758 {
2759 rtx tem, trueop0, trueop1;
2760
2761 if (cmp_mode == VOIDmode)
2762 cmp_mode = GET_MODE (op0);
2763 if (cmp_mode == VOIDmode)
2764 cmp_mode = GET_MODE (op1);
2765
2766 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2767 if (tem)
2768 {
2769 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2770 {
2771 if (tem == const0_rtx)
2772 return CONST0_RTX (mode);
2773 #ifdef FLOAT_STORE_FLAG_VALUE
2774 {
2775 REAL_VALUE_TYPE val;
2776 val = FLOAT_STORE_FLAG_VALUE (mode);
2777 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2778 }
2779 #else
2780 return NULL_RTX;
2781 #endif
2782 }
2783 if (VECTOR_MODE_P (mode))
2784 {
2785 if (tem == const0_rtx)
2786 return CONST0_RTX (mode);
2787 #ifdef VECTOR_STORE_FLAG_VALUE
2788 {
2789 int i, units;
2790 rtvec v;
2791
2792 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2793 if (val == NULL_RTX)
2794 return NULL_RTX;
2795 if (val == const1_rtx)
2796 return CONST1_RTX (mode);
2797
2798 units = GET_MODE_NUNITS (mode);
2799 v = rtvec_alloc (units);
2800 for (i = 0; i < units; i++)
2801 RTVEC_ELT (v, i) = val;
2802 return gen_rtx_raw_CONST_VECTOR (mode, v);
2803 }
2804 #else
2805 return NULL_RTX;
2806 #endif
2807 }
2808
2809 return tem;
2810 }
2811
2812 /* For the following tests, ensure const0_rtx is op1. */
2813 if (swap_commutative_operands_p (op0, op1)
2814 || (op0 == const0_rtx && op1 != const0_rtx))
2815 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2816
2817 /* If op0 is a compare, extract the comparison arguments from it. */
2818 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2819 return simplify_relational_operation (code, mode, VOIDmode,
2820 XEXP (op0, 0), XEXP (op0, 1));
2821
2822 if (mode == VOIDmode
2823 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2824 || CC0_P (op0))
2825 return NULL_RTX;
2826
2827 trueop0 = avoid_constant_pool_reference (op0);
2828 trueop1 = avoid_constant_pool_reference (op1);
2829 return simplify_relational_operation_1 (code, mode, cmp_mode,
2830 trueop0, trueop1);
2831 }
2832
2833 /* This part of simplify_relational_operation is only used when CMP_MODE
2834 is not in class MODE_CC (i.e. it is a real comparison).
2835
2836 MODE is the mode of the result, while CMP_MODE specifies in which
2837 mode the comparison is done in, so it is the mode of the operands. */
2838
2839 static rtx
2840 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2841 enum machine_mode cmp_mode, rtx op0, rtx op1)
2842 {
2843 enum rtx_code op0code = GET_CODE (op0);
2844
2845 if (GET_CODE (op1) == CONST_INT)
2846 {
2847 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2848 {
2849 /* If op0 is a comparison, extract the comparison arguments from it. */
2850 if (code == NE)
2851 {
2852 if (GET_MODE (op0) == cmp_mode)
2853 return simplify_rtx (op0);
2854 else
2855 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2856 XEXP (op0, 0), XEXP (op0, 1));
2857 }
2858 else if (code == EQ)
2859 {
2860 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2861 if (new_code != UNKNOWN)
2862 return simplify_gen_relational (new_code, mode, VOIDmode,
2863 XEXP (op0, 0), XEXP (op0, 1));
2864 }
2865 }
2866 }
2867
2868 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2869 if ((code == EQ || code == NE)
2870 && (op0code == PLUS || op0code == MINUS)
2871 && CONSTANT_P (op1)
2872 && CONSTANT_P (XEXP (op0, 1))
2873 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2874 {
2875 rtx x = XEXP (op0, 0);
2876 rtx c = XEXP (op0, 1);
2877
2878 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2879 cmp_mode, op1, c);
2880 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2881 }
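 /* Editorial example: (eq (plus x (const_int 3)) (const_int 10)) becomes
 (eq x (const_int 7)), since the constant is moved across the PLUS by
 subtracting it from op1; for a MINUS the constants are added instead. */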
2882
2883 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
2884 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2885 if (code == NE
2886 && op1 == const0_rtx
2887 && GET_MODE_CLASS (mode) == MODE_INT
2888 && cmp_mode != VOIDmode
2889 && cmp_mode != BImode
2890 && nonzero_bits (op0, cmp_mode) == 1
2891 && STORE_FLAG_VALUE == 1)
2892 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
2893 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
2894 : lowpart_subreg (mode, op0, cmp_mode);
2895
2896 return NULL_RTX;
2897 }
2898
2899 /* Check if the given comparison (done in the given MODE) is actually a
2900 tautology or a contradiction.
2901 If no simplification is possible, this function returns zero.
2902 Otherwise, it returns either const_true_rtx or const0_rtx. */
2903
2904 rtx
2905 simplify_const_relational_operation (enum rtx_code code,
2906 enum machine_mode mode,
2907 rtx op0, rtx op1)
2908 {
2909 int equal, op0lt, op0ltu, op1lt, op1ltu;
2910 rtx tem;
2911 rtx trueop0;
2912 rtx trueop1;
2913
2914 gcc_assert (mode != VOIDmode
2915 || (GET_MODE (op0) == VOIDmode
2916 && GET_MODE (op1) == VOIDmode));
2917
2918 /* If op0 is a compare, extract the comparison arguments from it. */
2919 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2920 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2921
2922 /* We can't simplify MODE_CC values since we don't know what the
2923 actual comparison is. */
2924 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2925 return 0;
2926
2927 /* Make sure the constant is second. */
2928 if (swap_commutative_operands_p (op0, op1))
2929 {
2930 tem = op0, op0 = op1, op1 = tem;
2931 code = swap_condition (code);
2932 }
2933
2934 trueop0 = avoid_constant_pool_reference (op0);
2935 trueop1 = avoid_constant_pool_reference (op1);
2936
2937 /* For integer comparisons of A and B maybe we can simplify A - B and can
2938 then simplify a comparison of that with zero. If A and B are both either
2939 a register or a CONST_INT, this can't help; testing for these cases will
2940 prevent infinite recursion here and speed things up.
2941
2942 If CODE is an unsigned comparison, then we can never do this optimization,
2943 because it gives an incorrect result if the subtraction wraps around zero.
2944 ANSI C defines unsigned operations such that they never overflow, and
2945 thus such cases cannot be ignored; but we cannot do it even for
2946 signed comparisons for languages such as Java, so test flag_wrapv. */
2947
2948 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2949 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2950 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2951 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2952 /* We cannot do this for == or != if tem is a nonzero address. */
2953 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2954 && code != GTU && code != GEU && code != LTU && code != LEU)
2955 return simplify_const_relational_operation (signed_condition (code),
2956 mode, tem, const0_rtx);
2957
2958 if (flag_unsafe_math_optimizations && code == ORDERED)
2959 return const_true_rtx;
2960
2961 if (flag_unsafe_math_optimizations && code == UNORDERED)
2962 return const0_rtx;
2963
2964 /* For modes without NaNs, if the two operands are equal, we know the
2965 result except if they have side-effects. */
2966 if (! HONOR_NANS (GET_MODE (trueop0))
2967 && rtx_equal_p (trueop0, trueop1)
2968 && ! side_effects_p (trueop0))
2969 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2970
2971 /* If the operands are floating-point constants, see if we can fold
2972 the result. */
2973 else if (GET_CODE (trueop0) == CONST_DOUBLE
2974 && GET_CODE (trueop1) == CONST_DOUBLE
2975 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2976 {
2977 REAL_VALUE_TYPE d0, d1;
2978
2979 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2980 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2981
2982 /* Comparisons are unordered iff at least one of the values is NaN. */
2983 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2984 switch (code)
2985 {
2986 case UNEQ:
2987 case UNLT:
2988 case UNGT:
2989 case UNLE:
2990 case UNGE:
2991 case NE:
2992 case UNORDERED:
2993 return const_true_rtx;
2994 case EQ:
2995 case LT:
2996 case GT:
2997 case LE:
2998 case GE:
2999 case LTGT:
3000 case ORDERED:
3001 return const0_rtx;
3002 default:
3003 return 0;
3004 }
3005
3006 equal = REAL_VALUES_EQUAL (d0, d1);
3007 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3008 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3009 }
3010
3011 /* Otherwise, see if the operands are both integers. */
3012 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3013 && (GET_CODE (trueop0) == CONST_DOUBLE
3014 || GET_CODE (trueop0) == CONST_INT)
3015 && (GET_CODE (trueop1) == CONST_DOUBLE
3016 || GET_CODE (trueop1) == CONST_INT))
3017 {
3018 int width = GET_MODE_BITSIZE (mode);
3019 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3020 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3021
3022 /* Get the two words comprising each integer constant. */
3023 if (GET_CODE (trueop0) == CONST_DOUBLE)
3024 {
3025 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3026 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3027 }
3028 else
3029 {
3030 l0u = l0s = INTVAL (trueop0);
3031 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3032 }
3033
3034 if (GET_CODE (trueop1) == CONST_DOUBLE)
3035 {
3036 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3037 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3038 }
3039 else
3040 {
3041 l1u = l1s = INTVAL (trueop1);
3042 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3043 }
3044
3045 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3046 we have to sign or zero-extend the values. */
3047 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3048 {
3049 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3050 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3051
3052 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3053 l0s |= ((HOST_WIDE_INT) (-1) << width);
3054
3055 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3056 l1s |= ((HOST_WIDE_INT) (-1) << width);
3057 }
3058 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3059 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3060
3061 equal = (h0u == h1u && l0u == l1u);
3062 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3063 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3064 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3065 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3066 }
3067
3068 /* Otherwise, there are some code-specific tests we can make. */
3069 else
3070 {
3071 /* Optimize comparisons with upper and lower bounds. */
3072 if (SCALAR_INT_MODE_P (mode)
3073 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3074 {
3075 rtx mmin, mmax;
3076 int sign;
3077
3078 if (code == GEU
3079 || code == LEU
3080 || code == GTU
3081 || code == LTU)
3082 sign = 0;
3083 else
3084 sign = 1;
3085
3086 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3087
3088 tem = NULL_RTX;
3089 switch (code)
3090 {
3091 case GEU:
3092 case GE:
3093 /* x >= min is always true. */
3094 if (rtx_equal_p (trueop1, mmin))
3095 tem = const_true_rtx;
3096 else
3097 break;
3098
3099 case LEU:
3100 case LE:
3101 /* x <= max is always true. */
3102 if (rtx_equal_p (trueop1, mmax))
3103 tem = const_true_rtx;
3104 break;
3105
3106 case GTU:
3107 case GT:
3108 /* x > max is always false. */
3109 if (rtx_equal_p (trueop1, mmax))
3110 tem = const0_rtx;
3111 break;
3112
3113 case LTU:
3114 case LT:
3115 /* x < min is always false. */
3116 if (rtx_equal_p (trueop1, mmin))
3117 tem = const0_rtx;
3118 break;
3119
3120 default:
3121 break;
3122 }
3123 if (tem == const0_rtx
3124 || tem == const_true_rtx)
3125 return tem;
3126 }
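 /* Editorial example for QImode, unsigned: get_mode_bounds gives
 mmin == 0 and mmax == 255, so (geu x (const_int 0)) folds to
 const_true_rtx and (gtu x (const_int 255)) folds to const0_rtx. */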
3127
3128 switch (code)
3129 {
3130 case EQ:
3131 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3132 return const0_rtx;
3133 break;
3134
3135 case NE:
3136 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3137 return const_true_rtx;
3138 break;
3139
3140 case LT:
3141 /* Optimize abs(x) < 0.0. */
3142 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3143 {
3144 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3145 : trueop0;
3146 if (GET_CODE (tem) == ABS)
3147 return const0_rtx;
3148 }
3149 break;
3150
3151 case GE:
3152 /* Optimize abs(x) >= 0.0. */
3153 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3154 {
3155 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3156 : trueop0;
3157 if (GET_CODE (tem) == ABS)
3158 return const_true_rtx;
3159 }
3160 break;
3161
3162 case UNGE:
3163 /* Optimize ! (abs(x) < 0.0). */
3164 if (trueop1 == CONST0_RTX (mode))
3165 {
3166 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3167 : trueop0;
3168 if (GET_CODE (tem) == ABS)
3169 return const_true_rtx;
3170 }
3171 break;
3172
3173 default:
3174 break;
3175 }
3176
3177 return 0;
3178 }
3179
3180 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3181 as appropriate. */
3182 switch (code)
3183 {
3184 case EQ:
3185 case UNEQ:
3186 return equal ? const_true_rtx : const0_rtx;
3187 case NE:
3188 case LTGT:
3189 return ! equal ? const_true_rtx : const0_rtx;
3190 case LT:
3191 case UNLT:
3192 return op0lt ? const_true_rtx : const0_rtx;
3193 case GT:
3194 case UNGT:
3195 return op1lt ? const_true_rtx : const0_rtx;
3196 case LTU:
3197 return op0ltu ? const_true_rtx : const0_rtx;
3198 case GTU:
3199 return op1ltu ? const_true_rtx : const0_rtx;
3200 case LE:
3201 case UNLE:
3202 return equal || op0lt ? const_true_rtx : const0_rtx;
3203 case GE:
3204 case UNGE:
3205 return equal || op1lt ? const_true_rtx : const0_rtx;
3206 case LEU:
3207 return equal || op0ltu ? const_true_rtx : const0_rtx;
3208 case GEU:
3209 return equal || op1ltu ? const_true_rtx : const0_rtx;
3210 case ORDERED:
3211 return const_true_rtx;
3212 case UNORDERED:
3213 return const0_rtx;
3214 default:
3215 gcc_unreachable ();
3216 }
3217 }
3218 \f
3219 /* Simplify CODE, an operation with result mode MODE and three operands,
3220 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3221 a constant. Return 0 if no simplification is possible. */
3222
3223 rtx
3224 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3225 enum machine_mode op0_mode, rtx op0, rtx op1,
3226 rtx op2)
3227 {
3228 unsigned int width = GET_MODE_BITSIZE (mode);
3229
3230 /* VOIDmode means "infinite" precision. */
3231 if (width == 0)
3232 width = HOST_BITS_PER_WIDE_INT;
3233
3234 switch (code)
3235 {
3236 case SIGN_EXTRACT:
3237 case ZERO_EXTRACT:
3238 if (GET_CODE (op0) == CONST_INT
3239 && GET_CODE (op1) == CONST_INT
3240 && GET_CODE (op2) == CONST_INT
3241 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3242 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3243 {
3244 /* Extracting a bit-field from a constant */
3245 HOST_WIDE_INT val = INTVAL (op0);
3246
3247 if (BITS_BIG_ENDIAN)
3248 val >>= (GET_MODE_BITSIZE (op0_mode)
3249 - INTVAL (op2) - INTVAL (op1));
3250 else
3251 val >>= INTVAL (op2);
3252
3253 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3254 {
3255 /* First zero-extend. */
3256 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3257 /* If desired, propagate sign bit. */
3258 if (code == SIGN_EXTRACT
3259 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3260 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3261 }
3262
3263 /* Clear the bits that don't belong in our mode,
3264 unless they and our sign bit are all one.
3265 So we get either a reasonable negative value or a reasonable
3266 unsigned value for this mode. */
3267 if (width < HOST_BITS_PER_WIDE_INT
3268 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3269 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3270 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3271
3272 return gen_int_mode (val, mode);
3273 }
3274 break;
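 /* Editorial worked example (assuming !BITS_BIG_ENDIAN): extracting 4 bits
 starting at bit 4 of (const_int 0xab) first shifts the value right by 4,
 leaving 0xa. ZERO_EXTRACT masks to 0xa, while SIGN_EXTRACT, seeing bit 3
 set, propagates the sign and yields -6. */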
3275
3276 case IF_THEN_ELSE:
3277 if (GET_CODE (op0) == CONST_INT)
3278 return op0 != const0_rtx ? op1 : op2;
3279
3280 /* Convert c ? a : a into "a". */
3281 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3282 return op1;
3283
3284 /* Convert a != b ? a : b into "a". */
3285 if (GET_CODE (op0) == NE
3286 && ! side_effects_p (op0)
3287 && ! HONOR_NANS (mode)
3288 && ! HONOR_SIGNED_ZEROS (mode)
3289 && ((rtx_equal_p (XEXP (op0, 0), op1)
3290 && rtx_equal_p (XEXP (op0, 1), op2))
3291 || (rtx_equal_p (XEXP (op0, 0), op2)
3292 && rtx_equal_p (XEXP (op0, 1), op1))))
3293 return op1;
3294
3295 /* Convert a == b ? a : b into "b". */
3296 if (GET_CODE (op0) == EQ
3297 && ! side_effects_p (op0)
3298 && ! HONOR_NANS (mode)
3299 && ! HONOR_SIGNED_ZEROS (mode)
3300 && ((rtx_equal_p (XEXP (op0, 0), op1)
3301 && rtx_equal_p (XEXP (op0, 1), op2))
3302 || (rtx_equal_p (XEXP (op0, 0), op2)
3303 && rtx_equal_p (XEXP (op0, 1), op1))))
3304 return op2;
3305
3306 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3307 {
3308 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3309 ? GET_MODE (XEXP (op0, 1))
3310 : GET_MODE (XEXP (op0, 0)));
3311 rtx temp;
3312
3313 /* Look for happy constants in op1 and op2. */
3314 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3315 {
3316 HOST_WIDE_INT t = INTVAL (op1);
3317 HOST_WIDE_INT f = INTVAL (op2);
3318
3319 if (t == STORE_FLAG_VALUE && f == 0)
3320 code = GET_CODE (op0);
3321 else if (t == 0 && f == STORE_FLAG_VALUE)
3322 {
3323 enum rtx_code tmp;
3324 tmp = reversed_comparison_code (op0, NULL_RTX);
3325 if (tmp == UNKNOWN)
3326 break;
3327 code = tmp;
3328 }
3329 else
3330 break;
3331
3332 return simplify_gen_relational (code, mode, cmp_mode,
3333 XEXP (op0, 0), XEXP (op0, 1));
3334 }
3335
3336 if (cmp_mode == VOIDmode)
3337 cmp_mode = op0_mode;
3338 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3339 cmp_mode, XEXP (op0, 0),
3340 XEXP (op0, 1));
3341
3342 /* See if any simplifications were possible. */
3343 if (temp)
3344 {
3345 if (GET_CODE (temp) == CONST_INT)
3346 return temp == const0_rtx ? op2 : op1;
3347 else if (temp)
3348 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3349 }
3350 }
3351 break;
3352
3353 case VEC_MERGE:
3354 gcc_assert (GET_MODE (op0) == mode);
3355 gcc_assert (GET_MODE (op1) == mode);
3356 gcc_assert (VECTOR_MODE_P (mode));
3357 op2 = avoid_constant_pool_reference (op2);
3358 if (GET_CODE (op2) == CONST_INT)
3359 {
3360 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3361 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3362 int mask = (1 << n_elts) - 1;
3363
3364 if (!(INTVAL (op2) & mask))
3365 return op1;
3366 if ((INTVAL (op2) & mask) == mask)
3367 return op0;
3368
3369 op0 = avoid_constant_pool_reference (op0);
3370 op1 = avoid_constant_pool_reference (op1);
3371 if (GET_CODE (op0) == CONST_VECTOR
3372 && GET_CODE (op1) == CONST_VECTOR)
3373 {
3374 rtvec v = rtvec_alloc (n_elts);
3375 unsigned int i;
3376
3377 for (i = 0; i < n_elts; i++)
3378 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3379 ? CONST_VECTOR_ELT (op0, i)
3380 : CONST_VECTOR_ELT (op1, i));
3381 return gen_rtx_CONST_VECTOR (mode, v);
3382 }
3383 }
3384 break;
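 /* Editorial example for a hypothetical V4SI merge: with op2 ==
 (const_int 5), binary 0101, elements 0 and 2 come from op0 and elements
 1 and 3 come from op1; op2 == 0 returns op1 outright and op2 == 15
 returns op0. */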
3385
3386 default:
3387 gcc_unreachable ();
3388 }
3389
3390 return 0;
3391 }
3392
3393 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3394 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3395
3396 Works by unpacking OP into a collection of 8-bit values
3397 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3398 and then repacking them again for OUTERMODE. */
3399
3400 static rtx
3401 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3402 enum machine_mode innermode, unsigned int byte)
3403 {
3404 /* We support up to 512-bit values (for V8DFmode). */
3405 enum {
3406 max_bitsize = 512,
3407 value_bit = 8,
3408 value_mask = (1 << value_bit) - 1
3409 };
3410 unsigned char value[max_bitsize / value_bit];
3411 int value_start;
3412 int i;
3413 int elem;
3414
3415 int num_elem;
3416 rtx * elems;
3417 int elem_bitsize;
3418 rtx result_s;
3419 rtvec result_v = NULL;
3420 enum mode_class outer_class;
3421 enum machine_mode outer_submode;
3422
3423 /* Some ports misuse CCmode. */
3424 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3425 return op;
3426
3427 /* We have no way to represent a complex constant at the rtl level. */
3428 if (COMPLEX_MODE_P (outermode))
3429 return NULL_RTX;
3430
3431 /* Unpack the value. */
3432
3433 if (GET_CODE (op) == CONST_VECTOR)
3434 {
3435 num_elem = CONST_VECTOR_NUNITS (op);
3436 elems = &CONST_VECTOR_ELT (op, 0);
3437 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3438 }
3439 else
3440 {
3441 num_elem = 1;
3442 elems = &op;
3443 elem_bitsize = max_bitsize;
3444 }
3445 /* If this asserts, it is too complicated; reducing value_bit may help. */
3446 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3447 /* I don't know how to handle endianness of sub-units. */
3448 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3449
3450 for (elem = 0; elem < num_elem; elem++)
3451 {
3452 unsigned char * vp;
3453 rtx el = elems[elem];
3454
3455 /* Vectors are kept in target memory order. (This is probably
3456 a mistake.) */
3457 {
3458 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3459 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3460 / BITS_PER_UNIT);
3461 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3462 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3463 unsigned bytele = (subword_byte % UNITS_PER_WORD
3464 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3465 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3466 }
3467
3468 switch (GET_CODE (el))
3469 {
3470 case CONST_INT:
3471 for (i = 0;
3472 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3473 i += value_bit)
3474 *vp++ = INTVAL (el) >> i;
3475 /* CONST_INTs are always logically sign-extended. */
3476 for (; i < elem_bitsize; i += value_bit)
3477 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3478 break;
3479
3480 case CONST_DOUBLE:
3481 if (GET_MODE (el) == VOIDmode)
3482 {
3483 /* If this triggers, someone should have generated a
3484 CONST_INT instead. */
3485 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3486
3487 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3488 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3489 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3490 {
3491 *vp++
3492 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3493 i += value_bit;
3494 }
3495 /* It shouldn't matter what's done here, so fill it with
3496 zero. */
3497 for (; i < max_bitsize; i += value_bit)
3498 *vp++ = 0;
3499 }
3500 else
3501 {
3502 long tmp[max_bitsize / 32];
3503 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3504
3505 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3506 gcc_assert (bitsize <= elem_bitsize);
3507 gcc_assert (bitsize % value_bit == 0);
3508
3509 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3510 GET_MODE (el));
3511
3512 /* real_to_target produces its result in words affected by
3513 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3514 and use WORDS_BIG_ENDIAN instead; see the documentation
3515 of SUBREG in rtl.texi. */
3516 for (i = 0; i < bitsize; i += value_bit)
3517 {
3518 int ibase;
3519 if (WORDS_BIG_ENDIAN)
3520 ibase = bitsize - 1 - i;
3521 else
3522 ibase = i;
3523 *vp++ = tmp[ibase / 32] >> i % 32;
3524 }
3525
3526 /* It shouldn't matter what's done here, so fill it with
3527 zero. */
3528 for (; i < elem_bitsize; i += value_bit)
3529 *vp++ = 0;
3530 }
3531 break;
3532
3533 default:
3534 gcc_unreachable ();
3535 }
3536 }
3537
3538 /* Now, pick the right byte to start with. */
3539 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3540 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3541 will already have offset 0. */
3542 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3543 {
3544 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3545 - byte);
3546 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3547 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3548 byte = (subword_byte % UNITS_PER_WORD
3549 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3550 }
3551
3552 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3553 so if it's become negative it will instead be very large.) */
3554 gcc_assert (byte < GET_MODE_SIZE (innermode));
3555
3556 /* Convert from bytes to chunks of size value_bit. */
3557 value_start = byte * (BITS_PER_UNIT / value_bit);
3558
3559 /* Re-pack the value. */
3560
3561 if (VECTOR_MODE_P (outermode))
3562 {
3563 num_elem = GET_MODE_NUNITS (outermode);
3564 result_v = rtvec_alloc (num_elem);
3565 elems = &RTVEC_ELT (result_v, 0);
3566 outer_submode = GET_MODE_INNER (outermode);
3567 }
3568 else
3569 {
3570 num_elem = 1;
3571 elems = &result_s;
3572 outer_submode = outermode;
3573 }
3574
3575 outer_class = GET_MODE_CLASS (outer_submode);
3576 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3577
3578 gcc_assert (elem_bitsize % value_bit == 0);
3579 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3580
3581 for (elem = 0; elem < num_elem; elem++)
3582 {
3583 unsigned char *vp;
3584
3585 /* Vectors are stored in target memory order. (This is probably
3586 a mistake.) */
3587 {
3588 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3589 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3590 / BITS_PER_UNIT);
3591 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3592 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3593 unsigned bytele = (subword_byte % UNITS_PER_WORD
3594 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3595 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3596 }
3597
3598 switch (outer_class)
3599 {
3600 case MODE_INT:
3601 case MODE_PARTIAL_INT:
3602 {
3603 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3604
3605 for (i = 0;
3606 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3607 i += value_bit)
3608 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3609 for (; i < elem_bitsize; i += value_bit)
3610 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3611 << (i - HOST_BITS_PER_WIDE_INT));
3612
3613 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3614 know why. */
3615 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3616 elems[elem] = gen_int_mode (lo, outer_submode);
3617 else
3618 elems[elem] = immed_double_const (lo, hi, outer_submode);
3619 }
3620 break;
3621
3622 case MODE_FLOAT:
3623 {
3624 REAL_VALUE_TYPE r;
3625 long tmp[max_bitsize / 32];
3626
3627 /* real_from_target wants its input in words affected by
3628 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3629 and use WORDS_BIG_ENDIAN instead; see the documentation
3630 of SUBREG in rtl.texi. */
3631 for (i = 0; i < max_bitsize / 32; i++)
3632 tmp[i] = 0;
3633 for (i = 0; i < elem_bitsize; i += value_bit)
3634 {
3635 int ibase;
3636 if (WORDS_BIG_ENDIAN)
3637 ibase = elem_bitsize - 1 - i;
3638 else
3639 ibase = i;
3640 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3641 }
3642
3643 real_from_target (&r, tmp, outer_submode);
3644 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3645 }
3646 break;
3647
3648 default:
3649 gcc_unreachable ();
3650 }
3651 }
3652 if (VECTOR_MODE_P (outermode))
3653 return gen_rtx_CONST_VECTOR (outermode, result_v);
3654 else
3655 return result_s;
3656 }
3657
3658 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3659 Return 0 if no simplifications are possible. */
3660 rtx
3661 simplify_subreg (enum machine_mode outermode, rtx op,
3662 enum machine_mode innermode, unsigned int byte)
3663 {
3664 /* Little bit of sanity checking. */
3665 gcc_assert (innermode != VOIDmode);
3666 gcc_assert (outermode != VOIDmode);
3667 gcc_assert (innermode != BLKmode);
3668 gcc_assert (outermode != BLKmode);
3669
3670 gcc_assert (GET_MODE (op) == innermode
3671 || GET_MODE (op) == VOIDmode);
3672
3673 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3674 gcc_assert (byte < GET_MODE_SIZE (innermode));
3675
3676 if (outermode == innermode && !byte)
3677 return op;
3678
3679 if (GET_CODE (op) == CONST_INT
3680 || GET_CODE (op) == CONST_DOUBLE
3681 || GET_CODE (op) == CONST_VECTOR)
3682 return simplify_immed_subreg (outermode, op, innermode, byte);
3683
3684 /* Changing mode twice with SUBREG => just change it once,
3685 or not at all if changing back to op's starting mode. */
3686 if (GET_CODE (op) == SUBREG)
3687 {
3688 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3689 int final_offset = byte + SUBREG_BYTE (op);
3690 rtx newx;
3691
3692 if (outermode == innermostmode
3693 && byte == 0 && SUBREG_BYTE (op) == 0)
3694 return SUBREG_REG (op);
3695
3696 /* The SUBREG_BYTE represents the offset, as if the value were stored
3697 in memory. An irritating exception is a paradoxical subreg, where
3698 we define SUBREG_BYTE to be 0. On big-endian machines, this
3699 value should be negative. For a moment, undo this exception. */
3700 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3701 {
3702 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3703 if (WORDS_BIG_ENDIAN)
3704 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3705 if (BYTES_BIG_ENDIAN)
3706 final_offset += difference % UNITS_PER_WORD;
3707 }
3708 if (SUBREG_BYTE (op) == 0
3709 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3710 {
3711 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3712 if (WORDS_BIG_ENDIAN)
3713 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3714 if (BYTES_BIG_ENDIAN)
3715 final_offset += difference % UNITS_PER_WORD;
3716 }
3717
3718 /* See whether resulting subreg will be paradoxical. */
3719 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3720 {
3721 /* In nonparadoxical subregs we can't handle negative offsets. */
3722 if (final_offset < 0)
3723 return NULL_RTX;
3724 /* Bail out in case resulting subreg would be incorrect. */
3725 if (final_offset % GET_MODE_SIZE (outermode)
3726 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3727 return NULL_RTX;
3728 }
3729 else
3730 {
3731 int offset = 0;
3732 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3733
3734 /* In a paradoxical subreg, see if we are still looking at the lower part.
3735 If so, our SUBREG_BYTE will be 0. */
3736 if (WORDS_BIG_ENDIAN)
3737 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3738 if (BYTES_BIG_ENDIAN)
3739 offset += difference % UNITS_PER_WORD;
3740 if (offset == final_offset)
3741 final_offset = 0;
3742 else
3743 return NULL_RTX;
3744 }
3745
3746 /* Recurse for further possible simplifications. */
3747 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3748 final_offset);
3749 if (newx)
3750 return newx;
3751 if (validate_subreg (outermode, innermostmode,
3752 SUBREG_REG (op), final_offset))
3753 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3754 return NULL_RTX;
3755 }
3756
3757 /* SUBREG of a hard register => just change the register number
3758 and/or mode. If the hard register is not valid in that mode,
3759 suppress this simplification. If the hard register is the stack,
3760 frame, or argument pointer, leave this as a SUBREG. */
3761
3762 if (REG_P (op)
3763 && REGNO (op) < FIRST_PSEUDO_REGISTER
3764 #ifdef CANNOT_CHANGE_MODE_CLASS
3765 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3766 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3767 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3768 #endif
3769 && ((reload_completed && !frame_pointer_needed)
3770 || (REGNO (op) != FRAME_POINTER_REGNUM
3771 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3772 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3773 #endif
3774 ))
3775 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3776 && REGNO (op) != ARG_POINTER_REGNUM
3777 #endif
3778 && REGNO (op) != STACK_POINTER_REGNUM
3779 && subreg_offset_representable_p (REGNO (op), innermode,
3780 byte, outermode))
3781 {
3782 unsigned int regno = REGNO (op);
3783 unsigned int final_regno
3784 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3785
3786 /* ??? We do allow it if the current REG is not valid for
3787 its mode. This is a kludge to work around how float/complex
3788 arguments are passed on 32-bit SPARC and should be fixed. */
3789 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3790 || ! HARD_REGNO_MODE_OK (regno, innermode))
3791 {
3792 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3793
3794 /* Propagate the original regno. We don't have any way to specify
3795 the offset inside the original regno, so do so only for the lowpart.
3796 The information is used only by alias analysis, which cannot
3797 grok partial registers anyway. */
3798
3799 if (subreg_lowpart_offset (outermode, innermode) == byte)
3800 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3801 return x;
3802 }
3803 }
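/* A sketch of the effect, with hypothetical register numbers: on a
   little-endian target where word-sized hard registers 0 and 1 hold a
   DImode value, (subreg:SI (reg:DI 0) 4) would be rewritten here as
   (reg:SI 1), provided SImode is valid for register 1 and the offset
   is representable.  */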
3804
3805 /* If we have a SUBREG of a register that we are replacing and we are
3806 replacing it with a MEM, make a new MEM and try replacing the
3807 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3808 or if we would be widening it. */
3809
3810 if (MEM_P (op)
3811 && ! mode_dependent_address_p (XEXP (op, 0))
3812 /* Allow splitting of volatile memory references in case we don't
3813 have an instruction to move the whole thing. */
3814 && (! MEM_VOLATILE_P (op)
3815 || ! have_insn_for (SET, innermode))
3816 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3817 return adjust_address_nv (op, outermode, byte);
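/* For example, (subreg:HI (mem:SI X) 2) becomes an HImode MEM whose
   address is X plus 2 bytes, provided the address is not mode-dependent;
   adjust_address_nv takes care of the address arithmetic and memory
   attributes.  */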
3818
3819 /* Handle complex values represented as a CONCAT
3820 of real and imaginary parts. */
3821 if (GET_CODE (op) == CONCAT)
3822 {
3823 unsigned int inner_size, final_offset;
3824 rtx part, res;
3825
3826 inner_size = GET_MODE_UNIT_SIZE (innermode);
3827 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3828 final_offset = byte % inner_size;
3829 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3830 return NULL_RTX;
3831
3832 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3833 if (res)
3834 return res;
3835 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3836 return gen_rtx_SUBREG (outermode, part, final_offset);
3837 return NULL_RTX;
3838 }
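/* For example, assuming 4-byte SFmode, (subreg:SF (concat:SC RE IM) 4)
   selects the imaginary part IM, while a request straddling both halves,
   such as (subreg:SF (concat:SC RE IM) 2), returns NULL_RTX above.  */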
3839
3840 /* Optimize SUBREG truncations of zero and sign extended values. */
3841 if ((GET_CODE (op) == ZERO_EXTEND
3842 || GET_CODE (op) == SIGN_EXTEND)
3843 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3844 {
3845 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3846
3847 /* If we're requesting the lowpart of a zero or sign extension,
3848 there are three possibilities. If the outermode is the same
3849 as the origmode, we can omit both the extension and the subreg.
3850 If the outermode is not larger than the origmode, we can apply
3851 the truncation without the extension. Finally, if the outermode
3852 is larger than the origmode, but both are integer modes, we
3853 can just extend to the appropriate mode. */
3854 if (bitpos == 0)
3855 {
3856 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3857 if (outermode == origmode)
3858 return XEXP (op, 0);
3859 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3860 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3861 subreg_lowpart_offset (outermode,
3862 origmode));
3863 if (SCALAR_INT_MODE_P (outermode))
3864 return simplify_gen_unary (GET_CODE (op), outermode,
3865 XEXP (op, 0), origmode);
3866 }
3867
3868 /* A SUBREG resulting from a zero extension may fold to zero if
3869 it extracts higher bits than the ZERO_EXTEND's source bits. */
3870 if (GET_CODE (op) == ZERO_EXTEND
3871 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3872 return CONST0_RTX (outermode);
3873 }
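/* For example, on a little-endian target, requesting the lowpart as in
   (subreg:QI (zero_extend:SI (x:QI)) 0) yields X itself, while
   (subreg:HI (zero_extend:SI (x:QI)) 2) extracts only bits above the
   QImode source and therefore folds to (const_int 0).  */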
3874
3875 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
3876 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3877 the outer subreg is effectively a truncation to the original mode. */
3878 if ((GET_CODE (op) == LSHIFTRT
3879 || GET_CODE (op) == ASHIFTRT)
3880 && SCALAR_INT_MODE_P (outermode)
3881 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
3882 to avoid the possibility that an outer LSHIFTRT shifts by more
3883 than the sign extension's sign_bit_copies and introduces zeros
3884 into the high bits of the result. */
3885 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3886 && GET_CODE (XEXP (op, 1)) == CONST_INT
3887 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3888 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3889 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3890 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3891 return simplify_gen_binary (ASHIFTRT, outermode,
3892 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3893
3894 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
3895 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3896 the outer subreg is effectively a truncation to the original mode. */
3897 if ((GET_CODE (op) == LSHIFTRT
3898 || GET_CODE (op) == ASHIFTRT)
3899 && SCALAR_INT_MODE_P (outermode)
3900 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3901 && GET_CODE (XEXP (op, 1)) == CONST_INT
3902 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3903 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3904 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3905 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3906 return simplify_gen_binary (LSHIFTRT, outermode,
3907 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3908
3909 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
3910 to (ashift:QI (x:QI) C), where C is a suitable small constant and
3911 the outer subreg is effectively a truncation to the original mode. */
3912 if (GET_CODE (op) == ASHIFT
3913 && SCALAR_INT_MODE_P (outermode)
3914 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3915 && GET_CODE (XEXP (op, 1)) == CONST_INT
3916 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3917 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
3918 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3919 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3920 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3921 return simplify_gen_binary (ASHIFT, outermode,
3922 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3923
3924 return NULL_RTX;
3925 }
3926
3927 /* Make a SUBREG operation or equivalent if it folds. */
3928
3929 rtx
3930 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3931 enum machine_mode innermode, unsigned int byte)
3932 {
3933 rtx newx;
3934
3935 newx = simplify_subreg (outermode, op, innermode, byte);
3936 if (newx)
3937 return newx;
3938
3939 if (GET_CODE (op) == SUBREG
3940 || GET_CODE (op) == CONCAT
3941 || GET_MODE (op) == VOIDmode)
3942 return NULL_RTX;
3943
3944 if (validate_subreg (outermode, innermode, op, byte))
3945 return gen_rtx_SUBREG (outermode, op, byte);
3946
3947 return NULL_RTX;
3948 }
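/* Typical use, sketched: extract the low SImode part of a DImode
   value X, falling back to an explicit SUBREG when nothing simpler
   exists:

     rtx lo = simplify_gen_subreg (SImode, x, DImode,
                                   subreg_lowpart_offset (SImode, DImode));

   LO may still be NULL_RTX, for example when X is itself a SUBREG or a
   CONCAT that cannot be reduced.  */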
3949
3950 /* Simplify X, an rtx expression.
3951
3952 Return the simplified expression or NULL if no simplifications
3953 were possible.
3954
3955 This is the preferred entry point into the simplification routines;
3956 however, we still allow passes to call the more specific routines.
3957
3958 Right now GCC has three (yes, three) major bodies of RTL simplification
3959 code that need to be unified.
3960
3961 1. fold_rtx in cse.c. This code uses various CSE specific
3962 information to aid in RTL simplification.
3963
3964 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3965 it uses combine specific information to aid in RTL
3966 simplification.
3967
3968 3. The routines in this file.
3969
3970
3971 Long term we want to only have one body of simplification code; to
3972 get to that state I recommend the following steps:
3973
3974 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3975 which do not depend on pass-specific state into these routines.
3976
3977 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3978 use this routine whenever possible.
3979
3980 3. Allow for pass dependent state to be provided to these
3981 routines and add simplifications based on the pass dependent
3982 state. Remove code from cse.c & combine.c that becomes
3983 redundant/dead.
3984
3985 It will take time, but ultimately the compiler will be easier to
3986 maintain and improve. It's totally silly that when we add a
3987 simplification it needs to be added to 4 places (3 for RTL
3988 simplification and 1 for tree simplification). */
3989
3990 rtx
3991 simplify_rtx (rtx x)
3992 {
3993 enum rtx_code code = GET_CODE (x);
3994 enum machine_mode mode = GET_MODE (x);
3995
3996 switch (GET_RTX_CLASS (code))
3997 {
3998 case RTX_UNARY:
3999 return simplify_unary_operation (code, mode,
4000 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4001 case RTX_COMM_ARITH:
4002 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4003 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4004
4005 /* Fall through.... */
4006
4007 case RTX_BIN_ARITH:
4008 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4009
4010 case RTX_TERNARY:
4011 case RTX_BITFIELD_OPS:
4012 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4013 XEXP (x, 0), XEXP (x, 1),
4014 XEXP (x, 2));
4015
4016 case RTX_COMPARE:
4017 case RTX_COMM_COMPARE:
4018 return simplify_relational_operation (code, mode,
4019 ((GET_MODE (XEXP (x, 0))
4020 != VOIDmode)
4021 ? GET_MODE (XEXP (x, 0))
4022 : GET_MODE (XEXP (x, 1))),
4023 XEXP (x, 0),
4024 XEXP (x, 1));
4025
4026 case RTX_EXTRA:
4027 if (code == SUBREG)
4028 return simplify_gen_subreg (mode, SUBREG_REG (x),
4029 GET_MODE (SUBREG_REG (x)),
4030 SUBREG_BYTE (x));
4031 break;
4032
4033 case RTX_OBJ:
4034 if (code == LO_SUM)
4035 {
4036 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4037 if (GET_CODE (XEXP (x, 0)) == HIGH
4038 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4039 return XEXP (x, 1);
4040 }
4041 break;
4042
4043 default:
4044 break;
4045 }
4046 return NULL;
4047 }
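/* For instance, simplify_rtx applied to (plus:SI (x:SI) (const_int 0))
   dispatches through RTX_COMM_ARITH to simplify_binary_operation and
   returns X, while an expression with no known simplification comes
   back as NULL.  */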