simplify-rtx.c (simplify_unary_operation): Deal with logicals on floats.
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
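/* For example, if the low word holds -5 (sign bit set), HWI_SIGN_EXTEND
   yields (HOST_WIDE_INT) -1 for the high word, so the pair (-5, -1)
   represents -5; for a low word of 5 it yields 0, giving (5, 0).  */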
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 \f
61 /* Negate a CONST_INT rtx, truncating (because a conversion from a
62 maximally negative number can overflow). */
63 static rtx
64 neg_const_int (enum machine_mode mode, rtx i)
65 {
66 return gen_int_mode (- INTVAL (i), mode);
67 }
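/* For example, in QImode the maximally negative value is -128; its
   negation, +128, does not fit in 8 bits, so gen_int_mode truncates it
   back to -128 and negating (const_int -128) yields (const_int -128)
   again.  */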
68
69 \f
70 /* Make a binary operation by properly ordering the operands and
71 seeing if the expression folds. */
72
73 rtx
74 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
75 rtx op1)
76 {
77 rtx tem;
78
79 /* Put complex operands first and constants second if commutative. */
80 if (GET_RTX_CLASS (code) == 'c'
81 && swap_commutative_operands_p (op0, op1))
82 tem = op0, op0 = op1, op1 = tem;
83
84 /* If this simplifies, do it. */
85 tem = simplify_binary_operation (code, mode, op0, op1);
86 if (tem)
87 return tem;
88
89 /* Handle addition and subtraction specially. Otherwise, just form
90 the operation. */
91
92 if (code == PLUS || code == MINUS)
93 {
94 tem = simplify_plus_minus (code, mode, op0, op1, 1);
95 if (tem)
96 return tem;
97 }
98
99 return gen_rtx_fmt_ee (code, mode, op0, op1);
100 }
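/* A minimal usage sketch (illustrative only; the register number is
   arbitrary):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx x = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);

   Here the addition folds away and X is REG itself, whereas adding
   const1_rtx instead would build a fresh
   (plus (reg:SI 100) (const_int 1)).  */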
101 \f
102 /* If X is a MEM referencing the constant pool, return the real value.
103 Otherwise return X. */
104 rtx
105 avoid_constant_pool_reference (rtx x)
106 {
107 rtx c, tmp, addr;
108 enum machine_mode cmode;
109
110 switch (GET_CODE (x))
111 {
112 case MEM:
113 break;
114
115 case FLOAT_EXTEND:
116 /* Handle float extensions of constant pool references. */
117 tmp = XEXP (x, 0);
118 c = avoid_constant_pool_reference (tmp);
119 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
120 {
121 REAL_VALUE_TYPE d;
122
123 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
124 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
125 }
126 return x;
127
128 default:
129 return x;
130 }
131
132 addr = XEXP (x, 0);
133
134 /* Call target hook to avoid the effects of -fpic etc.... */
135 addr = (*targetm.delegitimize_address) (addr);
136
137 if (GET_CODE (addr) == LO_SUM)
138 addr = XEXP (addr, 1);
139
140 if (GET_CODE (addr) != SYMBOL_REF
141 || ! CONSTANT_POOL_ADDRESS_P (addr))
142 return x;
143
144 c = get_pool_constant (addr);
145 cmode = get_pool_mode (addr);
146
147 /* If we're accessing the constant in a different mode than it was
148 originally stored, attempt to fix that up via subreg simplifications.
149 If that fails we have no choice but to return the original memory. */
150 if (cmode != GET_MODE (x))
151 {
152 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
153 return c ? c : x;
154 }
155
156 return c;
157 }
158 \f
159 /* Make a unary operation by first seeing if it folds and otherwise making
160 the specified operation. */
161
162 rtx
163 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
164 enum machine_mode op_mode)
165 {
166 rtx tem;
167
168 /* If this simplifies, use it. */
169 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
170 return tem;
171
172 return gen_rtx_fmt_e (code, mode, op);
173 }
174
175 /* Likewise for ternary operations. */
176
177 rtx
178 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
179 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
180 {
181 rtx tem;
182
183 /* If this simplifies, use it. */
184 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
185 op0, op1, op2)))
186 return tem;
187
188 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
189 }
190 \f
191 /* Likewise, for relational operations.
192 CMP_MODE specifies the mode in which the comparison is done.
193 */
194
195 rtx
196 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
197 enum machine_mode cmp_mode, rtx op0, rtx op1)
198 {
199 rtx tem;
200
201 if (cmp_mode == VOIDmode)
202 cmp_mode = GET_MODE (op0);
203 if (cmp_mode == VOIDmode)
204 cmp_mode = GET_MODE (op1);
205
206 if (cmp_mode != VOIDmode)
207 {
208 tem = simplify_relational_operation (code, cmp_mode, op0, op1);
209
210 if (tem)
211 {
212 #ifdef FLOAT_STORE_FLAG_VALUE
213 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
214 {
215 REAL_VALUE_TYPE val;
216 if (tem == const0_rtx)
217 return CONST0_RTX (mode);
218 if (tem != const_true_rtx)
219 abort ();
220 val = FLOAT_STORE_FLAG_VALUE (mode);
221 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
222 }
223 #endif
224 return tem;
225 }
226 }
227
228 /* For the following tests, ensure const0_rtx is op1. */
229 if (swap_commutative_operands_p (op0, op1)
230 || (op0 == const0_rtx && op1 != const0_rtx))
231 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
232
233 /* If op0 is a compare, extract the comparison arguments from it. */
234 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
235 return simplify_gen_relational (code, mode, VOIDmode,
236 XEXP (op0, 0), XEXP (op0, 1));
237
238 /* If op0 is a comparison, extract the comparison arguments from it. */
239 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
240 {
241 if (code == NE)
242 {
243 if (GET_MODE (op0) == mode)
244 return op0;
245 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
246 XEXP (op0, 0), XEXP (op0, 1));
247 }
248 else if (code == EQ)
249 {
250 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
251 if (new != UNKNOWN)
252 return simplify_gen_relational (new, mode, VOIDmode,
253 XEXP (op0, 0), XEXP (op0, 1));
254 }
255 }
256
257 return gen_rtx_fmt_ee (code, mode, op0, op1);
258 }
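/* For example, for an integer comparison with the default
   STORE_FLAG_VALUE of 1, requesting (eq (gt x y) (const_int 0))
   reverses the comparison and yields (le x y), while
   (ne (gt x y) (const_int 0)) collapses to the GT comparison itself.  */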
259 \f
260 /* Replace all occurrences of OLD in X with NEW and try to simplify the
261 resulting RTX. Return a new RTX which is as simplified as possible. */
262
263 rtx
264 simplify_replace_rtx (rtx x, rtx old, rtx new)
265 {
266 enum rtx_code code = GET_CODE (x);
267 enum machine_mode mode = GET_MODE (x);
268 enum machine_mode op_mode;
269 rtx op0, op1, op2;
270
271 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
272 to build a new expression substituting recursively. If we can't do
273 anything, return our input. */
274
275 if (x == old)
276 return new;
277
278 switch (GET_RTX_CLASS (code))
279 {
280 case '1':
281 op0 = XEXP (x, 0);
282 op_mode = GET_MODE (op0);
283 op0 = simplify_replace_rtx (op0, old, new);
284 if (op0 == XEXP (x, 0))
285 return x;
286 return simplify_gen_unary (code, mode, op0, op_mode);
287
288 case '2':
289 case 'c':
290 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
291 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
292 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
293 return x;
294 return simplify_gen_binary (code, mode, op0, op1);
295
296 case '<':
297 op0 = XEXP (x, 0);
298 op1 = XEXP (x, 1);
299 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
300 op0 = simplify_replace_rtx (op0, old, new);
301 op1 = simplify_replace_rtx (op1, old, new);
302 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
303 return x;
304 return simplify_gen_relational (code, mode, op_mode, op0, op1);
305
306 case '3':
307 case 'b':
308 op0 = XEXP (x, 0);
309 op_mode = GET_MODE (op0);
310 op0 = simplify_replace_rtx (op0, old, new);
311 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
312 op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
313 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
314 return x;
315 if (op_mode == VOIDmode)
316 op_mode = GET_MODE (op0);
317 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
318
319 case 'x':
320 /* The only case we try to handle is a SUBREG. */
321 if (code == SUBREG)
322 {
323 op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
324 if (op0 == SUBREG_REG (x))
325 return x;
326 op0 = simplify_gen_subreg (GET_MODE (x), op0,
327 GET_MODE (SUBREG_REG (x)),
328 SUBREG_BYTE (x));
329 return op0 ? op0 : x;
330 }
331 break;
332
333 case 'o':
334 if (code == MEM)
335 {
336 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
337 if (op0 == XEXP (x, 0))
338 return x;
339 return replace_equiv_address_nv (x, op0);
340 }
341 else if (code == LO_SUM)
342 {
343 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
344 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
345
346 /* (lo_sum (high x) x) -> x */
347 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
348 return op1;
349
350 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
351 return x;
352 return gen_rtx_LO_SUM (mode, op0, op1);
353 }
354 else if (code == REG)
355 {
356 if (REG_P (old) && REGNO (x) == REGNO (old))
357 return new;
358 }
359 break;
360
361 default:
362 break;
363 }
364 return x;
365 }
366 \f
367 /* Try to simplify a unary operation CODE whose output mode is to be
368 MODE with input operand OP whose mode was originally OP_MODE.
369 Return zero if no simplification can be made. */
370 rtx
371 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
372 rtx op, enum machine_mode op_mode)
373 {
374 unsigned int width = GET_MODE_BITSIZE (mode);
375 rtx trueop = avoid_constant_pool_reference (op);
376
377 if (code == VEC_DUPLICATE)
378 {
379 if (!VECTOR_MODE_P (mode))
380 abort ();
381 if (GET_MODE (trueop) != VOIDmode
382 && !VECTOR_MODE_P (GET_MODE (trueop))
383 && GET_MODE_INNER (mode) != GET_MODE (trueop))
384 abort ();
385 if (GET_MODE (trueop) != VOIDmode
386 && VECTOR_MODE_P (GET_MODE (trueop))
387 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
388 abort ();
389 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
390 || GET_CODE (trueop) == CONST_VECTOR)
391 {
392 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
393 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
394 rtvec v = rtvec_alloc (n_elts);
395 unsigned int i;
396
397 if (GET_CODE (trueop) != CONST_VECTOR)
398 for (i = 0; i < n_elts; i++)
399 RTVEC_ELT (v, i) = trueop;
400 else
401 {
402 enum machine_mode inmode = GET_MODE (trueop);
403 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
404 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
405
406 if (in_n_elts >= n_elts || n_elts % in_n_elts)
407 abort ();
408 for (i = 0; i < n_elts; i++)
409 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
410 }
411 return gen_rtx_CONST_VECTOR (mode, v);
412 }
413 }
414 else if (GET_CODE (op) == CONST)
415 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
416
417 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
418 {
419 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
420 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
421 enum machine_mode opmode = GET_MODE (trueop);
422 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
423 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
424 rtvec v = rtvec_alloc (n_elts);
425 unsigned int i;
426
427 if (op_n_elts != n_elts)
428 abort ();
429
430 for (i = 0; i < n_elts; i++)
431 {
432 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
433 CONST_VECTOR_ELT (trueop, i),
434 GET_MODE_INNER (opmode));
435 if (!x)
436 return 0;
437 RTVEC_ELT (v, i) = x;
438 }
439 return gen_rtx_CONST_VECTOR (mode, v);
440 }
441
442 /* The order of these tests is critical so that, for example, we don't
443 check the wrong mode (input vs. output) for a conversion operation,
444 such as FIX. At some point, this should be simplified. */
445
446 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
447 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
448 {
449 HOST_WIDE_INT hv, lv;
450 REAL_VALUE_TYPE d;
451
452 if (GET_CODE (trueop) == CONST_INT)
453 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
454 else
455 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
456
457 REAL_VALUE_FROM_INT (d, lv, hv, mode);
458 d = real_value_truncate (mode, d);
459 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
460 }
461 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
462 && (GET_CODE (trueop) == CONST_DOUBLE
463 || GET_CODE (trueop) == CONST_INT))
464 {
465 HOST_WIDE_INT hv, lv;
466 REAL_VALUE_TYPE d;
467
468 if (GET_CODE (trueop) == CONST_INT)
469 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
470 else
471 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
472
473 if (op_mode == VOIDmode)
474 {
475 /* We don't know how to interpret negative-looking numbers in
476 this case, so don't try to fold those. */
477 if (hv < 0)
478 return 0;
479 }
480 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
481 ;
482 else
483 hv = 0, lv &= GET_MODE_MASK (op_mode);
484
485 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
486 d = real_value_truncate (mode, d);
487 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
488 }
489
490 if (GET_CODE (trueop) == CONST_INT
491 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
492 {
493 HOST_WIDE_INT arg0 = INTVAL (trueop);
494 HOST_WIDE_INT val;
495
496 switch (code)
497 {
498 case NOT:
499 val = ~ arg0;
500 break;
501
502 case NEG:
503 val = - arg0;
504 break;
505
506 case ABS:
507 val = (arg0 >= 0 ? arg0 : - arg0);
508 break;
509
510 case FFS:
511 /* Don't use ffs here. Instead, get low order bit and then its
512 number. If arg0 is zero, this will return 0, as desired. */
513 arg0 &= GET_MODE_MASK (mode);
514 val = exact_log2 (arg0 & (- arg0)) + 1;
515 break;
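/* For example, if ARG0 is 40 (binary 101000), ARG0 & -ARG0 is 8
   (binary 1000, the lowest set bit), exact_log2 of that is 3, and the
   FFS result is 3 + 1 = 4, the 1-based position of the lowest set
   bit.  */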
516
517 case CLZ:
518 arg0 &= GET_MODE_MASK (mode);
519 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
520 ;
521 else
522 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
523 break;
524
525 case CTZ:
526 arg0 &= GET_MODE_MASK (mode);
527 if (arg0 == 0)
528 {
529 /* Even if the value at zero is undefined, we have to come
530 up with some replacement. Seems good enough. */
531 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
532 val = GET_MODE_BITSIZE (mode);
533 }
534 else
535 val = exact_log2 (arg0 & -arg0);
536 break;
537
538 case POPCOUNT:
539 arg0 &= GET_MODE_MASK (mode);
540 val = 0;
541 while (arg0)
542 val++, arg0 &= arg0 - 1;
543 break;
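/* The loop above relies on ARG0 & (ARG0 - 1) clearing the lowest set
   bit, e.g. binary 1100 -> 1000 -> 0, so it iterates once per set bit
   and VAL ends up as the population count (2 here).  The PARITY case
   below reuses the same loop and keeps only the low bit.  */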
544
545 case PARITY:
546 arg0 &= GET_MODE_MASK (mode);
547 val = 0;
548 while (arg0)
549 val++, arg0 &= arg0 - 1;
550 val &= 1;
551 break;
552
553 case TRUNCATE:
554 val = arg0;
555 break;
556
557 case ZERO_EXTEND:
558 /* When zero-extending a CONST_INT, we need to know its
559 original mode. */
560 if (op_mode == VOIDmode)
561 abort ();
562 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
563 {
564 /* If we were really extending the mode,
565 we would have to distinguish between zero-extension
566 and sign-extension. */
567 if (width != GET_MODE_BITSIZE (op_mode))
568 abort ();
569 val = arg0;
570 }
571 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
572 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
573 else
574 return 0;
575 break;
576
577 case SIGN_EXTEND:
578 if (op_mode == VOIDmode)
579 op_mode = mode;
580 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
581 {
582 /* If we were really extending the mode,
583 we would have to distinguish between zero-extension
584 and sign-extension. */
585 if (width != GET_MODE_BITSIZE (op_mode))
586 abort ();
587 val = arg0;
588 }
589 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
590 {
591 val
592 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
593 if (val
594 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
595 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
596 }
597 else
598 return 0;
599 break;
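/* For example, sign-extending the QImode value 0xff: the mask keeps
   the low 8 bits, giving 0xff; bit 7 is set, so we subtract
   (HOST_WIDE_INT) 1 << 8 = 0x100 and obtain -1, the correct
   sign-extended value.  */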
600
601 case SQRT:
602 case FLOAT_EXTEND:
603 case FLOAT_TRUNCATE:
604 case SS_TRUNCATE:
605 case US_TRUNCATE:
606 return 0;
607
608 default:
609 abort ();
610 }
611
612 val = trunc_int_for_mode (val, mode);
613
614 return GEN_INT (val);
615 }
616
617 /* We can do some operations on integer CONST_DOUBLEs. Also allow
618 for a DImode operation on a CONST_INT. */
619 else if (GET_MODE (trueop) == VOIDmode
620 && width <= HOST_BITS_PER_WIDE_INT * 2
621 && (GET_CODE (trueop) == CONST_DOUBLE
622 || GET_CODE (trueop) == CONST_INT))
623 {
624 unsigned HOST_WIDE_INT l1, lv;
625 HOST_WIDE_INT h1, hv;
626
627 if (GET_CODE (trueop) == CONST_DOUBLE)
628 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
629 else
630 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
631
632 switch (code)
633 {
634 case NOT:
635 lv = ~ l1;
636 hv = ~ h1;
637 break;
638
639 case NEG:
640 neg_double (l1, h1, &lv, &hv);
641 break;
642
643 case ABS:
644 if (h1 < 0)
645 neg_double (l1, h1, &lv, &hv);
646 else
647 lv = l1, hv = h1;
648 break;
649
650 case FFS:
651 hv = 0;
652 if (l1 == 0)
653 {
654 if (h1 == 0)
655 lv = 0;
656 else
657 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
658 }
659 else
660 lv = exact_log2 (l1 & -l1) + 1;
661 break;
662
663 case CLZ:
664 hv = 0;
665 if (h1 != 0)
666 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
667 - HOST_BITS_PER_WIDE_INT;
668 else if (l1 != 0)
669 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
670 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
671 lv = GET_MODE_BITSIZE (mode);
672 break;
673
674 case CTZ:
675 hv = 0;
676 if (l1 != 0)
677 lv = exact_log2 (l1 & -l1);
678 else if (h1 != 0)
679 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
680 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
681 lv = GET_MODE_BITSIZE (mode);
682 break;
683
684 case POPCOUNT:
685 hv = 0;
686 lv = 0;
687 while (l1)
688 lv++, l1 &= l1 - 1;
689 while (h1)
690 lv++, h1 &= h1 - 1;
691 break;
692
693 case PARITY:
694 hv = 0;
695 lv = 0;
696 while (l1)
697 lv++, l1 &= l1 - 1;
698 while (h1)
699 lv++, h1 &= h1 - 1;
700 lv &= 1;
701 break;
702
703 case TRUNCATE:
704 /* This is just a change-of-mode, so do nothing. */
705 lv = l1, hv = h1;
706 break;
707
708 case ZERO_EXTEND:
709 if (op_mode == VOIDmode)
710 abort ();
711
712 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
713 return 0;
714
715 hv = 0;
716 lv = l1 & GET_MODE_MASK (op_mode);
717 break;
718
719 case SIGN_EXTEND:
720 if (op_mode == VOIDmode
721 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
722 return 0;
723 else
724 {
725 lv = l1 & GET_MODE_MASK (op_mode);
726 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
727 && (lv & ((HOST_WIDE_INT) 1
728 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
729 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
730
731 hv = HWI_SIGN_EXTEND (lv);
732 }
733 break;
734
735 case SQRT:
736 return 0;
737
738 default:
739 return 0;
740 }
741
742 return immed_double_const (lv, hv, mode);
743 }
744
745 else if (GET_CODE (trueop) == CONST_DOUBLE
746 && GET_MODE_CLASS (mode) == MODE_FLOAT)
747 {
748 REAL_VALUE_TYPE d, t;
749 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
750
751 switch (code)
752 {
753 case SQRT:
754 if (HONOR_SNANS (mode) && real_isnan (&d))
755 return 0;
756 real_sqrt (&t, mode, &d);
757 d = t;
758 break;
759 case ABS:
760 d = REAL_VALUE_ABS (d);
761 break;
762 case NEG:
763 d = REAL_VALUE_NEGATE (d);
764 break;
765 case FLOAT_TRUNCATE:
766 d = real_value_truncate (mode, d);
767 break;
768 case FLOAT_EXTEND:
769 /* All this does is change the mode. */
770 break;
771 case FIX:
772 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
773 break;
774 case NOT:
775 {
776 long tmp[4];
777 int i;
778
779 real_to_target (tmp, &d, GET_MODE (trueop));
780 for (i = 0; i < 4; i++)
781 tmp[i] = ~tmp[i];
782 real_from_target (&d, tmp, mode);
783 }
break;
784 default:
785 abort ();
786 }
787 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
788 }
789
790 else if (GET_CODE (trueop) == CONST_DOUBLE
791 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
792 && GET_MODE_CLASS (mode) == MODE_INT
793 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
794 {
795 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
796 operators are intentionally left unspecified (to ease implementation
797 by target backends), for consistency, this routine implements the
798 same semantics for constant folding as used by the middle-end. */
799
800 HOST_WIDE_INT xh, xl, th, tl;
801 REAL_VALUE_TYPE x, t;
802 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
803 switch (code)
804 {
805 case FIX:
806 if (REAL_VALUE_ISNAN (x))
807 return const0_rtx;
808
809 /* Test against the signed upper bound. */
810 if (width > HOST_BITS_PER_WIDE_INT)
811 {
812 th = ((unsigned HOST_WIDE_INT) 1
813 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
814 tl = -1;
815 }
816 else
817 {
818 th = 0;
819 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
820 }
821 real_from_integer (&t, VOIDmode, tl, th, 0);
822 if (REAL_VALUES_LESS (t, x))
823 {
824 xh = th;
825 xl = tl;
826 break;
827 }
828
829 /* Test against the signed lower bound. */
830 if (width > HOST_BITS_PER_WIDE_INT)
831 {
832 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
833 tl = 0;
834 }
835 else
836 {
837 th = -1;
838 tl = (HOST_WIDE_INT) -1 << (width - 1);
839 }
840 real_from_integer (&t, VOIDmode, tl, th, 0);
841 if (REAL_VALUES_LESS (x, t))
842 {
843 xh = th;
844 xl = tl;
845 break;
846 }
847 REAL_VALUE_TO_INT (&xl, &xh, x);
848 break;
849
850 case UNSIGNED_FIX:
851 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
852 return const0_rtx;
853
854 /* Test against the unsigned upper bound. */
855 if (width == 2*HOST_BITS_PER_WIDE_INT)
856 {
857 th = -1;
858 tl = -1;
859 }
860 else if (width >= HOST_BITS_PER_WIDE_INT)
861 {
862 th = ((unsigned HOST_WIDE_INT) 1
863 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
864 tl = -1;
865 }
866 else
867 {
868 th = 0;
869 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
870 }
871 real_from_integer (&t, VOIDmode, tl, th, 1);
872 if (REAL_VALUES_LESS (t, x))
873 {
874 xh = th;
875 xl = tl;
876 break;
877 }
878
879 REAL_VALUE_TO_INT (&xl, &xh, x);
880 break;
881
882 default:
883 abort ();
884 }
885 return immed_double_const (xl, xh, mode);
886 }
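/* For example, folding (fix:SI (const_double:DF 1e10)) under these
   semantics: 1e10 exceeds the SImode upper bound of 2147483647, so the
   result saturates to (const_int 2147483647); a NaN operand folds to
   (const_int 0).  */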
887
888 /* This was formerly used only for non-IEEE float.
889 eggert@twinsun.com says it is safe for IEEE also. */
890 else
891 {
892 enum rtx_code reversed;
893 rtx temp;
894
895 /* There are some simplifications we can do even if the operands
896 aren't constant. */
897 switch (code)
898 {
899 case NOT:
900 /* (not (not X)) == X. */
901 if (GET_CODE (op) == NOT)
902 return XEXP (op, 0);
903
904 /* (not (eq X Y)) == (ne X Y), etc. */
905 if (GET_RTX_CLASS (GET_CODE (op)) == '<'
906 && (mode == BImode || STORE_FLAG_VALUE == -1)
907 && ((reversed = reversed_comparison_code (op, NULL_RTX))
908 != UNKNOWN))
909 return simplify_gen_relational (reversed, mode, VOIDmode,
910 XEXP (op, 0), XEXP (op, 1));
911
912 /* (not (plus X -1)) can become (neg X). */
913 if (GET_CODE (op) == PLUS
914 && XEXP (op, 1) == constm1_rtx)
915 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
916
917 /* Similarly, (not (neg X)) is (plus X -1). */
918 if (GET_CODE (op) == NEG)
919 return plus_constant (XEXP (op, 0), -1);
920
921 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
922 if (GET_CODE (op) == XOR
923 && GET_CODE (XEXP (op, 1)) == CONST_INT
924 && (temp = simplify_unary_operation (NOT, mode,
925 XEXP (op, 1),
926 mode)) != 0)
927 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
928
929
930 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
931 operands other than 1, but that is not valid. We could do a
932 similar simplification for (not (lshiftrt C X)) where C is
933 just the sign bit, but this doesn't seem common enough to
934 bother with. */
935 if (GET_CODE (op) == ASHIFT
936 && XEXP (op, 0) == const1_rtx)
937 {
938 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
939 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
940 }
941
942 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
943 by reversing the comparison code if valid. */
944 if (STORE_FLAG_VALUE == -1
945 && GET_RTX_CLASS (GET_CODE (op)) == '<'
946 && (reversed = reversed_comparison_code (op, NULL_RTX))
947 != UNKNOWN)
948 return simplify_gen_relational (reversed, mode, VOIDmode,
949 XEXP (op, 0), XEXP (op, 1));
950
951 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
952 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
953 so we can perform the above simplification. */
954
955 if (STORE_FLAG_VALUE == -1
956 && GET_CODE (op) == ASHIFTRT
957 && GET_CODE (XEXP (op, 1)) == CONST_INT
958 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
959 return simplify_gen_relational (GE, mode, VOIDmode,
960 XEXP (op, 0), const0_rtx);
961
962 break;
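/* The PLUS/NEG rewrites above are the two's-complement identity
   -X == ~X + 1 in disguise: e.g. (not (plus X -1)) is
   ~(X - 1) == -(X - 1) - 1 == (neg X).  */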
963
964 case NEG:
965 /* (neg (neg X)) == X. */
966 if (GET_CODE (op) == NEG)
967 return XEXP (op, 0);
968
969 /* (neg (plus X 1)) can become (not X). */
970 if (GET_CODE (op) == PLUS
971 && XEXP (op, 1) == const1_rtx)
972 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
973
974 /* Similarly, (neg (not X)) is (plus X 1). */
975 if (GET_CODE (op) == NOT)
976 return plus_constant (XEXP (op, 0), 1);
977
978 /* (neg (minus X Y)) can become (minus Y X). This transformation
979 isn't safe for modes with signed zeros, since if X and Y are
980 both +0, (minus Y X) is the same as (minus X Y). If the
981 rounding mode is towards +infinity (or -infinity) then the two
982 expressions will be rounded differently. */
983 if (GET_CODE (op) == MINUS
984 && !HONOR_SIGNED_ZEROS (mode)
985 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
986 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
987 XEXP (op, 0));
988
989 if (GET_CODE (op) == PLUS
990 && !HONOR_SIGNED_ZEROS (mode)
991 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
992 {
993 /* (neg (plus A C)) is simplified to (minus -C A). */
994 if (GET_CODE (XEXP (op, 1)) == CONST_INT
995 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
996 {
997 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
998 mode);
999 if (temp)
1000 return simplify_gen_binary (MINUS, mode, temp,
1001 XEXP (op, 0));
1002 }
1003
1004 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1005 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1006 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1007 }
1008
1009 /* (neg (mult A B)) becomes (mult (neg A) B).
1010 This works even for floating-point values. */
1011 if (GET_CODE (op) == MULT
1012 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1013 {
1014 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1015 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1016 }
1017
1018 /* NEG commutes with ASHIFT since it is multiplication. Only do
1019 this if we can then eliminate the NEG (e.g., if the operand
1020 is a constant). */
1021 if (GET_CODE (op) == ASHIFT)
1022 {
1023 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1024 mode);
1025 if (temp)
1026 return simplify_gen_binary (ASHIFT, mode, temp,
1027 XEXP (op, 1));
1028 }
1029
1030 break;
1031
1032 case SIGN_EXTEND:
1033 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1034 becomes just the MINUS if its mode is MODE. This allows
1035 folding switch statements on machines using casesi (such as
1036 the VAX). */
1037 if (GET_CODE (op) == TRUNCATE
1038 && GET_MODE (XEXP (op, 0)) == mode
1039 && GET_CODE (XEXP (op, 0)) == MINUS
1040 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1041 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1042 return XEXP (op, 0);
1043
1044 /* Check for a sign extension of a subreg of a promoted
1045 variable, where the promotion is sign-extended, and the
1046 target mode is the same as the variable's promotion. */
1047 if (GET_CODE (op) == SUBREG
1048 && SUBREG_PROMOTED_VAR_P (op)
1049 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1050 && GET_MODE (XEXP (op, 0)) == mode)
1051 return XEXP (op, 0);
1052
1053 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1054 if (! POINTERS_EXTEND_UNSIGNED
1055 && mode == Pmode && GET_MODE (op) == ptr_mode
1056 && (CONSTANT_P (op)
1057 || (GET_CODE (op) == SUBREG
1058 && GET_CODE (SUBREG_REG (op)) == REG
1059 && REG_POINTER (SUBREG_REG (op))
1060 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1061 return convert_memory_address (Pmode, op);
1062 #endif
1063 break;
1064
1065 case ZERO_EXTEND:
1066 /* Check for a zero extension of a subreg of a promoted
1067 variable, where the promotion is zero-extended, and the
1068 target mode is the same as the variable's promotion. */
1069 if (GET_CODE (op) == SUBREG
1070 && SUBREG_PROMOTED_VAR_P (op)
1071 && SUBREG_PROMOTED_UNSIGNED_P (op)
1072 && GET_MODE (XEXP (op, 0)) == mode)
1073 return XEXP (op, 0);
1074
1075 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1076 if (POINTERS_EXTEND_UNSIGNED > 0
1077 && mode == Pmode && GET_MODE (op) == ptr_mode
1078 && (CONSTANT_P (op)
1079 || (GET_CODE (op) == SUBREG
1080 && GET_CODE (SUBREG_REG (op)) == REG
1081 && REG_POINTER (SUBREG_REG (op))
1082 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1083 return convert_memory_address (Pmode, op);
1084 #endif
1085 break;
1086
1087 default:
1088 break;
1089 }
1090
1091 return 0;
1092 }
1093 }
1094 \f
1095 /* Subroutine of simplify_binary_operation to simplify a commutative,
1096 associative binary operation CODE with result mode MODE, operating
1097 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1098 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1099 canonicalization is possible. */
1100
1101 static rtx
1102 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1103 rtx op0, rtx op1)
1104 {
1105 rtx tem;
1106
1107 /* Linearize the operator to the left. */
1108 if (GET_CODE (op1) == code)
1109 {
1110 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1111 if (GET_CODE (op0) == code)
1112 {
1113 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1114 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1115 }
1116
1117 /* "a op (b op c)" becomes "(b op c) op a". */
1118 if (! swap_commutative_operands_p (op1, op0))
1119 return simplify_gen_binary (code, mode, op1, op0);
1120
1121 tem = op0;
1122 op0 = op1;
1123 op1 = tem;
1124 }
1125
1126 if (GET_CODE (op0) == code)
1127 {
1128 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1129 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1130 {
1131 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1132 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1133 }
1134
1135 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1136 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1137 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1138 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1139 if (tem != 0)
1140 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1141
1142 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1143 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1144 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1145 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1146 if (tem != 0)
1147 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1148 }
1149
1150 return 0;
1151 }
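/* For example, with CODE == PLUS this canonicalizes
   (plus (plus x (const_int 3)) y) into
   (plus (plus x y) (const_int 3)), pulling the constant outermost so
   that later folds can combine it with other constants.  */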
1152
1153 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1154 and OP1. Return 0 if no simplification is possible.
1155
1156 Don't use this for relational operations such as EQ or LT.
1157 Use simplify_relational_operation instead. */
1158 rtx
1159 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1160 rtx op0, rtx op1)
1161 {
1162 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1163 HOST_WIDE_INT val;
1164 unsigned int width = GET_MODE_BITSIZE (mode);
1165 rtx trueop0, trueop1;
1166 rtx tem;
1167
1168 /* Relational operations don't work here. We must know the mode
1169 of the operands in order to do the comparison correctly.
1170 Assuming a full word can give incorrect results.
1171 Consider comparing 128 with -128 in QImode. */
1172
1173 if (GET_RTX_CLASS (code) == '<')
1174 abort ();
1175
1176 /* Make sure the constant is second. */
1177 if (GET_RTX_CLASS (code) == 'c'
1178 && swap_commutative_operands_p (op0, op1))
1179 {
1180 tem = op0, op0 = op1, op1 = tem;
1181 }
1182
1183 trueop0 = avoid_constant_pool_reference (op0);
1184 trueop1 = avoid_constant_pool_reference (op1);
1185
1186 if (VECTOR_MODE_P (mode)
1187 && GET_CODE (trueop0) == CONST_VECTOR
1188 && GET_CODE (trueop1) == CONST_VECTOR)
1189 {
1190 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1191 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1192 enum machine_mode op0mode = GET_MODE (trueop0);
1193 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1194 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1195 enum machine_mode op1mode = GET_MODE (trueop1);
1196 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1197 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1198 rtvec v = rtvec_alloc (n_elts);
1199 unsigned int i;
1200
1201 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1202 abort ();
1203
1204 for (i = 0; i < n_elts; i++)
1205 {
1206 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1207 CONST_VECTOR_ELT (trueop0, i),
1208 CONST_VECTOR_ELT (trueop1, i));
1209 if (!x)
1210 return 0;
1211 RTVEC_ELT (v, i) = x;
1212 }
1213
1214 return gen_rtx_CONST_VECTOR (mode, v);
1215 }
1216
1217 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1218 && GET_CODE (trueop0) == CONST_DOUBLE
1219 && GET_CODE (trueop1) == CONST_DOUBLE
1220 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1221 {
1222 if (code == AND
1223 || code == IOR
1224 || code == XOR)
1225 {
1226 long tmp0[4];
1227 long tmp1[4];
1228 REAL_VALUE_TYPE r;
1229 int i;
1230
1231 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1232 GET_MODE (op0));
1233 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1234 GET_MODE (op1));
1235 for (i = 0; i < 4; i++)
1236 {
1237 if (code == AND)
1238 tmp0[i] &= tmp1[i];
1239 else if (code == IOR)
1240 tmp0[i] |= tmp1[i];
1241 else if (code == XOR)
1242 tmp0[i] ^= tmp1[i];
1243 else
1244 abort ();
1245 }
1246 real_from_target (&r, tmp0, mode);
1247 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1248 }
1249 else
1250 {
1251 REAL_VALUE_TYPE f0, f1, value;
1252
1253 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1254 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1255 f0 = real_value_truncate (mode, f0);
1256 f1 = real_value_truncate (mode, f1);
1257
1258 if (HONOR_SNANS (mode)
1259 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1260 return 0;
1261
1262 if (code == DIV
1263 && REAL_VALUES_EQUAL (f1, dconst0)
1264 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1265 return 0;
1266
1267 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1268
1269 value = real_value_truncate (mode, value);
1270 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1271 }
1272 }
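/* The AND/IOR/XOR handling above operates on the target bit image of
   the float constants.  For example, in SFmode, AND with a constant
   whose bit pattern is 0x7fffffff clears the sign bit, turning -2.0
   (pattern 0xc0000000) into 2.0 (pattern 0x40000000) -- the bitwise
   fabs idiom -- and XOR with a sign-bit-only pattern negates.  */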
1273
1274 /* We can fold some multi-word operations. */
1275 if (GET_MODE_CLASS (mode) == MODE_INT
1276 && width == HOST_BITS_PER_WIDE_INT * 2
1277 && (GET_CODE (trueop0) == CONST_DOUBLE
1278 || GET_CODE (trueop0) == CONST_INT)
1279 && (GET_CODE (trueop1) == CONST_DOUBLE
1280 || GET_CODE (trueop1) == CONST_INT))
1281 {
1282 unsigned HOST_WIDE_INT l1, l2, lv;
1283 HOST_WIDE_INT h1, h2, hv;
1284
1285 if (GET_CODE (trueop0) == CONST_DOUBLE)
1286 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1287 else
1288 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1289
1290 if (GET_CODE (trueop1) == CONST_DOUBLE)
1291 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1292 else
1293 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1294
1295 switch (code)
1296 {
1297 case MINUS:
1298 /* A - B == A + (-B). */
1299 neg_double (l2, h2, &lv, &hv);
1300 l2 = lv, h2 = hv;
1301
1302 /* Fall through.... */
1303
1304 case PLUS:
1305 add_double (l1, h1, l2, h2, &lv, &hv);
1306 break;
1307
1308 case MULT:
1309 mul_double (l1, h1, l2, h2, &lv, &hv);
1310 break;
1311
1312 case DIV: case MOD: case UDIV: case UMOD:
1313 /* We'd need to include tree.h to do this and it doesn't seem worth
1314 it. */
1315 return 0;
1316
1317 case AND:
1318 lv = l1 & l2, hv = h1 & h2;
1319 break;
1320
1321 case IOR:
1322 lv = l1 | l2, hv = h1 | h2;
1323 break;
1324
1325 case XOR:
1326 lv = l1 ^ l2, hv = h1 ^ h2;
1327 break;
1328
1329 case SMIN:
1330 if (h1 < h2
1331 || (h1 == h2
1332 && ((unsigned HOST_WIDE_INT) l1
1333 < (unsigned HOST_WIDE_INT) l2)))
1334 lv = l1, hv = h1;
1335 else
1336 lv = l2, hv = h2;
1337 break;
1338
1339 case SMAX:
1340 if (h1 > h2
1341 || (h1 == h2
1342 && ((unsigned HOST_WIDE_INT) l1
1343 > (unsigned HOST_WIDE_INT) l2)))
1344 lv = l1, hv = h1;
1345 else
1346 lv = l2, hv = h2;
1347 break;
1348
1349 case UMIN:
1350 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1351 || (h1 == h2
1352 && ((unsigned HOST_WIDE_INT) l1
1353 < (unsigned HOST_WIDE_INT) l2)))
1354 lv = l1, hv = h1;
1355 else
1356 lv = l2, hv = h2;
1357 break;
1358
1359 case UMAX:
1360 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1361 || (h1 == h2
1362 && ((unsigned HOST_WIDE_INT) l1
1363 > (unsigned HOST_WIDE_INT) l2)))
1364 lv = l1, hv = h1;
1365 else
1366 lv = l2, hv = h2;
1367 break;
1368
1369 case LSHIFTRT: case ASHIFTRT:
1370 case ASHIFT:
1371 case ROTATE: case ROTATERT:
1372 #ifdef SHIFT_COUNT_TRUNCATED
1373 if (SHIFT_COUNT_TRUNCATED)
1374 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1375 #endif
1376
1377 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1378 return 0;
1379
1380 if (code == LSHIFTRT || code == ASHIFTRT)
1381 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1382 code == ASHIFTRT);
1383 else if (code == ASHIFT)
1384 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1385 else if (code == ROTATE)
1386 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1387 else /* code == ROTATERT */
1388 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1389 break;
1390
1391 default:
1392 return 0;
1393 }
1394
1395 return immed_double_const (lv, hv, mode);
1396 }
1397
1398 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1399 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1400 {
1401 /* Even if we can't compute a constant result,
1402 there are some cases worth simplifying. */
1403
1404 switch (code)
1405 {
1406 case PLUS:
1407 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1408 when x is NaN, infinite, or finite and nonzero. They aren't
1409 when x is -0 and the rounding mode is not towards -infinity,
1410 since (-0) + 0 is then 0. */
1411 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1412 return op0;
1413
1414 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1415 transformations are safe even for IEEE. */
1416 if (GET_CODE (op0) == NEG)
1417 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1418 else if (GET_CODE (op1) == NEG)
1419 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1420
1421 /* (~a) + 1 -> -a */
1422 if (INTEGRAL_MODE_P (mode)
1423 && GET_CODE (op0) == NOT
1424 && trueop1 == const1_rtx)
1425 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1426
1427 /* Handle both-operands-constant cases. We can only add
1428 CONST_INTs to constants since the sum of relocatable symbols
1429 can't be handled by most assemblers. Don't add CONST_INT
1430 to CONST_INT since overflow won't be computed properly if wider
1431 than HOST_BITS_PER_WIDE_INT. */
1432
1433 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1434 && GET_CODE (op1) == CONST_INT)
1435 return plus_constant (op0, INTVAL (op1));
1436 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1437 && GET_CODE (op0) == CONST_INT)
1438 return plus_constant (op1, INTVAL (op0));
1439
1440 /* See if this is something like X * C + X or vice versa or
1441 if the multiplication is written as a shift. If so, we can
1442 distribute and make a new multiply, shift, or maybe just
1443 have X (if C is 2 in the example above). But don't make
1444 a real multiply if we didn't have one before. */
1445
1446 if (! FLOAT_MODE_P (mode))
1447 {
1448 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1449 rtx lhs = op0, rhs = op1;
1450 int had_mult = 0;
1451
1452 if (GET_CODE (lhs) == NEG)
1453 coeff0 = -1, lhs = XEXP (lhs, 0);
1454 else if (GET_CODE (lhs) == MULT
1455 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1456 {
1457 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1458 had_mult = 1;
1459 }
1460 else if (GET_CODE (lhs) == ASHIFT
1461 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1462 && INTVAL (XEXP (lhs, 1)) >= 0
1463 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1464 {
1465 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1466 lhs = XEXP (lhs, 0);
1467 }
1468
1469 if (GET_CODE (rhs) == NEG)
1470 coeff1 = -1, rhs = XEXP (rhs, 0);
1471 else if (GET_CODE (rhs) == MULT
1472 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1473 {
1474 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1475 had_mult = 1;
1476 }
1477 else if (GET_CODE (rhs) == ASHIFT
1478 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1479 && INTVAL (XEXP (rhs, 1)) >= 0
1480 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1481 {
1482 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1483 rhs = XEXP (rhs, 0);
1484 }
1485
1486 if (rtx_equal_p (lhs, rhs))
1487 {
1488 tem = simplify_gen_binary (MULT, mode, lhs,
1489 GEN_INT (coeff0 + coeff1));
1490 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1491 }
1492 }
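/* For example, (plus (mult x 3) x) has COEFF0 == 3 and COEFF1 == 1,
   so it folds to (mult x 4); HAD_MULT is set, so the new multiply is
   kept.  By contrast (plus (ashift x 2) x) yields (mult x 5) only if
   that simplifies further, since no real multiply was present.  */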
1493
1494 /* If one of the operands is a PLUS or a MINUS, see if we can
1495 simplify this by the associative law.
1496 Don't use the associative law for floating point.
1497 The inaccuracy makes it nonassociative,
1498 and subtle programs can break if operations are associated. */
1499
1500 if (INTEGRAL_MODE_P (mode)
1501 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1502 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1503 || (GET_CODE (op0) == CONST
1504 && GET_CODE (XEXP (op0, 0)) == PLUS)
1505 || (GET_CODE (op1) == CONST
1506 && GET_CODE (XEXP (op1, 0)) == PLUS))
1507 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1508 return tem;
1509
1510 /* Reassociate floating point addition only when the user
1511 specifies unsafe math optimizations. */
1512 if (FLOAT_MODE_P (mode)
1513 && flag_unsafe_math_optimizations)
1514 {
1515 tem = simplify_associative_operation (code, mode, op0, op1);
1516 if (tem)
1517 return tem;
1518 }
1519 break;
1520
1521 case COMPARE:
1522 #ifdef HAVE_cc0
1523 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1524 using cc0, in which case we want to leave it as a COMPARE
1525 so we can distinguish it from a register-register-copy.
1526
1527 In IEEE floating point, x-0 is not the same as x. */
1528
1529 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1530 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1531 && trueop1 == CONST0_RTX (mode))
1532 return op0;
1533 #endif
1534
1535 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1536 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1537 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1538 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1539 {
1540 rtx xop00 = XEXP (op0, 0);
1541 rtx xop10 = XEXP (op1, 0);
1542
1543 #ifdef HAVE_cc0
1544 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1545 #else
1546 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1547 && GET_MODE (xop00) == GET_MODE (xop10)
1548 && REGNO (xop00) == REGNO (xop10)
1549 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1550 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1551 #endif
1552 return xop00;
1553 }
1554 break;
1555
1556 case MINUS:
1557 /* We can't assume x-x is 0 even with non-IEEE floating point,
1558 but since it is zero except in very strange circumstances, we
1559 will treat it as zero with -funsafe-math-optimizations. */
1560 if (rtx_equal_p (trueop0, trueop1)
1561 && ! side_effects_p (op0)
1562 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1563 return CONST0_RTX (mode);
1564
1565 /* Change subtraction from zero into negation. (0 - x) is the
1566 same as -x when x is NaN, infinite, or finite and nonzero.
1567 But if the mode has signed zeros, and does not round towards
1568 -infinity, then 0 - 0 is 0, not -0. */
1569 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1570 return simplify_gen_unary (NEG, mode, op1, mode);
1571
1572 /* (-1 - a) is ~a. */
1573 if (trueop0 == constm1_rtx)
1574 return simplify_gen_unary (NOT, mode, op1, mode);
1575
1576 /* Subtracting 0 has no effect unless the mode has signed zeros
1577 and supports rounding towards -infinity. In such a case,
1578 0 - 0 is -0. */
1579 if (!(HONOR_SIGNED_ZEROS (mode)
1580 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1581 && trueop1 == CONST0_RTX (mode))
1582 return op0;
1583
1584 /* See if this is something like X * C - X or vice versa or
1585 if the multiplication is written as a shift. If so, we can
1586 distribute and make a new multiply, shift, or maybe just
1587 have X (if C is 2 in the example above). But don't make
1588 a real multiply if we didn't have one before. */
1589
1590 if (! FLOAT_MODE_P (mode))
1591 {
1592 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1593 rtx lhs = op0, rhs = op1;
1594 int had_mult = 0;
1595
1596 if (GET_CODE (lhs) == NEG)
1597 coeff0 = -1, lhs = XEXP (lhs, 0);
1598 else if (GET_CODE (lhs) == MULT
1599 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1600 {
1601 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1602 had_mult = 1;
1603 }
1604 else if (GET_CODE (lhs) == ASHIFT
1605 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1606 && INTVAL (XEXP (lhs, 1)) >= 0
1607 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1608 {
1609 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1610 lhs = XEXP (lhs, 0);
1611 }
1612
1613 if (GET_CODE (rhs) == NEG)
1614 coeff1 = -1, rhs = XEXP (rhs, 0);
1615 else if (GET_CODE (rhs) == MULT
1616 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1617 {
1618 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1619 had_mult = 1;
1620 }
1621 else if (GET_CODE (rhs) == ASHIFT
1622 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1623 && INTVAL (XEXP (rhs, 1)) >= 0
1624 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1625 {
1626 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1627 rhs = XEXP (rhs, 0);
1628 }
1629
1630 if (rtx_equal_p (lhs, rhs))
1631 {
1632 tem = simplify_gen_binary (MULT, mode, lhs,
1633 GEN_INT (coeff0 - coeff1));
1634 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1635 }
1636 }
1637
1638 /* (a - (-b)) -> (a + b). True even for IEEE. */
1639 if (GET_CODE (op1) == NEG)
1640 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1641
1642 /* (-x - c) may be simplified as (-c - x). */
1643 if (GET_CODE (op0) == NEG
1644 && (GET_CODE (op1) == CONST_INT
1645 || GET_CODE (op1) == CONST_DOUBLE))
1646 {
1647 tem = simplify_unary_operation (NEG, mode, op1, mode);
1648 if (tem)
1649 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1650 }
1651
1652 /* If one of the operands is a PLUS or a MINUS, see if we can
1653 simplify this by the associative law.
1654 Don't use the associative law for floating point.
1655 The inaccuracy makes it nonassociative,
1656 and subtle programs can break if operations are associated. */
1657
1658 if (INTEGRAL_MODE_P (mode)
1659 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1660 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1661 || (GET_CODE (op0) == CONST
1662 && GET_CODE (XEXP (op0, 0)) == PLUS)
1663 || (GET_CODE (op1) == CONST
1664 && GET_CODE (XEXP (op1, 0)) == PLUS))
1665 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1666 return tem;
1667
1668 /* Don't let a relocatable value get a negative coeff. */
1669 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1670 return simplify_gen_binary (PLUS, mode,
1671 op0,
1672 neg_const_int (mode, op1));
1673
1674 /* (x - (x & y)) -> (x & ~y) */
1675 if (GET_CODE (op1) == AND)
1676 {
1677 if (rtx_equal_p (op0, XEXP (op1, 0)))
1678 {
1679 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1680 GET_MODE (XEXP (op1, 1)));
1681 return simplify_gen_binary (AND, mode, op0, tem);
1682 }
1683 if (rtx_equal_p (op0, XEXP (op1, 1)))
1684 {
1685 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1686 GET_MODE (XEXP (op1, 0)));
1687 return simplify_gen_binary (AND, mode, op0, tem);
1688 }
1689 }
1690 break;
1691
1692 case MULT:
1693 if (trueop1 == constm1_rtx)
1694 return simplify_gen_unary (NEG, mode, op0, mode);
1695
1696 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1697 x is NaN, since x * 0 is then also NaN. Nor is it valid
1698 when the mode has signed zeros, since multiplying a negative
1699 number by 0 will give -0, not 0. */
1700 if (!HONOR_NANS (mode)
1701 && !HONOR_SIGNED_ZEROS (mode)
1702 && trueop1 == CONST0_RTX (mode)
1703 && ! side_effects_p (op0))
1704 return op1;
1705
1706 /* In IEEE floating point, x*1 is not equivalent to x for
1707 signalling NaNs. */
1708 if (!HONOR_SNANS (mode)
1709 && trueop1 == CONST1_RTX (mode))
1710 return op0;
1711
1712 /* Convert multiply by constant power of two into shift unless
1713 we are still generating RTL. This test is a kludge. */
1714 if (GET_CODE (trueop1) == CONST_INT
1715 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1716 /* If the mode is larger than the host word size, and the
1717 uppermost bit is set, then this isn't a power of two due
1718 to implicit sign extension. */
1719 && (width <= HOST_BITS_PER_WIDE_INT
1720 || val != HOST_BITS_PER_WIDE_INT - 1)
1721 && ! rtx_equal_function_value_matters)
1722 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
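/* For example, once initial RTL generation is over,
   (mult x (const_int 8)) becomes (ashift x (const_int 3)), since
   exact_log2 (8) == 3.  */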
1723
1724 /* x*2 is x+x and x*(-1) is -x */
1725 if (GET_CODE (trueop1) == CONST_DOUBLE
1726 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1727 && GET_MODE (op0) == mode)
1728 {
1729 REAL_VALUE_TYPE d;
1730 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1731
1732 if (REAL_VALUES_EQUAL (d, dconst2))
1733 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1734
1735 if (REAL_VALUES_EQUAL (d, dconstm1))
1736 return simplify_gen_unary (NEG, mode, op0, mode);
1737 }
1738
1739 /* Reassociate multiplication, but for floating point MULTs
1740 only when the user specifies unsafe math optimizations. */
1741 if (! FLOAT_MODE_P (mode)
1742 || flag_unsafe_math_optimizations)
1743 {
1744 tem = simplify_associative_operation (code, mode, op0, op1);
1745 if (tem)
1746 return tem;
1747 }
1748 break;
1749
1750 case IOR:
1751 if (trueop1 == const0_rtx)
1752 return op0;
1753 if (GET_CODE (trueop1) == CONST_INT
1754 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1755 == GET_MODE_MASK (mode)))
1756 return op1;
1757 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1758 return op0;
1759 /* A | (~A) -> -1 */
1760 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1761 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1762 && ! side_effects_p (op0)
1763 && GET_MODE_CLASS (mode) != MODE_CC)
1764 return constm1_rtx;
1765 tem = simplify_associative_operation (code, mode, op0, op1);
1766 if (tem)
1767 return tem;
1768 break;
1769
1770 case XOR:
1771 if (trueop1 == const0_rtx)
1772 return op0;
1773 if (GET_CODE (trueop1) == CONST_INT
1774 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1775 == GET_MODE_MASK (mode)))
1776 return simplify_gen_unary (NOT, mode, op0, mode);
1777 if (trueop0 == trueop1 && ! side_effects_p (op0)
1778 && GET_MODE_CLASS (mode) != MODE_CC)
1779 return const0_rtx;
1780 tem = simplify_associative_operation (code, mode, op0, op1);
1781 if (tem)
1782 return tem;
1783 break;
1784
1785 case AND:
1786 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1787 return const0_rtx;
1788 if (GET_CODE (trueop1) == CONST_INT
1789 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1790 == GET_MODE_MASK (mode)))
1791 return op0;
1792 if (trueop0 == trueop1 && ! side_effects_p (op0)
1793 && GET_MODE_CLASS (mode) != MODE_CC)
1794 return op0;
1795 /* A & (~A) -> 0 */
1796 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1797 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1798 && ! side_effects_p (op0)
1799 && GET_MODE_CLASS (mode) != MODE_CC)
1800 return const0_rtx;
1801 tem = simplify_associative_operation (code, mode, op0, op1);
1802 if (tem)
1803 return tem;
1804 break;
1805
1806 case UDIV:
1807 /* Convert divide by power of two into shift (divide by 1 handled
1808 below). */
1809 if (GET_CODE (trueop1) == CONST_INT
1810 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1811 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1812
1813 /* Fall through.... */
1814
1815 case DIV:
1816 if (trueop1 == CONST1_RTX (mode))
1817 {
1818 /* On some platforms DIV uses narrower mode than its
1819 operands. */
1820 rtx x = gen_lowpart_common (mode, op0);
1821 if (x)
1822 return x;
1823 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1824 return gen_lowpart_SUBREG (mode, op0);
1825 else
1826 return op0;
1827 }
1828
1829 /* Maybe change 0 / x to 0. This transformation isn't safe for
1830 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1831 Nor is it safe for modes with signed zeros, since dividing
1832 0 by a negative number gives -0, not 0. */
1833 if (!HONOR_NANS (mode)
1834 && !HONOR_SIGNED_ZEROS (mode)
1835 && trueop0 == CONST0_RTX (mode)
1836 && ! side_effects_p (op1))
1837 return op0;
1838
1839 /* Change division by a constant into multiplication. Only do
1840 this with -funsafe-math-optimizations. */
1841 else if (GET_CODE (trueop1) == CONST_DOUBLE
1842 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1843 && trueop1 != CONST0_RTX (mode)
1844 && flag_unsafe_math_optimizations)
1845 {
1846 REAL_VALUE_TYPE d;
1847 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1848
1849 if (! REAL_VALUES_EQUAL (d, dconst0))
1850 {
1851 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1852 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1853 return simplify_gen_binary (MULT, mode, op0, tem);
1854 }
1855 }
1856 break;
1857
1858 case UMOD:
1859 /* Handle modulus by power of two (mod with 1 handled below). */
1860 if (GET_CODE (trueop1) == CONST_INT
1861 && exact_log2 (INTVAL (trueop1)) > 0)
1862 return simplify_gen_binary (AND, mode, op0,
1863 GEN_INT (INTVAL (op1) - 1));
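/* Together with the UDIV case above, this turns unsigned division and
   modulus by a power of two into bit operations: e.g. (udiv x 8)
   becomes (lshiftrt x 3) and (umod x 8) becomes (and x 7).  */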
1864
1865 /* Fall through.... */
1866
1867 case MOD:
1868 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1869 && ! side_effects_p (op0) && ! side_effects_p (op1))
1870 return const0_rtx;
1871 break;
1872
1873 case ROTATERT:
1874 case ROTATE:
1875 case ASHIFTRT:
1876 /* Rotating ~0 always results in ~0. */
1877 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1878 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1879 && ! side_effects_p (op1))
1880 return op0;
1881
1882 /* Fall through.... */
1883
1884 case ASHIFT:
1885 case LSHIFTRT:
1886 if (trueop1 == const0_rtx)
1887 return op0;
1888 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1889 return op0;
1890 break;
1891
1892 case SMIN:
1893 if (width <= HOST_BITS_PER_WIDE_INT
1894 && GET_CODE (trueop1) == CONST_INT
1895 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1896 && ! side_effects_p (op0))
1897 return op1;
1898 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1899 return op0;
1900 tem = simplify_associative_operation (code, mode, op0, op1);
1901 if (tem)
1902 return tem;
1903 break;
1904
1905 case SMAX:
1906 if (width <= HOST_BITS_PER_WIDE_INT
1907 && GET_CODE (trueop1) == CONST_INT
1908 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1909 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1910 && ! side_effects_p (op0))
1911 return op1;
1912 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1913 return op0;
1914 tem = simplify_associative_operation (code, mode, op0, op1);
1915 if (tem)
1916 return tem;
1917 break;
1918
1919 case UMIN:
1920 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1921 return op1;
1922 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1923 return op0;
1924 tem = simplify_associative_operation (code, mode, op0, op1);
1925 if (tem)
1926 return tem;
1927 break;
1928
1929 case UMAX:
1930 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1931 return op1;
1932 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1933 return op0;
1934 tem = simplify_associative_operation (code, mode, op0, op1);
1935 if (tem)
1936 return tem;
1937 break;
1938
1939 case SS_PLUS:
1940 case US_PLUS:
1941 case SS_MINUS:
1942 case US_MINUS:
1943 /* ??? There are simplifications that can be done. */
1944 return 0;
1945
1946 case VEC_SELECT:
1947 if (!VECTOR_MODE_P (mode))
1948 {
1949 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1950 || (mode
1951 != GET_MODE_INNER (GET_MODE (trueop0)))
1952 || GET_CODE (trueop1) != PARALLEL
1953 || XVECLEN (trueop1, 0) != 1
1954 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1955 abort ();
1956
1957 if (GET_CODE (trueop0) == CONST_VECTOR)
1958 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1959 }
1960 else
1961 {
1962 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1963 || (GET_MODE_INNER (mode)
1964 != GET_MODE_INNER (GET_MODE (trueop0)))
1965 || GET_CODE (trueop1) != PARALLEL)
1966 abort ();
1967
1968 if (GET_CODE (trueop0) == CONST_VECTOR)
1969 {
1970 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1971 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1972 rtvec v = rtvec_alloc (n_elts);
1973 unsigned int i;
1974
1975 if (XVECLEN (trueop1, 0) != (int) n_elts)
1976 abort ();
1977 for (i = 0; i < n_elts; i++)
1978 {
1979 rtx x = XVECEXP (trueop1, 0, i);
1980
1981 if (GET_CODE (x) != CONST_INT)
1982 abort ();
1983 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1984 }
1985
1986 return gen_rtx_CONST_VECTOR (mode, v);
1987 }
1988 }
1989 return 0;
1990 case VEC_CONCAT:
1991 {
1992 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1993 ? GET_MODE (trueop0)
1994 : GET_MODE_INNER (mode));
1995 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1996 ? GET_MODE (trueop1)
1997 : GET_MODE_INNER (mode));
1998
1999 if (!VECTOR_MODE_P (mode)
2000 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2001 != GET_MODE_SIZE (mode)))
2002 abort ();
2003
2004 if ((VECTOR_MODE_P (op0_mode)
2005 && (GET_MODE_INNER (mode)
2006 != GET_MODE_INNER (op0_mode)))
2007 || (!VECTOR_MODE_P (op0_mode)
2008 && GET_MODE_INNER (mode) != op0_mode))
2009 abort ();
2010
2011 if ((VECTOR_MODE_P (op1_mode)
2012 && (GET_MODE_INNER (mode)
2013 != GET_MODE_INNER (op1_mode)))
2014 || (!VECTOR_MODE_P (op1_mode)
2015 && GET_MODE_INNER (mode) != op1_mode))
2016 abort ();
2017
2018 if ((GET_CODE (trueop0) == CONST_VECTOR
2019 || GET_CODE (trueop0) == CONST_INT
2020 || GET_CODE (trueop0) == CONST_DOUBLE)
2021 && (GET_CODE (trueop1) == CONST_VECTOR
2022 || GET_CODE (trueop1) == CONST_INT
2023 || GET_CODE (trueop1) == CONST_DOUBLE))
2024 {
2025 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2026 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2027 rtvec v = rtvec_alloc (n_elts);
2028 unsigned int i;
2029 unsigned in_n_elts = 1;
2030
2031 if (VECTOR_MODE_P (op0_mode))
2032 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2033 for (i = 0; i < n_elts; i++)
2034 {
2035 if (i < in_n_elts)
2036 {
2037 if (!VECTOR_MODE_P (op0_mode))
2038 RTVEC_ELT (v, i) = trueop0;
2039 else
2040 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2041 }
2042 else
2043 {
2044 if (!VECTOR_MODE_P (op1_mode))
2045 RTVEC_ELT (v, i) = trueop1;
2046 else
2047 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2048 i - in_n_elts);
2049 }
2050 }
2051
2052 return gen_rtx_CONST_VECTOR (mode, v);
2053 }
2054 }
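        /* Worked example for the constant folding above (element values
	   hypothetical):

	     (vec_concat:V4SI (const_vector:V2SI [1 2])
	                      (const_vector:V2SI [3 4]))

	   has elt_size == 4, n_elts == 4 and in_n_elts == 2, so the loop
	   copies 1, 2 from trueop0 and 3, 4 from trueop1, producing

	     (const_vector:V4SI [1 2 3 4]).  */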
2055 return 0;
2056
2057 default:
2058 abort ();
2059 }
2060
2061 return 0;
2062 }
2063
2064 /* Get the integer argument values in two forms:
2065 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2066
2067 arg0 = INTVAL (trueop0);
2068 arg1 = INTVAL (trueop1);
2069
2070 if (width < HOST_BITS_PER_WIDE_INT)
2071 {
2072 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2073 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2074
2075 arg0s = arg0;
2076 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2077 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2078
2079 arg1s = arg1;
2080 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2081 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2082 }
2083 else
2084 {
2085 arg0s = arg0;
2086 arg1s = arg1;
2087 }
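      /* Example of the two forms, assuming width == 8 (QImode) and a
	 wider HOST_WIDE_INT: a CONST_INT of -1 yields

	   arg0  == 0xff   zero-extended, for the unsigned cases below
	   arg0s == -1     sign-extended, for the signed cases below.  */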
2088
2089 /* Compute the value of the arithmetic. */
2090
2091 switch (code)
2092 {
2093 case PLUS:
2094 val = arg0s + arg1s;
2095 break;
2096
2097 case MINUS:
2098 val = arg0s - arg1s;
2099 break;
2100
2101 case MULT:
2102 val = arg0s * arg1s;
2103 break;
2104
2105 case DIV:
2106 if (arg1s == 0
2107 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2108 && arg1s == -1))
2109 return 0;
2110 val = arg0s / arg1s;
2111 break;
2112
2113 case MOD:
2114 if (arg1s == 0
2115 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2116 && arg1s == -1))
2117 return 0;
2118 val = arg0s % arg1s;
2119 break;
2120
2121 case UDIV:
2122 if (arg1 == 0
2123 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2124 && arg1s == -1))
2125 return 0;
2126 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2127 break;
2128
2129 case UMOD:
2130 if (arg1 == 0
2131 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2132 && arg1s == -1))
2133 return 0;
2134 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2135 break;
2136
2137 case AND:
2138 val = arg0 & arg1;
2139 break;
2140
2141 case IOR:
2142 val = arg0 | arg1;
2143 break;
2144
2145 case XOR:
2146 val = arg0 ^ arg1;
2147 break;
2148
2149 case LSHIFTRT:
2150 /* If shift count is undefined, don't fold it; let the machine do
2151 what it wants. But truncate it if the machine will do that. */
2152 if (arg1 < 0)
2153 return 0;
2154
2155 #ifdef SHIFT_COUNT_TRUNCATED
2156 if (SHIFT_COUNT_TRUNCATED)
2157 arg1 %= width;
2158 #endif
2159
2160 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2161 break;
2162
2163 case ASHIFT:
2164 if (arg1 < 0)
2165 return 0;
2166
2167 #ifdef SHIFT_COUNT_TRUNCATED
2168 if (SHIFT_COUNT_TRUNCATED)
2169 arg1 %= width;
2170 #endif
2171
2172 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2173 break;
2174
2175 case ASHIFTRT:
2176 if (arg1 < 0)
2177 return 0;
2178
2179 #ifdef SHIFT_COUNT_TRUNCATED
2180 if (SHIFT_COUNT_TRUNCATED)
2181 arg1 %= width;
2182 #endif
2183
2184 val = arg0s >> arg1;
2185
2186 	  /* The bootstrap compiler may not have sign extended the right shift.
2187 	     Manually extend the sign to ensure bootstrap cc matches gcc.  */
2188 if (arg0s < 0 && arg1 > 0)
2189 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2190
2191 break;
2192
2193 case ROTATERT:
2194 if (arg1 < 0)
2195 return 0;
2196
2197 arg1 %= width;
2198 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2199 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2200 break;
2201
2202 case ROTATE:
2203 if (arg1 < 0)
2204 return 0;
2205
2206 arg1 %= width;
2207 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2208 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2209 break;
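      /* Rotation example, assuming width == 8 and arg0 == 0xb4
	 (10110100b): ROTATE by arg1 == 2 computes

	   val = (0xb4 << 2) | (0xb4 >> 6) == 0x2d2

	 and trunc_int_for_mode at the end reduces that to the 8-bit
	 pattern 0xd2 (11010010b), i.e. a left rotation by two places.  */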
2210
2211 case COMPARE:
2212 /* Do nothing here. */
2213 return 0;
2214
2215 case SMIN:
2216 val = arg0s <= arg1s ? arg0s : arg1s;
2217 break;
2218
2219 case UMIN:
2220 val = ((unsigned HOST_WIDE_INT) arg0
2221 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2222 break;
2223
2224 case SMAX:
2225 val = arg0s > arg1s ? arg0s : arg1s;
2226 break;
2227
2228 case UMAX:
2229 val = ((unsigned HOST_WIDE_INT) arg0
2230 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2231 break;
2232
2233 case SS_PLUS:
2234 case US_PLUS:
2235 case SS_MINUS:
2236 case US_MINUS:
2237 /* ??? There are simplifications that can be done. */
2238 return 0;
2239
2240 default:
2241 abort ();
2242 }
2243
2244 val = trunc_int_for_mode (val, mode);
2245
2246 return GEN_INT (val);
2247 }
2248 \f
2249 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2250 PLUS or MINUS.
2251
2252    Rather than test for specific cases, we do this by a brute-force
2253    method: expand the operands and do all possible simplifications until
2254    no more changes occur.  Then we rebuild the operation.
2255
2256 If FORCE is true, then always generate the rtx. This is used to
2257 canonicalize stuff emitted from simplify_gen_binary. Note that this
2258 can still fail if the rtx is too complex. It won't fail just because
2259 the result is not 'simpler' than the input, however. */
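
   For instance (a sketch; a, b, c, d are hypothetical registers), the
   expansion loop below flattens

     (minus (plus a b) (minus c d))

   into the ops array as { a:+, c:-, b:+, d:+ }, i.e. a - c + b + d,
   before the pairwise simplification and rebuild passes run.  */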
2260
2261 struct simplify_plus_minus_op_data
2262 {
2263 rtx op;
2264 int neg;
2265 };
2266
2267 static int
2268 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2269 {
2270 const struct simplify_plus_minus_op_data *d1 = p1;
2271 const struct simplify_plus_minus_op_data *d2 = p2;
2272
2273 return (commutative_operand_precedence (d2->op)
2274 - commutative_operand_precedence (d1->op));
2275 }
2276
2277 static rtx
2278 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2279 rtx op1, int force)
2280 {
2281 struct simplify_plus_minus_op_data ops[8];
2282 rtx result, tem;
2283 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2284 int first, changed;
2285 int i, j;
2286
2287 memset (ops, 0, sizeof ops);
2288
2289 /* Set up the two operands and then expand them until nothing has been
2290 changed. If we run out of room in our array, give up; this should
2291 almost never happen. */
2292
2293 ops[0].op = op0;
2294 ops[0].neg = 0;
2295 ops[1].op = op1;
2296 ops[1].neg = (code == MINUS);
2297
2298 do
2299 {
2300 changed = 0;
2301
2302 for (i = 0; i < n_ops; i++)
2303 {
2304 rtx this_op = ops[i].op;
2305 int this_neg = ops[i].neg;
2306 enum rtx_code this_code = GET_CODE (this_op);
2307
2308 switch (this_code)
2309 {
2310 case PLUS:
2311 case MINUS:
2312 if (n_ops == 7)
2313 return NULL_RTX;
2314
2315 ops[n_ops].op = XEXP (this_op, 1);
2316 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2317 n_ops++;
2318
2319 ops[i].op = XEXP (this_op, 0);
2320 input_ops++;
2321 changed = 1;
2322 break;
2323
2324 case NEG:
2325 ops[i].op = XEXP (this_op, 0);
2326 ops[i].neg = ! this_neg;
2327 changed = 1;
2328 break;
2329
2330 case CONST:
2331 if (n_ops < 7
2332 && GET_CODE (XEXP (this_op, 0)) == PLUS
2333 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2334 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2335 {
2336 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2337 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2338 ops[n_ops].neg = this_neg;
2339 n_ops++;
2340 input_consts++;
2341 changed = 1;
2342 }
2343 break;
2344
2345 case NOT:
2346 /* ~a -> (-a - 1) */
2347 if (n_ops != 7)
2348 {
2349 ops[n_ops].op = constm1_rtx;
2350 ops[n_ops++].neg = this_neg;
2351 ops[i].op = XEXP (this_op, 0);
2352 ops[i].neg = !this_neg;
2353 changed = 1;
2354 }
2355 break;
2356
2357 case CONST_INT:
2358 if (this_neg)
2359 {
2360 ops[i].op = neg_const_int (mode, this_op);
2361 ops[i].neg = 0;
2362 changed = 1;
2363 }
2364 break;
2365
2366 default:
2367 break;
2368 }
2369 }
2370 }
2371 while (changed);
2372
2373 /* If we only have two operands, we can't do anything. */
2374 if (n_ops <= 2 && !force)
2375 return NULL_RTX;
2376
2377 /* Count the number of CONSTs we didn't split above. */
2378 for (i = 0; i < n_ops; i++)
2379 if (GET_CODE (ops[i].op) == CONST)
2380 input_consts++;
2381
2382 /* Now simplify each pair of operands until nothing changes. The first
2383 time through just simplify constants against each other. */
2384
2385 first = 1;
2386 do
2387 {
2388 changed = first;
2389
2390 for (i = 0; i < n_ops - 1; i++)
2391 for (j = i + 1; j < n_ops; j++)
2392 {
2393 rtx lhs = ops[i].op, rhs = ops[j].op;
2394 int lneg = ops[i].neg, rneg = ops[j].neg;
2395
2396 if (lhs != 0 && rhs != 0
2397 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2398 {
2399 enum rtx_code ncode = PLUS;
2400
2401 if (lneg != rneg)
2402 {
2403 ncode = MINUS;
2404 if (lneg)
2405 tem = lhs, lhs = rhs, rhs = tem;
2406 }
2407 else if (swap_commutative_operands_p (lhs, rhs))
2408 tem = lhs, lhs = rhs, rhs = tem;
2409
2410 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2411
2412 /* Reject "simplifications" that just wrap the two
2413 arguments in a CONST. Failure to do so can result
2414 in infinite recursion with simplify_binary_operation
2415 when it calls us to simplify CONST operations. */
2416 if (tem
2417 && ! (GET_CODE (tem) == CONST
2418 && GET_CODE (XEXP (tem, 0)) == ncode
2419 && XEXP (XEXP (tem, 0), 0) == lhs
2420 && XEXP (XEXP (tem, 0), 1) == rhs)
2421 	      /* Don't allow -x + -1 -> ~x simplifications in the
2422 	         first pass.  This gives us the chance to combine
2423 	         the -1 with other constants.  */
2424 && ! (first
2425 && GET_CODE (tem) == NOT
2426 && XEXP (tem, 0) == rhs))
2427 {
2428 lneg &= rneg;
2429 if (GET_CODE (tem) == NEG)
2430 tem = XEXP (tem, 0), lneg = !lneg;
2431 if (GET_CODE (tem) == CONST_INT && lneg)
2432 tem = neg_const_int (mode, tem), lneg = 0;
2433
2434 ops[i].op = tem;
2435 ops[i].neg = lneg;
2436 ops[j].op = NULL_RTX;
2437 changed = 1;
2438 }
2439 }
2440 }
2441
2442 first = 0;
2443 }
2444 while (changed);
2445
2446 /* Pack all the operands to the lower-numbered entries. */
2447 for (i = 0, j = 0; j < n_ops; j++)
2448 if (ops[j].op)
2449 ops[i++] = ops[j];
2450 n_ops = i;
2451
2452 /* Sort the operations based on swap_commutative_operands_p. */
2453 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2454
2455 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2456 if (n_ops == 2
2457 && GET_CODE (ops[1].op) == CONST_INT
2458 && CONSTANT_P (ops[0].op)
2459 && ops[0].neg)
2460 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
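  /* E.g. (a sketch; S is a hypothetical symbol_ref): when the surviving
     operands are S negated and (const_int -7) -- the net value
     -(S + 7) -- the test above emits

       (minus (const_int -7) S)

     instead of (neg (const (plus S (const_int 7)))).  */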
2461
2462 /* We suppressed creation of trivial CONST expressions in the
2463 combination loop to avoid recursion. Create one manually now.
2464 The combination loop should have ensured that there is exactly
2465 one CONST_INT, and the sort will have ensured that it is last
2466 in the array and that any other constant will be next-to-last. */
2467
2468 if (n_ops > 1
2469 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2470 && CONSTANT_P (ops[n_ops - 2].op))
2471 {
2472 rtx value = ops[n_ops - 1].op;
2473 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2474 value = neg_const_int (mode, value);
2475 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2476 n_ops--;
2477 }
2478
2479 /* Count the number of CONSTs that we generated. */
2480 n_consts = 0;
2481 for (i = 0; i < n_ops; i++)
2482 if (GET_CODE (ops[i].op) == CONST)
2483 n_consts++;
2484
2485 /* Give up if we didn't reduce the number of operands we had. Make
2486 sure we count a CONST as two operands. If we have the same
2487 number of operands, but have made more CONSTs than before, this
2488 is also an improvement, so accept it. */
2489 if (!force
2490 && (n_ops + n_consts > input_ops
2491 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2492 return NULL_RTX;
2493
2494 /* Put a non-negated operand first, if possible. */
2495
2496 for (i = 0; i < n_ops && ops[i].neg; i++)
2497 continue;
2498 if (i == n_ops)
2499 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2500 else if (i != 0)
2501 {
2502 tem = ops[0].op;
2503 ops[0] = ops[i];
2504 ops[i].op = tem;
2505 ops[i].neg = 1;
2506 }
2507
2508 /* Now make the result by performing the requested operations. */
2509 result = ops[0].op;
2510 for (i = 1; i < n_ops; i++)
2511 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2512 mode, result, ops[i].op);
2513
2514 return result;
2515 }
2516
2517 /* Like simplify_binary_operation except used for relational operators.
2518 MODE is the mode of the operands, not that of the result. If MODE
2519 is VOIDmode, both operands must also be VOIDmode and we compare the
2520 operands in "infinite precision".
2521
2522 If no simplification is possible, this function returns zero. Otherwise,
2523 it returns either const_true_rtx or const0_rtx. */
2524
2525 rtx
2526 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2527 rtx op0, rtx op1)
2528 {
2529 int equal, op0lt, op0ltu, op1lt, op1ltu;
2530 rtx tem;
2531 rtx trueop0;
2532 rtx trueop1;
2533
2534 if (mode == VOIDmode
2535 && (GET_MODE (op0) != VOIDmode
2536 || GET_MODE (op1) != VOIDmode))
2537 abort ();
2538
2539 /* If op0 is a compare, extract the comparison arguments from it. */
2540 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2541 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2542
2543 /* We can't simplify MODE_CC values since we don't know what the
2544 actual comparison is. */
2545 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2546 return 0;
2547
2548 /* Make sure the constant is second. */
2549 if (swap_commutative_operands_p (op0, op1))
2550 {
2551 tem = op0, op0 = op1, op1 = tem;
2552 code = swap_condition (code);
2553 }
2554
2555 trueop0 = avoid_constant_pool_reference (op0);
2556 trueop1 = avoid_constant_pool_reference (op1);
2557
2558   /* For integer comparisons of A and B, maybe we can simplify A - B and can
2559      then simplify a comparison of that with zero.  If A and B are both either
2560      a register or a CONST_INT, this can't help; testing for these cases will
2561      prevent infinite recursion here and speed things up.
2562
2563      If CODE is an unsigned comparison, then we can never do this optimization,
2564      because it gives an incorrect result if the subtraction wraps around zero.
2565      ANSI C defines unsigned operations such that they never overflow, and
2566      thus such cases cannot be ignored.  */
2567
2568 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2569 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2570 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2571 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2572 /* We cannot do this for == or != if tem is a nonzero address. */
2573 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2574 && code != GTU && code != GEU && code != LTU && code != LEU)
2575 return simplify_relational_operation (signed_condition (code),
2576 mode, tem, const0_rtx);
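  /* Example of the subtraction trick (x a hypothetical SImode register):
     comparing (plus x (const_int 3)) against (plus x (const_int 1))
     with GT, the MINUS above folds to (const_int 2) and the recursive
     call reduces to

       (gt (const_int 2) (const_int 0)) --> const_true_rtx.

     The unsigned codes are excluded because the subtraction may wrap.  */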
2577
2578 if (flag_unsafe_math_optimizations && code == ORDERED)
2579 return const_true_rtx;
2580
2581 if (flag_unsafe_math_optimizations && code == UNORDERED)
2582 return const0_rtx;
2583
2584 /* For modes without NaNs, if the two operands are equal, we know the
2585 result except if they have side-effects. */
2586 if (! HONOR_NANS (GET_MODE (trueop0))
2587 && rtx_equal_p (trueop0, trueop1)
2588 && ! side_effects_p (trueop0))
2589 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2590
2591 /* If the operands are floating-point constants, see if we can fold
2592 the result. */
2593 else if (GET_CODE (trueop0) == CONST_DOUBLE
2594 && GET_CODE (trueop1) == CONST_DOUBLE
2595 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2596 {
2597 REAL_VALUE_TYPE d0, d1;
2598
2599 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2600 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2601
2602 /* Comparisons are unordered iff at least one of the values is NaN. */
2603 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2604 switch (code)
2605 {
2606 case UNEQ:
2607 case UNLT:
2608 case UNGT:
2609 case UNLE:
2610 case UNGE:
2611 case NE:
2612 case UNORDERED:
2613 return const_true_rtx;
2614 case EQ:
2615 case LT:
2616 case GT:
2617 case LE:
2618 case GE:
2619 case LTGT:
2620 case ORDERED:
2621 return const0_rtx;
2622 default:
2623 return 0;
2624 }
2625
2626 equal = REAL_VALUES_EQUAL (d0, d1);
2627 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2628 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2629 }
2630
2631 /* Otherwise, see if the operands are both integers. */
2632 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2633 && (GET_CODE (trueop0) == CONST_DOUBLE
2634 || GET_CODE (trueop0) == CONST_INT)
2635 && (GET_CODE (trueop1) == CONST_DOUBLE
2636 || GET_CODE (trueop1) == CONST_INT))
2637 {
2638 int width = GET_MODE_BITSIZE (mode);
2639 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2640 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2641
2642 /* Get the two words comprising each integer constant. */
2643 if (GET_CODE (trueop0) == CONST_DOUBLE)
2644 {
2645 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2646 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2647 }
2648 else
2649 {
2650 l0u = l0s = INTVAL (trueop0);
2651 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2652 }
2653
2654 if (GET_CODE (trueop1) == CONST_DOUBLE)
2655 {
2656 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2657 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2658 }
2659 else
2660 {
2661 l1u = l1s = INTVAL (trueop1);
2662 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2663 }
2664
2665 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2666 we have to sign or zero-extend the values. */
2667 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2668 {
2669 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2670 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2671
2672 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2673 l0s |= ((HOST_WIDE_INT) (-1) << width);
2674
2675 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2676 l1s |= ((HOST_WIDE_INT) (-1) << width);
2677 }
2678 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2679 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2680
2681 equal = (h0u == h1u && l0u == l1u);
2682 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2683 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2684 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2685 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2686 }
2687
2688 /* Otherwise, there are some code-specific tests we can make. */
2689 else
2690 {
2691 switch (code)
2692 {
2693 case EQ:
2694 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2695 return const0_rtx;
2696 break;
2697
2698 case NE:
2699 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2700 return const_true_rtx;
2701 break;
2702
2703 case GEU:
2704 /* Unsigned values are never negative. */
2705 if (trueop1 == const0_rtx)
2706 return const_true_rtx;
2707 break;
2708
2709 case LTU:
2710 if (trueop1 == const0_rtx)
2711 return const0_rtx;
2712 break;
2713
2714 case LEU:
2715 /* Unsigned values are never greater than the largest
2716 unsigned value. */
2717 if (GET_CODE (trueop1) == CONST_INT
2718 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2719 && INTEGRAL_MODE_P (mode))
2720 return const_true_rtx;
2721 break;
2722
2723 case GTU:
2724 if (GET_CODE (trueop1) == CONST_INT
2725 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2726 && INTEGRAL_MODE_P (mode))
2727 return const0_rtx;
2728 break;
2729
2730 case LT:
2731 /* Optimize abs(x) < 0.0. */
2732 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2733 {
2734 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2735 : trueop0;
2736 if (GET_CODE (tem) == ABS)
2737 return const0_rtx;
2738 }
2739 break;
2740
2741 case GE:
2742 /* Optimize abs(x) >= 0.0. */
2743 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2744 {
2745 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2746 : trueop0;
2747 if (GET_CODE (tem) == ABS)
2748 return const_true_rtx;
2749 }
2750 break;
2751
2752 case UNGE:
2753 /* Optimize ! (abs(x) < 0.0). */
2754 if (trueop1 == CONST0_RTX (mode))
2755 {
2756 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2757 : trueop0;
2758 if (GET_CODE (tem) == ABS)
2759 return const_true_rtx;
2760 }
2761 break;
2762
2763 default:
2764 break;
2765 }
2766
2767 return 0;
2768 }
2769
2770 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2771 as appropriate. */
2772 switch (code)
2773 {
2774 case EQ:
2775 case UNEQ:
2776 return equal ? const_true_rtx : const0_rtx;
2777 case NE:
2778 case LTGT:
2779 return ! equal ? const_true_rtx : const0_rtx;
2780 case LT:
2781 case UNLT:
2782 return op0lt ? const_true_rtx : const0_rtx;
2783 case GT:
2784 case UNGT:
2785 return op1lt ? const_true_rtx : const0_rtx;
2786 case LTU:
2787 return op0ltu ? const_true_rtx : const0_rtx;
2788 case GTU:
2789 return op1ltu ? const_true_rtx : const0_rtx;
2790 case LE:
2791 case UNLE:
2792 return equal || op0lt ? const_true_rtx : const0_rtx;
2793 case GE:
2794 case UNGE:
2795 return equal || op1lt ? const_true_rtx : const0_rtx;
2796 case LEU:
2797 return equal || op0ltu ? const_true_rtx : const0_rtx;
2798 case GEU:
2799 return equal || op1ltu ? const_true_rtx : const0_rtx;
2800 case ORDERED:
2801 return const_true_rtx;
2802 case UNORDERED:
2803 return const0_rtx;
2804 default:
2805 abort ();
2806 }
2807 }
2808 \f
2809 /* Simplify CODE, an operation with result mode MODE and three operands,
2810 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2811    a constant.  Return 0 if no simplification is possible.  */
2812
2813 rtx
2814 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2815 enum machine_mode op0_mode, rtx op0, rtx op1,
2816 rtx op2)
2817 {
2818 unsigned int width = GET_MODE_BITSIZE (mode);
2819
2820 /* VOIDmode means "infinite" precision. */
2821 if (width == 0)
2822 width = HOST_BITS_PER_WIDE_INT;
2823
2824 switch (code)
2825 {
2826 case SIGN_EXTRACT:
2827 case ZERO_EXTRACT:
2828 if (GET_CODE (op0) == CONST_INT
2829 && GET_CODE (op1) == CONST_INT
2830 && GET_CODE (op2) == CONST_INT
2831 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2832 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2833 {
2834 	  /* Extracting a bit-field from a constant.  */
2835 HOST_WIDE_INT val = INTVAL (op0);
2836
2837 if (BITS_BIG_ENDIAN)
2838 val >>= (GET_MODE_BITSIZE (op0_mode)
2839 - INTVAL (op2) - INTVAL (op1));
2840 else
2841 val >>= INTVAL (op2);
2842
2843 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2844 {
2845 /* First zero-extend. */
2846 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2847 /* If desired, propagate sign bit. */
2848 if (code == SIGN_EXTRACT
2849 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2850 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2851 }
2852
2853 /* Clear the bits that don't belong in our mode,
2854 unless they and our sign bit are all one.
2855 So we get either a reasonable negative value or a reasonable
2856 unsigned value for this mode. */
2857 if (width < HOST_BITS_PER_WIDE_INT
2858 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2859 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2860 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2861
2862 return GEN_INT (val);
2863 }
2864 break;
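      /* Extraction example, assuming BITS_BIG_ENDIAN == 0:

	   (zero_extract (const_int 0xf0) (const_int 4) (const_int 4))

	 shifts val to 0xf0 >> 4 == 0xf and masks to four bits, returning
	 (const_int 15).  A SIGN_EXTRACT of the same field would
	 propagate the top extracted bit and return (const_int -1).  */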
2865
2866 case IF_THEN_ELSE:
2867 if (GET_CODE (op0) == CONST_INT)
2868 return op0 != const0_rtx ? op1 : op2;
2869
2870 /* Convert c ? a : a into "a". */
2871 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2872 return op1;
2873
2874 /* Convert a != b ? a : b into "a". */
2875 if (GET_CODE (op0) == NE
2876 && ! side_effects_p (op0)
2877 && ! HONOR_NANS (mode)
2878 && ! HONOR_SIGNED_ZEROS (mode)
2879 && ((rtx_equal_p (XEXP (op0, 0), op1)
2880 && rtx_equal_p (XEXP (op0, 1), op2))
2881 || (rtx_equal_p (XEXP (op0, 0), op2)
2882 && rtx_equal_p (XEXP (op0, 1), op1))))
2883 return op1;
2884
2885 /* Convert a == b ? a : b into "b". */
2886 if (GET_CODE (op0) == EQ
2887 && ! side_effects_p (op0)
2888 && ! HONOR_NANS (mode)
2889 && ! HONOR_SIGNED_ZEROS (mode)
2890 && ((rtx_equal_p (XEXP (op0, 0), op1)
2891 && rtx_equal_p (XEXP (op0, 1), op2))
2892 || (rtx_equal_p (XEXP (op0, 0), op2)
2893 && rtx_equal_p (XEXP (op0, 1), op1))))
2894 return op2;
2895
2896 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2897 {
2898 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2899 ? GET_MODE (XEXP (op0, 1))
2900 : GET_MODE (XEXP (op0, 0)));
2901 rtx temp;
2902 if (cmp_mode == VOIDmode)
2903 cmp_mode = op0_mode;
2904 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2905 XEXP (op0, 0), XEXP (op0, 1));
2906
2907 /* See if any simplifications were possible. */
2908 if (temp == const0_rtx)
2909 return op2;
2910 else if (temp == const_true_rtx)
2911 return op1;
2912 else if (temp)
2913 abort ();
2914
2915 /* Look for happy constants in op1 and op2. */
2916 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2917 {
2918 HOST_WIDE_INT t = INTVAL (op1);
2919 HOST_WIDE_INT f = INTVAL (op2);
2920
2921 if (t == STORE_FLAG_VALUE && f == 0)
2922 code = GET_CODE (op0);
2923 else if (t == 0 && f == STORE_FLAG_VALUE)
2924 {
2925 enum rtx_code tmp;
2926 tmp = reversed_comparison_code (op0, NULL_RTX);
2927 if (tmp == UNKNOWN)
2928 break;
2929 code = tmp;
2930 }
2931 else
2932 break;
2933
2934 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2935 }
2936 }
2937 break;
2938
2939 case VEC_MERGE:
2940 if (GET_MODE (op0) != mode
2941 || GET_MODE (op1) != mode
2942 || !VECTOR_MODE_P (mode))
2943 abort ();
2944 op2 = avoid_constant_pool_reference (op2);
2945 if (GET_CODE (op2) == CONST_INT)
2946 {
2947 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2948 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2949 int mask = (1 << n_elts) - 1;
2950
2951 if (!(INTVAL (op2) & mask))
2952 return op1;
2953 if ((INTVAL (op2) & mask) == mask)
2954 return op0;
2955
2956 op0 = avoid_constant_pool_reference (op0);
2957 op1 = avoid_constant_pool_reference (op1);
2958 if (GET_CODE (op0) == CONST_VECTOR
2959 && GET_CODE (op1) == CONST_VECTOR)
2960 {
2961 rtvec v = rtvec_alloc (n_elts);
2962 unsigned int i;
2963
2964 for (i = 0; i < n_elts; i++)
2965 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2966 ? CONST_VECTOR_ELT (op0, i)
2967 : CONST_VECTOR_ELT (op1, i));
2968 return gen_rtx_CONST_VECTOR (mode, v);
2969 }
2970 }
2971 break;
2972
2973 default:
2974 abort ();
2975 }
2976
2977 return 0;
2978 }
2979
2980 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
2981 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
2982
2983 Works by unpacking OP into a collection of 8-bit values
2984 represented as a little-endian array of 'unsigned char', selecting by BYTE,
2985 and then repacking them again for OUTERMODE. */
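
   For example, on a hypothetical little-endian target,

     (subreg:QI (const_int 0x1234) 0) with INNERMODE == HImode

   unpacks the value into bytes { 0x34, 0x12 }, selects byte 0, and
   repacks it as (const_int 0x34).  */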
2986
2987 static rtx
2988 simplify_immed_subreg (enum machine_mode outermode, rtx op,
2989 enum machine_mode innermode, unsigned int byte)
2990 {
2991 /* We support up to 512-bit values (for V8DFmode). */
2992 enum {
2993 max_bitsize = 512,
2994 value_bit = 8,
2995 value_mask = (1 << value_bit) - 1
2996 };
2997 unsigned char value[max_bitsize / value_bit];
2998 int value_start;
2999 int i;
3000 int elem;
3001
3002 int num_elem;
3003 rtx * elems;
3004 int elem_bitsize;
3005 rtx result_s;
3006 rtvec result_v = NULL;
3007 enum mode_class outer_class;
3008 enum machine_mode outer_submode;
3009
3010 /* Some ports misuse CCmode. */
3011 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3012 return op;
3013
3014 /* Unpack the value. */
3015
3016 if (GET_CODE (op) == CONST_VECTOR)
3017 {
3018 num_elem = CONST_VECTOR_NUNITS (op);
3019 elems = &CONST_VECTOR_ELT (op, 0);
3020 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3021 }
3022 else
3023 {
3024 num_elem = 1;
3025 elems = &op;
3026 elem_bitsize = max_bitsize;
3027 }
3028
3029 if (BITS_PER_UNIT % value_bit != 0)
3030 abort (); /* Too complicated; reducing value_bit may help. */
3031 if (elem_bitsize % BITS_PER_UNIT != 0)
3032 abort (); /* I don't know how to handle endianness of sub-units. */
3033
3034 for (elem = 0; elem < num_elem; elem++)
3035 {
3036 unsigned char * vp;
3037 rtx el = elems[elem];
3038
3039 /* Vectors are kept in target memory order. (This is probably
3040 a mistake.) */
3041 {
3042 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3043 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3044 / BITS_PER_UNIT);
3045 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3046 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3047 unsigned bytele = (subword_byte % UNITS_PER_WORD
3048 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3049 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3050 }
3051
3052 switch (GET_CODE (el))
3053 {
3054 case CONST_INT:
3055 for (i = 0;
3056 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3057 i += value_bit)
3058 *vp++ = INTVAL (el) >> i;
3059 /* CONST_INTs are always logically sign-extended. */
3060 for (; i < elem_bitsize; i += value_bit)
3061 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3062 break;
3063
3064 case CONST_DOUBLE:
3065 if (GET_MODE (el) == VOIDmode)
3066 {
3067 /* If this triggers, someone should have generated a
3068 CONST_INT instead. */
3069 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3070 abort ();
3071
3072 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3073 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3074 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3075 {
3076 *vp++
3077 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3078 i += value_bit;
3079 }
3080 /* It shouldn't matter what's done here, so fill it with
3081 zero. */
3082 for (; i < max_bitsize; i += value_bit)
3083 *vp++ = 0;
3084 }
3085 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3086 {
3087 long tmp[max_bitsize / 32];
3088 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3089
3090 if (bitsize > elem_bitsize)
3091 abort ();
3092 if (bitsize % value_bit != 0)
3093 abort ();
3094
3095 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3096 GET_MODE (el));
3097
3098 /* real_to_target produces its result in words affected by
3099 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3100 and use WORDS_BIG_ENDIAN instead; see the documentation
3101 of SUBREG in rtl.texi. */
3102 for (i = 0; i < bitsize; i += value_bit)
3103 {
3104 int ibase;
3105 if (WORDS_BIG_ENDIAN)
3106 ibase = bitsize - 1 - i;
3107 else
3108 ibase = i;
3109 *vp++ = tmp[ibase / 32] >> i % 32;
3110 }
3111
3112 /* It shouldn't matter what's done here, so fill it with
3113 zero. */
3114 for (; i < elem_bitsize; i += value_bit)
3115 *vp++ = 0;
3116 }
3117 else
3118 abort ();
3119 break;
3120
3121 default:
3122 abort ();
3123 }
3124 }
3125
3126 /* Now, pick the right byte to start with. */
3127 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3128 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3129 will already have offset 0. */
3130 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3131 {
3132 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3133 - byte);
3134 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3135 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3136 byte = (subword_byte % UNITS_PER_WORD
3137 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3138 }
3139
3140 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3141 so if it's become negative it will instead be very large.) */
3142 if (byte >= GET_MODE_SIZE (innermode))
3143 abort ();
3144
3145 /* Convert from bytes to chunks of size value_bit. */
3146 value_start = byte * (BITS_PER_UNIT / value_bit);
3147
3148 /* Re-pack the value. */
3149
3150 if (VECTOR_MODE_P (outermode))
3151 {
3152 num_elem = GET_MODE_NUNITS (outermode);
3153 result_v = rtvec_alloc (num_elem);
3154 elems = &RTVEC_ELT (result_v, 0);
3155 outer_submode = GET_MODE_INNER (outermode);
3156 }
3157 else
3158 {
3159 num_elem = 1;
3160 elems = &result_s;
3161 outer_submode = outermode;
3162 }
3163
3164 outer_class = GET_MODE_CLASS (outer_submode);
3165 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3166
3167 if (elem_bitsize % value_bit != 0)
3168 abort ();
3169 if (elem_bitsize + value_start * value_bit > max_bitsize)
3170 abort ();
3171
3172 for (elem = 0; elem < num_elem; elem++)
3173 {
3174 unsigned char *vp;
3175
3176 /* Vectors are stored in target memory order. (This is probably
3177 a mistake.) */
3178 {
3179 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3180 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3181 / BITS_PER_UNIT);
3182 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3183 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3184 unsigned bytele = (subword_byte % UNITS_PER_WORD
3185 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3186 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3187 }
3188
3189 switch (outer_class)
3190 {
3191 case MODE_INT:
3192 case MODE_PARTIAL_INT:
3193 {
3194 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3195
3196 for (i = 0;
3197 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3198 i += value_bit)
3199 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3200 for (; i < elem_bitsize; i += value_bit)
3201 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3202 << (i - HOST_BITS_PER_WIDE_INT));
3203
3204 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3205 know why. */
3206 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3207 elems[elem] = gen_int_mode (lo, outer_submode);
3208 else
3209 elems[elem] = immed_double_const (lo, hi, outer_submode);
3210 }
3211 break;
3212
3213 case MODE_FLOAT:
3214 {
3215 REAL_VALUE_TYPE r;
3216 long tmp[max_bitsize / 32];
3217
3218 /* real_from_target wants its input in words affected by
3219 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3220 and use WORDS_BIG_ENDIAN instead; see the documentation
3221 of SUBREG in rtl.texi. */
3222 for (i = 0; i < max_bitsize / 32; i++)
3223 tmp[i] = 0;
3224 for (i = 0; i < elem_bitsize; i += value_bit)
3225 {
3226 int ibase;
3227 if (WORDS_BIG_ENDIAN)
3228 ibase = elem_bitsize - 1 - i;
3229 else
3230 ibase = i;
3231 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3232 }
3233
3234 real_from_target (&r, tmp, outer_submode);
3235 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3236 }
3237 break;
3238
3239 default:
3240 abort ();
3241 }
3242 }
3243 if (VECTOR_MODE_P (outermode))
3244 return gen_rtx_CONST_VECTOR (outermode, result_v);
3245 else
3246 return result_s;
3247 }
3248
3249 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3250 Return 0 if no simplifications are possible. */
3251 rtx
3252 simplify_subreg (enum machine_mode outermode, rtx op,
3253 enum machine_mode innermode, unsigned int byte)
3254 {
3255 /* Little bit of sanity checking. */
3256 if (innermode == VOIDmode || outermode == VOIDmode
3257 || innermode == BLKmode || outermode == BLKmode)
3258 abort ();
3259
3260 if (GET_MODE (op) != innermode
3261 && GET_MODE (op) != VOIDmode)
3262 abort ();
3263
3264 if (byte % GET_MODE_SIZE (outermode)
3265 || byte >= GET_MODE_SIZE (innermode))
3266 abort ();
3267
3268 if (outermode == innermode && !byte)
3269 return op;
3270
3271 if (GET_CODE (op) == CONST_INT
3272 || GET_CODE (op) == CONST_DOUBLE
3273 || GET_CODE (op) == CONST_VECTOR)
3274 return simplify_immed_subreg (outermode, op, innermode, byte);
3275
3276   /* Changing mode twice with SUBREG => just change it once,
3277      or not at all if changing back to op's starting mode.  */
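  /* Double-SUBREG sketch (r is a hypothetical SImode register; offsets
     assume little endian): for

       (subreg:QI (subreg:HI (reg:SI r) 0) 0)

     final_offset is 0 + 0 and the code below collapses the pair into a
     single (subreg:QI (reg:SI r) 0).  */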
3278 if (GET_CODE (op) == SUBREG)
3279 {
3280 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3281 int final_offset = byte + SUBREG_BYTE (op);
3282 rtx new;
3283
3284 if (outermode == innermostmode
3285 && byte == 0 && SUBREG_BYTE (op) == 0)
3286 return SUBREG_REG (op);
3287
3288       /* The SUBREG_BYTE represents an offset, as if the value were stored
3289 	 in memory.  The irritating exception is a paradoxical subreg, where
3290 	 we define SUBREG_BYTE to be 0.  On big-endian machines, this
3291 	 value should be negative.  For a moment, undo this exception.  */
3292 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3293 {
3294 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3295 if (WORDS_BIG_ENDIAN)
3296 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3297 if (BYTES_BIG_ENDIAN)
3298 final_offset += difference % UNITS_PER_WORD;
3299 }
3300 if (SUBREG_BYTE (op) == 0
3301 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3302 {
3303 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3304 if (WORDS_BIG_ENDIAN)
3305 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3306 if (BYTES_BIG_ENDIAN)
3307 final_offset += difference % UNITS_PER_WORD;
3308 }
3309
3310 /* See whether resulting subreg will be paradoxical. */
3311 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3312 {
3313 /* In nonparadoxical subregs we can't handle negative offsets. */
3314 if (final_offset < 0)
3315 return NULL_RTX;
3316 /* Bail out in case resulting subreg would be incorrect. */
3317 if (final_offset % GET_MODE_SIZE (outermode)
3318 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3319 return NULL_RTX;
3320 }
3321 else
3322 {
3323 int offset = 0;
3324 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3325
3326 	  /* For a paradoxical subreg, see if we are still looking at the
3327 	     lower part.  If so, our SUBREG_BYTE will be 0.  */
3328 if (WORDS_BIG_ENDIAN)
3329 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3330 if (BYTES_BIG_ENDIAN)
3331 offset += difference % UNITS_PER_WORD;
3332 if (offset == final_offset)
3333 final_offset = 0;
3334 else
3335 return NULL_RTX;
3336 }
3337
3338 /* Recurse for further possible simplifications. */
3339 new = simplify_subreg (outermode, SUBREG_REG (op),
3340 GET_MODE (SUBREG_REG (op)),
3341 final_offset);
3342 if (new)
3343 return new;
3344 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3345 }
3346
3347 /* SUBREG of a hard register => just change the register number
3348 and/or mode. If the hard register is not valid in that mode,
3349 suppress this simplification. If the hard register is the stack,
3350 frame, or argument pointer, leave this as a SUBREG. */
3351
3352 if (REG_P (op)
3353 && (! REG_FUNCTION_VALUE_P (op)
3354 || ! rtx_equal_function_value_matters)
3355 && REGNO (op) < FIRST_PSEUDO_REGISTER
3356 #ifdef CANNOT_CHANGE_MODE_CLASS
3357 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3358 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3359 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3360 #endif
3361 && ((reload_completed && !frame_pointer_needed)
3362 || (REGNO (op) != FRAME_POINTER_REGNUM
3363 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3364 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3365 #endif
3366 ))
3367 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3368 && REGNO (op) != ARG_POINTER_REGNUM
3369 #endif
3370 && REGNO (op) != STACK_POINTER_REGNUM
3371 && subreg_offset_representable_p (REGNO (op), innermode,
3372 byte, outermode))
3373 {
3374 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3375 int final_regno = subreg_hard_regno (tem, 0);
3376
3377 /* ??? We do allow it if the current REG is not valid for
3378 its mode. This is a kludge to work around how float/complex
3379 arguments are passed on 32-bit SPARC and should be fixed. */
3380 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3381 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3382 {
3383 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3384
3385 	  /* Propagate the original regno.  We don't have any way to specify
3386 	     the offset inside the original regno, so do so only for the
3387 	     lowpart.  The information is used only by alias analysis,
3388 	     which cannot grok partial registers anyway.  */
3389
3390 if (subreg_lowpart_offset (outermode, innermode) == byte)
3391 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3392 return x;
3393 }
3394 }
3395
3396 /* If we have a SUBREG of a register that we are replacing and we are
3397 replacing it with a MEM, make a new MEM and try replacing the
3398 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3399 or if we would be widening it. */
3400
3401 if (GET_CODE (op) == MEM
3402 && ! mode_dependent_address_p (XEXP (op, 0))
3403       /* Allow splitting of volatile memory references in case we don't
3404          have an instruction to move the whole thing.  */
3405 && (! MEM_VOLATILE_P (op)
3406 || ! have_insn_for (SET, innermode))
3407 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3408 return adjust_address_nv (op, outermode, byte);
3409
3410 /* Handle complex values represented as CONCAT
3411 of real and imaginary part. */
3412 if (GET_CODE (op) == CONCAT)
3413 {
3414 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3415 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3416 unsigned int final_offset;
3417 rtx res;
3418
3419 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3420 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3421 if (res)
3422 return res;
3423 /* We can at least simplify it by referring directly to the
3424 relevant part. */
3425 return gen_rtx_SUBREG (outermode, part, final_offset);
3426 }
3427
3428 /* Optimize SUBREG truncations of zero and sign extended values. */
3429 if ((GET_CODE (op) == ZERO_EXTEND
3430 || GET_CODE (op) == SIGN_EXTEND)
3431 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3432 {
3433 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3434
3435 /* If we're requesting the lowpart of a zero or sign extension,
3436 there are three possibilities. If the outermode is the same
3437 as the origmode, we can omit both the extension and the subreg.
3438 If the outermode is not larger than the origmode, we can apply
3439 the truncation without the extension. Finally, if the outermode
3440 is larger than the origmode, but both are integer modes, we
3441 can just extend to the appropriate mode. */
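      /* Lowpart example (little-endian target; x a hypothetical HImode
	 register): (subreg:HI (zero_extend:SI x) 0) has bitpos == 0 and
	 outermode == origmode, so the first case below returns x
	 itself.  */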
3442 if (bitpos == 0)
3443 {
3444 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3445 if (outermode == origmode)
3446 return XEXP (op, 0);
3447 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3448 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3449 subreg_lowpart_offset (outermode,
3450 origmode));
3451 if (SCALAR_INT_MODE_P (outermode))
3452 return simplify_gen_unary (GET_CODE (op), outermode,
3453 XEXP (op, 0), origmode);
3454 }
3455
3456       /* A SUBREG resulting from a zero extension may fold to zero if
3457          it extracts higher bits than the ZERO_EXTEND's source provides.  */
3458 if (GET_CODE (op) == ZERO_EXTEND
3459 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3460 return CONST0_RTX (outermode);
3461 }
3462
3463 return NULL_RTX;
3464 }
3465
3466 /* Make a SUBREG operation or equivalent if it folds. */
3467
3468 rtx
3469 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3470 enum machine_mode innermode, unsigned int byte)
3471 {
3472 rtx new;
3473 /* Little bit of sanity checking. */
3474 if (innermode == VOIDmode || outermode == VOIDmode
3475 || innermode == BLKmode || outermode == BLKmode)
3476 abort ();
3477
3478 if (GET_MODE (op) != innermode
3479 && GET_MODE (op) != VOIDmode)
3480 abort ();
3481
3482 if (byte % GET_MODE_SIZE (outermode)
3483 || byte >= GET_MODE_SIZE (innermode))
3484 abort ();
3485
3486 if (GET_CODE (op) == QUEUED)
3487 return NULL_RTX;
3488
3489 new = simplify_subreg (outermode, op, innermode, byte);
3490 if (new)
3491 return new;
3492
3493 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3494 return NULL_RTX;
3495
3496 return gen_rtx_SUBREG (outermode, op, byte);
3497 }
3498 /* Simplify X, an rtx expression.
3499
3500 Return the simplified expression or NULL if no simplifications
3501 were possible.
3502
3503 This is the preferred entry point into the simplification routines;
3504 however, we still allow passes to call the more specific routines.
3505
3506 Right now GCC has three (yes, three) major bodies of RTL simplification
3507 code that need to be unified.
3508
3509 1. fold_rtx in cse.c. This code uses various CSE specific
3510 information to aid in RTL simplification.
3511
3512 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3513 it uses combine specific information to aid in RTL
3514 simplification.
3515
3516 3. The routines in this file.
3517
3518
3519 Long term we want to only have one body of simplification code; to
3520 get to that state I recommend the following steps:
3521
3522    1. Pore over fold_rtx & simplify_rtx and move any simplifications
3523       which do not depend on pass-specific state into these routines.
3524
3525 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3526 use this routine whenever possible.
3527
3528 3. Allow for pass dependent state to be provided to these
3529 routines and add simplifications based on the pass dependent
3530 state. Remove code from cse.c & combine.c that becomes
3531 redundant/dead.
3532
3533 It will take time, but ultimately the compiler will be easier to
3534    maintain and improve.  It's totally silly that when we add a
3535    simplification it needs to be added to 4 places (3 for RTL
3536    simplification and 1 for tree simplification).  */
3537
3538 rtx
3539 simplify_rtx (rtx x)
3540 {
3541 enum rtx_code code = GET_CODE (x);
3542 enum machine_mode mode = GET_MODE (x);
3543 rtx temp;
3544
3545 switch (GET_RTX_CLASS (code))
3546 {
3547 case '1':
3548 return simplify_unary_operation (code, mode,
3549 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3550 case 'c':
3551 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3552 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3553
3554 /* Fall through.... */
3555
3556 case '2':
3557 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3558
3559 case '3':
3560 case 'b':
3561 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3562 XEXP (x, 0), XEXP (x, 1),
3563 XEXP (x, 2));
3564
3565 case '<':
3566 temp = simplify_relational_operation (code,
3567 ((GET_MODE (XEXP (x, 0))
3568 != VOIDmode)
3569 ? GET_MODE (XEXP (x, 0))
3570 : GET_MODE (XEXP (x, 1))),
3571 XEXP (x, 0), XEXP (x, 1));
3572 #ifdef FLOAT_STORE_FLAG_VALUE
3573 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3574 {
3575 if (temp == const0_rtx)
3576 temp = CONST0_RTX (mode);
3577 else
3578 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3579 mode);
3580 }
3581 #endif
3582 return temp;
3583
3584 case 'x':
3585 if (code == SUBREG)
3586 return simplify_gen_subreg (mode, SUBREG_REG (x),
3587 GET_MODE (SUBREG_REG (x)),
3588 SUBREG_BYTE (x));
3589 if (code == CONSTANT_P_RTX)
3590 {
3591 if (CONSTANT_P (XEXP (x, 0)))
3592 return const1_rtx;
3593 }
3594 break;
3595
3596 case 'o':
3597 if (code == LO_SUM)
3598 {
3599 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3600 if (GET_CODE (XEXP (x, 0)) == HIGH
3601 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3602 return XEXP (x, 1);
3603 }
3604 break;
3605
3606 default:
3607 break;
3608 }
3609 return NULL;
3610 }