simplify-rtx.c (simplify_plus_minus): Remove final parameter.
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
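/* For example, with a 64-bit HOST_WIDE_INT, a low word of
   0x8000000000000000 has its top bit set, so HWI_SIGN_EXTEND yields
   (HOST_WIDE_INT) -1 for the high word; any nonnegative low word
   yields 0.  */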
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
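/* For instance, in SImode the most negative value 0x80000000 negates
   back to itself: the out-of-range result is truncated by gen_int_mode
   and sign-extended again.  */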
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
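/* For example, for SImode this holds for (const_int -2147483648),
   whose low 32 bits are 0x80000000 (i.e. 1 << 31), and for no other
   value.  */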
76
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* Put complex operands first and constants second if commutative. */
118 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
119 && swap_commutative_operands_p (op0, op1))
120 tem = op0, op0 = op1, op1 = tem;
121
122 /* If this simplifies, do it. */
123 tem = simplify_binary_operation (code, mode, op0, op1);
124 if (tem)
125 return tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 addr = XEXP (x, 0);
162
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
165
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
170 {
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
173 }
174
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
177
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
182 {
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
185
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
190 {
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
194 }
195 else
196 return c;
197 }
198
199 return x;
200 }
201
202 /* Return true if X is a MEM referencing the constant pool. */
203
204 bool
205 constant_pool_reference_p (rtx x)
206 {
207 return avoid_constant_pool_reference (x) != x;
208 }
209 \f
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
212
213 rtx
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
216 {
217 rtx tem;
218
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
222
223 return gen_rtx_fmt_e (code, mode, op);
224 }
225
226 /* Likewise for ternary operations. */
227
228 rtx
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
231 {
232 rtx tem;
233
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
238
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
240 }
241
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
244
245 rtx
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
248 {
249 rtx tem;
250
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
254
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
256 }
257 \f
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
260
261 rtx
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
263 {
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
268
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
272
273 if (x == old_rtx)
274 return new_rtx;
275
276 switch (GET_RTX_CLASS (code))
277 {
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
285
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
293
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
304
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
317
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
321 {
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
329 }
330 break;
331
332 case RTX_OBJ:
333 if (code == MEM)
334 {
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
339 }
340 else if (code == LO_SUM)
341 {
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
344
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
348
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
352 }
353 else if (code == REG)
354 {
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
357 }
358 break;
359
360 default:
361 break;
362 }
363 return x;
364 }
365 \f
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
369 rtx
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
372 {
373 rtx trueop, tem;
374
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
377
378 trueop = avoid_constant_pool_reference (op);
379
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
383
384 return simplify_unary_operation_1 (code, mode, op);
385 }
386
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
391 {
392 enum rtx_code reversed;
393 rtx temp;
394
395 switch (code)
396 {
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
401
402 /* (not (eq X Y)) == (ne X Y), etc. */
403 if (COMPARISON_P (op)
404 && (mode == BImode || STORE_FLAG_VALUE == -1)
405 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
406 return simplify_gen_relational (reversed, mode, VOIDmode,
407 XEXP (op, 0), XEXP (op, 1));
408
409 /* (not (plus X -1)) can become (neg X). */
410 if (GET_CODE (op) == PLUS
411 && XEXP (op, 1) == constm1_rtx)
412 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
413
414 /* Similarly, (not (neg X)) is (plus X -1). */
415 if (GET_CODE (op) == NEG)
416 return plus_constant (XEXP (op, 0), -1);
417
418 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
419 if (GET_CODE (op) == XOR
420 && GET_CODE (XEXP (op, 1)) == CONST_INT
421 && (temp = simplify_unary_operation (NOT, mode,
422 XEXP (op, 1), mode)) != 0)
423 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
424
425 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
426 if (GET_CODE (op) == PLUS
427 && GET_CODE (XEXP (op, 1)) == CONST_INT
428 && mode_signbit_p (mode, XEXP (op, 1))
429 && (temp = simplify_unary_operation (NOT, mode,
430 XEXP (op, 1), mode)) != 0)
431 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
432
433
434 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
435 operands other than 1, but that is not valid. We could do a
436 similar simplification for (not (lshiftrt C X)) where C is
437 just the sign bit, but this doesn't seem common enough to
438 bother with. */
439 if (GET_CODE (op) == ASHIFT
440 && XEXP (op, 0) == const1_rtx)
441 {
442 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
443 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
444 }
445
446 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
447 by reversing the comparison code if valid. */
448 if (STORE_FLAG_VALUE == -1
449 && COMPARISON_P (op)
450 && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
451 return simplify_gen_relational (reversed, mode, VOIDmode,
452 XEXP (op, 0), XEXP (op, 1));
453
454 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
455 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
456 so we can perform the above simplification. */
457
458 if (STORE_FLAG_VALUE == -1
459 && GET_CODE (op) == ASHIFTRT
460 && GET_CODE (XEXP (op, 1)) == CONST_INT
461 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
462 return simplify_gen_relational (GE, mode, VOIDmode,
463 XEXP (op, 0), const0_rtx);
464
465 break;
466
467 case NEG:
468 /* (neg (neg X)) == X. */
469 if (GET_CODE (op) == NEG)
470 return XEXP (op, 0);
471
472 /* (neg (plus X 1)) can become (not X). */
473 if (GET_CODE (op) == PLUS
474 && XEXP (op, 1) == const1_rtx)
475 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
476
477 /* Similarly, (neg (not X)) is (plus X 1). */
478 if (GET_CODE (op) == NOT)
479 return plus_constant (XEXP (op, 0), 1);
480
481 /* (neg (minus X Y)) can become (minus Y X). This transformation
482 isn't safe for modes with signed zeros, since if X and Y are
483 both +0, (minus Y X) is the same as (minus X Y). If the
484 rounding mode is towards +infinity (or -infinity) then the two
485 expressions will be rounded differently. */
486 if (GET_CODE (op) == MINUS
487 && !HONOR_SIGNED_ZEROS (mode)
488 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
489 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
490
491 if (GET_CODE (op) == PLUS
492 && !HONOR_SIGNED_ZEROS (mode)
493 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
494 {
495 /* (neg (plus A C)) is simplified to (minus -C A). */
496 if (GET_CODE (XEXP (op, 1)) == CONST_INT
497 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
498 {
499 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
500 if (temp)
501 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
502 }
503
504 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
505 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
506 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
507 }
508
509 /* (neg (mult A B)) becomes (mult (neg A) B).
510 This works even for floating-point values. */
511 if (GET_CODE (op) == MULT
512 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
513 {
514 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
515 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
516 }
517
518 /* NEG commutes with ASHIFT since it is multiplication. Only do
519 this if we can then eliminate the NEG (e.g., if the operand
520 is a constant). */
521 if (GET_CODE (op) == ASHIFT)
522 {
523 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
524 if (temp)
525 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
526 }
527
528 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
529 C is equal to the width of MODE minus 1. */
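    /* (The arithmetic shift yields 0 or -1 according to the sign bit
       of X, and negating that gives the same 0 or 1 that the logical
       shift produces.)  */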
530 if (GET_CODE (op) == ASHIFTRT
531 && GET_CODE (XEXP (op, 1)) == CONST_INT
532 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
533 return simplify_gen_binary (LSHIFTRT, mode,
534 XEXP (op, 0), XEXP (op, 1));
535
536 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
537 C is equal to the width of MODE minus 1. */
538 if (GET_CODE (op) == LSHIFTRT
539 && GET_CODE (XEXP (op, 1)) == CONST_INT
540 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
541 return simplify_gen_binary (ASHIFTRT, mode,
542 XEXP (op, 0), XEXP (op, 1));
543
544 break;
545
546 case SIGN_EXTEND:
547 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
548 becomes just the MINUS if its mode is MODE. This allows
549 folding switch statements on machines using casesi (such as
550 the VAX). */
551 if (GET_CODE (op) == TRUNCATE
552 && GET_MODE (XEXP (op, 0)) == mode
553 && GET_CODE (XEXP (op, 0)) == MINUS
554 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
555 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
556 return XEXP (op, 0);
557
558 /* Check for a sign extension of a subreg of a promoted
559 variable, where the promotion is sign-extended, and the
560 target mode is the same as the variable's promotion. */
561 if (GET_CODE (op) == SUBREG
562 && SUBREG_PROMOTED_VAR_P (op)
563 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
564 && GET_MODE (XEXP (op, 0)) == mode)
565 return XEXP (op, 0);
566
567 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
568 if (! POINTERS_EXTEND_UNSIGNED
569 && mode == Pmode && GET_MODE (op) == ptr_mode
570 && (CONSTANT_P (op)
571 || (GET_CODE (op) == SUBREG
572 && REG_P (SUBREG_REG (op))
573 && REG_POINTER (SUBREG_REG (op))
574 && GET_MODE (SUBREG_REG (op)) == Pmode)))
575 return convert_memory_address (Pmode, op);
576 #endif
577 break;
578
579 case ZERO_EXTEND:
580 /* Check for a zero extension of a subreg of a promoted
581 variable, where the promotion is zero-extended, and the
582 target mode is the same as the variable's promotion. */
583 if (GET_CODE (op) == SUBREG
584 && SUBREG_PROMOTED_VAR_P (op)
585 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
586 && GET_MODE (XEXP (op, 0)) == mode)
587 return XEXP (op, 0);
588
589 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
590 if (POINTERS_EXTEND_UNSIGNED > 0
591 && mode == Pmode && GET_MODE (op) == ptr_mode
592 && (CONSTANT_P (op)
593 || (GET_CODE (op) == SUBREG
594 && REG_P (SUBREG_REG (op))
595 && REG_POINTER (SUBREG_REG (op))
596 && GET_MODE (SUBREG_REG (op)) == Pmode)))
597 return convert_memory_address (Pmode, op);
598 #endif
599 break;
600
601 default:
602 break;
603 }
604
605 return 0;
606 }
607
608 /* Try to compute the value of a unary operation CODE whose output mode is to
609 be MODE with input operand OP whose mode was originally OP_MODE.
610 Return zero if the value cannot be computed. */
611 rtx
612 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
613 rtx op, enum machine_mode op_mode)
614 {
615 unsigned int width = GET_MODE_BITSIZE (mode);
616
617 if (code == VEC_DUPLICATE)
618 {
619 gcc_assert (VECTOR_MODE_P (mode));
620 if (GET_MODE (op) != VOIDmode)
621 {
622 if (!VECTOR_MODE_P (GET_MODE (op)))
623 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
624 else
625 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
626 (GET_MODE (op)));
627 }
628 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
629 || GET_CODE (op) == CONST_VECTOR)
630 {
631 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
632 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
633 rtvec v = rtvec_alloc (n_elts);
634 unsigned int i;
635
636 if (GET_CODE (op) != CONST_VECTOR)
637 for (i = 0; i < n_elts; i++)
638 RTVEC_ELT (v, i) = op;
639 else
640 {
641 enum machine_mode inmode = GET_MODE (op);
642 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
643 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
644
645 gcc_assert (in_n_elts < n_elts);
646 gcc_assert ((n_elts % in_n_elts) == 0);
647 for (i = 0; i < n_elts; i++)
648 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
649 }
650 return gen_rtx_CONST_VECTOR (mode, v);
651 }
652 }
653
654 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
655 {
656 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
657 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
658 enum machine_mode opmode = GET_MODE (op);
659 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
660 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
661 rtvec v = rtvec_alloc (n_elts);
662 unsigned int i;
663
664 gcc_assert (op_n_elts == n_elts);
665 for (i = 0; i < n_elts; i++)
666 {
667 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
668 CONST_VECTOR_ELT (op, i),
669 GET_MODE_INNER (opmode));
670 if (!x)
671 return 0;
672 RTVEC_ELT (v, i) = x;
673 }
674 return gen_rtx_CONST_VECTOR (mode, v);
675 }
676
677 /* The order of these tests is critical so that, for example, we don't
678 check the wrong mode (input vs. output) for a conversion operation,
679 such as FIX. At some point, this should be simplified. */
680
681 if (code == FLOAT && GET_MODE (op) == VOIDmode
682 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
683 {
684 HOST_WIDE_INT hv, lv;
685 REAL_VALUE_TYPE d;
686
687 if (GET_CODE (op) == CONST_INT)
688 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
689 else
690 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
691
692 REAL_VALUE_FROM_INT (d, lv, hv, mode);
693 d = real_value_truncate (mode, d);
694 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
695 }
696 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
697 && (GET_CODE (op) == CONST_DOUBLE
698 || GET_CODE (op) == CONST_INT))
699 {
700 HOST_WIDE_INT hv, lv;
701 REAL_VALUE_TYPE d;
702
703 if (GET_CODE (op) == CONST_INT)
704 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
705 else
706 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
707
708 if (op_mode == VOIDmode)
709 {
710 /* We don't know how to interpret negative-looking numbers in
711 this case, so don't try to fold those. */
712 if (hv < 0)
713 return 0;
714 }
715 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
716 ;
717 else
718 hv = 0, lv &= GET_MODE_MASK (op_mode);
719
720 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
721 d = real_value_truncate (mode, d);
722 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
723 }
724
725 if (GET_CODE (op) == CONST_INT
726 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
727 {
728 HOST_WIDE_INT arg0 = INTVAL (op);
729 HOST_WIDE_INT val;
730
731 switch (code)
732 {
733 case NOT:
734 val = ~ arg0;
735 break;
736
737 case NEG:
738 val = - arg0;
739 break;
740
741 case ABS:
742 val = (arg0 >= 0 ? arg0 : - arg0);
743 break;
744
745 case FFS:
746 /* Don't use ffs here. Instead, get low order bit and then its
747 number. If arg0 is zero, this will return 0, as desired. */
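	  /* For example, arg0 == 12 (binary 1100): arg0 & -arg0 == 4,
	     exact_log2 (4) == 2, so val == 3, the 1-based index of the
	     lowest set bit.  */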
748 arg0 &= GET_MODE_MASK (mode);
749 val = exact_log2 (arg0 & (- arg0)) + 1;
750 break;
751
752 case CLZ:
753 arg0 &= GET_MODE_MASK (mode);
754 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
755 ;
756 else
757 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
758 break;
759
760 case CTZ:
761 arg0 &= GET_MODE_MASK (mode);
762 if (arg0 == 0)
763 {
764 /* Even if the value at zero is undefined, we have to come
765 up with some replacement. Seems good enough. */
766 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
767 val = GET_MODE_BITSIZE (mode);
768 }
769 else
770 val = exact_log2 (arg0 & -arg0);
771 break;
772
773 case POPCOUNT:
774 arg0 &= GET_MODE_MASK (mode);
775 val = 0;
776 while (arg0)
777 val++, arg0 &= arg0 - 1;
778 break;
779
780 case PARITY:
781 arg0 &= GET_MODE_MASK (mode);
782 val = 0;
783 while (arg0)
784 val++, arg0 &= arg0 - 1;
785 val &= 1;
786 break;
787
788 case TRUNCATE:
789 val = arg0;
790 break;
791
792 case ZERO_EXTEND:
793 /* When zero-extending a CONST_INT, we need to know its
794 original mode. */
795 gcc_assert (op_mode != VOIDmode);
796 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
797 {
798 /* If we were really extending the mode,
799 we would have to distinguish between zero-extension
800 and sign-extension. */
801 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
802 val = arg0;
803 }
804 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
805 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
806 else
807 return 0;
808 break;
809
810 case SIGN_EXTEND:
811 if (op_mode == VOIDmode)
812 op_mode = mode;
813 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
814 {
815 /* If we were really extending the mode,
816 we would have to distinguish between zero-extension
817 and sign-extension. */
818 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
819 val = arg0;
820 }
821 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
822 {
823 val
824 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
825 if (val
826 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
827 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
828 }
829 else
830 return 0;
831 break;
832
833 case SQRT:
834 case FLOAT_EXTEND:
835 case FLOAT_TRUNCATE:
836 case SS_TRUNCATE:
837 case US_TRUNCATE:
838 return 0;
839
840 default:
841 gcc_unreachable ();
842 }
843
844 return gen_int_mode (val, mode);
845 }
846
847 /* We can do some operations on integer CONST_DOUBLEs. Also allow
848 for a DImode operation on a CONST_INT. */
849 else if (GET_MODE (op) == VOIDmode
850 && width <= HOST_BITS_PER_WIDE_INT * 2
851 && (GET_CODE (op) == CONST_DOUBLE
852 || GET_CODE (op) == CONST_INT))
853 {
854 unsigned HOST_WIDE_INT l1, lv;
855 HOST_WIDE_INT h1, hv;
856
857 if (GET_CODE (op) == CONST_DOUBLE)
858 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
859 else
860 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
861
862 switch (code)
863 {
864 case NOT:
865 lv = ~ l1;
866 hv = ~ h1;
867 break;
868
869 case NEG:
870 neg_double (l1, h1, &lv, &hv);
871 break;
872
873 case ABS:
874 if (h1 < 0)
875 neg_double (l1, h1, &lv, &hv);
876 else
877 lv = l1, hv = h1;
878 break;
879
880 case FFS:
881 hv = 0;
882 if (l1 == 0)
883 {
884 if (h1 == 0)
885 lv = 0;
886 else
887 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
888 }
889 else
890 lv = exact_log2 (l1 & -l1) + 1;
891 break;
892
893 case CLZ:
894 hv = 0;
895 if (h1 != 0)
896 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
897 - HOST_BITS_PER_WIDE_INT;
898 else if (l1 != 0)
899 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
900 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
901 lv = GET_MODE_BITSIZE (mode);
902 break;
903
904 case CTZ:
905 hv = 0;
906 if (l1 != 0)
907 lv = exact_log2 (l1 & -l1);
908 else if (h1 != 0)
909 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
910 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
911 lv = GET_MODE_BITSIZE (mode);
912 break;
913
914 case POPCOUNT:
915 hv = 0;
916 lv = 0;
917 while (l1)
918 lv++, l1 &= l1 - 1;
919 while (h1)
920 lv++, h1 &= h1 - 1;
921 break;
922
923 case PARITY:
924 hv = 0;
925 lv = 0;
926 while (l1)
927 lv++, l1 &= l1 - 1;
928 while (h1)
929 lv++, h1 &= h1 - 1;
930 lv &= 1;
931 break;
932
933 case TRUNCATE:
934 /* This is just a change-of-mode, so do nothing. */
935 lv = l1, hv = h1;
936 break;
937
938 case ZERO_EXTEND:
939 gcc_assert (op_mode != VOIDmode);
940
941 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
942 return 0;
943
944 hv = 0;
945 lv = l1 & GET_MODE_MASK (op_mode);
946 break;
947
948 case SIGN_EXTEND:
949 if (op_mode == VOIDmode
950 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
951 return 0;
952 else
953 {
954 lv = l1 & GET_MODE_MASK (op_mode);
955 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
956 && (lv & ((HOST_WIDE_INT) 1
957 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
958 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
959
960 hv = HWI_SIGN_EXTEND (lv);
961 }
962 break;
963
964 case SQRT:
965 return 0;
966
967 default:
968 return 0;
969 }
970
971 return immed_double_const (lv, hv, mode);
972 }
973
974 else if (GET_CODE (op) == CONST_DOUBLE
975 && SCALAR_FLOAT_MODE_P (mode))
976 {
977 REAL_VALUE_TYPE d, t;
978 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
979
980 switch (code)
981 {
982 case SQRT:
983 if (HONOR_SNANS (mode) && real_isnan (&d))
984 return 0;
985 real_sqrt (&t, mode, &d);
986 d = t;
987 break;
988 case ABS:
989 d = REAL_VALUE_ABS (d);
990 break;
991 case NEG:
992 d = REAL_VALUE_NEGATE (d);
993 break;
994 case FLOAT_TRUNCATE:
995 d = real_value_truncate (mode, d);
996 break;
997 case FLOAT_EXTEND:
998 /* All this does is change the mode. */
999 break;
1000 case FIX:
1001 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1002 break;
1003 case NOT:
1004 {
1005 long tmp[4];
1006 int i;
1007
1008 real_to_target (tmp, &d, GET_MODE (op));
1009 for (i = 0; i < 4; i++)
1010 tmp[i] = ~tmp[i];
1011 real_from_target (&d, tmp, mode);
1012 break;
1013 }
1014 default:
1015 gcc_unreachable ();
1016 }
1017 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1018 }
1019
1020 else if (GET_CODE (op) == CONST_DOUBLE
1021 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1022 && GET_MODE_CLASS (mode) == MODE_INT
1023 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1024 {
1025 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1026 operators are intentionally left unspecified (to ease implementation
1027 by target backends), for consistency, this routine implements the
1028 same semantics for constant folding as used by the middle-end. */
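      /* Concretely: a NaN operand folds to 0, and a value outside the
	 range of MODE saturates to the corresponding signed or unsigned
	 bound, as computed below.  */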
1029
1030 /* This was formerly used only for non-IEEE float.
1031 eggert@twinsun.com says it is safe for IEEE also. */
1032 HOST_WIDE_INT xh, xl, th, tl;
1033 REAL_VALUE_TYPE x, t;
1034 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1035 switch (code)
1036 {
1037 case FIX:
1038 if (REAL_VALUE_ISNAN (x))
1039 return const0_rtx;
1040
1041 /* Test against the signed upper bound. */
1042 if (width > HOST_BITS_PER_WIDE_INT)
1043 {
1044 th = ((unsigned HOST_WIDE_INT) 1
1045 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1046 tl = -1;
1047 }
1048 else
1049 {
1050 th = 0;
1051 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1052 }
1053 real_from_integer (&t, VOIDmode, tl, th, 0);
1054 if (REAL_VALUES_LESS (t, x))
1055 {
1056 xh = th;
1057 xl = tl;
1058 break;
1059 }
1060
1061 /* Test against the signed lower bound. */
1062 if (width > HOST_BITS_PER_WIDE_INT)
1063 {
1064 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1065 tl = 0;
1066 }
1067 else
1068 {
1069 th = -1;
1070 tl = (HOST_WIDE_INT) -1 << (width - 1);
1071 }
1072 real_from_integer (&t, VOIDmode, tl, th, 0);
1073 if (REAL_VALUES_LESS (x, t))
1074 {
1075 xh = th;
1076 xl = tl;
1077 break;
1078 }
1079 REAL_VALUE_TO_INT (&xl, &xh, x);
1080 break;
1081
1082 case UNSIGNED_FIX:
1083 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1084 return const0_rtx;
1085
1086 /* Test against the unsigned upper bound. */
1087 if (width == 2*HOST_BITS_PER_WIDE_INT)
1088 {
1089 th = -1;
1090 tl = -1;
1091 }
1092 else if (width >= HOST_BITS_PER_WIDE_INT)
1093 {
1094 th = ((unsigned HOST_WIDE_INT) 1
1095 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1096 tl = -1;
1097 }
1098 else
1099 {
1100 th = 0;
1101 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1102 }
1103 real_from_integer (&t, VOIDmode, tl, th, 1);
1104 if (REAL_VALUES_LESS (t, x))
1105 {
1106 xh = th;
1107 xl = tl;
1108 break;
1109 }
1110
1111 REAL_VALUE_TO_INT (&xl, &xh, x);
1112 break;
1113
1114 default:
1115 gcc_unreachable ();
1116 }
1117 return immed_double_const (xl, xh, mode);
1118 }
1119
1120 return NULL_RTX;
1121 }
1122 \f
1123 /* Subroutine of simplify_binary_operation to simplify a commutative,
1124 associative binary operation CODE with result mode MODE, operating
1125 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1126 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1127 canonicalization is possible. */
1128
1129 static rtx
1130 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1131 rtx op0, rtx op1)
1132 {
1133 rtx tem;
1134
1135 /* Linearize the operator to the left. */
1136 if (GET_CODE (op1) == code)
1137 {
1138 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1139 if (GET_CODE (op0) == code)
1140 {
1141 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1142 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1143 }
1144
1145 /* "a op (b op c)" becomes "(b op c) op a". */
1146 if (! swap_commutative_operands_p (op1, op0))
1147 return simplify_gen_binary (code, mode, op1, op0);
1148
1149 tem = op0;
1150 op0 = op1;
1151 op1 = tem;
1152 }
1153
1154 if (GET_CODE (op0) == code)
1155 {
1156 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1157 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1158 {
1159 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1160 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1161 }
1162
1163 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1164 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1165 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1166 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1167 if (tem != 0)
1168 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1169
1170 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1171 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1172 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1173 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1174 if (tem != 0)
1175 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1176 }
1177
1178 return 0;
1179 }
1180
1181
1182 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1183 and OP1. Return 0 if no simplification is possible.
1184
1185 Don't use this for relational operations such as EQ or LT.
1186 Use simplify_relational_operation instead. */
1187 rtx
1188 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1189 rtx op0, rtx op1)
1190 {
1191 rtx trueop0, trueop1;
1192 rtx tem;
1193
1194 /* Relational operations don't work here. We must know the mode
1195 of the operands in order to do the comparison correctly.
1196 Assuming a full word can give incorrect results.
1197 Consider comparing 128 with -128 in QImode. */
1198 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1199 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1200
1201 /* Make sure the constant is second. */
1202 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1203 && swap_commutative_operands_p (op0, op1))
1204 {
1205 tem = op0, op0 = op1, op1 = tem;
1206 }
1207
1208 trueop0 = avoid_constant_pool_reference (op0);
1209 trueop1 = avoid_constant_pool_reference (op1);
1210
1211 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1212 if (tem)
1213 return tem;
1214 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1215 }
1216
1217 static rtx
1218 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1219 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1220 {
1221 rtx tem;
1222 HOST_WIDE_INT val;
1223 unsigned int width = GET_MODE_BITSIZE (mode);
1224
1225 /* Even if we can't compute a constant result,
1226 there are some cases worth simplifying. */
1227
1228 switch (code)
1229 {
1230 case PLUS:
1231 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1232 when x is NaN, infinite, or finite and nonzero. They aren't
1233 when x is -0 and the rounding mode is not towards -infinity,
1234 since (-0) + 0 is then 0. */
1235 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1236 return op0;
1237
1238 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1239 transformations are safe even for IEEE. */
1240 if (GET_CODE (op0) == NEG)
1241 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1242 else if (GET_CODE (op1) == NEG)
1243 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1244
1245 /* (~a) + 1 -> -a */
1246 if (INTEGRAL_MODE_P (mode)
1247 && GET_CODE (op0) == NOT
1248 && trueop1 == const1_rtx)
1249 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1250
1251 /* Handle both-operands-constant cases. We can only add
1252 CONST_INTs to constants since the sum of relocatable symbols
1253 can't be handled by most assemblers. Don't add CONST_INT
1254 to CONST_INT since overflow won't be computed properly if wider
1255 than HOST_BITS_PER_WIDE_INT. */
1256
1257 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1258 && GET_CODE (op1) == CONST_INT)
1259 return plus_constant (op0, INTVAL (op1));
1260 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1261 && GET_CODE (op0) == CONST_INT)
1262 return plus_constant (op1, INTVAL (op0));
1263
1264 /* See if this is something like X * C - X or vice versa or
1265 if the multiplication is written as a shift. If so, we can
1266 distribute and make a new multiply, shift, or maybe just
1267 have X (if C is 2 in the example above). But don't make
1268 something more expensive than we had before. */
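      /* For instance, (plus (mult X 3) X) becomes (mult X 4), and
	 (plus (ashift X 2) X) becomes (mult X 5), provided the result
	 is no more expensive than the original per rtx_cost.  */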
1269
1270 if (SCALAR_INT_MODE_P (mode))
1271 {
1272 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1273 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1274 rtx lhs = op0, rhs = op1;
1275
1276 if (GET_CODE (lhs) == NEG)
1277 {
1278 coeff0l = -1;
1279 coeff0h = -1;
1280 lhs = XEXP (lhs, 0);
1281 }
1282 else if (GET_CODE (lhs) == MULT
1283 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1284 {
1285 coeff0l = INTVAL (XEXP (lhs, 1));
1286 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1287 lhs = XEXP (lhs, 0);
1288 }
1289 else if (GET_CODE (lhs) == ASHIFT
1290 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1291 && INTVAL (XEXP (lhs, 1)) >= 0
1292 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1293 {
1294 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1295 coeff0h = 0;
1296 lhs = XEXP (lhs, 0);
1297 }
1298
1299 if (GET_CODE (rhs) == NEG)
1300 {
1301 coeff1l = -1;
1302 coeff1h = -1;
1303 rhs = XEXP (rhs, 0);
1304 }
1305 else if (GET_CODE (rhs) == MULT
1306 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1307 {
1308 coeff1l = INTVAL (XEXP (rhs, 1));
1309 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1310 rhs = XEXP (rhs, 0);
1311 }
1312 else if (GET_CODE (rhs) == ASHIFT
1313 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1314 && INTVAL (XEXP (rhs, 1)) >= 0
1315 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1316 {
1317 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1318 coeff1h = 0;
1319 rhs = XEXP (rhs, 0);
1320 }
1321
1322 if (rtx_equal_p (lhs, rhs))
1323 {
1324 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1325 rtx coeff;
1326 unsigned HOST_WIDE_INT l;
1327 HOST_WIDE_INT h;
1328
1329 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1330 coeff = immed_double_const (l, h, mode);
1331
1332 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1333 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1334 ? tem : 0;
1335 }
1336 }
1337
1338 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1339 if ((GET_CODE (op1) == CONST_INT
1340 || GET_CODE (op1) == CONST_DOUBLE)
1341 && GET_CODE (op0) == XOR
1342 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1343 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1344 && mode_signbit_p (mode, op1))
1345 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1346 simplify_gen_binary (XOR, mode, op1,
1347 XEXP (op0, 1)));
1348
1349 /* If one of the operands is a PLUS or a MINUS, see if we can
1350 simplify this by the associative law.
1351 Don't use the associative law for floating point.
1352 The inaccuracy makes it nonassociative,
1353 and subtle programs can break if operations are associated. */
1354
1355 if (INTEGRAL_MODE_P (mode)
1356 && (plus_minus_operand_p (op0)
1357 || plus_minus_operand_p (op1))
1358 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1359 return tem;
1360
1361 /* Reassociate floating point addition only when the user
1362 specifies unsafe math optimizations. */
1363 if (FLOAT_MODE_P (mode)
1364 && flag_unsafe_math_optimizations)
1365 {
1366 tem = simplify_associative_operation (code, mode, op0, op1);
1367 if (tem)
1368 return tem;
1369 }
1370 break;
1371
1372 case COMPARE:
1373 #ifdef HAVE_cc0
1374 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1375 using cc0, in which case we want to leave it as a COMPARE
1376 so we can distinguish it from a register-register-copy.
1377
1378 In IEEE floating point, x-0 is not the same as x. */
1379
1380 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1381 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1382 && trueop1 == CONST0_RTX (mode))
1383 return op0;
1384 #endif
1385
1386 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1387 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1388 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1389 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1390 {
1391 rtx xop00 = XEXP (op0, 0);
1392 rtx xop10 = XEXP (op1, 0);
1393
1394 #ifdef HAVE_cc0
1395 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1396 #else
1397 if (REG_P (xop00) && REG_P (xop10)
1398 && GET_MODE (xop00) == GET_MODE (xop10)
1399 && REGNO (xop00) == REGNO (xop10)
1400 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1401 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1402 #endif
1403 return xop00;
1404 }
1405 break;
1406
1407 case MINUS:
1408 /* We can't assume x-x is 0 even with non-IEEE floating point,
1409 but since it is zero except in very strange circumstances, we
1410 will treat it as zero with -funsafe-math-optimizations. */
1411 if (rtx_equal_p (trueop0, trueop1)
1412 && ! side_effects_p (op0)
1413 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1414 return CONST0_RTX (mode);
1415
1416 /* Change subtraction from zero into negation. (0 - x) is the
1417 same as -x when x is NaN, infinite, or finite and nonzero.
1418 But if the mode has signed zeros, and does not round towards
1419 -infinity, then 0 - 0 is 0, not -0. */
1420 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1421 return simplify_gen_unary (NEG, mode, op1, mode);
1422
1423 /* (-1 - a) is ~a. */
1424 if (trueop0 == constm1_rtx)
1425 return simplify_gen_unary (NOT, mode, op1, mode);
1426
1427 /* Subtracting 0 has no effect unless the mode has signed zeros
1428 and supports rounding towards -infinity. In such a case,
1429 0 - 0 is -0. */
1430 if (!(HONOR_SIGNED_ZEROS (mode)
1431 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1432 && trueop1 == CONST0_RTX (mode))
1433 return op0;
1434
1435 /* See if this is something like X * C - X or vice versa or
1436 if the multiplication is written as a shift. If so, we can
1437 distribute and make a new multiply, shift, or maybe just
1438 have X (if C is 2 in the example above). But don't make
1439 something more expensive than we had before. */
1440
1441 if (SCALAR_INT_MODE_P (mode))
1442 {
1443 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1444 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1445 rtx lhs = op0, rhs = op1;
1446
1447 if (GET_CODE (lhs) == NEG)
1448 {
1449 coeff0l = -1;
1450 coeff0h = -1;
1451 lhs = XEXP (lhs, 0);
1452 }
1453 else if (GET_CODE (lhs) == MULT
1454 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1455 {
1456 coeff0l = INTVAL (XEXP (lhs, 1));
1457 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1458 lhs = XEXP (lhs, 0);
1459 }
1460 else if (GET_CODE (lhs) == ASHIFT
1461 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1462 && INTVAL (XEXP (lhs, 1)) >= 0
1463 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1464 {
1465 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1466 coeff0h = 0;
1467 lhs = XEXP (lhs, 0);
1468 }
1469
1470 if (GET_CODE (rhs) == NEG)
1471 {
1472 negcoeff1l = 1;
1473 negcoeff1h = 0;
1474 rhs = XEXP (rhs, 0);
1475 }
1476 else if (GET_CODE (rhs) == MULT
1477 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1478 {
1479 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1480 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1481 rhs = XEXP (rhs, 0);
1482 }
1483 else if (GET_CODE (rhs) == ASHIFT
1484 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1485 && INTVAL (XEXP (rhs, 1)) >= 0
1486 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1487 {
1488 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1489 negcoeff1h = -1;
1490 rhs = XEXP (rhs, 0);
1491 }
1492
1493 if (rtx_equal_p (lhs, rhs))
1494 {
1495 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1496 rtx coeff;
1497 unsigned HOST_WIDE_INT l;
1498 HOST_WIDE_INT h;
1499
1500 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1501 coeff = immed_double_const (l, h, mode);
1502
1503 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1504 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1505 ? tem : 0;
1506 }
1507 }
1508
1509 /* (a - (-b)) -> (a + b). True even for IEEE. */
1510 if (GET_CODE (op1) == NEG)
1511 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1512
1513 /* (-x - c) may be simplified as (-c - x). */
1514 if (GET_CODE (op0) == NEG
1515 && (GET_CODE (op1) == CONST_INT
1516 || GET_CODE (op1) == CONST_DOUBLE))
1517 {
1518 tem = simplify_unary_operation (NEG, mode, op1, mode);
1519 if (tem)
1520 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1521 }
1522
1523 /* Don't let a relocatable value get a negative coeff. */
1524 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1525 return simplify_gen_binary (PLUS, mode,
1526 op0,
1527 neg_const_int (mode, op1));
1528
1529 /* (x - (x & y)) -> (x & ~y) */
1530 if (GET_CODE (op1) == AND)
1531 {
1532 if (rtx_equal_p (op0, XEXP (op1, 0)))
1533 {
1534 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1535 GET_MODE (XEXP (op1, 1)));
1536 return simplify_gen_binary (AND, mode, op0, tem);
1537 }
1538 if (rtx_equal_p (op0, XEXP (op1, 1)))
1539 {
1540 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1541 GET_MODE (XEXP (op1, 0)));
1542 return simplify_gen_binary (AND, mode, op0, tem);
1543 }
1544 }
1545
1546 /* If one of the operands is a PLUS or a MINUS, see if we can
1547 simplify this by the associative law. This will, for example,
1548 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1549 Don't use the associative law for floating point.
1550 The inaccuracy makes it nonassociative,
1551 and subtle programs can break if operations are associated. */
1552
1553 if (INTEGRAL_MODE_P (mode)
1554 && (plus_minus_operand_p (op0)
1555 || plus_minus_operand_p (op1))
1556 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1557 return tem;
1558 break;
1559
1560 case MULT:
1561 if (trueop1 == constm1_rtx)
1562 return simplify_gen_unary (NEG, mode, op0, mode);
1563
1564 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1565 x is NaN, since x * 0 is then also NaN. Nor is it valid
1566 when the mode has signed zeros, since multiplying a negative
1567 number by 0 will give -0, not 0. */
1568 if (!HONOR_NANS (mode)
1569 && !HONOR_SIGNED_ZEROS (mode)
1570 && trueop1 == CONST0_RTX (mode)
1571 && ! side_effects_p (op0))
1572 return op1;
1573
1574 /* In IEEE floating point, x*1 is not equivalent to x for
1575 signalling NaNs. */
1576 if (!HONOR_SNANS (mode)
1577 && trueop1 == CONST1_RTX (mode))
1578 return op0;
1579
1580 /* Convert multiply by constant power of two into shift unless
1581 we are still generating RTL. This test is a kludge. */
1582 if (GET_CODE (trueop1) == CONST_INT
1583 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1584 /* If the mode is larger than the host word size, and the
1585 uppermost bit is set, then this isn't a power of two due
1586 to implicit sign extension. */
1587 && (width <= HOST_BITS_PER_WIDE_INT
1588 || val != HOST_BITS_PER_WIDE_INT - 1))
1589 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1590
1591 /* Likewise for multipliers wider than a word. */
1592 else if (GET_CODE (trueop1) == CONST_DOUBLE
1593 && (GET_MODE (trueop1) == VOIDmode
1594 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1595 && GET_MODE (op0) == mode
1596 && CONST_DOUBLE_LOW (trueop1) == 0
1597 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1598 return simplify_gen_binary (ASHIFT, mode, op0,
1599 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1600
1601 /* x*2 is x+x and x*(-1) is -x */
1602 if (GET_CODE (trueop1) == CONST_DOUBLE
1603 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1604 && GET_MODE (op0) == mode)
1605 {
1606 REAL_VALUE_TYPE d;
1607 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1608
1609 if (REAL_VALUES_EQUAL (d, dconst2))
1610 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1611
1612 if (REAL_VALUES_EQUAL (d, dconstm1))
1613 return simplify_gen_unary (NEG, mode, op0, mode);
1614 }
1615
1616 /* Reassociate multiplication, but for floating point MULTs
1617 only when the user specifies unsafe math optimizations. */
1618 if (! FLOAT_MODE_P (mode)
1619 || flag_unsafe_math_optimizations)
1620 {
1621 tem = simplify_associative_operation (code, mode, op0, op1);
1622 if (tem)
1623 return tem;
1624 }
1625 break;
1626
1627 case IOR:
1628 if (trueop1 == const0_rtx)
1629 return op0;
1630 if (GET_CODE (trueop1) == CONST_INT
1631 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1632 == GET_MODE_MASK (mode)))
1633 return op1;
1634 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1635 return op0;
1636 /* A | (~A) -> -1 */
1637 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1638 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1639 && ! side_effects_p (op0)
1640 && SCALAR_INT_MODE_P (mode))
1641 return constm1_rtx;
1642 tem = simplify_associative_operation (code, mode, op0, op1);
1643 if (tem)
1644 return tem;
1645 break;
1646
1647 case XOR:
1648 if (trueop1 == const0_rtx)
1649 return op0;
1650 if (GET_CODE (trueop1) == CONST_INT
1651 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1652 == GET_MODE_MASK (mode)))
1653 return simplify_gen_unary (NOT, mode, op0, mode);
1654 if (rtx_equal_p (trueop0, trueop1)
1655 && ! side_effects_p (op0)
1656 && GET_MODE_CLASS (mode) != MODE_CC)
1657 return CONST0_RTX (mode);
1658
1659 /* Canonicalize XOR of the most significant bit to PLUS. */
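      /* (XOR and PLUS agree on the sign bit because the carry out of
	 the top bit is discarded, so the two forms are equivalent.)  */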
1660 if ((GET_CODE (op1) == CONST_INT
1661 || GET_CODE (op1) == CONST_DOUBLE)
1662 && mode_signbit_p (mode, op1))
1663 return simplify_gen_binary (PLUS, mode, op0, op1);
1664 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1665 if ((GET_CODE (op1) == CONST_INT
1666 || GET_CODE (op1) == CONST_DOUBLE)
1667 && GET_CODE (op0) == PLUS
1668 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1669 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1670 && mode_signbit_p (mode, XEXP (op0, 1)))
1671 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1672 simplify_gen_binary (XOR, mode, op1,
1673 XEXP (op0, 1)));
1674
1675 tem = simplify_associative_operation (code, mode, op0, op1);
1676 if (tem)
1677 return tem;
1678 break;
1679
1680 case AND:
1681 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1682 return trueop1;
1683 /* If we are turning off bits already known off in OP0, we need
1684 not do an AND. */
1685 if (GET_CODE (trueop1) == CONST_INT
1686 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1687 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1688 return op0;
1689 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
1690 && GET_MODE_CLASS (mode) != MODE_CC)
1691 return op0;
1692 /* A & (~A) -> 0 */
1693 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1694 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1695 && ! side_effects_p (op0)
1696 && GET_MODE_CLASS (mode) != MODE_CC)
1697 return CONST0_RTX (mode);
1698
1699 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1700 there are no nonzero bits of C outside of X's mode. */
1701 if ((GET_CODE (op0) == SIGN_EXTEND
1702 || GET_CODE (op0) == ZERO_EXTEND)
1703 && GET_CODE (trueop1) == CONST_INT
1704 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1705 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
1706 & INTVAL (trueop1)) == 0)
1707 {
1708 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
1709 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
1710 gen_int_mode (INTVAL (trueop1),
1711 imode));
1712 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
1713 }
1714
1715 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1716 ((A & N) + B) & M -> (A + B) & M
1717 Similarly if (N & M) == 0,
1718 ((A | N) + B) & M -> (A + B) & M
1719 and for - instead of + and/or ^ instead of |. */
1720 if (GET_CODE (trueop1) == CONST_INT
1721 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1722 && ~INTVAL (trueop1)
1723 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1724 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1725 {
1726 rtx pmop[2];
1727 int which;
1728
1729 pmop[0] = XEXP (op0, 0);
1730 pmop[1] = XEXP (op0, 1);
1731
1732 for (which = 0; which < 2; which++)
1733 {
1734 tem = pmop[which];
1735 switch (GET_CODE (tem))
1736 {
1737 case AND:
1738 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1739 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1740 == INTVAL (trueop1))
1741 pmop[which] = XEXP (tem, 0);
1742 break;
1743 case IOR:
1744 case XOR:
1745 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1746 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1747 pmop[which] = XEXP (tem, 0);
1748 break;
1749 default:
1750 break;
1751 }
1752 }
1753
1754 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1755 {
1756 tem = simplify_gen_binary (GET_CODE (op0), mode,
1757 pmop[0], pmop[1]);
1758 return simplify_gen_binary (code, mode, tem, op1);
1759 }
1760 }
1761 tem = simplify_associative_operation (code, mode, op0, op1);
1762 if (tem)
1763 return tem;
1764 break;
1765
1766 case UDIV:
1767 /* 0/x is 0 (or x&0 if x has side-effects). */
1768 if (trueop0 == CONST0_RTX (mode))
1769 {
1770 if (side_effects_p (op1))
1771 return simplify_gen_binary (AND, mode, op1, trueop0);
1772 return trueop0;
1773 }
1774 /* x/1 is x. */
1775 if (trueop1 == CONST1_RTX (mode))
1776 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
1777 /* Convert divide by power of two into shift. */
1778 if (GET_CODE (trueop1) == CONST_INT
1779 && (val = exact_log2 (INTVAL (trueop1))) > 0)
1780 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
1781 break;
1782
1783 case DIV:
1784 /* Handle floating point and integers separately. */
1785 if (SCALAR_FLOAT_MODE_P (mode))
1786 {
1787 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1788 safe for modes with NaNs, since 0.0 / 0.0 will then be
1789 NaN rather than 0.0. Nor is it safe for modes with signed
1790 zeros, since dividing 0 by a negative number gives -0.0 */
1791 if (trueop0 == CONST0_RTX (mode)
1792 && !HONOR_NANS (mode)
1793 && !HONOR_SIGNED_ZEROS (mode)
1794 && ! side_effects_p (op1))
1795 return op0;
1796 /* x/1.0 is x. */
1797 if (trueop1 == CONST1_RTX (mode)
1798 && !HONOR_SNANS (mode))
1799 return op0;
1800
1801 if (GET_CODE (trueop1) == CONST_DOUBLE
1802 && trueop1 != CONST0_RTX (mode))
1803 {
1804 REAL_VALUE_TYPE d;
1805 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1806
1807 /* x/-1.0 is -x. */
1808 if (REAL_VALUES_EQUAL (d, dconstm1)
1809 && !HONOR_SNANS (mode))
1810 return simplify_gen_unary (NEG, mode, op0, mode);
1811
1812 /* Change FP division by a constant into multiplication.
1813 Only do this with -funsafe-math-optimizations. */
1814 if (flag_unsafe_math_optimizations
1815 && !REAL_VALUES_EQUAL (d, dconst0))
1816 {
1817 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1818 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1819 return simplify_gen_binary (MULT, mode, op0, tem);
1820 }
1821 }
1822 }
1823 else
1824 {
1825 /* 0/x is 0 (or x&0 if x has side-effects). */
1826 if (trueop0 == CONST0_RTX (mode))
1827 {
1828 if (side_effects_p (op1))
1829 return simplify_gen_binary (AND, mode, op1, trueop0);
1830 return trueop0;
1831 }
1832 /* x/1 is x. */
1833 if (trueop1 == CONST1_RTX (mode))
1834 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
1835 /* x/-1 is -x. */
1836 if (trueop1 == constm1_rtx)
1837 {
1838 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
1839 return simplify_gen_unary (NEG, mode, x, mode);
1840 }
1841 }
1842 break;
1843
1844 case UMOD:
1845 /* 0%x is 0 (or x&0 if x has side-effects). */
1846 if (trueop0 == CONST0_RTX (mode))
1847 {
1848 if (side_effects_p (op1))
1849 return simplify_gen_binary (AND, mode, op1, trueop0);
1850 return trueop0;
1851 }
1852 /* x%1 is 0 (or x&0 if x has side-effects).  */
1853 if (trueop1 == CONST1_RTX (mode))
1854 {
1855 if (side_effects_p (op0))
1856 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
1857 return CONST0_RTX (mode);
1858 }
1859 /* Implement modulus by power of two as AND. */
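/* For instance, (umod:SI x (const_int 16))
   becomes (and:SI x (const_int 15)).  */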
1860 if (GET_CODE (trueop1) == CONST_INT
1861 && exact_log2 (INTVAL (trueop1)) > 0)
1862 return simplify_gen_binary (AND, mode, op0,
1863 GEN_INT (INTVAL (op1) - 1));
1864 break;
1865
1866 case MOD:
1867 /* 0%x is 0 (or x&0 if x has side-effects). */
1868 if (trueop0 == CONST0_RTX (mode))
1869 {
1870 if (side_effects_p (op1))
1871 return simplify_gen_binary (AND, mode, op1, trueop0);
1872 return trueop0;
1873 }
1874 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
1875 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
1876 {
1877 if (side_effects_p (op0))
1878 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
1879 return CONST0_RTX (mode);
1880 }
1881 break;
1882
1883 case ROTATERT:
1884 case ROTATE:
1885 case ASHIFTRT:
1886 /* Rotating or arithmetically right-shifting ~0 always results in ~0.  */
1887 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1888 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1889 && ! side_effects_p (op1))
1890 return op0;
1891
1892 /* Fall through.... */
1893
1894 case ASHIFT:
1895 case LSHIFTRT:
1896 if (trueop1 == CONST0_RTX (mode))
1897 return op0;
1898 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
1899 return op0;
1900 break;
1901
1902 case SMIN:
1903 if (width <= HOST_BITS_PER_WIDE_INT
1904 && GET_CODE (trueop1) == CONST_INT
1905 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1906 && ! side_effects_p (op0))
1907 return op1;
1908 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1909 return op0;
1910 tem = simplify_associative_operation (code, mode, op0, op1);
1911 if (tem)
1912 return tem;
1913 break;
1914
1915 case SMAX:
1916 if (width <= HOST_BITS_PER_WIDE_INT
1917 && GET_CODE (trueop1) == CONST_INT
1918 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1919 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1920 && ! side_effects_p (op0))
1921 return op1;
1922 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1923 return op0;
1924 tem = simplify_associative_operation (code, mode, op0, op1);
1925 if (tem)
1926 return tem;
1927 break;
1928
1929 case UMIN:
1930 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1931 return op1;
1932 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1933 return op0;
1934 tem = simplify_associative_operation (code, mode, op0, op1);
1935 if (tem)
1936 return tem;
1937 break;
1938
1939 case UMAX:
1940 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1941 return op1;
1942 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1943 return op0;
1944 tem = simplify_associative_operation (code, mode, op0, op1);
1945 if (tem)
1946 return tem;
1947 break;
1948
1949 case SS_PLUS:
1950 case US_PLUS:
1951 case SS_MINUS:
1952 case US_MINUS:
1953 /* ??? There are simplifications that can be done. */
1954 return 0;
1955
1956 case VEC_SELECT:
1957 if (!VECTOR_MODE_P (mode))
1958 {
1959 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1960 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
1961 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1962 gcc_assert (XVECLEN (trueop1, 0) == 1);
1963 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
1964
1965 if (GET_CODE (trueop0) == CONST_VECTOR)
1966 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
1967 (trueop1, 0, 0)));
1968 }
1969 else
1970 {
1971 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1972 gcc_assert (GET_MODE_INNER (mode)
1973 == GET_MODE_INNER (GET_MODE (trueop0)));
1974 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1975
1976 if (GET_CODE (trueop0) == CONST_VECTOR)
1977 {
1978 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1979 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1980 rtvec v = rtvec_alloc (n_elts);
1981 unsigned int i;
1982
1983 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
1984 for (i = 0; i < n_elts; i++)
1985 {
1986 rtx x = XVECEXP (trueop1, 0, i);
1987
1988 gcc_assert (GET_CODE (x) == CONST_INT);
1989 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
1990 INTVAL (x));
1991 }
1992
1993 return gen_rtx_CONST_VECTOR (mode, v);
1994 }
1995 }
1996 return 0;
1997 case VEC_CONCAT:
1998 {
1999 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2000 ? GET_MODE (trueop0)
2001 : GET_MODE_INNER (mode));
2002 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2003 ? GET_MODE (trueop1)
2004 : GET_MODE_INNER (mode));
2005
2006 gcc_assert (VECTOR_MODE_P (mode));
2007 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2008 == GET_MODE_SIZE (mode));
2009
2010 if (VECTOR_MODE_P (op0_mode))
2011 gcc_assert (GET_MODE_INNER (mode)
2012 == GET_MODE_INNER (op0_mode));
2013 else
2014 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2015
2016 if (VECTOR_MODE_P (op1_mode))
2017 gcc_assert (GET_MODE_INNER (mode)
2018 == GET_MODE_INNER (op1_mode));
2019 else
2020 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2021
2022 if ((GET_CODE (trueop0) == CONST_VECTOR
2023 || GET_CODE (trueop0) == CONST_INT
2024 || GET_CODE (trueop0) == CONST_DOUBLE)
2025 && (GET_CODE (trueop1) == CONST_VECTOR
2026 || GET_CODE (trueop1) == CONST_INT
2027 || GET_CODE (trueop1) == CONST_DOUBLE))
2028 {
2029 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2030 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2031 rtvec v = rtvec_alloc (n_elts);
2032 unsigned int i;
2033 unsigned in_n_elts = 1;
2034
2035 if (VECTOR_MODE_P (op0_mode))
2036 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2037 for (i = 0; i < n_elts; i++)
2038 {
2039 if (i < in_n_elts)
2040 {
2041 if (!VECTOR_MODE_P (op0_mode))
2042 RTVEC_ELT (v, i) = trueop0;
2043 else
2044 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2045 }
2046 else
2047 {
2048 if (!VECTOR_MODE_P (op1_mode))
2049 RTVEC_ELT (v, i) = trueop1;
2050 else
2051 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2052 i - in_n_elts);
2053 }
2054 }
2055
2056 return gen_rtx_CONST_VECTOR (mode, v);
2057 }
2058 }
2059 return 0;
2060
2061 default:
2062 gcc_unreachable ();
2063 }
2064
2065 return 0;
2066 }
2067
2068 rtx
2069 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2070 rtx op0, rtx op1)
2071 {
2072 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2073 HOST_WIDE_INT val;
2074 unsigned int width = GET_MODE_BITSIZE (mode);
2075
2076 if (VECTOR_MODE_P (mode)
2077 && code != VEC_CONCAT
2078 && GET_CODE (op0) == CONST_VECTOR
2079 && GET_CODE (op1) == CONST_VECTOR)
2080 {
2081 unsigned n_elts = GET_MODE_NUNITS (mode);
2082 enum machine_mode op0mode = GET_MODE (op0);
2083 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2084 enum machine_mode op1mode = GET_MODE (op1);
2085 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2086 rtvec v = rtvec_alloc (n_elts);
2087 unsigned int i;
2088
2089 gcc_assert (op0_n_elts == n_elts);
2090 gcc_assert (op1_n_elts == n_elts);
2091 for (i = 0; i < n_elts; i++)
2092 {
2093 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2094 CONST_VECTOR_ELT (op0, i),
2095 CONST_VECTOR_ELT (op1, i));
2096 if (!x)
2097 return 0;
2098 RTVEC_ELT (v, i) = x;
2099 }
2100
2101 return gen_rtx_CONST_VECTOR (mode, v);
2102 }
2103
2104 if (VECTOR_MODE_P (mode)
2105 && code == VEC_CONCAT
2106 && CONSTANT_P (op0) && CONSTANT_P (op1))
2107 {
2108 unsigned n_elts = GET_MODE_NUNITS (mode);
2109 rtvec v = rtvec_alloc (n_elts);
2110
2111 gcc_assert (n_elts >= 2);
2112 if (n_elts == 2)
2113 {
2114 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2115 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2116
2117 RTVEC_ELT (v, 0) = op0;
2118 RTVEC_ELT (v, 1) = op1;
2119 }
2120 else
2121 {
2122 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2123 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2124 unsigned i;
2125
2126 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2127 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2128 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2129
2130 for (i = 0; i < op0_n_elts; ++i)
2131 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2132 for (i = 0; i < op1_n_elts; ++i)
2133 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2134 }
2135
2136 return gen_rtx_CONST_VECTOR (mode, v);
2137 }
2138
2139 if (SCALAR_FLOAT_MODE_P (mode)
2140 && GET_CODE (op0) == CONST_DOUBLE
2141 && GET_CODE (op1) == CONST_DOUBLE
2142 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2143 {
2144 if (code == AND
2145 || code == IOR
2146 || code == XOR)
2147 {
2148 long tmp0[4];
2149 long tmp1[4];
2150 REAL_VALUE_TYPE r;
2151 int i;
2152
2153 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2154 GET_MODE (op0));
2155 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2156 GET_MODE (op1));
2157 for (i = 0; i < 4; i++)
2158 {
2159 switch (code)
2160 {
2161 case AND:
2162 tmp0[i] &= tmp1[i];
2163 break;
2164 case IOR:
2165 tmp0[i] |= tmp1[i];
2166 break;
2167 case XOR:
2168 tmp0[i] ^= tmp1[i];
2169 break;
2170 default:
2171 gcc_unreachable ();
2172 }
2173 }
2174 real_from_target (&r, tmp0, mode);
2175 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2176 }
2177 else
2178 {
2179 REAL_VALUE_TYPE f0, f1, value, result;
2180 bool inexact;
2181
2182 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2183 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2184 real_convert (&f0, mode, &f0);
2185 real_convert (&f1, mode, &f1);
2186
2187 if (HONOR_SNANS (mode)
2188 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2189 return 0;
2190
2191 if (code == DIV
2192 && REAL_VALUES_EQUAL (f1, dconst0)
2193 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2194 return 0;
2195
2196 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2197 && flag_trapping_math
2198 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2199 {
2200 int s0 = REAL_VALUE_NEGATIVE (f0);
2201 int s1 = REAL_VALUE_NEGATIVE (f1);
2202
2203 switch (code)
2204 {
2205 case PLUS:
2206 /* Inf + -Inf = NaN plus exception. */
2207 if (s0 != s1)
2208 return 0;
2209 break;
2210 case MINUS:
2211 /* Inf - Inf = NaN plus exception. */
2212 if (s0 == s1)
2213 return 0;
2214 break;
2215 case DIV:
2216 /* Inf / Inf = NaN plus exception. */
2217 return 0;
2218 default:
2219 break;
2220 }
2221 }
2222
2223 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2224 && flag_trapping_math
2225 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2226 || (REAL_VALUE_ISINF (f1)
2227 && REAL_VALUES_EQUAL (f0, dconst0))))
2228 /* Inf * 0 = NaN plus exception. */
2229 return 0;
2230
2231 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2232 &f0, &f1);
2233 real_convert (&result, mode, &value);
2234
2235 /* Don't constant fold this floating point operation if
2236 the result has overflowed and flag_trapping_math is set.  */
2237
2238 if (flag_trapping_math
2239 && MODE_HAS_INFINITIES (mode)
2240 && REAL_VALUE_ISINF (result)
2241 && !REAL_VALUE_ISINF (f0)
2242 && !REAL_VALUE_ISINF (f1))
2243 /* Overflow plus exception. */
2244 return 0;
2245
2246 /* Don't constant fold this floating point operation if the
2247 result may depend upon the run-time rounding mode and
2248 flag_rounding_math is set, or if GCC's software emulation
2249 is unable to accurately represent the result. */
2250
2251 if ((flag_rounding_math
2252 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2253 && !flag_unsafe_math_optimizations))
2254 && (inexact || !real_identical (&result, &value)))
2255 return NULL_RTX;
2256
2257 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2258 }
2259 }
2260
2261 /* We can fold some multi-word operations. */
2262 if (GET_MODE_CLASS (mode) == MODE_INT
2263 && width == HOST_BITS_PER_WIDE_INT * 2
2264 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2265 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2266 {
2267 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2268 HOST_WIDE_INT h1, h2, hv, ht;
2269
2270 if (GET_CODE (op0) == CONST_DOUBLE)
2271 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2272 else
2273 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2274
2275 if (GET_CODE (op1) == CONST_DOUBLE)
2276 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2277 else
2278 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2279
2280 switch (code)
2281 {
2282 case MINUS:
2283 /* A - B == A + (-B). */
2284 neg_double (l2, h2, &lv, &hv);
2285 l2 = lv, h2 = hv;
2286
2287 /* Fall through.... */
2288
2289 case PLUS:
2290 add_double (l1, h1, l2, h2, &lv, &hv);
2291 break;
2292
2293 case MULT:
2294 mul_double (l1, h1, l2, h2, &lv, &hv);
2295 break;
2296
2297 case DIV:
2298 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2299 &lv, &hv, &lt, &ht))
2300 return 0;
2301 break;
2302
2303 case MOD:
2304 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2305 &lt, &ht, &lv, &hv))
2306 return 0;
2307 break;
2308
2309 case UDIV:
2310 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2311 &lv, &hv, &lt, &ht))
2312 return 0;
2313 break;
2314
2315 case UMOD:
2316 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2317 &lt, &ht, &lv, &hv))
2318 return 0;
2319 break;
2320
2321 case AND:
2322 lv = l1 & l2, hv = h1 & h2;
2323 break;
2324
2325 case IOR:
2326 lv = l1 | l2, hv = h1 | h2;
2327 break;
2328
2329 case XOR:
2330 lv = l1 ^ l2, hv = h1 ^ h2;
2331 break;
2332
2333 case SMIN:
2334 if (h1 < h2
2335 || (h1 == h2
2336 && ((unsigned HOST_WIDE_INT) l1
2337 < (unsigned HOST_WIDE_INT) l2)))
2338 lv = l1, hv = h1;
2339 else
2340 lv = l2, hv = h2;
2341 break;
2342
2343 case SMAX:
2344 if (h1 > h2
2345 || (h1 == h2
2346 && ((unsigned HOST_WIDE_INT) l1
2347 > (unsigned HOST_WIDE_INT) l2)))
2348 lv = l1, hv = h1;
2349 else
2350 lv = l2, hv = h2;
2351 break;
2352
2353 case UMIN:
2354 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2355 || (h1 == h2
2356 && ((unsigned HOST_WIDE_INT) l1
2357 < (unsigned HOST_WIDE_INT) l2)))
2358 lv = l1, hv = h1;
2359 else
2360 lv = l2, hv = h2;
2361 break;
2362
2363 case UMAX:
2364 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2365 || (h1 == h2
2366 && ((unsigned HOST_WIDE_INT) l1
2367 > (unsigned HOST_WIDE_INT) l2)))
2368 lv = l1, hv = h1;
2369 else
2370 lv = l2, hv = h2;
2371 break;
2372
2373 case LSHIFTRT: case ASHIFTRT:
2374 case ASHIFT:
2375 case ROTATE: case ROTATERT:
2376 if (SHIFT_COUNT_TRUNCATED)
2377 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2378
2379 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2380 return 0;
2381
2382 if (code == LSHIFTRT || code == ASHIFTRT)
2383 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2384 code == ASHIFTRT);
2385 else if (code == ASHIFT)
2386 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2387 else if (code == ROTATE)
2388 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2389 else /* code == ROTATERT */
2390 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2391 break;
2392
2393 default:
2394 return 0;
2395 }
2396
2397 return immed_double_const (lv, hv, mode);
2398 }
2399
2400 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2401 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2402 {
2403 /* Get the integer argument values in two forms:
2404 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
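/* For example, the QImode bit pattern 0xff is 255 in ARG0/ARG1
   but -1 in ARG0S/ARG1S.  */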
2405
2406 arg0 = INTVAL (op0);
2407 arg1 = INTVAL (op1);
2408
2409 if (width < HOST_BITS_PER_WIDE_INT)
2410 {
2411 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2412 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2413
2414 arg0s = arg0;
2415 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2416 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2417
2418 arg1s = arg1;
2419 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2420 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2421 }
2422 else
2423 {
2424 arg0s = arg0;
2425 arg1s = arg1;
2426 }
2427
2428 /* Compute the value of the arithmetic. */
2429
2430 switch (code)
2431 {
2432 case PLUS:
2433 val = arg0s + arg1s;
2434 break;
2435
2436 case MINUS:
2437 val = arg0s - arg1s;
2438 break;
2439
2440 case MULT:
2441 val = arg0s * arg1s;
2442 break;
2443
2444 case DIV:
2445 if (arg1s == 0
2446 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2447 && arg1s == -1))
2448 return 0;
2449 val = arg0s / arg1s;
2450 break;
2451
2452 case MOD:
2453 if (arg1s == 0
2454 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2455 && arg1s == -1))
2456 return 0;
2457 val = arg0s % arg1s;
2458 break;
2459
2460 case UDIV:
2461 if (arg1 == 0
2462 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2463 && arg1s == -1))
2464 return 0;
2465 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2466 break;
2467
2468 case UMOD:
2469 if (arg1 == 0
2470 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2471 && arg1s == -1))
2472 return 0;
2473 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2474 break;
2475
2476 case AND:
2477 val = arg0 & arg1;
2478 break;
2479
2480 case IOR:
2481 val = arg0 | arg1;
2482 break;
2483
2484 case XOR:
2485 val = arg0 ^ arg1;
2486 break;
2487
2488 case LSHIFTRT:
2489 case ASHIFT:
2490 case ASHIFTRT:
2491 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2492 the value is in range. We can't return any old value for
2493 out-of-range arguments because either the middle-end (via
2494 shift_truncation_mask) or the back-end might be relying on
2495 target-specific knowledge. Nor can we rely on
2496 shift_truncation_mask, since the shift might not be part of an
2497 ashlM3, lshrM3 or ashrM3 instruction. */
2498 if (SHIFT_COUNT_TRUNCATED)
2499 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2500 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2501 return 0;
2502
2503 val = (code == ASHIFT
2504 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2505 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2506
2507 /* Sign-extend the result for arithmetic right shifts. */
2508 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2509 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2510 break;
2511
2512 case ROTATERT:
2513 if (arg1 < 0)
2514 return 0;
2515
2516 arg1 %= width;
2517 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2518 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2519 break;
2520
2521 case ROTATE:
2522 if (arg1 < 0)
2523 return 0;
2524
2525 arg1 %= width;
2526 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2527 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2528 break;
2529
2530 case COMPARE:
2531 /* Do nothing here. */
2532 return 0;
2533
2534 case SMIN:
2535 val = arg0s <= arg1s ? arg0s : arg1s;
2536 break;
2537
2538 case UMIN:
2539 val = ((unsigned HOST_WIDE_INT) arg0
2540 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2541 break;
2542
2543 case SMAX:
2544 val = arg0s > arg1s ? arg0s : arg1s;
2545 break;
2546
2547 case UMAX:
2548 val = ((unsigned HOST_WIDE_INT) arg0
2549 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2550 break;
2551
2552 case SS_PLUS:
2553 case US_PLUS:
2554 case SS_MINUS:
2555 case US_MINUS:
2556 /* ??? There are simplifications that can be done. */
2557 return 0;
2558
2559 default:
2560 gcc_unreachable ();
2561 }
2562
2563 return gen_int_mode (val, mode);
2564 }
2565
2566 return NULL_RTX;
2567 }
2568
2569
2570 \f
2571 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2572 PLUS or MINUS.
2573
2574 Rather than test for specific cases, we do this by a brute-force method
2575 and do all possible simplifications until no more changes occur. Then
2576 we rebuild the operation. */
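/* For example, given (plus (minus a b) (plus (neg a) c)) the operands
   are flattened into the list { +a, -b, -a, +c }; the a and -a terms
   cancel, and the result is rebuilt as roughly (minus c b).  */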
2577
2578 struct simplify_plus_minus_op_data
2579 {
2580 rtx op;
2581 short neg;
2582 short ix;
2583 };
2584
2585 static int
2586 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2587 {
2588 const struct simplify_plus_minus_op_data *d1 = p1;
2589 const struct simplify_plus_minus_op_data *d2 = p2;
2590 int result;
2591
2592 result = (commutative_operand_precedence (d2->op)
2593 - commutative_operand_precedence (d1->op));
2594 if (result)
2595 return result;
2596 return d1->ix - d2->ix;
2597 }
2598
2599 static rtx
2600 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2601 rtx op1)
2602 {
2603 struct simplify_plus_minus_op_data ops[8];
2604 rtx result, tem;
2605 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2606 int first, changed, canonicalized = 0;
2607 int i, j;
2608
2609 memset (ops, 0, sizeof ops);
2610
2611 /* Set up the two operands and then expand them until nothing has been
2612 changed. If we run out of room in our array, give up; this should
2613 almost never happen. */
2614
2615 ops[0].op = op0;
2616 ops[0].neg = 0;
2617 ops[1].op = op1;
2618 ops[1].neg = (code == MINUS);
2619
2620 do
2621 {
2622 changed = 0;
2623
2624 for (i = 0; i < n_ops; i++)
2625 {
2626 rtx this_op = ops[i].op;
2627 int this_neg = ops[i].neg;
2628 enum rtx_code this_code = GET_CODE (this_op);
2629
2630 switch (this_code)
2631 {
2632 case PLUS:
2633 case MINUS:
2634 if (n_ops == 7)
2635 return NULL_RTX;
2636
2637 ops[n_ops].op = XEXP (this_op, 1);
2638 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2639 n_ops++;
2640
2641 ops[i].op = XEXP (this_op, 0);
2642 input_ops++;
2643 changed = 1;
2644 canonicalized |= this_neg;
2645 break;
2646
2647 case NEG:
2648 ops[i].op = XEXP (this_op, 0);
2649 ops[i].neg = ! this_neg;
2650 changed = 1;
2651 canonicalized = 1;
2652 break;
2653
2654 case CONST:
2655 if (n_ops < 7
2656 && GET_CODE (XEXP (this_op, 0)) == PLUS
2657 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2658 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2659 {
2660 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2661 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2662 ops[n_ops].neg = this_neg;
2663 n_ops++;
2664 input_consts++;
2665 changed = 1;
2666 canonicalized = 1;
2667 }
2668 break;
2669
2670 case NOT:
2671 /* ~a -> (-a - 1) */
2672 if (n_ops != 7)
2673 {
2674 ops[n_ops].op = constm1_rtx;
2675 ops[n_ops++].neg = this_neg;
2676 ops[i].op = XEXP (this_op, 0);
2677 ops[i].neg = !this_neg;
2678 changed = 1;
2679 canonicalized = 1;
2680 }
2681 break;
2682
2683 case CONST_INT:
2684 if (this_neg)
2685 {
2686 ops[i].op = neg_const_int (mode, this_op);
2687 ops[i].neg = 0;
2688 changed = 1;
2689 canonicalized = 1;
2690 }
2691 break;
2692
2693 default:
2694 break;
2695 }
2696 }
2697 }
2698 while (changed);
2699
2700 gcc_assert (n_ops >= 2);
2701 if (!canonicalized)
2702 return NULL_RTX;
2703
2704 /* If we only have two operands, we can avoid the loops. */
2705 if (n_ops == 2)
2706 {
2707 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
2708 rtx lhs, rhs;
2709
2710 /* Get the two operands. Be careful with the order, especially for
2711 the cases where code == MINUS. */
2712 if (ops[0].neg && ops[1].neg)
2713 {
2714 lhs = gen_rtx_NEG (mode, ops[0].op);
2715 rhs = ops[1].op;
2716 }
2717 else if (ops[0].neg)
2718 {
2719 lhs = ops[1].op;
2720 rhs = ops[0].op;
2721 }
2722 else
2723 {
2724 lhs = ops[0].op;
2725 rhs = ops[1].op;
2726 }
2727
2728 return simplify_const_binary_operation (code, mode, lhs, rhs);
2729 }
2730
2731 /* Count the number of CONSTs we didn't split above. */
2732 for (i = 0; i < n_ops; i++)
2733 if (GET_CODE (ops[i].op) == CONST)
2734 input_consts++;
2735
2736 /* Now simplify each pair of operands until nothing changes. The first
2737 time through just simplify constants against each other. */
2738
2739 first = 1;
2740 do
2741 {
2742 changed = first;
2743
2744 for (i = 0; i < n_ops - 1; i++)
2745 for (j = i + 1; j < n_ops; j++)
2746 {
2747 rtx lhs = ops[i].op, rhs = ops[j].op;
2748 int lneg = ops[i].neg, rneg = ops[j].neg;
2749
2750 if (lhs != 0 && rhs != 0
2751 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2752 {
2753 enum rtx_code ncode = PLUS;
2754
2755 if (lneg != rneg)
2756 {
2757 ncode = MINUS;
2758 if (lneg)
2759 tem = lhs, lhs = rhs, rhs = tem;
2760 }
2761 else if (swap_commutative_operands_p (lhs, rhs))
2762 tem = lhs, lhs = rhs, rhs = tem;
2763
2764 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2765
2766 /* Reject "simplifications" that just wrap the two
2767 arguments in a CONST. Failure to do so can result
2768 in infinite recursion with simplify_binary_operation
2769 when it calls us to simplify CONST operations. */
2770 if (tem
2771 && ! (GET_CODE (tem) == CONST
2772 && GET_CODE (XEXP (tem, 0)) == ncode
2773 && XEXP (XEXP (tem, 0), 0) == lhs
2774 && XEXP (XEXP (tem, 0), 1) == rhs)
2775 /* Don't allow -x + -1 -> ~x simplifications in the
2776 first pass. This allows us the chance to combine
2777 the -1 with other constants. */
2778 && ! (first
2779 && GET_CODE (tem) == NOT
2780 && XEXP (tem, 0) == rhs))
2781 {
2782 lneg &= rneg;
2783 if (GET_CODE (tem) == NEG)
2784 tem = XEXP (tem, 0), lneg = !lneg;
2785 if (GET_CODE (tem) == CONST_INT && lneg)
2786 tem = neg_const_int (mode, tem), lneg = 0;
2787
2788 ops[i].op = tem;
2789 ops[i].neg = lneg;
2790 ops[j].op = NULL_RTX;
2791 changed = 1;
2792 }
2793 }
2794 }
2795
2796 first = 0;
2797 }
2798 while (changed);
2799
2800 /* Pack all the operands to the lower-numbered entries. */
2801 for (i = 0, j = 0; j < n_ops; j++)
2802 if (ops[j].op)
2803 {
2804 ops[i] = ops[j];
2805 /* Stabilize sort. */
2806 ops[i].ix = i;
2807 i++;
2808 }
2809 n_ops = i;
2810
2811 /* Sort the operations by commutative_operand_precedence, the ordering that swap_commutative_operands_p is based on.  */
2812 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2813
2814 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2815 if (n_ops == 2
2816 && GET_CODE (ops[1].op) == CONST_INT
2817 && CONSTANT_P (ops[0].op)
2818 && ops[0].neg)
2819 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2820
2821 /* We suppressed creation of trivial CONST expressions in the
2822 combination loop to avoid recursion. Create one manually now.
2823 The combination loop should have ensured that there is exactly
2824 one CONST_INT, and the sort will have ensured that it is last
2825 in the array and that any other constant will be next-to-last. */
2826
2827 if (n_ops > 1
2828 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2829 && CONSTANT_P (ops[n_ops - 2].op))
2830 {
2831 rtx value = ops[n_ops - 1].op;
2832 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2833 value = neg_const_int (mode, value);
2834 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2835 n_ops--;
2836 }
2837
2838 /* Count the number of CONSTs that we generated. */
2839 n_consts = 0;
2840 for (i = 0; i < n_ops; i++)
2841 if (GET_CODE (ops[i].op) == CONST)
2842 n_consts++;
2843
2844 /* Put a non-negated operand first, if possible. */
2845
2846 for (i = 0; i < n_ops && ops[i].neg; i++)
2847 continue;
2848 if (i == n_ops)
2849 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2850 else if (i != 0)
2851 {
2852 tem = ops[0].op;
2853 ops[0] = ops[i];
2854 ops[i].op = tem;
2855 ops[i].neg = 1;
2856 }
2857
2858 /* Now make the result by performing the requested operations. */
2859 result = ops[0].op;
2860 for (i = 1; i < n_ops; i++)
2861 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2862 mode, result, ops[i].op);
2863
2864 return result;
2865 }
2866
2867 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2868 static bool
2869 plus_minus_operand_p (rtx x)
2870 {
2871 return GET_CODE (x) == PLUS
2872 || GET_CODE (x) == MINUS
2873 || (GET_CODE (x) == CONST
2874 && GET_CODE (XEXP (x, 0)) == PLUS
2875 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2876 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2877 }
2878
2879 /* Like simplify_binary_operation except used for relational operators.
2880 MODE is the mode of the result. If MODE is VOIDmode, the operands must
2881 not both be VOIDmode as well.
2882
2883 CMP_MODE specifies the mode in which the comparison is done, so it is
2884 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2885 the operands or, if both are VOIDmode, the operands are compared in
2886 "infinite precision". */
2887 rtx
2888 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2889 enum machine_mode cmp_mode, rtx op0, rtx op1)
2890 {
2891 rtx tem, trueop0, trueop1;
2892
2893 if (cmp_mode == VOIDmode)
2894 cmp_mode = GET_MODE (op0);
2895 if (cmp_mode == VOIDmode)
2896 cmp_mode = GET_MODE (op1);
2897
2898 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2899 if (tem)
2900 {
2901 if (SCALAR_FLOAT_MODE_P (mode))
2902 {
2903 if (tem == const0_rtx)
2904 return CONST0_RTX (mode);
2905 #ifdef FLOAT_STORE_FLAG_VALUE
2906 {
2907 REAL_VALUE_TYPE val;
2908 val = FLOAT_STORE_FLAG_VALUE (mode);
2909 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2910 }
2911 #else
2912 return NULL_RTX;
2913 #endif
2914 }
2915 if (VECTOR_MODE_P (mode))
2916 {
2917 if (tem == const0_rtx)
2918 return CONST0_RTX (mode);
2919 #ifdef VECTOR_STORE_FLAG_VALUE
2920 {
2921 int i, units;
2922 rtvec v;
2923
2924 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2925 if (val == NULL_RTX)
2926 return NULL_RTX;
2927 if (val == const1_rtx)
2928 return CONST1_RTX (mode);
2929
2930 units = GET_MODE_NUNITS (mode);
2931 v = rtvec_alloc (units);
2932 for (i = 0; i < units; i++)
2933 RTVEC_ELT (v, i) = val;
2934 return gen_rtx_raw_CONST_VECTOR (mode, v);
2935 }
2936 #else
2937 return NULL_RTX;
2938 #endif
2939 }
2940
2941 return tem;
2942 }
2943
2944 /* For the following tests, ensure const0_rtx is op1. */
2945 if (swap_commutative_operands_p (op0, op1)
2946 || (op0 == const0_rtx && op1 != const0_rtx))
2947 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2948
2949 /* If op0 is a compare, extract the comparison arguments from it. */
2950 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2951 return simplify_relational_operation (code, mode, VOIDmode,
2952 XEXP (op0, 0), XEXP (op0, 1));
2953
2954 if (mode == VOIDmode
2955 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2956 || CC0_P (op0))
2957 return NULL_RTX;
2958
2959 trueop0 = avoid_constant_pool_reference (op0);
2960 trueop1 = avoid_constant_pool_reference (op1);
2961 return simplify_relational_operation_1 (code, mode, cmp_mode,
2962 trueop0, trueop1);
2963 }
2964
2965 /* This part of simplify_relational_operation is only used when CMP_MODE
2966 is not in class MODE_CC (i.e. it is a real comparison).
2967
2968 MODE is the mode of the result, while CMP_MODE specifies the mode in
2969 which the comparison is done, so it is the mode of the operands.  */
2970
2971 static rtx
2972 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2973 enum machine_mode cmp_mode, rtx op0, rtx op1)
2974 {
2975 enum rtx_code op0code = GET_CODE (op0);
2976
2977 if (GET_CODE (op1) == CONST_INT)
2978 {
2979 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2980 {
2981 /* If op0 is a comparison, extract the comparison arguments from it. */
2982 if (code == NE)
2983 {
2984 if (GET_MODE (op0) == mode)
2985 return simplify_rtx (op0);
2986 else
2987 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2988 XEXP (op0, 0), XEXP (op0, 1));
2989 }
2990 else if (code == EQ)
2991 {
2992 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2993 if (new_code != UNKNOWN)
2994 return simplify_gen_relational (new_code, mode, VOIDmode,
2995 XEXP (op0, 0), XEXP (op0, 1));
2996 }
2997 }
2998 }
2999
3000 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
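/* For instance, (eq:SI (plus:SI x (const_int 4)) (const_int 10))
   becomes (eq:SI x (const_int 6)).  */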
3001 if ((code == EQ || code == NE)
3002 && (op0code == PLUS || op0code == MINUS)
3003 && CONSTANT_P (op1)
3004 && CONSTANT_P (XEXP (op0, 1))
3005 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3006 {
3007 rtx x = XEXP (op0, 0);
3008 rtx c = XEXP (op0, 1);
3009
3010 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3011 cmp_mode, op1, c);
3012 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3013 }
3014
3015 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3016 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3017 if (code == NE
3018 && op1 == const0_rtx
3019 && GET_MODE_CLASS (mode) == MODE_INT
3020 && cmp_mode != VOIDmode
3021 /* ??? Work-around BImode bugs in the ia64 backend. */
3022 && mode != BImode
3023 && cmp_mode != BImode
3024 && nonzero_bits (op0, cmp_mode) == 1
3025 && STORE_FLAG_VALUE == 1)
3026 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3027 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3028 : lowpart_subreg (mode, op0, cmp_mode);
3029
3030 return NULL_RTX;
3031 }
3032
3033 /* Check if the given comparison (done in the given MODE) is actually a
3034 tautology or a contradiction.
3035 If no simplification is possible, this function returns zero.
3036 Otherwise, it returns either const_true_rtx or const0_rtx. */
3037
3038 rtx
3039 simplify_const_relational_operation (enum rtx_code code,
3040 enum machine_mode mode,
3041 rtx op0, rtx op1)
3042 {
3043 int equal, op0lt, op0ltu, op1lt, op1ltu;
3044 rtx tem;
3045 rtx trueop0;
3046 rtx trueop1;
3047
3048 gcc_assert (mode != VOIDmode
3049 || (GET_MODE (op0) == VOIDmode
3050 && GET_MODE (op1) == VOIDmode));
3051
3052 /* If op0 is a compare, extract the comparison arguments from it. */
3053 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3054 {
3055 op1 = XEXP (op0, 1);
3056 op0 = XEXP (op0, 0);
3057
3058 if (GET_MODE (op0) != VOIDmode)
3059 mode = GET_MODE (op0);
3060 else if (GET_MODE (op1) != VOIDmode)
3061 mode = GET_MODE (op1);
3062 else
3063 return 0;
3064 }
3065
3066 /* We can't simplify MODE_CC values since we don't know what the
3067 actual comparison is. */
3068 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3069 return 0;
3070
3071 /* Make sure the constant is second. */
3072 if (swap_commutative_operands_p (op0, op1))
3073 {
3074 tem = op0, op0 = op1, op1 = tem;
3075 code = swap_condition (code);
3076 }
3077
3078 trueop0 = avoid_constant_pool_reference (op0);
3079 trueop1 = avoid_constant_pool_reference (op1);
3080
3081 /* For integer comparisons of A and B maybe we can simplify A - B and can
3082 then simplify a comparison of that with zero. If A and B are both either
3083 a register or a CONST_INT, this can't help; testing for these cases will
3084 prevent infinite recursion here and speed things up.
3085
3086 If CODE is an unsigned comparison, then we can never do this optimization,
3087 because it gives an incorrect result if the subtraction wraps around zero.
3088 ANSI C defines unsigned operations such that they never overflow, and
3089 thus such cases cannot be ignored; but we cannot do it even for
3090 signed comparisons for languages such as Java, so test flag_wrapv. */
3091
3092 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3093 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3094 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3095 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3096 /* We cannot do this for == or != if tem is a nonzero address. */
3097 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3098 && code != GTU && code != GEU && code != LTU && code != LEU)
3099 return simplify_const_relational_operation (signed_condition (code),
3100 mode, tem, const0_rtx);
3101
3102 if (flag_unsafe_math_optimizations && code == ORDERED)
3103 return const_true_rtx;
3104
3105 if (flag_unsafe_math_optimizations && code == UNORDERED)
3106 return const0_rtx;
3107
3108 /* For modes without NaNs, if the two operands are equal, we know the
3109 result except if they have side-effects. */
3110 if (! HONOR_NANS (GET_MODE (trueop0))
3111 && rtx_equal_p (trueop0, trueop1)
3112 && ! side_effects_p (trueop0))
3113 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3114
3115 /* If the operands are floating-point constants, see if we can fold
3116 the result. */
3117 else if (GET_CODE (trueop0) == CONST_DOUBLE
3118 && GET_CODE (trueop1) == CONST_DOUBLE
3119 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3120 {
3121 REAL_VALUE_TYPE d0, d1;
3122
3123 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3124 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3125
3126 /* Comparisons are unordered iff at least one of the values is NaN. */
3127 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3128 switch (code)
3129 {
3130 case UNEQ:
3131 case UNLT:
3132 case UNGT:
3133 case UNLE:
3134 case UNGE:
3135 case NE:
3136 case UNORDERED:
3137 return const_true_rtx;
3138 case EQ:
3139 case LT:
3140 case GT:
3141 case LE:
3142 case GE:
3143 case LTGT:
3144 case ORDERED:
3145 return const0_rtx;
3146 default:
3147 return 0;
3148 }
3149
3150 equal = REAL_VALUES_EQUAL (d0, d1);
3151 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3152 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3153 }
3154
3155 /* Otherwise, see if the operands are both integers. */
3156 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3157 && (GET_CODE (trueop0) == CONST_DOUBLE
3158 || GET_CODE (trueop0) == CONST_INT)
3159 && (GET_CODE (trueop1) == CONST_DOUBLE
3160 || GET_CODE (trueop1) == CONST_INT))
3161 {
3162 int width = GET_MODE_BITSIZE (mode);
3163 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3164 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3165
3166 /* Get the two words comprising each integer constant. */
3167 if (GET_CODE (trueop0) == CONST_DOUBLE)
3168 {
3169 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3170 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3171 }
3172 else
3173 {
3174 l0u = l0s = INTVAL (trueop0);
3175 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3176 }
3177
3178 if (GET_CODE (trueop1) == CONST_DOUBLE)
3179 {
3180 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3181 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3182 }
3183 else
3184 {
3185 l1u = l1s = INTVAL (trueop1);
3186 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3187 }
3188
3189 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3190 we have to sign or zero-extend the values. */
3191 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3192 {
3193 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3194 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3195
3196 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3197 l0s |= ((HOST_WIDE_INT) (-1) << width);
3198
3199 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3200 l1s |= ((HOST_WIDE_INT) (-1) << width);
3201 }
3202 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3203 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3204
3205 equal = (h0u == h1u && l0u == l1u);
3206 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3207 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3208 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3209 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3210 }
3211
3212 /* Otherwise, there are some code-specific tests we can make. */
3213 else
3214 {
3215 /* Optimize comparisons with upper and lower bounds. */
3216 if (SCALAR_INT_MODE_P (mode)
3217 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3218 {
3219 rtx mmin, mmax;
3220 int sign;
3221
3222 if (code == GEU
3223 || code == LEU
3224 || code == GTU
3225 || code == LTU)
3226 sign = 0;
3227 else
3228 sign = 1;
3229
3230 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3231
3232 tem = NULL_RTX;
3233 switch (code)
3234 {
3235 case GEU:
3236 case GE:
3237 /* x >= min is always true. */
3238 if (rtx_equal_p (trueop1, mmin))
3239 tem = const_true_rtx;
3240 else
3241 break;
3242
3243 case LEU:
3244 case LE:
3245 /* x <= max is always true. */
3246 if (rtx_equal_p (trueop1, mmax))
3247 tem = const_true_rtx;
3248 break;
3249
3250 case GTU:
3251 case GT:
3252 /* x > max is always false. */
3253 if (rtx_equal_p (trueop1, mmax))
3254 tem = const0_rtx;
3255 break;
3256
3257 case LTU:
3258 case LT:
3259 /* x < min is always false. */
3260 if (rtx_equal_p (trueop1, mmin))
3261 tem = const0_rtx;
3262 break;
3263
3264 default:
3265 break;
3266 }
3267 if (tem == const0_rtx
3268 || tem == const_true_rtx)
3269 return tem;
3270 }
3271
3272 switch (code)
3273 {
3274 case EQ:
3275 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3276 return const0_rtx;
3277 break;
3278
3279 case NE:
3280 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3281 return const_true_rtx;
3282 break;
3283
3284 case LT:
3285 /* Optimize abs(x) < 0.0. */
3286 if (trueop1 == CONST0_RTX (mode)
3287 && !HONOR_SNANS (mode)
3288 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3289 {
3290 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3291 : trueop0;
3292 if (GET_CODE (tem) == ABS)
3293 return const0_rtx;
3294 }
3295 break;
3296
3297 case GE:
3298 /* Optimize abs(x) >= 0.0. */
3299 if (trueop1 == CONST0_RTX (mode)
3300 && !HONOR_NANS (mode)
3301 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3302 {
3303 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3304 : trueop0;
3305 if (GET_CODE (tem) == ABS)
3306 return const_true_rtx;
3307 }
3308 break;
3309
3310 case UNGE:
3311 /* Optimize ! (abs(x) < 0.0). */
3312 if (trueop1 == CONST0_RTX (mode))
3313 {
3314 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3315 : trueop0;
3316 if (GET_CODE (tem) == ABS)
3317 return const_true_rtx;
3318 }
3319 break;
3320
3321 default:
3322 break;
3323 }
3324
3325 return 0;
3326 }
3327
3328 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3329 as appropriate. */
3330 switch (code)
3331 {
3332 case EQ:
3333 case UNEQ:
3334 return equal ? const_true_rtx : const0_rtx;
3335 case NE:
3336 case LTGT:
3337 return ! equal ? const_true_rtx : const0_rtx;
3338 case LT:
3339 case UNLT:
3340 return op0lt ? const_true_rtx : const0_rtx;
3341 case GT:
3342 case UNGT:
3343 return op1lt ? const_true_rtx : const0_rtx;
3344 case LTU:
3345 return op0ltu ? const_true_rtx : const0_rtx;
3346 case GTU:
3347 return op1ltu ? const_true_rtx : const0_rtx;
3348 case LE:
3349 case UNLE:
3350 return equal || op0lt ? const_true_rtx : const0_rtx;
3351 case GE:
3352 case UNGE:
3353 return equal || op1lt ? const_true_rtx : const0_rtx;
3354 case LEU:
3355 return equal || op0ltu ? const_true_rtx : const0_rtx;
3356 case GEU:
3357 return equal || op1ltu ? const_true_rtx : const0_rtx;
3358 case ORDERED:
3359 return const_true_rtx;
3360 case UNORDERED:
3361 return const0_rtx;
3362 default:
3363 gcc_unreachable ();
3364 }
3365 }
3366 \f
3367 /* Simplify CODE, an operation with result mode MODE and three operands,
3368 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3369 a constant. Return 0 if no simplification is possible. */
3370
3371 rtx
3372 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3373 enum machine_mode op0_mode, rtx op0, rtx op1,
3374 rtx op2)
3375 {
3376 unsigned int width = GET_MODE_BITSIZE (mode);
3377
3378 /* VOIDmode means "infinite" precision. */
3379 if (width == 0)
3380 width = HOST_BITS_PER_WIDE_INT;
3381
3382 switch (code)
3383 {
3384 case SIGN_EXTRACT:
3385 case ZERO_EXTRACT:
3386 if (GET_CODE (op0) == CONST_INT
3387 && GET_CODE (op1) == CONST_INT
3388 && GET_CODE (op2) == CONST_INT
3389 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3390 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3391 {
3392 /* Extracting a bit-field from a constant.  */
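/* For instance, assuming !BITS_BIG_ENDIAN, extracting 4 bits at
   position 4 from the constant 0xf0 yields 15 for ZERO_EXTRACT
   and -1 for SIGN_EXTRACT.  */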
3393 HOST_WIDE_INT val = INTVAL (op0);
3394
3395 if (BITS_BIG_ENDIAN)
3396 val >>= (GET_MODE_BITSIZE (op0_mode)
3397 - INTVAL (op2) - INTVAL (op1));
3398 else
3399 val >>= INTVAL (op2);
3400
3401 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3402 {
3403 /* First zero-extend. */
3404 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3405 /* If desired, propagate sign bit. */
3406 if (code == SIGN_EXTRACT
3407 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3408 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3409 }
3410
3411 /* Clear the bits that don't belong in our mode,
3412 unless they and our sign bit are all one.
3413 So we get either a reasonable negative value or a reasonable
3414 unsigned value for this mode. */
3415 if (width < HOST_BITS_PER_WIDE_INT
3416 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3417 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3418 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3419
3420 return gen_int_mode (val, mode);
3421 }
3422 break;
3423
3424 case IF_THEN_ELSE:
3425 if (GET_CODE (op0) == CONST_INT)
3426 return op0 != const0_rtx ? op1 : op2;
3427
3428 /* Convert c ? a : a into "a". */
3429 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3430 return op1;
3431
3432 /* Convert a != b ? a : b into "a". */
3433 if (GET_CODE (op0) == NE
3434 && ! side_effects_p (op0)
3435 && ! HONOR_NANS (mode)
3436 && ! HONOR_SIGNED_ZEROS (mode)
3437 && ((rtx_equal_p (XEXP (op0, 0), op1)
3438 && rtx_equal_p (XEXP (op0, 1), op2))
3439 || (rtx_equal_p (XEXP (op0, 0), op2)
3440 && rtx_equal_p (XEXP (op0, 1), op1))))
3441 return op1;
3442
3443 /* Convert a == b ? a : b into "b". */
3444 if (GET_CODE (op0) == EQ
3445 && ! side_effects_p (op0)
3446 && ! HONOR_NANS (mode)
3447 && ! HONOR_SIGNED_ZEROS (mode)
3448 && ((rtx_equal_p (XEXP (op0, 0), op1)
3449 && rtx_equal_p (XEXP (op0, 1), op2))
3450 || (rtx_equal_p (XEXP (op0, 0), op2)
3451 && rtx_equal_p (XEXP (op0, 1), op1))))
3452 return op2;
3453
3454 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3455 {
3456 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3457 ? GET_MODE (XEXP (op0, 1))
3458 : GET_MODE (XEXP (op0, 0)));
3459 rtx temp;
3460
3461 /* Look for happy constants in op1 and op2. */
3462 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3463 {
3464 HOST_WIDE_INT t = INTVAL (op1);
3465 HOST_WIDE_INT f = INTVAL (op2);
3466
3467 if (t == STORE_FLAG_VALUE && f == 0)
3468 code = GET_CODE (op0);
3469 else if (t == 0 && f == STORE_FLAG_VALUE)
3470 {
3471 enum rtx_code tmp;
3472 tmp = reversed_comparison_code (op0, NULL_RTX);
3473 if (tmp == UNKNOWN)
3474 break;
3475 code = tmp;
3476 }
3477 else
3478 break;
3479
3480 return simplify_gen_relational (code, mode, cmp_mode,
3481 XEXP (op0, 0), XEXP (op0, 1));
3482 }
3483
3484 if (cmp_mode == VOIDmode)
3485 cmp_mode = op0_mode;
3486 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3487 cmp_mode, XEXP (op0, 0),
3488 XEXP (op0, 1));
3489
3490 /* See if any simplifications were possible. */
3491 if (temp)
3492 {
3493 if (GET_CODE (temp) == CONST_INT)
3494 return temp == const0_rtx ? op2 : op1;
3495 else if (temp)
3496 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3497 }
3498 }
3499 break;
3500
3501 case VEC_MERGE:
3502 gcc_assert (GET_MODE (op0) == mode);
3503 gcc_assert (GET_MODE (op1) == mode);
3504 gcc_assert (VECTOR_MODE_P (mode));
3505 op2 = avoid_constant_pool_reference (op2);
3506 if (GET_CODE (op2) == CONST_INT)
3507 {
3508 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3509 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3510 int mask = (1 << n_elts) - 1;
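/* Bit I of op2 selects element I from op0 when set and from op1
   when clear; e.g. for a four-element vector, op2 == 5 takes
   elements 0 and 2 from op0 and elements 1 and 3 from op1.  */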
3511
3512 if (!(INTVAL (op2) & mask))
3513 return op1;
3514 if ((INTVAL (op2) & mask) == mask)
3515 return op0;
3516
3517 op0 = avoid_constant_pool_reference (op0);
3518 op1 = avoid_constant_pool_reference (op1);
3519 if (GET_CODE (op0) == CONST_VECTOR
3520 && GET_CODE (op1) == CONST_VECTOR)
3521 {
3522 rtvec v = rtvec_alloc (n_elts);
3523 unsigned int i;
3524
3525 for (i = 0; i < n_elts; i++)
3526 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3527 ? CONST_VECTOR_ELT (op0, i)
3528 : CONST_VECTOR_ELT (op1, i));
3529 return gen_rtx_CONST_VECTOR (mode, v);
3530 }
3531 }
3532 break;
3533
3534 default:
3535 gcc_unreachable ();
3536 }
3537
3538 return 0;
3539 }
3540
3541 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3542 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3543
3544 Works by unpacking OP into a collection of 8-bit values
3545 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3546 and then repacking them again for OUTERMODE. */
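/* For example, on a little-endian target, taking an SImode subreg at
   byte 0 of a DImode constant selects the four low-order bytes of the
   unpacked value and repacks them as an SImode constant.  */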
3547
3548 static rtx
3549 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3550 enum machine_mode innermode, unsigned int byte)
3551 {
3552 /* We support up to 512-bit values (for V8DFmode). */
3553 enum {
3554 max_bitsize = 512,
3555 value_bit = 8,
3556 value_mask = (1 << value_bit) - 1
3557 };
3558 unsigned char value[max_bitsize / value_bit];
3559 int value_start;
3560 int i;
3561 int elem;
3562
3563 int num_elem;
3564 rtx * elems;
3565 int elem_bitsize;
3566 rtx result_s;
3567 rtvec result_v = NULL;
3568 enum mode_class outer_class;
3569 enum machine_mode outer_submode;
3570
3571 /* Some ports misuse CCmode. */
3572 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3573 return op;
3574
3575 /* We have no way to represent a complex constant at the rtl level. */
3576 if (COMPLEX_MODE_P (outermode))
3577 return NULL_RTX;
3578
3579 /* Unpack the value. */
3580
3581 if (GET_CODE (op) == CONST_VECTOR)
3582 {
3583 num_elem = CONST_VECTOR_NUNITS (op);
3584 elems = &CONST_VECTOR_ELT (op, 0);
3585 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3586 }
3587 else
3588 {
3589 num_elem = 1;
3590 elems = &op;
3591 elem_bitsize = max_bitsize;
3592 }
3593 /* If this asserts, it is too complicated; reducing value_bit may help. */
3594 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3595 /* I don't know how to handle endianness of sub-units. */
3596 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3597
3598 for (elem = 0; elem < num_elem; elem++)
3599 {
3600 unsigned char * vp;
3601 rtx el = elems[elem];
3602
3603 /* Vectors are kept in target memory order. (This is probably
3604 a mistake.) */
3605 {
3606 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3607 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3608 / BITS_PER_UNIT);
3609 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3610 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3611 unsigned bytele = (subword_byte % UNITS_PER_WORD
3612 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3613 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3614 }
3615
3616 switch (GET_CODE (el))
3617 {
3618 case CONST_INT:
3619 for (i = 0;
3620 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3621 i += value_bit)
3622 *vp++ = INTVAL (el) >> i;
3623 /* CONST_INTs are always logically sign-extended. */
3624 for (; i < elem_bitsize; i += value_bit)
3625 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3626 break;
3627
3628 case CONST_DOUBLE:
3629 if (GET_MODE (el) == VOIDmode)
3630 {
3631 /* If this triggers, someone should have generated a
3632 CONST_INT instead. */
3633 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3634
3635 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3636 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3637 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3638 {
3639 *vp++
3640 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3641 i += value_bit;
3642 }
3643 /* It shouldn't matter what's done here, so fill it with
3644 zero. */
3645 for (; i < elem_bitsize; i += value_bit)
3646 *vp++ = 0;
3647 }
3648 else
3649 {
3650 long tmp[max_bitsize / 32];
3651 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3652
3653 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
3654 gcc_assert (bitsize <= elem_bitsize);
3655 gcc_assert (bitsize % value_bit == 0);
3656
3657 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3658 GET_MODE (el));
3659
3660 /* real_to_target produces its result in words affected by
3661 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3662 and use WORDS_BIG_ENDIAN instead; see the documentation
3663 of SUBREG in rtl.texi. */
3664 for (i = 0; i < bitsize; i += value_bit)
3665 {
3666 int ibase;
3667 if (WORDS_BIG_ENDIAN)
3668 ibase = bitsize - 1 - i;
3669 else
3670 ibase = i;
3671 *vp++ = tmp[ibase / 32] >> i % 32;
3672 }
3673
3674 /* It shouldn't matter what's done here, so fill it with
3675 zero. */
3676 for (; i < elem_bitsize; i += value_bit)
3677 *vp++ = 0;
3678 }
3679 break;
3680
3681 default:
3682 gcc_unreachable ();
3683 }
3684 }
3685
3686 /* Now, pick the right byte to start with. */
3687 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3688 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3689 will already have offset 0. */
3690 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3691 {
3692 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3693 - byte);
3694 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3695 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3696 byte = (subword_byte % UNITS_PER_WORD
3697 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3698 }
3699
3700 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3701 so if it's become negative it will instead be very large.) */
3702 gcc_assert (byte < GET_MODE_SIZE (innermode));
3703
3704 /* Convert from bytes to chunks of size value_bit. */
3705 value_start = byte * (BITS_PER_UNIT / value_bit);
3706
3707 /* Re-pack the value. */
3708
3709 if (VECTOR_MODE_P (outermode))
3710 {
3711 num_elem = GET_MODE_NUNITS (outermode);
3712 result_v = rtvec_alloc (num_elem);
3713 elems = &RTVEC_ELT (result_v, 0);
3714 outer_submode = GET_MODE_INNER (outermode);
3715 }
3716 else
3717 {
3718 num_elem = 1;
3719 elems = &result_s;
3720 outer_submode = outermode;
3721 }
3722
3723 outer_class = GET_MODE_CLASS (outer_submode);
3724 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3725
3726 gcc_assert (elem_bitsize % value_bit == 0);
3727 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3728
3729 for (elem = 0; elem < num_elem; elem++)
3730 {
3731 unsigned char *vp;
3732
3733 /* Vectors are stored in target memory order. (This is probably
3734 a mistake.) */
3735 {
3736 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3737 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3738 / BITS_PER_UNIT);
3739 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3740 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3741 unsigned bytele = (subword_byte % UNITS_PER_WORD
3742 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3743 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3744 }
3745
3746 switch (outer_class)
3747 {
3748 case MODE_INT:
3749 case MODE_PARTIAL_INT:
3750 {
3751 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3752
3753 for (i = 0;
3754 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3755 i += value_bit)
3756 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3757 for (; i < elem_bitsize; i += value_bit)
3758 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3759 << (i - HOST_BITS_PER_WIDE_INT));
3760
3761 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3762 know why. */
3763 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3764 elems[elem] = gen_int_mode (lo, outer_submode);
3765 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
3766 elems[elem] = immed_double_const (lo, hi, outer_submode);
3767 else
3768 return NULL_RTX;
3769 }
3770 break;
3771
3772 case MODE_FLOAT:
3773 {
3774 REAL_VALUE_TYPE r;
3775 long tmp[max_bitsize / 32];
3776
3777 /* real_from_target wants its input in words affected by
3778 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3779 and use WORDS_BIG_ENDIAN instead; see the documentation
3780 of SUBREG in rtl.texi. */
3781 for (i = 0; i < max_bitsize / 32; i++)
3782 tmp[i] = 0;
3783 for (i = 0; i < elem_bitsize; i += value_bit)
3784 {
3785 int ibase;
3786 if (WORDS_BIG_ENDIAN)
3787 ibase = elem_bitsize - 1 - i;
3788 else
3789 ibase = i;
3790 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3791 }
3792
3793 real_from_target (&r, tmp, outer_submode);
3794 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3795 }
3796 break;
3797
3798 default:
3799 gcc_unreachable ();
3800 }
3801 }
3802 if (VECTOR_MODE_P (outermode))
3803 return gen_rtx_CONST_VECTOR (outermode, result_v);
3804 else
3805 return result_s;
3806 }
3807
3808 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3809 Return 0 if no simplifications are possible. */
3810 rtx
3811 simplify_subreg (enum machine_mode outermode, rtx op,
3812 enum machine_mode innermode, unsigned int byte)
3813 {
3814 /* Little bit of sanity checking. */
3815 gcc_assert (innermode != VOIDmode);
3816 gcc_assert (outermode != VOIDmode);
3817 gcc_assert (innermode != BLKmode);
3818 gcc_assert (outermode != BLKmode);
3819
3820 gcc_assert (GET_MODE (op) == innermode
3821 || GET_MODE (op) == VOIDmode);
3822
3823 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3824 gcc_assert (byte < GET_MODE_SIZE (innermode));
3825
3826 if (outermode == innermode && !byte)
3827 return op;
3828
3829 if (GET_CODE (op) == CONST_INT
3830 || GET_CODE (op) == CONST_DOUBLE
3831 || GET_CODE (op) == CONST_VECTOR)
3832 return simplify_immed_subreg (outermode, op, innermode, byte);
3833
3834 /* Changing mode twice with SUBREG => just change it once,
3835 or not at all if changing back to op's starting mode. */
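/* For instance, (subreg:QI (subreg:HI (reg:SI r) 0) 0)
   collapses to (subreg:QI (reg:SI r) 0).  */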
3836 if (GET_CODE (op) == SUBREG)
3837 {
3838 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3839 int final_offset = byte + SUBREG_BYTE (op);
3840 rtx newx;
3841
3842 if (outermode == innermostmode
3843 && byte == 0 && SUBREG_BYTE (op) == 0)
3844 return SUBREG_REG (op);
3845
3846 /* The SUBREG_BYTE represents the offset, as if the value were stored
3847 in memory. An irritating exception is the paradoxical subreg, where
3848 we define SUBREG_BYTE to be 0. On big-endian machines, this
3849 value should be negative. For a moment, undo this exception. */
3850 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3851 {
3852 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3853 if (WORDS_BIG_ENDIAN)
3854 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3855 if (BYTES_BIG_ENDIAN)
3856 final_offset += difference % UNITS_PER_WORD;
3857 }
3858 if (SUBREG_BYTE (op) == 0
3859 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3860 {
3861 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3862 if (WORDS_BIG_ENDIAN)
3863 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3864 if (BYTES_BIG_ENDIAN)
3865 final_offset += difference % UNITS_PER_WORD;
3866 }
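      /* Worked example, assuming a big-endian target with UNITS_PER_WORD == 4:
         when OP is the paradoxical (subreg:DI (reg:SI) 0), the second block
         above computes difference == 4 - 8 == -4 and adjusts FINAL_OFFSET by
         (-4 / 4) * 4 + (-4 % 4) == -4, undoing the zero SUBREG_BYTE that the
         paradoxical subreg imposes.  */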
3867
3868 /* See whether resulting subreg will be paradoxical. */
3869 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3870 {
3871 /* In nonparadoxical subregs we can't handle negative offsets. */
3872 if (final_offset < 0)
3873 return NULL_RTX;
3874 /* Bail out in case resulting subreg would be incorrect. */
3875 if (final_offset % GET_MODE_SIZE (outermode)
3876 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3877 return NULL_RTX;
3878 }
3879 else
3880 {
3881 int offset = 0;
3882 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3883
3884 /* In a paradoxical subreg, see if we are still looking at the lower part.
3885 If so, our SUBREG_BYTE will be 0. */
3886 if (WORDS_BIG_ENDIAN)
3887 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3888 if (BYTES_BIG_ENDIAN)
3889 offset += difference % UNITS_PER_WORD;
3890 if (offset == final_offset)
3891 final_offset = 0;
3892 else
3893 return NULL_RTX;
3894 }
3895
3896 /* Recurse for further possible simplifications. */
3897 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3898 final_offset);
3899 if (newx)
3900 return newx;
3901 if (validate_subreg (outermode, innermostmode,
3902 SUBREG_REG (op), final_offset))
3903 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3904 return NULL_RTX;
3905 }
3906
3907 /* SUBREG of a hard register => just change the register number
3908 and/or mode. If the hard register is not valid in that mode,
3909 suppress this simplification. If the hard register is the stack,
3910 frame, or argument pointer, leave this as a SUBREG. */
3911
3912 if (REG_P (op)
3913 && REGNO (op) < FIRST_PSEUDO_REGISTER
3914 #ifdef CANNOT_CHANGE_MODE_CLASS
3915 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3916 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3917 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3918 #endif
3919 && ((reload_completed && !frame_pointer_needed)
3920 || (REGNO (op) != FRAME_POINTER_REGNUM
3921 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3922 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3923 #endif
3924 ))
3925 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3926 && REGNO (op) != ARG_POINTER_REGNUM
3927 #endif
3928 && REGNO (op) != STACK_POINTER_REGNUM
3929 && subreg_offset_representable_p (REGNO (op), innermode,
3930 byte, outermode))
3931 {
3932 unsigned int regno = REGNO (op);
3933 unsigned int final_regno
3934 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3935
3936 /* ??? We do allow it if the current REG is not valid for
3937 its mode. This is a kludge to work around how float/complex
3938 arguments are passed on 32-bit SPARC and should be fixed. */
3939 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3940 || ! HARD_REGNO_MODE_OK (regno, innermode))
3941 {
3942 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3943
3944 /* Propagate the original regno. We don't have any way to specify
3945 the offset inside the original regno, so do so only for the lowpart.
3946 The information is used only by alias analysis, which cannot
3947 grok partial registers anyway. */
3948
3949 if (subreg_lowpart_offset (outermode, innermode) == byte)
3950 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3951 return x;
3952 }
3953 }
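  /* For example, (subreg:SI (reg:DI x) 4) of an ordinary hard register x
     becomes a plain (reg:SI y), where y is x plus whatever
     subreg_regno_offset reports for byte 4 on the target, provided
     HARD_REGNO_MODE_OK accepts the resulting register.  */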
3954
3955 /* If we have a SUBREG of a MEM, make a new MEM and try replacing the
3956 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3957 or if we would be widening it. */
3959
3960 if (MEM_P (op)
3961 && ! mode_dependent_address_p (XEXP (op, 0))
3962 /* Allow splitting of volatile memory references in case we don't
3963 have an instruction to move the whole thing. */
3964 && (! MEM_VOLATILE_P (op)
3965 || ! have_insn_for (SET, innermode))
3966 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3967 return adjust_address_nv (op, outermode, byte);
3968
3969 /* Handle complex values represented as CONCAT
3970 of real and imaginary part. */
3971 if (GET_CODE (op) == CONCAT)
3972 {
3973 unsigned int inner_size, final_offset;
3974 rtx part, res;
3975
3976 inner_size = GET_MODE_UNIT_SIZE (innermode);
3977 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3978 final_offset = byte % inner_size;
3979 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3980 return NULL_RTX;
3981
3982 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3983 if (res)
3984 return res;
3985 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3986 return gen_rtx_SUBREG (outermode, part, final_offset);
3987 return NULL_RTX;
3988 }
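  /* For instance, with a 4-byte SFmode, (subreg:SF (concat:SC (reg:SF a)
     (reg:SF b)) 4) selects the imaginary part and simplifies directly to
     (reg:SF b).  */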
3989
3990 /* Optimize SUBREG truncations of zero and sign extended values. */
3991 if ((GET_CODE (op) == ZERO_EXTEND
3992 || GET_CODE (op) == SIGN_EXTEND)
3993 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3994 {
3995 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3996
3997 /* If we're requesting the lowpart of a zero or sign extension,
3998 there are three possibilities. If the outermode is the same
3999 as the origmode, we can omit both the extension and the subreg.
4000 If the outermode is not larger than the origmode, we can apply
4001 the truncation without the extension. Finally, if the outermode
4002 is larger than the origmode, but both are integer modes, we
4003 can just extend to the appropriate mode. */
4004 if (bitpos == 0)
4005 {
4006 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4007 if (outermode == origmode)
4008 return XEXP (op, 0);
4009 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4010 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4011 subreg_lowpart_offset (outermode,
4012 origmode));
4013 if (SCALAR_INT_MODE_P (outermode))
4014 return simplify_gen_unary (GET_CODE (op), outermode,
4015 XEXP (op, 0), origmode);
4016 }
4017
4018 /* A SUBREG resulting from a zero extension may fold to zero if
4019 it extracts higher bits than the ZERO_EXTEND's source bits. */
4020 if (GET_CODE (op) == ZERO_EXTEND
4021 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4022 return CONST0_RTX (outermode);
4023 }
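  /* For instance (assuming byte 0 is the lowpart, as on little-endian
     targets), (subreg:QI (zero_extend:SI (reg:QI x)) 0) folds to (reg:QI x)
     itself, while (subreg:HI (zero_extend:SI (reg:QI x)) 0) becomes
     (zero_extend:HI (reg:QI x)).  */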
4024
4025 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4026 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4027 the outer subreg is effectively a truncation to the original mode. */
4028 if ((GET_CODE (op) == LSHIFTRT
4029 || GET_CODE (op) == ASHIFTRT)
4030 && SCALAR_INT_MODE_P (outermode)
4031 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
4032 to avoid the possibility that an outer LSHIFTRT shifts by more
4033 than the sign extension's sign_bit_copies and introduces zeros
4034 into the high bits of the result. */
4035 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4036 && GET_CODE (XEXP (op, 1)) == CONST_INT
4037 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4038 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4039 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4040 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4041 return simplify_gen_binary (ASHIFTRT, outermode,
4042 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
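  /* For instance, with C == 3 and byte 0 as the lowpart,
     (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 3)) 0)
     becomes (ashiftrt:QI (reg:QI x) (const_int 3)).  */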
4043
4044 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4045 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4046 the outer subreg is effectively a truncation to the original mode. */
4047 if ((GET_CODE (op) == LSHIFTRT
4048 || GET_CODE (op) == ASHIFTRT)
4049 && SCALAR_INT_MODE_P (outermode)
4050 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4051 && GET_CODE (XEXP (op, 1)) == CONST_INT
4052 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4053 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4054 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4055 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4056 return simplify_gen_binary (LSHIFTRT, outermode,
4057 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4058
4059 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4060 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4061 the outer subreg is effectively a truncation to the original mode. */
4062 if (GET_CODE (op) == ASHIFT
4063 && SCALAR_INT_MODE_P (outermode)
4064 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4065 && GET_CODE (XEXP (op, 1)) == CONST_INT
4066 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4067 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4068 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4069 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4070 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4071 return simplify_gen_binary (ASHIFT, outermode,
4072 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4073
4074 return NULL_RTX;
4075 }
4076
4077 /* Make a SUBREG operation or equivalent if it folds. */
4078
4079 rtx
4080 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4081 enum machine_mode innermode, unsigned int byte)
4082 {
4083 rtx newx;
4084
4085 newx = simplify_subreg (outermode, op, innermode, byte);
4086 if (newx)
4087 return newx;
4088
4089 if (GET_CODE (op) == SUBREG
4090 || GET_CODE (op) == CONCAT
4091 || GET_MODE (op) == VOIDmode)
4092 return NULL_RTX;
4093
4094 if (validate_subreg (outermode, innermode, op, byte))
4095 return gen_rtx_SUBREG (outermode, op, byte);
4096
4097 return NULL_RTX;
4098 }
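/* Typical usage, for illustration: a caller can extract the lowpart of a
   wider value with
     simplify_gen_subreg (SImode, x, DImode,
                          subreg_lowpart_offset (SImode, DImode));
   which either folds X directly or wraps it in a SUBREG when that subreg
   is valid.  */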
4099
4100 /* Simplify X, an rtx expression.
4101
4102 Return the simplified expression or NULL if no simplifications
4103 were possible.
4104
4105 This is the preferred entry point into the simplification routines;
4106 however, we still allow passes to call the more specific routines.
4107
4108 Right now GCC has three (yes, three) major bodies of RTL simplification
4109 code that need to be unified.
4110
4111 1. fold_rtx in cse.c. This code uses various CSE specific
4112 information to aid in RTL simplification.
4113
4114 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4115 it uses combine specific information to aid in RTL
4116 simplification.
4117
4118 3. The routines in this file.
4119
4120
4121 Long term we want to only have one body of simplification code; to
4122 get to that state I recommend the following steps:
4123
4124 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4125 which do not depend on pass-specific state into these routines.
4126
4127 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4128 use this routine whenever possible.
4129
4130 3. Allow for pass dependent state to be provided to these
4131 routines and add simplifications based on the pass dependent
4132 state. Remove code from cse.c & combine.c that becomes
4133 redundant/dead.
4134
4135 It will take time, but ultimately the compiler will be easier to
4136 maintain and improve. It's totally silly that when we add a
4137 simplification, it needs to be added to 4 places (3 for RTL
4138 simplification and 1 for tree simplification). */
4139
4140 rtx
4141 simplify_rtx (rtx x)
4142 {
4143 enum rtx_code code = GET_CODE (x);
4144 enum machine_mode mode = GET_MODE (x);
4145
4146 switch (GET_RTX_CLASS (code))
4147 {
4148 case RTX_UNARY:
4149 return simplify_unary_operation (code, mode,
4150 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4151 case RTX_COMM_ARITH:
4152 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4153 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4154
4155 /* Fall through.... */
4156
4157 case RTX_BIN_ARITH:
4158 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4159
4160 case RTX_TERNARY:
4161 case RTX_BITFIELD_OPS:
4162 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4163 XEXP (x, 0), XEXP (x, 1),
4164 XEXP (x, 2));
4165
4166 case RTX_COMPARE:
4167 case RTX_COMM_COMPARE:
4168 return simplify_relational_operation (code, mode,
4169 ((GET_MODE (XEXP (x, 0))
4170 != VOIDmode)
4171 ? GET_MODE (XEXP (x, 0))
4172 : GET_MODE (XEXP (x, 1))),
4173 XEXP (x, 0),
4174 XEXP (x, 1));
4175
4176 case RTX_EXTRA:
4177 if (code == SUBREG)
4178 return simplify_gen_subreg (mode, SUBREG_REG (x),
4179 GET_MODE (SUBREG_REG (x)),
4180 SUBREG_BYTE (x));
4181 break;
4182
4183 case RTX_OBJ:
4184 if (code == LO_SUM)
4185 {
4186 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4187 if (GET_CODE (XEXP (x, 0)) == HIGH
4188 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4189 return XEXP (x, 1);
4190 }
4191 break;
4192
4193 default:
4194 break;
4195 }
4196 return NULL;
4197 }