[gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "flags.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "recog.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "ggc.h"
42 #include "target.h"
43
44 /* Simplification and canonicalization of RTL. */
45
46 /* Much code operates on (low, high) pairs; the low value is an
47 unsigned wide int, the high value a signed wide int. We
48 occasionally need to sign extend from low to high as if low were a
49 signed wide int. */
50 #define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
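/* For instance, assuming a 64-bit HOST_WIDE_INT: a low word of
   0x0000000000000005 extends to a high word of 0, while a low word of
   0xfffffffffffffffb (-5 when viewed as signed) extends to a high word
   of -1 (all ones).  */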
52
53 static rtx neg_const_int (enum machine_mode, rtx);
54 static bool plus_minus_operand_p (rtx);
55 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
66 \f
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
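/* For example, in 32-bit SImode the most negative value is -2147483648;
   negating it overflows, and gen_int_mode truncates the result back to
   -2147483648 rather than producing an out-of-range constant.  */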
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
71 {
72 return gen_int_mode (- INTVAL (i), mode);
73 }
74
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
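/* For example, in 32-bit SImode the only value accepted is the constant
   whose low 32 bits are 0x80000000, i.e. (const_int -2147483648) on a
   host with a 64-bit HOST_WIDE_INT.  */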
77
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
80 {
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
83
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
86
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
90
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
97 {
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
100 }
101 else
102 return false;
103
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
107 }
108 \f
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
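/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, const1_rtx)
   folds to (const_int 2), while PLUS of (const_int 2) and a register does
   not fold but is returned with the constant second:
   (plus (reg X) (const_int 2)).  */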
111
112 rtx
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
115 {
116 rtx tem;
117
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
122
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0, op1))
126 tem = op0, op0 = op1, op1 = tem;
127
128 return gen_rtx_fmt_ee (code, mode, op0, op1);
129 }
130 \f
131 /* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
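/* For example, a (mem (symbol_ref ...)) that addresses a DFmode entry in
   the constant pool is replaced by the CONST_DOUBLE stored there, so that
   later folding can see the actual value.  */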
133 rtx
134 avoid_constant_pool_reference (rtx x)
135 {
136 rtx c, tmp, addr;
137 enum machine_mode cmode;
138 HOST_WIDE_INT offset = 0;
139
140 switch (GET_CODE (x))
141 {
142 case MEM:
143 break;
144
145 case FLOAT_EXTEND:
146 /* Handle float extensions of constant pool references. */
147 tmp = XEXP (x, 0);
148 c = avoid_constant_pool_reference (tmp);
149 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 {
151 REAL_VALUE_TYPE d;
152
153 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 }
156 return x;
157
158 default:
159 return x;
160 }
161
162 if (GET_MODE (x) == BLKmode)
163 return x;
164
165 addr = XEXP (x, 0);
166
167 /* Call target hook to avoid the effects of -fpic etc.... */
168 addr = targetm.delegitimize_address (addr);
169
170 /* Split the address into a base and integer offset. */
171 if (GET_CODE (addr) == CONST
172 && GET_CODE (XEXP (addr, 0)) == PLUS
173 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
174 {
175 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
176 addr = XEXP (XEXP (addr, 0), 0);
177 }
178
179 if (GET_CODE (addr) == LO_SUM)
180 addr = XEXP (addr, 1);
181
182 /* If this is a constant pool reference, we can turn it into its
183 constant and hope that simplifications happen. */
184 if (GET_CODE (addr) == SYMBOL_REF
185 && CONSTANT_POOL_ADDRESS_P (addr))
186 {
187 c = get_pool_constant (addr);
188 cmode = get_pool_mode (addr);
189
190 /* If we're accessing the constant in a different mode than it was
191 originally stored, attempt to fix that up via subreg simplifications.
192 If that fails we have no choice but to return the original memory. */
193 if (offset != 0 || cmode != GET_MODE (x))
194 {
195 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
196 if (tem && CONSTANT_P (tem))
197 return tem;
198 }
199 else
200 return c;
201 }
202
203 return x;
204 }
205 \f
206 /* Make a unary operation by first seeing if it folds and otherwise making
207 the specified operation. */
208
209 rtx
210 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
211 enum machine_mode op_mode)
212 {
213 rtx tem;
214
215 /* If this simplifies, use it. */
216 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
217 return tem;
218
219 return gen_rtx_fmt_e (code, mode, op);
220 }
221
222 /* Likewise for ternary operations. */
223
224 rtx
225 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
226 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
227 {
228 rtx tem;
229
230 /* If this simplifies, use it. */
231 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
232 op0, op1, op2)))
233 return tem;
234
235 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
236 }
237
238 /* Likewise, for relational operations.
 239 CMP_MODE specifies the mode in which the comparison is done. */
240
241 rtx
242 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
243 enum machine_mode cmp_mode, rtx op0, rtx op1)
244 {
245 rtx tem;
246
247 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
248 op0, op1)))
249 return tem;
250
251 return gen_rtx_fmt_ee (code, mode, op0, op1);
252 }
253 \f
254 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
255 resulting RTX. Return a new RTX which is as simplified as possible. */
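/* For example, replacing (reg 100) with (const_int 4) in
   (plus (reg 100) (const_int 1)) does not just substitute; the PLUS is
   re-simplified and (const_int 5) is returned.  */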
256
257 rtx
258 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
259 {
260 enum rtx_code code = GET_CODE (x);
261 enum machine_mode mode = GET_MODE (x);
262 enum machine_mode op_mode;
263 rtx op0, op1, op2;
264
265 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
266 to build a new expression substituting recursively. If we can't do
267 anything, return our input. */
268
269 if (x == old_rtx)
270 return new_rtx;
271
272 switch (GET_RTX_CLASS (code))
273 {
274 case RTX_UNARY:
275 op0 = XEXP (x, 0);
276 op_mode = GET_MODE (op0);
277 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
278 if (op0 == XEXP (x, 0))
279 return x;
280 return simplify_gen_unary (code, mode, op0, op_mode);
281
282 case RTX_BIN_ARITH:
283 case RTX_COMM_ARITH:
284 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
285 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
287 return x;
288 return simplify_gen_binary (code, mode, op0, op1);
289
290 case RTX_COMPARE:
291 case RTX_COMM_COMPARE:
292 op0 = XEXP (x, 0);
293 op1 = XEXP (x, 1);
294 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
295 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
296 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
297 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
298 return x;
299 return simplify_gen_relational (code, mode, op_mode, op0, op1);
300
301 case RTX_TERNARY:
302 case RTX_BITFIELD_OPS:
303 op0 = XEXP (x, 0);
304 op_mode = GET_MODE (op0);
305 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
306 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
307 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
308 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
309 return x;
310 if (op_mode == VOIDmode)
311 op_mode = GET_MODE (op0);
312 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
313
314 case RTX_EXTRA:
315 /* The only case we try to handle is a SUBREG. */
316 if (code == SUBREG)
317 {
318 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
319 if (op0 == SUBREG_REG (x))
320 return x;
321 op0 = simplify_gen_subreg (GET_MODE (x), op0,
322 GET_MODE (SUBREG_REG (x)),
323 SUBREG_BYTE (x));
324 return op0 ? op0 : x;
325 }
326 break;
327
328 case RTX_OBJ:
329 if (code == MEM)
330 {
331 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
332 if (op0 == XEXP (x, 0))
333 return x;
334 return replace_equiv_address_nv (x, op0);
335 }
336 else if (code == LO_SUM)
337 {
338 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
339 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
340
341 /* (lo_sum (high x) x) -> x */
342 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
343 return op1;
344
345 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
346 return x;
347 return gen_rtx_LO_SUM (mode, op0, op1);
348 }
349 else if (code == REG)
350 {
351 if (rtx_equal_p (x, old_rtx))
352 return new_rtx;
353 }
354 break;
355
356 default:
357 break;
358 }
359 return x;
360 }
361 \f
362 /* Try to simplify a unary operation CODE whose output mode is to be
363 MODE with input operand OP whose mode was originally OP_MODE.
364 Return zero if no simplification can be made. */
365 rtx
366 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
367 rtx op, enum machine_mode op_mode)
368 {
369 rtx trueop, tem;
370
371 if (GET_CODE (op) == CONST)
372 op = XEXP (op, 0);
373
374 trueop = avoid_constant_pool_reference (op);
375
376 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
377 if (tem)
378 return tem;
379
380 return simplify_unary_operation_1 (code, mode, op);
381 }
382
383 /* Perform some simplifications we can do even if the operands
384 aren't constant. */
385 static rtx
386 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
387 {
388 enum rtx_code reversed;
389 rtx temp;
390
391 switch (code)
392 {
393 case NOT:
394 /* (not (not X)) == X. */
395 if (GET_CODE (op) == NOT)
396 return XEXP (op, 0);
397
398 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
399 comparison is all ones. */
400 if (COMPARISON_P (op)
401 && (mode == BImode || STORE_FLAG_VALUE == -1)
402 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
403 return simplify_gen_relational (reversed, mode, VOIDmode,
404 XEXP (op, 0), XEXP (op, 1));
405
406 /* (not (plus X -1)) can become (neg X). */
407 if (GET_CODE (op) == PLUS
408 && XEXP (op, 1) == constm1_rtx)
409 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
410
411 /* Similarly, (not (neg X)) is (plus X -1). */
412 if (GET_CODE (op) == NEG)
413 return plus_constant (XEXP (op, 0), -1);
414
415 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
416 if (GET_CODE (op) == XOR
417 && GET_CODE (XEXP (op, 1)) == CONST_INT
418 && (temp = simplify_unary_operation (NOT, mode,
419 XEXP (op, 1), mode)) != 0)
420 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
421
422 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
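/* For example, in 32-bit SImode, (not (plus X (const_int -2147483648)))
   becomes (xor X (const_int 2147483647)), since adding the sign bit
   only flips the top bit.  */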
423 if (GET_CODE (op) == PLUS
424 && GET_CODE (XEXP (op, 1)) == CONST_INT
425 && mode_signbit_p (mode, XEXP (op, 1))
426 && (temp = simplify_unary_operation (NOT, mode,
427 XEXP (op, 1), mode)) != 0)
428 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
429
430
431 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
432 operands other than 1, but that is not valid. We could do a
433 similar simplification for (not (lshiftrt C X)) where C is
434 just the sign bit, but this doesn't seem common enough to
435 bother with. */
436 if (GET_CODE (op) == ASHIFT
437 && XEXP (op, 0) == const1_rtx)
438 {
439 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
440 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
441 }
442
443 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
444 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
445 so we can perform the above simplification. */
446
447 if (STORE_FLAG_VALUE == -1
448 && GET_CODE (op) == ASHIFTRT
449 && GET_CODE (XEXP (op, 1)) == CONST_INT
450 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
451 return simplify_gen_relational (GE, mode, VOIDmode,
452 XEXP (op, 0), const0_rtx);
453
454
455 if (GET_CODE (op) == SUBREG
456 && subreg_lowpart_p (op)
457 && (GET_MODE_SIZE (GET_MODE (op))
458 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
459 && GET_CODE (SUBREG_REG (op)) == ASHIFT
460 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
461 {
462 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
463 rtx x;
464
465 x = gen_rtx_ROTATE (inner_mode,
466 simplify_gen_unary (NOT, inner_mode, const1_rtx,
467 inner_mode),
468 XEXP (SUBREG_REG (op), 1));
469 return rtl_hooks.gen_lowpart_no_emit (mode, x);
470 }
471
472 /* Apply De Morgan's laws to reduce number of patterns for machines
473 with negating logical insns (and-not, nand, etc.). If result has
474 only one NOT, put it first, since that is how the patterns are
475 coded. */
476
477 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
478 {
479 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
480 enum machine_mode op_mode;
481
482 op_mode = GET_MODE (in1);
483 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
484
485 op_mode = GET_MODE (in2);
486 if (op_mode == VOIDmode)
487 op_mode = mode;
488 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
489
490 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
491 {
492 rtx tem = in2;
493 in2 = in1; in1 = tem;
494 }
495
496 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
497 mode, in1, in2);
498 }
499 break;
500
501 case NEG:
502 /* (neg (neg X)) == X. */
503 if (GET_CODE (op) == NEG)
504 return XEXP (op, 0);
505
506 /* (neg (plus X 1)) can become (not X). */
507 if (GET_CODE (op) == PLUS
508 && XEXP (op, 1) == const1_rtx)
509 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
510
511 /* Similarly, (neg (not X)) is (plus X 1). */
512 if (GET_CODE (op) == NOT)
513 return plus_constant (XEXP (op, 0), 1);
514
515 /* (neg (minus X Y)) can become (minus Y X). This transformation
516 isn't safe for modes with signed zeros, since if X and Y are
517 both +0, (minus Y X) is the same as (minus X Y). If the
518 rounding mode is towards +infinity (or -infinity) then the two
519 expressions will be rounded differently. */
520 if (GET_CODE (op) == MINUS
521 && !HONOR_SIGNED_ZEROS (mode)
522 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
523 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
524
525 if (GET_CODE (op) == PLUS
526 && !HONOR_SIGNED_ZEROS (mode)
527 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
528 {
529 /* (neg (plus A C)) is simplified to (minus -C A). */
530 if (GET_CODE (XEXP (op, 1)) == CONST_INT
531 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
532 {
533 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
534 if (temp)
535 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
536 }
537
538 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
539 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
540 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
541 }
542
543 /* (neg (mult A B)) becomes (mult (neg A) B).
544 This works even for floating-point values. */
545 if (GET_CODE (op) == MULT
546 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
547 {
548 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
549 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
550 }
551
552 /* NEG commutes with ASHIFT since it is multiplication. Only do
553 this if we can then eliminate the NEG (e.g., if the operand
554 is a constant). */
555 if (GET_CODE (op) == ASHIFT)
556 {
557 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
558 if (temp)
559 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
560 }
561
562 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
563 C is equal to the width of MODE minus 1. */
564 if (GET_CODE (op) == ASHIFTRT
565 && GET_CODE (XEXP (op, 1)) == CONST_INT
566 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
567 return simplify_gen_binary (LSHIFTRT, mode,
568 XEXP (op, 0), XEXP (op, 1));
569
570 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
571 C is equal to the width of MODE minus 1. */
572 if (GET_CODE (op) == LSHIFTRT
573 && GET_CODE (XEXP (op, 1)) == CONST_INT
574 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
575 return simplify_gen_binary (ASHIFTRT, mode,
576 XEXP (op, 0), XEXP (op, 1));
577
578 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
579 if (GET_CODE (op) == XOR
580 && XEXP (op, 1) == const1_rtx
581 && nonzero_bits (XEXP (op, 0), mode) == 1)
582 return plus_constant (XEXP (op, 0), -1);
583
584 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
585 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
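/* For example, if X is SImode and STORE_FLAG_VALUE is 1,
   (neg (lt X (const_int 0))) becomes (ashiftrt X (const_int 31)),
   which is -1 when X is negative and 0 otherwise.  */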
586 if (GET_CODE (op) == LT
587 && XEXP (op, 1) == const0_rtx)
588 {
589 enum machine_mode inner = GET_MODE (XEXP (op, 0));
590 int isize = GET_MODE_BITSIZE (inner);
591 if (STORE_FLAG_VALUE == 1)
592 {
593 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
594 GEN_INT (isize - 1));
595 if (mode == inner)
596 return temp;
597 if (GET_MODE_BITSIZE (mode) > isize)
598 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
599 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
600 }
601 else if (STORE_FLAG_VALUE == -1)
602 {
603 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
604 GEN_INT (isize - 1));
605 if (mode == inner)
606 return temp;
607 if (GET_MODE_BITSIZE (mode) > isize)
608 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
609 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
610 }
611 }
612 break;
613
614 case TRUNCATE:
615 /* We can't handle truncation to a partial integer mode here
616 because we don't know the real bitsize of the partial
617 integer mode. */
618 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
619 break;
620
621 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
622 if ((GET_CODE (op) == SIGN_EXTEND
623 || GET_CODE (op) == ZERO_EXTEND)
624 && GET_MODE (XEXP (op, 0)) == mode)
625 return XEXP (op, 0);
626
627 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
628 (OP:SI foo:SI) if OP is NEG or ABS. */
629 if ((GET_CODE (op) == ABS
630 || GET_CODE (op) == NEG)
631 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
632 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
633 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
634 return simplify_gen_unary (GET_CODE (op), mode,
635 XEXP (XEXP (op, 0), 0), mode);
636
637 /* (truncate:A (subreg:B (truncate:C X) 0)) is
638 (truncate:A X). */
639 if (GET_CODE (op) == SUBREG
640 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
641 && subreg_lowpart_p (op))
642 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
643 GET_MODE (XEXP (SUBREG_REG (op), 0)));
644
645 /* If we know that the value is already truncated, we can
646 replace the TRUNCATE with a SUBREG. Note that this is also
647 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
648 modes we just have to apply a different definition for
649 truncation. But don't do this for an (LSHIFTRT (MULT ...))
650 since this will cause problems with the umulXi3_highpart
651 patterns. */
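/* For example, a DImode value with more than 32 sign-bit copies already
   fits in SImode, so (truncate:SI X:DI) can become the low-part subreg
   (subreg:SI X:DI 0) on a little-endian target.  */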
652 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
653 GET_MODE_BITSIZE (GET_MODE (op)))
654 ? (num_sign_bit_copies (op, GET_MODE (op))
655 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
656 - GET_MODE_BITSIZE (mode)))
657 : truncated_to_mode (mode, op))
658 && ! (GET_CODE (op) == LSHIFTRT
659 && GET_CODE (XEXP (op, 0)) == MULT))
660 return rtl_hooks.gen_lowpart_no_emit (mode, op);
661
662 /* A truncate of a comparison can be replaced with a subreg if
663 STORE_FLAG_VALUE permits. This is like the previous test,
664 but it works even if the comparison is done in a mode larger
665 than HOST_BITS_PER_WIDE_INT. */
666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
667 && COMPARISON_P (op)
668 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
669 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 break;
671
672 case FLOAT_TRUNCATE:
673 if (DECIMAL_FLOAT_MODE_P (mode))
674 break;
675
676 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
677 if (GET_CODE (op) == FLOAT_EXTEND
678 && GET_MODE (XEXP (op, 0)) == mode)
679 return XEXP (op, 0);
680
681 /* (float_truncate:SF (float_truncate:DF foo:XF))
682 = (float_truncate:SF foo:XF).
683 This may eliminate double rounding, so it is unsafe.
684
685 (float_truncate:SF (float_extend:XF foo:DF))
686 = (float_truncate:SF foo:DF).
687
688 (float_truncate:DF (float_extend:XF foo:SF))
 689 = (float_extend:DF foo:SF). */
690 if ((GET_CODE (op) == FLOAT_TRUNCATE
691 && flag_unsafe_math_optimizations)
692 || GET_CODE (op) == FLOAT_EXTEND)
693 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
694 0)))
695 > GET_MODE_SIZE (mode)
696 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
697 mode,
698 XEXP (op, 0), mode);
699
700 /* (float_truncate (float x)) is (float x) */
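/* For example, every 32-bit integer is exactly representable in DFmode
   (53-bit significand), so (float_truncate:SF (float:DF X:SI)) rounds
   only once and equals (float:SF X:SI).  */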
701 if (GET_CODE (op) == FLOAT
702 && (flag_unsafe_math_optimizations
703 || ((unsigned)significand_size (GET_MODE (op))
704 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
705 - num_sign_bit_copies (XEXP (op, 0),
706 GET_MODE (XEXP (op, 0)))))))
707 return simplify_gen_unary (FLOAT, mode,
708 XEXP (op, 0),
709 GET_MODE (XEXP (op, 0)));
710
711 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
712 (OP:SF foo:SF) if OP is NEG or ABS. */
713 if ((GET_CODE (op) == ABS
714 || GET_CODE (op) == NEG)
715 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
716 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
717 return simplify_gen_unary (GET_CODE (op), mode,
718 XEXP (XEXP (op, 0), 0), mode);
719
720 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
721 is (float_truncate:SF x). */
722 if (GET_CODE (op) == SUBREG
723 && subreg_lowpart_p (op)
724 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
725 return SUBREG_REG (op);
726 break;
727
728 case FLOAT_EXTEND:
729 if (DECIMAL_FLOAT_MODE_P (mode))
730 break;
731
732 /* (float_extend (float_extend x)) is (float_extend x)
733
734 (float_extend (float x)) is (float x) assuming that double
735 rounding can't happen.
736 */
737 if (GET_CODE (op) == FLOAT_EXTEND
738 || (GET_CODE (op) == FLOAT
739 && ((unsigned)significand_size (GET_MODE (op))
740 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
741 - num_sign_bit_copies (XEXP (op, 0),
742 GET_MODE (XEXP (op, 0)))))))
743 return simplify_gen_unary (GET_CODE (op), mode,
744 XEXP (op, 0),
745 GET_MODE (XEXP (op, 0)));
746
747 break;
748
749 case ABS:
750 /* (abs (neg <foo>)) -> (abs <foo>) */
751 if (GET_CODE (op) == NEG)
752 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
753 GET_MODE (XEXP (op, 0)));
754
755 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
756 do nothing. */
757 if (GET_MODE (op) == VOIDmode)
758 break;
759
760 /* If operand is something known to be positive, ignore the ABS. */
761 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
762 || ((GET_MODE_BITSIZE (GET_MODE (op))
763 <= HOST_BITS_PER_WIDE_INT)
764 && ((nonzero_bits (op, GET_MODE (op))
765 & ((HOST_WIDE_INT) 1
766 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
767 == 0)))
768 return op;
769
770 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
771 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
772 return gen_rtx_NEG (mode, op);
773
774 break;
775
776 case FFS:
777 /* (ffs (*_extend <X>)) = (ffs <X>) */
778 if (GET_CODE (op) == SIGN_EXTEND
779 || GET_CODE (op) == ZERO_EXTEND)
780 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
781 GET_MODE (XEXP (op, 0)));
782 break;
783
784 case POPCOUNT:
785 switch (GET_CODE (op))
786 {
787 case BSWAP:
788 case ZERO_EXTEND:
789 /* (popcount (zero_extend <X>)) = (popcount <X>) */
790 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
791 GET_MODE (XEXP (op, 0)));
792
793 case ROTATE:
794 case ROTATERT:
795 /* Rotations don't affect popcount. */
796 if (!side_effects_p (XEXP (op, 1)))
797 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
798 GET_MODE (XEXP (op, 0)));
799 break;
800
801 default:
802 break;
803 }
804 break;
805
806 case PARITY:
807 switch (GET_CODE (op))
808 {
809 case NOT:
810 case BSWAP:
811 case ZERO_EXTEND:
812 case SIGN_EXTEND:
813 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
814 GET_MODE (XEXP (op, 0)));
815
816 case ROTATE:
817 case ROTATERT:
818 /* Rotations don't affect parity. */
819 if (!side_effects_p (XEXP (op, 1)))
820 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
821 GET_MODE (XEXP (op, 0)));
822 break;
823
824 default:
825 break;
826 }
827 break;
828
829 case BSWAP:
830 /* (bswap (bswap x)) -> x. */
831 if (GET_CODE (op) == BSWAP)
832 return XEXP (op, 0);
833 break;
834
835 case FLOAT:
836 /* (float (sign_extend <X>)) = (float <X>). */
837 if (GET_CODE (op) == SIGN_EXTEND)
838 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
839 GET_MODE (XEXP (op, 0)));
840 break;
841
842 case SIGN_EXTEND:
843 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
844 becomes just the MINUS if its mode is MODE. This allows
845 folding switch statements on machines using casesi (such as
846 the VAX). */
847 if (GET_CODE (op) == TRUNCATE
848 && GET_MODE (XEXP (op, 0)) == mode
849 && GET_CODE (XEXP (op, 0)) == MINUS
850 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
851 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
852 return XEXP (op, 0);
853
854 /* Check for a sign extension of a subreg of a promoted
855 variable, where the promotion is sign-extended, and the
856 target mode is the same as the variable's promotion. */
857 if (GET_CODE (op) == SUBREG
858 && SUBREG_PROMOTED_VAR_P (op)
859 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
860 && GET_MODE (XEXP (op, 0)) == mode)
861 return XEXP (op, 0);
862
863 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
864 if (! POINTERS_EXTEND_UNSIGNED
865 && mode == Pmode && GET_MODE (op) == ptr_mode
866 && (CONSTANT_P (op)
867 || (GET_CODE (op) == SUBREG
868 && REG_P (SUBREG_REG (op))
869 && REG_POINTER (SUBREG_REG (op))
870 && GET_MODE (SUBREG_REG (op)) == Pmode)))
871 return convert_memory_address (Pmode, op);
872 #endif
873 break;
874
875 case ZERO_EXTEND:
876 /* Check for a zero extension of a subreg of a promoted
877 variable, where the promotion is zero-extended, and the
878 target mode is the same as the variable's promotion. */
879 if (GET_CODE (op) == SUBREG
880 && SUBREG_PROMOTED_VAR_P (op)
881 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
882 && GET_MODE (XEXP (op, 0)) == mode)
883 return XEXP (op, 0);
884
885 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
886 if (POINTERS_EXTEND_UNSIGNED > 0
887 && mode == Pmode && GET_MODE (op) == ptr_mode
888 && (CONSTANT_P (op)
889 || (GET_CODE (op) == SUBREG
890 && REG_P (SUBREG_REG (op))
891 && REG_POINTER (SUBREG_REG (op))
892 && GET_MODE (SUBREG_REG (op)) == Pmode)))
893 return convert_memory_address (Pmode, op);
894 #endif
895 break;
896
897 default:
898 break;
899 }
900
901 return 0;
902 }
903
904 /* Try to compute the value of a unary operation CODE whose output mode is to
905 be MODE with input operand OP whose mode was originally OP_MODE.
906 Return zero if the value cannot be computed. */
907 rtx
908 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
909 rtx op, enum machine_mode op_mode)
910 {
911 unsigned int width = GET_MODE_BITSIZE (mode);
912
913 if (code == VEC_DUPLICATE)
914 {
915 gcc_assert (VECTOR_MODE_P (mode));
916 if (GET_MODE (op) != VOIDmode)
917 {
918 if (!VECTOR_MODE_P (GET_MODE (op)))
919 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
920 else
921 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
922 (GET_MODE (op)));
923 }
924 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
925 || GET_CODE (op) == CONST_VECTOR)
926 {
927 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
928 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
929 rtvec v = rtvec_alloc (n_elts);
930 unsigned int i;
931
932 if (GET_CODE (op) != CONST_VECTOR)
933 for (i = 0; i < n_elts; i++)
934 RTVEC_ELT (v, i) = op;
935 else
936 {
937 enum machine_mode inmode = GET_MODE (op);
938 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
939 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
940
941 gcc_assert (in_n_elts < n_elts);
942 gcc_assert ((n_elts % in_n_elts) == 0);
943 for (i = 0; i < n_elts; i++)
944 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
945 }
946 return gen_rtx_CONST_VECTOR (mode, v);
947 }
948 }
949
950 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
951 {
952 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
953 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
954 enum machine_mode opmode = GET_MODE (op);
955 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
956 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
957 rtvec v = rtvec_alloc (n_elts);
958 unsigned int i;
959
960 gcc_assert (op_n_elts == n_elts);
961 for (i = 0; i < n_elts; i++)
962 {
963 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
964 CONST_VECTOR_ELT (op, i),
965 GET_MODE_INNER (opmode));
966 if (!x)
967 return 0;
968 RTVEC_ELT (v, i) = x;
969 }
970 return gen_rtx_CONST_VECTOR (mode, v);
971 }
972
973 /* The order of these tests is critical so that, for example, we don't
974 check the wrong mode (input vs. output) for a conversion operation,
975 such as FIX. At some point, this should be simplified. */
976
977 if (code == FLOAT && GET_MODE (op) == VOIDmode
978 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
979 {
980 HOST_WIDE_INT hv, lv;
981 REAL_VALUE_TYPE d;
982
983 if (GET_CODE (op) == CONST_INT)
984 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
985 else
986 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
987
988 REAL_VALUE_FROM_INT (d, lv, hv, mode);
989 d = real_value_truncate (mode, d);
990 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
991 }
992 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
993 && (GET_CODE (op) == CONST_DOUBLE
994 || GET_CODE (op) == CONST_INT))
995 {
996 HOST_WIDE_INT hv, lv;
997 REAL_VALUE_TYPE d;
998
999 if (GET_CODE (op) == CONST_INT)
1000 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1001 else
1002 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1003
1004 if (op_mode == VOIDmode)
1005 {
1006 /* We don't know how to interpret negative-looking numbers in
1007 this case, so don't try to fold those. */
1008 if (hv < 0)
1009 return 0;
1010 }
1011 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1012 ;
1013 else
1014 hv = 0, lv &= GET_MODE_MASK (op_mode);
1015
1016 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1017 d = real_value_truncate (mode, d);
1018 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1019 }
1020
1021 if (GET_CODE (op) == CONST_INT
1022 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1023 {
1024 HOST_WIDE_INT arg0 = INTVAL (op);
1025 HOST_WIDE_INT val;
1026
1027 switch (code)
1028 {
1029 case NOT:
1030 val = ~ arg0;
1031 break;
1032
1033 case NEG:
1034 val = - arg0;
1035 break;
1036
1037 case ABS:
1038 val = (arg0 >= 0 ? arg0 : - arg0);
1039 break;
1040
1041 case FFS:
1042 /* Don't use ffs here. Instead, get low order bit and then its
1043 number. If arg0 is zero, this will return 0, as desired. */
1044 arg0 &= GET_MODE_MASK (mode);
1045 val = exact_log2 (arg0 & (- arg0)) + 1;
1046 break;
1047
1048 case CLZ:
1049 arg0 &= GET_MODE_MASK (mode);
1050 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1051 ;
1052 else
1053 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1054 break;
1055
1056 case CTZ:
1057 arg0 &= GET_MODE_MASK (mode);
1058 if (arg0 == 0)
1059 {
1060 /* Even if the value at zero is undefined, we have to come
1061 up with some replacement. Seems good enough. */
1062 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1063 val = GET_MODE_BITSIZE (mode);
1064 }
1065 else
1066 val = exact_log2 (arg0 & -arg0);
1067 break;
1068
1069 case POPCOUNT:
1070 arg0 &= GET_MODE_MASK (mode);
1071 val = 0;
1072 while (arg0)
1073 val++, arg0 &= arg0 - 1;
1074 break;
1075
1076 case PARITY:
1077 arg0 &= GET_MODE_MASK (mode);
1078 val = 0;
1079 while (arg0)
1080 val++, arg0 &= arg0 - 1;
1081 val &= 1;
1082 break;
1083
1084 case BSWAP:
1085 {
1086 unsigned int s;
1087
1088 val = 0;
1089 for (s = 0; s < width; s += 8)
1090 {
1091 unsigned int d = width - s - 8;
1092 unsigned HOST_WIDE_INT byte;
1093 byte = (arg0 >> s) & 0xff;
1094 val |= byte << d;
1095 }
1096 }
1097 break;
1098
1099 case TRUNCATE:
1100 val = arg0;
1101 break;
1102
1103 case ZERO_EXTEND:
1104 /* When zero-extending a CONST_INT, we need to know its
1105 original mode. */
1106 gcc_assert (op_mode != VOIDmode);
1107 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1108 {
1109 /* If we were really extending the mode,
1110 we would have to distinguish between zero-extension
1111 and sign-extension. */
1112 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1113 val = arg0;
1114 }
1115 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1116 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1117 else
1118 return 0;
1119 break;
1120
1121 case SIGN_EXTEND:
1122 if (op_mode == VOIDmode)
1123 op_mode = mode;
1124 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1125 {
1126 /* If we were really extending the mode,
1127 we would have to distinguish between zero-extension
1128 and sign-extension. */
1129 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1130 val = arg0;
1131 }
1132 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1133 {
1134 val
1135 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1136 if (val
1137 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1138 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1139 }
1140 else
1141 return 0;
1142 break;
1143
1144 case SQRT:
1145 case FLOAT_EXTEND:
1146 case FLOAT_TRUNCATE:
1147 case SS_TRUNCATE:
1148 case US_TRUNCATE:
1149 case SS_NEG:
1150 return 0;
1151
1152 default:
1153 gcc_unreachable ();
1154 }
1155
1156 return gen_int_mode (val, mode);
1157 }
1158
1159 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1160 for a DImode operation on a CONST_INT. */
1161 else if (GET_MODE (op) == VOIDmode
1162 && width <= HOST_BITS_PER_WIDE_INT * 2
1163 && (GET_CODE (op) == CONST_DOUBLE
1164 || GET_CODE (op) == CONST_INT))
1165 {
1166 unsigned HOST_WIDE_INT l1, lv;
1167 HOST_WIDE_INT h1, hv;
1168
1169 if (GET_CODE (op) == CONST_DOUBLE)
1170 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1171 else
1172 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1173
1174 switch (code)
1175 {
1176 case NOT:
1177 lv = ~ l1;
1178 hv = ~ h1;
1179 break;
1180
1181 case NEG:
1182 neg_double (l1, h1, &lv, &hv);
1183 break;
1184
1185 case ABS:
1186 if (h1 < 0)
1187 neg_double (l1, h1, &lv, &hv);
1188 else
1189 lv = l1, hv = h1;
1190 break;
1191
1192 case FFS:
1193 hv = 0;
1194 if (l1 == 0)
1195 {
1196 if (h1 == 0)
1197 lv = 0;
1198 else
1199 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1200 }
1201 else
1202 lv = exact_log2 (l1 & -l1) + 1;
1203 break;
1204
1205 case CLZ:
1206 hv = 0;
1207 if (h1 != 0)
1208 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1209 - HOST_BITS_PER_WIDE_INT;
1210 else if (l1 != 0)
1211 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1212 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1213 lv = GET_MODE_BITSIZE (mode);
1214 break;
1215
1216 case CTZ:
1217 hv = 0;
1218 if (l1 != 0)
1219 lv = exact_log2 (l1 & -l1);
1220 else if (h1 != 0)
1221 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1222 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1223 lv = GET_MODE_BITSIZE (mode);
1224 break;
1225
1226 case POPCOUNT:
1227 hv = 0;
1228 lv = 0;
1229 while (l1)
1230 lv++, l1 &= l1 - 1;
1231 while (h1)
1232 lv++, h1 &= h1 - 1;
1233 break;
1234
1235 case PARITY:
1236 hv = 0;
1237 lv = 0;
1238 while (l1)
1239 lv++, l1 &= l1 - 1;
1240 while (h1)
1241 lv++, h1 &= h1 - 1;
1242 lv &= 1;
1243 break;
1244
1245 case BSWAP:
1246 {
1247 unsigned int s;
1248
1249 hv = 0;
1250 lv = 0;
1251 for (s = 0; s < width; s += 8)
1252 {
1253 unsigned int d = width - s - 8;
1254 unsigned HOST_WIDE_INT byte;
1255
1256 if (s < HOST_BITS_PER_WIDE_INT)
1257 byte = (l1 >> s) & 0xff;
1258 else
1259 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1260
1261 if (d < HOST_BITS_PER_WIDE_INT)
1262 lv |= byte << d;
1263 else
1264 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1265 }
1266 }
1267 break;
1268
1269 case TRUNCATE:
1270 /* This is just a change-of-mode, so do nothing. */
1271 lv = l1, hv = h1;
1272 break;
1273
1274 case ZERO_EXTEND:
1275 gcc_assert (op_mode != VOIDmode);
1276
1277 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1278 return 0;
1279
1280 hv = 0;
1281 lv = l1 & GET_MODE_MASK (op_mode);
1282 break;
1283
1284 case SIGN_EXTEND:
1285 if (op_mode == VOIDmode
1286 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1287 return 0;
1288 else
1289 {
1290 lv = l1 & GET_MODE_MASK (op_mode);
1291 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1292 && (lv & ((HOST_WIDE_INT) 1
1293 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1294 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1295
1296 hv = HWI_SIGN_EXTEND (lv);
1297 }
1298 break;
1299
1300 case SQRT:
1301 return 0;
1302
1303 default:
1304 return 0;
1305 }
1306
1307 return immed_double_const (lv, hv, mode);
1308 }
1309
1310 else if (GET_CODE (op) == CONST_DOUBLE
1311 && SCALAR_FLOAT_MODE_P (mode))
1312 {
1313 REAL_VALUE_TYPE d, t;
1314 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1315
1316 switch (code)
1317 {
1318 case SQRT:
1319 if (HONOR_SNANS (mode) && real_isnan (&d))
1320 return 0;
1321 real_sqrt (&t, mode, &d);
1322 d = t;
1323 break;
1324 case ABS:
1325 d = REAL_VALUE_ABS (d);
1326 break;
1327 case NEG:
1328 d = REAL_VALUE_NEGATE (d);
1329 break;
1330 case FLOAT_TRUNCATE:
1331 d = real_value_truncate (mode, d);
1332 break;
1333 case FLOAT_EXTEND:
1334 /* All this does is change the mode. */
1335 break;
1336 case FIX:
1337 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1338 break;
1339 case NOT:
1340 {
1341 long tmp[4];
1342 int i;
1343
1344 real_to_target (tmp, &d, GET_MODE (op));
1345 for (i = 0; i < 4; i++)
1346 tmp[i] = ~tmp[i];
1347 real_from_target (&d, tmp, mode);
1348 break;
1349 }
1350 default:
1351 gcc_unreachable ();
1352 }
1353 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1354 }
1355
1356 else if (GET_CODE (op) == CONST_DOUBLE
1357 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1358 && GET_MODE_CLASS (mode) == MODE_INT
1359 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1360 {
1361 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1362 operators are intentionally left unspecified (to ease implementation
1363 by target backends), for consistency, this routine implements the
1364 same semantics for constant folding as used by the middle-end. */
1365
1366 /* This was formerly used only for non-IEEE float.
1367 eggert@twinsun.com says it is safe for IEEE also. */
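/* For example, (fix:SI (const_double:DF 1.0e10)) saturates to the signed
   upper bound 2147483647, and a NaN operand folds to 0.  */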
1368 HOST_WIDE_INT xh, xl, th, tl;
1369 REAL_VALUE_TYPE x, t;
1370 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1371 switch (code)
1372 {
1373 case FIX:
1374 if (REAL_VALUE_ISNAN (x))
1375 return const0_rtx;
1376
1377 /* Test against the signed upper bound. */
1378 if (width > HOST_BITS_PER_WIDE_INT)
1379 {
1380 th = ((unsigned HOST_WIDE_INT) 1
1381 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1382 tl = -1;
1383 }
1384 else
1385 {
1386 th = 0;
1387 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1388 }
1389 real_from_integer (&t, VOIDmode, tl, th, 0);
1390 if (REAL_VALUES_LESS (t, x))
1391 {
1392 xh = th;
1393 xl = tl;
1394 break;
1395 }
1396
1397 /* Test against the signed lower bound. */
1398 if (width > HOST_BITS_PER_WIDE_INT)
1399 {
1400 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1401 tl = 0;
1402 }
1403 else
1404 {
1405 th = -1;
1406 tl = (HOST_WIDE_INT) -1 << (width - 1);
1407 }
1408 real_from_integer (&t, VOIDmode, tl, th, 0);
1409 if (REAL_VALUES_LESS (x, t))
1410 {
1411 xh = th;
1412 xl = tl;
1413 break;
1414 }
1415 REAL_VALUE_TO_INT (&xl, &xh, x);
1416 break;
1417
1418 case UNSIGNED_FIX:
1419 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1420 return const0_rtx;
1421
1422 /* Test against the unsigned upper bound. */
1423 if (width == 2*HOST_BITS_PER_WIDE_INT)
1424 {
1425 th = -1;
1426 tl = -1;
1427 }
1428 else if (width >= HOST_BITS_PER_WIDE_INT)
1429 {
1430 th = ((unsigned HOST_WIDE_INT) 1
1431 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1432 tl = -1;
1433 }
1434 else
1435 {
1436 th = 0;
1437 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1438 }
1439 real_from_integer (&t, VOIDmode, tl, th, 1);
1440 if (REAL_VALUES_LESS (t, x))
1441 {
1442 xh = th;
1443 xl = tl;
1444 break;
1445 }
1446
1447 REAL_VALUE_TO_INT (&xl, &xh, x);
1448 break;
1449
1450 default:
1451 gcc_unreachable ();
1452 }
1453 return immed_double_const (xl, xh, mode);
1454 }
1455
1456 return NULL_RTX;
1457 }
1458 \f
1459 /* Subroutine of simplify_binary_operation to simplify a commutative,
1460 associative binary operation CODE with result mode MODE, operating
1461 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1462 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1463 canonicalization is possible. */
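/* For example, (and (and X (const_int 12)) (const_int 10)) is
   reassociated so the two constants fold together, giving
   (and X (const_int 8)).  */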
1464
1465 static rtx
1466 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1467 rtx op0, rtx op1)
1468 {
1469 rtx tem;
1470
1471 /* Linearize the operator to the left. */
1472 if (GET_CODE (op1) == code)
1473 {
1474 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1475 if (GET_CODE (op0) == code)
1476 {
1477 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1478 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1479 }
1480
1481 /* "a op (b op c)" becomes "(b op c) op a". */
1482 if (! swap_commutative_operands_p (op1, op0))
1483 return simplify_gen_binary (code, mode, op1, op0);
1484
1485 tem = op0;
1486 op0 = op1;
1487 op1 = tem;
1488 }
1489
1490 if (GET_CODE (op0) == code)
1491 {
1492 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1493 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1494 {
1495 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1496 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1497 }
1498
1499 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1500 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1501 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1502 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1503 if (tem != 0)
1504 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1505
1506 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1507 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1508 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1509 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1510 if (tem != 0)
1511 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1512 }
1513
1514 return 0;
1515 }
1516
1517
1518 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1519 and OP1. Return 0 if no simplification is possible.
1520
1521 Don't use this for relational operations such as EQ or LT.
1522 Use simplify_relational_operation instead. */
1523 rtx
1524 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1525 rtx op0, rtx op1)
1526 {
1527 rtx trueop0, trueop1;
1528 rtx tem;
1529
1530 /* Relational operations don't work here. We must know the mode
1531 of the operands in order to do the comparison correctly.
1532 Assuming a full word can give incorrect results.
1533 Consider comparing 128 with -128 in QImode. */
1534 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1535 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1536
1537 /* Make sure the constant is second. */
1538 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1539 && swap_commutative_operands_p (op0, op1))
1540 {
1541 tem = op0, op0 = op1, op1 = tem;
1542 }
1543
1544 trueop0 = avoid_constant_pool_reference (op0);
1545 trueop1 = avoid_constant_pool_reference (op1);
1546
1547 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1548 if (tem)
1549 return tem;
1550 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1551 }
1552
1553 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1554 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1555 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1556 actual constants. */
1557
1558 static rtx
1559 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1560 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1561 {
1562 rtx tem, reversed, opleft, opright;
1563 HOST_WIDE_INT val;
1564 unsigned int width = GET_MODE_BITSIZE (mode);
1565
1566 /* Even if we can't compute a constant result,
1567 there are some cases worth simplifying. */
1568
1569 switch (code)
1570 {
1571 case PLUS:
1572 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1573 when x is NaN, infinite, or finite and nonzero. They aren't
1574 when x is -0 and the rounding mode is not towards -infinity,
1575 since (-0) + 0 is then 0. */
1576 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1577 return op0;
1578
1579 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1580 transformations are safe even for IEEE. */
1581 if (GET_CODE (op0) == NEG)
1582 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1583 else if (GET_CODE (op1) == NEG)
1584 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1585
1586 /* (~a) + 1 -> -a */
1587 if (INTEGRAL_MODE_P (mode)
1588 && GET_CODE (op0) == NOT
1589 && trueop1 == const1_rtx)
1590 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1591
1592 /* Handle both-operands-constant cases. We can only add
1593 CONST_INTs to constants since the sum of relocatable symbols
1594 can't be handled by most assemblers. Don't add CONST_INT
1595 to CONST_INT since overflow won't be computed properly if wider
1596 than HOST_BITS_PER_WIDE_INT. */
1597
1598 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1599 && GET_CODE (op1) == CONST_INT)
1600 return plus_constant (op0, INTVAL (op1));
1601 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1602 && GET_CODE (op0) == CONST_INT)
1603 return plus_constant (op1, INTVAL (op0));
1604
1605 /* See if this is something like X * C - X or vice versa or
1606 if the multiplication is written as a shift. If so, we can
1607 distribute and make a new multiply, shift, or maybe just
1608 have X (if C is 2 in the example above). But don't make
1609 something more expensive than we had before. */
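/* For example, (plus (mult X (const_int 3)) X) can become
   (mult X (const_int 4)), and (plus (ashift X (const_int 2)) X)
   can become (mult X (const_int 5)), provided the result is not
   costlier than the original.  */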
1610
1611 if (SCALAR_INT_MODE_P (mode))
1612 {
1613 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1614 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1615 rtx lhs = op0, rhs = op1;
1616
1617 if (GET_CODE (lhs) == NEG)
1618 {
1619 coeff0l = -1;
1620 coeff0h = -1;
1621 lhs = XEXP (lhs, 0);
1622 }
1623 else if (GET_CODE (lhs) == MULT
1624 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1625 {
1626 coeff0l = INTVAL (XEXP (lhs, 1));
1627 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1628 lhs = XEXP (lhs, 0);
1629 }
1630 else if (GET_CODE (lhs) == ASHIFT
1631 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1632 && INTVAL (XEXP (lhs, 1)) >= 0
1633 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1634 {
1635 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1636 coeff0h = 0;
1637 lhs = XEXP (lhs, 0);
1638 }
1639
1640 if (GET_CODE (rhs) == NEG)
1641 {
1642 coeff1l = -1;
1643 coeff1h = -1;
1644 rhs = XEXP (rhs, 0);
1645 }
1646 else if (GET_CODE (rhs) == MULT
1647 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1648 {
1649 coeff1l = INTVAL (XEXP (rhs, 1));
1650 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1651 rhs = XEXP (rhs, 0);
1652 }
1653 else if (GET_CODE (rhs) == ASHIFT
1654 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1655 && INTVAL (XEXP (rhs, 1)) >= 0
1656 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1657 {
1658 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1659 coeff1h = 0;
1660 rhs = XEXP (rhs, 0);
1661 }
1662
1663 if (rtx_equal_p (lhs, rhs))
1664 {
1665 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1666 rtx coeff;
1667 unsigned HOST_WIDE_INT l;
1668 HOST_WIDE_INT h;
1669
1670 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1671 coeff = immed_double_const (l, h, mode);
1672
1673 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1674 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1675 ? tem : 0;
1676 }
1677 }
1678
1679 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
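/* For example, in 32-bit SImode, (plus (xor X (const_int 5))
   (const_int -2147483648)) becomes (xor X (const_int -2147483643)),
   because adding the sign bit is the same as XORing with it.  */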
1680 if ((GET_CODE (op1) == CONST_INT
1681 || GET_CODE (op1) == CONST_DOUBLE)
1682 && GET_CODE (op0) == XOR
1683 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1684 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1685 && mode_signbit_p (mode, op1))
1686 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1687 simplify_gen_binary (XOR, mode, op1,
1688 XEXP (op0, 1)));
1689
1690 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1691 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1692 && GET_CODE (op0) == MULT
1693 && GET_CODE (XEXP (op0, 0)) == NEG)
1694 {
1695 rtx in1, in2;
1696
1697 in1 = XEXP (XEXP (op0, 0), 0);
1698 in2 = XEXP (op0, 1);
1699 return simplify_gen_binary (MINUS, mode, op1,
1700 simplify_gen_binary (MULT, mode,
1701 in1, in2));
1702 }
1703
1704 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1705 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1706 is 1. */
1707 if (COMPARISON_P (op0)
1708 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1709 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1710 && (reversed = reversed_comparison (op0, mode)))
1711 return
1712 simplify_gen_unary (NEG, mode, reversed, mode);
1713
1714 /* If one of the operands is a PLUS or a MINUS, see if we can
1715 simplify this by the associative law.
1716 Don't use the associative law for floating point.
1717 The inaccuracy makes it nonassociative,
1718 and subtle programs can break if operations are associated. */
1719
1720 if (INTEGRAL_MODE_P (mode)
1721 && (plus_minus_operand_p (op0)
1722 || plus_minus_operand_p (op1))
1723 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1724 return tem;
1725
1726 /* Reassociate floating point addition only when the user
1727 specifies unsafe math optimizations. */
1728 if (FLOAT_MODE_P (mode)
1729 && flag_unsafe_math_optimizations)
1730 {
1731 tem = simplify_associative_operation (code, mode, op0, op1);
1732 if (tem)
1733 return tem;
1734 }
1735 break;
1736
1737 case COMPARE:
1738 #ifdef HAVE_cc0
1739 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1740 using cc0, in which case we want to leave it as a COMPARE
1741 so we can distinguish it from a register-register-copy.
1742
1743 In IEEE floating point, x-0 is not the same as x. */
1744
1745 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1746 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1747 && trueop1 == CONST0_RTX (mode))
1748 return op0;
1749 #endif
1750
1751 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1752 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1753 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1754 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1755 {
1756 rtx xop00 = XEXP (op0, 0);
1757 rtx xop10 = XEXP (op1, 0);
1758
1759 #ifdef HAVE_cc0
1760 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1761 #else
1762 if (REG_P (xop00) && REG_P (xop10)
1763 && GET_MODE (xop00) == GET_MODE (xop10)
1764 && REGNO (xop00) == REGNO (xop10)
1765 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1766 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1767 #endif
1768 return xop00;
1769 }
1770 break;
1771
1772 case MINUS:
1773 /* We can't assume x-x is 0 even with non-IEEE floating point,
1774 but since it is zero except in very strange circumstances, we
1775 will treat it as zero with -funsafe-math-optimizations. */
1776 if (rtx_equal_p (trueop0, trueop1)
1777 && ! side_effects_p (op0)
1778 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1779 return CONST0_RTX (mode);
1780
1781 /* Change subtraction from zero into negation. (0 - x) is the
1782 same as -x when x is NaN, infinite, or finite and nonzero.
1783 But if the mode has signed zeros, and does not round towards
1784 -infinity, then 0 - 0 is 0, not -0. */
1785 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1786 return simplify_gen_unary (NEG, mode, op1, mode);
1787
1788 /* (-1 - a) is ~a. */
1789 if (trueop0 == constm1_rtx)
1790 return simplify_gen_unary (NOT, mode, op1, mode);
1791
1792 /* Subtracting 0 has no effect unless the mode has signed zeros
1793 and supports rounding towards -infinity. In such a case,
1794 0 - 0 is -0. */
1795 if (!(HONOR_SIGNED_ZEROS (mode)
1796 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1797 && trueop1 == CONST0_RTX (mode))
1798 return op0;
1799
1800 /* See if this is something like X * C - X or vice versa or
1801 if the multiplication is written as a shift. If so, we can
1802 distribute and make a new multiply, shift, or maybe just
1803 have X (if C is 2 in the example above). But don't make
1804 something more expensive than we had before. */
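/* For example, (minus (mult X (const_int 3)) X) can become
   (mult X (const_int 2)), again subject to the cost check.  */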
1805
1806 if (SCALAR_INT_MODE_P (mode))
1807 {
1808 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1809 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1810 rtx lhs = op0, rhs = op1;
1811
1812 if (GET_CODE (lhs) == NEG)
1813 {
1814 coeff0l = -1;
1815 coeff0h = -1;
1816 lhs = XEXP (lhs, 0);
1817 }
1818 else if (GET_CODE (lhs) == MULT
1819 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1820 {
1821 coeff0l = INTVAL (XEXP (lhs, 1));
1822 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1823 lhs = XEXP (lhs, 0);
1824 }
1825 else if (GET_CODE (lhs) == ASHIFT
1826 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1827 && INTVAL (XEXP (lhs, 1)) >= 0
1828 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1829 {
1830 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1831 coeff0h = 0;
1832 lhs = XEXP (lhs, 0);
1833 }
1834
1835 if (GET_CODE (rhs) == NEG)
1836 {
1837 negcoeff1l = 1;
1838 negcoeff1h = 0;
1839 rhs = XEXP (rhs, 0);
1840 }
1841 else if (GET_CODE (rhs) == MULT
1842 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1843 {
1844 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1845 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1846 rhs = XEXP (rhs, 0);
1847 }
1848 else if (GET_CODE (rhs) == ASHIFT
1849 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1850 && INTVAL (XEXP (rhs, 1)) >= 0
1851 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1852 {
1853 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1854 negcoeff1h = -1;
1855 rhs = XEXP (rhs, 0);
1856 }
1857
1858 if (rtx_equal_p (lhs, rhs))
1859 {
1860 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1861 rtx coeff;
1862 unsigned HOST_WIDE_INT l;
1863 HOST_WIDE_INT h;
1864
1865 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1866 coeff = immed_double_const (l, h, mode);
1867
1868 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1869 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1870 ? tem : 0;
1871 }
1872 }
1873
1874 /* (a - (-b)) -> (a + b). True even for IEEE. */
1875 if (GET_CODE (op1) == NEG)
1876 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1877
1878 /* (-x - c) may be simplified as (-c - x). */
1879 if (GET_CODE (op0) == NEG
1880 && (GET_CODE (op1) == CONST_INT
1881 || GET_CODE (op1) == CONST_DOUBLE))
1882 {
1883 tem = simplify_unary_operation (NEG, mode, op1, mode);
1884 if (tem)
1885 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1886 }
1887
1888 /* Don't let a relocatable value get a negative coeff. */
1889 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1890 return simplify_gen_binary (PLUS, mode,
1891 op0,
1892 neg_const_int (mode, op1));
1893
1894 /* (x - (x & y)) -> (x & ~y) */
1895 if (GET_CODE (op1) == AND)
1896 {
1897 if (rtx_equal_p (op0, XEXP (op1, 0)))
1898 {
1899 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1900 GET_MODE (XEXP (op1, 1)));
1901 return simplify_gen_binary (AND, mode, op0, tem);
1902 }
1903 if (rtx_equal_p (op0, XEXP (op1, 1)))
1904 {
1905 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1906 GET_MODE (XEXP (op1, 0)));
1907 return simplify_gen_binary (AND, mode, op0, tem);
1908 }
1909 }
1910
1911 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1912 by reversing the comparison code if valid. */
1913 if (STORE_FLAG_VALUE == 1
1914 && trueop0 == const1_rtx
1915 && COMPARISON_P (op1)
1916 && (reversed = reversed_comparison (op1, mode)))
1917 return reversed;
1918
1919 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1920 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1921 && GET_CODE (op1) == MULT
1922 && GET_CODE (XEXP (op1, 0)) == NEG)
1923 {
1924 rtx in1, in2;
1925
1926 in1 = XEXP (XEXP (op1, 0), 0);
1927 in2 = XEXP (op1, 1);
1928 return simplify_gen_binary (PLUS, mode,
1929 simplify_gen_binary (MULT, mode,
1930 in1, in2),
1931 op0);
1932 }
1933
1934 /* Canonicalize (minus (neg A) (mult B C)) to
1935 (minus (mult (neg B) C) A). */
1936 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1937 && GET_CODE (op1) == MULT
1938 && GET_CODE (op0) == NEG)
1939 {
1940 rtx in1, in2;
1941
1942 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1943 in2 = XEXP (op1, 1);
1944 return simplify_gen_binary (MINUS, mode,
1945 simplify_gen_binary (MULT, mode,
1946 in1, in2),
1947 XEXP (op0, 0));
1948 }
1949
1950 /* If one of the operands is a PLUS or a MINUS, see if we can
1951 simplify this by the associative law. This will, for example,
1952 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1953 Don't use the associative law for floating point.
1954 The inaccuracy makes it nonassociative,
1955 and subtle programs can break if operations are associated. */
1956
1957 if (INTEGRAL_MODE_P (mode)
1958 && (plus_minus_operand_p (op0)
1959 || plus_minus_operand_p (op1))
1960 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1961 return tem;
1962 break;
1963
1964 case MULT:
1965 if (trueop1 == constm1_rtx)
1966 return simplify_gen_unary (NEG, mode, op0, mode);
1967
1968 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1969 x is NaN, since x * 0 is then also NaN. Nor is it valid
1970 when the mode has signed zeros, since multiplying a negative
1971 number by 0 will give -0, not 0. */
1972 if (!HONOR_NANS (mode)
1973 && !HONOR_SIGNED_ZEROS (mode)
1974 && trueop1 == CONST0_RTX (mode)
1975 && ! side_effects_p (op0))
1976 return op1;
1977
1978 /* In IEEE floating point, x*1 is not equivalent to x for
1979 signalling NaNs. */
1980 if (!HONOR_SNANS (mode)
1981 && trueop1 == CONST1_RTX (mode))
1982 return op0;
1983
1986 /* Convert multiply by a constant power of two into a shift. */
1986 if (GET_CODE (trueop1) == CONST_INT
1987 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1988 /* If the mode is larger than the host word size, and the
1989 uppermost bit is set, then this isn't a power of two due
1990 to implicit sign extension. */
1991 && (width <= HOST_BITS_PER_WIDE_INT
1992 || val != HOST_BITS_PER_WIDE_INT - 1))
1993 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1994
1995 /* Likewise for multipliers wider than a word. */
1996 if (GET_CODE (trueop1) == CONST_DOUBLE
1997 && (GET_MODE (trueop1) == VOIDmode
1998 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1999 && GET_MODE (op0) == mode
2000 && CONST_DOUBLE_LOW (trueop1) == 0
2001 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2002 return simplify_gen_binary (ASHIFT, mode, op0,
2003 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2004
2005 /* x*2 is x+x and x*(-1) is -x */
2006 if (GET_CODE (trueop1) == CONST_DOUBLE
2007 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2008 && GET_MODE (op0) == mode)
2009 {
2010 REAL_VALUE_TYPE d;
2011 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2012
2013 if (REAL_VALUES_EQUAL (d, dconst2))
2014 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2015
2016 if (!HONOR_SNANS (mode)
2017 && REAL_VALUES_EQUAL (d, dconstm1))
2018 return simplify_gen_unary (NEG, mode, op0, mode);
2019 }
2020
2021 /* Optimize -x * -x as x * x. */
2022 if (FLOAT_MODE_P (mode)
2023 && GET_CODE (op0) == NEG
2024 && GET_CODE (op1) == NEG
2025 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2026 && !side_effects_p (XEXP (op0, 0)))
2027 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2028
2029 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2030 if (SCALAR_FLOAT_MODE_P (mode)
2031 && GET_CODE (op0) == ABS
2032 && GET_CODE (op1) == ABS
2033 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2034 && !side_effects_p (XEXP (op0, 0)))
2035 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2036
2037 /* Reassociate multiplication, but for floating point MULTs
2038 only when the user specifies unsafe math optimizations. */
2039 if (! FLOAT_MODE_P (mode)
2040 || flag_unsafe_math_optimizations)
2041 {
2042 tem = simplify_associative_operation (code, mode, op0, op1);
2043 if (tem)
2044 return tem;
2045 }
2046 break;
2047
2048 case IOR:
2049 if (trueop1 == const0_rtx)
2050 return op0;
2051 if (GET_CODE (trueop1) == CONST_INT
2052 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2053 == GET_MODE_MASK (mode)))
2054 return op1;
2055 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2056 return op0;
2057 /* A | (~A) -> -1 */
2058 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2059 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2060 && ! side_effects_p (op0)
2061 && SCALAR_INT_MODE_P (mode))
2062 return constm1_rtx;
2063
2064 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2065 if (GET_CODE (op1) == CONST_INT
2066 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2067 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2068 return op1;
2069
2070 /* Canonicalize (X & C1) | C2. */
2071 if (GET_CODE (op0) == AND
2072 && GET_CODE (trueop1) == CONST_INT
2073 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2074 {
2075 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2076 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2077 HOST_WIDE_INT c2 = INTVAL (trueop1);
2078
2079 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2080 if ((c1 & c2) == c1
2081 && !side_effects_p (XEXP (op0, 0)))
2082 return trueop1;
2083
2084 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2085 if (((c1|c2) & mask) == mask)
2086 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2087
2088 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
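 /* For example, (ior (and X (const_int 0x0f)) (const_int 0x06))
    becomes (ior (and X (const_int 0x09)) (const_int 0x06)).
    (Illustrative constants.)  */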
2089 if (((c1 & ~c2) & mask) != (c1 & mask))
2090 {
2091 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2092 gen_int_mode (c1 & ~c2, mode));
2093 return simplify_gen_binary (IOR, mode, tem, op1);
2094 }
2095 }
2096
2097 /* Convert (A & B) | A to A. */
2098 if (GET_CODE (op0) == AND
2099 && (rtx_equal_p (XEXP (op0, 0), op1)
2100 || rtx_equal_p (XEXP (op0, 1), op1))
2101 && ! side_effects_p (XEXP (op0, 0))
2102 && ! side_effects_p (XEXP (op0, 1)))
2103 return op1;
2104
2105 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2106 mode size to (rotate A CX). */
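 /* For example, in SImode (ior (ashift A (const_int 24))
    (lshiftrt A (const_int 8))) becomes (rotate A (const_int 24)),
    since 24 + 8 equals the 32-bit mode width.  */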
2107
2108 if (GET_CODE (op1) == ASHIFT
2109 || GET_CODE (op1) == SUBREG)
2110 {
2111 opleft = op1;
2112 opright = op0;
2113 }
2114 else
2115 {
2116 opright = op1;
2117 opleft = op0;
2118 }
2119
2120 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2121 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2122 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2123 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2124 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2125 == GET_MODE_BITSIZE (mode)))
2126 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2127
2128 /* Same, but for ashift that has been "simplified" to a wider mode
2129 by simplify_shift_const. */
2130
2131 if (GET_CODE (opleft) == SUBREG
2132 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2133 && GET_CODE (opright) == LSHIFTRT
2134 && GET_CODE (XEXP (opright, 0)) == SUBREG
2135 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2136 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2137 && (GET_MODE_SIZE (GET_MODE (opleft))
2138 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2139 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2140 SUBREG_REG (XEXP (opright, 0)))
2141 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2142 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2143 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2144 == GET_MODE_BITSIZE (mode)))
2145 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2146 XEXP (SUBREG_REG (opleft), 1));
2147
2150 2148 /* If we have (ior (and X C1) C2), simplify this by making
2149 C1 as small as possible if C1 actually changes. */
2150 if (GET_CODE (op1) == CONST_INT
2151 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2152 || INTVAL (op1) > 0)
2153 && GET_CODE (op0) == AND
2154 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2156 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2157 return simplify_gen_binary (IOR, mode,
2158 simplify_gen_binary
2159 (AND, mode, XEXP (op0, 0),
2160 GEN_INT (INTVAL (XEXP (op0, 1))
2161 & ~INTVAL (op1))),
2162 op1);
2163
2164 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2165 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2166 the PLUS does not affect any of the bits in OP1: then we can do
2167 the IOR as a PLUS and we can associate. This is valid if OP1
2168 can be safely shifted left C bits. */
2169 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2170 && GET_CODE (XEXP (op0, 0)) == PLUS
2171 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2172 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2173 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2174 {
2175 int count = INTVAL (XEXP (op0, 1));
2176 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2177
2178 if (mask >> count == INTVAL (trueop1)
2179 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2180 return simplify_gen_binary (ASHIFTRT, mode,
2181 plus_constant (XEXP (op0, 0), mask),
2182 XEXP (op0, 1));
2183 }
2184
2185 tem = simplify_associative_operation (code, mode, op0, op1);
2186 if (tem)
2187 return tem;
2188 break;
2189
2190 case XOR:
2191 if (trueop1 == const0_rtx)
2192 return op0;
2193 if (GET_CODE (trueop1) == CONST_INT
2194 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2195 == GET_MODE_MASK (mode)))
2196 return simplify_gen_unary (NOT, mode, op0, mode);
2197 if (rtx_equal_p (trueop0, trueop1)
2198 && ! side_effects_p (op0)
2199 && GET_MODE_CLASS (mode) != MODE_CC)
2200 return CONST0_RTX (mode);
2201
2202 /* Canonicalize XOR of the most significant bit to PLUS. */
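 /* XORing with the sign bit and adding it give the same value: the low
    bits are untouched and the top bit is flipped either way, the only
    difference being a carry out of the mode, which is discarded.
    E.g. in QImode, (xor X 0x80) and (plus X 0x80) are equal.  */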
2203 if ((GET_CODE (op1) == CONST_INT
2204 || GET_CODE (op1) == CONST_DOUBLE)
2205 && mode_signbit_p (mode, op1))
2206 return simplify_gen_binary (PLUS, mode, op0, op1);
2207 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2208 if ((GET_CODE (op1) == CONST_INT
2209 || GET_CODE (op1) == CONST_DOUBLE)
2210 && GET_CODE (op0) == PLUS
2211 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2212 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2213 && mode_signbit_p (mode, XEXP (op0, 1)))
2214 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2215 simplify_gen_binary (XOR, mode, op1,
2216 XEXP (op0, 1)));
2217
2220 2218 /* If we are XORing two things that have no bits in common,
2221 2219 convert them into an IOR. This helps to detect rotations encoded
2222 2220 with XOR instead of IOR, and may enable other simplifications. */
2221
2222 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2223 && (nonzero_bits (op0, mode)
2224 & nonzero_bits (op1, mode)) == 0)
2225 return (simplify_gen_binary (IOR, mode, op0, op1));
2226
2227 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2228 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2229 (NOT y). */
2230 {
2231 int num_negated = 0;
2232
2233 if (GET_CODE (op0) == NOT)
2234 num_negated++, op0 = XEXP (op0, 0);
2235 if (GET_CODE (op1) == NOT)
2236 num_negated++, op1 = XEXP (op1, 0);
2237
2238 if (num_negated == 2)
2239 return simplify_gen_binary (XOR, mode, op0, op1);
2240 else if (num_negated == 1)
2241 return simplify_gen_unary (NOT, mode,
2242 simplify_gen_binary (XOR, mode, op0, op1),
2243 mode);
2244 }
2245
2246 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2247 correspond to a machine insn or result in further simplifications
2248 if B is a constant. */
2249
2250 if (GET_CODE (op0) == AND
2251 && rtx_equal_p (XEXP (op0, 1), op1)
2252 && ! side_effects_p (op1))
2253 return simplify_gen_binary (AND, mode,
2254 simplify_gen_unary (NOT, mode,
2255 XEXP (op0, 0), mode),
2256 op1);
2257
2258 else if (GET_CODE (op0) == AND
2259 && rtx_equal_p (XEXP (op0, 0), op1)
2260 && ! side_effects_p (op1))
2261 return simplify_gen_binary (AND, mode,
2262 simplify_gen_unary (NOT, mode,
2263 XEXP (op0, 1), mode),
2264 op1);
2265
2266 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2267 comparison if STORE_FLAG_VALUE is 1. */
2268 if (STORE_FLAG_VALUE == 1
2269 && trueop1 == const1_rtx
2270 && COMPARISON_P (op0)
2271 && (reversed = reversed_comparison (op0, mode)))
2272 return reversed;
2273
2274 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2275 is (lt foo (const_int 0)), so we can perform the above
2276 simplification if STORE_FLAG_VALUE is 1. */
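 /* For example, in SImode (xor (lshiftrt X (const_int 31))
    (const_int 1)) becomes (ge X (const_int 0)).  */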
2277
2278 if (STORE_FLAG_VALUE == 1
2279 && trueop1 == const1_rtx
2280 && GET_CODE (op0) == LSHIFTRT
2281 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2282 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2283 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2284
2287 2285 /* (xor (comparison foo bar) (const_int sign-bit)) is the reversed
2288 2286 comparison when STORE_FLAG_VALUE is the sign bit. */
2287 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2288 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2289 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2290 && trueop1 == const_true_rtx
2291 && COMPARISON_P (op0)
2292 && (reversed = reversed_comparison (op0, mode)))
2293 return reversed;
2294
2297 tem = simplify_associative_operation (code, mode, op0, op1);
2298 if (tem)
2299 return tem;
2300 break;
2301
2302 case AND:
2303 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2304 return trueop1;
2305 /* If we are turning off bits already known off in OP0, we need
2306 not do an AND. */
2307 if (GET_CODE (trueop1) == CONST_INT
2308 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2309 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2310 return op0;
2311 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2312 && GET_MODE_CLASS (mode) != MODE_CC)
2313 return op0;
2314 /* A & (~A) -> 0 */
2315 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2316 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2317 && ! side_effects_p (op0)
2318 && GET_MODE_CLASS (mode) != MODE_CC)
2319 return CONST0_RTX (mode);
2320
2321 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2322 there are no nonzero bits of C outside of X's mode. */
2323 if ((GET_CODE (op0) == SIGN_EXTEND
2324 || GET_CODE (op0) == ZERO_EXTEND)
2325 && GET_CODE (trueop1) == CONST_INT
2326 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2327 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2328 & INTVAL (trueop1)) == 0)
2329 {
2330 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2331 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2332 gen_int_mode (INTVAL (trueop1),
2333 imode));
2334 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2335 }
2336
2337 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2338 if (GET_CODE (op0) == IOR
2339 && GET_CODE (trueop1) == CONST_INT
2340 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2341 {
2342 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2343 return simplify_gen_binary (IOR, mode,
2344 simplify_gen_binary (AND, mode,
2345 XEXP (op0, 0), op1),
2346 gen_int_mode (tmp, mode));
2347 }
2348
2349 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2350 insn (and may simplify more). */
2351 if (GET_CODE (op0) == XOR
2352 && rtx_equal_p (XEXP (op0, 0), op1)
2353 && ! side_effects_p (op1))
2354 return simplify_gen_binary (AND, mode,
2355 simplify_gen_unary (NOT, mode,
2356 XEXP (op0, 1), mode),
2357 op1);
2358
2359 if (GET_CODE (op0) == XOR
2360 && rtx_equal_p (XEXP (op0, 1), op1)
2361 && ! side_effects_p (op1))
2362 return simplify_gen_binary (AND, mode,
2363 simplify_gen_unary (NOT, mode,
2364 XEXP (op0, 0), mode),
2365 op1);
2366
2367 /* Similarly for (~(A ^ B)) & A. */
2368 if (GET_CODE (op0) == NOT
2369 && GET_CODE (XEXP (op0, 0)) == XOR
2370 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2371 && ! side_effects_p (op1))
2372 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2373
2374 if (GET_CODE (op0) == NOT
2375 && GET_CODE (XEXP (op0, 0)) == XOR
2376 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2377 && ! side_effects_p (op1))
2378 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2379
2380 /* Convert (A | B) & A to A. */
2381 if (GET_CODE (op0) == IOR
2382 && (rtx_equal_p (XEXP (op0, 0), op1)
2383 || rtx_equal_p (XEXP (op0, 1), op1))
2384 && ! side_effects_p (XEXP (op0, 0))
2385 && ! side_effects_p (XEXP (op0, 1)))
2386 return op1;
2387
2388 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2389 ((A & N) + B) & M -> (A + B) & M
2390 Similarly if (N & M) == 0,
2391 ((A | N) + B) & M -> (A + B) & M
2392 and for - instead of + and/or ^ instead of |. */
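 /* For example, with M == 0xff this rewrites ((A & 0x1ff) + B) & 0xff
    and ((A | 0x100) + B) & 0xff as (A + B) & 0xff, since changing bits
    above the mask cannot carry into the bits that survive the mask.
    (Illustrative constants.)  */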
2393 if (GET_CODE (trueop1) == CONST_INT
2394 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2395 && ~INTVAL (trueop1)
2396 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2397 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2398 {
2399 rtx pmop[2];
2400 int which;
2401
2402 pmop[0] = XEXP (op0, 0);
2403 pmop[1] = XEXP (op0, 1);
2404
2405 for (which = 0; which < 2; which++)
2406 {
2407 tem = pmop[which];
2408 switch (GET_CODE (tem))
2409 {
2410 case AND:
2411 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2412 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2413 == INTVAL (trueop1))
2414 pmop[which] = XEXP (tem, 0);
2415 break;
2416 case IOR:
2417 case XOR:
2418 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2419 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2420 pmop[which] = XEXP (tem, 0);
2421 break;
2422 default:
2423 break;
2424 }
2425 }
2426
2427 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2428 {
2429 tem = simplify_gen_binary (GET_CODE (op0), mode,
2430 pmop[0], pmop[1]);
2431 return simplify_gen_binary (code, mode, tem, op1);
2432 }
2433 }
2434 tem = simplify_associative_operation (code, mode, op0, op1);
2435 if (tem)
2436 return tem;
2437 break;
2438
2439 case UDIV:
2440 /* 0/x is 0 (or x&0 if x has side-effects). */
2441 if (trueop0 == CONST0_RTX (mode))
2442 {
2443 if (side_effects_p (op1))
2444 return simplify_gen_binary (AND, mode, op1, trueop0);
2445 return trueop0;
2446 }
2447 /* x/1 is x. */
2448 if (trueop1 == CONST1_RTX (mode))
2449 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2450 /* Convert divide by power of two into shift. */
2451 if (GET_CODE (trueop1) == CONST_INT
2452 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2453 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2454 break;
2455
2456 case DIV:
2457 /* Handle floating point and integers separately. */
2458 if (SCALAR_FLOAT_MODE_P (mode))
2459 {
2460 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2461 safe for modes with NaNs, since 0.0 / 0.0 will then be
2462 NaN rather than 0.0. Nor is it safe for modes with signed
2463 zeros, since dividing 0 by a negative number gives -0.0 */
2464 if (trueop0 == CONST0_RTX (mode)
2465 && !HONOR_NANS (mode)
2466 && !HONOR_SIGNED_ZEROS (mode)
2467 && ! side_effects_p (op1))
2468 return op0;
2469 /* x/1.0 is x. */
2470 if (trueop1 == CONST1_RTX (mode)
2471 && !HONOR_SNANS (mode))
2472 return op0;
2473
2474 if (GET_CODE (trueop1) == CONST_DOUBLE
2475 && trueop1 != CONST0_RTX (mode))
2476 {
2477 REAL_VALUE_TYPE d;
2478 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2479
2480 /* x/-1.0 is -x. */
2481 if (REAL_VALUES_EQUAL (d, dconstm1)
2482 && !HONOR_SNANS (mode))
2483 return simplify_gen_unary (NEG, mode, op0, mode);
2484
2485 /* Change FP division by a constant into multiplication.
2486 Only do this with -funsafe-math-optimizations. */
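 /* For example, x / 4.0 becomes x * 0.25 when
    -funsafe-math-optimizations is in effect.  */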
2487 if (flag_unsafe_math_optimizations
2488 && !REAL_VALUES_EQUAL (d, dconst0))
2489 {
2490 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2491 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2492 return simplify_gen_binary (MULT, mode, op0, tem);
2493 }
2494 }
2495 }
2496 else
2497 {
2498 /* 0/x is 0 (or x&0 if x has side-effects). */
2499 if (trueop0 == CONST0_RTX (mode))
2500 {
2501 if (side_effects_p (op1))
2502 return simplify_gen_binary (AND, mode, op1, trueop0);
2503 return trueop0;
2504 }
2505 /* x/1 is x. */
2506 if (trueop1 == CONST1_RTX (mode))
2507 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2508 /* x/-1 is -x. */
2509 if (trueop1 == constm1_rtx)
2510 {
2511 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2512 return simplify_gen_unary (NEG, mode, x, mode);
2513 }
2514 }
2515 break;
2516
2517 case UMOD:
2518 /* 0%x is 0 (or x&0 if x has side-effects). */
2519 if (trueop0 == CONST0_RTX (mode))
2520 {
2521 if (side_effects_p (op1))
2522 return simplify_gen_binary (AND, mode, op1, trueop0);
2523 return trueop0;
2524 }
2527 2525 /* x%1 is 0 (or x&0 if x has side-effects). */
2526 if (trueop1 == CONST1_RTX (mode))
2527 {
2528 if (side_effects_p (op0))
2529 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2530 return CONST0_RTX (mode);
2531 }
2532 /* Implement modulus by power of two as AND. */
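 /* For example, (umod X (const_int 8)) becomes (and X (const_int 7)).  */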
2533 if (GET_CODE (trueop1) == CONST_INT
2534 && exact_log2 (INTVAL (trueop1)) > 0)
2535 return simplify_gen_binary (AND, mode, op0,
2536 GEN_INT (INTVAL (op1) - 1));
2537 break;
2538
2539 case MOD:
2540 /* 0%x is 0 (or x&0 if x has side-effects). */
2541 if (trueop0 == CONST0_RTX (mode))
2542 {
2543 if (side_effects_p (op1))
2544 return simplify_gen_binary (AND, mode, op1, trueop0);
2545 return trueop0;
2546 }
2549 2547 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2548 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2549 {
2550 if (side_effects_p (op0))
2551 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2552 return CONST0_RTX (mode);
2553 }
2554 break;
2555
2556 case ROTATERT:
2557 case ROTATE:
2558 case ASHIFTRT:
2559 if (trueop1 == CONST0_RTX (mode))
2560 return op0;
2561 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2562 return op0;
2565 2563 /* Rotating ~0 always results in ~0; so does an arithmetic right shift. */
2564 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2565 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2566 && ! side_effects_p (op1))
2567 return op0;
2568 break;
2569
2570 case ASHIFT:
2571 case SS_ASHIFT:
2572 if (trueop1 == CONST0_RTX (mode))
2573 return op0;
2574 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2575 return op0;
2576 break;
2577
2578 case LSHIFTRT:
2579 if (trueop1 == CONST0_RTX (mode))
2580 return op0;
2581 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2582 return op0;
2583 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
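 /* This holds when CLZ of zero is defined to be the mode bitsize and
    C is log2 of that bitsize: CLZ of any nonzero value is smaller than
    the bitsize, so the shifted result is 1 exactly when X is zero.  */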
2584 if (GET_CODE (op0) == CLZ
2585 && GET_CODE (trueop1) == CONST_INT
2586 && STORE_FLAG_VALUE == 1
2587 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2588 {
2589 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2590 unsigned HOST_WIDE_INT zero_val = 0;
2591
2592 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2593 && zero_val == GET_MODE_BITSIZE (imode)
2594 && INTVAL (trueop1) == exact_log2 (zero_val))
2595 return simplify_gen_relational (EQ, mode, imode,
2596 XEXP (op0, 0), const0_rtx);
2597 }
2598 break;
2599
2600 case SMIN:
2601 if (width <= HOST_BITS_PER_WIDE_INT
2602 && GET_CODE (trueop1) == CONST_INT
2603 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2604 && ! side_effects_p (op0))
2605 return op1;
2606 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2607 return op0;
2608 tem = simplify_associative_operation (code, mode, op0, op1);
2609 if (tem)
2610 return tem;
2611 break;
2612
2613 case SMAX:
2614 if (width <= HOST_BITS_PER_WIDE_INT
2615 && GET_CODE (trueop1) == CONST_INT
2616 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2617 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2618 && ! side_effects_p (op0))
2619 return op1;
2620 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2621 return op0;
2622 tem = simplify_associative_operation (code, mode, op0, op1);
2623 if (tem)
2624 return tem;
2625 break;
2626
2627 case UMIN:
2628 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2629 return op1;
2630 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2631 return op0;
2632 tem = simplify_associative_operation (code, mode, op0, op1);
2633 if (tem)
2634 return tem;
2635 break;
2636
2637 case UMAX:
2638 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2639 return op1;
2640 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2641 return op0;
2642 tem = simplify_associative_operation (code, mode, op0, op1);
2643 if (tem)
2644 return tem;
2645 break;
2646
2647 case SS_PLUS:
2648 case US_PLUS:
2649 case SS_MINUS:
2650 case US_MINUS:
2651 /* ??? There are simplifications that can be done. */
2652 return 0;
2653
2654 case VEC_SELECT:
2655 if (!VECTOR_MODE_P (mode))
2656 {
2657 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2658 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2659 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2660 gcc_assert (XVECLEN (trueop1, 0) == 1);
2661 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2662
2663 if (GET_CODE (trueop0) == CONST_VECTOR)
2664 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2665 (trueop1, 0, 0)));
2666 }
2667 else
2668 {
2669 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2670 gcc_assert (GET_MODE_INNER (mode)
2671 == GET_MODE_INNER (GET_MODE (trueop0)));
2672 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2673
2674 if (GET_CODE (trueop0) == CONST_VECTOR)
2675 {
2676 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2677 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2678 rtvec v = rtvec_alloc (n_elts);
2679 unsigned int i;
2680
2681 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2682 for (i = 0; i < n_elts; i++)
2683 {
2684 rtx x = XVECEXP (trueop1, 0, i);
2685
2686 gcc_assert (GET_CODE (x) == CONST_INT);
2687 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2688 INTVAL (x));
2689 }
2690
2691 return gen_rtx_CONST_VECTOR (mode, v);
2692 }
2693 }
2694
2695 if (XVECLEN (trueop1, 0) == 1
2696 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2697 && GET_CODE (trueop0) == VEC_CONCAT)
2698 {
2699 rtx vec = trueop0;
2700 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2701
2702 /* Try to find the element in the VEC_CONCAT. */
2703 while (GET_MODE (vec) != mode
2704 && GET_CODE (vec) == VEC_CONCAT)
2705 {
2706 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2707 if (offset < vec_size)
2708 vec = XEXP (vec, 0);
2709 else
2710 {
2711 offset -= vec_size;
2712 vec = XEXP (vec, 1);
2713 }
2714 vec = avoid_constant_pool_reference (vec);
2715 }
2716
2717 if (GET_MODE (vec) == mode)
2718 return vec;
2719 }
2720
2721 return 0;
2722 case VEC_CONCAT:
2723 {
2724 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2725 ? GET_MODE (trueop0)
2726 : GET_MODE_INNER (mode));
2727 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2728 ? GET_MODE (trueop1)
2729 : GET_MODE_INNER (mode));
2730
2731 gcc_assert (VECTOR_MODE_P (mode));
2732 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2733 == GET_MODE_SIZE (mode));
2734
2735 if (VECTOR_MODE_P (op0_mode))
2736 gcc_assert (GET_MODE_INNER (mode)
2737 == GET_MODE_INNER (op0_mode));
2738 else
2739 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2740
2741 if (VECTOR_MODE_P (op1_mode))
2742 gcc_assert (GET_MODE_INNER (mode)
2743 == GET_MODE_INNER (op1_mode));
2744 else
2745 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2746
2747 if ((GET_CODE (trueop0) == CONST_VECTOR
2748 || GET_CODE (trueop0) == CONST_INT
2749 || GET_CODE (trueop0) == CONST_DOUBLE)
2750 && (GET_CODE (trueop1) == CONST_VECTOR
2751 || GET_CODE (trueop1) == CONST_INT
2752 || GET_CODE (trueop1) == CONST_DOUBLE))
2753 {
2754 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2755 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2756 rtvec v = rtvec_alloc (n_elts);
2757 unsigned int i;
2758 unsigned in_n_elts = 1;
2759
2760 if (VECTOR_MODE_P (op0_mode))
2761 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2762 for (i = 0; i < n_elts; i++)
2763 {
2764 if (i < in_n_elts)
2765 {
2766 if (!VECTOR_MODE_P (op0_mode))
2767 RTVEC_ELT (v, i) = trueop0;
2768 else
2769 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2770 }
2771 else
2772 {
2773 if (!VECTOR_MODE_P (op1_mode))
2774 RTVEC_ELT (v, i) = trueop1;
2775 else
2776 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2777 i - in_n_elts);
2778 }
2779 }
2780
2781 return gen_rtx_CONST_VECTOR (mode, v);
2782 }
2783 }
2784 return 0;
2785
2786 default:
2787 gcc_unreachable ();
2788 }
2789
2790 return 0;
2791 }
2792
2793 rtx
2794 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2795 rtx op0, rtx op1)
2796 {
2797 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2798 HOST_WIDE_INT val;
2799 unsigned int width = GET_MODE_BITSIZE (mode);
2800
2801 if (VECTOR_MODE_P (mode)
2802 && code != VEC_CONCAT
2803 && GET_CODE (op0) == CONST_VECTOR
2804 && GET_CODE (op1) == CONST_VECTOR)
2805 {
2806 unsigned n_elts = GET_MODE_NUNITS (mode);
2807 enum machine_mode op0mode = GET_MODE (op0);
2808 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2809 enum machine_mode op1mode = GET_MODE (op1);
2810 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2811 rtvec v = rtvec_alloc (n_elts);
2812 unsigned int i;
2813
2814 gcc_assert (op0_n_elts == n_elts);
2815 gcc_assert (op1_n_elts == n_elts);
2816 for (i = 0; i < n_elts; i++)
2817 {
2818 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2819 CONST_VECTOR_ELT (op0, i),
2820 CONST_VECTOR_ELT (op1, i));
2821 if (!x)
2822 return 0;
2823 RTVEC_ELT (v, i) = x;
2824 }
2825
2826 return gen_rtx_CONST_VECTOR (mode, v);
2827 }
2828
2829 if (VECTOR_MODE_P (mode)
2830 && code == VEC_CONCAT
2831 && CONSTANT_P (op0) && CONSTANT_P (op1))
2832 {
2833 unsigned n_elts = GET_MODE_NUNITS (mode);
2834 rtvec v = rtvec_alloc (n_elts);
2835
2836 gcc_assert (n_elts >= 2);
2837 if (n_elts == 2)
2838 {
2839 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2840 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2841
2842 RTVEC_ELT (v, 0) = op0;
2843 RTVEC_ELT (v, 1) = op1;
2844 }
2845 else
2846 {
2847 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2848 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2849 unsigned i;
2850
2851 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2852 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2853 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2854
2855 for (i = 0; i < op0_n_elts; ++i)
2856 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2857 for (i = 0; i < op1_n_elts; ++i)
2858 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2859 }
2860
2861 return gen_rtx_CONST_VECTOR (mode, v);
2862 }
2863
2864 if (SCALAR_FLOAT_MODE_P (mode)
2865 && GET_CODE (op0) == CONST_DOUBLE
2866 && GET_CODE (op1) == CONST_DOUBLE
2867 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2868 {
2869 if (code == AND
2870 || code == IOR
2871 || code == XOR)
2872 {
2873 long tmp0[4];
2874 long tmp1[4];
2875 REAL_VALUE_TYPE r;
2876 int i;
2877
2878 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2879 GET_MODE (op0));
2880 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2881 GET_MODE (op1));
2882 for (i = 0; i < 4; i++)
2883 {
2884 switch (code)
2885 {
2886 case AND:
2887 tmp0[i] &= tmp1[i];
2888 break;
2889 case IOR:
2890 tmp0[i] |= tmp1[i];
2891 break;
2892 case XOR:
2893 tmp0[i] ^= tmp1[i];
2894 break;
2895 default:
2896 gcc_unreachable ();
2897 }
2898 }
2899 real_from_target (&r, tmp0, mode);
2900 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2901 }
2902 else
2903 {
2904 REAL_VALUE_TYPE f0, f1, value, result;
2905 bool inexact;
2906
2907 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2908 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2909 real_convert (&f0, mode, &f0);
2910 real_convert (&f1, mode, &f1);
2911
2912 if (HONOR_SNANS (mode)
2913 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2914 return 0;
2915
2916 if (code == DIV
2917 && REAL_VALUES_EQUAL (f1, dconst0)
2918 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2919 return 0;
2920
2921 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2922 && flag_trapping_math
2923 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2924 {
2925 int s0 = REAL_VALUE_NEGATIVE (f0);
2926 int s1 = REAL_VALUE_NEGATIVE (f1);
2927
2928 switch (code)
2929 {
2930 case PLUS:
2931 /* Inf + -Inf = NaN plus exception. */
2932 if (s0 != s1)
2933 return 0;
2934 break;
2935 case MINUS:
2936 /* Inf - Inf = NaN plus exception. */
2937 if (s0 == s1)
2938 return 0;
2939 break;
2940 case DIV:
2941 /* Inf / Inf = NaN plus exception. */
2942 return 0;
2943 default:
2944 break;
2945 }
2946 }
2947
2948 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2949 && flag_trapping_math
2950 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2951 || (REAL_VALUE_ISINF (f1)
2952 && REAL_VALUES_EQUAL (f0, dconst0))))
2953 /* Inf * 0 = NaN plus exception. */
2954 return 0;
2955
2956 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2957 &f0, &f1);
2958 real_convert (&result, mode, &value);
2959
2960 /* Don't constant fold this floating point operation if
2961 the result has overflowed and flag_trapping_math. */
2962
2963 if (flag_trapping_math
2964 && MODE_HAS_INFINITIES (mode)
2965 && REAL_VALUE_ISINF (result)
2966 && !REAL_VALUE_ISINF (f0)
2967 && !REAL_VALUE_ISINF (f1))
2968 /* Overflow plus exception. */
2969 return 0;
2970
2971 /* Don't constant fold this floating point operation if the
2974 2972 result may depend upon the run-time rounding mode and
2973 flag_rounding_math is set, or if GCC's software emulation
2974 is unable to accurately represent the result. */
2975
2976 if ((flag_rounding_math
2977 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2978 && !flag_unsafe_math_optimizations))
2979 && (inexact || !real_identical (&result, &value)))
2980 return NULL_RTX;
2981
2982 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2983 }
2984 }
2985
2986 /* We can fold some multi-word operations. */
2987 if (GET_MODE_CLASS (mode) == MODE_INT
2988 && width == HOST_BITS_PER_WIDE_INT * 2
2989 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2990 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2991 {
2992 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2993 HOST_WIDE_INT h1, h2, hv, ht;
2994
2995 if (GET_CODE (op0) == CONST_DOUBLE)
2996 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2997 else
2998 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2999
3000 if (GET_CODE (op1) == CONST_DOUBLE)
3001 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3002 else
3003 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3004
3005 switch (code)
3006 {
3007 case MINUS:
3008 /* A - B == A + (-B). */
3009 neg_double (l2, h2, &lv, &hv);
3010 l2 = lv, h2 = hv;
3011
3012 /* Fall through.... */
3013
3014 case PLUS:
3015 add_double (l1, h1, l2, h2, &lv, &hv);
3016 break;
3017
3018 case MULT:
3019 mul_double (l1, h1, l2, h2, &lv, &hv);
3020 break;
3021
3022 case DIV:
3023 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3024 &lv, &hv, &lt, &ht))
3025 return 0;
3026 break;
3027
3028 case MOD:
3029 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3030 &lt, &ht, &lv, &hv))
3031 return 0;
3032 break;
3033
3034 case UDIV:
3035 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3036 &lv, &hv, &lt, &ht))
3037 return 0;
3038 break;
3039
3040 case UMOD:
3041 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3042 &lt, &ht, &lv, &hv))
3043 return 0;
3044 break;
3045
3046 case AND:
3047 lv = l1 & l2, hv = h1 & h2;
3048 break;
3049
3050 case IOR:
3051 lv = l1 | l2, hv = h1 | h2;
3052 break;
3053
3054 case XOR:
3055 lv = l1 ^ l2, hv = h1 ^ h2;
3056 break;
3057
3058 case SMIN:
3059 if (h1 < h2
3060 || (h1 == h2
3061 && ((unsigned HOST_WIDE_INT) l1
3062 < (unsigned HOST_WIDE_INT) l2)))
3063 lv = l1, hv = h1;
3064 else
3065 lv = l2, hv = h2;
3066 break;
3067
3068 case SMAX:
3069 if (h1 > h2
3070 || (h1 == h2
3071 && ((unsigned HOST_WIDE_INT) l1
3072 > (unsigned HOST_WIDE_INT) l2)))
3073 lv = l1, hv = h1;
3074 else
3075 lv = l2, hv = h2;
3076 break;
3077
3078 case UMIN:
3079 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3080 || (h1 == h2
3081 && ((unsigned HOST_WIDE_INT) l1
3082 < (unsigned HOST_WIDE_INT) l2)))
3083 lv = l1, hv = h1;
3084 else
3085 lv = l2, hv = h2;
3086 break;
3087
3088 case UMAX:
3089 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3090 || (h1 == h2
3091 && ((unsigned HOST_WIDE_INT) l1
3092 > (unsigned HOST_WIDE_INT) l2)))
3093 lv = l1, hv = h1;
3094 else
3095 lv = l2, hv = h2;
3096 break;
3097
3098 case LSHIFTRT: case ASHIFTRT:
3099 case ASHIFT:
3100 case ROTATE: case ROTATERT:
3101 if (SHIFT_COUNT_TRUNCATED)
3102 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3103
3104 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3105 return 0;
3106
3107 if (code == LSHIFTRT || code == ASHIFTRT)
3108 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3109 code == ASHIFTRT);
3110 else if (code == ASHIFT)
3111 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3112 else if (code == ROTATE)
3113 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3114 else /* code == ROTATERT */
3115 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3116 break;
3117
3118 default:
3119 return 0;
3120 }
3121
3122 return immed_double_const (lv, hv, mode);
3123 }
3124
3125 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3126 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3127 {
3128 /* Get the integer argument values in two forms:
3129 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
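 /* For example, with width == 8 an operand value of 0xff yields
    ARG0 == 255 and ARG0S == -1.  */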
3130
3131 arg0 = INTVAL (op0);
3132 arg1 = INTVAL (op1);
3133
3134 if (width < HOST_BITS_PER_WIDE_INT)
3135 {
3136 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3137 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3138
3139 arg0s = arg0;
3140 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3141 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3142
3143 arg1s = arg1;
3144 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3145 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3146 }
3147 else
3148 {
3149 arg0s = arg0;
3150 arg1s = arg1;
3151 }
3152
3153 /* Compute the value of the arithmetic. */
3154
3155 switch (code)
3156 {
3157 case PLUS:
3158 val = arg0s + arg1s;
3159 break;
3160
3161 case MINUS:
3162 val = arg0s - arg1s;
3163 break;
3164
3165 case MULT:
3166 val = arg0s * arg1s;
3167 break;
3168
3169 case DIV:
3170 if (arg1s == 0
3171 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3172 && arg1s == -1))
3173 return 0;
3174 val = arg0s / arg1s;
3175 break;
3176
3177 case MOD:
3178 if (arg1s == 0
3179 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3180 && arg1s == -1))
3181 return 0;
3182 val = arg0s % arg1s;
3183 break;
3184
3185 case UDIV:
3186 if (arg1 == 0
3187 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3188 && arg1s == -1))
3189 return 0;
3190 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3191 break;
3192
3193 case UMOD:
3194 if (arg1 == 0
3195 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3196 && arg1s == -1))
3197 return 0;
3198 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3199 break;
3200
3201 case AND:
3202 val = arg0 & arg1;
3203 break;
3204
3205 case IOR:
3206 val = arg0 | arg1;
3207 break;
3208
3209 case XOR:
3210 val = arg0 ^ arg1;
3211 break;
3212
3213 case LSHIFTRT:
3214 case ASHIFT:
3215 case ASHIFTRT:
3216 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3217 the value is in range. We can't return any old value for
3218 out-of-range arguments because either the middle-end (via
3219 shift_truncation_mask) or the back-end might be relying on
3220 target-specific knowledge. Nor can we rely on
3221 shift_truncation_mask, since the shift might not be part of an
3222 ashlM3, lshrM3 or ashrM3 instruction. */
3223 if (SHIFT_COUNT_TRUNCATED)
3224 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3225 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3226 return 0;
3227
3228 val = (code == ASHIFT
3229 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3230 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3231
3232 /* Sign-extend the result for arithmetic right shifts. */
3233 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3234 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3235 break;
3236
3237 case ROTATERT:
3238 if (arg1 < 0)
3239 return 0;
3240
3241 arg1 %= width;
3242 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3243 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3244 break;
3245
3246 case ROTATE:
3247 if (arg1 < 0)
3248 return 0;
3249
3250 arg1 %= width;
3251 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3252 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3253 break;
3254
3255 case COMPARE:
3256 /* Do nothing here. */
3257 return 0;
3258
3259 case SMIN:
3260 val = arg0s <= arg1s ? arg0s : arg1s;
3261 break;
3262
3263 case UMIN:
3264 val = ((unsigned HOST_WIDE_INT) arg0
3265 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3266 break;
3267
3268 case SMAX:
3269 val = arg0s > arg1s ? arg0s : arg1s;
3270 break;
3271
3272 case UMAX:
3273 val = ((unsigned HOST_WIDE_INT) arg0
3274 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3275 break;
3276
3277 case SS_PLUS:
3278 case US_PLUS:
3279 case SS_MINUS:
3280 case US_MINUS:
3281 case SS_ASHIFT:
3282 /* ??? There are simplifications that can be done. */
3283 return 0;
3284
3285 default:
3286 gcc_unreachable ();
3287 }
3288
3289 return gen_int_mode (val, mode);
3290 }
3291
3292 return NULL_RTX;
3293 }
3294
3295
3296 \f
3297 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3298 PLUS or MINUS.
3299
3302 3300 Rather than test for specific cases, we do this by a brute-force method
3301 and do all possible simplifications until no more changes occur. Then
3302 we rebuild the operation. */
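 /* For example, (minus (plus A B) (plus A C)) expands into the operand
    list A, B, -A, -C; the A and -A entries cancel during the pairwise
    simplification below, and the result is rebuilt as (minus B C).  */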
3303
3304 struct simplify_plus_minus_op_data
3305 {
3306 rtx op;
3307 short neg;
3308 };
3309
3310 static int
3311 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3312 {
3313 const struct simplify_plus_minus_op_data *d1 = p1;
3314 const struct simplify_plus_minus_op_data *d2 = p2;
3315 int result;
3316
3317 result = (commutative_operand_precedence (d2->op)
3318 - commutative_operand_precedence (d1->op));
3319 if (result)
3320 return result;
3321
3322 /* Group together equal REGs to do more simplification. */
3323 if (REG_P (d1->op) && REG_P (d2->op))
3324 return REGNO (d1->op) - REGNO (d2->op);
3325 else
3326 return 0;
3327 }
3328
3329 static rtx
3330 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3331 rtx op1)
3332 {
3333 struct simplify_plus_minus_op_data ops[8];
3334 rtx result, tem;
3335 int n_ops = 2, input_ops = 2;
3336 int changed, n_constants = 0, canonicalized = 0;
3337 int i, j;
3338
3339 memset (ops, 0, sizeof ops);
3340
3341 /* Set up the two operands and then expand them until nothing has been
3342 changed. If we run out of room in our array, give up; this should
3343 almost never happen. */
3344
3345 ops[0].op = op0;
3346 ops[0].neg = 0;
3347 ops[1].op = op1;
3348 ops[1].neg = (code == MINUS);
3349
3350 do
3351 {
3352 changed = 0;
3353
3354 for (i = 0; i < n_ops; i++)
3355 {
3356 rtx this_op = ops[i].op;
3357 int this_neg = ops[i].neg;
3358 enum rtx_code this_code = GET_CODE (this_op);
3359
3360 switch (this_code)
3361 {
3362 case PLUS:
3363 case MINUS:
3364 if (n_ops == 7)
3365 return NULL_RTX;
3366
3367 ops[n_ops].op = XEXP (this_op, 1);
3368 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3369 n_ops++;
3370
3371 ops[i].op = XEXP (this_op, 0);
3372 input_ops++;
3373 changed = 1;
3374 canonicalized |= this_neg;
3375 break;
3376
3377 case NEG:
3378 ops[i].op = XEXP (this_op, 0);
3379 ops[i].neg = ! this_neg;
3380 changed = 1;
3381 canonicalized = 1;
3382 break;
3383
3384 case CONST:
3385 if (n_ops < 7
3386 && GET_CODE (XEXP (this_op, 0)) == PLUS
3387 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3388 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3389 {
3390 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3391 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3392 ops[n_ops].neg = this_neg;
3393 n_ops++;
3394 changed = 1;
3395 canonicalized = 1;
3396 }
3397 break;
3398
3399 case NOT:
3400 /* ~a -> (-a - 1) */
3401 if (n_ops != 7)
3402 {
3403 ops[n_ops].op = constm1_rtx;
3404 ops[n_ops++].neg = this_neg;
3405 ops[i].op = XEXP (this_op, 0);
3406 ops[i].neg = !this_neg;
3407 changed = 1;
3408 canonicalized = 1;
3409 }
3410 break;
3411
3412 case CONST_INT:
3413 n_constants++;
3414 if (this_neg)
3415 {
3416 ops[i].op = neg_const_int (mode, this_op);
3417 ops[i].neg = 0;
3418 changed = 1;
3419 canonicalized = 1;
3420 }
3421 break;
3422
3423 default:
3424 break;
3425 }
3426 }
3427 }
3428 while (changed);
3429
3430 if (n_constants > 1)
3431 canonicalized = 1;
3432
3433 gcc_assert (n_ops >= 2);
3434
3435 /* If we only have two operands, we can avoid the loops. */
3436 if (n_ops == 2)
3437 {
3438 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3439 rtx lhs, rhs;
3440
3441 /* Get the two operands. Be careful with the order, especially for
3442 the cases where code == MINUS. */
3443 if (ops[0].neg && ops[1].neg)
3444 {
3445 lhs = gen_rtx_NEG (mode, ops[0].op);
3446 rhs = ops[1].op;
3447 }
3448 else if (ops[0].neg)
3449 {
3450 lhs = ops[1].op;
3451 rhs = ops[0].op;
3452 }
3453 else
3454 {
3455 lhs = ops[0].op;
3456 rhs = ops[1].op;
3457 }
3458
3459 return simplify_const_binary_operation (code, mode, lhs, rhs);
3460 }
3461
3462 /* Now simplify each pair of operands until nothing changes. */
3463 do
3464 {
3465 /* Insertion sort is good enough for an eight-element array. */
3466 for (i = 1; i < n_ops; i++)
3467 {
3468 struct simplify_plus_minus_op_data save;
3469 j = i - 1;
3470 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3471 continue;
3472
3473 canonicalized = 1;
3474 save = ops[i];
3475 do
3476 ops[j + 1] = ops[j];
3477 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3478 ops[j + 1] = save;
3479 }
3480
3481 /* This is only useful the first time through. */
3482 if (!canonicalized)
3483 return NULL_RTX;
3484
3485 changed = 0;
3486 for (i = n_ops - 1; i > 0; i--)
3487 for (j = i - 1; j >= 0; j--)
3488 {
3489 rtx lhs = ops[j].op, rhs = ops[i].op;
3490 int lneg = ops[j].neg, rneg = ops[i].neg;
3491
3492 if (lhs != 0 && rhs != 0)
3493 {
3494 enum rtx_code ncode = PLUS;
3495
3496 if (lneg != rneg)
3497 {
3498 ncode = MINUS;
3499 if (lneg)
3500 tem = lhs, lhs = rhs, rhs = tem;
3501 }
3502 else if (swap_commutative_operands_p (lhs, rhs))
3503 tem = lhs, lhs = rhs, rhs = tem;
3504
3505 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3506 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3507 {
3508 rtx tem_lhs, tem_rhs;
3509
3510 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3511 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3512 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3513
3514 if (tem && !CONSTANT_P (tem))
3515 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3516 }
3517 else
3518 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3519
3520 /* Reject "simplifications" that just wrap the two
3521 arguments in a CONST. Failure to do so can result
3522 in infinite recursion with simplify_binary_operation
3523 when it calls us to simplify CONST operations. */
3524 if (tem
3525 && ! (GET_CODE (tem) == CONST
3526 && GET_CODE (XEXP (tem, 0)) == ncode
3527 && XEXP (XEXP (tem, 0), 0) == lhs
3528 && XEXP (XEXP (tem, 0), 1) == rhs))
3529 {
3530 lneg &= rneg;
3531 if (GET_CODE (tem) == NEG)
3532 tem = XEXP (tem, 0), lneg = !lneg;
3533 if (GET_CODE (tem) == CONST_INT && lneg)
3534 tem = neg_const_int (mode, tem), lneg = 0;
3535
3536 ops[i].op = tem;
3537 ops[i].neg = lneg;
3538 ops[j].op = NULL_RTX;
3539 changed = 1;
3540 }
3541 }
3542 }
3543
3544 /* Pack all the operands to the lower-numbered entries. */
3545 for (i = 0, j = 0; j < n_ops; j++)
3546 if (ops[j].op)
3547 {
3548 ops[i] = ops[j];
3549 i++;
3550 }
3551 n_ops = i;
3552 }
3553 while (changed);
3554
3555 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3556 if (n_ops == 2
3557 && GET_CODE (ops[1].op) == CONST_INT
3558 && CONSTANT_P (ops[0].op)
3559 && ops[0].neg)
3560 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3561
3562 /* We suppressed creation of trivial CONST expressions in the
3563 combination loop to avoid recursion. Create one manually now.
3564 The combination loop should have ensured that there is exactly
3565 one CONST_INT, and the sort will have ensured that it is last
3566 in the array and that any other constant will be next-to-last. */
3567
3568 if (n_ops > 1
3569 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3570 && CONSTANT_P (ops[n_ops - 2].op))
3571 {
3572 rtx value = ops[n_ops - 1].op;
3573 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3574 value = neg_const_int (mode, value);
3575 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3576 n_ops--;
3577 }
3578
3579 /* Put a non-negated operand first, if possible. */
3580
3581 for (i = 0; i < n_ops && ops[i].neg; i++)
3582 continue;
3583 if (i == n_ops)
3584 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3585 else if (i != 0)
3586 {
3587 tem = ops[0].op;
3588 ops[0] = ops[i];
3589 ops[i].op = tem;
3590 ops[i].neg = 1;
3591 }
3592
3593 /* Now make the result by performing the requested operations. */
3594 result = ops[0].op;
3595 for (i = 1; i < n_ops; i++)
3596 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3597 mode, result, ops[i].op);
3598
3599 return result;
3600 }
3601
3602 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3603 static bool
3604 plus_minus_operand_p (rtx x)
3605 {
3606 return GET_CODE (x) == PLUS
3607 || GET_CODE (x) == MINUS
3608 || (GET_CODE (x) == CONST
3609 && GET_CODE (XEXP (x, 0)) == PLUS
3610 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3611 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3612 }
3613
3614 /* Like simplify_binary_operation except used for relational operators.
3617 3615 MODE is the mode of the result. If MODE is VOIDmode, the two operands
3618 3616 must not both be VOIDmode.
3617
3620 3618 CMP_MODE specifies the mode in which the comparison is done, so it is
3619 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3620 the operands or, if both are VOIDmode, the operands are compared in
3621 "infinite precision". */
3622 rtx
3623 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3624 enum machine_mode cmp_mode, rtx op0, rtx op1)
3625 {
3626 rtx tem, trueop0, trueop1;
3627
3628 if (cmp_mode == VOIDmode)
3629 cmp_mode = GET_MODE (op0);
3630 if (cmp_mode == VOIDmode)
3631 cmp_mode = GET_MODE (op1);
3632
3633 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3634 if (tem)
3635 {
3636 if (SCALAR_FLOAT_MODE_P (mode))
3637 {
3638 if (tem == const0_rtx)
3639 return CONST0_RTX (mode);
3640 #ifdef FLOAT_STORE_FLAG_VALUE
3641 {
3642 REAL_VALUE_TYPE val;
3643 val = FLOAT_STORE_FLAG_VALUE (mode);
3644 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3645 }
3646 #else
3647 return NULL_RTX;
3648 #endif
3649 }
3650 if (VECTOR_MODE_P (mode))
3651 {
3652 if (tem == const0_rtx)
3653 return CONST0_RTX (mode);
3654 #ifdef VECTOR_STORE_FLAG_VALUE
3655 {
3656 int i, units;
3657 rtvec v;
3658
3659 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3660 if (val == NULL_RTX)
3661 return NULL_RTX;
3662 if (val == const1_rtx)
3663 return CONST1_RTX (mode);
3664
3665 units = GET_MODE_NUNITS (mode);
3666 v = rtvec_alloc (units);
3667 for (i = 0; i < units; i++)
3668 RTVEC_ELT (v, i) = val;
3669 return gen_rtx_raw_CONST_VECTOR (mode, v);
3670 }
3671 #else
3672 return NULL_RTX;
3673 #endif
3674 }
3675
3676 return tem;
3677 }
3678
3679 /* For the following tests, ensure const0_rtx is op1. */
3680 if (swap_commutative_operands_p (op0, op1)
3681 || (op0 == const0_rtx && op1 != const0_rtx))
3682 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3683
3684 /* If op0 is a compare, extract the comparison arguments from it. */
3685 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3686 return simplify_relational_operation (code, mode, VOIDmode,
3687 XEXP (op0, 0), XEXP (op0, 1));
3688
3689 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3690 || CC0_P (op0))
3691 return NULL_RTX;
3692
3693 trueop0 = avoid_constant_pool_reference (op0);
3694 trueop1 = avoid_constant_pool_reference (op1);
3695 return simplify_relational_operation_1 (code, mode, cmp_mode,
3696 trueop0, trueop1);
3697 }
3698
3699 /* This part of simplify_relational_operation is only used when CMP_MODE
3700 is not in class MODE_CC (i.e. it is a real comparison).
3701
3704 3702 MODE is the mode of the result, while CMP_MODE specifies the mode in
3705 3703 which the comparison is done, so it is the mode of the operands. */
3704
3705 static rtx
3706 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3707 enum machine_mode cmp_mode, rtx op0, rtx op1)
3708 {
3709 enum rtx_code op0code = GET_CODE (op0);
3710
3711 if (op1 == const0_rtx && COMPARISON_P (op0))
3712 {
3713 /* If op0 is a comparison, extract the comparison arguments
3714 from it. */
3715 if (code == NE)
3716 {
3717 if (GET_MODE (op0) == mode)
3718 return simplify_rtx (op0);
3719 else
3720 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3721 XEXP (op0, 0), XEXP (op0, 1));
3722 }
3723 else if (code == EQ)
3724 {
3725 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3726 if (new_code != UNKNOWN)
3727 return simplify_gen_relational (new_code, mode, VOIDmode,
3728 XEXP (op0, 0), XEXP (op0, 1));
3729 }
3730 }
3731
3732 if (op1 == const0_rtx)
3733 {
3734 /* Canonicalize (GTU x 0) as (NE x 0). */
3735 if (code == GTU)
3736 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3737 /* Canonicalize (LEU x 0) as (EQ x 0). */
3738 if (code == LEU)
3739 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3740 }
3741 else if (op1 == const1_rtx)
3742 {
3743 switch (code)
3744 {
3745 case GE:
3746 /* Canonicalize (GE x 1) as (GT x 0). */
3747 return simplify_gen_relational (GT, mode, cmp_mode,
3748 op0, const0_rtx);
3749 case GEU:
3750 /* Canonicalize (GEU x 1) as (NE x 0). */
3751 return simplify_gen_relational (NE, mode, cmp_mode,
3752 op0, const0_rtx);
3753 case LT:
3754 /* Canonicalize (LT x 1) as (LE x 0). */
3755 return simplify_gen_relational (LE, mode, cmp_mode,
3756 op0, const0_rtx);
3757 case LTU:
3758 /* Canonicalize (LTU x 1) as (EQ x 0). */
3759 return simplify_gen_relational (EQ, mode, cmp_mode,
3760 op0, const0_rtx);
3761 default:
3762 break;
3763 }
3764 }
3765 else if (op1 == constm1_rtx)
3766 {
3767 /* Canonicalize (LE x -1) as (LT x 0). */
3768 if (code == LE)
3769 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3770 /* Canonicalize (GT x -1) as (GE x 0). */
3771 if (code == GT)
3772 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3773 }
3774
3775 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
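     /* For illustration: (eq:SI (plus:SI x (const_int 3)) (const_int 7))
	becomes (eq:SI x (const_int 4)).  */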
3776 if ((code == EQ || code == NE)
3777 && (op0code == PLUS || op0code == MINUS)
3778 && CONSTANT_P (op1)
3779 && CONSTANT_P (XEXP (op0, 1))
3780 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3781 {
3782 rtx x = XEXP (op0, 0);
3783 rtx c = XEXP (op0, 1);
3784
3785 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3786 cmp_mode, op1, c);
3787 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3788 }
3789
3790 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3791 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3792 if (code == NE
3793 && op1 == const0_rtx
3794 && GET_MODE_CLASS (mode) == MODE_INT
3795 && cmp_mode != VOIDmode
3796 /* ??? Work-around BImode bugs in the ia64 backend. */
3797 && mode != BImode
3798 && cmp_mode != BImode
3799 && nonzero_bits (op0, cmp_mode) == 1
3800 && STORE_FLAG_VALUE == 1)
3801 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3802 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3803 : lowpart_subreg (mode, op0, cmp_mode);
3804
3805 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3806 if ((code == EQ || code == NE)
3807 && op1 == const0_rtx
3808 && op0code == XOR)
3809 return simplify_gen_relational (code, mode, cmp_mode,
3810 XEXP (op0, 0), XEXP (op0, 1));
3811
3812 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3813 if ((code == EQ || code == NE)
3814 && op0code == XOR
3815 && rtx_equal_p (XEXP (op0, 0), op1)
3816 && !side_effects_p (XEXP (op0, 0)))
3817 return simplify_gen_relational (code, mode, cmp_mode,
3818 XEXP (op0, 1), const0_rtx);
3819
3820 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3821 if ((code == EQ || code == NE)
3822 && op0code == XOR
3823 && rtx_equal_p (XEXP (op0, 1), op1)
3824 && !side_effects_p (XEXP (op0, 1)))
3825 return simplify_gen_relational (code, mode, cmp_mode,
3826 XEXP (op0, 0), const0_rtx);
3827
3828 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
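     /* For illustration: (eq:SI (xor:SI x (const_int 5)) (const_int 3))
	becomes (eq:SI x (const_int 6)), since 5 ^ 3 == 6.  */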
3829 if ((code == EQ || code == NE)
3830 && op0code == XOR
3831 && (GET_CODE (op1) == CONST_INT
3832 || GET_CODE (op1) == CONST_DOUBLE)
3833 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3834 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3835 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3836 simplify_gen_binary (XOR, cmp_mode,
3837 XEXP (op0, 1), op1));
3838
3839 if (op0code == POPCOUNT && op1 == const0_rtx)
3840 switch (code)
3841 {
3842 case EQ:
3843 case LE:
3844 case LEU:
3845 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3846 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3847 XEXP (op0, 0), const0_rtx);
3848
3849 case NE:
3850 case GT:
3851 case GTU:
3852 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3853 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3854 XEXP (op0, 0), const0_rtx);
3855
3856 default:
3857 break;
3858 }
3859
3860 return NULL_RTX;
3861 }
3862
3863 /* Check if the given comparison (done in the given MODE) is actually a
3864 tautology or a contradiction.
3865 If no simplification is possible, this function returns zero.
3866 Otherwise, it returns either const_true_rtx or const0_rtx. */
3867
3868 rtx
3869 simplify_const_relational_operation (enum rtx_code code,
3870 enum machine_mode mode,
3871 rtx op0, rtx op1)
3872 {
3873 int equal, op0lt, op0ltu, op1lt, op1ltu;
3874 rtx tem;
3875 rtx trueop0;
3876 rtx trueop1;
3877
3878 gcc_assert (mode != VOIDmode
3879 || (GET_MODE (op0) == VOIDmode
3880 && GET_MODE (op1) == VOIDmode));
3881
3882 /* If op0 is a compare, extract the comparison arguments from it. */
3883 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3884 {
3885 op1 = XEXP (op0, 1);
3886 op0 = XEXP (op0, 0);
3887
3888 if (GET_MODE (op0) != VOIDmode)
3889 mode = GET_MODE (op0);
3890 else if (GET_MODE (op1) != VOIDmode)
3891 mode = GET_MODE (op1);
3892 else
3893 return 0;
3894 }
3895
3896 /* We can't simplify MODE_CC values since we don't know what the
3897 actual comparison is. */
3898 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3899 return 0;
3900
3901 /* Make sure the constant is second. */
3902 if (swap_commutative_operands_p (op0, op1))
3903 {
3904 tem = op0, op0 = op1, op1 = tem;
3905 code = swap_condition (code);
3906 }
3907
3908 trueop0 = avoid_constant_pool_reference (op0);
3909 trueop1 = avoid_constant_pool_reference (op1);
3910
3911 /* For integer comparisons of A and B maybe we can simplify A - B and can
3912 then simplify a comparison of that with zero. If A and B are both either
3913 a register or a CONST_INT, this can't help; testing for these cases will
3914 prevent infinite recursion here and speed things up.
3915
3916 We can only do this for EQ and NE comparisons, as otherwise we may
3917 lose or introduce overflow that we cannot disregard as undefined, since
3918 we do not know the signedness of the operation on either the left or
3919 the right hand side of the comparison. */
3920
3921 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3922 && (code == EQ || code == NE)
3923 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3924 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3925 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3926 /* We cannot do this if tem is a nonzero address. */
3927 && ! nonzero_address_p (tem))
3928 return simplify_const_relational_operation (signed_condition (code),
3929 mode, tem, const0_rtx);
3930
3931 if (! HONOR_NANS (mode) && code == ORDERED)
3932 return const_true_rtx;
3933
3934 if (! HONOR_NANS (mode) && code == UNORDERED)
3935 return const0_rtx;
3936
3937 /* For modes without NaNs, if the two operands are equal, we know the
3938 result except if they have side-effects. */
3939 if (! HONOR_NANS (GET_MODE (trueop0))
3940 && rtx_equal_p (trueop0, trueop1)
3941 && ! side_effects_p (trueop0))
3942 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3943
3944 /* If the operands are floating-point constants, see if we can fold
3945 the result. */
3946 else if (GET_CODE (trueop0) == CONST_DOUBLE
3947 && GET_CODE (trueop1) == CONST_DOUBLE
3948 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3949 {
3950 REAL_VALUE_TYPE d0, d1;
3951
3952 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3953 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3954
3955 /* Comparisons are unordered iff at least one of the values is NaN. */
3956 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3957 switch (code)
3958 {
3959 case UNEQ:
3960 case UNLT:
3961 case UNGT:
3962 case UNLE:
3963 case UNGE:
3964 case NE:
3965 case UNORDERED:
3966 return const_true_rtx;
3967 case EQ:
3968 case LT:
3969 case GT:
3970 case LE:
3971 case GE:
3972 case LTGT:
3973 case ORDERED:
3974 return const0_rtx;
3975 default:
3976 return 0;
3977 }
3978
3979 equal = REAL_VALUES_EQUAL (d0, d1);
3980 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3981 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3982 }
3983
3984 /* Otherwise, see if the operands are both integers. */
3985 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3986 && (GET_CODE (trueop0) == CONST_DOUBLE
3987 || GET_CODE (trueop0) == CONST_INT)
3988 && (GET_CODE (trueop1) == CONST_DOUBLE
3989 || GET_CODE (trueop1) == CONST_INT))
3990 {
3991 int width = GET_MODE_BITSIZE (mode);
3992 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3993 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3994
3995 /* Get the two words comprising each integer constant. */
3996 if (GET_CODE (trueop0) == CONST_DOUBLE)
3997 {
3998 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3999 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4000 }
4001 else
4002 {
4003 l0u = l0s = INTVAL (trueop0);
4004 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4005 }
4006
4007 if (GET_CODE (trueop1) == CONST_DOUBLE)
4008 {
4009 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4010 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4011 }
4012 else
4013 {
4014 l1u = l1s = INTVAL (trueop1);
4015 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4016 }
4017
4018 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4019 we have to sign or zero-extend the values. */
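     /* For illustration, with width == 8 (QImode): the constants -1 and
	255 both end up with l?u == 0xff and l?s == -1, so they compare
	EQ here regardless of signedness.  */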
4020 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4021 {
4022 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4023 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4024
4025 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4026 l0s |= ((HOST_WIDE_INT) (-1) << width);
4027
4028 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4029 l1s |= ((HOST_WIDE_INT) (-1) << width);
4030 }
4031 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4032 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4033
4034 equal = (h0u == h1u && l0u == l1u);
4035 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4036 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4037 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4038 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4039 }
4040
4041 /* Otherwise, there are some code-specific tests we can make. */
4042 else
4043 {
4044 /* Optimize comparisons with upper and lower bounds. */
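      /* For illustration: in QImode, (leu x (const_int 255)) folds to
	 const_true_rtx and (gtu x (const_int 255)) folds to const0_rtx.  */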
4045 if (SCALAR_INT_MODE_P (mode)
4046 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4047 {
4048 rtx mmin, mmax;
4049 int sign;
4050
4051 if (code == GEU
4052 || code == LEU
4053 || code == GTU
4054 || code == LTU)
4055 sign = 0;
4056 else
4057 sign = 1;
4058
4059 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4060
4061 tem = NULL_RTX;
4062 switch (code)
4063 {
4064 case GEU:
4065 case GE:
4066 /* x >= min is always true. */
4067 if (rtx_equal_p (trueop1, mmin))
4068 tem = const_true_rtx;
4069 else
4070 break;
4071
4072 case LEU:
4073 case LE:
4074 /* x <= max is always true. */
4075 if (rtx_equal_p (trueop1, mmax))
4076 tem = const_true_rtx;
4077 break;
4078
4079 case GTU:
4080 case GT:
4081 /* x > max is always false. */
4082 if (rtx_equal_p (trueop1, mmax))
4083 tem = const0_rtx;
4084 break;
4085
4086 case LTU:
4087 case LT:
4088 /* x < min is always false. */
4089 if (rtx_equal_p (trueop1, mmin))
4090 tem = const0_rtx;
4091 break;
4092
4093 default:
4094 break;
4095 }
4096 if (tem == const0_rtx
4097 || tem == const_true_rtx)
4098 return tem;
4099 }
4100
4101 switch (code)
4102 {
4103 case EQ:
4104 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4105 return const0_rtx;
4106 break;
4107
4108 case NE:
4109 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4110 return const_true_rtx;
4111 break;
4112
4113 case LT:
4114 /* Optimize abs(x) < 0.0. */
4115 if (trueop1 == CONST0_RTX (mode)
4116 && !HONOR_SNANS (mode)
4117 && (!INTEGRAL_MODE_P (mode)
4118 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4119 {
4120 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4121 : trueop0;
4122 if (GET_CODE (tem) == ABS)
4123 {
4124 if (INTEGRAL_MODE_P (mode)
4125 && (issue_strict_overflow_warning
4126 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4127 warning (OPT_Wstrict_overflow,
4128 ("assuming signed overflow does not occur when "
4129 "assuming abs (x) < 0 is false"));
4130 return const0_rtx;
4131 }
4132 }
4133
4134 /* Optimize popcount (x) < 0. */
4135 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4136 return const_true_rtx;
4137 break;
4138
4139 case GE:
4140 /* Optimize abs(x) >= 0.0. */
4141 if (trueop1 == CONST0_RTX (mode)
4142 && !HONOR_NANS (mode)
4143 && (!INTEGRAL_MODE_P (mode)
4144 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4145 {
4146 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4147 : trueop0;
4148 if (GET_CODE (tem) == ABS)
4149 {
4150 if (INTEGRAL_MODE_P (mode)
4151 && (issue_strict_overflow_warning
4152 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4153 warning (OPT_Wstrict_overflow,
4154 ("assuming signed overflow does not occur when "
4155 "assuming abs (x) >= 0 is true"));
4156 return const_true_rtx;
4157 }
4158 }
4159
4160 /* Optimize popcount (x) >= 0. */
4161 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4162 return const_true_rtx;
4163 break;
4164
4165 case UNGE:
4166 /* Optimize ! (abs(x) < 0.0). */
4167 if (trueop1 == CONST0_RTX (mode))
4168 {
4169 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4170 : trueop0;
4171 if (GET_CODE (tem) == ABS)
4172 return const_true_rtx;
4173 }
4174 break;
4175
4176 default:
4177 break;
4178 }
4179
4180 return 0;
4181 }
4182
4183 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4184 as appropriate. */
4185 switch (code)
4186 {
4187 case EQ:
4188 case UNEQ:
4189 return equal ? const_true_rtx : const0_rtx;
4190 case NE:
4191 case LTGT:
4192 return ! equal ? const_true_rtx : const0_rtx;
4193 case LT:
4194 case UNLT:
4195 return op0lt ? const_true_rtx : const0_rtx;
4196 case GT:
4197 case UNGT:
4198 return op1lt ? const_true_rtx : const0_rtx;
4199 case LTU:
4200 return op0ltu ? const_true_rtx : const0_rtx;
4201 case GTU:
4202 return op1ltu ? const_true_rtx : const0_rtx;
4203 case LE:
4204 case UNLE:
4205 return equal || op0lt ? const_true_rtx : const0_rtx;
4206 case GE:
4207 case UNGE:
4208 return equal || op1lt ? const_true_rtx : const0_rtx;
4209 case LEU:
4210 return equal || op0ltu ? const_true_rtx : const0_rtx;
4211 case GEU:
4212 return equal || op1ltu ? const_true_rtx : const0_rtx;
4213 case ORDERED:
4214 return const_true_rtx;
4215 case UNORDERED:
4216 return const0_rtx;
4217 default:
4218 gcc_unreachable ();
4219 }
4220 }
4221 \f
4222 /* Simplify CODE, an operation with result mode MODE and three operands,
4223 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4224 a constant. Return 0 if no simplification is possible. */
4225
4226 rtx
4227 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4228 enum machine_mode op0_mode, rtx op0, rtx op1,
4229 rtx op2)
4230 {
4231 unsigned int width = GET_MODE_BITSIZE (mode);
4232
4233 /* VOIDmode means "infinite" precision. */
4234 if (width == 0)
4235 width = HOST_BITS_PER_WIDE_INT;
4236
4237 switch (code)
4238 {
4239 case SIGN_EXTRACT:
4240 case ZERO_EXTRACT:
4241 if (GET_CODE (op0) == CONST_INT
4242 && GET_CODE (op1) == CONST_INT
4243 && GET_CODE (op2) == CONST_INT
4244 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4245 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4246 {
4247 /* Extracting a bit-field from a constant. */
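	  /* For illustration, assuming !BITS_BIG_ENDIAN:
	     (zero_extract:SI (const_int 0x5a) (const_int 4) (const_int 1))
	     folds to (const_int 13); the corresponding SIGN_EXTRACT folds
	     to (const_int -3).  */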
4248 HOST_WIDE_INT val = INTVAL (op0);
4249
4250 if (BITS_BIG_ENDIAN)
4251 val >>= (GET_MODE_BITSIZE (op0_mode)
4252 - INTVAL (op2) - INTVAL (op1));
4253 else
4254 val >>= INTVAL (op2);
4255
4256 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4257 {
4258 /* First zero-extend. */
4259 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4260 /* If desired, propagate sign bit. */
4261 if (code == SIGN_EXTRACT
4262 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4263 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4264 }
4265
4266 /* Clear the bits that don't belong in our mode,
4267 unless they and our sign bit are all one.
4268 So we get either a reasonable negative value or a reasonable
4269 unsigned value for this mode. */
4270 if (width < HOST_BITS_PER_WIDE_INT
4271 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4272 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4273 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4274
4275 return gen_int_mode (val, mode);
4276 }
4277 break;
4278
4279 case IF_THEN_ELSE:
4280 if (GET_CODE (op0) == CONST_INT)
4281 return op0 != const0_rtx ? op1 : op2;
4282
4283 /* Convert c ? a : a into "a". */
4284 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4285 return op1;
4286
4287 /* Convert a != b ? a : b into "a". */
4288 if (GET_CODE (op0) == NE
4289 && ! side_effects_p (op0)
4290 && ! HONOR_NANS (mode)
4291 && ! HONOR_SIGNED_ZEROS (mode)
4292 && ((rtx_equal_p (XEXP (op0, 0), op1)
4293 && rtx_equal_p (XEXP (op0, 1), op2))
4294 || (rtx_equal_p (XEXP (op0, 0), op2)
4295 && rtx_equal_p (XEXP (op0, 1), op1))))
4296 return op1;
4297
4298 /* Convert a == b ? a : b into "b". */
4299 if (GET_CODE (op0) == EQ
4300 && ! side_effects_p (op0)
4301 && ! HONOR_NANS (mode)
4302 && ! HONOR_SIGNED_ZEROS (mode)
4303 && ((rtx_equal_p (XEXP (op0, 0), op1)
4304 && rtx_equal_p (XEXP (op0, 1), op2))
4305 || (rtx_equal_p (XEXP (op0, 0), op2)
4306 && rtx_equal_p (XEXP (op0, 1), op1))))
4307 return op2;
4308
4309 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4310 {
4311 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4312 ? GET_MODE (XEXP (op0, 1))
4313 : GET_MODE (XEXP (op0, 0)));
4314 rtx temp;
4315
4316 /* Look for happy constants in op1 and op2. */
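	  /* For illustration, assuming STORE_FLAG_VALUE == 1:
	     (if_then_else (lt x y) (const_int 1) (const_int 0)) becomes
	     (lt x y), and with the constants swapped it becomes (ge x y)
	     when the comparison can be reversed.  */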
4317 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4318 {
4319 HOST_WIDE_INT t = INTVAL (op1);
4320 HOST_WIDE_INT f = INTVAL (op2);
4321
4322 if (t == STORE_FLAG_VALUE && f == 0)
4323 code = GET_CODE (op0);
4324 else if (t == 0 && f == STORE_FLAG_VALUE)
4325 {
4326 enum rtx_code tmp;
4327 tmp = reversed_comparison_code (op0, NULL_RTX);
4328 if (tmp == UNKNOWN)
4329 break;
4330 code = tmp;
4331 }
4332 else
4333 break;
4334
4335 return simplify_gen_relational (code, mode, cmp_mode,
4336 XEXP (op0, 0), XEXP (op0, 1));
4337 }
4338
4339 if (cmp_mode == VOIDmode)
4340 cmp_mode = op0_mode;
4341 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4342 cmp_mode, XEXP (op0, 0),
4343 XEXP (op0, 1));
4344
4345 /* See if any simplifications were possible. */
4346 if (temp)
4347 {
4348 if (GET_CODE (temp) == CONST_INT)
4349 return temp == const0_rtx ? op2 : op1;
4350 else if (temp)
4351 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4352 }
4353 }
4354 break;
4355
4356 case VEC_MERGE:
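      /* In the constant-mask case handled below, bit I of op2 selects
	 element I from op0 when set and from op1 when clear.  E.g. for a
	 4-element vector, a mask of (const_int 5) (binary 0101) takes
	 elements 0 and 2 from op0 and elements 1 and 3 from op1; a zero
	 mask yields op1 and an all-ones mask yields op0.  */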
4357 gcc_assert (GET_MODE (op0) == mode);
4358 gcc_assert (GET_MODE (op1) == mode);
4359 gcc_assert (VECTOR_MODE_P (mode));
4360 op2 = avoid_constant_pool_reference (op2);
4361 if (GET_CODE (op2) == CONST_INT)
4362 {
4363 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4364 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4365 int mask = (1 << n_elts) - 1;
4366
4367 if (!(INTVAL (op2) & mask))
4368 return op1;
4369 if ((INTVAL (op2) & mask) == mask)
4370 return op0;
4371
4372 op0 = avoid_constant_pool_reference (op0);
4373 op1 = avoid_constant_pool_reference (op1);
4374 if (GET_CODE (op0) == CONST_VECTOR
4375 && GET_CODE (op1) == CONST_VECTOR)
4376 {
4377 rtvec v = rtvec_alloc (n_elts);
4378 unsigned int i;
4379
4380 for (i = 0; i < n_elts; i++)
4381 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4382 ? CONST_VECTOR_ELT (op0, i)
4383 : CONST_VECTOR_ELT (op1, i));
4384 return gen_rtx_CONST_VECTOR (mode, v);
4385 }
4386 }
4387 break;
4388
4389 default:
4390 gcc_unreachable ();
4391 }
4392
4393 return 0;
4394 }
4395
4396 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4397 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4398
4399 Works by unpacking OP into a collection of 8-bit values
4400 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4401 and then repacking them again for OUTERMODE. */
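/* For illustration: a QImode subreg at byte 0 of the HImode constant
   0x1234 yields (const_int 0x34) on a little-endian target and
   (const_int 0x12) on a big-endian one.  */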
4402
4403 static rtx
4404 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4405 enum machine_mode innermode, unsigned int byte)
4406 {
4407 /* We support up to 512-bit values (for V8DFmode). */
4408 enum {
4409 max_bitsize = 512,
4410 value_bit = 8,
4411 value_mask = (1 << value_bit) - 1
4412 };
4413 unsigned char value[max_bitsize / value_bit];
4414 int value_start;
4415 int i;
4416 int elem;
4417
4418 int num_elem;
4419 rtx * elems;
4420 int elem_bitsize;
4421 rtx result_s;
4422 rtvec result_v = NULL;
4423 enum mode_class outer_class;
4424 enum machine_mode outer_submode;
4425
4426 /* Some ports misuse CCmode. */
4427 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4428 return op;
4429
4430 /* We have no way to represent a complex constant at the rtl level. */
4431 if (COMPLEX_MODE_P (outermode))
4432 return NULL_RTX;
4433
4434 /* Unpack the value. */
4435
4436 if (GET_CODE (op) == CONST_VECTOR)
4437 {
4438 num_elem = CONST_VECTOR_NUNITS (op);
4439 elems = &CONST_VECTOR_ELT (op, 0);
4440 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4441 }
4442 else
4443 {
4444 num_elem = 1;
4445 elems = &op;
4446 elem_bitsize = max_bitsize;
4447 }
4448 /* If this asserts, it is too complicated; reducing value_bit may help. */
4449 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4450 /* I don't know how to handle endianness of sub-units. */
4451 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4452
4453 for (elem = 0; elem < num_elem; elem++)
4454 {
4455 unsigned char * vp;
4456 rtx el = elems[elem];
4457
4458 /* Vectors are kept in target memory order. (This is probably
4459 a mistake.) */
4460 {
4461 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4462 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4463 / BITS_PER_UNIT);
4464 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4465 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4466 unsigned bytele = (subword_byte % UNITS_PER_WORD
4467 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4468 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4469 }
4470
4471 switch (GET_CODE (el))
4472 {
4473 case CONST_INT:
4474 for (i = 0;
4475 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4476 i += value_bit)
4477 *vp++ = INTVAL (el) >> i;
4478 /* CONST_INTs are always logically sign-extended. */
4479 for (; i < elem_bitsize; i += value_bit)
4480 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4481 break;
4482
4483 case CONST_DOUBLE:
4484 if (GET_MODE (el) == VOIDmode)
4485 {
4486 /* If this triggers, someone should have generated a
4487 CONST_INT instead. */
4488 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4489
4490 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4491 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4492 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4493 {
4494 *vp++
4495 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4496 i += value_bit;
4497 }
4498 /* It shouldn't matter what's done here, so fill it with
4499 zero. */
4500 for (; i < elem_bitsize; i += value_bit)
4501 *vp++ = 0;
4502 }
4503 else
4504 {
4505 long tmp[max_bitsize / 32];
4506 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4507
4508 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4509 gcc_assert (bitsize <= elem_bitsize);
4510 gcc_assert (bitsize % value_bit == 0);
4511
4512 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4513 GET_MODE (el));
4514
4515 /* real_to_target produces its result in words affected by
4516 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4517 and use WORDS_BIG_ENDIAN instead; see the documentation
4518 of SUBREG in rtl.texi. */
4519 for (i = 0; i < bitsize; i += value_bit)
4520 {
4521 int ibase;
4522 if (WORDS_BIG_ENDIAN)
4523 ibase = bitsize - 1 - i;
4524 else
4525 ibase = i;
4526 *vp++ = tmp[ibase / 32] >> i % 32;
4527 }
4528
4529 /* It shouldn't matter what's done here, so fill it with
4530 zero. */
4531 for (; i < elem_bitsize; i += value_bit)
4532 *vp++ = 0;
4533 }
4534 break;
4535
4536 default:
4537 gcc_unreachable ();
4538 }
4539 }
4540
4541 /* Now, pick the right byte to start with. */
4542 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4543 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4544 will already have offset 0. */
4545 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4546 {
4547 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4548 - byte);
4549 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4550 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4551 byte = (subword_byte % UNITS_PER_WORD
4552 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4553 }
4554
4555 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4556 so if it's become negative it will instead be very large.) */
4557 gcc_assert (byte < GET_MODE_SIZE (innermode));
4558
4559 /* Convert from bytes to chunks of size value_bit. */
4560 value_start = byte * (BITS_PER_UNIT / value_bit);
4561
4562 /* Re-pack the value. */
4563
4564 if (VECTOR_MODE_P (outermode))
4565 {
4566 num_elem = GET_MODE_NUNITS (outermode);
4567 result_v = rtvec_alloc (num_elem);
4568 elems = &RTVEC_ELT (result_v, 0);
4569 outer_submode = GET_MODE_INNER (outermode);
4570 }
4571 else
4572 {
4573 num_elem = 1;
4574 elems = &result_s;
4575 outer_submode = outermode;
4576 }
4577
4578 outer_class = GET_MODE_CLASS (outer_submode);
4579 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4580
4581 gcc_assert (elem_bitsize % value_bit == 0);
4582 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4583
4584 for (elem = 0; elem < num_elem; elem++)
4585 {
4586 unsigned char *vp;
4587
4588 /* Vectors are stored in target memory order. (This is probably
4589 a mistake.) */
4590 {
4591 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4592 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4593 / BITS_PER_UNIT);
4594 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4595 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4596 unsigned bytele = (subword_byte % UNITS_PER_WORD
4597 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4598 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4599 }
4600
4601 switch (outer_class)
4602 {
4603 case MODE_INT:
4604 case MODE_PARTIAL_INT:
4605 {
4606 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4607
4608 for (i = 0;
4609 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4610 i += value_bit)
4611 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4612 for (; i < elem_bitsize; i += value_bit)
4613 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4614 << (i - HOST_BITS_PER_WIDE_INT));
4615
4616 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4617 know why. */
4618 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4619 elems[elem] = gen_int_mode (lo, outer_submode);
4620 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4621 elems[elem] = immed_double_const (lo, hi, outer_submode);
4622 else
4623 return NULL_RTX;
4624 }
4625 break;
4626
4627 case MODE_FLOAT:
4628 case MODE_DECIMAL_FLOAT:
4629 {
4630 REAL_VALUE_TYPE r;
4631 long tmp[max_bitsize / 32];
4632
4633 /* real_from_target wants its input in words affected by
4634 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4635 and use WORDS_BIG_ENDIAN instead; see the documentation
4636 of SUBREG in rtl.texi. */
4637 for (i = 0; i < max_bitsize / 32; i++)
4638 tmp[i] = 0;
4639 for (i = 0; i < elem_bitsize; i += value_bit)
4640 {
4641 int ibase;
4642 if (WORDS_BIG_ENDIAN)
4643 ibase = elem_bitsize - 1 - i;
4644 else
4645 ibase = i;
4646 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4647 }
4648
4649 real_from_target (&r, tmp, outer_submode);
4650 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4651 }
4652 break;
4653
4654 default:
4655 gcc_unreachable ();
4656 }
4657 }
4658 if (VECTOR_MODE_P (outermode))
4659 return gen_rtx_CONST_VECTOR (outermode, result_v);
4660 else
4661 return result_s;
4662 }
4663
4664 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4665 Return 0 if no simplifications are possible. */
4666 rtx
4667 simplify_subreg (enum machine_mode outermode, rtx op,
4668 enum machine_mode innermode, unsigned int byte)
4669 {
4670 /* Little bit of sanity checking. */
4671 gcc_assert (innermode != VOIDmode);
4672 gcc_assert (outermode != VOIDmode);
4673 gcc_assert (innermode != BLKmode);
4674 gcc_assert (outermode != BLKmode);
4675
4676 gcc_assert (GET_MODE (op) == innermode
4677 || GET_MODE (op) == VOIDmode);
4678
4679 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4680 gcc_assert (byte < GET_MODE_SIZE (innermode));
4681
4682 if (outermode == innermode && !byte)
4683 return op;
4684
4685 if (GET_CODE (op) == CONST_INT
4686 || GET_CODE (op) == CONST_DOUBLE
4687 || GET_CODE (op) == CONST_VECTOR)
4688 return simplify_immed_subreg (outermode, op, innermode, byte);
4689
4690 /* Changing mode twice with SUBREG => just change it once,
4691 or not at all if changing back to op's starting mode. */
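  /* E.g. on a little-endian target, with X a pseudo register,
     (subreg:QI (subreg:HI (reg:SI X) 0) 0) folds to
     (subreg:QI (reg:SI X) 0).  */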
4692 if (GET_CODE (op) == SUBREG)
4693 {
4694 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4695 int final_offset = byte + SUBREG_BYTE (op);
4696 rtx newx;
4697
4698 if (outermode == innermostmode
4699 && byte == 0 && SUBREG_BYTE (op) == 0)
4700 return SUBREG_REG (op);
4701
4702 /* The SUBREG_BYTE represents the offset, as if the value were stored
4703 in memory. An irritating exception is the paradoxical subreg, where
4704 we define SUBREG_BYTE to be 0. On big-endian machines, this
4705 value should be negative. For a moment, undo this exception. */
4706 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4707 {
4708 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4709 if (WORDS_BIG_ENDIAN)
4710 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4711 if (BYTES_BIG_ENDIAN)
4712 final_offset += difference % UNITS_PER_WORD;
4713 }
4714 if (SUBREG_BYTE (op) == 0
4715 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4716 {
4717 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4718 if (WORDS_BIG_ENDIAN)
4719 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4720 if (BYTES_BIG_ENDIAN)
4721 final_offset += difference % UNITS_PER_WORD;
4722 }
4723
4724 /* See whether resulting subreg will be paradoxical. */
4725 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4726 {
4727 /* In nonparadoxical subregs we can't handle negative offsets. */
4728 if (final_offset < 0)
4729 return NULL_RTX;
4730 /* Bail out in case resulting subreg would be incorrect. */
4731 if (final_offset % GET_MODE_SIZE (outermode)
4732 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4733 return NULL_RTX;
4734 }
4735 else
4736 {
4737 int offset = 0;
4738 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4739
4740 /* In a paradoxical subreg, see if we are still looking at the lower part.
4741 If so, our SUBREG_BYTE will be 0. */
4742 if (WORDS_BIG_ENDIAN)
4743 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4744 if (BYTES_BIG_ENDIAN)
4745 offset += difference % UNITS_PER_WORD;
4746 if (offset == final_offset)
4747 final_offset = 0;
4748 else
4749 return NULL_RTX;
4750 }
4751
4752 /* Recurse for further possible simplifications. */
4753 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4754 final_offset);
4755 if (newx)
4756 return newx;
4757 if (validate_subreg (outermode, innermostmode,
4758 SUBREG_REG (op), final_offset))
4759 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4760 return NULL_RTX;
4761 }
4762
4763 /* Merge implicit and explicit truncations. */
4764
4765 if (GET_CODE (op) == TRUNCATE
4766 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4767 && subreg_lowpart_offset (outermode, innermode) == byte)
4768 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4769 GET_MODE (XEXP (op, 0)));
4770
4771 /* SUBREG of a hard register => just change the register number
4772 and/or mode. If the hard register is not valid in that mode,
4773 suppress this simplification. If the hard register is the stack,
4774 frame, or argument pointer, leave this as a SUBREG. */
4775
4776 if (REG_P (op)
4777 && REGNO (op) < FIRST_PSEUDO_REGISTER
4778 #ifdef CANNOT_CHANGE_MODE_CLASS
4779 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4780 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4781 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4782 #endif
4783 && ((reload_completed && !frame_pointer_needed)
4784 || (REGNO (op) != FRAME_POINTER_REGNUM
4785 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4786 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4787 #endif
4788 ))
4789 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4790 && REGNO (op) != ARG_POINTER_REGNUM
4791 #endif
4792 && REGNO (op) != STACK_POINTER_REGNUM
4793 && subreg_offset_representable_p (REGNO (op), innermode,
4794 byte, outermode))
4795 {
4796 unsigned int regno = REGNO (op);
4797 unsigned int final_regno
4798 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4799
4800 /* ??? We do allow it if the current REG is not valid for
4801 its mode. This is a kludge to work around how float/complex
4802 arguments are passed on 32-bit SPARC and should be fixed. */
4803 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4804 || ! HARD_REGNO_MODE_OK (regno, innermode))
4805 {
4806 rtx x;
4807 int final_offset = byte;
4808
4809 /* Adjust offset for paradoxical subregs. */
4810 if (byte == 0
4811 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4812 {
4813 int difference = (GET_MODE_SIZE (innermode)
4814 - GET_MODE_SIZE (outermode));
4815 if (WORDS_BIG_ENDIAN)
4816 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4817 if (BYTES_BIG_ENDIAN)
4818 final_offset += difference % UNITS_PER_WORD;
4819 }
4820
4821 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4822
4823 /* Propagate original regno. We don't have any way to specify
4824 the offset inside original regno, so do so only for lowpart.
4825 The information is used only by alias analysis, which cannot
4826 grok partial registers anyway. */
4827
4828 if (subreg_lowpart_offset (outermode, innermode) == byte)
4829 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4830 return x;
4831 }
4832 }
4833
4834 /* If we have a SUBREG of a register that we are replacing and we are
4835 replacing it with a MEM, make a new MEM and try replacing the
4836 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4837 or if we would be widening it. */
4838
4839 if (MEM_P (op)
4840 && ! mode_dependent_address_p (XEXP (op, 0))
4841 /* Allow splitting of volatile memory references in case we don't
4842 have an instruction to move the whole thing. */
4843 && (! MEM_VOLATILE_P (op)
4844 || ! have_insn_for (SET, innermode))
4845 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4846 return adjust_address_nv (op, outermode, byte);
4847
4848 /* Handle complex values represented as CONCAT
4849 of real and imaginary part. */
4850 if (GET_CODE (op) == CONCAT)
4851 {
4852 unsigned int part_size, final_offset;
4853 rtx part, res;
4854
4855 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4856 if (byte < part_size)
4857 {
4858 part = XEXP (op, 0);
4859 final_offset = byte;
4860 }
4861 else
4862 {
4863 part = XEXP (op, 1);
4864 final_offset = byte - part_size;
4865 }
4866
4867 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4868 return NULL_RTX;
4869
4870 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4871 if (res)
4872 return res;
4873 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4874 return gen_rtx_SUBREG (outermode, part, final_offset);
4875 return NULL_RTX;
4876 }
4877
4878 /* Optimize SUBREG truncations of zero and sign extended values. */
4879 if ((GET_CODE (op) == ZERO_EXTEND
4880 || GET_CODE (op) == SIGN_EXTEND)
4881 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4882 {
4883 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4884
4885 /* If we're requesting the lowpart of a zero or sign extension,
4886 there are three possibilities. If the outermode is the same
4887 as the origmode, we can omit both the extension and the subreg.
4888 If the outermode is not larger than the origmode, we can apply
4889 the truncation without the extension. Finally, if the outermode
4890 is larger than the origmode, but both are integer modes, we
4891 can just extend to the appropriate mode. */
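      /* For illustration, assuming byte 0 is the lowpart (little endian):
	 (subreg:HI (zero_extend:SI (reg:HI x)) 0) becomes (reg:HI x),
	 (subreg:QI (zero_extend:SI (reg:HI x)) 0) becomes
	 (subreg:QI (reg:HI x) 0), and
	 (subreg:SI (zero_extend:DI (reg:HI x)) 0) becomes
	 (zero_extend:SI (reg:HI x)).  */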
4892 if (bitpos == 0)
4893 {
4894 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4895 if (outermode == origmode)
4896 return XEXP (op, 0);
4897 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4898 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4899 subreg_lowpart_offset (outermode,
4900 origmode));
4901 if (SCALAR_INT_MODE_P (outermode))
4902 return simplify_gen_unary (GET_CODE (op), outermode,
4903 XEXP (op, 0), origmode);
4904 }
4905
4906 /* A SUBREG resulting from a zero extension may fold to zero if
4907 it extracts higher bits than the ZERO_EXTEND's source provides. */
4908 if (GET_CODE (op) == ZERO_EXTEND
4909 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4910 return CONST0_RTX (outermode);
4911 }
4912
4913 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4914 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4915 the outer subreg is effectively a truncation to the original mode. */
4916 if ((GET_CODE (op) == LSHIFTRT
4917 || GET_CODE (op) == ASHIFTRT)
4918 && SCALAR_INT_MODE_P (outermode)
4919 /* Ensure that the INNERMODE is at least twice as wide as the OUTERMODE
4920 to avoid the possibility that an outer LSHIFTRT shifts by more
4921 than the sign extension's sign_bit_copies and introduces zeros
4922 into the high bits of the result. */
4923 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4924 && GET_CODE (XEXP (op, 1)) == CONST_INT
4925 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4926 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4927 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4928 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4929 return simplify_gen_binary (ASHIFTRT, outermode,
4930 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4931
4932 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4933 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4934 the outer subreg is effectively a truncation to the original mode. */
4935 if ((GET_CODE (op) == LSHIFTRT
4936 || GET_CODE (op) == ASHIFTRT)
4937 && SCALAR_INT_MODE_P (outermode)
4938 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4939 && GET_CODE (XEXP (op, 1)) == CONST_INT
4940 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4941 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4942 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4943 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4944 return simplify_gen_binary (LSHIFTRT, outermode,
4945 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4946
4947 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4948 (ashift:QI (x:QI) C), where C is a suitable small constant and
4949 the outer subreg is effectively a truncation to the original mode. */
4950 if (GET_CODE (op) == ASHIFT
4951 && SCALAR_INT_MODE_P (outermode)
4952 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4953 && GET_CODE (XEXP (op, 1)) == CONST_INT
4954 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4955 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4956 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4957 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4958 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4959 return simplify_gen_binary (ASHIFT, outermode,
4960 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4961
4962 return NULL_RTX;
4963 }
4964
4965 /* Make a SUBREG operation or equivalent if it folds. */
4966
4967 rtx
4968 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4969 enum machine_mode innermode, unsigned int byte)
4970 {
4971 rtx newx;
4972
4973 newx = simplify_subreg (outermode, op, innermode, byte);
4974 if (newx)
4975 return newx;
4976
4977 if (GET_CODE (op) == SUBREG
4978 || GET_CODE (op) == CONCAT
4979 || GET_MODE (op) == VOIDmode)
4980 return NULL_RTX;
4981
4982 if (validate_subreg (outermode, innermode, op, byte))
4983 return gen_rtx_SUBREG (outermode, op, byte);
4984
4985 return NULL_RTX;
4986 }
4987
4988 /* Simplify X, an rtx expression.
4989
4990 Return the simplified expression or NULL if no simplifications
4991 were possible.
4992
4993 This is the preferred entry point into the simplification routines;
4994 however, we still allow passes to call the more specific routines.
4995
4996 Right now GCC has three (yes, three) major bodies of RTL simplification
4997 code that need to be unified.
4998
4999 1. fold_rtx in cse.c. This code uses various CSE specific
5000 information to aid in RTL simplification.
5001
5002 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5003 it uses combine specific information to aid in RTL
5004 simplification.
5005
5006 3. The routines in this file.
5007
5008
5009 Long term we want to only have one body of simplification code; to
5010 get to that state I recommend the following steps:
5011
5012 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5013 which do not depend on pass-specific state into these routines.
5014
5015 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5016 use this routine whenever possible.
5017
5018 3. Allow for pass dependent state to be provided to these
5019 routines and add simplifications based on the pass dependent
5020 state. Remove code from cse.c & combine.c that becomes
5021 redundant/dead.
5022
5023 It will take time, but ultimately the compiler will be easier to
5024 maintain and improve. It's totally silly that when we add a
5025 simplification it needs to be added to 4 places (3 for RTL
5026 simplification and 1 for tree simplification). */
5027
5028 rtx
5029 simplify_rtx (rtx x)
5030 {
5031 enum rtx_code code = GET_CODE (x);
5032 enum machine_mode mode = GET_MODE (x);
5033
5034 switch (GET_RTX_CLASS (code))
5035 {
5036 case RTX_UNARY:
5037 return simplify_unary_operation (code, mode,
5038 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5039 case RTX_COMM_ARITH:
5040 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5041 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5042
5043 /* Fall through.... */
5044
5045 case RTX_BIN_ARITH:
5046 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5047
5048 case RTX_TERNARY:
5049 case RTX_BITFIELD_OPS:
5050 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5051 XEXP (x, 0), XEXP (x, 1),
5052 XEXP (x, 2));
5053
5054 case RTX_COMPARE:
5055 case RTX_COMM_COMPARE:
5056 return simplify_relational_operation (code, mode,
5057 ((GET_MODE (XEXP (x, 0))
5058 != VOIDmode)
5059 ? GET_MODE (XEXP (x, 0))
5060 : GET_MODE (XEXP (x, 1))),
5061 XEXP (x, 0),
5062 XEXP (x, 1));
5063
5064 case RTX_EXTRA:
5065 if (code == SUBREG)
5066 return simplify_subreg (mode, SUBREG_REG (x),
5067 GET_MODE (SUBREG_REG (x)),
5068 SUBREG_BYTE (x));
5069 break;
5070
5071 case RTX_OBJ:
5072 if (code == LO_SUM)
5073 {
5074 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5075 if (GET_CODE (XEXP (x, 0)) == HIGH
5076 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5077 return XEXP (x, 1);
5078 }
5079 break;
5080
5081 default:
5082 break;
5083 }
5084 return NULL;
5085 }