1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "flags.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "recog.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "ggc.h"
42 #include "target.h"
43
44 /* Simplification and canonicalization of RTL. */
45
46 /* Much code operates on (low, high) pairs; the low value is an
47 unsigned wide int, the high value a signed wide int. We
48 occasionally need to sign extend from low to high as if low were a
49 signed wide int. */
50 #define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
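/* For example, a LOW word whose signed value is -5 sign-extends to a HIGH
   word of -1, while a LOW word of 5 extends to a HIGH word of 0, exactly
   as two's-complement widening would do.  */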
52
53 static rtx neg_const_int (enum machine_mode, rtx);
54 static bool plus_minus_operand_p (rtx);
55 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
66 \f
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
71 {
72 return gen_int_mode (- INTVAL (i), mode);
73 }
74
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
77
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
80 {
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
83
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
86
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
90
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
97 {
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
100 }
101 else
102 return false;
103
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
107 }
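/* For instance, in SImode the only constant accepted above is the one with
   just bit 31 set; a CONST_INT stores it sign-extended, so the masking step
   strips the sign-bit copies before the final comparison.  */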
108 \f
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
111
112 rtx
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
115 {
116 rtx tem;
117
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
122
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0, op1))
126 tem = op0, op0 = op1, op1 = tem;
127
128 return gen_rtx_fmt_ee (code, mode, op0, op1);
129 }
130 \f
131 /* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
133 rtx
134 avoid_constant_pool_reference (rtx x)
135 {
136 rtx c, tmp, addr;
137 enum machine_mode cmode;
138 HOST_WIDE_INT offset = 0;
139
140 switch (GET_CODE (x))
141 {
142 case MEM:
143 break;
144
145 case FLOAT_EXTEND:
146 /* Handle float extensions of constant pool references. */
147 tmp = XEXP (x, 0);
148 c = avoid_constant_pool_reference (tmp);
149 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 {
151 REAL_VALUE_TYPE d;
152
153 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 }
156 return x;
157
158 default:
159 return x;
160 }
161
162 if (GET_MODE (x) == BLKmode)
163 return x;
164
165 addr = XEXP (x, 0);
166
167 /* Call target hook to avoid the effects of -fpic etc.... */
168 addr = targetm.delegitimize_address (addr);
169
170 /* Split the address into a base and integer offset. */
171 if (GET_CODE (addr) == CONST
172 && GET_CODE (XEXP (addr, 0)) == PLUS
173 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
174 {
175 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
176 addr = XEXP (XEXP (addr, 0), 0);
177 }
178
179 if (GET_CODE (addr) == LO_SUM)
180 addr = XEXP (addr, 1);
181
182 /* If this is a constant pool reference, we can turn it into its
183 constant and hope that simplifications happen. */
184 if (GET_CODE (addr) == SYMBOL_REF
185 && CONSTANT_POOL_ADDRESS_P (addr))
186 {
187 c = get_pool_constant (addr);
188 cmode = get_pool_mode (addr);
189
190 /* If we're accessing the constant in a different mode than it was
191 originally stored, attempt to fix that up via subreg simplifications.
192 If that fails we have no choice but to return the original memory. */
193 if (offset != 0 || cmode != GET_MODE (x))
194 {
195 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
196 if (tem && CONSTANT_P (tem))
197 return tem;
198 }
199 else
200 return c;
201 }
202
203 return x;
204 }
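/* Thus a reference such as (mem:DF (symbol_ref LC0)), where the pool entry
   for LC0 holds a DFmode 3.5, comes back as the corresponding CONST_DOUBLE,
   and a FLOAT_EXTEND of a pooled SFmode constant is folded directly to the
   wider mode.  (LC0 is only an illustrative label name.)  */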
205 \f
206 /* Make a unary operation by first seeing if it folds and otherwise making
207 the specified operation. */
208
209 rtx
210 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
211 enum machine_mode op_mode)
212 {
213 rtx tem;
214
215 /* If this simplifies, use it. */
216 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
217 return tem;
218
219 return gen_rtx_fmt_e (code, mode, op);
220 }
221
222 /* Likewise for ternary operations. */
223
224 rtx
225 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
226 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
227 {
228 rtx tem;
229
230 /* If this simplifies, use it. */
231 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
232 op0, op1, op2)))
233 return tem;
234
235 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
236 }
237
238 /* Likewise, for relational operations.
239 CMP_MODE specifies mode comparison is done in. */
240
241 rtx
242 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
243 enum machine_mode cmp_mode, rtx op0, rtx op1)
244 {
245 rtx tem;
246
247 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
248 op0, op1)))
249 return tem;
250
251 return gen_rtx_fmt_ee (code, mode, op0, op1);
252 }
253 \f
254 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
255 resulting RTX. Return a new RTX which is as simplified as possible. */
256
257 rtx
258 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
259 {
260 enum rtx_code code = GET_CODE (x);
261 enum machine_mode mode = GET_MODE (x);
262 enum machine_mode op_mode;
263 rtx op0, op1, op2;
264
265 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
266 to build a new expression substituting recursively. If we can't do
267 anything, return our input. */
268
269 if (x == old_rtx)
270 return new_rtx;
271
272 switch (GET_RTX_CLASS (code))
273 {
274 case RTX_UNARY:
275 op0 = XEXP (x, 0);
276 op_mode = GET_MODE (op0);
277 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
278 if (op0 == XEXP (x, 0))
279 return x;
280 return simplify_gen_unary (code, mode, op0, op_mode);
281
282 case RTX_BIN_ARITH:
283 case RTX_COMM_ARITH:
284 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
285 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
287 return x;
288 return simplify_gen_binary (code, mode, op0, op1);
289
290 case RTX_COMPARE:
291 case RTX_COMM_COMPARE:
292 op0 = XEXP (x, 0);
293 op1 = XEXP (x, 1);
294 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
295 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
296 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
297 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
298 return x;
299 return simplify_gen_relational (code, mode, op_mode, op0, op1);
300
301 case RTX_TERNARY:
302 case RTX_BITFIELD_OPS:
303 op0 = XEXP (x, 0);
304 op_mode = GET_MODE (op0);
305 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
306 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
307 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
308 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
309 return x;
310 if (op_mode == VOIDmode)
311 op_mode = GET_MODE (op0);
312 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
313
314 case RTX_EXTRA:
315 /* The only case we try to handle is a SUBREG. */
316 if (code == SUBREG)
317 {
318 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
319 if (op0 == SUBREG_REG (x))
320 return x;
321 op0 = simplify_gen_subreg (GET_MODE (x), op0,
322 GET_MODE (SUBREG_REG (x)),
323 SUBREG_BYTE (x));
324 return op0 ? op0 : x;
325 }
326 break;
327
328 case RTX_OBJ:
329 if (code == MEM)
330 {
331 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
332 if (op0 == XEXP (x, 0))
333 return x;
334 return replace_equiv_address_nv (x, op0);
335 }
336 else if (code == LO_SUM)
337 {
338 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
339 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
340
341 /* (lo_sum (high x) x) -> x */
342 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
343 return op1;
344
345 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
346 return x;
347 return gen_rtx_LO_SUM (mode, op0, op1);
348 }
349 else if (code == REG)
350 {
351 if (rtx_equal_p (x, old_rtx))
352 return new_rtx;
353 }
354 break;
355
356 default:
357 break;
358 }
359 return x;
360 }
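/* As a usage example, replacing (reg 100) with (const_int 4) in
   (plus:SI (reg 100) (const_int 3)) does not merely substitute: the call
   to simplify_gen_binary folds the result to (const_int 7).  */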
361 \f
362 /* Try to simplify a unary operation CODE whose output mode is to be
363 MODE with input operand OP whose mode was originally OP_MODE.
364 Return zero if no simplification can be made. */
365 rtx
366 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
367 rtx op, enum machine_mode op_mode)
368 {
369 rtx trueop, tem;
370
371 if (GET_CODE (op) == CONST)
372 op = XEXP (op, 0);
373
374 trueop = avoid_constant_pool_reference (op);
375
376 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
377 if (tem)
378 return tem;
379
380 return simplify_unary_operation_1 (code, mode, op);
381 }
382
383 /* Perform some simplifications we can do even if the operands
384 aren't constant. */
385 static rtx
386 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
387 {
388 enum rtx_code reversed;
389 rtx temp;
390
391 switch (code)
392 {
393 case NOT:
394 /* (not (not X)) == X. */
395 if (GET_CODE (op) == NOT)
396 return XEXP (op, 0);
397
398 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
399 comparison is all ones. */
400 if (COMPARISON_P (op)
401 && (mode == BImode || STORE_FLAG_VALUE == -1)
402 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
403 return simplify_gen_relational (reversed, mode, VOIDmode,
404 XEXP (op, 0), XEXP (op, 1));
405
406 /* (not (plus X -1)) can become (neg X). */
407 if (GET_CODE (op) == PLUS
408 && XEXP (op, 1) == constm1_rtx)
409 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
410
411 /* Similarly, (not (neg X)) is (plus X -1). */
412 if (GET_CODE (op) == NEG)
413 return plus_constant (XEXP (op, 0), -1);
414
415 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
416 if (GET_CODE (op) == XOR
417 && GET_CODE (XEXP (op, 1)) == CONST_INT
418 && (temp = simplify_unary_operation (NOT, mode,
419 XEXP (op, 1), mode)) != 0)
420 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
421
422 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
423 if (GET_CODE (op) == PLUS
424 && GET_CODE (XEXP (op, 1)) == CONST_INT
425 && mode_signbit_p (mode, XEXP (op, 1))
426 && (temp = simplify_unary_operation (NOT, mode,
427 XEXP (op, 1), mode)) != 0)
428 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
429
430
431 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
432 operands other than 1, but that is not valid. We could do a
433 similar simplification for (not (lshiftrt C X)) where C is
434 just the sign bit, but this doesn't seem common enough to
435 bother with. */
436 if (GET_CODE (op) == ASHIFT
437 && XEXP (op, 0) == const1_rtx)
438 {
439 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
440 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
441 }
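/* E.g. in SImode both forms above describe a word of all ones with bit X
   cleared: (ashift 1 X) has only bit X set, and rotating ~1 (0xfffffffe)
   left by X moves its single zero bit into position X.  */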
442
443 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
444 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
445 so we can perform the above simplification. */
446
447 if (STORE_FLAG_VALUE == -1
448 && GET_CODE (op) == ASHIFTRT
449 && GET_CODE (XEXP (op, 1)) == CONST_INT
450 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
451 return simplify_gen_relational (GE, mode, VOIDmode,
452 XEXP (op, 0), const0_rtx);
453
454
455 if (GET_CODE (op) == SUBREG
456 && subreg_lowpart_p (op)
457 && (GET_MODE_SIZE (GET_MODE (op))
458 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
459 && GET_CODE (SUBREG_REG (op)) == ASHIFT
460 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
461 {
462 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
463 rtx x;
464
465 x = gen_rtx_ROTATE (inner_mode,
466 simplify_gen_unary (NOT, inner_mode, const1_rtx,
467 inner_mode),
468 XEXP (SUBREG_REG (op), 1));
469 return rtl_hooks.gen_lowpart_no_emit (mode, x);
470 }
471
472 /* Apply De Morgan's laws to reduce number of patterns for machines
473 with negating logical insns (and-not, nand, etc.). If result has
474 only one NOT, put it first, since that is how the patterns are
475 coded. */
476
477 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
478 {
479 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
480 enum machine_mode op_mode;
481
482 op_mode = GET_MODE (in1);
483 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
484
485 op_mode = GET_MODE (in2);
486 if (op_mode == VOIDmode)
487 op_mode = mode;
488 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
489
490 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
491 {
492 rtx tem = in2;
493 in2 = in1; in1 = tem;
494 }
495
496 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
497 mode, in1, in2);
498 }
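/* Concretely, (not (ior A B)) becomes (and (not A) (not B)) and
   (not (and A B)) becomes (ior (not A) (not B)); the swap just above
   makes sure a single surviving NOT ends up as the first operand.  */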
499 break;
500
501 case NEG:
502 /* (neg (neg X)) == X. */
503 if (GET_CODE (op) == NEG)
504 return XEXP (op, 0);
505
506 /* (neg (plus X 1)) can become (not X). */
507 if (GET_CODE (op) == PLUS
508 && XEXP (op, 1) == const1_rtx)
509 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
510
511 /* Similarly, (neg (not X)) is (plus X 1). */
512 if (GET_CODE (op) == NOT)
513 return plus_constant (XEXP (op, 0), 1);
514
515 /* (neg (minus X Y)) can become (minus Y X). This transformation
516 isn't safe for modes with signed zeros, since if X and Y are
517 both +0, (minus Y X) is the same as (minus X Y). If the
518 rounding mode is towards +infinity (or -infinity) then the two
519 expressions will be rounded differently. */
520 if (GET_CODE (op) == MINUS
521 && !HONOR_SIGNED_ZEROS (mode)
522 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
523 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
524
525 if (GET_CODE (op) == PLUS
526 && !HONOR_SIGNED_ZEROS (mode)
527 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
528 {
529 /* (neg (plus A C)) is simplified to (minus -C A). */
530 if (GET_CODE (XEXP (op, 1)) == CONST_INT
531 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
532 {
533 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
534 if (temp)
535 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
536 }
537
538 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
539 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
540 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
541 }
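/* For example, (neg (plus X (const_int 5))) becomes
   (minus (const_int -5) X), while (neg (plus X Y)) with no constant
   operand becomes (minus (neg X) Y).  */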
542
543 /* (neg (mult A B)) becomes (mult (neg A) B).
544 This works even for floating-point values. */
545 if (GET_CODE (op) == MULT
546 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
547 {
548 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
549 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
550 }
551
552 /* NEG commutes with ASHIFT since it is multiplication. Only do
553 this if we can then eliminate the NEG (e.g., if the operand
554 is a constant). */
555 if (GET_CODE (op) == ASHIFT)
556 {
557 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
558 if (temp)
559 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
560 }
561
562 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
563 C is equal to the width of MODE minus 1. */
564 if (GET_CODE (op) == ASHIFTRT
565 && GET_CODE (XEXP (op, 1)) == CONST_INT
566 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
567 return simplify_gen_binary (LSHIFTRT, mode,
568 XEXP (op, 0), XEXP (op, 1));
569
570 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
571 C is equal to the width of MODE minus 1. */
572 if (GET_CODE (op) == LSHIFTRT
573 && GET_CODE (XEXP (op, 1)) == CONST_INT
574 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
575 return simplify_gen_binary (ASHIFTRT, mode,
576 XEXP (op, 0), XEXP (op, 1));
577
578 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
579 if (GET_CODE (op) == XOR
580 && XEXP (op, 1) == const1_rtx
581 && nonzero_bits (XEXP (op, 0), mode) == 1)
582 return plus_constant (XEXP (op, 0), -1);
583
584 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
585 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
586 if (GET_CODE (op) == LT
587 && XEXP (op, 1) == const0_rtx)
588 {
589 enum machine_mode inner = GET_MODE (XEXP (op, 0));
590 int isize = GET_MODE_BITSIZE (inner);
591 if (STORE_FLAG_VALUE == 1)
592 {
593 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
594 GEN_INT (isize - 1));
595 if (mode == inner)
596 return temp;
597 if (GET_MODE_BITSIZE (mode) > isize)
598 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
599 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
600 }
601 else if (STORE_FLAG_VALUE == -1)
602 {
603 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
604 GEN_INT (isize - 1));
605 if (mode == inner)
606 return temp;
607 if (GET_MODE_BITSIZE (mode) > isize)
608 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
609 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
610 }
611 }
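/* For instance, with STORE_FLAG_VALUE == 1 and X in SImode,
   (neg (lt X (const_int 0))) becomes (ashiftrt X (const_int 31)),
   which is -1 when X is negative and 0 otherwise, the same values the
   negated comparison would produce.  */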
612 break;
613
614 case TRUNCATE:
615 /* We can't handle truncation to a partial integer mode here
616 because we don't know the real bitsize of the partial
617 integer mode. */
618 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
619 break;
620
621 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
622 if ((GET_CODE (op) == SIGN_EXTEND
623 || GET_CODE (op) == ZERO_EXTEND)
624 && GET_MODE (XEXP (op, 0)) == mode)
625 return XEXP (op, 0);
626
627 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
628 (OP:SI foo:SI) if OP is NEG or ABS. */
629 if ((GET_CODE (op) == ABS
630 || GET_CODE (op) == NEG)
631 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
632 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
633 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
634 return simplify_gen_unary (GET_CODE (op), mode,
635 XEXP (XEXP (op, 0), 0), mode);
636
637 /* (truncate:A (subreg:B (truncate:C X) 0)) is
638 (truncate:A X). */
639 if (GET_CODE (op) == SUBREG
640 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
641 && subreg_lowpart_p (op))
642 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
643 GET_MODE (XEXP (SUBREG_REG (op), 0)));
644
645 /* If we know that the value is already truncated, we can
646 replace the TRUNCATE with a SUBREG. Note that this is also
647 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
648 modes; we just have to apply a different definition for
649 truncation. But don't do this for an (LSHIFTRT (MULT ...))
650 since this will cause problems with the umulXi3_highpart
651 patterns. */
652 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
653 GET_MODE_BITSIZE (GET_MODE (op)))
654 ? (num_sign_bit_copies (op, GET_MODE (op))
655 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
656 - GET_MODE_BITSIZE (mode)))
657 : truncated_to_mode (mode, op))
658 && ! (GET_CODE (op) == LSHIFTRT
659 && GET_CODE (XEXP (op, 0)) == MULT))
660 return rtl_hooks.gen_lowpart_no_emit (mode, op);
661
662 /* A truncate of a comparison can be replaced with a subreg if
663 STORE_FLAG_VALUE permits. This is like the previous test,
664 but it works even if the comparison is done in a mode larger
665 than HOST_BITS_PER_WIDE_INT. */
666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
667 && COMPARISON_P (op)
668 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
669 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 break;
671
672 case FLOAT_TRUNCATE:
673 if (DECIMAL_FLOAT_MODE_P (mode))
674 break;
675
676 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
677 if (GET_CODE (op) == FLOAT_EXTEND
678 && GET_MODE (XEXP (op, 0)) == mode)
679 return XEXP (op, 0);
680
681 /* (float_truncate:SF (float_truncate:DF foo:XF))
682 = (float_truncate:SF foo:XF).
683 This may eliminate double rounding, so it is unsafe.
684
685 (float_truncate:SF (float_extend:XF foo:DF))
686 = (float_truncate:SF foo:DF).
687
688 (float_truncate:DF (float_extend:XF foo:SF))
689 = (float_extend:DF foo:SF). */
690 if ((GET_CODE (op) == FLOAT_TRUNCATE
691 && flag_unsafe_math_optimizations)
692 || GET_CODE (op) == FLOAT_EXTEND)
693 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
694 0)))
695 > GET_MODE_SIZE (mode)
696 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
697 mode,
698 XEXP (op, 0), mode);
699
700 /* (float_truncate (float x)) is (float x) */
701 if (GET_CODE (op) == FLOAT
702 && (flag_unsafe_math_optimizations
703 || ((unsigned)significand_size (GET_MODE (op))
704 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
705 - num_sign_bit_copies (XEXP (op, 0),
706 GET_MODE (XEXP (op, 0)))))))
707 return simplify_gen_unary (FLOAT, mode,
708 XEXP (op, 0),
709 GET_MODE (XEXP (op, 0)));
710
711 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
712 (OP:SF foo:SF) if OP is NEG or ABS. */
713 if ((GET_CODE (op) == ABS
714 || GET_CODE (op) == NEG)
715 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
716 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
717 return simplify_gen_unary (GET_CODE (op), mode,
718 XEXP (XEXP (op, 0), 0), mode);
719
720 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
721 is (float_truncate:SF x). */
722 if (GET_CODE (op) == SUBREG
723 && subreg_lowpart_p (op)
724 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
725 return SUBREG_REG (op);
726 break;
727
728 case FLOAT_EXTEND:
729 if (DECIMAL_FLOAT_MODE_P (mode))
730 break;
731
732 /* (float_extend (float_extend x)) is (float_extend x)
733
734 (float_extend (float x)) is (float x) assuming that double
735 rounding can't happen.
736 */
737 if (GET_CODE (op) == FLOAT_EXTEND
738 || (GET_CODE (op) == FLOAT
739 && ((unsigned)significand_size (GET_MODE (op))
740 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
741 - num_sign_bit_copies (XEXP (op, 0),
742 GET_MODE (XEXP (op, 0)))))))
743 return simplify_gen_unary (GET_CODE (op), mode,
744 XEXP (op, 0),
745 GET_MODE (XEXP (op, 0)));
746
747 break;
748
749 case ABS:
750 /* (abs (neg <foo>)) -> (abs <foo>) */
751 if (GET_CODE (op) == NEG)
752 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
753 GET_MODE (XEXP (op, 0)));
754
755 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
756 do nothing. */
757 if (GET_MODE (op) == VOIDmode)
758 break;
759
760 /* If operand is something known to be positive, ignore the ABS. */
761 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
762 || ((GET_MODE_BITSIZE (GET_MODE (op))
763 <= HOST_BITS_PER_WIDE_INT)
764 && ((nonzero_bits (op, GET_MODE (op))
765 & ((HOST_WIDE_INT) 1
766 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
767 == 0)))
768 return op;
769
770 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
771 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
772 return gen_rtx_NEG (mode, op);
773
774 break;
775
776 case FFS:
777 /* (ffs (*_extend <X>)) = (ffs <X>) */
778 if (GET_CODE (op) == SIGN_EXTEND
779 || GET_CODE (op) == ZERO_EXTEND)
780 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
781 GET_MODE (XEXP (op, 0)));
782 break;
783
784 case POPCOUNT:
785 switch (GET_CODE (op))
786 {
787 case BSWAP:
788 case ZERO_EXTEND:
789 /* (popcount (zero_extend <X>)) = (popcount <X>) */
790 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
791 GET_MODE (XEXP (op, 0)));
792
793 case ROTATE:
794 case ROTATERT:
795 /* Rotations don't affect popcount. */
796 if (!side_effects_p (XEXP (op, 1)))
797 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
798 GET_MODE (XEXP (op, 0)));
799 break;
800
801 default:
802 break;
803 }
804 break;
805
806 case PARITY:
807 switch (GET_CODE (op))
808 {
809 case NOT:
810 case BSWAP:
811 case ZERO_EXTEND:
812 case SIGN_EXTEND:
813 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
814 GET_MODE (XEXP (op, 0)));
815
816 case ROTATE:
817 case ROTATERT:
818 /* Rotations don't affect parity. */
819 if (!side_effects_p (XEXP (op, 1)))
820 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
821 GET_MODE (XEXP (op, 0)));
822 break;
823
824 default:
825 break;
826 }
827 break;
828
829 case BSWAP:
830 /* (bswap (bswap x)) -> x. */
831 if (GET_CODE (op) == BSWAP)
832 return XEXP (op, 0);
833 break;
834
835 case FLOAT:
836 /* (float (sign_extend <X>)) = (float <X>). */
837 if (GET_CODE (op) == SIGN_EXTEND)
838 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
839 GET_MODE (XEXP (op, 0)));
840 break;
841
842 case SIGN_EXTEND:
843 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
844 becomes just the MINUS if its mode is MODE. This allows
845 folding switch statements on machines using casesi (such as
846 the VAX). */
847 if (GET_CODE (op) == TRUNCATE
848 && GET_MODE (XEXP (op, 0)) == mode
849 && GET_CODE (XEXP (op, 0)) == MINUS
850 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
851 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
852 return XEXP (op, 0);
853
854 /* Check for a sign extension of a subreg of a promoted
855 variable, where the promotion is sign-extended, and the
856 target mode is the same as the variable's promotion. */
857 if (GET_CODE (op) == SUBREG
858 && SUBREG_PROMOTED_VAR_P (op)
859 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
860 && GET_MODE (XEXP (op, 0)) == mode)
861 return XEXP (op, 0);
862
863 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
864 if (! POINTERS_EXTEND_UNSIGNED
865 && mode == Pmode && GET_MODE (op) == ptr_mode
866 && (CONSTANT_P (op)
867 || (GET_CODE (op) == SUBREG
868 && REG_P (SUBREG_REG (op))
869 && REG_POINTER (SUBREG_REG (op))
870 && GET_MODE (SUBREG_REG (op)) == Pmode)))
871 return convert_memory_address (Pmode, op);
872 #endif
873 break;
874
875 case ZERO_EXTEND:
876 /* Check for a zero extension of a subreg of a promoted
877 variable, where the promotion is zero-extended, and the
878 target mode is the same as the variable's promotion. */
879 if (GET_CODE (op) == SUBREG
880 && SUBREG_PROMOTED_VAR_P (op)
881 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
882 && GET_MODE (XEXP (op, 0)) == mode)
883 return XEXP (op, 0);
884
885 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
886 if (POINTERS_EXTEND_UNSIGNED > 0
887 && mode == Pmode && GET_MODE (op) == ptr_mode
888 && (CONSTANT_P (op)
889 || (GET_CODE (op) == SUBREG
890 && REG_P (SUBREG_REG (op))
891 && REG_POINTER (SUBREG_REG (op))
892 && GET_MODE (SUBREG_REG (op)) == Pmode)))
893 return convert_memory_address (Pmode, op);
894 #endif
895 break;
896
897 default:
898 break;
899 }
900
901 return 0;
902 }
903
904 /* Try to compute the value of a unary operation CODE whose output mode is to
905 be MODE with input operand OP whose mode was originally OP_MODE.
906 Return zero if the value cannot be computed. */
907 rtx
908 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
909 rtx op, enum machine_mode op_mode)
910 {
911 unsigned int width = GET_MODE_BITSIZE (mode);
912
913 if (code == VEC_DUPLICATE)
914 {
915 gcc_assert (VECTOR_MODE_P (mode));
916 if (GET_MODE (op) != VOIDmode)
917 {
918 if (!VECTOR_MODE_P (GET_MODE (op)))
919 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
920 else
921 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
922 (GET_MODE (op)));
923 }
924 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
925 || GET_CODE (op) == CONST_VECTOR)
926 {
927 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
928 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
929 rtvec v = rtvec_alloc (n_elts);
930 unsigned int i;
931
932 if (GET_CODE (op) != CONST_VECTOR)
933 for (i = 0; i < n_elts; i++)
934 RTVEC_ELT (v, i) = op;
935 else
936 {
937 enum machine_mode inmode = GET_MODE (op);
938 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
939 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
940
941 gcc_assert (in_n_elts < n_elts);
942 gcc_assert ((n_elts % in_n_elts) == 0);
943 for (i = 0; i < n_elts; i++)
944 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
945 }
946 return gen_rtx_CONST_VECTOR (mode, v);
947 }
948 }
949
950 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
951 {
952 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
953 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
954 enum machine_mode opmode = GET_MODE (op);
955 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
956 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
957 rtvec v = rtvec_alloc (n_elts);
958 unsigned int i;
959
960 gcc_assert (op_n_elts == n_elts);
961 for (i = 0; i < n_elts; i++)
962 {
963 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
964 CONST_VECTOR_ELT (op, i),
965 GET_MODE_INNER (opmode));
966 if (!x)
967 return 0;
968 RTVEC_ELT (v, i) = x;
969 }
970 return gen_rtx_CONST_VECTOR (mode, v);
971 }
972
973 /* The order of these tests is critical so that, for example, we don't
974 check the wrong mode (input vs. output) for a conversion operation,
975 such as FIX. At some point, this should be simplified. */
976
977 if (code == FLOAT && GET_MODE (op) == VOIDmode
978 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
979 {
980 HOST_WIDE_INT hv, lv;
981 REAL_VALUE_TYPE d;
982
983 if (GET_CODE (op) == CONST_INT)
984 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
985 else
986 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
987
988 REAL_VALUE_FROM_INT (d, lv, hv, mode);
989 d = real_value_truncate (mode, d);
990 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
991 }
992 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
993 && (GET_CODE (op) == CONST_DOUBLE
994 || GET_CODE (op) == CONST_INT))
995 {
996 HOST_WIDE_INT hv, lv;
997 REAL_VALUE_TYPE d;
998
999 if (GET_CODE (op) == CONST_INT)
1000 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1001 else
1002 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1003
1004 if (op_mode == VOIDmode)
1005 {
1006 /* We don't know how to interpret negative-looking numbers in
1007 this case, so don't try to fold those. */
1008 if (hv < 0)
1009 return 0;
1010 }
1011 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1012 ;
1013 else
1014 hv = 0, lv &= GET_MODE_MASK (op_mode);
1015
1016 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1017 d = real_value_truncate (mode, d);
1018 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1019 }
1020
1021 if (GET_CODE (op) == CONST_INT
1022 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1023 {
1024 HOST_WIDE_INT arg0 = INTVAL (op);
1025 HOST_WIDE_INT val;
1026
1027 switch (code)
1028 {
1029 case NOT:
1030 val = ~ arg0;
1031 break;
1032
1033 case NEG:
1034 val = - arg0;
1035 break;
1036
1037 case ABS:
1038 val = (arg0 >= 0 ? arg0 : - arg0);
1039 break;
1040
1041 case FFS:
1042 /* Don't use ffs here. Instead, get low order bit and then its
1043 number. If arg0 is zero, this will return 0, as desired. */
1044 arg0 &= GET_MODE_MASK (mode);
1045 val = exact_log2 (arg0 & (- arg0)) + 1;
1046 break;
1047
1048 case CLZ:
1049 arg0 &= GET_MODE_MASK (mode);
1050 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1051 ;
1052 else
1053 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1054 break;
1055
1056 case CTZ:
1057 arg0 &= GET_MODE_MASK (mode);
1058 if (arg0 == 0)
1059 {
1060 /* Even if the value at zero is undefined, we have to come
1061 up with some replacement. Seems good enough. */
1062 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1063 val = GET_MODE_BITSIZE (mode);
1064 }
1065 else
1066 val = exact_log2 (arg0 & -arg0);
1067 break;
1068
1069 case POPCOUNT:
1070 arg0 &= GET_MODE_MASK (mode);
1071 val = 0;
1072 while (arg0)
1073 val++, arg0 &= arg0 - 1;
1074 break;
1075
1076 case PARITY:
1077 arg0 &= GET_MODE_MASK (mode);
1078 val = 0;
1079 while (arg0)
1080 val++, arg0 &= arg0 - 1;
1081 val &= 1;
1082 break;
1083
1084 case BSWAP:
1085 {
1086 unsigned int s;
1087
1088 val = 0;
1089 for (s = 0; s < width; s += 8)
1090 {
1091 unsigned int d = width - s - 8;
1092 unsigned HOST_WIDE_INT byte;
1093 byte = (arg0 >> s) & 0xff;
1094 val |= byte << d;
1095 }
1096 }
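/* For example, with a 32-bit value 0x12345678 the loop above
   produces 0x78563412.  */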
1097 break;
1098
1099 case TRUNCATE:
1100 val = arg0;
1101 break;
1102
1103 case ZERO_EXTEND:
1104 /* When zero-extending a CONST_INT, we need to know its
1105 original mode. */
1106 gcc_assert (op_mode != VOIDmode);
1107 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1108 {
1109 /* If we were really extending the mode,
1110 we would have to distinguish between zero-extension
1111 and sign-extension. */
1112 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1113 val = arg0;
1114 }
1115 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1116 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1117 else
1118 return 0;
1119 break;
1120
1121 case SIGN_EXTEND:
1122 if (op_mode == VOIDmode)
1123 op_mode = mode;
1124 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1125 {
1126 /* If we were really extending the mode,
1127 we would have to distinguish between zero-extension
1128 and sign-extension. */
1129 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1130 val = arg0;
1131 }
1132 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1133 {
1134 val
1135 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1136 if (val
1137 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1138 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1139 }
1140 else
1141 return 0;
1142 break;
1143
1144 case SQRT:
1145 case FLOAT_EXTEND:
1146 case FLOAT_TRUNCATE:
1147 case SS_TRUNCATE:
1148 case US_TRUNCATE:
1149 case SS_NEG:
1150 return 0;
1151
1152 default:
1153 gcc_unreachable ();
1154 }
1155
1156 return gen_int_mode (val, mode);
1157 }
1158
1159 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1160 for a DImode operation on a CONST_INT. */
1161 else if (GET_MODE (op) == VOIDmode
1162 && width <= HOST_BITS_PER_WIDE_INT * 2
1163 && (GET_CODE (op) == CONST_DOUBLE
1164 || GET_CODE (op) == CONST_INT))
1165 {
1166 unsigned HOST_WIDE_INT l1, lv;
1167 HOST_WIDE_INT h1, hv;
1168
1169 if (GET_CODE (op) == CONST_DOUBLE)
1170 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1171 else
1172 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1173
1174 switch (code)
1175 {
1176 case NOT:
1177 lv = ~ l1;
1178 hv = ~ h1;
1179 break;
1180
1181 case NEG:
1182 neg_double (l1, h1, &lv, &hv);
1183 break;
1184
1185 case ABS:
1186 if (h1 < 0)
1187 neg_double (l1, h1, &lv, &hv);
1188 else
1189 lv = l1, hv = h1;
1190 break;
1191
1192 case FFS:
1193 hv = 0;
1194 if (l1 == 0)
1195 {
1196 if (h1 == 0)
1197 lv = 0;
1198 else
1199 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1200 }
1201 else
1202 lv = exact_log2 (l1 & -l1) + 1;
1203 break;
1204
1205 case CLZ:
1206 hv = 0;
1207 if (h1 != 0)
1208 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1209 - HOST_BITS_PER_WIDE_INT;
1210 else if (l1 != 0)
1211 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1212 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1213 lv = GET_MODE_BITSIZE (mode);
1214 break;
1215
1216 case CTZ:
1217 hv = 0;
1218 if (l1 != 0)
1219 lv = exact_log2 (l1 & -l1);
1220 else if (h1 != 0)
1221 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1222 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1223 lv = GET_MODE_BITSIZE (mode);
1224 break;
1225
1226 case POPCOUNT:
1227 hv = 0;
1228 lv = 0;
1229 while (l1)
1230 lv++, l1 &= l1 - 1;
1231 while (h1)
1232 lv++, h1 &= h1 - 1;
1233 break;
1234
1235 case PARITY:
1236 hv = 0;
1237 lv = 0;
1238 while (l1)
1239 lv++, l1 &= l1 - 1;
1240 while (h1)
1241 lv++, h1 &= h1 - 1;
1242 lv &= 1;
1243 break;
1244
1245 case BSWAP:
1246 {
1247 unsigned int s;
1248
1249 hv = 0;
1250 lv = 0;
1251 for (s = 0; s < width; s += 8)
1252 {
1253 unsigned int d = width - s - 8;
1254 unsigned HOST_WIDE_INT byte;
1255
1256 if (s < HOST_BITS_PER_WIDE_INT)
1257 byte = (l1 >> s) & 0xff;
1258 else
1259 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1260
1261 if (d < HOST_BITS_PER_WIDE_INT)
1262 lv |= byte << d;
1263 else
1264 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1265 }
1266 }
1267 break;
1268
1269 case TRUNCATE:
1270 /* This is just a change-of-mode, so do nothing. */
1271 lv = l1, hv = h1;
1272 break;
1273
1274 case ZERO_EXTEND:
1275 gcc_assert (op_mode != VOIDmode);
1276
1277 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1278 return 0;
1279
1280 hv = 0;
1281 lv = l1 & GET_MODE_MASK (op_mode);
1282 break;
1283
1284 case SIGN_EXTEND:
1285 if (op_mode == VOIDmode
1286 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1287 return 0;
1288 else
1289 {
1290 lv = l1 & GET_MODE_MASK (op_mode);
1291 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1292 && (lv & ((HOST_WIDE_INT) 1
1293 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1294 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1295
1296 hv = HWI_SIGN_EXTEND (lv);
1297 }
1298 break;
1299
1300 case SQRT:
1301 return 0;
1302
1303 default:
1304 return 0;
1305 }
1306
1307 return immed_double_const (lv, hv, mode);
1308 }
1309
1310 else if (GET_CODE (op) == CONST_DOUBLE
1311 && SCALAR_FLOAT_MODE_P (mode))
1312 {
1313 REAL_VALUE_TYPE d, t;
1314 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1315
1316 switch (code)
1317 {
1318 case SQRT:
1319 if (HONOR_SNANS (mode) && real_isnan (&d))
1320 return 0;
1321 real_sqrt (&t, mode, &d);
1322 d = t;
1323 break;
1324 case ABS:
1325 d = REAL_VALUE_ABS (d);
1326 break;
1327 case NEG:
1328 d = REAL_VALUE_NEGATE (d);
1329 break;
1330 case FLOAT_TRUNCATE:
1331 d = real_value_truncate (mode, d);
1332 break;
1333 case FLOAT_EXTEND:
1334 /* All this does is change the mode. */
1335 break;
1336 case FIX:
1337 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1338 break;
1339 case NOT:
1340 {
1341 long tmp[4];
1342 int i;
1343
1344 real_to_target (tmp, &d, GET_MODE (op));
1345 for (i = 0; i < 4; i++)
1346 tmp[i] = ~tmp[i];
1347 real_from_target (&d, tmp, mode);
1348 break;
1349 }
1350 default:
1351 gcc_unreachable ();
1352 }
1353 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1354 }
1355
1356 else if (GET_CODE (op) == CONST_DOUBLE
1357 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1358 && GET_MODE_CLASS (mode) == MODE_INT
1359 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1360 {
1361 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1362 operators are intentionally left unspecified (to ease implementation
1363 by target backends), for consistency, this routine implements the
1364 same semantics for constant folding as used by the middle-end. */
1365
1366 /* This was formerly used only for non-IEEE float.
1367 eggert@twinsun.com says it is safe for IEEE also. */
1368 HOST_WIDE_INT xh, xl, th, tl;
1369 REAL_VALUE_TYPE x, t;
1370 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1371 switch (code)
1372 {
1373 case FIX:
1374 if (REAL_VALUE_ISNAN (x))
1375 return const0_rtx;
1376
1377 /* Test against the signed upper bound. */
1378 if (width > HOST_BITS_PER_WIDE_INT)
1379 {
1380 th = ((unsigned HOST_WIDE_INT) 1
1381 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1382 tl = -1;
1383 }
1384 else
1385 {
1386 th = 0;
1387 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1388 }
1389 real_from_integer (&t, VOIDmode, tl, th, 0);
1390 if (REAL_VALUES_LESS (t, x))
1391 {
1392 xh = th;
1393 xl = tl;
1394 break;
1395 }
1396
1397 /* Test against the signed lower bound. */
1398 if (width > HOST_BITS_PER_WIDE_INT)
1399 {
1400 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1401 tl = 0;
1402 }
1403 else
1404 {
1405 th = -1;
1406 tl = (HOST_WIDE_INT) -1 << (width - 1);
1407 }
1408 real_from_integer (&t, VOIDmode, tl, th, 0);
1409 if (REAL_VALUES_LESS (x, t))
1410 {
1411 xh = th;
1412 xl = tl;
1413 break;
1414 }
1415 REAL_VALUE_TO_INT (&xl, &xh, x);
1416 break;
1417
1418 case UNSIGNED_FIX:
1419 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1420 return const0_rtx;
1421
1422 /* Test against the unsigned upper bound. */
1423 if (width == 2*HOST_BITS_PER_WIDE_INT)
1424 {
1425 th = -1;
1426 tl = -1;
1427 }
1428 else if (width >= HOST_BITS_PER_WIDE_INT)
1429 {
1430 th = ((unsigned HOST_WIDE_INT) 1
1431 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1432 tl = -1;
1433 }
1434 else
1435 {
1436 th = 0;
1437 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1438 }
1439 real_from_integer (&t, VOIDmode, tl, th, 1);
1440 if (REAL_VALUES_LESS (t, x))
1441 {
1442 xh = th;
1443 xl = tl;
1444 break;
1445 }
1446
1447 REAL_VALUE_TO_INT (&xl, &xh, x);
1448 break;
1449
1450 default:
1451 gcc_unreachable ();
1452 }
1453 return immed_double_const (xl, xh, mode);
1454 }
1455
1456 return NULL_RTX;
1457 }
1458 \f
1459 /* Subroutine of simplify_binary_operation to simplify a commutative,
1460 associative binary operation CODE with result mode MODE, operating
1461 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1462 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1463 canonicalization is possible. */
1464
1465 static rtx
1466 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1467 rtx op0, rtx op1)
1468 {
1469 rtx tem;
1470
1471 /* Linearize the operator to the left. */
1472 if (GET_CODE (op1) == code)
1473 {
1474 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1475 if (GET_CODE (op0) == code)
1476 {
1477 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1478 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1479 }
1480
1481 /* "a op (b op c)" becomes "(b op c) op a". */
1482 if (! swap_commutative_operands_p (op1, op0))
1483 return simplify_gen_binary (code, mode, op1, op0);
1484
1485 tem = op0;
1486 op0 = op1;
1487 op1 = tem;
1488 }
1489
1490 if (GET_CODE (op0) == code)
1491 {
1492 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1493 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1494 {
1495 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1496 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1497 }
1498
1499 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1500 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1501 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1502 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1503 if (tem != 0)
1504 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1505
1506 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1507 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1508 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1509 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1510 if (tem != 0)
1511 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1512 }
1513
1514 return 0;
1515 }
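/* As an illustration, (plus (plus (reg) (const_int 2)) (const_int 3)) is
   reassociated so the two constants meet: the nested constant is folded
   with the outer one and the result is (plus (reg) (const_int 5)).  */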
1516
1517
1518 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1519 and OP1. Return 0 if no simplification is possible.
1520
1521 Don't use this for relational operations such as EQ or LT.
1522 Use simplify_relational_operation instead. */
1523 rtx
1524 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1525 rtx op0, rtx op1)
1526 {
1527 rtx trueop0, trueop1;
1528 rtx tem;
1529
1530 /* Relational operations don't work here. We must know the mode
1531 of the operands in order to do the comparison correctly.
1532 Assuming a full word can give incorrect results.
1533 Consider comparing 128 with -128 in QImode. */
1534 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1535 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1536
1537 /* Make sure the constant is second. */
1538 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1539 && swap_commutative_operands_p (op0, op1))
1540 {
1541 tem = op0, op0 = op1, op1 = tem;
1542 }
1543
1544 trueop0 = avoid_constant_pool_reference (op0);
1545 trueop1 = avoid_constant_pool_reference (op1);
1546
1547 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1548 if (tem)
1549 return tem;
1550 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1551 }
1552
1553 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1554 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1555 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1556 actual constants. */
1557
1558 static rtx
1559 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1560 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1561 {
1562 rtx tem, reversed, opleft, opright;
1563 HOST_WIDE_INT val;
1564 unsigned int width = GET_MODE_BITSIZE (mode);
1565
1566 /* Even if we can't compute a constant result,
1567 there are some cases worth simplifying. */
1568
1569 switch (code)
1570 {
1571 case PLUS:
1572 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1573 when x is NaN, infinite, or finite and nonzero. They aren't
1574 when x is -0 and the rounding mode is not towards -infinity,
1575 since (-0) + 0 is then 0. */
1576 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1577 return op0;
1578
1579 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1580 transformations are safe even for IEEE. */
1581 if (GET_CODE (op0) == NEG)
1582 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1583 else if (GET_CODE (op1) == NEG)
1584 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1585
1586 /* (~a) + 1 -> -a */
1587 if (INTEGRAL_MODE_P (mode)
1588 && GET_CODE (op0) == NOT
1589 && trueop1 == const1_rtx)
1590 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1591
1592 /* Handle both-operands-constant cases. We can only add
1593 CONST_INTs to constants since the sum of relocatable symbols
1594 can't be handled by most assemblers. Don't add CONST_INT
1595 to CONST_INT since overflow won't be computed properly if wider
1596 than HOST_BITS_PER_WIDE_INT. */
1597
1598 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1599 && GET_CODE (op1) == CONST_INT)
1600 return plus_constant (op0, INTVAL (op1));
1601 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1602 && GET_CODE (op0) == CONST_INT)
1603 return plus_constant (op1, INTVAL (op0));
1604
1605 /* See if this is something like X * C - X or vice versa or
1606 if the multiplication is written as a shift. If so, we can
1607 distribute and make a new multiply, shift, or maybe just
1608 have X (if C is 2 in the example above). But don't make
1609 something more expensive than we had before. */
1610
1611 if (SCALAR_INT_MODE_P (mode))
1612 {
1613 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1614 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1615 rtx lhs = op0, rhs = op1;
1616
1617 if (GET_CODE (lhs) == NEG)
1618 {
1619 coeff0l = -1;
1620 coeff0h = -1;
1621 lhs = XEXP (lhs, 0);
1622 }
1623 else if (GET_CODE (lhs) == MULT
1624 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1625 {
1626 coeff0l = INTVAL (XEXP (lhs, 1));
1627 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1628 lhs = XEXP (lhs, 0);
1629 }
1630 else if (GET_CODE (lhs) == ASHIFT
1631 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1632 && INTVAL (XEXP (lhs, 1)) >= 0
1633 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1634 {
1635 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1636 coeff0h = 0;
1637 lhs = XEXP (lhs, 0);
1638 }
1639
1640 if (GET_CODE (rhs) == NEG)
1641 {
1642 coeff1l = -1;
1643 coeff1h = -1;
1644 rhs = XEXP (rhs, 0);
1645 }
1646 else if (GET_CODE (rhs) == MULT
1647 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1648 {
1649 coeff1l = INTVAL (XEXP (rhs, 1));
1650 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1651 rhs = XEXP (rhs, 0);
1652 }
1653 else if (GET_CODE (rhs) == ASHIFT
1654 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1655 && INTVAL (XEXP (rhs, 1)) >= 0
1656 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1657 {
1658 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1659 coeff1h = 0;
1660 rhs = XEXP (rhs, 0);
1661 }
1662
1663 if (rtx_equal_p (lhs, rhs))
1664 {
1665 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1666 rtx coeff;
1667 unsigned HOST_WIDE_INT l;
1668 HOST_WIDE_INT h;
1669
1670 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1671 coeff = immed_double_const (l, h, mode);
1672
1673 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1674 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1675 ? tem : 0;
1676 }
1677 }
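/* For example, (plus (mult X (const_int 3)) X) has coefficients 3 and 1
   and becomes (mult X (const_int 4)) when that is no more expensive, and
   (plus (ashift X (const_int 2)) (neg X)) likewise becomes
   (mult X (const_int 3)).  */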
1678
1679 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1680 if ((GET_CODE (op1) == CONST_INT
1681 || GET_CODE (op1) == CONST_DOUBLE)
1682 && GET_CODE (op0) == XOR
1683 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1684 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1685 && mode_signbit_p (mode, op1))
1686 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1687 simplify_gen_binary (XOR, mode, op1,
1688 XEXP (op0, 1)));
1689
1690 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1691 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1692 && GET_CODE (op0) == MULT
1693 && GET_CODE (XEXP (op0, 0)) == NEG)
1694 {
1695 rtx in1, in2;
1696
1697 in1 = XEXP (XEXP (op0, 0), 0);
1698 in2 = XEXP (op0, 1);
1699 return simplify_gen_binary (MINUS, mode, op1,
1700 simplify_gen_binary (MULT, mode,
1701 in1, in2));
1702 }
1703
1704 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1705 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1706 is 1. */
1707 if (COMPARISON_P (op0)
1708 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1709 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1710 && (reversed = reversed_comparison (op0, mode)))
1711 return
1712 simplify_gen_unary (NEG, mode, reversed, mode);
1713
1714 /* If one of the operands is a PLUS or a MINUS, see if we can
1715 simplify this by the associative law.
1716 Don't use the associative law for floating point.
1717 The inaccuracy makes it nonassociative,
1718 and subtle programs can break if operations are associated. */
1719
1720 if (INTEGRAL_MODE_P (mode)
1721 && (plus_minus_operand_p (op0)
1722 || plus_minus_operand_p (op1))
1723 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1724 return tem;
1725
1726 /* Reassociate floating point addition only when the user
1727 specifies unsafe math optimizations. */
1728 if (FLOAT_MODE_P (mode)
1729 && flag_unsafe_math_optimizations)
1730 {
1731 tem = simplify_associative_operation (code, mode, op0, op1);
1732 if (tem)
1733 return tem;
1734 }
1735 break;
1736
1737 case COMPARE:
1738 #ifdef HAVE_cc0
1739 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1740 using cc0, in which case we want to leave it as a COMPARE
1741 so we can distinguish it from a register-register-copy.
1742
1743 In IEEE floating point, x-0 is not the same as x. */
1744
1745 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1746 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1747 && trueop1 == CONST0_RTX (mode))
1748 return op0;
1749 #endif
1750
1751 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1752 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1753 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1754 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1755 {
1756 rtx xop00 = XEXP (op0, 0);
1757 rtx xop10 = XEXP (op1, 0);
1758
1759 #ifdef HAVE_cc0
1760 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1761 #else
1762 if (REG_P (xop00) && REG_P (xop10)
1763 && GET_MODE (xop00) == GET_MODE (xop10)
1764 && REGNO (xop00) == REGNO (xop10)
1765 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1766 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1767 #endif
1768 return xop00;
1769 }
1770 break;
1771
1772 case MINUS:
1773 /* We can't assume x-x is 0 even with non-IEEE floating point,
1774 but since it is zero except in very strange circumstances, we
1775 will treat it as zero with -funsafe-math-optimizations and
1776 -ffinite-math-only. */
1777 if (rtx_equal_p (trueop0, trueop1)
1778 && ! side_effects_p (op0)
1779 && (! FLOAT_MODE_P (mode)
1780 || (flag_unsafe_math_optimizations
1781 && !HONOR_NANS (mode)
1782 && !HONOR_INFINITIES (mode))))
1783 return CONST0_RTX (mode);
1784
1785 /* Change subtraction from zero into negation. (0 - x) is the
1786 same as -x when x is NaN, infinite, or finite and nonzero.
1787 But if the mode has signed zeros, and does not round towards
1788 -infinity, then 0 - 0 is 0, not -0. */
1789 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1790 return simplify_gen_unary (NEG, mode, op1, mode);
1791
1792 /* (-1 - a) is ~a. */
1793 if (trueop0 == constm1_rtx)
1794 return simplify_gen_unary (NOT, mode, op1, mode);
1795
1796 /* Subtracting 0 has no effect unless the mode has signed zeros
1797 and supports rounding towards -infinity. In such a case,
1798 0 - 0 is -0. */
1799 if (!(HONOR_SIGNED_ZEROS (mode)
1800 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1801 && trueop1 == CONST0_RTX (mode))
1802 return op0;
1803
1804 /* See if this is something like X * C - X or vice versa or
1805 if the multiplication is written as a shift. If so, we can
1806 distribute and make a new multiply, shift, or maybe just
1807 have X (if C is 2 in the example above). But don't make
1808 something more expensive than we had before. */
1809
1810 if (SCALAR_INT_MODE_P (mode))
1811 {
1812 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1813 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1814 rtx lhs = op0, rhs = op1;
1815
1816 if (GET_CODE (lhs) == NEG)
1817 {
1818 coeff0l = -1;
1819 coeff0h = -1;
1820 lhs = XEXP (lhs, 0);
1821 }
1822 else if (GET_CODE (lhs) == MULT
1823 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1824 {
1825 coeff0l = INTVAL (XEXP (lhs, 1));
1826 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1827 lhs = XEXP (lhs, 0);
1828 }
1829 else if (GET_CODE (lhs) == ASHIFT
1830 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1831 && INTVAL (XEXP (lhs, 1)) >= 0
1832 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1833 {
1834 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1835 coeff0h = 0;
1836 lhs = XEXP (lhs, 0);
1837 }
1838
1839 if (GET_CODE (rhs) == NEG)
1840 {
1841 negcoeff1l = 1;
1842 negcoeff1h = 0;
1843 rhs = XEXP (rhs, 0);
1844 }
1845 else if (GET_CODE (rhs) == MULT
1846 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1847 {
1848 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1849 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1850 rhs = XEXP (rhs, 0);
1851 }
1852 else if (GET_CODE (rhs) == ASHIFT
1853 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1854 && INTVAL (XEXP (rhs, 1)) >= 0
1855 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1856 {
1857 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1858 negcoeff1h = -1;
1859 rhs = XEXP (rhs, 0);
1860 }
1861
1862 if (rtx_equal_p (lhs, rhs))
1863 {
1864 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1865 rtx coeff;
1866 unsigned HOST_WIDE_INT l;
1867 HOST_WIDE_INT h;
1868
1869 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1870 coeff = immed_double_const (l, h, mode);
1871
1872 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1873 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1874 ? tem : 0;
1875 }
1876 }
1877
1878 /* (a - (-b)) -> (a + b). True even for IEEE. */
1879 if (GET_CODE (op1) == NEG)
1880 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1881
1882 /* (-x - c) may be simplified as (-c - x). */
1883 if (GET_CODE (op0) == NEG
1884 && (GET_CODE (op1) == CONST_INT
1885 || GET_CODE (op1) == CONST_DOUBLE))
1886 {
1887 tem = simplify_unary_operation (NEG, mode, op1, mode);
1888 if (tem)
1889 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1890 }
1891
1892 /* Don't let a relocatable value get a negative coeff. */
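      /* e.g. (minus x (const_int 4)) becomes (plus x (const_int -4)).  */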
1893 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1894 return simplify_gen_binary (PLUS, mode,
1895 op0,
1896 neg_const_int (mode, op1));
1897
1898 /* (x - (x & y)) -> (x & ~y) */
1899 if (GET_CODE (op1) == AND)
1900 {
1901 if (rtx_equal_p (op0, XEXP (op1, 0)))
1902 {
1903 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1904 GET_MODE (XEXP (op1, 1)));
1905 return simplify_gen_binary (AND, mode, op0, tem);
1906 }
1907 if (rtx_equal_p (op0, XEXP (op1, 1)))
1908 {
1909 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1910 GET_MODE (XEXP (op1, 0)));
1911 return simplify_gen_binary (AND, mode, op0, tem);
1912 }
1913 }
1914
1915 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1916 by reversing the comparison code if valid. */
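      /* With 0/1 comparison results, 1 - (eq a b) is simply (ne a b).  */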
1917 if (STORE_FLAG_VALUE == 1
1918 && trueop0 == const1_rtx
1919 && COMPARISON_P (op1)
1920 && (reversed = reversed_comparison (op1, mode)))
1921 return reversed;
1922
1923 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1924 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1925 && GET_CODE (op1) == MULT
1926 && GET_CODE (XEXP (op1, 0)) == NEG)
1927 {
1928 rtx in1, in2;
1929
1930 in1 = XEXP (XEXP (op1, 0), 0);
1931 in2 = XEXP (op1, 1);
1932 return simplify_gen_binary (PLUS, mode,
1933 simplify_gen_binary (MULT, mode,
1934 in1, in2),
1935 op0);
1936 }
1937
1938 /* Canonicalize (minus (neg A) (mult B C)) to
1939 (minus (mult (neg B) C) A). */
1940 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1941 && GET_CODE (op1) == MULT
1942 && GET_CODE (op0) == NEG)
1943 {
1944 rtx in1, in2;
1945
1946 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1947 in2 = XEXP (op1, 1);
1948 return simplify_gen_binary (MINUS, mode,
1949 simplify_gen_binary (MULT, mode,
1950 in1, in2),
1951 XEXP (op0, 0));
1952 }
1953
1954 /* If one of the operands is a PLUS or a MINUS, see if we can
1955 simplify this by the associative law. This will, for example,
1956 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1957 Don't use the associative law for floating point.
1958 The inaccuracy makes it nonassociative,
1959 and subtle programs can break if operations are associated. */
1960
1961 if (INTEGRAL_MODE_P (mode)
1962 && (plus_minus_operand_p (op0)
1963 || plus_minus_operand_p (op1))
1964 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1965 return tem;
1966 break;
1967
1968 case MULT:
1969 if (trueop1 == constm1_rtx)
1970 return simplify_gen_unary (NEG, mode, op0, mode);
1971
1972 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1973 x is NaN, since x * 0 is then also NaN. Nor is it valid
1974 when the mode has signed zeros, since multiplying a negative
1975 number by 0 will give -0, not 0. */
1976 if (!HONOR_NANS (mode)
1977 && !HONOR_SIGNED_ZEROS (mode)
1978 && trueop1 == CONST0_RTX (mode)
1979 && ! side_effects_p (op0))
1980 return op1;
1981
1982 /* In IEEE floating point, x*1 is not equivalent to x for
1983 signalling NaNs. */
1984 if (!HONOR_SNANS (mode)
1985 && trueop1 == CONST1_RTX (mode))
1986 return op0;
1987
1988 /* Convert multiply by constant power of two into shift unless
1989 we are still generating RTL. This test is a kludge. */
1990 if (GET_CODE (trueop1) == CONST_INT
1991 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1992 /* If the mode is larger than the host word size, and the
1993 uppermost bit is set, then this isn't a power of two due
1994 to implicit sign extension. */
1995 && (width <= HOST_BITS_PER_WIDE_INT
1996 || val != HOST_BITS_PER_WIDE_INT - 1))
1997 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1998
1999 /* Likewise for multipliers wider than a word. */
2000 if (GET_CODE (trueop1) == CONST_DOUBLE
2001 && (GET_MODE (trueop1) == VOIDmode
2002 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2003 && GET_MODE (op0) == mode
2004 && CONST_DOUBLE_LOW (trueop1) == 0
2005 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2006 return simplify_gen_binary (ASHIFT, mode, op0,
2007 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2008
2009 /* x*2 is x+x and x*(-1) is -x */
2010 if (GET_CODE (trueop1) == CONST_DOUBLE
2011 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2012 && GET_MODE (op0) == mode)
2013 {
2014 REAL_VALUE_TYPE d;
2015 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2016
2017 if (REAL_VALUES_EQUAL (d, dconst2))
2018 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2019
2020 if (!HONOR_SNANS (mode)
2021 && REAL_VALUES_EQUAL (d, dconstm1))
2022 return simplify_gen_unary (NEG, mode, op0, mode);
2023 }
2024
2025 /* Optimize -x * -x as x * x. */
2026 if (FLOAT_MODE_P (mode)
2027 && GET_CODE (op0) == NEG
2028 && GET_CODE (op1) == NEG
2029 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2030 && !side_effects_p (XEXP (op0, 0)))
2031 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2032
2033 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2034 if (SCALAR_FLOAT_MODE_P (mode)
2035 && GET_CODE (op0) == ABS
2036 && GET_CODE (op1) == ABS
2037 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2038 && !side_effects_p (XEXP (op0, 0)))
2039 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2040
2041 /* Reassociate multiplication, but for floating point MULTs
2042 only when the user specifies unsafe math optimizations. */
2043 if (! FLOAT_MODE_P (mode)
2044 || flag_unsafe_math_optimizations)
2045 {
2046 tem = simplify_associative_operation (code, mode, op0, op1);
2047 if (tem)
2048 return tem;
2049 }
2050 break;
2051
2052 case IOR:
2053 if (trueop1 == const0_rtx)
2054 return op0;
2055 if (GET_CODE (trueop1) == CONST_INT
2056 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2057 == GET_MODE_MASK (mode)))
2058 return op1;
2059 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2060 return op0;
2061 /* A | (~A) -> -1 */
2062 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2063 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2064 && ! side_effects_p (op0)
2065 && SCALAR_INT_MODE_P (mode))
2066 return constm1_rtx;
2067
2068 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2069 if (GET_CODE (op1) == CONST_INT
2070 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2071 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2072 return op1;
2073
2074 /* Canonicalize (X & C1) | C2. */
2075 if (GET_CODE (op0) == AND
2076 && GET_CODE (trueop1) == CONST_INT
2077 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2078 {
2079 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2080 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2081 HOST_WIDE_INT c2 = INTVAL (trueop1);
2082
 2083 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2084 if ((c1 & c2) == c1
2085 && !side_effects_p (XEXP (op0, 0)))
2086 return trueop1;
2087
2088 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2089 if (((c1|c2) & mask) == mask)
2090 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2091
2092 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2093 if (((c1 & ~c2) & mask) != (c1 & mask))
2094 {
2095 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2096 gen_int_mode (c1 & ~c2, mode));
2097 return simplify_gen_binary (IOR, mode, tem, op1);
2098 }
2099 }
2100
2101 /* Convert (A & B) | A to A. */
2102 if (GET_CODE (op0) == AND
2103 && (rtx_equal_p (XEXP (op0, 0), op1)
2104 || rtx_equal_p (XEXP (op0, 1), op1))
2105 && ! side_effects_p (XEXP (op0, 0))
2106 && ! side_effects_p (XEXP (op0, 1)))
2107 return op1;
2108
2109 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2110 mode size to (rotate A CX). */
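      /* e.g. in SImode, (ior (ashift x 24) (lshiftrt x 8)) is (rotate x 24).  */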
2111
2112 if (GET_CODE (op1) == ASHIFT
2113 || GET_CODE (op1) == SUBREG)
2114 {
2115 opleft = op1;
2116 opright = op0;
2117 }
2118 else
2119 {
2120 opright = op1;
2121 opleft = op0;
2122 }
2123
2124 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2125 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2126 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2127 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2128 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2129 == GET_MODE_BITSIZE (mode)))
2130 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2131
2132 /* Same, but for ashift that has been "simplified" to a wider mode
2133 by simplify_shift_const. */
2134
2135 if (GET_CODE (opleft) == SUBREG
2136 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2137 && GET_CODE (opright) == LSHIFTRT
2138 && GET_CODE (XEXP (opright, 0)) == SUBREG
2139 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2140 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2141 && (GET_MODE_SIZE (GET_MODE (opleft))
2142 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2143 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2144 SUBREG_REG (XEXP (opright, 0)))
2145 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2146 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2147 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2148 == GET_MODE_BITSIZE (mode)))
2149 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2150 XEXP (SUBREG_REG (opleft), 1));
2151
 2152 /* If we have (ior (and X C1) C2), simplify this by making
2153 C1 as small as possible if C1 actually changes. */
2154 if (GET_CODE (op1) == CONST_INT
2155 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2156 || INTVAL (op1) > 0)
2157 && GET_CODE (op0) == AND
2158 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2159 && GET_CODE (op1) == CONST_INT
2160 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2161 return simplify_gen_binary (IOR, mode,
2162 simplify_gen_binary
2163 (AND, mode, XEXP (op0, 0),
2164 GEN_INT (INTVAL (XEXP (op0, 1))
2165 & ~INTVAL (op1))),
2166 op1);
2167
2168 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2169 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2170 the PLUS does not affect any of the bits in OP1: then we can do
2171 the IOR as a PLUS and we can associate. This is valid if OP1
2172 can be safely shifted left C bits. */
2173 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2174 && GET_CODE (XEXP (op0, 0)) == PLUS
2175 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2176 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2177 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2178 {
2179 int count = INTVAL (XEXP (op0, 1));
2180 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2181
2182 if (mask >> count == INTVAL (trueop1)
2183 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2184 return simplify_gen_binary (ASHIFTRT, mode,
2185 plus_constant (XEXP (op0, 0), mask),
2186 XEXP (op0, 1));
2187 }
2188
2189 tem = simplify_associative_operation (code, mode, op0, op1);
2190 if (tem)
2191 return tem;
2192 break;
2193
2194 case XOR:
2195 if (trueop1 == const0_rtx)
2196 return op0;
2197 if (GET_CODE (trueop1) == CONST_INT
2198 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2199 == GET_MODE_MASK (mode)))
2200 return simplify_gen_unary (NOT, mode, op0, mode);
2201 if (rtx_equal_p (trueop0, trueop1)
2202 && ! side_effects_p (op0)
2203 && GET_MODE_CLASS (mode) != MODE_CC)
2204 return CONST0_RTX (mode);
2205
2206 /* Canonicalize XOR of the most significant bit to PLUS. */
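      /* Flipping the sign bit is the same as adding it: any carry out of the
	 most significant bit is simply discarded by the modular arithmetic.  */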
2207 if ((GET_CODE (op1) == CONST_INT
2208 || GET_CODE (op1) == CONST_DOUBLE)
2209 && mode_signbit_p (mode, op1))
2210 return simplify_gen_binary (PLUS, mode, op0, op1);
2211 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2212 if ((GET_CODE (op1) == CONST_INT
2213 || GET_CODE (op1) == CONST_DOUBLE)
2214 && GET_CODE (op0) == PLUS
2215 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2216 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2217 && mode_signbit_p (mode, XEXP (op0, 1)))
2218 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2219 simplify_gen_binary (XOR, mode, op1,
2220 XEXP (op0, 1)));
2221
2222 /* If we are XORing two things that have no bits in common,
2223 convert them into an IOR. This helps to detect rotation encoded
2224 using those methods and possibly other simplifications. */
2225
2226 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2227 && (nonzero_bits (op0, mode)
2228 & nonzero_bits (op1, mode)) == 0)
2229 return (simplify_gen_binary (IOR, mode, op0, op1));
2230
2231 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2232 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2233 (NOT y). */
2234 {
2235 int num_negated = 0;
2236
2237 if (GET_CODE (op0) == NOT)
2238 num_negated++, op0 = XEXP (op0, 0);
2239 if (GET_CODE (op1) == NOT)
2240 num_negated++, op1 = XEXP (op1, 0);
2241
2242 if (num_negated == 2)
2243 return simplify_gen_binary (XOR, mode, op0, op1);
2244 else if (num_negated == 1)
2245 return simplify_gen_unary (NOT, mode,
2246 simplify_gen_binary (XOR, mode, op0, op1),
2247 mode);
2248 }
2249
2250 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2251 correspond to a machine insn or result in further simplifications
2252 if B is a constant. */
2253
2254 if (GET_CODE (op0) == AND
2255 && rtx_equal_p (XEXP (op0, 1), op1)
2256 && ! side_effects_p (op1))
2257 return simplify_gen_binary (AND, mode,
2258 simplify_gen_unary (NOT, mode,
2259 XEXP (op0, 0), mode),
2260 op1);
2261
2262 else if (GET_CODE (op0) == AND
2263 && rtx_equal_p (XEXP (op0, 0), op1)
2264 && ! side_effects_p (op1))
2265 return simplify_gen_binary (AND, mode,
2266 simplify_gen_unary (NOT, mode,
2267 XEXP (op0, 1), mode),
2268 op1);
2269
2270 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2271 comparison if STORE_FLAG_VALUE is 1. */
2272 if (STORE_FLAG_VALUE == 1
2273 && trueop1 == const1_rtx
2274 && COMPARISON_P (op0)
2275 && (reversed = reversed_comparison (op0, mode)))
2276 return reversed;
2277
2278 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2279 is (lt foo (const_int 0)), so we can perform the above
2280 simplification if STORE_FLAG_VALUE is 1. */
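      /* e.g. in SImode, (lshiftrt foo 31) is 1 exactly when FOO is negative,
	 i.e. (lt foo 0); XORing that with 1 yields (ge foo 0).  */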
2281
2282 if (STORE_FLAG_VALUE == 1
2283 && trueop1 == const1_rtx
2284 && GET_CODE (op0) == LSHIFTRT
2285 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2286 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2287 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2288
2289 /* (xor (comparison foo bar) (const_int sign-bit))
2290 when STORE_FLAG_VALUE is the sign bit. */
2291 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2292 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2293 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2294 && trueop1 == const_true_rtx
2295 && COMPARISON_P (op0)
2296 && (reversed = reversed_comparison (op0, mode)))
2297 return reversed;
2298
2300
2301 tem = simplify_associative_operation (code, mode, op0, op1);
2302 if (tem)
2303 return tem;
2304 break;
2305
2306 case AND:
2307 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2308 return trueop1;
2309 /* If we are turning off bits already known off in OP0, we need
2310 not do an AND. */
2311 if (GET_CODE (trueop1) == CONST_INT
2312 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2313 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2314 return op0;
2315 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2316 && GET_MODE_CLASS (mode) != MODE_CC)
2317 return op0;
2318 /* A & (~A) -> 0 */
2319 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2320 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2321 && ! side_effects_p (op0)
2322 && GET_MODE_CLASS (mode) != MODE_CC)
2323 return CONST0_RTX (mode);
2324
2325 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2326 there are no nonzero bits of C outside of X's mode. */
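      /* This is safe even for SIGN_EXTEND: the bits kept by C all lie within
	 X's mode, so the copies of the sign bit created by the extension are
	 masked away and a ZERO_EXTEND of the narrower AND gives the same value.  */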
2327 if ((GET_CODE (op0) == SIGN_EXTEND
2328 || GET_CODE (op0) == ZERO_EXTEND)
2329 && GET_CODE (trueop1) == CONST_INT
2330 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2331 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2332 & INTVAL (trueop1)) == 0)
2333 {
2334 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2335 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2336 gen_int_mode (INTVAL (trueop1),
2337 imode));
2338 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2339 }
2340
2341 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2342 if (GET_CODE (op0) == IOR
2343 && GET_CODE (trueop1) == CONST_INT
2344 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2345 {
2346 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2347 return simplify_gen_binary (IOR, mode,
2348 simplify_gen_binary (AND, mode,
2349 XEXP (op0, 0), op1),
2350 gen_int_mode (tmp, mode));
2351 }
2352
2353 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2354 insn (and may simplify more). */
2355 if (GET_CODE (op0) == XOR
2356 && rtx_equal_p (XEXP (op0, 0), op1)
2357 && ! side_effects_p (op1))
2358 return simplify_gen_binary (AND, mode,
2359 simplify_gen_unary (NOT, mode,
2360 XEXP (op0, 1), mode),
2361 op1);
2362
2363 if (GET_CODE (op0) == XOR
2364 && rtx_equal_p (XEXP (op0, 1), op1)
2365 && ! side_effects_p (op1))
2366 return simplify_gen_binary (AND, mode,
2367 simplify_gen_unary (NOT, mode,
2368 XEXP (op0, 0), mode),
2369 op1);
2370
2371 /* Similarly for (~(A ^ B)) & A. */
2372 if (GET_CODE (op0) == NOT
2373 && GET_CODE (XEXP (op0, 0)) == XOR
2374 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2375 && ! side_effects_p (op1))
2376 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2377
2378 if (GET_CODE (op0) == NOT
2379 && GET_CODE (XEXP (op0, 0)) == XOR
2380 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2381 && ! side_effects_p (op1))
2382 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2383
2384 /* Convert (A | B) & A to A. */
2385 if (GET_CODE (op0) == IOR
2386 && (rtx_equal_p (XEXP (op0, 0), op1)
2387 || rtx_equal_p (XEXP (op0, 1), op1))
2388 && ! side_effects_p (XEXP (op0, 0))
2389 && ! side_effects_p (XEXP (op0, 1)))
2390 return op1;
2391
2392 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2393 ((A & N) + B) & M -> (A + B) & M
2394 Similarly if (N & M) == 0,
2395 ((A | N) + B) & M -> (A + B) & M
2396 and for - instead of + and/or ^ instead of |. */
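      /* e.g. with M == 0xff and N == 0x1ff, ((A & 0x1ff) + B) & 0xff equals
	 (A + B) & 0xff, because the bits of A cleared by the inner AND are too
	 high to influence the low eight bits of the sum.  */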
2397 if (GET_CODE (trueop1) == CONST_INT
2398 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2399 && ~INTVAL (trueop1)
2400 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2401 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2402 {
2403 rtx pmop[2];
2404 int which;
2405
2406 pmop[0] = XEXP (op0, 0);
2407 pmop[1] = XEXP (op0, 1);
2408
2409 for (which = 0; which < 2; which++)
2410 {
2411 tem = pmop[which];
2412 switch (GET_CODE (tem))
2413 {
2414 case AND:
2415 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2416 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2417 == INTVAL (trueop1))
2418 pmop[which] = XEXP (tem, 0);
2419 break;
2420 case IOR:
2421 case XOR:
2422 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2423 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2424 pmop[which] = XEXP (tem, 0);
2425 break;
2426 default:
2427 break;
2428 }
2429 }
2430
2431 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2432 {
2433 tem = simplify_gen_binary (GET_CODE (op0), mode,
2434 pmop[0], pmop[1]);
2435 return simplify_gen_binary (code, mode, tem, op1);
2436 }
2437 }
2438 tem = simplify_associative_operation (code, mode, op0, op1);
2439 if (tem)
2440 return tem;
2441 break;
2442
2443 case UDIV:
2444 /* 0/x is 0 (or x&0 if x has side-effects). */
2445 if (trueop0 == CONST0_RTX (mode))
2446 {
2447 if (side_effects_p (op1))
2448 return simplify_gen_binary (AND, mode, op1, trueop0);
2449 return trueop0;
2450 }
2451 /* x/1 is x. */
2452 if (trueop1 == CONST1_RTX (mode))
2453 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2454 /* Convert divide by power of two into shift. */
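      /* e.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3));
	 a logical shift is correct because the division is unsigned.  */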
2455 if (GET_CODE (trueop1) == CONST_INT
2456 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2457 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2458 break;
2459
2460 case DIV:
2461 /* Handle floating point and integers separately. */
2462 if (SCALAR_FLOAT_MODE_P (mode))
2463 {
2464 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2465 safe for modes with NaNs, since 0.0 / 0.0 will then be
2466 NaN rather than 0.0. Nor is it safe for modes with signed
2467 zeros, since dividing 0 by a negative number gives -0.0 */
2468 if (trueop0 == CONST0_RTX (mode)
2469 && !HONOR_NANS (mode)
2470 && !HONOR_SIGNED_ZEROS (mode)
2471 && ! side_effects_p (op1))
2472 return op0;
2473 /* x/1.0 is x. */
2474 if (trueop1 == CONST1_RTX (mode)
2475 && !HONOR_SNANS (mode))
2476 return op0;
2477
2478 if (GET_CODE (trueop1) == CONST_DOUBLE
2479 && trueop1 != CONST0_RTX (mode))
2480 {
2481 REAL_VALUE_TYPE d;
2482 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2483
2484 /* x/-1.0 is -x. */
2485 if (REAL_VALUES_EQUAL (d, dconstm1)
2486 && !HONOR_SNANS (mode))
2487 return simplify_gen_unary (NEG, mode, op0, mode);
2488
2489 /* Change FP division by a constant into multiplication.
2490 Only do this with -funsafe-math-optimizations. */
2491 if (flag_unsafe_math_optimizations
2492 && !REAL_VALUES_EQUAL (d, dconst0))
2493 {
2494 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2495 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2496 return simplify_gen_binary (MULT, mode, op0, tem);
2497 }
2498 }
2499 }
2500 else
2501 {
2502 /* 0/x is 0 (or x&0 if x has side-effects). */
2503 if (trueop0 == CONST0_RTX (mode))
2504 {
2505 if (side_effects_p (op1))
2506 return simplify_gen_binary (AND, mode, op1, trueop0);
2507 return trueop0;
2508 }
2509 /* x/1 is x. */
2510 if (trueop1 == CONST1_RTX (mode))
2511 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2512 /* x/-1 is -x. */
2513 if (trueop1 == constm1_rtx)
2514 {
2515 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2516 return simplify_gen_unary (NEG, mode, x, mode);
2517 }
2518 }
2519 break;
2520
2521 case UMOD:
2522 /* 0%x is 0 (or x&0 if x has side-effects). */
2523 if (trueop0 == CONST0_RTX (mode))
2524 {
2525 if (side_effects_p (op1))
2526 return simplify_gen_binary (AND, mode, op1, trueop0);
2527 return trueop0;
2528 }
 2529 /* x%1 is 0 (or x&0 if x has side-effects). */
2530 if (trueop1 == CONST1_RTX (mode))
2531 {
2532 if (side_effects_p (op0))
2533 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2534 return CONST0_RTX (mode);
2535 }
2536 /* Implement modulus by power of two as AND. */
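      /* e.g. (umod x (const_int 8)) becomes (and x (const_int 7)).  */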
2537 if (GET_CODE (trueop1) == CONST_INT
2538 && exact_log2 (INTVAL (trueop1)) > 0)
2539 return simplify_gen_binary (AND, mode, op0,
2540 GEN_INT (INTVAL (op1) - 1));
2541 break;
2542
2543 case MOD:
2544 /* 0%x is 0 (or x&0 if x has side-effects). */
2545 if (trueop0 == CONST0_RTX (mode))
2546 {
2547 if (side_effects_p (op1))
2548 return simplify_gen_binary (AND, mode, op1, trueop0);
2549 return trueop0;
2550 }
2551 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2552 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2553 {
2554 if (side_effects_p (op0))
2555 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2556 return CONST0_RTX (mode);
2557 }
2558 break;
2559
2560 case ROTATERT:
2561 case ROTATE:
2562 case ASHIFTRT:
2563 if (trueop1 == CONST0_RTX (mode))
2564 return op0;
2565 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2566 return op0;
2567 /* Rotating ~0 always results in ~0. */
2568 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2569 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2570 && ! side_effects_p (op1))
2571 return op0;
2572 break;
2573
2574 case ASHIFT:
2575 case SS_ASHIFT:
2576 if (trueop1 == CONST0_RTX (mode))
2577 return op0;
2578 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2579 return op0;
2580 break;
2581
2582 case LSHIFTRT:
2583 if (trueop1 == CONST0_RTX (mode))
2584 return op0;
2585 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2586 return op0;
2587 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
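      /* e.g. for a 32-bit X whose CLZ at zero is defined as 32,
	 (lshiftrt (clz X) 5) is 1 exactly when X is zero, i.e. (eq X 0).  */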
2588 if (GET_CODE (op0) == CLZ
2589 && GET_CODE (trueop1) == CONST_INT
2590 && STORE_FLAG_VALUE == 1
2591 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2592 {
2593 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2594 unsigned HOST_WIDE_INT zero_val = 0;
2595
2596 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2597 && zero_val == GET_MODE_BITSIZE (imode)
2598 && INTVAL (trueop1) == exact_log2 (zero_val))
2599 return simplify_gen_relational (EQ, mode, imode,
2600 XEXP (op0, 0), const0_rtx);
2601 }
2602 break;
2603
2604 case SMIN:
2605 if (width <= HOST_BITS_PER_WIDE_INT
2606 && GET_CODE (trueop1) == CONST_INT
 2607 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2608 && ! side_effects_p (op0))
2609 return op1;
2610 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2611 return op0;
2612 tem = simplify_associative_operation (code, mode, op0, op1);
2613 if (tem)
2614 return tem;
2615 break;
2616
2617 case SMAX:
2618 if (width <= HOST_BITS_PER_WIDE_INT
2619 && GET_CODE (trueop1) == CONST_INT
2620 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2621 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2622 && ! side_effects_p (op0))
2623 return op1;
2624 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2625 return op0;
2626 tem = simplify_associative_operation (code, mode, op0, op1);
2627 if (tem)
2628 return tem;
2629 break;
2630
2631 case UMIN:
2632 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2633 return op1;
2634 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2635 return op0;
2636 tem = simplify_associative_operation (code, mode, op0, op1);
2637 if (tem)
2638 return tem;
2639 break;
2640
2641 case UMAX:
2642 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2643 return op1;
2644 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2645 return op0;
2646 tem = simplify_associative_operation (code, mode, op0, op1);
2647 if (tem)
2648 return tem;
2649 break;
2650
2651 case SS_PLUS:
2652 case US_PLUS:
2653 case SS_MINUS:
2654 case US_MINUS:
2655 /* ??? There are simplifications that can be done. */
2656 return 0;
2657
2658 case VEC_SELECT:
2659 if (!VECTOR_MODE_P (mode))
2660 {
2661 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2662 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2663 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2664 gcc_assert (XVECLEN (trueop1, 0) == 1);
2665 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2666
2667 if (GET_CODE (trueop0) == CONST_VECTOR)
2668 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2669 (trueop1, 0, 0)));
2670 }
2671 else
2672 {
2673 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2674 gcc_assert (GET_MODE_INNER (mode)
2675 == GET_MODE_INNER (GET_MODE (trueop0)));
2676 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2677
2678 if (GET_CODE (trueop0) == CONST_VECTOR)
2679 {
2680 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2681 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2682 rtvec v = rtvec_alloc (n_elts);
2683 unsigned int i;
2684
2685 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2686 for (i = 0; i < n_elts; i++)
2687 {
2688 rtx x = XVECEXP (trueop1, 0, i);
2689
2690 gcc_assert (GET_CODE (x) == CONST_INT);
2691 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2692 INTVAL (x));
2693 }
2694
2695 return gen_rtx_CONST_VECTOR (mode, v);
2696 }
2697 }
2698
2699 if (XVECLEN (trueop1, 0) == 1
2700 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2701 && GET_CODE (trueop0) == VEC_CONCAT)
2702 {
2703 rtx vec = trueop0;
2704 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2705
2706 /* Try to find the element in the VEC_CONCAT. */
2707 while (GET_MODE (vec) != mode
2708 && GET_CODE (vec) == VEC_CONCAT)
2709 {
2710 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2711 if (offset < vec_size)
2712 vec = XEXP (vec, 0);
2713 else
2714 {
2715 offset -= vec_size;
2716 vec = XEXP (vec, 1);
2717 }
2718 vec = avoid_constant_pool_reference (vec);
2719 }
2720
2721 if (GET_MODE (vec) == mode)
2722 return vec;
2723 }
2724
2725 return 0;
2726 case VEC_CONCAT:
2727 {
2728 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2729 ? GET_MODE (trueop0)
2730 : GET_MODE_INNER (mode));
2731 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2732 ? GET_MODE (trueop1)
2733 : GET_MODE_INNER (mode));
2734
2735 gcc_assert (VECTOR_MODE_P (mode));
2736 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2737 == GET_MODE_SIZE (mode));
2738
2739 if (VECTOR_MODE_P (op0_mode))
2740 gcc_assert (GET_MODE_INNER (mode)
2741 == GET_MODE_INNER (op0_mode));
2742 else
2743 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2744
2745 if (VECTOR_MODE_P (op1_mode))
2746 gcc_assert (GET_MODE_INNER (mode)
2747 == GET_MODE_INNER (op1_mode));
2748 else
2749 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2750
2751 if ((GET_CODE (trueop0) == CONST_VECTOR
2752 || GET_CODE (trueop0) == CONST_INT
2753 || GET_CODE (trueop0) == CONST_DOUBLE)
2754 && (GET_CODE (trueop1) == CONST_VECTOR
2755 || GET_CODE (trueop1) == CONST_INT
2756 || GET_CODE (trueop1) == CONST_DOUBLE))
2757 {
2758 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2759 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2760 rtvec v = rtvec_alloc (n_elts);
2761 unsigned int i;
2762 unsigned in_n_elts = 1;
2763
2764 if (VECTOR_MODE_P (op0_mode))
2765 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2766 for (i = 0; i < n_elts; i++)
2767 {
2768 if (i < in_n_elts)
2769 {
2770 if (!VECTOR_MODE_P (op0_mode))
2771 RTVEC_ELT (v, i) = trueop0;
2772 else
2773 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2774 }
2775 else
2776 {
2777 if (!VECTOR_MODE_P (op1_mode))
2778 RTVEC_ELT (v, i) = trueop1;
2779 else
2780 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2781 i - in_n_elts);
2782 }
2783 }
2784
2785 return gen_rtx_CONST_VECTOR (mode, v);
2786 }
2787 }
2788 return 0;
2789
2790 default:
2791 gcc_unreachable ();
2792 }
2793
2794 return 0;
2795 }
2796
2797 rtx
2798 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2799 rtx op0, rtx op1)
2800 {
2801 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2802 HOST_WIDE_INT val;
2803 unsigned int width = GET_MODE_BITSIZE (mode);
2804
2805 if (VECTOR_MODE_P (mode)
2806 && code != VEC_CONCAT
2807 && GET_CODE (op0) == CONST_VECTOR
2808 && GET_CODE (op1) == CONST_VECTOR)
2809 {
2810 unsigned n_elts = GET_MODE_NUNITS (mode);
2811 enum machine_mode op0mode = GET_MODE (op0);
2812 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2813 enum machine_mode op1mode = GET_MODE (op1);
2814 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2815 rtvec v = rtvec_alloc (n_elts);
2816 unsigned int i;
2817
2818 gcc_assert (op0_n_elts == n_elts);
2819 gcc_assert (op1_n_elts == n_elts);
2820 for (i = 0; i < n_elts; i++)
2821 {
2822 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2823 CONST_VECTOR_ELT (op0, i),
2824 CONST_VECTOR_ELT (op1, i));
2825 if (!x)
2826 return 0;
2827 RTVEC_ELT (v, i) = x;
2828 }
2829
2830 return gen_rtx_CONST_VECTOR (mode, v);
2831 }
2832
2833 if (VECTOR_MODE_P (mode)
2834 && code == VEC_CONCAT
2835 && CONSTANT_P (op0) && CONSTANT_P (op1))
2836 {
2837 unsigned n_elts = GET_MODE_NUNITS (mode);
2838 rtvec v = rtvec_alloc (n_elts);
2839
2840 gcc_assert (n_elts >= 2);
2841 if (n_elts == 2)
2842 {
2843 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2844 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2845
2846 RTVEC_ELT (v, 0) = op0;
2847 RTVEC_ELT (v, 1) = op1;
2848 }
2849 else
2850 {
2851 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2852 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2853 unsigned i;
2854
2855 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2856 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2857 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2858
2859 for (i = 0; i < op0_n_elts; ++i)
2860 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2861 for (i = 0; i < op1_n_elts; ++i)
2862 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2863 }
2864
2865 return gen_rtx_CONST_VECTOR (mode, v);
2866 }
2867
2868 if (SCALAR_FLOAT_MODE_P (mode)
2869 && GET_CODE (op0) == CONST_DOUBLE
2870 && GET_CODE (op1) == CONST_DOUBLE
2871 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2872 {
2873 if (code == AND
2874 || code == IOR
2875 || code == XOR)
2876 {
2877 long tmp0[4];
2878 long tmp1[4];
2879 REAL_VALUE_TYPE r;
2880 int i;
2881
2882 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2883 GET_MODE (op0));
2884 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2885 GET_MODE (op1));
2886 for (i = 0; i < 4; i++)
2887 {
2888 switch (code)
2889 {
2890 case AND:
2891 tmp0[i] &= tmp1[i];
2892 break;
2893 case IOR:
2894 tmp0[i] |= tmp1[i];
2895 break;
2896 case XOR:
2897 tmp0[i] ^= tmp1[i];
2898 break;
2899 default:
2900 gcc_unreachable ();
2901 }
2902 }
2903 real_from_target (&r, tmp0, mode);
2904 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2905 }
2906 else
2907 {
2908 REAL_VALUE_TYPE f0, f1, value, result;
2909 bool inexact;
2910
2911 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2912 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2913 real_convert (&f0, mode, &f0);
2914 real_convert (&f1, mode, &f1);
2915
2916 if (HONOR_SNANS (mode)
2917 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2918 return 0;
2919
2920 if (code == DIV
2921 && REAL_VALUES_EQUAL (f1, dconst0)
2922 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2923 return 0;
2924
2925 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2926 && flag_trapping_math
2927 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2928 {
2929 int s0 = REAL_VALUE_NEGATIVE (f0);
2930 int s1 = REAL_VALUE_NEGATIVE (f1);
2931
2932 switch (code)
2933 {
2934 case PLUS:
2935 /* Inf + -Inf = NaN plus exception. */
2936 if (s0 != s1)
2937 return 0;
2938 break;
2939 case MINUS:
2940 /* Inf - Inf = NaN plus exception. */
2941 if (s0 == s1)
2942 return 0;
2943 break;
2944 case DIV:
2945 /* Inf / Inf = NaN plus exception. */
2946 return 0;
2947 default:
2948 break;
2949 }
2950 }
2951
2952 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2953 && flag_trapping_math
2954 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2955 || (REAL_VALUE_ISINF (f1)
2956 && REAL_VALUES_EQUAL (f0, dconst0))))
2957 /* Inf * 0 = NaN plus exception. */
2958 return 0;
2959
2960 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2961 &f0, &f1);
2962 real_convert (&result, mode, &value);
2963
2964 /* Don't constant fold this floating point operation if
 2965 the result has overflowed and flag_trapping_math is set. */
2966
2967 if (flag_trapping_math
2968 && MODE_HAS_INFINITIES (mode)
2969 && REAL_VALUE_ISINF (result)
2970 && !REAL_VALUE_ISINF (f0)
2971 && !REAL_VALUE_ISINF (f1))
2972 /* Overflow plus exception. */
2973 return 0;
2974
2975 /* Don't constant fold this floating point operation if the
 2976 result may depend upon the run-time rounding mode and
2977 flag_rounding_math is set, or if GCC's software emulation
2978 is unable to accurately represent the result. */
2979
2980 if ((flag_rounding_math
2981 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2982 && !flag_unsafe_math_optimizations))
2983 && (inexact || !real_identical (&result, &value)))
2984 return NULL_RTX;
2985
2986 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2987 }
2988 }
2989
2990 /* We can fold some multi-word operations. */
2991 if (GET_MODE_CLASS (mode) == MODE_INT
2992 && width == HOST_BITS_PER_WIDE_INT * 2
2993 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2994 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2995 {
2996 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2997 HOST_WIDE_INT h1, h2, hv, ht;
2998
2999 if (GET_CODE (op0) == CONST_DOUBLE)
3000 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3001 else
3002 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3003
3004 if (GET_CODE (op1) == CONST_DOUBLE)
3005 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3006 else
3007 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3008
3009 switch (code)
3010 {
3011 case MINUS:
3012 /* A - B == A + (-B). */
3013 neg_double (l2, h2, &lv, &hv);
3014 l2 = lv, h2 = hv;
3015
3016 /* Fall through.... */
3017
3018 case PLUS:
3019 add_double (l1, h1, l2, h2, &lv, &hv);
3020 break;
3021
3022 case MULT:
3023 mul_double (l1, h1, l2, h2, &lv, &hv);
3024 break;
3025
3026 case DIV:
3027 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3028 &lv, &hv, &lt, &ht))
3029 return 0;
3030 break;
3031
3032 case MOD:
3033 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3034 &lt, &ht, &lv, &hv))
3035 return 0;
3036 break;
3037
3038 case UDIV:
3039 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3040 &lv, &hv, &lt, &ht))
3041 return 0;
3042 break;
3043
3044 case UMOD:
3045 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3046 &lt, &ht, &lv, &hv))
3047 return 0;
3048 break;
3049
3050 case AND:
3051 lv = l1 & l2, hv = h1 & h2;
3052 break;
3053
3054 case IOR:
3055 lv = l1 | l2, hv = h1 | h2;
3056 break;
3057
3058 case XOR:
3059 lv = l1 ^ l2, hv = h1 ^ h2;
3060 break;
3061
3062 case SMIN:
3063 if (h1 < h2
3064 || (h1 == h2
3065 && ((unsigned HOST_WIDE_INT) l1
3066 < (unsigned HOST_WIDE_INT) l2)))
3067 lv = l1, hv = h1;
3068 else
3069 lv = l2, hv = h2;
3070 break;
3071
3072 case SMAX:
3073 if (h1 > h2
3074 || (h1 == h2
3075 && ((unsigned HOST_WIDE_INT) l1
3076 > (unsigned HOST_WIDE_INT) l2)))
3077 lv = l1, hv = h1;
3078 else
3079 lv = l2, hv = h2;
3080 break;
3081
3082 case UMIN:
3083 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3084 || (h1 == h2
3085 && ((unsigned HOST_WIDE_INT) l1
3086 < (unsigned HOST_WIDE_INT) l2)))
3087 lv = l1, hv = h1;
3088 else
3089 lv = l2, hv = h2;
3090 break;
3091
3092 case UMAX:
3093 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3094 || (h1 == h2
3095 && ((unsigned HOST_WIDE_INT) l1
3096 > (unsigned HOST_WIDE_INT) l2)))
3097 lv = l1, hv = h1;
3098 else
3099 lv = l2, hv = h2;
3100 break;
3101
3102 case LSHIFTRT: case ASHIFTRT:
3103 case ASHIFT:
3104 case ROTATE: case ROTATERT:
3105 if (SHIFT_COUNT_TRUNCATED)
3106 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3107
3108 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3109 return 0;
3110
3111 if (code == LSHIFTRT || code == ASHIFTRT)
3112 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3113 code == ASHIFTRT);
3114 else if (code == ASHIFT)
3115 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3116 else if (code == ROTATE)
3117 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3118 else /* code == ROTATERT */
3119 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3120 break;
3121
3122 default:
3123 return 0;
3124 }
3125
3126 return immed_double_const (lv, hv, mode);
3127 }
3128
3129 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3130 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3131 {
3132 /* Get the integer argument values in two forms:
3133 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
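      /* e.g. for a QImode value of 0xff, ARG0 is 255 while ARG0S is -1.  */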
3134
3135 arg0 = INTVAL (op0);
3136 arg1 = INTVAL (op1);
3137
3138 if (width < HOST_BITS_PER_WIDE_INT)
3139 {
3140 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3141 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3142
3143 arg0s = arg0;
3144 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3145 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3146
3147 arg1s = arg1;
3148 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3149 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3150 }
3151 else
3152 {
3153 arg0s = arg0;
3154 arg1s = arg1;
3155 }
3156
3157 /* Compute the value of the arithmetic. */
3158
3159 switch (code)
3160 {
3161 case PLUS:
3162 val = arg0s + arg1s;
3163 break;
3164
3165 case MINUS:
3166 val = arg0s - arg1s;
3167 break;
3168
3169 case MULT:
3170 val = arg0s * arg1s;
3171 break;
3172
3173 case DIV:
3174 if (arg1s == 0
3175 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3176 && arg1s == -1))
3177 return 0;
3178 val = arg0s / arg1s;
3179 break;
3180
3181 case MOD:
3182 if (arg1s == 0
3183 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3184 && arg1s == -1))
3185 return 0;
3186 val = arg0s % arg1s;
3187 break;
3188
3189 case UDIV:
3190 if (arg1 == 0
3191 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3192 && arg1s == -1))
3193 return 0;
3194 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3195 break;
3196
3197 case UMOD:
3198 if (arg1 == 0
3199 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3200 && arg1s == -1))
3201 return 0;
3202 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3203 break;
3204
3205 case AND:
3206 val = arg0 & arg1;
3207 break;
3208
3209 case IOR:
3210 val = arg0 | arg1;
3211 break;
3212
3213 case XOR:
3214 val = arg0 ^ arg1;
3215 break;
3216
3217 case LSHIFTRT:
3218 case ASHIFT:
3219 case ASHIFTRT:
3220 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3221 the value is in range. We can't return any old value for
3222 out-of-range arguments because either the middle-end (via
3223 shift_truncation_mask) or the back-end might be relying on
3224 target-specific knowledge. Nor can we rely on
3225 shift_truncation_mask, since the shift might not be part of an
3226 ashlM3, lshrM3 or ashrM3 instruction. */
3227 if (SHIFT_COUNT_TRUNCATED)
3228 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3229 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3230 return 0;
3231
3232 val = (code == ASHIFT
3233 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3234 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3235
3236 /* Sign-extend the result for arithmetic right shifts. */
3237 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3238 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3239 break;
3240
3241 case ROTATERT:
3242 if (arg1 < 0)
3243 return 0;
3244
3245 arg1 %= width;
3246 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3247 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3248 break;
3249
3250 case ROTATE:
3251 if (arg1 < 0)
3252 return 0;
3253
3254 arg1 %= width;
3255 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3256 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3257 break;
3258
3259 case COMPARE:
3260 /* Do nothing here. */
3261 return 0;
3262
3263 case SMIN:
3264 val = arg0s <= arg1s ? arg0s : arg1s;
3265 break;
3266
3267 case UMIN:
3268 val = ((unsigned HOST_WIDE_INT) arg0
3269 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3270 break;
3271
3272 case SMAX:
3273 val = arg0s > arg1s ? arg0s : arg1s;
3274 break;
3275
3276 case UMAX:
3277 val = ((unsigned HOST_WIDE_INT) arg0
3278 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3279 break;
3280
3281 case SS_PLUS:
3282 case US_PLUS:
3283 case SS_MINUS:
3284 case US_MINUS:
3285 case SS_ASHIFT:
3286 /* ??? There are simplifications that can be done. */
3287 return 0;
3288
3289 default:
3290 gcc_unreachable ();
3291 }
3292
3293 return gen_int_mode (val, mode);
3294 }
3295
3296 return NULL_RTX;
3297 }
3298
3299
3300 \f
3301 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3302 PLUS or MINUS.
3303
 3304 Rather than test for specific cases, we do this by a brute-force method
3305 and do all possible simplifications until no more changes occur. Then
3306 we rebuild the operation. */
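 /* For example, (minus (plus A B) (plus B C)) is expanded into the operand
    list A, B, -B, -C; the B and -B terms cancel and the remaining operands
    are rebuilt as (minus A C).  */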
3307
3308 struct simplify_plus_minus_op_data
3309 {
3310 rtx op;
3311 short neg;
3312 };
3313
3314 static int
3315 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3316 {
3317 const struct simplify_plus_minus_op_data *d1 = p1;
3318 const struct simplify_plus_minus_op_data *d2 = p2;
3319 int result;
3320
3321 result = (commutative_operand_precedence (d2->op)
3322 - commutative_operand_precedence (d1->op));
3323 if (result)
3324 return result;
3325
3326 /* Group together equal REGs to do more simplification. */
3327 if (REG_P (d1->op) && REG_P (d2->op))
3328 return REGNO (d1->op) - REGNO (d2->op);
3329 else
3330 return 0;
3331 }
3332
3333 static rtx
3334 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3335 rtx op1)
3336 {
3337 struct simplify_plus_minus_op_data ops[8];
3338 rtx result, tem;
3339 int n_ops = 2, input_ops = 2;
3340 int changed, n_constants = 0, canonicalized = 0;
3341 int i, j;
3342
3343 memset (ops, 0, sizeof ops);
3344
3345 /* Set up the two operands and then expand them until nothing has been
3346 changed. If we run out of room in our array, give up; this should
3347 almost never happen. */
3348
3349 ops[0].op = op0;
3350 ops[0].neg = 0;
3351 ops[1].op = op1;
3352 ops[1].neg = (code == MINUS);
3353
3354 do
3355 {
3356 changed = 0;
3357
3358 for (i = 0; i < n_ops; i++)
3359 {
3360 rtx this_op = ops[i].op;
3361 int this_neg = ops[i].neg;
3362 enum rtx_code this_code = GET_CODE (this_op);
3363
3364 switch (this_code)
3365 {
3366 case PLUS:
3367 case MINUS:
3368 if (n_ops == 7)
3369 return NULL_RTX;
3370
3371 ops[n_ops].op = XEXP (this_op, 1);
3372 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3373 n_ops++;
3374
3375 ops[i].op = XEXP (this_op, 0);
3376 input_ops++;
3377 changed = 1;
3378 canonicalized |= this_neg;
3379 break;
3380
3381 case NEG:
3382 ops[i].op = XEXP (this_op, 0);
3383 ops[i].neg = ! this_neg;
3384 changed = 1;
3385 canonicalized = 1;
3386 break;
3387
3388 case CONST:
3389 if (n_ops < 7
3390 && GET_CODE (XEXP (this_op, 0)) == PLUS
3391 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3392 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3393 {
3394 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3395 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3396 ops[n_ops].neg = this_neg;
3397 n_ops++;
3398 changed = 1;
3399 canonicalized = 1;
3400 }
3401 break;
3402
3403 case NOT:
3404 /* ~a -> (-a - 1) */
3405 if (n_ops != 7)
3406 {
3407 ops[n_ops].op = constm1_rtx;
3408 ops[n_ops++].neg = this_neg;
3409 ops[i].op = XEXP (this_op, 0);
3410 ops[i].neg = !this_neg;
3411 changed = 1;
3412 canonicalized = 1;
3413 }
3414 break;
3415
3416 case CONST_INT:
3417 n_constants++;
3418 if (this_neg)
3419 {
3420 ops[i].op = neg_const_int (mode, this_op);
3421 ops[i].neg = 0;
3422 changed = 1;
3423 canonicalized = 1;
3424 }
3425 break;
3426
3427 default:
3428 break;
3429 }
3430 }
3431 }
3432 while (changed);
3433
3434 if (n_constants > 1)
3435 canonicalized = 1;
3436
3437 gcc_assert (n_ops >= 2);
3438
3439 /* If we only have two operands, we can avoid the loops. */
3440 if (n_ops == 2)
3441 {
3442 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3443 rtx lhs, rhs;
3444
3445 /* Get the two operands. Be careful with the order, especially for
3446 the cases where code == MINUS. */
3447 if (ops[0].neg && ops[1].neg)
3448 {
3449 lhs = gen_rtx_NEG (mode, ops[0].op);
3450 rhs = ops[1].op;
3451 }
3452 else if (ops[0].neg)
3453 {
3454 lhs = ops[1].op;
3455 rhs = ops[0].op;
3456 }
3457 else
3458 {
3459 lhs = ops[0].op;
3460 rhs = ops[1].op;
3461 }
3462
3463 return simplify_const_binary_operation (code, mode, lhs, rhs);
3464 }
3465
3466 /* Now simplify each pair of operands until nothing changes. */
3467 do
3468 {
3469 /* Insertion sort is good enough for an eight-element array. */
3470 for (i = 1; i < n_ops; i++)
3471 {
3472 struct simplify_plus_minus_op_data save;
3473 j = i - 1;
3474 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3475 continue;
3476
3477 canonicalized = 1;
3478 save = ops[i];
3479 do
3480 ops[j + 1] = ops[j];
3481 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3482 ops[j + 1] = save;
3483 }
3484
3485 /* This is only useful the first time through. */
3486 if (!canonicalized)
3487 return NULL_RTX;
3488
3489 changed = 0;
3490 for (i = n_ops - 1; i > 0; i--)
3491 for (j = i - 1; j >= 0; j--)
3492 {
3493 rtx lhs = ops[j].op, rhs = ops[i].op;
3494 int lneg = ops[j].neg, rneg = ops[i].neg;
3495
3496 if (lhs != 0 && rhs != 0)
3497 {
3498 enum rtx_code ncode = PLUS;
3499
3500 if (lneg != rneg)
3501 {
3502 ncode = MINUS;
3503 if (lneg)
3504 tem = lhs, lhs = rhs, rhs = tem;
3505 }
3506 else if (swap_commutative_operands_p (lhs, rhs))
3507 tem = lhs, lhs = rhs, rhs = tem;
3508
3509 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3510 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3511 {
3512 rtx tem_lhs, tem_rhs;
3513
3514 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3515 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3516 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3517
3518 if (tem && !CONSTANT_P (tem))
3519 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3520 }
3521 else
3522 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3523
3524 /* Reject "simplifications" that just wrap the two
3525 arguments in a CONST. Failure to do so can result
3526 in infinite recursion with simplify_binary_operation
3527 when it calls us to simplify CONST operations. */
3528 if (tem
3529 && ! (GET_CODE (tem) == CONST
3530 && GET_CODE (XEXP (tem, 0)) == ncode
3531 && XEXP (XEXP (tem, 0), 0) == lhs
3532 && XEXP (XEXP (tem, 0), 1) == rhs))
3533 {
3534 lneg &= rneg;
3535 if (GET_CODE (tem) == NEG)
3536 tem = XEXP (tem, 0), lneg = !lneg;
3537 if (GET_CODE (tem) == CONST_INT && lneg)
3538 tem = neg_const_int (mode, tem), lneg = 0;
3539
3540 ops[i].op = tem;
3541 ops[i].neg = lneg;
3542 ops[j].op = NULL_RTX;
3543 changed = 1;
3544 }
3545 }
3546 }
3547
3548 /* Pack all the operands to the lower-numbered entries. */
3549 for (i = 0, j = 0; j < n_ops; j++)
3550 if (ops[j].op)
3551 {
3552 ops[i] = ops[j];
3553 i++;
3554 }
3555 n_ops = i;
3556 }
3557 while (changed);
3558
3559 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3560 if (n_ops == 2
3561 && GET_CODE (ops[1].op) == CONST_INT
3562 && CONSTANT_P (ops[0].op)
3563 && ops[0].neg)
3564 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3565
3566 /* We suppressed creation of trivial CONST expressions in the
3567 combination loop to avoid recursion. Create one manually now.
3568 The combination loop should have ensured that there is exactly
3569 one CONST_INT, and the sort will have ensured that it is last
3570 in the array and that any other constant will be next-to-last. */
3571
3572 if (n_ops > 1
3573 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3574 && CONSTANT_P (ops[n_ops - 2].op))
3575 {
3576 rtx value = ops[n_ops - 1].op;
3577 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3578 value = neg_const_int (mode, value);
3579 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3580 n_ops--;
3581 }
3582
3583 /* Put a non-negated operand first, if possible. */
3584
3585 for (i = 0; i < n_ops && ops[i].neg; i++)
3586 continue;
3587 if (i == n_ops)
3588 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3589 else if (i != 0)
3590 {
3591 tem = ops[0].op;
3592 ops[0] = ops[i];
3593 ops[i].op = tem;
3594 ops[i].neg = 1;
3595 }
3596
3597 /* Now make the result by performing the requested operations. */
3598 result = ops[0].op;
3599 for (i = 1; i < n_ops; i++)
3600 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3601 mode, result, ops[i].op);
3602
3603 return result;
3604 }
3605
3606 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3607 static bool
3608 plus_minus_operand_p (rtx x)
3609 {
3610 return GET_CODE (x) == PLUS
3611 || GET_CODE (x) == MINUS
3612 || (GET_CODE (x) == CONST
3613 && GET_CODE (XEXP (x, 0)) == PLUS
3614 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3615 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3616 }
3617
3618 /* Like simplify_binary_operation except used for relational operators.
 3619 MODE is the mode of the result. If MODE is VOIDmode, the operands must
 3620 not both be VOIDmode as well.
3621
 3622 CMP_MODE specifies the mode in which the comparison is done, so it is
3623 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3624 the operands or, if both are VOIDmode, the operands are compared in
3625 "infinite precision". */
3626 rtx
3627 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3628 enum machine_mode cmp_mode, rtx op0, rtx op1)
3629 {
3630 rtx tem, trueop0, trueop1;
3631
3632 if (cmp_mode == VOIDmode)
3633 cmp_mode = GET_MODE (op0);
3634 if (cmp_mode == VOIDmode)
3635 cmp_mode = GET_MODE (op1);
3636
3637 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3638 if (tem)
3639 {
3640 if (SCALAR_FLOAT_MODE_P (mode))
3641 {
3642 if (tem == const0_rtx)
3643 return CONST0_RTX (mode);
3644 #ifdef FLOAT_STORE_FLAG_VALUE
3645 {
3646 REAL_VALUE_TYPE val;
3647 val = FLOAT_STORE_FLAG_VALUE (mode);
3648 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3649 }
3650 #else
3651 return NULL_RTX;
3652 #endif
3653 }
3654 if (VECTOR_MODE_P (mode))
3655 {
3656 if (tem == const0_rtx)
3657 return CONST0_RTX (mode);
3658 #ifdef VECTOR_STORE_FLAG_VALUE
3659 {
3660 int i, units;
3661 rtvec v;
3662
3663 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3664 if (val == NULL_RTX)
3665 return NULL_RTX;
3666 if (val == const1_rtx)
3667 return CONST1_RTX (mode);
3668
3669 units = GET_MODE_NUNITS (mode);
3670 v = rtvec_alloc (units);
3671 for (i = 0; i < units; i++)
3672 RTVEC_ELT (v, i) = val;
3673 return gen_rtx_raw_CONST_VECTOR (mode, v);
3674 }
3675 #else
3676 return NULL_RTX;
3677 #endif
3678 }
3679
3680 return tem;
3681 }
3682
3683 /* For the following tests, ensure const0_rtx is op1. */
3684 if (swap_commutative_operands_p (op0, op1)
3685 || (op0 == const0_rtx && op1 != const0_rtx))
3686 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3687
3688 /* If op0 is a compare, extract the comparison arguments from it. */
3689 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3690 return simplify_relational_operation (code, mode, VOIDmode,
3691 XEXP (op0, 0), XEXP (op0, 1));
3692
3693 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3694 || CC0_P (op0))
3695 return NULL_RTX;
3696
3697 trueop0 = avoid_constant_pool_reference (op0);
3698 trueop1 = avoid_constant_pool_reference (op1);
3699 return simplify_relational_operation_1 (code, mode, cmp_mode,
3700 trueop0, trueop1);
3701 }
3702
3703 /* This part of simplify_relational_operation is only used when CMP_MODE
3704 is not in class MODE_CC (i.e. it is a real comparison).
3705
 3706 MODE is the mode of the result, while CMP_MODE specifies the mode
 3707 in which the comparison is done, so it is the mode of the operands. */
3708
3709 static rtx
3710 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3711 enum machine_mode cmp_mode, rtx op0, rtx op1)
3712 {
3713 enum rtx_code op0code = GET_CODE (op0);
3714
3715 if (op1 == const0_rtx && COMPARISON_P (op0))
3716 {
3717 /* If op0 is a comparison, extract the comparison arguments
3718 from it. */
3719 if (code == NE)
3720 {
3721 if (GET_MODE (op0) == mode)
3722 return simplify_rtx (op0);
3723 else
3724 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3725 XEXP (op0, 0), XEXP (op0, 1));
3726 }
3727 else if (code == EQ)
3728 {
3729 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3730 if (new_code != UNKNOWN)
3731 return simplify_gen_relational (new_code, mode, VOIDmode,
3732 XEXP (op0, 0), XEXP (op0, 1));
3733 }
3734 }
3735
3736 if (op1 == const0_rtx)
3737 {
3738 /* Canonicalize (GTU x 0) as (NE x 0). */
3739 if (code == GTU)
3740 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3741 /* Canonicalize (LEU x 0) as (EQ x 0). */
3742 if (code == LEU)
3743 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3744 }
3745 else if (op1 == const1_rtx)
3746 {
3747 switch (code)
3748 {
3749 case GE:
3750 /* Canonicalize (GE x 1) as (GT x 0). */
3751 return simplify_gen_relational (GT, mode, cmp_mode,
3752 op0, const0_rtx);
3753 case GEU:
3754 /* Canonicalize (GEU x 1) as (NE x 0). */
3755 return simplify_gen_relational (NE, mode, cmp_mode,
3756 op0, const0_rtx);
3757 case LT:
3758 /* Canonicalize (LT x 1) as (LE x 0). */
3759 return simplify_gen_relational (LE, mode, cmp_mode,
3760 op0, const0_rtx);
3761 case LTU:
3762 /* Canonicalize (LTU x 1) as (EQ x 0). */
3763 return simplify_gen_relational (EQ, mode, cmp_mode,
3764 op0, const0_rtx);
3765 default:
3766 break;
3767 }
3768 }
3769 else if (op1 == constm1_rtx)
3770 {
3771 /* Canonicalize (LE x -1) as (LT x 0). */
3772 if (code == LE)
3773 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3774 /* Canonicalize (GT x -1) as (GE x 0). */
3775 if (code == GT)
3776 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3777 }
3778
3779 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3780 if ((code == EQ || code == NE)
3781 && (op0code == PLUS || op0code == MINUS)
3782 && CONSTANT_P (op1)
3783 && CONSTANT_P (XEXP (op0, 1))
3784 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3785 {
3786 rtx x = XEXP (op0, 0);
3787 rtx c = XEXP (op0, 1);
3788
3789 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3790 cmp_mode, op1, c);
3791 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3792 }
3793
3794   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3795      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
3796 if (code == NE
3797 && op1 == const0_rtx
3798 && GET_MODE_CLASS (mode) == MODE_INT
3799 && cmp_mode != VOIDmode
3800 /* ??? Work-around BImode bugs in the ia64 backend. */
3801 && mode != BImode
3802 && cmp_mode != BImode
3803 && nonzero_bits (op0, cmp_mode) == 1
3804 && STORE_FLAG_VALUE == 1)
3805 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3806 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3807 : lowpart_subreg (mode, op0, cmp_mode);
3808
3809 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3810 if ((code == EQ || code == NE)
3811 && op1 == const0_rtx
3812 && op0code == XOR)
3813 return simplify_gen_relational (code, mode, cmp_mode,
3814 XEXP (op0, 0), XEXP (op0, 1));
3815
3816 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3817 if ((code == EQ || code == NE)
3818 && op0code == XOR
3819 && rtx_equal_p (XEXP (op0, 0), op1)
3820 && !side_effects_p (XEXP (op0, 0)))
3821 return simplify_gen_relational (code, mode, cmp_mode,
3822 XEXP (op0, 1), const0_rtx);
3823
3824 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3825 if ((code == EQ || code == NE)
3826 && op0code == XOR
3827 && rtx_equal_p (XEXP (op0, 1), op1)
3828 && !side_effects_p (XEXP (op0, 1)))
3829 return simplify_gen_relational (code, mode, cmp_mode,
3830 XEXP (op0, 0), const0_rtx);
3831
3832 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3833 if ((code == EQ || code == NE)
3834 && op0code == XOR
3835 && (GET_CODE (op1) == CONST_INT
3836 || GET_CODE (op1) == CONST_DOUBLE)
3837 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3838 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3839 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3840 simplify_gen_binary (XOR, cmp_mode,
3841 XEXP (op0, 1), op1));
3842
3843 if (op0code == POPCOUNT && op1 == const0_rtx)
3844 switch (code)
3845 {
3846 case EQ:
3847 case LE:
3848 case LEU:
3849 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3850 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3851 XEXP (op0, 0), const0_rtx);
3852
3853 case NE:
3854 case GT:
3855 case GTU:
3856 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3857 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3858 XEXP (op0, 0), const0_rtx);
3859
3860 default:
3861 break;
3862 }
3863
3864 return NULL_RTX;
3865 }
3866
3867 /* Check if the given comparison (done in the given MODE) is actually a
3868 tautology or a contradiction.
3869 If no simplification is possible, this function returns zero.
3870 Otherwise, it returns either const_true_rtx or const0_rtx. */
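/* Illustrative example: (eq:SI (const_int 4) (const_int 4)) folds to
   const_true_rtx, while (ltu:SI (const_int 7) (const_int 3)) folds to
   const0_rtx.  */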
3871
3872 rtx
3873 simplify_const_relational_operation (enum rtx_code code,
3874 enum machine_mode mode,
3875 rtx op0, rtx op1)
3876 {
3877 int equal, op0lt, op0ltu, op1lt, op1ltu;
3878 rtx tem;
3879 rtx trueop0;
3880 rtx trueop1;
3881
3882 gcc_assert (mode != VOIDmode
3883 || (GET_MODE (op0) == VOIDmode
3884 && GET_MODE (op1) == VOIDmode));
3885
3886 /* If op0 is a compare, extract the comparison arguments from it. */
3887 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3888 {
3889 op1 = XEXP (op0, 1);
3890 op0 = XEXP (op0, 0);
3891
3892 if (GET_MODE (op0) != VOIDmode)
3893 mode = GET_MODE (op0);
3894 else if (GET_MODE (op1) != VOIDmode)
3895 mode = GET_MODE (op1);
3896 else
3897 return 0;
3898 }
3899
3900 /* We can't simplify MODE_CC values since we don't know what the
3901 actual comparison is. */
3902 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3903 return 0;
3904
3905 /* Make sure the constant is second. */
3906 if (swap_commutative_operands_p (op0, op1))
3907 {
3908 tem = op0, op0 = op1, op1 = tem;
3909 code = swap_condition (code);
3910 }
3911
3912 trueop0 = avoid_constant_pool_reference (op0);
3913 trueop1 = avoid_constant_pool_reference (op1);
3914
3915 /* For integer comparisons of A and B maybe we can simplify A - B and can
3916 then simplify a comparison of that with zero. If A and B are both either
3917 a register or a CONST_INT, this can't help; testing for these cases will
3918 prevent infinite recursion here and speed things up.
3919
3920      We can only do this for EQ and NE comparisons, as otherwise we may
3921      lose or introduce overflow, which we cannot disregard as undefined
3922      because we do not know the signedness of the operation on either the
3923      left or the right hand side of the comparison.  */
3924
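  /* Illustrative example: (eq:SI (plus:SI x (const_int 3))
     (plus:SI x (const_int 5))) can be folded to const0_rtx because the
     difference of the operands simplifies to (const_int -2), which
     compares unequal to zero.  */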
3925 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3926 && (code == EQ || code == NE)
3927 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3928 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3929 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3930 /* We cannot do this if tem is a nonzero address. */
3931 && ! nonzero_address_p (tem))
3932 return simplify_const_relational_operation (signed_condition (code),
3933 mode, tem, const0_rtx);
3934
3935 if (! HONOR_NANS (mode) && code == ORDERED)
3936 return const_true_rtx;
3937
3938 if (! HONOR_NANS (mode) && code == UNORDERED)
3939 return const0_rtx;
3940
3941   /* For modes without NaNs, if the two operands are equal, we know the
3942      result unless they have side effects.  */
3943 if (! HONOR_NANS (GET_MODE (trueop0))
3944 && rtx_equal_p (trueop0, trueop1)
3945 && ! side_effects_p (trueop0))
3946 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3947
3948 /* If the operands are floating-point constants, see if we can fold
3949 the result. */
3950 else if (GET_CODE (trueop0) == CONST_DOUBLE
3951 && GET_CODE (trueop1) == CONST_DOUBLE
3952 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3953 {
3954 REAL_VALUE_TYPE d0, d1;
3955
3956 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3957 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3958
3959 /* Comparisons are unordered iff at least one of the values is NaN. */
3960 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3961 switch (code)
3962 {
3963 case UNEQ:
3964 case UNLT:
3965 case UNGT:
3966 case UNLE:
3967 case UNGE:
3968 case NE:
3969 case UNORDERED:
3970 return const_true_rtx;
3971 case EQ:
3972 case LT:
3973 case GT:
3974 case LE:
3975 case GE:
3976 case LTGT:
3977 case ORDERED:
3978 return const0_rtx;
3979 default:
3980 return 0;
3981 }
3982
3983 equal = REAL_VALUES_EQUAL (d0, d1);
3984 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3985 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3986 }
3987
3988 /* Otherwise, see if the operands are both integers. */
3989 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3990 && (GET_CODE (trueop0) == CONST_DOUBLE
3991 || GET_CODE (trueop0) == CONST_INT)
3992 && (GET_CODE (trueop1) == CONST_DOUBLE
3993 || GET_CODE (trueop1) == CONST_INT))
3994 {
3995 int width = GET_MODE_BITSIZE (mode);
3996 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3997 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3998
3999 /* Get the two words comprising each integer constant. */
4000 if (GET_CODE (trueop0) == CONST_DOUBLE)
4001 {
4002 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4003 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4004 }
4005 else
4006 {
4007 l0u = l0s = INTVAL (trueop0);
4008 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4009 }
4010
4011 if (GET_CODE (trueop1) == CONST_DOUBLE)
4012 {
4013 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4014 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4015 }
4016 else
4017 {
4018 l1u = l1s = INTVAL (trueop1);
4019 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4020 }
4021
4022 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4023 we have to sign or zero-extend the values. */
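      /* E.g. for width 8, a constant whose low byte is 0x80 is treated as
	 128 in the unsigned copies (l0u/l1u) but as -128 in the signed
	 copies (l0s/l1s) after this adjustment.  */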
4024 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4025 {
4026 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4027 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4028
4029 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4030 l0s |= ((HOST_WIDE_INT) (-1) << width);
4031
4032 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4033 l1s |= ((HOST_WIDE_INT) (-1) << width);
4034 }
4035 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4036 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4037
4038 equal = (h0u == h1u && l0u == l1u);
4039 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4040 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4041 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4042 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4043 }
4044
4045 /* Otherwise, there are some code-specific tests we can make. */
4046 else
4047 {
4048 /* Optimize comparisons with upper and lower bounds. */
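      /* Illustrative examples: (ltu:SI x (const_int 0)) is always false,
	 because no unsigned value is below the minimum 0, and
	 (leu:SI x (const_int -1)) is always true, because every unsigned
	 SImode value is at most 0xffffffff.  */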
4049 if (SCALAR_INT_MODE_P (mode)
4050 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4051 {
4052 rtx mmin, mmax;
4053 int sign;
4054
4055 if (code == GEU
4056 || code == LEU
4057 || code == GTU
4058 || code == LTU)
4059 sign = 0;
4060 else
4061 sign = 1;
4062
4063 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4064
4065 tem = NULL_RTX;
4066 switch (code)
4067 {
4068 case GEU:
4069 case GE:
4070 /* x >= min is always true. */
4071 if (rtx_equal_p (trueop1, mmin))
4072 tem = const_true_rtx;
4073 else
4074 break;
4075
4076 case LEU:
4077 case LE:
4078 /* x <= max is always true. */
4079 if (rtx_equal_p (trueop1, mmax))
4080 tem = const_true_rtx;
4081 break;
4082
4083 case GTU:
4084 case GT:
4085 /* x > max is always false. */
4086 if (rtx_equal_p (trueop1, mmax))
4087 tem = const0_rtx;
4088 break;
4089
4090 case LTU:
4091 case LT:
4092 /* x < min is always false. */
4093 if (rtx_equal_p (trueop1, mmin))
4094 tem = const0_rtx;
4095 break;
4096
4097 default:
4098 break;
4099 }
4100 if (tem == const0_rtx
4101 || tem == const_true_rtx)
4102 return tem;
4103 }
4104
4105 switch (code)
4106 {
4107 case EQ:
4108 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4109 return const0_rtx;
4110 break;
4111
4112 case NE:
4113 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4114 return const_true_rtx;
4115 break;
4116
4117 case LT:
4118 /* Optimize abs(x) < 0.0. */
4119 if (trueop1 == CONST0_RTX (mode)
4120 && !HONOR_SNANS (mode)
4121 && (!INTEGRAL_MODE_P (mode)
4122 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4123 {
4124 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4125 : trueop0;
4126 if (GET_CODE (tem) == ABS)
4127 {
4128 if (INTEGRAL_MODE_P (mode)
4129 && (issue_strict_overflow_warning
4130 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4131 warning (OPT_Wstrict_overflow,
4132 ("assuming signed overflow does not occur when "
4133 "assuming abs (x) < 0 is false"));
4134 return const0_rtx;
4135 }
4136 }
4137
4138 /* Optimize popcount (x) < 0. */
4139 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4140 return const_true_rtx;
4141 break;
4142
4143 case GE:
4144 /* Optimize abs(x) >= 0.0. */
4145 if (trueop1 == CONST0_RTX (mode)
4146 && !HONOR_NANS (mode)
4147 && (!INTEGRAL_MODE_P (mode)
4148 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4149 {
4150 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4151 : trueop0;
4152 if (GET_CODE (tem) == ABS)
4153 {
4154 if (INTEGRAL_MODE_P (mode)
4155 && (issue_strict_overflow_warning
4156 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4157 warning (OPT_Wstrict_overflow,
4158 ("assuming signed overflow does not occur when "
4159 "assuming abs (x) >= 0 is true"));
4160 return const_true_rtx;
4161 }
4162 }
4163
4164 /* Optimize popcount (x) >= 0. */
4165 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4166 return const_true_rtx;
4167 break;
4168
4169 case UNGE:
4170 /* Optimize ! (abs(x) < 0.0). */
4171 if (trueop1 == CONST0_RTX (mode))
4172 {
4173 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4174 : trueop0;
4175 if (GET_CODE (tem) == ABS)
4176 return const_true_rtx;
4177 }
4178 break;
4179
4180 default:
4181 break;
4182 }
4183
4184 return 0;
4185 }
4186
4187 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4188 as appropriate. */
4189 switch (code)
4190 {
4191 case EQ:
4192 case UNEQ:
4193 return equal ? const_true_rtx : const0_rtx;
4194 case NE:
4195 case LTGT:
4196 return ! equal ? const_true_rtx : const0_rtx;
4197 case LT:
4198 case UNLT:
4199 return op0lt ? const_true_rtx : const0_rtx;
4200 case GT:
4201 case UNGT:
4202 return op1lt ? const_true_rtx : const0_rtx;
4203 case LTU:
4204 return op0ltu ? const_true_rtx : const0_rtx;
4205 case GTU:
4206 return op1ltu ? const_true_rtx : const0_rtx;
4207 case LE:
4208 case UNLE:
4209 return equal || op0lt ? const_true_rtx : const0_rtx;
4210 case GE:
4211 case UNGE:
4212 return equal || op1lt ? const_true_rtx : const0_rtx;
4213 case LEU:
4214 return equal || op0ltu ? const_true_rtx : const0_rtx;
4215 case GEU:
4216 return equal || op1ltu ? const_true_rtx : const0_rtx;
4217 case ORDERED:
4218 return const_true_rtx;
4219 case UNORDERED:
4220 return const0_rtx;
4221 default:
4222 gcc_unreachable ();
4223 }
4224 }
4225 \f
4226 /* Simplify CODE, an operation with result mode MODE and three operands,
4227 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4228    a constant.  Return 0 if no simplification is possible.  */
4229
4230 rtx
4231 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4232 enum machine_mode op0_mode, rtx op0, rtx op1,
4233 rtx op2)
4234 {
4235 unsigned int width = GET_MODE_BITSIZE (mode);
4236
4237 /* VOIDmode means "infinite" precision. */
4238 if (width == 0)
4239 width = HOST_BITS_PER_WIDE_INT;
4240
4241 switch (code)
4242 {
4243 case SIGN_EXTRACT:
4244 case ZERO_EXTRACT:
4245 if (GET_CODE (op0) == CONST_INT
4246 && GET_CODE (op1) == CONST_INT
4247 && GET_CODE (op2) == CONST_INT
4248 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4249 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4250 {
4251 /* Extracting a bit-field from a constant */
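	  /* Illustrative example (assuming !BITS_BIG_ENDIAN):
	     (zero_extract:SI (const_int 0x36) (const_int 4) (const_int 2))
	     extracts bits 2..5 of 0b110110, yielding (const_int 13).  */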
4252 HOST_WIDE_INT val = INTVAL (op0);
4253
4254 if (BITS_BIG_ENDIAN)
4255 val >>= (GET_MODE_BITSIZE (op0_mode)
4256 - INTVAL (op2) - INTVAL (op1));
4257 else
4258 val >>= INTVAL (op2);
4259
4260 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4261 {
4262 /* First zero-extend. */
4263 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4264 /* If desired, propagate sign bit. */
4265 if (code == SIGN_EXTRACT
4266 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4267 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4268 }
4269
4270 /* Clear the bits that don't belong in our mode,
4271 unless they and our sign bit are all one.
4272 So we get either a reasonable negative value or a reasonable
4273 unsigned value for this mode. */
4274 if (width < HOST_BITS_PER_WIDE_INT
4275 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4276 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4277 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4278
4279 return gen_int_mode (val, mode);
4280 }
4281 break;
4282
4283 case IF_THEN_ELSE:
4284 if (GET_CODE (op0) == CONST_INT)
4285 return op0 != const0_rtx ? op1 : op2;
4286
4287 /* Convert c ? a : a into "a". */
4288 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4289 return op1;
4290
4291 /* Convert a != b ? a : b into "a". */
4292 if (GET_CODE (op0) == NE
4293 && ! side_effects_p (op0)
4294 && ! HONOR_NANS (mode)
4295 && ! HONOR_SIGNED_ZEROS (mode)
4296 && ((rtx_equal_p (XEXP (op0, 0), op1)
4297 && rtx_equal_p (XEXP (op0, 1), op2))
4298 || (rtx_equal_p (XEXP (op0, 0), op2)
4299 && rtx_equal_p (XEXP (op0, 1), op1))))
4300 return op1;
4301
4302 /* Convert a == b ? a : b into "b". */
4303 if (GET_CODE (op0) == EQ
4304 && ! side_effects_p (op0)
4305 && ! HONOR_NANS (mode)
4306 && ! HONOR_SIGNED_ZEROS (mode)
4307 && ((rtx_equal_p (XEXP (op0, 0), op1)
4308 && rtx_equal_p (XEXP (op0, 1), op2))
4309 || (rtx_equal_p (XEXP (op0, 0), op2)
4310 && rtx_equal_p (XEXP (op0, 1), op1))))
4311 return op2;
4312
4313 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4314 {
4315 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4316 ? GET_MODE (XEXP (op0, 1))
4317 : GET_MODE (XEXP (op0, 0)));
4318 rtx temp;
4319
4320 	  /* Look for the store-flag constants STORE_FLAG_VALUE and zero
 	     in op1 and op2.  */
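	  /* Illustrative example (assuming STORE_FLAG_VALUE == 1, integer
	     comparison): (if_then_else (lt a b) (const_int 1) (const_int 0))
	     becomes (lt a b), and with the arms swapped it becomes the
	     reversed comparison (ge a b).  */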
4321 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4322 {
4323 HOST_WIDE_INT t = INTVAL (op1);
4324 HOST_WIDE_INT f = INTVAL (op2);
4325
4326 if (t == STORE_FLAG_VALUE && f == 0)
4327 code = GET_CODE (op0);
4328 else if (t == 0 && f == STORE_FLAG_VALUE)
4329 {
4330 enum rtx_code tmp;
4331 tmp = reversed_comparison_code (op0, NULL_RTX);
4332 if (tmp == UNKNOWN)
4333 break;
4334 code = tmp;
4335 }
4336 else
4337 break;
4338
4339 return simplify_gen_relational (code, mode, cmp_mode,
4340 XEXP (op0, 0), XEXP (op0, 1));
4341 }
4342
4343 if (cmp_mode == VOIDmode)
4344 cmp_mode = op0_mode;
4345 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4346 cmp_mode, XEXP (op0, 0),
4347 XEXP (op0, 1));
4348
4349 /* See if any simplifications were possible. */
4350 if (temp)
4351 {
4352 if (GET_CODE (temp) == CONST_INT)
4353 return temp == const0_rtx ? op2 : op1;
4354 else if (temp)
4355 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4356 }
4357 }
4358 break;
4359
4360 case VEC_MERGE:
4361 gcc_assert (GET_MODE (op0) == mode);
4362 gcc_assert (GET_MODE (op1) == mode);
4363 gcc_assert (VECTOR_MODE_P (mode));
4364 op2 = avoid_constant_pool_reference (op2);
4365 if (GET_CODE (op2) == CONST_INT)
4366 {
4367 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4368 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4369 int mask = (1 << n_elts) - 1;
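	  /* The mask in op2 selects per element: bit I set takes element I
	     from op0, clear takes it from op1.  Illustrative example: for
	     V4SI vectors, (const_int 5) = 0b0101 selects elements 0 and 2
	     from op0 and elements 1 and 3 from op1.  */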
4370
4371 if (!(INTVAL (op2) & mask))
4372 return op1;
4373 if ((INTVAL (op2) & mask) == mask)
4374 return op0;
4375
4376 op0 = avoid_constant_pool_reference (op0);
4377 op1 = avoid_constant_pool_reference (op1);
4378 if (GET_CODE (op0) == CONST_VECTOR
4379 && GET_CODE (op1) == CONST_VECTOR)
4380 {
4381 rtvec v = rtvec_alloc (n_elts);
4382 unsigned int i;
4383
4384 for (i = 0; i < n_elts; i++)
4385 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4386 ? CONST_VECTOR_ELT (op0, i)
4387 : CONST_VECTOR_ELT (op1, i));
4388 return gen_rtx_CONST_VECTOR (mode, v);
4389 }
4390 }
4391 break;
4392
4393 default:
4394 gcc_unreachable ();
4395 }
4396
4397 return 0;
4398 }
4399
4400 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4401 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4402
4403 Works by unpacking OP into a collection of 8-bit values
4404 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4405 and then repacking them again for OUTERMODE. */
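/* Illustrative example (assuming a little-endian target): a SUBREG:HI at
   byte 0 of (const_int 0x12345678) in SImode unpacks the value into the
   bytes 0x78 0x56 0x34 0x12, selects the first two, and repacks them as
   (const_int 0x5678).  */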
4406
4407 static rtx
4408 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4409 enum machine_mode innermode, unsigned int byte)
4410 {
4411 /* We support up to 512-bit values (for V8DFmode). */
4412 enum {
4413 max_bitsize = 512,
4414 value_bit = 8,
4415 value_mask = (1 << value_bit) - 1
4416 };
4417 unsigned char value[max_bitsize / value_bit];
4418 int value_start;
4419 int i;
4420 int elem;
4421
4422 int num_elem;
4423 rtx * elems;
4424 int elem_bitsize;
4425 rtx result_s;
4426 rtvec result_v = NULL;
4427 enum mode_class outer_class;
4428 enum machine_mode outer_submode;
4429
4430 /* Some ports misuse CCmode. */
4431 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4432 return op;
4433
4434 /* We have no way to represent a complex constant at the rtl level. */
4435 if (COMPLEX_MODE_P (outermode))
4436 return NULL_RTX;
4437
4438 /* Unpack the value. */
4439
4440 if (GET_CODE (op) == CONST_VECTOR)
4441 {
4442 num_elem = CONST_VECTOR_NUNITS (op);
4443 elems = &CONST_VECTOR_ELT (op, 0);
4444 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4445 }
4446 else
4447 {
4448 num_elem = 1;
4449 elems = &op;
4450 elem_bitsize = max_bitsize;
4451 }
4452 /* If this asserts, it is too complicated; reducing value_bit may help. */
4453 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4454 /* I don't know how to handle endianness of sub-units. */
4455 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4456
4457 for (elem = 0; elem < num_elem; elem++)
4458 {
4459 unsigned char * vp;
4460 rtx el = elems[elem];
4461
4462 /* Vectors are kept in target memory order. (This is probably
4463 a mistake.) */
4464 {
4465 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4466 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4467 / BITS_PER_UNIT);
4468 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4469 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4470 unsigned bytele = (subword_byte % UNITS_PER_WORD
4471 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4472 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4473 }
4474
4475 switch (GET_CODE (el))
4476 {
4477 case CONST_INT:
4478 for (i = 0;
4479 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4480 i += value_bit)
4481 *vp++ = INTVAL (el) >> i;
4482 /* CONST_INTs are always logically sign-extended. */
4483 for (; i < elem_bitsize; i += value_bit)
4484 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4485 break;
4486
4487 case CONST_DOUBLE:
4488 if (GET_MODE (el) == VOIDmode)
4489 {
4490 /* If this triggers, someone should have generated a
4491 CONST_INT instead. */
4492 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4493
4494 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4495 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4496 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4497 {
4498 *vp++
4499 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4500 i += value_bit;
4501 }
4502 /* It shouldn't matter what's done here, so fill it with
4503 zero. */
4504 for (; i < elem_bitsize; i += value_bit)
4505 *vp++ = 0;
4506 }
4507 else
4508 {
4509 long tmp[max_bitsize / 32];
4510 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4511
4512 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4513 gcc_assert (bitsize <= elem_bitsize);
4514 gcc_assert (bitsize % value_bit == 0);
4515
4516 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4517 GET_MODE (el));
4518
4519 /* real_to_target produces its result in words affected by
4520 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4521 and use WORDS_BIG_ENDIAN instead; see the documentation
4522 of SUBREG in rtl.texi. */
4523 for (i = 0; i < bitsize; i += value_bit)
4524 {
4525 int ibase;
4526 if (WORDS_BIG_ENDIAN)
4527 ibase = bitsize - 1 - i;
4528 else
4529 ibase = i;
4530 *vp++ = tmp[ibase / 32] >> i % 32;
4531 }
4532
4533 /* It shouldn't matter what's done here, so fill it with
4534 zero. */
4535 for (; i < elem_bitsize; i += value_bit)
4536 *vp++ = 0;
4537 }
4538 break;
4539
4540 default:
4541 gcc_unreachable ();
4542 }
4543 }
4544
4545 /* Now, pick the right byte to start with. */
4546 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4547 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4548 will already have offset 0. */
4549 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4550 {
4551 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4552 - byte);
4553 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4554 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4555 byte = (subword_byte % UNITS_PER_WORD
4556 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4557 }
4558
4559 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4560 so if it's become negative it will instead be very large.) */
4561 gcc_assert (byte < GET_MODE_SIZE (innermode));
4562
4563 /* Convert from bytes to chunks of size value_bit. */
4564 value_start = byte * (BITS_PER_UNIT / value_bit);
4565
4566 /* Re-pack the value. */
4567
4568 if (VECTOR_MODE_P (outermode))
4569 {
4570 num_elem = GET_MODE_NUNITS (outermode);
4571 result_v = rtvec_alloc (num_elem);
4572 elems = &RTVEC_ELT (result_v, 0);
4573 outer_submode = GET_MODE_INNER (outermode);
4574 }
4575 else
4576 {
4577 num_elem = 1;
4578 elems = &result_s;
4579 outer_submode = outermode;
4580 }
4581
4582 outer_class = GET_MODE_CLASS (outer_submode);
4583 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4584
4585 gcc_assert (elem_bitsize % value_bit == 0);
4586 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4587
4588 for (elem = 0; elem < num_elem; elem++)
4589 {
4590 unsigned char *vp;
4591
4592 /* Vectors are stored in target memory order. (This is probably
4593 a mistake.) */
4594 {
4595 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4596 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4597 / BITS_PER_UNIT);
4598 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4599 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4600 unsigned bytele = (subword_byte % UNITS_PER_WORD
4601 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4602 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4603 }
4604
4605 switch (outer_class)
4606 {
4607 case MODE_INT:
4608 case MODE_PARTIAL_INT:
4609 {
4610 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4611
4612 for (i = 0;
4613 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4614 i += value_bit)
4615 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4616 for (; i < elem_bitsize; i += value_bit)
4617 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4618 << (i - HOST_BITS_PER_WIDE_INT));
4619
4620 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4621 know why. */
4622 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4623 elems[elem] = gen_int_mode (lo, outer_submode);
4624 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4625 elems[elem] = immed_double_const (lo, hi, outer_submode);
4626 else
4627 return NULL_RTX;
4628 }
4629 break;
4630
4631 case MODE_FLOAT:
4632 case MODE_DECIMAL_FLOAT:
4633 {
4634 REAL_VALUE_TYPE r;
4635 long tmp[max_bitsize / 32];
4636
4637 /* real_from_target wants its input in words affected by
4638 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4639 and use WORDS_BIG_ENDIAN instead; see the documentation
4640 of SUBREG in rtl.texi. */
4641 for (i = 0; i < max_bitsize / 32; i++)
4642 tmp[i] = 0;
4643 for (i = 0; i < elem_bitsize; i += value_bit)
4644 {
4645 int ibase;
4646 if (WORDS_BIG_ENDIAN)
4647 ibase = elem_bitsize - 1 - i;
4648 else
4649 ibase = i;
4650 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4651 }
4652
4653 real_from_target (&r, tmp, outer_submode);
4654 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4655 }
4656 break;
4657
4658 default:
4659 gcc_unreachable ();
4660 }
4661 }
4662 if (VECTOR_MODE_P (outermode))
4663 return gen_rtx_CONST_VECTOR (outermode, result_v);
4664 else
4665 return result_s;
4666 }
4667
4668 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4669 Return 0 if no simplifications are possible. */
4670 rtx
4671 simplify_subreg (enum machine_mode outermode, rtx op,
4672 enum machine_mode innermode, unsigned int byte)
4673 {
4674 /* Little bit of sanity checking. */
4675 gcc_assert (innermode != VOIDmode);
4676 gcc_assert (outermode != VOIDmode);
4677 gcc_assert (innermode != BLKmode);
4678 gcc_assert (outermode != BLKmode);
4679
4680 gcc_assert (GET_MODE (op) == innermode
4681 || GET_MODE (op) == VOIDmode);
4682
4683 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4684 gcc_assert (byte < GET_MODE_SIZE (innermode));
4685
4686 if (outermode == innermode && !byte)
4687 return op;
4688
4689 if (GET_CODE (op) == CONST_INT
4690 || GET_CODE (op) == CONST_DOUBLE
4691 || GET_CODE (op) == CONST_VECTOR)
4692 return simplify_immed_subreg (outermode, op, innermode, byte);
4693
4694   /* Changing mode twice with SUBREG => just change it once,
4695      or not at all if changing back to the starting mode of OP.  */
4696 if (GET_CODE (op) == SUBREG)
4697 {
4698 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4699 int final_offset = byte + SUBREG_BYTE (op);
4700 rtx newx;
4701
4702 if (outermode == innermostmode
4703 && byte == 0 && SUBREG_BYTE (op) == 0)
4704 return SUBREG_REG (op);
4705
4706       /* The SUBREG_BYTE represents the offset, as if the value were stored
4707          in memory.  An irritating exception is a paradoxical subreg, where
4708          we define SUBREG_BYTE to be 0.  On big-endian machines, this
4709          value should be negative.  For a moment, undo this exception.  */
4710 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4711 {
4712 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4713 if (WORDS_BIG_ENDIAN)
4714 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4715 if (BYTES_BIG_ENDIAN)
4716 final_offset += difference % UNITS_PER_WORD;
4717 }
4718 if (SUBREG_BYTE (op) == 0
4719 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4720 {
4721 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4722 if (WORDS_BIG_ENDIAN)
4723 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4724 if (BYTES_BIG_ENDIAN)
4725 final_offset += difference % UNITS_PER_WORD;
4726 }
4727
4728 /* See whether resulting subreg will be paradoxical. */
4729 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4730 {
4731 /* In nonparadoxical subregs we can't handle negative offsets. */
4732 if (final_offset < 0)
4733 return NULL_RTX;
4734 /* Bail out in case resulting subreg would be incorrect. */
4735 if (final_offset % GET_MODE_SIZE (outermode)
4736 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4737 return NULL_RTX;
4738 }
4739 else
4740 {
4741 int offset = 0;
4742 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4743
4744       /* In a paradoxical subreg, see if we are still looking at the
4745          lower part.  If so, our SUBREG_BYTE will be 0.  */
4746 if (WORDS_BIG_ENDIAN)
4747 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4748 if (BYTES_BIG_ENDIAN)
4749 offset += difference % UNITS_PER_WORD;
4750 if (offset == final_offset)
4751 final_offset = 0;
4752 else
4753 return NULL_RTX;
4754 }
4755
4756 /* Recurse for further possible simplifications. */
4757 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4758 final_offset);
4759 if (newx)
4760 return newx;
4761 if (validate_subreg (outermode, innermostmode,
4762 SUBREG_REG (op), final_offset))
4763 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4764 return NULL_RTX;
4765 }
4766
4767 /* Merge implicit and explicit truncations. */
4768
4769 if (GET_CODE (op) == TRUNCATE
4770 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4771 && subreg_lowpart_offset (outermode, innermode) == byte)
4772 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4773 GET_MODE (XEXP (op, 0)));
4774
4775 /* SUBREG of a hard register => just change the register number
4776 and/or mode. If the hard register is not valid in that mode,
4777 suppress this simplification. If the hard register is the stack,
4778 frame, or argument pointer, leave this as a SUBREG. */
4779
4780 if (REG_P (op)
4781 && REGNO (op) < FIRST_PSEUDO_REGISTER
4782 #ifdef CANNOT_CHANGE_MODE_CLASS
4783 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4784 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4785 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4786 #endif
4787 && ((reload_completed && !frame_pointer_needed)
4788 || (REGNO (op) != FRAME_POINTER_REGNUM
4789 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4790 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4791 #endif
4792 ))
4793 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4794 && REGNO (op) != ARG_POINTER_REGNUM
4795 #endif
4796 && REGNO (op) != STACK_POINTER_REGNUM
4797 && subreg_offset_representable_p (REGNO (op), innermode,
4798 byte, outermode))
4799 {
4800 unsigned int regno = REGNO (op);
4801 unsigned int final_regno
4802 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4803
4804 /* ??? We do allow it if the current REG is not valid for
4805 its mode. This is a kludge to work around how float/complex
4806 arguments are passed on 32-bit SPARC and should be fixed. */
4807 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4808 || ! HARD_REGNO_MODE_OK (regno, innermode))
4809 {
4810 rtx x;
4811 int final_offset = byte;
4812
4813 /* Adjust offset for paradoxical subregs. */
4814 if (byte == 0
4815 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4816 {
4817 int difference = (GET_MODE_SIZE (innermode)
4818 - GET_MODE_SIZE (outermode));
4819 if (WORDS_BIG_ENDIAN)
4820 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4821 if (BYTES_BIG_ENDIAN)
4822 final_offset += difference % UNITS_PER_WORD;
4823 }
4824
4825 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4826
4827      /* Propagate the original regno.  We don't have any way to specify
4828         the offset inside the original regno, so do so only for the lowpart.
4829         The information is used only by alias analysis, which cannot
4830         handle a partial register anyway.  */
4831
4832 if (subreg_lowpart_offset (outermode, innermode) == byte)
4833 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4834 return x;
4835 }
4836 }
4837
4838 /* If we have a SUBREG of a register that we are replacing and we are
4839 replacing it with a MEM, make a new MEM and try replacing the
4840 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4841 or if we would be widening it. */
4842
4843 if (MEM_P (op)
4844 && ! mode_dependent_address_p (XEXP (op, 0))
4845       /* Allow splitting of volatile memory references in case we don't
4846          have an instruction to move the whole thing.  */
4847 && (! MEM_VOLATILE_P (op)
4848 || ! have_insn_for (SET, innermode))
4849 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4850 return adjust_address_nv (op, outermode, byte);
4851
4852 /* Handle complex values represented as CONCAT
4853 of real and imaginary part. */
4854 if (GET_CODE (op) == CONCAT)
4855 {
4856 unsigned int part_size, final_offset;
4857 rtx part, res;
4858
4859 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4860 if (byte < part_size)
4861 {
4862 part = XEXP (op, 0);
4863 final_offset = byte;
4864 }
4865 else
4866 {
4867 part = XEXP (op, 1);
4868 final_offset = byte - part_size;
4869 }
4870
4871 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4872 return NULL_RTX;
4873
4874 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4875 if (res)
4876 return res;
4877 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4878 return gen_rtx_SUBREG (outermode, part, final_offset);
4879 return NULL_RTX;
4880 }
4881
4882 /* Optimize SUBREG truncations of zero and sign extended values. */
4883 if ((GET_CODE (op) == ZERO_EXTEND
4884 || GET_CODE (op) == SIGN_EXTEND)
4885 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4886 {
4887 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4888
4889 /* If we're requesting the lowpart of a zero or sign extension,
4890 there are three possibilities. If the outermode is the same
4891 as the origmode, we can omit both the extension and the subreg.
4892 If the outermode is not larger than the origmode, we can apply
4893 the truncation without the extension. Finally, if the outermode
4894 is larger than the origmode, but both are integer modes, we
4895 can just extend to the appropriate mode. */
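      /* Illustrative examples (assuming a little-endian target, so byte 0
	 is the lowpart): (subreg:QI (zero_extend:SI (reg:QI x)) 0)
	 simplifies to (reg:QI x), and (subreg:HI (zero_extend:SI (reg:QI x)) 0)
	 simplifies to (zero_extend:HI (reg:QI x)).  */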
4896 if (bitpos == 0)
4897 {
4898 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4899 if (outermode == origmode)
4900 return XEXP (op, 0);
4901 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4902 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4903 subreg_lowpart_offset (outermode,
4904 origmode));
4905 if (SCALAR_INT_MODE_P (outermode))
4906 return simplify_gen_unary (GET_CODE (op), outermode,
4907 XEXP (op, 0), origmode);
4908 }
4909
4910       /* A SUBREG resulting from a zero extension may fold to zero if
4911          it extracts higher bits than the ZERO_EXTEND's source provides.  */
4912 if (GET_CODE (op) == ZERO_EXTEND
4913 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4914 return CONST0_RTX (outermode);
4915 }
4916
4917   /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4918      into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4919      the outer subreg is effectively a truncation to the original mode.  */
4920 if ((GET_CODE (op) == LSHIFTRT
4921 || GET_CODE (op) == ASHIFTRT)
4922 && SCALAR_INT_MODE_P (outermode)
4923 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4924 to avoid the possibility that an outer LSHIFTRT shifts by more
4925 than the sign extension's sign_bit_copies and introduces zeros
4926 into the high bits of the result. */
4927 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4928 && GET_CODE (XEXP (op, 1)) == CONST_INT
4929 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4930 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4931 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4932 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4933 return simplify_gen_binary (ASHIFTRT, outermode,
4934 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4935
4936   /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4937      into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4938      the outer subreg is effectively a truncation to the original mode.  */
4939 if ((GET_CODE (op) == LSHIFTRT
4940 || GET_CODE (op) == ASHIFTRT)
4941 && SCALAR_INT_MODE_P (outermode)
4942 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4943 && GET_CODE (XEXP (op, 1)) == CONST_INT
4944 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4945 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4946 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4947 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4948 return simplify_gen_binary (LSHIFTRT, outermode,
4949 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4950
4951   /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4952      into (ashift:QI (x:QI) C), where C is a suitable small constant and
4953      the outer subreg is effectively a truncation to the original mode.  */
4954 if (GET_CODE (op) == ASHIFT
4955 && SCALAR_INT_MODE_P (outermode)
4956 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4957 && GET_CODE (XEXP (op, 1)) == CONST_INT
4958 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4959 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4960 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4961 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4962 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4963 return simplify_gen_binary (ASHIFT, outermode,
4964 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4965
4966 return NULL_RTX;
4967 }
4968
4969 /* Make a SUBREG operation or equivalent if it folds. */
4970
4971 rtx
4972 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4973 enum machine_mode innermode, unsigned int byte)
4974 {
4975 rtx newx;
4976
4977 newx = simplify_subreg (outermode, op, innermode, byte);
4978 if (newx)
4979 return newx;
4980
4981 if (GET_CODE (op) == SUBREG
4982 || GET_CODE (op) == CONCAT
4983 || GET_MODE (op) == VOIDmode)
4984 return NULL_RTX;
4985
4986 if (validate_subreg (outermode, innermode, op, byte))
4987 return gen_rtx_SUBREG (outermode, op, byte);
4988
4989 return NULL_RTX;
4990 }
4991
4992 /* Simplify X, an rtx expression.
4993
4994 Return the simplified expression or NULL if no simplifications
4995 were possible.
4996
4997 This is the preferred entry point into the simplification routines;
4998 however, we still allow passes to call the more specific routines.
4999
5000 Right now GCC has three (yes, three) major bodies of RTL simplification
5001 code that need to be unified.
5002
5003 1. fold_rtx in cse.c. This code uses various CSE specific
5004 information to aid in RTL simplification.
5005
5006 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5007 it uses combine specific information to aid in RTL
5008 simplification.
5009
5010 3. The routines in this file.
5011
5012
5013 Long term we want to only have one body of simplification code; to
5014 get to that state I recommend the following steps:
5015
5016    1. Pore over fold_rtx & simplify_rtx and move any simplifications
5017       which do not depend on pass-specific state into these routines.
5018
5019 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5020 use this routine whenever possible.
5021
5022 3. Allow for pass dependent state to be provided to these
5023 routines and add simplifications based on the pass dependent
5024 state. Remove code from cse.c & combine.c that becomes
5025 redundant/dead.
5026
5027 It will take time, but ultimately the compiler will be easier to
5028    maintain and improve.  It's totally silly that when we add a
5029    simplification it needs to be added to 4 places (3 for RTL
5030    simplification and 1 for tree simplification).  */
5031
5032 rtx
5033 simplify_rtx (rtx x)
5034 {
5035 enum rtx_code code = GET_CODE (x);
5036 enum machine_mode mode = GET_MODE (x);
5037
5038 switch (GET_RTX_CLASS (code))
5039 {
5040 case RTX_UNARY:
5041 return simplify_unary_operation (code, mode,
5042 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5043 case RTX_COMM_ARITH:
5044 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5045 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5046
5047 /* Fall through.... */
5048
5049 case RTX_BIN_ARITH:
5050 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5051
5052 case RTX_TERNARY:
5053 case RTX_BITFIELD_OPS:
5054 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5055 XEXP (x, 0), XEXP (x, 1),
5056 XEXP (x, 2));
5057
5058 case RTX_COMPARE:
5059 case RTX_COMM_COMPARE:
5060 return simplify_relational_operation (code, mode,
5061 ((GET_MODE (XEXP (x, 0))
5062 != VOIDmode)
5063 ? GET_MODE (XEXP (x, 0))
5064 : GET_MODE (XEXP (x, 1))),
5065 XEXP (x, 0),
5066 XEXP (x, 1));
5067
5068 case RTX_EXTRA:
5069 if (code == SUBREG)
5070 return simplify_subreg (mode, SUBREG_REG (x),
5071 GET_MODE (SUBREG_REG (x)),
5072 SUBREG_BYTE (x));
5073 break;
5074
5075 case RTX_OBJ:
5076 if (code == LO_SUM)
5077 {
5078 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5079 if (GET_CODE (XEXP (x, 0)) == HIGH
5080 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5081 return XEXP (x, 1);
5082 }
5083 break;
5084
5085 default:
5086 break;
5087 }
5088 return NULL;
5089 }