1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
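/* For illustration (assuming a 64-bit HOST_WIDE_INT): a low word of
   0x8000000000000000 is negative when reinterpreted as signed, so
   HWI_SIGN_EXTEND yields (HOST_WIDE_INT) -1 for the high word, while a
   low word of 1 yields 0.  */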
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
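/* For example, in SImode the constant (const_int -2147483648) has no
   representable negation; the negated value is simply truncated back
   into SImode by gen_int_mode, so the same constant comes back.  */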
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
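/* For example, for SImode this accepts only the constant with just bit 31
   set, 0x80000000, which appears as (const_int -2147483648) once
   sign-extended into a HOST_WIDE_INT (illustrative, assuming
   HOST_BITS_PER_WIDE_INT >= 32).  */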
76
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 addr = XEXP (x, 0);
162
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
165
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
170 {
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
173 }
174
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
177
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
182 {
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
185
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
190 {
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
194 }
195 else
196 return c;
197 }
198
199 return x;
200 }
201
202 /* Return true if X is a MEM referencing the constant pool. */
203
204 bool
205 constant_pool_reference_p (rtx x)
206 {
207 return avoid_constant_pool_reference (x) != x;
208 }
209 \f
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
212
213 rtx
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
216 {
217 rtx tem;
218
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
222
223 return gen_rtx_fmt_e (code, mode, op);
224 }
225
226 /* Likewise for ternary operations. */
227
228 rtx
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
231 {
232 rtx tem;
233
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
238
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
240 }
241
242 /* Likewise, for relational operations.
  243    CMP_MODE specifies the mode in which the comparison is done.  */
244
245 rtx
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
248 {
249 rtx tem;
250
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
254
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
256 }
257 \f
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
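/* As an illustrative sketch: replacing (reg:SI 1) with (const_int 4) in
   (plus:SI (reg:SI 1) (const_int 3)) rebuilds the PLUS through
   simplify_gen_binary, which folds it to (const_int 7).  The register
   number and values here are arbitrary.  */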
260
261 rtx
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
263 {
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
268
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
272
273 if (x == old_rtx)
274 return new_rtx;
275
276 switch (GET_RTX_CLASS (code))
277 {
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
285
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
293
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
304
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
317
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
321 {
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
329 }
330 break;
331
332 case RTX_OBJ:
333 if (code == MEM)
334 {
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
339 }
340 else if (code == LO_SUM)
341 {
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
344
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
348
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
352 }
353 else if (code == REG)
354 {
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
357 }
358 break;
359
360 default:
361 break;
362 }
363 return x;
364 }
365 \f
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
369 rtx
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
372 {
373 rtx trueop, tem;
374
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
377
378 trueop = avoid_constant_pool_reference (op);
379
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
383
384 return simplify_unary_operation_1 (code, mode, op);
385 }
386
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
391 {
392 enum rtx_code reversed;
393 rtx temp;
394
395 switch (code)
396 {
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
401
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
409
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
414
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
418
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
425
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
433
434
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
439 bother with. */
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
442 {
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
445 }
446
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
450
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
457
458
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
465 {
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
467 rtx x;
468
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
471 inner_mode),
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
474 }
475
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
479 coded. */
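      /* For example, (not (ior A B)) becomes (and (not A) (not B)) and
         (not (and A B)) becomes (ior (not A) (not B)).  */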
480
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
482 {
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
485
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
488
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
491 op_mode = mode;
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
493
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
495 {
496 rtx tem = in2;
497 in2 = in1; in1 = tem;
498 }
499
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
501 mode, in1, in2);
502 }
503 break;
504
505 case NEG:
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
508 return XEXP (op, 0);
509
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
514
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
518
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
521 both +0, (minus Y X) is the same as (minus X Y). If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
528
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
532 {
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
536 {
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
538 if (temp)
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
540 }
541
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
545 }
546
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
551 {
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
554 }
555
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
558 is a constant). */
559 if (GET_CODE (op) == ASHIFT)
560 {
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
562 if (temp)
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
564 }
565
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
573
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
581
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
587
588 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
589 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
590 if (GET_CODE (op) == LT
591 && XEXP (op, 1) == const0_rtx)
592 {
593 enum machine_mode inner = GET_MODE (XEXP (op, 0));
594 int isize = GET_MODE_BITSIZE (inner);
595 if (STORE_FLAG_VALUE == 1)
596 {
597 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
598 GEN_INT (isize - 1));
599 if (mode == inner)
600 return temp;
601 if (GET_MODE_BITSIZE (mode) > isize)
602 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
603 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
604 }
605 else if (STORE_FLAG_VALUE == -1)
606 {
607 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
608 GEN_INT (isize - 1));
609 if (mode == inner)
610 return temp;
611 if (GET_MODE_BITSIZE (mode) > isize)
612 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
613 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
614 }
615 }
616 break;
617
618 case TRUNCATE:
619 /* We can't handle truncation to a partial integer mode here
620 because we don't know the real bitsize of the partial
621 integer mode. */
622 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
623 break;
624
625 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
626 if ((GET_CODE (op) == SIGN_EXTEND
627 || GET_CODE (op) == ZERO_EXTEND)
628 && GET_MODE (XEXP (op, 0)) == mode)
629 return XEXP (op, 0);
630
631 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
632 (OP:SI foo:SI) if OP is NEG or ABS. */
633 if ((GET_CODE (op) == ABS
634 || GET_CODE (op) == NEG)
635 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
636 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
637 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (XEXP (op, 0), 0), mode);
640
641 /* (truncate:A (subreg:B (truncate:C X) 0)) is
642 (truncate:A X). */
643 if (GET_CODE (op) == SUBREG
644 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
645 && subreg_lowpart_p (op))
646 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
647 GET_MODE (XEXP (SUBREG_REG (op), 0)));
648
649 /* If we know that the value is already truncated, we can
650 replace the TRUNCATE with a SUBREG. Note that this is also
651 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
  652        modes; we just have to apply a different definition for
653 truncation. But don't do this for an (LSHIFTRT (MULT ...))
654 since this will cause problems with the umulXi3_highpart
655 patterns. */
656 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
657 GET_MODE_BITSIZE (GET_MODE (op)))
658 ? (num_sign_bit_copies (op, GET_MODE (op))
659 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
660 - GET_MODE_BITSIZE (mode)))
661 : truncated_to_mode (mode, op))
662 && ! (GET_CODE (op) == LSHIFTRT
663 && GET_CODE (XEXP (op, 0)) == MULT))
664 return rtl_hooks.gen_lowpart_no_emit (mode, op);
665
666 /* A truncate of a comparison can be replaced with a subreg if
667 STORE_FLAG_VALUE permits. This is like the previous test,
668 but it works even if the comparison is done in a mode larger
669 than HOST_BITS_PER_WIDE_INT. */
670 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
671 && COMPARISON_P (op)
672 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
673 return rtl_hooks.gen_lowpart_no_emit (mode, op);
674 break;
675
676 case FLOAT_TRUNCATE:
677 if (DECIMAL_FLOAT_MODE_P (mode))
678 break;
679
680 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
681 if (GET_CODE (op) == FLOAT_EXTEND
682 && GET_MODE (XEXP (op, 0)) == mode)
683 return XEXP (op, 0);
684
685 /* (float_truncate:SF (float_truncate:DF foo:XF))
686 = (float_truncate:SF foo:XF).
687 This may eliminate double rounding, so it is unsafe.
688
689 (float_truncate:SF (float_extend:XF foo:DF))
690 = (float_truncate:SF foo:DF).
691
692 (float_truncate:DF (float_extend:XF foo:SF))
  693            = (float_extend:DF foo:SF).  */
694 if ((GET_CODE (op) == FLOAT_TRUNCATE
695 && flag_unsafe_math_optimizations)
696 || GET_CODE (op) == FLOAT_EXTEND)
697 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
698 0)))
699 > GET_MODE_SIZE (mode)
700 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
701 mode,
702 XEXP (op, 0), mode);
703
704 /* (float_truncate (float x)) is (float x) */
705 if (GET_CODE (op) == FLOAT
706 && (flag_unsafe_math_optimizations
707 || ((unsigned)significand_size (GET_MODE (op))
708 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
709 - num_sign_bit_copies (XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)))))))
711 return simplify_gen_unary (FLOAT, mode,
712 XEXP (op, 0),
713 GET_MODE (XEXP (op, 0)));
714
715 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
716 (OP:SF foo:SF) if OP is NEG or ABS. */
717 if ((GET_CODE (op) == ABS
718 || GET_CODE (op) == NEG)
719 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
720 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
721 return simplify_gen_unary (GET_CODE (op), mode,
722 XEXP (XEXP (op, 0), 0), mode);
723
724 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
725 is (float_truncate:SF x). */
726 if (GET_CODE (op) == SUBREG
727 && subreg_lowpart_p (op)
728 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
729 return SUBREG_REG (op);
730 break;
731
732 case FLOAT_EXTEND:
733 if (DECIMAL_FLOAT_MODE_P (mode))
734 break;
735
736 /* (float_extend (float_extend x)) is (float_extend x)
737
738 (float_extend (float x)) is (float x) assuming that double
739 rounding can't happen.
740 */
741 if (GET_CODE (op) == FLOAT_EXTEND
742 || (GET_CODE (op) == FLOAT
743 && ((unsigned)significand_size (GET_MODE (op))
744 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
745 - num_sign_bit_copies (XEXP (op, 0),
746 GET_MODE (XEXP (op, 0)))))))
747 return simplify_gen_unary (GET_CODE (op), mode,
748 XEXP (op, 0),
749 GET_MODE (XEXP (op, 0)));
750
751 break;
752
753 case ABS:
754 /* (abs (neg <foo>)) -> (abs <foo>) */
755 if (GET_CODE (op) == NEG)
756 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
757 GET_MODE (XEXP (op, 0)));
758
759 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
760 do nothing. */
761 if (GET_MODE (op) == VOIDmode)
762 break;
763
764 /* If operand is something known to be positive, ignore the ABS. */
765 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
766 || ((GET_MODE_BITSIZE (GET_MODE (op))
767 <= HOST_BITS_PER_WIDE_INT)
768 && ((nonzero_bits (op, GET_MODE (op))
769 & ((HOST_WIDE_INT) 1
770 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
771 == 0)))
772 return op;
773
774 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
775 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
776 return gen_rtx_NEG (mode, op);
777
778 break;
779
780 case FFS:
781 /* (ffs (*_extend <X>)) = (ffs <X>) */
782 if (GET_CODE (op) == SIGN_EXTEND
783 || GET_CODE (op) == ZERO_EXTEND)
784 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
785 GET_MODE (XEXP (op, 0)));
786 break;
787
788 case POPCOUNT:
789 case PARITY:
790 /* (pop* (zero_extend <X>)) = (pop* <X>) */
791 if (GET_CODE (op) == ZERO_EXTEND)
792 return simplify_gen_unary (code, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
794 break;
795
796 case FLOAT:
797 /* (float (sign_extend <X>)) = (float <X>). */
798 if (GET_CODE (op) == SIGN_EXTEND)
799 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
802
803 case SIGN_EXTEND:
804 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
805 becomes just the MINUS if its mode is MODE. This allows
806 folding switch statements on machines using casesi (such as
807 the VAX). */
808 if (GET_CODE (op) == TRUNCATE
809 && GET_MODE (XEXP (op, 0)) == mode
810 && GET_CODE (XEXP (op, 0)) == MINUS
811 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
812 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
813 return XEXP (op, 0);
814
815 /* Check for a sign extension of a subreg of a promoted
816 variable, where the promotion is sign-extended, and the
817 target mode is the same as the variable's promotion. */
818 if (GET_CODE (op) == SUBREG
819 && SUBREG_PROMOTED_VAR_P (op)
820 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
821 && GET_MODE (XEXP (op, 0)) == mode)
822 return XEXP (op, 0);
823
824 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
825 if (! POINTERS_EXTEND_UNSIGNED
826 && mode == Pmode && GET_MODE (op) == ptr_mode
827 && (CONSTANT_P (op)
828 || (GET_CODE (op) == SUBREG
829 && REG_P (SUBREG_REG (op))
830 && REG_POINTER (SUBREG_REG (op))
831 && GET_MODE (SUBREG_REG (op)) == Pmode)))
832 return convert_memory_address (Pmode, op);
833 #endif
834 break;
835
836 case ZERO_EXTEND:
837 /* Check for a zero extension of a subreg of a promoted
838 variable, where the promotion is zero-extended, and the
839 target mode is the same as the variable's promotion. */
840 if (GET_CODE (op) == SUBREG
841 && SUBREG_PROMOTED_VAR_P (op)
842 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
843 && GET_MODE (XEXP (op, 0)) == mode)
844 return XEXP (op, 0);
845
846 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
847 if (POINTERS_EXTEND_UNSIGNED > 0
848 && mode == Pmode && GET_MODE (op) == ptr_mode
849 && (CONSTANT_P (op)
850 || (GET_CODE (op) == SUBREG
851 && REG_P (SUBREG_REG (op))
852 && REG_POINTER (SUBREG_REG (op))
853 && GET_MODE (SUBREG_REG (op)) == Pmode)))
854 return convert_memory_address (Pmode, op);
855 #endif
856 break;
857
858 default:
859 break;
860 }
861
862 return 0;
863 }
864
865 /* Try to compute the value of a unary operation CODE whose output mode is to
866 be MODE with input operand OP whose mode was originally OP_MODE.
867 Return zero if the value cannot be computed. */
868 rtx
869 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
870 rtx op, enum machine_mode op_mode)
871 {
872 unsigned int width = GET_MODE_BITSIZE (mode);
873
874 if (code == VEC_DUPLICATE)
875 {
876 gcc_assert (VECTOR_MODE_P (mode));
877 if (GET_MODE (op) != VOIDmode)
878 {
879 if (!VECTOR_MODE_P (GET_MODE (op)))
880 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
881 else
882 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
883 (GET_MODE (op)));
884 }
885 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
886 || GET_CODE (op) == CONST_VECTOR)
887 {
888 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
889 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
890 rtvec v = rtvec_alloc (n_elts);
891 unsigned int i;
892
893 if (GET_CODE (op) != CONST_VECTOR)
894 for (i = 0; i < n_elts; i++)
895 RTVEC_ELT (v, i) = op;
896 else
897 {
898 enum machine_mode inmode = GET_MODE (op);
899 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
900 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
901
902 gcc_assert (in_n_elts < n_elts);
903 gcc_assert ((n_elts % in_n_elts) == 0);
904 for (i = 0; i < n_elts; i++)
905 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
906 }
907 return gen_rtx_CONST_VECTOR (mode, v);
908 }
909 }
910
911 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
912 {
913 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
914 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
915 enum machine_mode opmode = GET_MODE (op);
916 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
917 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
918 rtvec v = rtvec_alloc (n_elts);
919 unsigned int i;
920
921 gcc_assert (op_n_elts == n_elts);
922 for (i = 0; i < n_elts; i++)
923 {
924 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
925 CONST_VECTOR_ELT (op, i),
926 GET_MODE_INNER (opmode));
927 if (!x)
928 return 0;
929 RTVEC_ELT (v, i) = x;
930 }
931 return gen_rtx_CONST_VECTOR (mode, v);
932 }
933
934 /* The order of these tests is critical so that, for example, we don't
935 check the wrong mode (input vs. output) for a conversion operation,
936 such as FIX. At some point, this should be simplified. */
937
938 if (code == FLOAT && GET_MODE (op) == VOIDmode
939 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
940 {
941 HOST_WIDE_INT hv, lv;
942 REAL_VALUE_TYPE d;
943
944 if (GET_CODE (op) == CONST_INT)
945 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
946 else
947 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
948
949 REAL_VALUE_FROM_INT (d, lv, hv, mode);
950 d = real_value_truncate (mode, d);
951 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
952 }
953 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
954 && (GET_CODE (op) == CONST_DOUBLE
955 || GET_CODE (op) == CONST_INT))
956 {
957 HOST_WIDE_INT hv, lv;
958 REAL_VALUE_TYPE d;
959
960 if (GET_CODE (op) == CONST_INT)
961 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
962 else
963 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
964
965 if (op_mode == VOIDmode)
966 {
967 /* We don't know how to interpret negative-looking numbers in
968 this case, so don't try to fold those. */
969 if (hv < 0)
970 return 0;
971 }
972 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
973 ;
974 else
975 hv = 0, lv &= GET_MODE_MASK (op_mode);
976
977 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
978 d = real_value_truncate (mode, d);
979 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
980 }
981
982 if (GET_CODE (op) == CONST_INT
983 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
984 {
985 HOST_WIDE_INT arg0 = INTVAL (op);
986 HOST_WIDE_INT val;
987
988 switch (code)
989 {
990 case NOT:
991 val = ~ arg0;
992 break;
993
994 case NEG:
995 val = - arg0;
996 break;
997
998 case ABS:
999 val = (arg0 >= 0 ? arg0 : - arg0);
1000 break;
1001
1002 case FFS:
1003 /* Don't use ffs here. Instead, get low order bit and then its
1004 number. If arg0 is zero, this will return 0, as desired. */
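          /* Worked example: arg0 == 12 (binary 1100) gives arg0 & -arg0 == 4,
             exact_log2 returns 2, and val == 3, matching ffs (12).  */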
1005 arg0 &= GET_MODE_MASK (mode);
1006 val = exact_log2 (arg0 & (- arg0)) + 1;
1007 break;
1008
1009 case CLZ:
1010 arg0 &= GET_MODE_MASK (mode);
1011 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1012 ;
1013 else
1014 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
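          /* E.g. in SImode with arg0 == 1, floor_log2 (1) == 0, so val == 31.  */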
1015 break;
1016
1017 case CTZ:
1018 arg0 &= GET_MODE_MASK (mode);
1019 if (arg0 == 0)
1020 {
1021 /* Even if the value at zero is undefined, we have to come
1022 up with some replacement. Seems good enough. */
1023 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1024 val = GET_MODE_BITSIZE (mode);
1025 }
1026 else
1027 val = exact_log2 (arg0 & -arg0);
1028 break;
1029
1030 case POPCOUNT:
1031 arg0 &= GET_MODE_MASK (mode);
1032 val = 0;
1033 while (arg0)
1034 val++, arg0 &= arg0 - 1;
1035 break;
1036
1037 case PARITY:
1038 arg0 &= GET_MODE_MASK (mode);
1039 val = 0;
1040 while (arg0)
1041 val++, arg0 &= arg0 - 1;
1042 val &= 1;
1043 break;
1044
1045 case BSWAP:
1046 return 0;
1047
1048 case TRUNCATE:
1049 val = arg0;
1050 break;
1051
1052 case ZERO_EXTEND:
1053 /* When zero-extending a CONST_INT, we need to know its
1054 original mode. */
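          /* E.g. zero-extending the QImode constant (const_int -1) into SImode
             yields (const_int 255).  */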
1055 gcc_assert (op_mode != VOIDmode);
1056 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1057 {
1058 /* If we were really extending the mode,
1059 we would have to distinguish between zero-extension
1060 and sign-extension. */
1061 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1062 val = arg0;
1063 }
1064 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1065 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1066 else
1067 return 0;
1068 break;
1069
1070 case SIGN_EXTEND:
1071 if (op_mode == VOIDmode)
1072 op_mode = mode;
1073 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1074 {
1075 /* If we were really extending the mode,
1076 we would have to distinguish between zero-extension
1077 and sign-extension. */
1078 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1079 val = arg0;
1080 }
1081 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1082 {
1083 val
1084 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1085 if (val
1086 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1087 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1088 }
1089 else
1090 return 0;
1091 break;
1092
1093 case SQRT:
1094 case FLOAT_EXTEND:
1095 case FLOAT_TRUNCATE:
1096 case SS_TRUNCATE:
1097 case US_TRUNCATE:
1098 case SS_NEG:
1099 return 0;
1100
1101 default:
1102 gcc_unreachable ();
1103 }
1104
1105 return gen_int_mode (val, mode);
1106 }
1107
1108 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1109 for a DImode operation on a CONST_INT. */
1110 else if (GET_MODE (op) == VOIDmode
1111 && width <= HOST_BITS_PER_WIDE_INT * 2
1112 && (GET_CODE (op) == CONST_DOUBLE
1113 || GET_CODE (op) == CONST_INT))
1114 {
1115 unsigned HOST_WIDE_INT l1, lv;
1116 HOST_WIDE_INT h1, hv;
1117
1118 if (GET_CODE (op) == CONST_DOUBLE)
1119 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1120 else
1121 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1122
1123 switch (code)
1124 {
1125 case NOT:
1126 lv = ~ l1;
1127 hv = ~ h1;
1128 break;
1129
1130 case NEG:
1131 neg_double (l1, h1, &lv, &hv);
1132 break;
1133
1134 case ABS:
1135 if (h1 < 0)
1136 neg_double (l1, h1, &lv, &hv);
1137 else
1138 lv = l1, hv = h1;
1139 break;
1140
1141 case FFS:
1142 hv = 0;
1143 if (l1 == 0)
1144 {
1145 if (h1 == 0)
1146 lv = 0;
1147 else
1148 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1149 }
1150 else
1151 lv = exact_log2 (l1 & -l1) + 1;
1152 break;
1153
1154 case CLZ:
1155 hv = 0;
1156 if (h1 != 0)
1157 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1158 - HOST_BITS_PER_WIDE_INT;
1159 else if (l1 != 0)
1160 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1161 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1162 lv = GET_MODE_BITSIZE (mode);
1163 break;
1164
1165 case CTZ:
1166 hv = 0;
1167 if (l1 != 0)
1168 lv = exact_log2 (l1 & -l1);
1169 else if (h1 != 0)
1170 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1171 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1172 lv = GET_MODE_BITSIZE (mode);
1173 break;
1174
1175 case POPCOUNT:
1176 hv = 0;
1177 lv = 0;
1178 while (l1)
1179 lv++, l1 &= l1 - 1;
1180 while (h1)
1181 lv++, h1 &= h1 - 1;
1182 break;
1183
1184 case PARITY:
1185 hv = 0;
1186 lv = 0;
1187 while (l1)
1188 lv++, l1 &= l1 - 1;
1189 while (h1)
1190 lv++, h1 &= h1 - 1;
1191 lv &= 1;
1192 break;
1193
1194 case TRUNCATE:
1195 /* This is just a change-of-mode, so do nothing. */
1196 lv = l1, hv = h1;
1197 break;
1198
1199 case ZERO_EXTEND:
1200 gcc_assert (op_mode != VOIDmode);
1201
1202 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1203 return 0;
1204
1205 hv = 0;
1206 lv = l1 & GET_MODE_MASK (op_mode);
1207 break;
1208
1209 case SIGN_EXTEND:
1210 if (op_mode == VOIDmode
1211 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1212 return 0;
1213 else
1214 {
1215 lv = l1 & GET_MODE_MASK (op_mode);
1216 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1217 && (lv & ((HOST_WIDE_INT) 1
1218 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1219 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1220
1221 hv = HWI_SIGN_EXTEND (lv);
1222 }
1223 break;
1224
1225 case SQRT:
1226 return 0;
1227
1228 default:
1229 return 0;
1230 }
1231
1232 return immed_double_const (lv, hv, mode);
1233 }
1234
1235 else if (GET_CODE (op) == CONST_DOUBLE
1236 && SCALAR_FLOAT_MODE_P (mode))
1237 {
1238 REAL_VALUE_TYPE d, t;
1239 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1240
1241 switch (code)
1242 {
1243 case SQRT:
1244 if (HONOR_SNANS (mode) && real_isnan (&d))
1245 return 0;
1246 real_sqrt (&t, mode, &d);
1247 d = t;
1248 break;
1249 case ABS:
1250 d = REAL_VALUE_ABS (d);
1251 break;
1252 case NEG:
1253 d = REAL_VALUE_NEGATE (d);
1254 break;
1255 case FLOAT_TRUNCATE:
1256 d = real_value_truncate (mode, d);
1257 break;
1258 case FLOAT_EXTEND:
1259 /* All this does is change the mode. */
1260 break;
1261 case FIX:
1262 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1263 break;
1264 case NOT:
1265 {
1266 long tmp[4];
1267 int i;
1268
1269 real_to_target (tmp, &d, GET_MODE (op));
1270 for (i = 0; i < 4; i++)
1271 tmp[i] = ~tmp[i];
1272 real_from_target (&d, tmp, mode);
1273 break;
1274 }
1275 default:
1276 gcc_unreachable ();
1277 }
1278 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1279 }
1280
1281 else if (GET_CODE (op) == CONST_DOUBLE
1282 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1283 && GET_MODE_CLASS (mode) == MODE_INT
1284 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1285 {
1286 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1287 operators are intentionally left unspecified (to ease implementation
1288 by target backends), for consistency, this routine implements the
1289 same semantics for constant folding as used by the middle-end. */
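      /* One consequence is saturation: e.g. (fix:SI (const_double 1.0e30))
         folds to (const_int 2147483647), and a NaN operand folds to
         const0_rtx (the values here are purely illustrative).  */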
1290
1291 /* This was formerly used only for non-IEEE float.
1292 eggert@twinsun.com says it is safe for IEEE also. */
1293 HOST_WIDE_INT xh, xl, th, tl;
1294 REAL_VALUE_TYPE x, t;
1295 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1296 switch (code)
1297 {
1298 case FIX:
1299 if (REAL_VALUE_ISNAN (x))
1300 return const0_rtx;
1301
1302 /* Test against the signed upper bound. */
1303 if (width > HOST_BITS_PER_WIDE_INT)
1304 {
1305 th = ((unsigned HOST_WIDE_INT) 1
1306 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1307 tl = -1;
1308 }
1309 else
1310 {
1311 th = 0;
1312 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1313 }
1314 real_from_integer (&t, VOIDmode, tl, th, 0);
1315 if (REAL_VALUES_LESS (t, x))
1316 {
1317 xh = th;
1318 xl = tl;
1319 break;
1320 }
1321
1322 /* Test against the signed lower bound. */
1323 if (width > HOST_BITS_PER_WIDE_INT)
1324 {
1325 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1326 tl = 0;
1327 }
1328 else
1329 {
1330 th = -1;
1331 tl = (HOST_WIDE_INT) -1 << (width - 1);
1332 }
1333 real_from_integer (&t, VOIDmode, tl, th, 0);
1334 if (REAL_VALUES_LESS (x, t))
1335 {
1336 xh = th;
1337 xl = tl;
1338 break;
1339 }
1340 REAL_VALUE_TO_INT (&xl, &xh, x);
1341 break;
1342
1343 case UNSIGNED_FIX:
1344 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1345 return const0_rtx;
1346
1347 /* Test against the unsigned upper bound. */
1348 if (width == 2*HOST_BITS_PER_WIDE_INT)
1349 {
1350 th = -1;
1351 tl = -1;
1352 }
1353 else if (width >= HOST_BITS_PER_WIDE_INT)
1354 {
1355 th = ((unsigned HOST_WIDE_INT) 1
1356 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1357 tl = -1;
1358 }
1359 else
1360 {
1361 th = 0;
1362 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1363 }
1364 real_from_integer (&t, VOIDmode, tl, th, 1);
1365 if (REAL_VALUES_LESS (t, x))
1366 {
1367 xh = th;
1368 xl = tl;
1369 break;
1370 }
1371
1372 REAL_VALUE_TO_INT (&xl, &xh, x);
1373 break;
1374
1375 default:
1376 gcc_unreachable ();
1377 }
1378 return immed_double_const (xl, xh, mode);
1379 }
1380
1381 return NULL_RTX;
1382 }
1383 \f
1384 /* Subroutine of simplify_binary_operation to simplify a commutative,
1385 associative binary operation CODE with result mode MODE, operating
1386 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1387 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1388 canonicalization is possible. */
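/* For example, called with OP0 = (and:SI (reg:SI 1) (const_int 255)) and
   OP1 = (const_int 15), the two constants are combined, yielding
   (and:SI (reg:SI 1) (const_int 15)).  The register number is illustrative.  */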
1389
1390 static rtx
1391 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1392 rtx op0, rtx op1)
1393 {
1394 rtx tem;
1395
1396 /* Linearize the operator to the left. */
1397 if (GET_CODE (op1) == code)
1398 {
1399 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1400 if (GET_CODE (op0) == code)
1401 {
1402 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1403 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1404 }
1405
1406 /* "a op (b op c)" becomes "(b op c) op a". */
1407 if (! swap_commutative_operands_p (op1, op0))
1408 return simplify_gen_binary (code, mode, op1, op0);
1409
1410 tem = op0;
1411 op0 = op1;
1412 op1 = tem;
1413 }
1414
1415 if (GET_CODE (op0) == code)
1416 {
1417 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1418 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1419 {
1420 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1421 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1422 }
1423
1424 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1425 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1426 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1427 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1428 if (tem != 0)
1429 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1430
1431 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1432 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1433 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1434 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1435 if (tem != 0)
1436 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1437 }
1438
1439 return 0;
1440 }
1441
1442
1443 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1444 and OP1. Return 0 if no simplification is possible.
1445
1446 Don't use this for relational operations such as EQ or LT.
1447 Use simplify_relational_operation instead. */
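/* For instance, simplify_binary_operation (PLUS, SImode, GEN_INT (2),
   GEN_INT (3)) folds to (const_int 5), while a combination that cannot be
   folded or canonicalized returns 0.  */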
1448 rtx
1449 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1450 rtx op0, rtx op1)
1451 {
1452 rtx trueop0, trueop1;
1453 rtx tem;
1454
1455 /* Relational operations don't work here. We must know the mode
1456 of the operands in order to do the comparison correctly.
1457 Assuming a full word can give incorrect results.
1458 Consider comparing 128 with -128 in QImode. */
1459 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1460 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1461
1462 /* Make sure the constant is second. */
1463 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1464 && swap_commutative_operands_p (op0, op1))
1465 {
1466 tem = op0, op0 = op1, op1 = tem;
1467 }
1468
1469 trueop0 = avoid_constant_pool_reference (op0);
1470 trueop1 = avoid_constant_pool_reference (op1);
1471
1472 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1473 if (tem)
1474 return tem;
1475 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1476 }
1477
1478 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1479 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1480 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1481 actual constants. */
1482
1483 static rtx
1484 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1485 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1486 {
1487 rtx tem, reversed, opleft, opright;
1488 HOST_WIDE_INT val;
1489 unsigned int width = GET_MODE_BITSIZE (mode);
1490
1491 /* Even if we can't compute a constant result,
1492 there are some cases worth simplifying. */
1493
1494 switch (code)
1495 {
1496 case PLUS:
1497 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1498 when x is NaN, infinite, or finite and nonzero. They aren't
1499 when x is -0 and the rounding mode is not towards -infinity,
1500 since (-0) + 0 is then 0. */
1501 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1502 return op0;
1503
1504 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1505 transformations are safe even for IEEE. */
1506 if (GET_CODE (op0) == NEG)
1507 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1508 else if (GET_CODE (op1) == NEG)
1509 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1510
1511 /* (~a) + 1 -> -a */
1512 if (INTEGRAL_MODE_P (mode)
1513 && GET_CODE (op0) == NOT
1514 && trueop1 == const1_rtx)
1515 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1516
1517 /* Handle both-operands-constant cases. We can only add
1518 CONST_INTs to constants since the sum of relocatable symbols
1519 can't be handled by most assemblers. Don't add CONST_INT
1520 to CONST_INT since overflow won't be computed properly if wider
1521 than HOST_BITS_PER_WIDE_INT. */
1522
1523 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1524 && GET_CODE (op1) == CONST_INT)
1525 return plus_constant (op0, INTVAL (op1));
1526 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1527 && GET_CODE (op0) == CONST_INT)
1528 return plus_constant (op1, INTVAL (op0));
1529
1530 /* See if this is something like X * C - X or vice versa or
1531 if the multiplication is written as a shift. If so, we can
1532 distribute and make a new multiply, shift, or maybe just
1533 have X (if C is 2 in the example above). But don't make
1534 something more expensive than we had before. */
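      /* E.g. (plus (mult (reg:SI 1) (const_int 3)) (reg:SI 1)) can become
         (mult (reg:SI 1) (const_int 4)), provided rtx_cost does not judge
         the new form more expensive (register number is illustrative).  */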
1535
1536 if (SCALAR_INT_MODE_P (mode))
1537 {
1538 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1539 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1540 rtx lhs = op0, rhs = op1;
1541
1542 if (GET_CODE (lhs) == NEG)
1543 {
1544 coeff0l = -1;
1545 coeff0h = -1;
1546 lhs = XEXP (lhs, 0);
1547 }
1548 else if (GET_CODE (lhs) == MULT
1549 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1550 {
1551 coeff0l = INTVAL (XEXP (lhs, 1));
1552 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1553 lhs = XEXP (lhs, 0);
1554 }
1555 else if (GET_CODE (lhs) == ASHIFT
1556 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1557 && INTVAL (XEXP (lhs, 1)) >= 0
1558 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1559 {
1560 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1561 coeff0h = 0;
1562 lhs = XEXP (lhs, 0);
1563 }
1564
1565 if (GET_CODE (rhs) == NEG)
1566 {
1567 coeff1l = -1;
1568 coeff1h = -1;
1569 rhs = XEXP (rhs, 0);
1570 }
1571 else if (GET_CODE (rhs) == MULT
1572 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1573 {
1574 coeff1l = INTVAL (XEXP (rhs, 1));
1575 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1576 rhs = XEXP (rhs, 0);
1577 }
1578 else if (GET_CODE (rhs) == ASHIFT
1579 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1580 && INTVAL (XEXP (rhs, 1)) >= 0
1581 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1582 {
1583 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1584 coeff1h = 0;
1585 rhs = XEXP (rhs, 0);
1586 }
1587
1588 if (rtx_equal_p (lhs, rhs))
1589 {
1590 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1591 rtx coeff;
1592 unsigned HOST_WIDE_INT l;
1593 HOST_WIDE_INT h;
1594
1595 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1596 coeff = immed_double_const (l, h, mode);
1597
1598 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1599 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1600 ? tem : 0;
1601 }
1602 }
1603
1604 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1605 if ((GET_CODE (op1) == CONST_INT
1606 || GET_CODE (op1) == CONST_DOUBLE)
1607 && GET_CODE (op0) == XOR
1608 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1609 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1610 && mode_signbit_p (mode, op1))
1611 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1612 simplify_gen_binary (XOR, mode, op1,
1613 XEXP (op0, 1)));
1614
1615 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1616 if (GET_CODE (op0) == MULT
1617 && GET_CODE (XEXP (op0, 0)) == NEG)
1618 {
1619 rtx in1, in2;
1620
1621 in1 = XEXP (XEXP (op0, 0), 0);
1622 in2 = XEXP (op0, 1);
1623 return simplify_gen_binary (MINUS, mode, op1,
1624 simplify_gen_binary (MULT, mode,
1625 in1, in2));
1626 }
1627
1628 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1629 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1630 is 1. */
1631 if (COMPARISON_P (op0)
1632 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1633 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1634 && (reversed = reversed_comparison (op0, mode)))
1635 return
1636 simplify_gen_unary (NEG, mode, reversed, mode);
1637
1638 /* If one of the operands is a PLUS or a MINUS, see if we can
1639 simplify this by the associative law.
1640 Don't use the associative law for floating point.
1641 The inaccuracy makes it nonassociative,
1642 and subtle programs can break if operations are associated. */
1643
1644 if (INTEGRAL_MODE_P (mode)
1645 && (plus_minus_operand_p (op0)
1646 || plus_minus_operand_p (op1))
1647 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1648 return tem;
1649
1650 /* Reassociate floating point addition only when the user
1651 specifies unsafe math optimizations. */
1652 if (FLOAT_MODE_P (mode)
1653 && flag_unsafe_math_optimizations)
1654 {
1655 tem = simplify_associative_operation (code, mode, op0, op1);
1656 if (tem)
1657 return tem;
1658 }
1659 break;
1660
1661 case COMPARE:
1662 #ifdef HAVE_cc0
1663 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1664 using cc0, in which case we want to leave it as a COMPARE
1665 so we can distinguish it from a register-register-copy.
1666
1667 In IEEE floating point, x-0 is not the same as x. */
1668
1669 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1670 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1671 && trueop1 == CONST0_RTX (mode))
1672 return op0;
1673 #endif
1674
1675 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1676 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1677 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1678 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1679 {
1680 rtx xop00 = XEXP (op0, 0);
1681 rtx xop10 = XEXP (op1, 0);
1682
1683 #ifdef HAVE_cc0
1684 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1685 #else
1686 if (REG_P (xop00) && REG_P (xop10)
1687 && GET_MODE (xop00) == GET_MODE (xop10)
1688 && REGNO (xop00) == REGNO (xop10)
1689 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1690 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1691 #endif
1692 return xop00;
1693 }
1694 break;
1695
1696 case MINUS:
1697 /* We can't assume x-x is 0 even with non-IEEE floating point,
1698 but since it is zero except in very strange circumstances, we
1699 will treat it as zero with -funsafe-math-optimizations. */
1700 if (rtx_equal_p (trueop0, trueop1)
1701 && ! side_effects_p (op0)
1702 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1703 return CONST0_RTX (mode);
1704
1705 /* Change subtraction from zero into negation. (0 - x) is the
1706 same as -x when x is NaN, infinite, or finite and nonzero.
1707 But if the mode has signed zeros, and does not round towards
1708 -infinity, then 0 - 0 is 0, not -0. */
1709 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1710 return simplify_gen_unary (NEG, mode, op1, mode);
1711
1712 /* (-1 - a) is ~a. */
1713 if (trueop0 == constm1_rtx)
1714 return simplify_gen_unary (NOT, mode, op1, mode);
1715
1716 /* Subtracting 0 has no effect unless the mode has signed zeros
1717 and supports rounding towards -infinity. In such a case,
1718 0 - 0 is -0. */
1719 if (!(HONOR_SIGNED_ZEROS (mode)
1720 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1721 && trueop1 == CONST0_RTX (mode))
1722 return op0;
1723
1724 /* See if this is something like X * C - X or vice versa or
1725 if the multiplication is written as a shift. If so, we can
1726 distribute and make a new multiply, shift, or maybe just
1727 have X (if C is 2 in the example above). But don't make
1728 something more expensive than we had before. */
1729
1730 if (SCALAR_INT_MODE_P (mode))
1731 {
1732 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1733 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1734 rtx lhs = op0, rhs = op1;
1735
1736 if (GET_CODE (lhs) == NEG)
1737 {
1738 coeff0l = -1;
1739 coeff0h = -1;
1740 lhs = XEXP (lhs, 0);
1741 }
1742 else if (GET_CODE (lhs) == MULT
1743 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1744 {
1745 coeff0l = INTVAL (XEXP (lhs, 1));
1746 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1747 lhs = XEXP (lhs, 0);
1748 }
1749 else if (GET_CODE (lhs) == ASHIFT
1750 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1751 && INTVAL (XEXP (lhs, 1)) >= 0
1752 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1753 {
1754 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1755 coeff0h = 0;
1756 lhs = XEXP (lhs, 0);
1757 }
1758
1759 if (GET_CODE (rhs) == NEG)
1760 {
1761 negcoeff1l = 1;
1762 negcoeff1h = 0;
1763 rhs = XEXP (rhs, 0);
1764 }
1765 else if (GET_CODE (rhs) == MULT
1766 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1767 {
1768 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1769 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1770 rhs = XEXP (rhs, 0);
1771 }
1772 else if (GET_CODE (rhs) == ASHIFT
1773 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1774 && INTVAL (XEXP (rhs, 1)) >= 0
1775 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1776 {
1777 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1778 negcoeff1h = -1;
1779 rhs = XEXP (rhs, 0);
1780 }
1781
1782 if (rtx_equal_p (lhs, rhs))
1783 {
1784 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1785 rtx coeff;
1786 unsigned HOST_WIDE_INT l;
1787 HOST_WIDE_INT h;
1788
1789 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1790 coeff = immed_double_const (l, h, mode);
1791
1792 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1793 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1794 ? tem : 0;
1795 }
1796 }
1797
1798 /* (a - (-b)) -> (a + b). True even for IEEE. */
1799 if (GET_CODE (op1) == NEG)
1800 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1801
1802 /* (-x - c) may be simplified as (-c - x). */
1803 if (GET_CODE (op0) == NEG
1804 && (GET_CODE (op1) == CONST_INT
1805 || GET_CODE (op1) == CONST_DOUBLE))
1806 {
1807 tem = simplify_unary_operation (NEG, mode, op1, mode);
1808 if (tem)
1809 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1810 }
1811
1812 /* Don't let a relocatable value get a negative coeff. */
1813 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1814 return simplify_gen_binary (PLUS, mode,
1815 op0,
1816 neg_const_int (mode, op1));
1817
1818 /* (x - (x & y)) -> (x & ~y) */
1819 if (GET_CODE (op1) == AND)
1820 {
1821 if (rtx_equal_p (op0, XEXP (op1, 0)))
1822 {
1823 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1824 GET_MODE (XEXP (op1, 1)));
1825 return simplify_gen_binary (AND, mode, op0, tem);
1826 }
1827 if (rtx_equal_p (op0, XEXP (op1, 1)))
1828 {
1829 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1830 GET_MODE (XEXP (op1, 0)));
1831 return simplify_gen_binary (AND, mode, op0, tem);
1832 }
1833 }
1834
1835 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1836 by reversing the comparison code if valid. */
1837 if (STORE_FLAG_VALUE == 1
1838 && trueop0 == const1_rtx
1839 && COMPARISON_P (op1)
1840 && (reversed = reversed_comparison (op1, mode)))
1841 return reversed;
1842
1843 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1844 if (GET_CODE (op1) == MULT
1845 && GET_CODE (XEXP (op1, 0)) == NEG)
1846 {
1847 rtx in1, in2;
1848
1849 in1 = XEXP (XEXP (op1, 0), 0);
1850 in2 = XEXP (op1, 1);
1851 return simplify_gen_binary (PLUS, mode,
1852 simplify_gen_binary (MULT, mode,
1853 in1, in2),
1854 op0);
1855 }
1856
1857 /* Canonicalize (minus (neg A) (mult B C)) to
1858 (minus (mult (neg B) C) A). */
1859 if (GET_CODE (op1) == MULT
1860 && GET_CODE (op0) == NEG)
1861 {
1862 rtx in1, in2;
1863
1864 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1865 in2 = XEXP (op1, 1);
1866 return simplify_gen_binary (MINUS, mode,
1867 simplify_gen_binary (MULT, mode,
1868 in1, in2),
1869 XEXP (op0, 0));
1870 }
1871
1872 /* If one of the operands is a PLUS or a MINUS, see if we can
1873 simplify this by the associative law. This will, for example,
1874 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1875 Don't use the associative law for floating point.
1876 The inaccuracy makes it nonassociative,
1877 and subtle programs can break if operations are associated. */
1878
1879 if (INTEGRAL_MODE_P (mode)
1880 && (plus_minus_operand_p (op0)
1881 || plus_minus_operand_p (op1))
1882 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1883 return tem;
1884 break;
1885
1886 case MULT:
1887 if (trueop1 == constm1_rtx)
1888 return simplify_gen_unary (NEG, mode, op0, mode);
1889
1890 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1891 x is NaN, since x * 0 is then also NaN. Nor is it valid
1892 when the mode has signed zeros, since multiplying a negative
1893 number by 0 will give -0, not 0. */
1894 if (!HONOR_NANS (mode)
1895 && !HONOR_SIGNED_ZEROS (mode)
1896 && trueop1 == CONST0_RTX (mode)
1897 && ! side_effects_p (op0))
1898 return op1;
1899
1900 /* In IEEE floating point, x*1 is not equivalent to x for
1901 signalling NaNs. */
1902 if (!HONOR_SNANS (mode)
1903 && trueop1 == CONST1_RTX (mode))
1904 return op0;
1905
1906 /* Convert multiply by constant power of two into shift. */
1908 if (GET_CODE (trueop1) == CONST_INT
1909 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1910 /* If the mode is larger than the host word size, and the
1911 uppermost bit is set, then this isn't a power of two due
1912 to implicit sign extension. */
1913 && (width <= HOST_BITS_PER_WIDE_INT
1914 || val != HOST_BITS_PER_WIDE_INT - 1))
1915 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1916
1917 /* Likewise for multipliers wider than a word. */
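/* For instance, assuming a 64-bit HOST_WIDE_INT, a TImode multiplication by
the CONST_DOUBLE constant 2^64 (low word 0, high word 1) has val == 0 and
becomes an ASHIFT by 64.  */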
1918 if (GET_CODE (trueop1) == CONST_DOUBLE
1919 && (GET_MODE (trueop1) == VOIDmode
1920 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1921 && GET_MODE (op0) == mode
1922 && CONST_DOUBLE_LOW (trueop1) == 0
1923 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1924 return simplify_gen_binary (ASHIFT, mode, op0,
1925 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1926
1927 /* x*2 is x+x and x*(-1) is -x */
1928 if (GET_CODE (trueop1) == CONST_DOUBLE
1929 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1930 && GET_MODE (op0) == mode)
1931 {
1932 REAL_VALUE_TYPE d;
1933 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1934
1935 if (REAL_VALUES_EQUAL (d, dconst2))
1936 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1937
1938 if (!HONOR_SNANS (mode)
1939 && REAL_VALUES_EQUAL (d, dconstm1))
1940 return simplify_gen_unary (NEG, mode, op0, mode);
1941 }
1942
1943 /* Optimize -x * -x as x * x. */
1944 if (FLOAT_MODE_P (mode)
1945 && GET_CODE (op0) == NEG
1946 && GET_CODE (op1) == NEG
1947 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1948 && !side_effects_p (XEXP (op0, 0)))
1949 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1950
1951 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1952 if (SCALAR_FLOAT_MODE_P (mode)
1953 && GET_CODE (op0) == ABS
1954 && GET_CODE (op1) == ABS
1955 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1956 && !side_effects_p (XEXP (op0, 0)))
1957 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1958
1959 /* Reassociate multiplication, but for floating point MULTs
1960 only when the user specifies unsafe math optimizations. */
1961 if (! FLOAT_MODE_P (mode)
1962 || flag_unsafe_math_optimizations)
1963 {
1964 tem = simplify_associative_operation (code, mode, op0, op1);
1965 if (tem)
1966 return tem;
1967 }
1968 break;
1969
1970 case IOR:
1971 if (trueop1 == const0_rtx)
1972 return op0;
1973 if (GET_CODE (trueop1) == CONST_INT
1974 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1975 == GET_MODE_MASK (mode)))
1976 return op1;
1977 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1978 return op0;
1979 /* A | (~A) -> -1 */
1980 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1981 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1982 && ! side_effects_p (op0)
1983 && SCALAR_INT_MODE_P (mode))
1984 return constm1_rtx;
1985
1986 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1987 if (GET_CODE (op1) == CONST_INT
1988 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1989 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1990 return op1;
1991
1992 /* Convert (A & B) | A to A. */
1993 if (GET_CODE (op0) == AND
1994 && (rtx_equal_p (XEXP (op0, 0), op1)
1995 || rtx_equal_p (XEXP (op0, 1), op1))
1996 && ! side_effects_p (XEXP (op0, 0))
1997 && ! side_effects_p (XEXP (op0, 1)))
1998 return op1;
1999
2000 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2001 mode size to (rotate A CX). */
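/* For example, in SImode (32 bits), (ior (ashift a (const_int 8))
(lshiftrt a (const_int 24))) computes (a << 8) | (a >> 24), which is
(rotate a (const_int 8)).  */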
2002
2003 if (GET_CODE (op1) == ASHIFT
2004 || GET_CODE (op1) == SUBREG)
2005 {
2006 opleft = op1;
2007 opright = op0;
2008 }
2009 else
2010 {
2011 opright = op1;
2012 opleft = op0;
2013 }
2014
2015 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2016 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2017 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2018 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2019 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2020 == GET_MODE_BITSIZE (mode)))
2021 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2022
2023 /* Same, but for ashift that has been "simplified" to a wider mode
2024 by simplify_shift_const. */
2025
2026 if (GET_CODE (opleft) == SUBREG
2027 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2028 && GET_CODE (opright) == LSHIFTRT
2029 && GET_CODE (XEXP (opright, 0)) == SUBREG
2030 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2031 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2032 && (GET_MODE_SIZE (GET_MODE (opleft))
2033 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2034 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2035 SUBREG_REG (XEXP (opright, 0)))
2036 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2037 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2038 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2039 == GET_MODE_BITSIZE (mode)))
2040 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2041 XEXP (SUBREG_REG (opleft), 1));
2042
2043 /* If we have (ior (and X C1) C2), simplify this by making
2044 C1 as small as possible if C1 actually changes. */
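/* For example, (ior (and x (const_int 0xff)) (const_int 0x0f)) becomes
(ior (and x (const_int 0xf0)) (const_int 0x0f)): bits already forced to 1
by the IOR are dropped from the AND mask.  */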
2045 if (GET_CODE (op1) == CONST_INT
2046 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2047 || INTVAL (op1) > 0)
2048 && GET_CODE (op0) == AND
2049 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2050 && GET_CODE (op1) == CONST_INT
2051 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2052 return simplify_gen_binary (IOR, mode,
2053 simplify_gen_binary
2054 (AND, mode, XEXP (op0, 0),
2055 GEN_INT (INTVAL (XEXP (op0, 1))
2056 & ~INTVAL (op1))),
2057 op1);
2058
2059 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2060 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2061 the PLUS does not affect any of the bits in OP1: then we can do
2062 the IOR as a PLUS and we can associate. This is valid if OP1
2063 can be safely shifted left C bits. */
2064 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2065 && GET_CODE (XEXP (op0, 0)) == PLUS
2066 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2067 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2068 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2069 {
2070 int count = INTVAL (XEXP (op0, 1));
2071 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2072
2073 if (mask >> count == INTVAL (trueop1)
2074 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2075 return simplify_gen_binary (ASHIFTRT, mode,
2076 plus_constant (XEXP (op0, 0), mask),
2077 XEXP (op0, 1));
2078 }
2079
2080 tem = simplify_associative_operation (code, mode, op0, op1);
2081 if (tem)
2082 return tem;
2083 break;
2084
2085 case XOR:
2086 if (trueop1 == const0_rtx)
2087 return op0;
2088 if (GET_CODE (trueop1) == CONST_INT
2089 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2090 == GET_MODE_MASK (mode)))
2091 return simplify_gen_unary (NOT, mode, op0, mode);
2092 if (rtx_equal_p (trueop0, trueop1)
2093 && ! side_effects_p (op0)
2094 && GET_MODE_CLASS (mode) != MODE_CC)
2095 return CONST0_RTX (mode);
2096
2097 /* Canonicalize XOR of the most significant bit to PLUS. */
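/* This is valid because adding the sign bit can only flip it; any carry
propagates out of the mode.  E.g. in QImode, (xor x (const_int -128)) and
(plus x (const_int -128)) compute the same value.  */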
2098 if ((GET_CODE (op1) == CONST_INT
2099 || GET_CODE (op1) == CONST_DOUBLE)
2100 && mode_signbit_p (mode, op1))
2101 return simplify_gen_binary (PLUS, mode, op0, op1);
2102 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2103 if ((GET_CODE (op1) == CONST_INT
2104 || GET_CODE (op1) == CONST_DOUBLE)
2105 && GET_CODE (op0) == PLUS
2106 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2107 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2108 && mode_signbit_p (mode, XEXP (op0, 1)))
2109 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2110 simplify_gen_binary (XOR, mode, op1,
2111 XEXP (op0, 1)));
2112
2113 /* If we are XORing two things that have no bits in common,
2114 convert them into an IOR. This helps to detect rotation encoded
2115 using those methods and possibly other simplifications. */
2116
2117 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2118 && (nonzero_bits (op0, mode)
2119 & nonzero_bits (op1, mode)) == 0)
2120 return (simplify_gen_binary (IOR, mode, op0, op1));
2121
2122 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2123 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2124 (NOT y). */
2125 {
2126 int num_negated = 0;
2127
2128 if (GET_CODE (op0) == NOT)
2129 num_negated++, op0 = XEXP (op0, 0);
2130 if (GET_CODE (op1) == NOT)
2131 num_negated++, op1 = XEXP (op1, 0);
2132
2133 if (num_negated == 2)
2134 return simplify_gen_binary (XOR, mode, op0, op1);
2135 else if (num_negated == 1)
2136 return simplify_gen_unary (NOT, mode,
2137 simplify_gen_binary (XOR, mode, op0, op1),
2138 mode);
2139 }
2140
2141 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2142 correspond to a machine insn or result in further simplifications
2143 if B is a constant. */
2144
2145 if (GET_CODE (op0) == AND
2146 && rtx_equal_p (XEXP (op0, 1), op1)
2147 && ! side_effects_p (op1))
2148 return simplify_gen_binary (AND, mode,
2149 simplify_gen_unary (NOT, mode,
2150 XEXP (op0, 0), mode),
2151 op1);
2152
2153 else if (GET_CODE (op0) == AND
2154 && rtx_equal_p (XEXP (op0, 0), op1)
2155 && ! side_effects_p (op1))
2156 return simplify_gen_binary (AND, mode,
2157 simplify_gen_unary (NOT, mode,
2158 XEXP (op0, 1), mode),
2159 op1);
2160
2161 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2162 comparison if STORE_FLAG_VALUE is 1. */
2163 if (STORE_FLAG_VALUE == 1
2164 && trueop1 == const1_rtx
2165 && COMPARISON_P (op0)
2166 && (reversed = reversed_comparison (op0, mode)))
2167 return reversed;
2168
2169 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2170 is (lt foo (const_int 0)), so we can perform the above
2171 simplification if STORE_FLAG_VALUE is 1. */
2172
2173 if (STORE_FLAG_VALUE == 1
2174 && trueop1 == const1_rtx
2175 && GET_CODE (op0) == LSHIFTRT
2176 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2177 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2178 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2179
2180 /* (xor (comparison foo bar) (const_int sign-bit))
2181 when STORE_FLAG_VALUE is the sign bit. */
2182 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2183 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2184 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2185 && trueop1 == const_true_rtx
2186 && COMPARISON_P (op0)
2187 && (reversed = reversed_comparison (op0, mode)))
2188 return reversed;
2189
2191
2192 tem = simplify_associative_operation (code, mode, op0, op1);
2193 if (tem)
2194 return tem;
2195 break;
2196
2197 case AND:
2198 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2199 return trueop1;
2200 /* If we are turning off bits already known off in OP0, we need
2201 not do an AND. */
2202 if (GET_CODE (trueop1) == CONST_INT
2203 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2204 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2205 return op0;
2206 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2207 && GET_MODE_CLASS (mode) != MODE_CC)
2208 return op0;
2209 /* A & (~A) -> 0 */
2210 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2211 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2212 && ! side_effects_p (op0)
2213 && GET_MODE_CLASS (mode) != MODE_CC)
2214 return CONST0_RTX (mode);
2215
2216 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2217 there are no nonzero bits of C outside of X's mode. */
2218 if ((GET_CODE (op0) == SIGN_EXTEND
2219 || GET_CODE (op0) == ZERO_EXTEND)
2220 && GET_CODE (trueop1) == CONST_INT
2221 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2222 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2223 & INTVAL (trueop1)) == 0)
2224 {
2225 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2226 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2227 gen_int_mode (INTVAL (trueop1),
2228 imode));
2229 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2230 }
2231
2232 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2233 insn (and may simplify more). */
2234 if (GET_CODE (op0) == XOR
2235 && rtx_equal_p (XEXP (op0, 0), op1)
2236 && ! side_effects_p (op1))
2237 return simplify_gen_binary (AND, mode,
2238 simplify_gen_unary (NOT, mode,
2239 XEXP (op0, 1), mode),
2240 op1);
2241
2242 if (GET_CODE (op0) == XOR
2243 && rtx_equal_p (XEXP (op0, 1), op1)
2244 && ! side_effects_p (op1))
2245 return simplify_gen_binary (AND, mode,
2246 simplify_gen_unary (NOT, mode,
2247 XEXP (op0, 0), mode),
2248 op1);
2249
2250 /* Similarly for (~(A ^ B)) & A. */
2251 if (GET_CODE (op0) == NOT
2252 && GET_CODE (XEXP (op0, 0)) == XOR
2253 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2254 && ! side_effects_p (op1))
2255 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2256
2257 if (GET_CODE (op0) == NOT
2258 && GET_CODE (XEXP (op0, 0)) == XOR
2259 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2260 && ! side_effects_p (op1))
2261 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2262
2263 /* Convert (A | B) & A to A. */
2264 if (GET_CODE (op0) == IOR
2265 && (rtx_equal_p (XEXP (op0, 0), op1)
2266 || rtx_equal_p (XEXP (op0, 1), op1))
2267 && ! side_effects_p (XEXP (op0, 0))
2268 && ! side_effects_p (XEXP (op0, 1)))
2269 return op1;
2270
2271 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2272 ((A & N) + B) & M -> (A + B) & M
2273 Similarly if (N & M) == 0,
2274 ((A | N) + B) & M -> (A + B) & M
2275 and for - instead of + and/or ^ instead of |. */
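/* For example, with M == 0xff: ((a & 0xffff) + b) & 0xff becomes
(a + b) & 0xff, since carries only propagate upward and the cleared high
bits of A cannot affect the low 8 bits of the sum; likewise
((a | 0x100) + b) & 0xff becomes (a + b) & 0xff because bit 8 cannot
influence bits 0-7 of the sum.  */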
2276 if (GET_CODE (trueop1) == CONST_INT
2277 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2278 && ~INTVAL (trueop1)
2279 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2280 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2281 {
2282 rtx pmop[2];
2283 int which;
2284
2285 pmop[0] = XEXP (op0, 0);
2286 pmop[1] = XEXP (op0, 1);
2287
2288 for (which = 0; which < 2; which++)
2289 {
2290 tem = pmop[which];
2291 switch (GET_CODE (tem))
2292 {
2293 case AND:
2294 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2295 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2296 == INTVAL (trueop1))
2297 pmop[which] = XEXP (tem, 0);
2298 break;
2299 case IOR:
2300 case XOR:
2301 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2302 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2303 pmop[which] = XEXP (tem, 0);
2304 break;
2305 default:
2306 break;
2307 }
2308 }
2309
2310 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2311 {
2312 tem = simplify_gen_binary (GET_CODE (op0), mode,
2313 pmop[0], pmop[1]);
2314 return simplify_gen_binary (code, mode, tem, op1);
2315 }
2316 }
2317 tem = simplify_associative_operation (code, mode, op0, op1);
2318 if (tem)
2319 return tem;
2320 break;
2321
2322 case UDIV:
2323 /* 0/x is 0 (or x&0 if x has side-effects). */
2324 if (trueop0 == CONST0_RTX (mode))
2325 {
2326 if (side_effects_p (op1))
2327 return simplify_gen_binary (AND, mode, op1, trueop0);
2328 return trueop0;
2329 }
2330 /* x/1 is x. */
2331 if (trueop1 == CONST1_RTX (mode))
2332 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2333 /* Convert divide by power of two into shift. */
2334 if (GET_CODE (trueop1) == CONST_INT
2335 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2336 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2337 break;
2338
2339 case DIV:
2340 /* Handle floating point and integers separately. */
2341 if (SCALAR_FLOAT_MODE_P (mode))
2342 {
2343 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2344 safe for modes with NaNs, since 0.0 / 0.0 will then be
2345 NaN rather than 0.0. Nor is it safe for modes with signed
2346 zeros, since dividing 0 by a negative number gives -0.0 */
2347 if (trueop0 == CONST0_RTX (mode)
2348 && !HONOR_NANS (mode)
2349 && !HONOR_SIGNED_ZEROS (mode)
2350 && ! side_effects_p (op1))
2351 return op0;
2352 /* x/1.0 is x. */
2353 if (trueop1 == CONST1_RTX (mode)
2354 && !HONOR_SNANS (mode))
2355 return op0;
2356
2357 if (GET_CODE (trueop1) == CONST_DOUBLE
2358 && trueop1 != CONST0_RTX (mode))
2359 {
2360 REAL_VALUE_TYPE d;
2361 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2362
2363 /* x/-1.0 is -x. */
2364 if (REAL_VALUES_EQUAL (d, dconstm1)
2365 && !HONOR_SNANS (mode))
2366 return simplify_gen_unary (NEG, mode, op0, mode);
2367
2368 /* Change FP division by a constant into multiplication.
2369 Only do this with -funsafe-math-optimizations. */
2370 if (flag_unsafe_math_optimizations
2371 && !REAL_VALUES_EQUAL (d, dconst0))
2372 {
2373 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2374 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2375 return simplify_gen_binary (MULT, mode, op0, tem);
2376 }
2377 }
2378 }
2379 else
2380 {
2381 /* 0/x is 0 (or x&0 if x has side-effects). */
2382 if (trueop0 == CONST0_RTX (mode))
2383 {
2384 if (side_effects_p (op1))
2385 return simplify_gen_binary (AND, mode, op1, trueop0);
2386 return trueop0;
2387 }
2388 /* x/1 is x. */
2389 if (trueop1 == CONST1_RTX (mode))
2390 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2391 /* x/-1 is -x. */
2392 if (trueop1 == constm1_rtx)
2393 {
2394 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2395 return simplify_gen_unary (NEG, mode, x, mode);
2396 }
2397 }
2398 break;
2399
2400 case UMOD:
2401 /* 0%x is 0 (or x&0 if x has side-effects). */
2402 if (trueop0 == CONST0_RTX (mode))
2403 {
2404 if (side_effects_p (op1))
2405 return simplify_gen_binary (AND, mode, op1, trueop0);
2406 return trueop0;
2407 }
2408 /* x%1 is 0 (or x&0 if x has side-effects). */
2409 if (trueop1 == CONST1_RTX (mode))
2410 {
2411 if (side_effects_p (op0))
2412 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2413 return CONST0_RTX (mode);
2414 }
2415 /* Implement modulus by power of two as AND. */
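/* For unsigned values x % 2^n is x & (2^n - 1); e.g. (umod x (const_int 8))
becomes (and x (const_int 7)).  */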
2416 if (GET_CODE (trueop1) == CONST_INT
2417 && exact_log2 (INTVAL (trueop1)) > 0)
2418 return simplify_gen_binary (AND, mode, op0,
2419 GEN_INT (INTVAL (op1) - 1));
2420 break;
2421
2422 case MOD:
2423 /* 0%x is 0 (or x&0 if x has side-effects). */
2424 if (trueop0 == CONST0_RTX (mode))
2425 {
2426 if (side_effects_p (op1))
2427 return simplify_gen_binary (AND, mode, op1, trueop0);
2428 return trueop0;
2429 }
2430 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2431 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2432 {
2433 if (side_effects_p (op0))
2434 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2435 return CONST0_RTX (mode);
2436 }
2437 break;
2438
2439 case ROTATERT:
2440 case ROTATE:
2441 case ASHIFTRT:
2442 if (trueop1 == CONST0_RTX (mode))
2443 return op0;
2444 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2445 return op0;
2446 /* Rotating ~0 always results in ~0. */
2447 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2448 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2449 && ! side_effects_p (op1))
2450 return op0;
2451 break;
2452
2453 case ASHIFT:
2454 case SS_ASHIFT:
2455 if (trueop1 == CONST0_RTX (mode))
2456 return op0;
2457 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2458 return op0;
2459 break;
2460
2461 case LSHIFTRT:
2462 if (trueop1 == CONST0_RTX (mode))
2463 return op0;
2464 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2465 return op0;
2466 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
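/* For instance, in SImode on a target where CLZ_DEFINED_VALUE_AT_ZERO yields
32, (clz x) lies in [0,31] for nonzero x and is 32 for x == 0, so
(lshiftrt (clz x) (const_int 5)) is 1 exactly when x == 0, i.e. (eq x 0)
given STORE_FLAG_VALUE == 1.  */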
2467 if (GET_CODE (op0) == CLZ
2468 && GET_CODE (trueop1) == CONST_INT
2469 && STORE_FLAG_VALUE == 1
2470 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2471 {
2472 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2473 unsigned HOST_WIDE_INT zero_val = 0;
2474
2475 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2476 && zero_val == GET_MODE_BITSIZE (imode)
2477 && INTVAL (trueop1) == exact_log2 (zero_val))
2478 return simplify_gen_relational (EQ, mode, imode,
2479 XEXP (op0, 0), const0_rtx);
2480 }
2481 break;
2482
2483 case SMIN:
2484 if (width <= HOST_BITS_PER_WIDE_INT
2485 && GET_CODE (trueop1) == CONST_INT
2486 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2487 && ! side_effects_p (op0))
2488 return op1;
2489 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2490 return op0;
2491 tem = simplify_associative_operation (code, mode, op0, op1);
2492 if (tem)
2493 return tem;
2494 break;
2495
2496 case SMAX:
2497 if (width <= HOST_BITS_PER_WIDE_INT
2498 && GET_CODE (trueop1) == CONST_INT
2499 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2500 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2501 && ! side_effects_p (op0))
2502 return op1;
2503 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2504 return op0;
2505 tem = simplify_associative_operation (code, mode, op0, op1);
2506 if (tem)
2507 return tem;
2508 break;
2509
2510 case UMIN:
2511 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2512 return op1;
2513 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2514 return op0;
2515 tem = simplify_associative_operation (code, mode, op0, op1);
2516 if (tem)
2517 return tem;
2518 break;
2519
2520 case UMAX:
2521 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2522 return op1;
2523 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2524 return op0;
2525 tem = simplify_associative_operation (code, mode, op0, op1);
2526 if (tem)
2527 return tem;
2528 break;
2529
2530 case SS_PLUS:
2531 case US_PLUS:
2532 case SS_MINUS:
2533 case US_MINUS:
2534 /* ??? There are simplifications that can be done. */
2535 return 0;
2536
2537 case VEC_SELECT:
2538 if (!VECTOR_MODE_P (mode))
2539 {
2540 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2541 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2542 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2543 gcc_assert (XVECLEN (trueop1, 0) == 1);
2544 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2545
2546 if (GET_CODE (trueop0) == CONST_VECTOR)
2547 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2548 (trueop1, 0, 0)));
2549 }
2550 else
2551 {
2552 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2553 gcc_assert (GET_MODE_INNER (mode)
2554 == GET_MODE_INNER (GET_MODE (trueop0)));
2555 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2556
2557 if (GET_CODE (trueop0) == CONST_VECTOR)
2558 {
2559 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2560 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2561 rtvec v = rtvec_alloc (n_elts);
2562 unsigned int i;
2563
2564 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2565 for (i = 0; i < n_elts; i++)
2566 {
2567 rtx x = XVECEXP (trueop1, 0, i);
2568
2569 gcc_assert (GET_CODE (x) == CONST_INT);
2570 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2571 INTVAL (x));
2572 }
2573
2574 return gen_rtx_CONST_VECTOR (mode, v);
2575 }
2576 }
2577
2578 if (XVECLEN (trueop1, 0) == 1
2579 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2580 && GET_CODE (trueop0) == VEC_CONCAT)
2581 {
2582 rtx vec = trueop0;
2583 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2584
2585 /* Try to find the element in the VEC_CONCAT. */
2586 while (GET_MODE (vec) != mode
2587 && GET_CODE (vec) == VEC_CONCAT)
2588 {
2589 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2590 if (offset < vec_size)
2591 vec = XEXP (vec, 0);
2592 else
2593 {
2594 offset -= vec_size;
2595 vec = XEXP (vec, 1);
2596 }
2597 vec = avoid_constant_pool_reference (vec);
2598 }
2599
2600 if (GET_MODE (vec) == mode)
2601 return vec;
2602 }
2603
2604 return 0;
2605 case VEC_CONCAT:
2606 {
2607 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2608 ? GET_MODE (trueop0)
2609 : GET_MODE_INNER (mode));
2610 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2611 ? GET_MODE (trueop1)
2612 : GET_MODE_INNER (mode));
2613
2614 gcc_assert (VECTOR_MODE_P (mode));
2615 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2616 == GET_MODE_SIZE (mode));
2617
2618 if (VECTOR_MODE_P (op0_mode))
2619 gcc_assert (GET_MODE_INNER (mode)
2620 == GET_MODE_INNER (op0_mode));
2621 else
2622 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2623
2624 if (VECTOR_MODE_P (op1_mode))
2625 gcc_assert (GET_MODE_INNER (mode)
2626 == GET_MODE_INNER (op1_mode));
2627 else
2628 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2629
2630 if ((GET_CODE (trueop0) == CONST_VECTOR
2631 || GET_CODE (trueop0) == CONST_INT
2632 || GET_CODE (trueop0) == CONST_DOUBLE)
2633 && (GET_CODE (trueop1) == CONST_VECTOR
2634 || GET_CODE (trueop1) == CONST_INT
2635 || GET_CODE (trueop1) == CONST_DOUBLE))
2636 {
2637 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2638 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2639 rtvec v = rtvec_alloc (n_elts);
2640 unsigned int i;
2641 unsigned in_n_elts = 1;
2642
2643 if (VECTOR_MODE_P (op0_mode))
2644 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2645 for (i = 0; i < n_elts; i++)
2646 {
2647 if (i < in_n_elts)
2648 {
2649 if (!VECTOR_MODE_P (op0_mode))
2650 RTVEC_ELT (v, i) = trueop0;
2651 else
2652 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2653 }
2654 else
2655 {
2656 if (!VECTOR_MODE_P (op1_mode))
2657 RTVEC_ELT (v, i) = trueop1;
2658 else
2659 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2660 i - in_n_elts);
2661 }
2662 }
2663
2664 return gen_rtx_CONST_VECTOR (mode, v);
2665 }
2666 }
2667 return 0;
2668
2669 default:
2670 gcc_unreachable ();
2671 }
2672
2673 return 0;
2674 }
2675
2676 rtx
2677 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2678 rtx op0, rtx op1)
2679 {
2680 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2681 HOST_WIDE_INT val;
2682 unsigned int width = GET_MODE_BITSIZE (mode);
2683
2684 if (VECTOR_MODE_P (mode)
2685 && code != VEC_CONCAT
2686 && GET_CODE (op0) == CONST_VECTOR
2687 && GET_CODE (op1) == CONST_VECTOR)
2688 {
2689 unsigned n_elts = GET_MODE_NUNITS (mode);
2690 enum machine_mode op0mode = GET_MODE (op0);
2691 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2692 enum machine_mode op1mode = GET_MODE (op1);
2693 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2694 rtvec v = rtvec_alloc (n_elts);
2695 unsigned int i;
2696
2697 gcc_assert (op0_n_elts == n_elts);
2698 gcc_assert (op1_n_elts == n_elts);
2699 for (i = 0; i < n_elts; i++)
2700 {
2701 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2702 CONST_VECTOR_ELT (op0, i),
2703 CONST_VECTOR_ELT (op1, i));
2704 if (!x)
2705 return 0;
2706 RTVEC_ELT (v, i) = x;
2707 }
2708
2709 return gen_rtx_CONST_VECTOR (mode, v);
2710 }
2711
2712 if (VECTOR_MODE_P (mode)
2713 && code == VEC_CONCAT
2714 && CONSTANT_P (op0) && CONSTANT_P (op1))
2715 {
2716 unsigned n_elts = GET_MODE_NUNITS (mode);
2717 rtvec v = rtvec_alloc (n_elts);
2718
2719 gcc_assert (n_elts >= 2);
2720 if (n_elts == 2)
2721 {
2722 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2723 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2724
2725 RTVEC_ELT (v, 0) = op0;
2726 RTVEC_ELT (v, 1) = op1;
2727 }
2728 else
2729 {
2730 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2731 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2732 unsigned i;
2733
2734 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2735 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2736 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2737
2738 for (i = 0; i < op0_n_elts; ++i)
2739 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2740 for (i = 0; i < op1_n_elts; ++i)
2741 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2742 }
2743
2744 return gen_rtx_CONST_VECTOR (mode, v);
2745 }
2746
2747 if (SCALAR_FLOAT_MODE_P (mode)
2748 && GET_CODE (op0) == CONST_DOUBLE
2749 && GET_CODE (op1) == CONST_DOUBLE
2750 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2751 {
2752 if (code == AND
2753 || code == IOR
2754 || code == XOR)
2755 {
2756 long tmp0[4];
2757 long tmp1[4];
2758 REAL_VALUE_TYPE r;
2759 int i;
2760
2761 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2762 GET_MODE (op0));
2763 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2764 GET_MODE (op1));
2765 for (i = 0; i < 4; i++)
2766 {
2767 switch (code)
2768 {
2769 case AND:
2770 tmp0[i] &= tmp1[i];
2771 break;
2772 case IOR:
2773 tmp0[i] |= tmp1[i];
2774 break;
2775 case XOR:
2776 tmp0[i] ^= tmp1[i];
2777 break;
2778 default:
2779 gcc_unreachable ();
2780 }
2781 }
2782 real_from_target (&r, tmp0, mode);
2783 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2784 }
2785 else
2786 {
2787 REAL_VALUE_TYPE f0, f1, value, result;
2788 bool inexact;
2789
2790 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2791 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2792 real_convert (&f0, mode, &f0);
2793 real_convert (&f1, mode, &f1);
2794
2795 if (HONOR_SNANS (mode)
2796 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2797 return 0;
2798
2799 if (code == DIV
2800 && REAL_VALUES_EQUAL (f1, dconst0)
2801 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2802 return 0;
2803
2804 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2805 && flag_trapping_math
2806 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2807 {
2808 int s0 = REAL_VALUE_NEGATIVE (f0);
2809 int s1 = REAL_VALUE_NEGATIVE (f1);
2810
2811 switch (code)
2812 {
2813 case PLUS:
2814 /* Inf + -Inf = NaN plus exception. */
2815 if (s0 != s1)
2816 return 0;
2817 break;
2818 case MINUS:
2819 /* Inf - Inf = NaN plus exception. */
2820 if (s0 == s1)
2821 return 0;
2822 break;
2823 case DIV:
2824 /* Inf / Inf = NaN plus exception. */
2825 return 0;
2826 default:
2827 break;
2828 }
2829 }
2830
2831 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2832 && flag_trapping_math
2833 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2834 || (REAL_VALUE_ISINF (f1)
2835 && REAL_VALUES_EQUAL (f0, dconst0))))
2836 /* Inf * 0 = NaN plus exception. */
2837 return 0;
2838
2839 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2840 &f0, &f1);
2841 real_convert (&result, mode, &value);
2842
2843 /* Don't constant fold this floating point operation if
2844 the result has overflowed and flag_trapping_math. */
2845
2846 if (flag_trapping_math
2847 && MODE_HAS_INFINITIES (mode)
2848 && REAL_VALUE_ISINF (result)
2849 && !REAL_VALUE_ISINF (f0)
2850 && !REAL_VALUE_ISINF (f1))
2851 /* Overflow plus exception. */
2852 return 0;
2853
2854 /* Don't constant fold this floating point operation if the
2855 result may depend upon the run-time rounding mode and
2856 flag_rounding_math is set, or if GCC's software emulation
2857 is unable to accurately represent the result. */
2858
2859 if ((flag_rounding_math
2860 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2861 && !flag_unsafe_math_optimizations))
2862 && (inexact || !real_identical (&result, &value)))
2863 return NULL_RTX;
2864
2865 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2866 }
2867 }
2868
2869 /* We can fold some multi-word operations. */
2870 if (GET_MODE_CLASS (mode) == MODE_INT
2871 && width == HOST_BITS_PER_WIDE_INT * 2
2872 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2873 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2874 {
2875 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2876 HOST_WIDE_INT h1, h2, hv, ht;
2877
2878 if (GET_CODE (op0) == CONST_DOUBLE)
2879 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2880 else
2881 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2882
2883 if (GET_CODE (op1) == CONST_DOUBLE)
2884 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2885 else
2886 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2887
2888 switch (code)
2889 {
2890 case MINUS:
2891 /* A - B == A + (-B). */
2892 neg_double (l2, h2, &lv, &hv);
2893 l2 = lv, h2 = hv;
2894
2895 /* Fall through.... */
2896
2897 case PLUS:
2898 add_double (l1, h1, l2, h2, &lv, &hv);
2899 break;
2900
2901 case MULT:
2902 mul_double (l1, h1, l2, h2, &lv, &hv);
2903 break;
2904
2905 case DIV:
2906 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2907 &lv, &hv, &lt, &ht))
2908 return 0;
2909 break;
2910
2911 case MOD:
2912 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2913 &lt, &ht, &lv, &hv))
2914 return 0;
2915 break;
2916
2917 case UDIV:
2918 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2919 &lv, &hv, &lt, &ht))
2920 return 0;
2921 break;
2922
2923 case UMOD:
2924 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2925 &lt, &ht, &lv, &hv))
2926 return 0;
2927 break;
2928
2929 case AND:
2930 lv = l1 & l2, hv = h1 & h2;
2931 break;
2932
2933 case IOR:
2934 lv = l1 | l2, hv = h1 | h2;
2935 break;
2936
2937 case XOR:
2938 lv = l1 ^ l2, hv = h1 ^ h2;
2939 break;
2940
2941 case SMIN:
2942 if (h1 < h2
2943 || (h1 == h2
2944 && ((unsigned HOST_WIDE_INT) l1
2945 < (unsigned HOST_WIDE_INT) l2)))
2946 lv = l1, hv = h1;
2947 else
2948 lv = l2, hv = h2;
2949 break;
2950
2951 case SMAX:
2952 if (h1 > h2
2953 || (h1 == h2
2954 && ((unsigned HOST_WIDE_INT) l1
2955 > (unsigned HOST_WIDE_INT) l2)))
2956 lv = l1, hv = h1;
2957 else
2958 lv = l2, hv = h2;
2959 break;
2960
2961 case UMIN:
2962 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2963 || (h1 == h2
2964 && ((unsigned HOST_WIDE_INT) l1
2965 < (unsigned HOST_WIDE_INT) l2)))
2966 lv = l1, hv = h1;
2967 else
2968 lv = l2, hv = h2;
2969 break;
2970
2971 case UMAX:
2972 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2973 || (h1 == h2
2974 && ((unsigned HOST_WIDE_INT) l1
2975 > (unsigned HOST_WIDE_INT) l2)))
2976 lv = l1, hv = h1;
2977 else
2978 lv = l2, hv = h2;
2979 break;
2980
2981 case LSHIFTRT: case ASHIFTRT:
2982 case ASHIFT:
2983 case ROTATE: case ROTATERT:
2984 if (SHIFT_COUNT_TRUNCATED)
2985 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2986
2987 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2988 return 0;
2989
2990 if (code == LSHIFTRT || code == ASHIFTRT)
2991 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2992 code == ASHIFTRT);
2993 else if (code == ASHIFT)
2994 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2995 else if (code == ROTATE)
2996 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2997 else /* code == ROTATERT */
2998 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2999 break;
3000
3001 default:
3002 return 0;
3003 }
3004
3005 return immed_double_const (lv, hv, mode);
3006 }
3007
3008 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3009 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3010 {
3011 /* Get the integer argument values in two forms:
3012 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3013
3014 arg0 = INTVAL (op0);
3015 arg1 = INTVAL (op1);
3016
3017 if (width < HOST_BITS_PER_WIDE_INT)
3018 {
3019 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3020 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3021
3022 arg0s = arg0;
3023 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3024 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3025
3026 arg1s = arg1;
3027 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3028 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3029 }
3030 else
3031 {
3032 arg0s = arg0;
3033 arg1s = arg1;
3034 }
3035
3036 /* Compute the value of the arithmetic. */
3037
3038 switch (code)
3039 {
3040 case PLUS:
3041 val = arg0s + arg1s;
3042 break;
3043
3044 case MINUS:
3045 val = arg0s - arg1s;
3046 break;
3047
3048 case MULT:
3049 val = arg0s * arg1s;
3050 break;
3051
3052 case DIV:
3053 if (arg1s == 0
3054 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3055 && arg1s == -1))
3056 return 0;
3057 val = arg0s / arg1s;
3058 break;
3059
3060 case MOD:
3061 if (arg1s == 0
3062 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3063 && arg1s == -1))
3064 return 0;
3065 val = arg0s % arg1s;
3066 break;
3067
3068 case UDIV:
3069 if (arg1 == 0
3070 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3071 && arg1s == -1))
3072 return 0;
3073 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3074 break;
3075
3076 case UMOD:
3077 if (arg1 == 0
3078 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3079 && arg1s == -1))
3080 return 0;
3081 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3082 break;
3083
3084 case AND:
3085 val = arg0 & arg1;
3086 break;
3087
3088 case IOR:
3089 val = arg0 | arg1;
3090 break;
3091
3092 case XOR:
3093 val = arg0 ^ arg1;
3094 break;
3095
3096 case LSHIFTRT:
3097 case ASHIFT:
3098 case ASHIFTRT:
3099 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3100 the value is in range. We can't return any old value for
3101 out-of-range arguments because either the middle-end (via
3102 shift_truncation_mask) or the back-end might be relying on
3103 target-specific knowledge. Nor can we rely on
3104 shift_truncation_mask, since the shift might not be part of an
3105 ashlM3, lshrM3 or ashrM3 instruction. */
3106 if (SHIFT_COUNT_TRUNCATED)
3107 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3108 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3109 return 0;
3110
3111 val = (code == ASHIFT
3112 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3113 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3114
3115 /* Sign-extend the result for arithmetic right shifts. */
3116 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3117 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3118 break;
3119
3120 case ROTATERT:
3121 if (arg1 < 0)
3122 return 0;
3123
3124 arg1 %= width;
3125 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3126 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3127 break;
3128
3129 case ROTATE:
3130 if (arg1 < 0)
3131 return 0;
3132
3133 arg1 %= width;
3134 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3135 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3136 break;
3137
3138 case COMPARE:
3139 /* Do nothing here. */
3140 return 0;
3141
3142 case SMIN:
3143 val = arg0s <= arg1s ? arg0s : arg1s;
3144 break;
3145
3146 case UMIN:
3147 val = ((unsigned HOST_WIDE_INT) arg0
3148 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3149 break;
3150
3151 case SMAX:
3152 val = arg0s > arg1s ? arg0s : arg1s;
3153 break;
3154
3155 case UMAX:
3156 val = ((unsigned HOST_WIDE_INT) arg0
3157 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3158 break;
3159
3160 case SS_PLUS:
3161 case US_PLUS:
3162 case SS_MINUS:
3163 case US_MINUS:
3164 case SS_ASHIFT:
3165 /* ??? There are simplifications that can be done. */
3166 return 0;
3167
3168 default:
3169 gcc_unreachable ();
3170 }
3171
3172 return gen_int_mode (val, mode);
3173 }
3174
3175 return NULL_RTX;
3176 }
3177
3178
3179 \f
3180 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3181 PLUS or MINUS.
3182
3183 Rather than test for specific cases, we do this by a brute-force method
3184 and do all possible simplifications until no more changes occur. Then
3185 we rebuild the operation. */
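/* For example, (plus (minus a b) (plus c (neg a))) is flattened into the
operand list {+a, -b, +c, -a}; the pairwise loop cancels a against -a, and
the remaining operands are rebuilt as the equivalent of (minus c b).  */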
3186
3187 struct simplify_plus_minus_op_data
3188 {
3189 rtx op;
3190 short neg;
3191 };
3192
3193 static int
3194 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3195 {
3196 const struct simplify_plus_minus_op_data *d1 = p1;
3197 const struct simplify_plus_minus_op_data *d2 = p2;
3198 int result;
3199
3200 result = (commutative_operand_precedence (d2->op)
3201 - commutative_operand_precedence (d1->op));
3202 if (result)
3203 return result;
3204
3205 /* Group together equal REGs to do more simplification. */
3206 if (REG_P (d1->op) && REG_P (d2->op))
3207 return REGNO (d1->op) - REGNO (d2->op);
3208 else
3209 return 0;
3210 }
3211
3212 static rtx
3213 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3214 rtx op1)
3215 {
3216 struct simplify_plus_minus_op_data ops[8];
3217 rtx result, tem;
3218 int n_ops = 2, input_ops = 2;
3219 int changed, n_constants = 0, canonicalized = 0;
3220 int i, j;
3221
3222 memset (ops, 0, sizeof ops);
3223
3224 /* Set up the two operands and then expand them until nothing has been
3225 changed. If we run out of room in our array, give up; this should
3226 almost never happen. */
3227
3228 ops[0].op = op0;
3229 ops[0].neg = 0;
3230 ops[1].op = op1;
3231 ops[1].neg = (code == MINUS);
3232
3233 do
3234 {
3235 changed = 0;
3236
3237 for (i = 0; i < n_ops; i++)
3238 {
3239 rtx this_op = ops[i].op;
3240 int this_neg = ops[i].neg;
3241 enum rtx_code this_code = GET_CODE (this_op);
3242
3243 switch (this_code)
3244 {
3245 case PLUS:
3246 case MINUS:
3247 if (n_ops == 7)
3248 return NULL_RTX;
3249
3250 ops[n_ops].op = XEXP (this_op, 1);
3251 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3252 n_ops++;
3253
3254 ops[i].op = XEXP (this_op, 0);
3255 input_ops++;
3256 changed = 1;
3257 canonicalized |= this_neg;
3258 break;
3259
3260 case NEG:
3261 ops[i].op = XEXP (this_op, 0);
3262 ops[i].neg = ! this_neg;
3263 changed = 1;
3264 canonicalized = 1;
3265 break;
3266
3267 case CONST:
3268 if (n_ops < 7
3269 && GET_CODE (XEXP (this_op, 0)) == PLUS
3270 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3271 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3272 {
3273 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3274 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3275 ops[n_ops].neg = this_neg;
3276 n_ops++;
3277 changed = 1;
3278 canonicalized = 1;
3279 }
3280 break;
3281
3282 case NOT:
3283 /* ~a -> (-a - 1) */
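/* E.g. a (not a) operand that is being subtracted contributes +a and +1
to the list, so b - ~a is rewritten as b + a + 1.  */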
3284 if (n_ops != 7)
3285 {
3286 ops[n_ops].op = constm1_rtx;
3287 ops[n_ops++].neg = this_neg;
3288 ops[i].op = XEXP (this_op, 0);
3289 ops[i].neg = !this_neg;
3290 changed = 1;
3291 canonicalized = 1;
3292 }
3293 break;
3294
3295 case CONST_INT:
3296 n_constants++;
3297 if (this_neg)
3298 {
3299 ops[i].op = neg_const_int (mode, this_op);
3300 ops[i].neg = 0;
3301 changed = 1;
3302 canonicalized = 1;
3303 }
3304 break;
3305
3306 default:
3307 break;
3308 }
3309 }
3310 }
3311 while (changed);
3312
3313 if (n_constants > 1)
3314 canonicalized = 1;
3315
3316 gcc_assert (n_ops >= 2);
3317
3318 /* If we only have two operands, we can avoid the loops. */
3319 if (n_ops == 2)
3320 {
3321 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3322 rtx lhs, rhs;
3323
3324 /* Get the two operands. Be careful with the order, especially for
3325 the cases where code == MINUS. */
3326 if (ops[0].neg && ops[1].neg)
3327 {
3328 lhs = gen_rtx_NEG (mode, ops[0].op);
3329 rhs = ops[1].op;
3330 }
3331 else if (ops[0].neg)
3332 {
3333 lhs = ops[1].op;
3334 rhs = ops[0].op;
3335 }
3336 else
3337 {
3338 lhs = ops[0].op;
3339 rhs = ops[1].op;
3340 }
3341
3342 return simplify_const_binary_operation (code, mode, lhs, rhs);
3343 }
3344
3345 /* Now simplify each pair of operands until nothing changes. */
3346 do
3347 {
3348 /* Insertion sort is good enough for an eight-element array. */
3349 for (i = 1; i < n_ops; i++)
3350 {
3351 struct simplify_plus_minus_op_data save;
3352 j = i - 1;
3353 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3354 continue;
3355
3356 canonicalized = 1;
3357 save = ops[i];
3358 do
3359 ops[j + 1] = ops[j];
3360 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3361 ops[j + 1] = save;
3362 }
3363
3364 /* This is only useful the first time through. */
3365 if (!canonicalized)
3366 return NULL_RTX;
3367
3368 changed = 0;
3369 for (i = n_ops - 1; i > 0; i--)
3370 for (j = i - 1; j >= 0; j--)
3371 {
3372 rtx lhs = ops[j].op, rhs = ops[i].op;
3373 int lneg = ops[j].neg, rneg = ops[i].neg;
3374
3375 if (lhs != 0 && rhs != 0)
3376 {
3377 enum rtx_code ncode = PLUS;
3378
3379 if (lneg != rneg)
3380 {
3381 ncode = MINUS;
3382 if (lneg)
3383 tem = lhs, lhs = rhs, rhs = tem;
3384 }
3385 else if (swap_commutative_operands_p (lhs, rhs))
3386 tem = lhs, lhs = rhs, rhs = tem;
3387
3388 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3389 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3390 {
3391 rtx tem_lhs, tem_rhs;
3392
3393 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3394 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3395 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3396
3397 if (tem && !CONSTANT_P (tem))
3398 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3399 }
3400 else
3401 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3402
3403 /* Reject "simplifications" that just wrap the two
3404 arguments in a CONST. Failure to do so can result
3405 in infinite recursion with simplify_binary_operation
3406 when it calls us to simplify CONST operations. */
3407 if (tem
3408 && ! (GET_CODE (tem) == CONST
3409 && GET_CODE (XEXP (tem, 0)) == ncode
3410 && XEXP (XEXP (tem, 0), 0) == lhs
3411 && XEXP (XEXP (tem, 0), 1) == rhs))
3412 {
3413 lneg &= rneg;
3414 if (GET_CODE (tem) == NEG)
3415 tem = XEXP (tem, 0), lneg = !lneg;
3416 if (GET_CODE (tem) == CONST_INT && lneg)
3417 tem = neg_const_int (mode, tem), lneg = 0;
3418
3419 ops[i].op = tem;
3420 ops[i].neg = lneg;
3421 ops[j].op = NULL_RTX;
3422 changed = 1;
3423 }
3424 }
3425 }
3426
3427 /* Pack all the operands to the lower-numbered entries. */
3428 for (i = 0, j = 0; j < n_ops; j++)
3429 if (ops[j].op)
3430 {
3431 ops[i] = ops[j];
3432 i++;
3433 }
3434 n_ops = i;
3435 }
3436 while (changed);
3437
3438 /* Create (minus -C X) instead of (neg (const (plus X C))). */
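/* E.g. for a SYMBOL_REF sym, a value equal to -(sym + 4) is returned as
(minus (const_int -4) sym) rather than as
(neg (const (plus sym (const_int 4)))).  */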
3439 if (n_ops == 2
3440 && GET_CODE (ops[1].op) == CONST_INT
3441 && CONSTANT_P (ops[0].op)
3442 && ops[0].neg)
3443 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3444
3445 /* We suppressed creation of trivial CONST expressions in the
3446 combination loop to avoid recursion. Create one manually now.
3447 The combination loop should have ensured that there is exactly
3448 one CONST_INT, and the sort will have ensured that it is last
3449 in the array and that any other constant will be next-to-last. */
3450
3451 if (n_ops > 1
3452 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3453 && CONSTANT_P (ops[n_ops - 2].op))
3454 {
3455 rtx value = ops[n_ops - 1].op;
3456 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3457 value = neg_const_int (mode, value);
3458 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3459 n_ops--;
3460 }
3461
3462 /* Put a non-negated operand first, if possible. */
3463
3464 for (i = 0; i < n_ops && ops[i].neg; i++)
3465 continue;
3466 if (i == n_ops)
3467 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3468 else if (i != 0)
3469 {
3470 tem = ops[0].op;
3471 ops[0] = ops[i];
3472 ops[i].op = tem;
3473 ops[i].neg = 1;
3474 }
3475
3476 /* Now make the result by performing the requested operations. */
3477 result = ops[0].op;
3478 for (i = 1; i < n_ops; i++)
3479 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3480 mode, result, ops[i].op);
3481
3482 return result;
3483 }
3484
3485 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3486 static bool
3487 plus_minus_operand_p (rtx x)
3488 {
3489 return GET_CODE (x) == PLUS
3490 || GET_CODE (x) == MINUS
3491 || (GET_CODE (x) == CONST
3492 && GET_CODE (XEXP (x, 0)) == PLUS
3493 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3494 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3495 }
3496
3497 /* Like simplify_binary_operation except used for relational operators.
3498 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3499 not both be VOIDmode.
3500
3501 CMP_MODE specifies the mode in which the comparison is done, so it is
3502 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3503 the operands or, if both are VOIDmode, the operands are compared in
3504 "infinite precision". */
3505 rtx
3506 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3507 enum machine_mode cmp_mode, rtx op0, rtx op1)
3508 {
3509 rtx tem, trueop0, trueop1;
3510
3511 if (cmp_mode == VOIDmode)
3512 cmp_mode = GET_MODE (op0);
3513 if (cmp_mode == VOIDmode)
3514 cmp_mode = GET_MODE (op1);
3515
3516 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3517 if (tem)
3518 {
3519 if (SCALAR_FLOAT_MODE_P (mode))
3520 {
3521 if (tem == const0_rtx)
3522 return CONST0_RTX (mode);
3523 #ifdef FLOAT_STORE_FLAG_VALUE
3524 {
3525 REAL_VALUE_TYPE val;
3526 val = FLOAT_STORE_FLAG_VALUE (mode);
3527 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3528 }
3529 #else
3530 return NULL_RTX;
3531 #endif
3532 }
3533 if (VECTOR_MODE_P (mode))
3534 {
3535 if (tem == const0_rtx)
3536 return CONST0_RTX (mode);
3537 #ifdef VECTOR_STORE_FLAG_VALUE
3538 {
3539 int i, units;
3540 rtvec v;
3541
3542 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3543 if (val == NULL_RTX)
3544 return NULL_RTX;
3545 if (val == const1_rtx)
3546 return CONST1_RTX (mode);
3547
3548 units = GET_MODE_NUNITS (mode);
3549 v = rtvec_alloc (units);
3550 for (i = 0; i < units; i++)
3551 RTVEC_ELT (v, i) = val;
3552 return gen_rtx_raw_CONST_VECTOR (mode, v);
3553 }
3554 #else
3555 return NULL_RTX;
3556 #endif
3557 }
3558
3559 return tem;
3560 }
3561
3562 /* For the following tests, ensure const0_rtx is op1. */
3563 if (swap_commutative_operands_p (op0, op1)
3564 || (op0 == const0_rtx && op1 != const0_rtx))
3565 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3566
3567 /* If op0 is a compare, extract the comparison arguments from it. */
3568 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3569 return simplify_relational_operation (code, mode, VOIDmode,
3570 XEXP (op0, 0), XEXP (op0, 1));
3571
3572 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3573 || CC0_P (op0))
3574 return NULL_RTX;
3575
3576 trueop0 = avoid_constant_pool_reference (op0);
3577 trueop1 = avoid_constant_pool_reference (op1);
3578 return simplify_relational_operation_1 (code, mode, cmp_mode,
3579 trueop0, trueop1);
3580 }
3581
3582 /* This part of simplify_relational_operation is only used when CMP_MODE
3583 is not in class MODE_CC (i.e. it is a real comparison).
3584
3585 MODE is the mode of the result, while CMP_MODE specifies the mode in
3586 which the comparison is done, so it is the mode of the operands. */
3587
3588 static rtx
3589 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3590 enum machine_mode cmp_mode, rtx op0, rtx op1)
3591 {
3592 enum rtx_code op0code = GET_CODE (op0);
3593
3594 if (GET_CODE (op1) == CONST_INT)
3595 {
3596 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3597 {
3598 /* If op0 is a comparison, extract the comparison arguments
3599 from it. */
3600 if (code == NE)
3601 {
3602 if (GET_MODE (op0) == mode)
3603 return simplify_rtx (op0);
3604 else
3605 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3606 XEXP (op0, 0), XEXP (op0, 1));
3607 }
3608 else if (code == EQ)
3609 {
3610 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3611 if (new_code != UNKNOWN)
3612 return simplify_gen_relational (new_code, mode, VOIDmode,
3613 XEXP (op0, 0), XEXP (op0, 1));
3614 }
3615 }
3616 }
3617
3618 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3619 if ((code == EQ || code == NE)
3620 && (op0code == PLUS || op0code == MINUS)
3621 && CONSTANT_P (op1)
3622 && CONSTANT_P (XEXP (op0, 1))
3623 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3624 {
3625 rtx x = XEXP (op0, 0);
3626 rtx c = XEXP (op0, 1);
3627
3628 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3629 cmp_mode, op1, c);
3630 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3631 }
3632
3633 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3634 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3635 if (code == NE
3636 && op1 == const0_rtx
3637 && GET_MODE_CLASS (mode) == MODE_INT
3638 && cmp_mode != VOIDmode
3639 /* ??? Work-around BImode bugs in the ia64 backend. */
3640 && mode != BImode
3641 && cmp_mode != BImode
3642 && nonzero_bits (op0, cmp_mode) == 1
3643 && STORE_FLAG_VALUE == 1)
3644 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3645 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3646 : lowpart_subreg (mode, op0, cmp_mode);
3647
3648 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3649 if ((code == EQ || code == NE)
3650 && op1 == const0_rtx
3651 && op0code == XOR)
3652 return simplify_gen_relational (code, mode, cmp_mode,
3653 XEXP (op0, 0), XEXP (op0, 1));
3654
3655 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3656 if ((code == EQ || code == NE)
3657 && op0code == XOR
3658 && rtx_equal_p (XEXP (op0, 0), op1)
3659 && !side_effects_p (XEXP (op0, 0)))
3660 return simplify_gen_relational (code, mode, cmp_mode,
3661 XEXP (op0, 1), const0_rtx);
3662
3663 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3664 if ((code == EQ || code == NE)
3665 && op0code == XOR
3666 && rtx_equal_p (XEXP (op0, 1), op1)
3667 && !side_effects_p (XEXP (op0, 1)))
3668 return simplify_gen_relational (code, mode, cmp_mode,
3669 XEXP (op0, 0), const0_rtx);
3670
3671 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3672 if ((code == EQ || code == NE)
3673 && op0code == XOR
3674 && (GET_CODE (op1) == CONST_INT
3675 || GET_CODE (op1) == CONST_DOUBLE)
3676 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3677 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3678 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3679 simplify_gen_binary (XOR, cmp_mode,
3680 XEXP (op0, 1), op1));
3681
3682 return NULL_RTX;
3683 }
3684
3685 /* Check if the given comparison (done in the given MODE) is actually a
3686 tautology or a contradiction.
3687 If no simplification is possible, this function returns zero.
3688 Otherwise, it returns either const_true_rtx or const0_rtx. */
3689
3690 rtx
3691 simplify_const_relational_operation (enum rtx_code code,
3692 enum machine_mode mode,
3693 rtx op0, rtx op1)
3694 {
3695 int equal, op0lt, op0ltu, op1lt, op1ltu;
3696 rtx tem;
3697 rtx trueop0;
3698 rtx trueop1;
3699
3700 gcc_assert (mode != VOIDmode
3701 || (GET_MODE (op0) == VOIDmode
3702 && GET_MODE (op1) == VOIDmode));
3703
3704 /* If op0 is a compare, extract the comparison arguments from it. */
3705 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3706 {
3707 op1 = XEXP (op0, 1);
3708 op0 = XEXP (op0, 0);
3709
3710 if (GET_MODE (op0) != VOIDmode)
3711 mode = GET_MODE (op0);
3712 else if (GET_MODE (op1) != VOIDmode)
3713 mode = GET_MODE (op1);
3714 else
3715 return 0;
3716 }
3717
3718 /* We can't simplify MODE_CC values since we don't know what the
3719 actual comparison is. */
3720 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3721 return 0;
3722
3723 /* Make sure the constant is second. */
3724 if (swap_commutative_operands_p (op0, op1))
3725 {
3726 tem = op0, op0 = op1, op1 = tem;
3727 code = swap_condition (code);
3728 }
3729
3730 trueop0 = avoid_constant_pool_reference (op0);
3731 trueop1 = avoid_constant_pool_reference (op1);
3732
3733 /* For integer comparisons of A and B maybe we can simplify A - B and can
3734 then simplify a comparison of that with zero. If A and B are both either
3735 a register or a CONST_INT, this can't help; testing for these cases will
3736 prevent infinite recursion here and speed things up.
3737
3738 We can only do this for EQ and NE comparisons; otherwise we may
3739 lose or introduce overflow that we cannot treat as undefined, since
3740 we do not know the signedness of the operation on either the left or
3741 the right-hand side of the comparison. */
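  /* For example, an EQ comparison of (plus:SI x (const_int 1)) with
     (plus:SI x (const_int 2)) can be folded this way: the difference
     simplifies to (const_int -1), which compares unequal to zero, so
     the whole comparison folds to const0_rtx.  */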
3742
3743 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3744 && (code == EQ || code == NE)
3745 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3746 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3747 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3748 /* We cannot do this if tem is a nonzero address. */
3749 && ! nonzero_address_p (tem))
3750 return simplify_const_relational_operation (signed_condition (code),
3751 mode, tem, const0_rtx);
3752
3753 if (! HONOR_NANS (mode) && code == ORDERED)
3754 return const_true_rtx;
3755
3756 if (! HONOR_NANS (mode) && code == UNORDERED)
3757 return const0_rtx;
3758
3759 /* For modes without NaNs, if the two operands are equal, we know the
3760 result except if they have side-effects. */
3761 if (! HONOR_NANS (GET_MODE (trueop0))
3762 && rtx_equal_p (trueop0, trueop1)
3763 && ! side_effects_p (trueop0))
3764 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3765
3766 /* If the operands are floating-point constants, see if we can fold
3767 the result. */
3768 else if (GET_CODE (trueop0) == CONST_DOUBLE
3769 && GET_CODE (trueop1) == CONST_DOUBLE
3770 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3771 {
3772 REAL_VALUE_TYPE d0, d1;
3773
3774 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3775 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3776
3777 /* Comparisons are unordered iff at least one of the values is NaN. */
3778 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3779 switch (code)
3780 {
3781 case UNEQ:
3782 case UNLT:
3783 case UNGT:
3784 case UNLE:
3785 case UNGE:
3786 case NE:
3787 case UNORDERED:
3788 return const_true_rtx;
3789 case EQ:
3790 case LT:
3791 case GT:
3792 case LE:
3793 case GE:
3794 case LTGT:
3795 case ORDERED:
3796 return const0_rtx;
3797 default:
3798 return 0;
3799 }
3800
3801 equal = REAL_VALUES_EQUAL (d0, d1);
3802 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3803 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3804 }
3805
3806 /* Otherwise, see if the operands are both integers. */
3807 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3808 && (GET_CODE (trueop0) == CONST_DOUBLE
3809 || GET_CODE (trueop0) == CONST_INT)
3810 && (GET_CODE (trueop1) == CONST_DOUBLE
3811 || GET_CODE (trueop1) == CONST_INT))
3812 {
3813 int width = GET_MODE_BITSIZE (mode);
3814 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3815 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3816
3817 /* Get the two words comprising each integer constant. */
3818 if (GET_CODE (trueop0) == CONST_DOUBLE)
3819 {
3820 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3821 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3822 }
3823 else
3824 {
3825 l0u = l0s = INTVAL (trueop0);
3826 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3827 }
3828
3829 if (GET_CODE (trueop1) == CONST_DOUBLE)
3830 {
3831 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3832 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3833 }
3834 else
3835 {
3836 l1u = l1s = INTVAL (trueop1);
3837 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3838 }
3839
3840 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3841 we have to sign or zero-extend the values. */
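      /* For QImode (width == 8), for instance, the unsigned copies are
	 masked with 0xff and the signed copies get bit 7 propagated
	 into all higher bits.  */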
3842 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3843 {
3844 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3845 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3846
3847 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3848 l0s |= ((HOST_WIDE_INT) (-1) << width);
3849
3850 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3851 l1s |= ((HOST_WIDE_INT) (-1) << width);
3852 }
3853 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3854 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3855
3856 equal = (h0u == h1u && l0u == l1u);
3857 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3858 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3859 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3860 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3861 }
3862
3863 /* Otherwise, there are some code-specific tests we can make. */
3864 else
3865 {
3866 /* Optimize comparisons with upper and lower bounds. */
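      /* For instance, an unsigned x >= 0 (GEU against the mode minimum)
	 is always true, and an unsigned x > 255 in QImode (GTU against
	 the mode maximum) is always false.  */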
3867 if (SCALAR_INT_MODE_P (mode)
3868 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3869 {
3870 rtx mmin, mmax;
3871 int sign;
3872
3873 if (code == GEU
3874 || code == LEU
3875 || code == GTU
3876 || code == LTU)
3877 sign = 0;
3878 else
3879 sign = 1;
3880
3881 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3882
3883 tem = NULL_RTX;
3884 switch (code)
3885 {
3886 case GEU:
3887 case GE:
3888 /* x >= min is always true. */
3889 if (rtx_equal_p (trueop1, mmin))
3890 tem = const_true_rtx;
3891 else
3892 break;
3893
3894 case LEU:
3895 case LE:
3896 /* x <= max is always true. */
3897 if (rtx_equal_p (trueop1, mmax))
3898 tem = const_true_rtx;
3899 break;
3900
3901 case GTU:
3902 case GT:
3903 /* x > max is always false. */
3904 if (rtx_equal_p (trueop1, mmax))
3905 tem = const0_rtx;
3906 break;
3907
3908 case LTU:
3909 case LT:
3910 /* x < min is always false. */
3911 if (rtx_equal_p (trueop1, mmin))
3912 tem = const0_rtx;
3913 break;
3914
3915 default:
3916 break;
3917 }
3918 if (tem == const0_rtx
3919 || tem == const_true_rtx)
3920 return tem;
3921 }
3922
3923 switch (code)
3924 {
3925 case EQ:
3926 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3927 return const0_rtx;
3928 break;
3929
3930 case NE:
3931 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3932 return const_true_rtx;
3933 break;
3934
3935 case LT:
3936 /* Optimize abs(x) < 0.0. */
3937 if (trueop1 == CONST0_RTX (mode)
3938 && !HONOR_SNANS (mode)
3939 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3940 {
3941 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3942 : trueop0;
3943 if (GET_CODE (tem) == ABS)
3944 return const0_rtx;
3945 }
3946 break;
3947
3948 case GE:
3949 /* Optimize abs(x) >= 0.0. */
3950 if (trueop1 == CONST0_RTX (mode)
3951 && !HONOR_NANS (mode)
3952 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3953 {
3954 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3955 : trueop0;
3956 if (GET_CODE (tem) == ABS)
3957 return const_true_rtx;
3958 }
3959 break;
3960
3961 case UNGE:
3962 /* Optimize ! (abs(x) < 0.0). */
3963 if (trueop1 == CONST0_RTX (mode))
3964 {
3965 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3966 : trueop0;
3967 if (GET_CODE (tem) == ABS)
3968 return const_true_rtx;
3969 }
3970 break;
3971
3972 default:
3973 break;
3974 }
3975
3976 return 0;
3977 }
3978
3979 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3980 as appropriate. */
3981 switch (code)
3982 {
3983 case EQ:
3984 case UNEQ:
3985 return equal ? const_true_rtx : const0_rtx;
3986 case NE:
3987 case LTGT:
3988 return ! equal ? const_true_rtx : const0_rtx;
3989 case LT:
3990 case UNLT:
3991 return op0lt ? const_true_rtx : const0_rtx;
3992 case GT:
3993 case UNGT:
3994 return op1lt ? const_true_rtx : const0_rtx;
3995 case LTU:
3996 return op0ltu ? const_true_rtx : const0_rtx;
3997 case GTU:
3998 return op1ltu ? const_true_rtx : const0_rtx;
3999 case LE:
4000 case UNLE:
4001 return equal || op0lt ? const_true_rtx : const0_rtx;
4002 case GE:
4003 case UNGE:
4004 return equal || op1lt ? const_true_rtx : const0_rtx;
4005 case LEU:
4006 return equal || op0ltu ? const_true_rtx : const0_rtx;
4007 case GEU:
4008 return equal || op1ltu ? const_true_rtx : const0_rtx;
4009 case ORDERED:
4010 return const_true_rtx;
4011 case UNORDERED:
4012 return const0_rtx;
4013 default:
4014 gcc_unreachable ();
4015 }
4016 }
4017 \f
4018 /* Simplify CODE, an operation with result mode MODE and three operands,
4019 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4020 a constant. Return 0 if no simplification is possible. */
4021
4022 rtx
4023 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4024 enum machine_mode op0_mode, rtx op0, rtx op1,
4025 rtx op2)
4026 {
4027 unsigned int width = GET_MODE_BITSIZE (mode);
4028
4029 /* VOIDmode means "infinite" precision. */
4030 if (width == 0)
4031 width = HOST_BITS_PER_WIDE_INT;
4032
4033 switch (code)
4034 {
4035 case SIGN_EXTRACT:
4036 case ZERO_EXTRACT:
4037 if (GET_CODE (op0) == CONST_INT
4038 && GET_CODE (op1) == CONST_INT
4039 && GET_CODE (op2) == CONST_INT
4040 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4041 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4042 {
4043 /* Extracting a bit-field from a constant */
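	  /* E.g. with !BITS_BIG_ENDIAN, (zero_extract (const_int 0x1234)
	     (const_int 4) (const_int 4)) selects bits 4..7 and yields
	     (const_int 3).  */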
4044 HOST_WIDE_INT val = INTVAL (op0);
4045
4046 if (BITS_BIG_ENDIAN)
4047 val >>= (GET_MODE_BITSIZE (op0_mode)
4048 - INTVAL (op2) - INTVAL (op1));
4049 else
4050 val >>= INTVAL (op2);
4051
4052 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4053 {
4054 /* First zero-extend. */
4055 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4056 /* If desired, propagate sign bit. */
4057 if (code == SIGN_EXTRACT
4058 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4059 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4060 }
4061
4062 /* Clear the bits that don't belong in our mode,
4063 unless they and our sign bit are all one.
4064 So we get either a reasonable negative value or a reasonable
4065 unsigned value for this mode. */
4066 if (width < HOST_BITS_PER_WIDE_INT
4067 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4068 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4069 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4070
4071 return gen_int_mode (val, mode);
4072 }
4073 break;
4074
4075 case IF_THEN_ELSE:
4076 if (GET_CODE (op0) == CONST_INT)
4077 return op0 != const0_rtx ? op1 : op2;
4078
4079 /* Convert c ? a : a into "a". */
4080 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4081 return op1;
4082
4083 /* Convert a != b ? a : b into "a". */
4084 if (GET_CODE (op0) == NE
4085 && ! side_effects_p (op0)
4086 && ! HONOR_NANS (mode)
4087 && ! HONOR_SIGNED_ZEROS (mode)
4088 && ((rtx_equal_p (XEXP (op0, 0), op1)
4089 && rtx_equal_p (XEXP (op0, 1), op2))
4090 || (rtx_equal_p (XEXP (op0, 0), op2)
4091 && rtx_equal_p (XEXP (op0, 1), op1))))
4092 return op1;
4093
4094 /* Convert a == b ? a : b into "b". */
4095 if (GET_CODE (op0) == EQ
4096 && ! side_effects_p (op0)
4097 && ! HONOR_NANS (mode)
4098 && ! HONOR_SIGNED_ZEROS (mode)
4099 && ((rtx_equal_p (XEXP (op0, 0), op1)
4100 && rtx_equal_p (XEXP (op0, 1), op2))
4101 || (rtx_equal_p (XEXP (op0, 0), op2)
4102 && rtx_equal_p (XEXP (op0, 1), op1))))
4103 return op2;
4104
4105 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4106 {
4107 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4108 ? GET_MODE (XEXP (op0, 1))
4109 : GET_MODE (XEXP (op0, 0)));
4110 rtx temp;
4111
4112 /* See whether op1 and op2 are STORE_FLAG_VALUE and zero, in which case the IF_THEN_ELSE collapses to a comparison. */
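	  /* E.g. (if_then_else (lt x y) (const_int 1) (const_int 0))
	     becomes (lt x y) when STORE_FLAG_VALUE is 1.  */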
4113 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4114 {
4115 HOST_WIDE_INT t = INTVAL (op1);
4116 HOST_WIDE_INT f = INTVAL (op2);
4117
4118 if (t == STORE_FLAG_VALUE && f == 0)
4119 code = GET_CODE (op0);
4120 else if (t == 0 && f == STORE_FLAG_VALUE)
4121 {
4122 enum rtx_code tmp;
4123 tmp = reversed_comparison_code (op0, NULL_RTX);
4124 if (tmp == UNKNOWN)
4125 break;
4126 code = tmp;
4127 }
4128 else
4129 break;
4130
4131 return simplify_gen_relational (code, mode, cmp_mode,
4132 XEXP (op0, 0), XEXP (op0, 1));
4133 }
4134
4135 if (cmp_mode == VOIDmode)
4136 cmp_mode = op0_mode;
4137 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4138 cmp_mode, XEXP (op0, 0),
4139 XEXP (op0, 1));
4140
4141 /* See if any simplifications were possible. */
4142 if (temp)
4143 {
4144 if (GET_CODE (temp) == CONST_INT)
4145 return temp == const0_rtx ? op2 : op1;
4146 else if (temp)
4147 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4148 }
4149 }
4150 break;
4151
4152 case VEC_MERGE:
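      /* OP2 is a selector: bit I chooses element I from OP0 when set,
	 from OP1 when clear.  E.g. in V4SImode, (const_int 5) takes
	 elements 0 and 2 from OP0 and elements 1 and 3 from OP1.  */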
4153 gcc_assert (GET_MODE (op0) == mode);
4154 gcc_assert (GET_MODE (op1) == mode);
4155 gcc_assert (VECTOR_MODE_P (mode));
4156 op2 = avoid_constant_pool_reference (op2);
4157 if (GET_CODE (op2) == CONST_INT)
4158 {
4159 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4160 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4161 int mask = (1 << n_elts) - 1;
4162
4163 if (!(INTVAL (op2) & mask))
4164 return op1;
4165 if ((INTVAL (op2) & mask) == mask)
4166 return op0;
4167
4168 op0 = avoid_constant_pool_reference (op0);
4169 op1 = avoid_constant_pool_reference (op1);
4170 if (GET_CODE (op0) == CONST_VECTOR
4171 && GET_CODE (op1) == CONST_VECTOR)
4172 {
4173 rtvec v = rtvec_alloc (n_elts);
4174 unsigned int i;
4175
4176 for (i = 0; i < n_elts; i++)
4177 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4178 ? CONST_VECTOR_ELT (op0, i)
4179 : CONST_VECTOR_ELT (op1, i));
4180 return gen_rtx_CONST_VECTOR (mode, v);
4181 }
4182 }
4183 break;
4184
4185 default:
4186 gcc_unreachable ();
4187 }
4188
4189 return 0;
4190 }
4191
4192 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4193 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4194
4195 Works by unpacking OP into a collection of 8-bit values
4196 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4197 and then repacking them again for OUTERMODE. */
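/* For example, the lowpart QImode subreg of (const_int 0x1234) taken
   in SImode comes back as (const_int 0x34).  */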
4198
4199 static rtx
4200 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4201 enum machine_mode innermode, unsigned int byte)
4202 {
4203 /* We support up to 512-bit values (for V8DFmode). */
4204 enum {
4205 max_bitsize = 512,
4206 value_bit = 8,
4207 value_mask = (1 << value_bit) - 1
4208 };
4209 unsigned char value[max_bitsize / value_bit];
4210 int value_start;
4211 int i;
4212 int elem;
4213
4214 int num_elem;
4215 rtx * elems;
4216 int elem_bitsize;
4217 rtx result_s;
4218 rtvec result_v = NULL;
4219 enum mode_class outer_class;
4220 enum machine_mode outer_submode;
4221
4222 /* Some ports misuse CCmode. */
4223 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4224 return op;
4225
4226 /* We have no way to represent a complex constant at the rtl level. */
4227 if (COMPLEX_MODE_P (outermode))
4228 return NULL_RTX;
4229
4230 /* Unpack the value. */
4231
4232 if (GET_CODE (op) == CONST_VECTOR)
4233 {
4234 num_elem = CONST_VECTOR_NUNITS (op);
4235 elems = &CONST_VECTOR_ELT (op, 0);
4236 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4237 }
4238 else
4239 {
4240 num_elem = 1;
4241 elems = &op;
4242 elem_bitsize = max_bitsize;
4243 }
4244 /* If this asserts, it is too complicated; reducing value_bit may help. */
4245 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4246 /* I don't know how to handle endianness of sub-units. */
4247 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4248
4249 for (elem = 0; elem < num_elem; elem++)
4250 {
4251 unsigned char * vp;
4252 rtx el = elems[elem];
4253
4254 /* Vectors are kept in target memory order. (This is probably
4255 a mistake.) */
4256 {
4257 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4258 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4259 / BITS_PER_UNIT);
4260 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4261 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4262 unsigned bytele = (subword_byte % UNITS_PER_WORD
4263 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4264 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4265 }
4266
4267 switch (GET_CODE (el))
4268 {
4269 case CONST_INT:
4270 for (i = 0;
4271 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4272 i += value_bit)
4273 *vp++ = INTVAL (el) >> i;
4274 /* CONST_INTs are always logically sign-extended. */
4275 for (; i < elem_bitsize; i += value_bit)
4276 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4277 break;
4278
4279 case CONST_DOUBLE:
4280 if (GET_MODE (el) == VOIDmode)
4281 {
4282 /* If this triggers, someone should have generated a
4283 CONST_INT instead. */
4284 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4285
4286 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4287 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4288 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4289 {
4290 *vp++
4291 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4292 i += value_bit;
4293 }
4294 /* It shouldn't matter what's done here, so fill it with
4295 zero. */
4296 for (; i < elem_bitsize; i += value_bit)
4297 *vp++ = 0;
4298 }
4299 else
4300 {
4301 long tmp[max_bitsize / 32];
4302 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4303
4304 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4305 gcc_assert (bitsize <= elem_bitsize);
4306 gcc_assert (bitsize % value_bit == 0);
4307
4308 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4309 GET_MODE (el));
4310
4311 /* real_to_target produces its result in words affected by
4312 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4313 and use WORDS_BIG_ENDIAN instead; see the documentation
4314 of SUBREG in rtl.texi. */
4315 for (i = 0; i < bitsize; i += value_bit)
4316 {
4317 int ibase;
4318 if (WORDS_BIG_ENDIAN)
4319 ibase = bitsize - 1 - i;
4320 else
4321 ibase = i;
4322 *vp++ = tmp[ibase / 32] >> i % 32;
4323 }
4324
4325 /* It shouldn't matter what's done here, so fill it with
4326 zero. */
4327 for (; i < elem_bitsize; i += value_bit)
4328 *vp++ = 0;
4329 }
4330 break;
4331
4332 default:
4333 gcc_unreachable ();
4334 }
4335 }
4336
4337 /* Now, pick the right byte to start with. */
4338 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4339 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4340 will already have offset 0. */
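  /* E.g. with a 4-byte inner mode and a 1-byte outer mode on a target
     where both WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN are set, memory
     byte 3 holds the least significant byte, so BYTE == 3 becomes 0.  */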
4341 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4342 {
4343 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4344 - byte);
4345 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4346 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4347 byte = (subword_byte % UNITS_PER_WORD
4348 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4349 }
4350
4351 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4352 so if it's become negative it will instead be very large.) */
4353 gcc_assert (byte < GET_MODE_SIZE (innermode));
4354
4355 /* Convert from bytes to chunks of size value_bit. */
4356 value_start = byte * (BITS_PER_UNIT / value_bit);
4357
4358 /* Re-pack the value. */
4359
4360 if (VECTOR_MODE_P (outermode))
4361 {
4362 num_elem = GET_MODE_NUNITS (outermode);
4363 result_v = rtvec_alloc (num_elem);
4364 elems = &RTVEC_ELT (result_v, 0);
4365 outer_submode = GET_MODE_INNER (outermode);
4366 }
4367 else
4368 {
4369 num_elem = 1;
4370 elems = &result_s;
4371 outer_submode = outermode;
4372 }
4373
4374 outer_class = GET_MODE_CLASS (outer_submode);
4375 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4376
4377 gcc_assert (elem_bitsize % value_bit == 0);
4378 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4379
4380 for (elem = 0; elem < num_elem; elem++)
4381 {
4382 unsigned char *vp;
4383
4384 /* Vectors are stored in target memory order. (This is probably
4385 a mistake.) */
4386 {
4387 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4388 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4389 / BITS_PER_UNIT);
4390 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4391 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4392 unsigned bytele = (subword_byte % UNITS_PER_WORD
4393 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4394 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4395 }
4396
4397 switch (outer_class)
4398 {
4399 case MODE_INT:
4400 case MODE_PARTIAL_INT:
4401 {
4402 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4403
4404 for (i = 0;
4405 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4406 i += value_bit)
4407 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4408 for (; i < elem_bitsize; i += value_bit)
4409 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4410 << (i - HOST_BITS_PER_WIDE_INT));
4411
4412 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4413 know why. */
4414 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4415 elems[elem] = gen_int_mode (lo, outer_submode);
4416 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4417 elems[elem] = immed_double_const (lo, hi, outer_submode);
4418 else
4419 return NULL_RTX;
4420 }
4421 break;
4422
4423 case MODE_FLOAT:
4424 case MODE_DECIMAL_FLOAT:
4425 {
4426 REAL_VALUE_TYPE r;
4427 long tmp[max_bitsize / 32];
4428
4429 /* real_from_target wants its input in words affected by
4430 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4431 and use WORDS_BIG_ENDIAN instead; see the documentation
4432 of SUBREG in rtl.texi. */
4433 for (i = 0; i < max_bitsize / 32; i++)
4434 tmp[i] = 0;
4435 for (i = 0; i < elem_bitsize; i += value_bit)
4436 {
4437 int ibase;
4438 if (WORDS_BIG_ENDIAN)
4439 ibase = elem_bitsize - 1 - i;
4440 else
4441 ibase = i;
4442 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4443 }
4444
4445 real_from_target (&r, tmp, outer_submode);
4446 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4447 }
4448 break;
4449
4450 default:
4451 gcc_unreachable ();
4452 }
4453 }
4454 if (VECTOR_MODE_P (outermode))
4455 return gen_rtx_CONST_VECTOR (outermode, result_v);
4456 else
4457 return result_s;
4458 }
4459
4460 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4461 Return 0 if no simplifications are possible. */
4462 rtx
4463 simplify_subreg (enum machine_mode outermode, rtx op,
4464 enum machine_mode innermode, unsigned int byte)
4465 {
4466 /* Little bit of sanity checking. */
4467 gcc_assert (innermode != VOIDmode);
4468 gcc_assert (outermode != VOIDmode);
4469 gcc_assert (innermode != BLKmode);
4470 gcc_assert (outermode != BLKmode);
4471
4472 gcc_assert (GET_MODE (op) == innermode
4473 || GET_MODE (op) == VOIDmode);
4474
4475 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4476 gcc_assert (byte < GET_MODE_SIZE (innermode));
4477
4478 if (outermode == innermode && !byte)
4479 return op;
4480
4481 if (GET_CODE (op) == CONST_INT
4482 || GET_CODE (op) == CONST_DOUBLE
4483 || GET_CODE (op) == CONST_VECTOR)
4484 return simplify_immed_subreg (outermode, op, innermode, byte);
4485
4486 /* Changing mode twice with SUBREG => just change it once,
4487 or not at all if changing back to op's original mode. */
4488 if (GET_CODE (op) == SUBREG)
4489 {
4490 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4491 int final_offset = byte + SUBREG_BYTE (op);
4492 rtx newx;
4493
4494 if (outermode == innermostmode
4495 && byte == 0 && SUBREG_BYTE (op) == 0)
4496 return SUBREG_REG (op);
4497
4498 /* The SUBREG_BYTE represents an offset, as if the value were stored
4499 in memory. An irritating exception is a paradoxical subreg, where
4500 we define SUBREG_BYTE to be 0; on big-endian machines this
4501 value would otherwise be negative. For a moment, undo this exception. */
4502 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4503 {
4504 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4505 if (WORDS_BIG_ENDIAN)
4506 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4507 if (BYTES_BIG_ENDIAN)
4508 final_offset += difference % UNITS_PER_WORD;
4509 }
4510 if (SUBREG_BYTE (op) == 0
4511 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4512 {
4513 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4514 if (WORDS_BIG_ENDIAN)
4515 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4516 if (BYTES_BIG_ENDIAN)
4517 final_offset += difference % UNITS_PER_WORD;
4518 }
4519
4520 /* See whether resulting subreg will be paradoxical. */
4521 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4522 {
4523 /* In nonparadoxical subregs we can't handle negative offsets. */
4524 if (final_offset < 0)
4525 return NULL_RTX;
4526 /* Bail out in case resulting subreg would be incorrect. */
4527 if (final_offset % GET_MODE_SIZE (outermode)
4528 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4529 return NULL_RTX;
4530 }
4531 else
4532 {
4533 int offset = 0;
4534 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4535
4536 /* In a paradoxical subreg, see if we are still looking at the lower part.
4537 If so, our SUBREG_BYTE will be 0. */
4538 if (WORDS_BIG_ENDIAN)
4539 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4540 if (BYTES_BIG_ENDIAN)
4541 offset += difference % UNITS_PER_WORD;
4542 if (offset == final_offset)
4543 final_offset = 0;
4544 else
4545 return NULL_RTX;
4546 }
4547
4548 /* Recurse for further possible simplifications. */
4549 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4550 final_offset);
4551 if (newx)
4552 return newx;
4553 if (validate_subreg (outermode, innermostmode,
4554 SUBREG_REG (op), final_offset))
4555 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4556 return NULL_RTX;
4557 }
4558
4559 /* Merge implicit and explicit truncations. */
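  /* E.g. the lowpart QImode subreg of (truncate:HI (x:SI)) becomes
     (truncate:QI (x:SI)).  */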
4560
4561 if (GET_CODE (op) == TRUNCATE
4562 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4563 && subreg_lowpart_offset (outermode, innermode) == byte)
4564 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4565 GET_MODE (XEXP (op, 0)));
4566
4567 /* SUBREG of a hard register => just change the register number
4568 and/or mode. If the hard register is not valid in that mode,
4569 suppress this simplification. If the hard register is the stack,
4570 frame, or argument pointer, leave this as a SUBREG. */
4571
4572 if (REG_P (op)
4573 && REGNO (op) < FIRST_PSEUDO_REGISTER
4574 #ifdef CANNOT_CHANGE_MODE_CLASS
4575 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4576 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4577 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4578 #endif
4579 && ((reload_completed && !frame_pointer_needed)
4580 || (REGNO (op) != FRAME_POINTER_REGNUM
4581 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4582 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4583 #endif
4584 ))
4585 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4586 && REGNO (op) != ARG_POINTER_REGNUM
4587 #endif
4588 && REGNO (op) != STACK_POINTER_REGNUM
4589 && subreg_offset_representable_p (REGNO (op), innermode,
4590 byte, outermode))
4591 {
4592 unsigned int regno = REGNO (op);
4593 unsigned int final_regno
4594 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4595
4596 /* ??? We do allow it if the current REG is not valid for
4597 its mode. This is a kludge to work around how float/complex
4598 arguments are passed on 32-bit SPARC and should be fixed. */
4599 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4600 || ! HARD_REGNO_MODE_OK (regno, innermode))
4601 {
4602 rtx x;
4603 int final_offset = byte;
4604
4605 /* Adjust offset for paradoxical subregs. */
4606 if (byte == 0
4607 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4608 {
4609 int difference = (GET_MODE_SIZE (innermode)
4610 - GET_MODE_SIZE (outermode));
4611 if (WORDS_BIG_ENDIAN)
4612 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4613 if (BYTES_BIG_ENDIAN)
4614 final_offset += difference % UNITS_PER_WORD;
4615 }
4616
4617 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4618
4619 /* Propagate original regno. We don't have any way to specify
4620 the offset inside original regno, so do so only for lowpart.
4621 The information is used only by alias analysis, which cannot
4622 grok partial registers anyway. */
4623
4624 if (subreg_lowpart_offset (outermode, innermode) == byte)
4625 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4626 return x;
4627 }
4628 }
4629
4630 /* If we have a SUBREG of a register that we are replacing and we are
4631 replacing it with a MEM, make a new MEM and try replacing the
4632 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4633 or if we would be widening it. */
4634
4635 if (MEM_P (op)
4636 && ! mode_dependent_address_p (XEXP (op, 0))
4637 /* Allow splitting of volatile memory references in case we don't
4638 have an instruction to move the whole thing. */
4639 && (! MEM_VOLATILE_P (op)
4640 || ! have_insn_for (SET, innermode))
4641 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4642 return adjust_address_nv (op, outermode, byte);
4643
4644 /* Handle complex values represented as CONCAT
4645 of real and imaginary part. */
4646 if (GET_CODE (op) == CONCAT)
4647 {
4648 unsigned int inner_size, final_offset;
4649 rtx part, res;
4650
4651 inner_size = GET_MODE_UNIT_SIZE (innermode);
4652 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4653 final_offset = byte % inner_size;
4654 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4655 return NULL_RTX;
4656
4657 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4658 if (res)
4659 return res;
4660 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4661 return gen_rtx_SUBREG (outermode, part, final_offset);
4662 return NULL_RTX;
4663 }
4664
4665 /* Optimize SUBREG truncations of zero and sign extended values. */
4666 if ((GET_CODE (op) == ZERO_EXTEND
4667 || GET_CODE (op) == SIGN_EXTEND)
4668 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4669 {
4670 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4671
4672 /* If we're requesting the lowpart of a zero or sign extension,
4673 there are three possibilities. If the outermode is the same
4674 as the origmode, we can omit both the extension and the subreg.
4675 If the outermode is not larger than the origmode, we can apply
4676 the truncation without the extension. Finally, if the outermode
4677 is larger than the origmode, but both are integer modes, we
4678 can just extend to the appropriate mode. */
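      /* E.g. if OP is (zero_extend:DI (x:QI)), its lowpart subreg in
	 QImode is just x, while in HImode it becomes
	 (zero_extend:HI (x:QI)).  */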
4679 if (bitpos == 0)
4680 {
4681 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4682 if (outermode == origmode)
4683 return XEXP (op, 0);
4684 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4685 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4686 subreg_lowpart_offset (outermode,
4687 origmode));
4688 if (SCALAR_INT_MODE_P (outermode))
4689 return simplify_gen_unary (GET_CODE (op), outermode,
4690 XEXP (op, 0), origmode);
4691 }
4692
4693 /* A SUBREG resulting from a zero extension may fold to zero if
4694 it extracts higher bits than the ZERO_EXTEND's source bits. */
4695 if (GET_CODE (op) == ZERO_EXTEND
4696 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4697 return CONST0_RTX (outermode);
4698 }
4699
4700 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4701 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4702 the outer subreg is effectively a truncation to the original mode. */
4703 if ((GET_CODE (op) == LSHIFTRT
4704 || GET_CODE (op) == ASHIFTRT)
4705 && SCALAR_INT_MODE_P (outermode)
4706 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4707 to avoid the possibility that an outer LSHIFTRT shifts by more
4708 than the sign extension's sign_bit_copies and introduces zeros
4709 into the high bits of the result. */
4710 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4711 && GET_CODE (XEXP (op, 1)) == CONST_INT
4712 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4713 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4714 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4715 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4716 return simplify_gen_binary (ASHIFTRT, outermode,
4717 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4718
4719 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4720 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4721 the outer subreg is effectively a truncation to the original mode. */
4722 if ((GET_CODE (op) == LSHIFTRT
4723 || GET_CODE (op) == ASHIFTRT)
4724 && SCALAR_INT_MODE_P (outermode)
4725 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4726 && GET_CODE (XEXP (op, 1)) == CONST_INT
4727 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4728 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4729 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4730 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4731 return simplify_gen_binary (LSHIFTRT, outermode,
4732 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4733
4734 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4735 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4736 the outer subreg is effectively a truncation to the original mode. */
4737 if (GET_CODE (op) == ASHIFT
4738 && SCALAR_INT_MODE_P (outermode)
4739 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4740 && GET_CODE (XEXP (op, 1)) == CONST_INT
4741 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4742 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4743 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4744 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4745 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4746 return simplify_gen_binary (ASHIFT, outermode,
4747 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4748
4749 return NULL_RTX;
4750 }
4751
4752 /* Make a SUBREG operation or equivalent if it folds. */
4753
4754 rtx
4755 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4756 enum machine_mode innermode, unsigned int byte)
4757 {
4758 rtx newx;
4759
4760 newx = simplify_subreg (outermode, op, innermode, byte);
4761 if (newx)
4762 return newx;
4763
4764 if (GET_CODE (op) == SUBREG
4765 || GET_CODE (op) == CONCAT
4766 || GET_MODE (op) == VOIDmode)
4767 return NULL_RTX;
4768
4769 if (validate_subreg (outermode, innermode, op, byte))
4770 return gen_rtx_SUBREG (outermode, op, byte);
4771
4772 return NULL_RTX;
4773 }
4774
4775 /* Simplify X, an rtx expression.
4776
4777 Return the simplified expression or NULL if no simplifications
4778 were possible.
4779
4780 This is the preferred entry point into the simplification routines;
4781 however, we still allow passes to call the more specific routines.
4782
4783 Right now GCC has three (yes, three) major bodies of RTL simplification
4784 code that need to be unified.
4785
4786 1. fold_rtx in cse.c. This code uses various CSE specific
4787 information to aid in RTL simplification.
4788
4789 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4790 it uses combine specific information to aid in RTL
4791 simplification.
4792
4793 3. The routines in this file.
4794
4795
4796 Long term we want to only have one body of simplification code; to
4797 get to that state I recommend the following steps:
4798
4799 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4800 which are not pass dependent state into these routines.
4801
4802 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4803 use this routine whenever possible.
4804
4805 3. Allow for pass dependent state to be provided to these
4806 routines and add simplifications based on the pass dependent
4807 state. Remove code from cse.c & combine.c that becomes
4808 redundant/dead.
4809
4810 It will take time, but ultimately the compiler will be easier to
4811 maintain and improve. It's totally silly that when we add a
4812 simplification, it needs to be added to 4 places (3 for RTL
4813 simplification and 1 for tree simplification). */
4814
4815 rtx
4816 simplify_rtx (rtx x)
4817 {
4818 enum rtx_code code = GET_CODE (x);
4819 enum machine_mode mode = GET_MODE (x);
4820
4821 switch (GET_RTX_CLASS (code))
4822 {
4823 case RTX_UNARY:
4824 return simplify_unary_operation (code, mode,
4825 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4826 case RTX_COMM_ARITH:
4827 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4828 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4829
4830 /* Fall through.... */
4831
4832 case RTX_BIN_ARITH:
4833 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4834
4835 case RTX_TERNARY:
4836 case RTX_BITFIELD_OPS:
4837 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4838 XEXP (x, 0), XEXP (x, 1),
4839 XEXP (x, 2));
4840
4841 case RTX_COMPARE:
4842 case RTX_COMM_COMPARE:
4843 return simplify_relational_operation (code, mode,
4844 ((GET_MODE (XEXP (x, 0))
4845 != VOIDmode)
4846 ? GET_MODE (XEXP (x, 0))
4847 : GET_MODE (XEXP (x, 1))),
4848 XEXP (x, 0),
4849 XEXP (x, 1));
4850
4851 case RTX_EXTRA:
4852 if (code == SUBREG)
4853 return simplify_gen_subreg (mode, SUBREG_REG (x),
4854 GET_MODE (SUBREG_REG (x)),
4855 SUBREG_BYTE (x));
4856 break;
4857
4858 case RTX_OBJ:
4859 if (code == LO_SUM)
4860 {
4861 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4862 if (GET_CODE (XEXP (x, 0)) == HIGH
4863 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4864 return XEXP (x, 1);
4865 }
4866 break;
4867
4868 default:
4869 break;
4870 }
4871 return NULL;
4872 }