1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
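/* For example, a low word whose top bit is set sign-extends to a high
   word of (HOST_WIDE_INT) -1, while any other low word gives a high
   word of 0, mirroring an ordinary arithmetic sign extension.  */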
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
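/* For instance, negating the QImode constant -128 mathematically gives 128,
   which does not fit in the mode; gen_int_mode truncates the result back
   into QImode, so (const_int -128) is returned again.  */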
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
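/* For example, with 32-bit SImode this is true for the CONST_INT whose
   low 32 bits are 0x80000000 (only the sign bit set) and false for any
   other CONST_INT.  */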
76
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 addr = XEXP (x, 0);
162
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
165
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
170 {
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
173 }
174
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
177
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
182 {
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
185
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
190 {
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
194 }
195 else
196 return c;
197 }
198
199 return x;
200 }
201
202 /* Return true if X is a MEM referencing the constant pool. */
203
204 bool
205 constant_pool_reference_p (rtx x)
206 {
207 return avoid_constant_pool_reference (x) != x;
208 }
209 \f
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
212
213 rtx
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
216 {
217 rtx tem;
218
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
222
223 return gen_rtx_fmt_e (code, mode, op);
224 }
225
226 /* Likewise for ternary operations. */
227
228 rtx
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
231 {
232 rtx tem;
233
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
238
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
240 }
241
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
244
245 rtx
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
248 {
249 rtx tem;
250
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
254
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
256 }
257 \f
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
260
261 rtx
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
263 {
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
268
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
272
273 if (x == old_rtx)
274 return new_rtx;
275
276 switch (GET_RTX_CLASS (code))
277 {
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
285
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
293
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
304
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
317
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
321 {
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
329 }
330 break;
331
332 case RTX_OBJ:
333 if (code == MEM)
334 {
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
339 }
340 else if (code == LO_SUM)
341 {
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
344
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
348
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
352 }
353 else if (code == REG)
354 {
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
357 }
358 break;
359
360 default:
361 break;
362 }
363 return x;
364 }
365 \f
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
369 rtx
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
372 {
373 rtx trueop, tem;
374
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
377
378 trueop = avoid_constant_pool_reference (op);
379
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
383
384 return simplify_unary_operation_1 (code, mode, op);
385 }
386
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
391 {
392 enum rtx_code reversed;
393 rtx temp;
394
395 switch (code)
396 {
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
401
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
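/* When STORE_FLAG_VALUE is -1 a comparison result is either 0 or -1,
   so its bitwise NOT is exactly the 0/-1 result of the reversed
   comparison; in BImode the single bit behaves the same way.  */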
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
409
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
414
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
418
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
425
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
433
434
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
439 bother with. */
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
442 {
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
445 }
446
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
450
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
457
458
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
465 {
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
467 rtx x;
468
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
471 inner_mode),
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
474 }
475
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
479 coded. */
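/* For example, (not (and X Y)) becomes (ior (not X) (not Y)) and
   (not (ior X Y)) becomes (and (not X) (not Y)).  */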
480
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
482 {
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
485
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
488
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
491 op_mode = mode;
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
493
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
495 {
496 rtx tem = in2;
497 in2 = in1; in1 = tem;
498 }
499
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
501 mode, in1, in2);
502 }
503 break;
504
505 case NEG:
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
508 return XEXP (op, 0);
509
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
514
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
518
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
 521 both +0, (minus Y X) is +0 while (neg (minus X Y)) is -0. If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
528
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
532 {
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
536 {
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
538 if (temp)
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
540 }
541
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
545 }
546
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
551 {
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
554 }
555
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
558 is a constant). */
559 if (GET_CODE (op) == ASHIFT)
560 {
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
562 if (temp)
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
564 }
565
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
573
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
581
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
587
588 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
589 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
590 if (GET_CODE (op) == LT
591 && XEXP (op, 1) == const0_rtx)
592 {
593 enum machine_mode inner = GET_MODE (XEXP (op, 0));
594 int isize = GET_MODE_BITSIZE (inner);
595 if (STORE_FLAG_VALUE == 1)
596 {
597 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
598 GEN_INT (isize - 1));
599 if (mode == inner)
600 return temp;
601 if (GET_MODE_BITSIZE (mode) > isize)
602 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
603 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
604 }
605 else if (STORE_FLAG_VALUE == -1)
606 {
607 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
608 GEN_INT (isize - 1));
609 if (mode == inner)
610 return temp;
611 if (GET_MODE_BITSIZE (mode) > isize)
612 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
613 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
614 }
615 }
616 break;
617
618 case TRUNCATE:
619 /* We can't handle truncation to a partial integer mode here
620 because we don't know the real bitsize of the partial
621 integer mode. */
622 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
623 break;
624
625 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
626 if ((GET_CODE (op) == SIGN_EXTEND
627 || GET_CODE (op) == ZERO_EXTEND)
628 && GET_MODE (XEXP (op, 0)) == mode)
629 return XEXP (op, 0);
630
631 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
632 (OP:SI foo:SI) if OP is NEG or ABS. */
633 if ((GET_CODE (op) == ABS
634 || GET_CODE (op) == NEG)
635 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
636 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
637 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (XEXP (op, 0), 0), mode);
640
641 /* (truncate:A (subreg:B (truncate:C X) 0)) is
642 (truncate:A X). */
643 if (GET_CODE (op) == SUBREG
644 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
645 && subreg_lowpart_p (op))
646 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
647 GET_MODE (XEXP (SUBREG_REG (op), 0)));
648
649 /* If we know that the value is already truncated, we can
650 replace the TRUNCATE with a SUBREG. Note that this is also
651 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
652 modes we just have to apply a different definition for
653 truncation. But don't do this for an (LSHIFTRT (MULT ...))
654 since this will cause problems with the umulXi3_highpart
655 patterns. */
656 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
657 GET_MODE_BITSIZE (GET_MODE (op)))
658 ? (num_sign_bit_copies (op, GET_MODE (op))
659 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
660 - GET_MODE_BITSIZE (mode)))
661 : truncated_to_mode (mode, op))
662 && ! (GET_CODE (op) == LSHIFTRT
663 && GET_CODE (XEXP (op, 0)) == MULT))
664 return rtl_hooks.gen_lowpart_no_emit (mode, op);
665
666 /* A truncate of a comparison can be replaced with a subreg if
667 STORE_FLAG_VALUE permits. This is like the previous test,
668 but it works even if the comparison is done in a mode larger
669 than HOST_BITS_PER_WIDE_INT. */
670 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
671 && COMPARISON_P (op)
672 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
673 return rtl_hooks.gen_lowpart_no_emit (mode, op);
674 break;
675
676 case FLOAT_TRUNCATE:
677 if (DECIMAL_FLOAT_MODE_P (mode))
678 break;
679
680 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
681 if (GET_CODE (op) == FLOAT_EXTEND
682 && GET_MODE (XEXP (op, 0)) == mode)
683 return XEXP (op, 0);
684
685 /* (float_truncate:SF (float_truncate:DF foo:XF))
686 = (float_truncate:SF foo:XF).
687 This may eliminate double rounding, so it is unsafe.
688
689 (float_truncate:SF (float_extend:XF foo:DF))
690 = (float_truncate:SF foo:DF).
691
692 (float_truncate:DF (float_extend:XF foo:SF))
 693 = (float_extend:DF foo:SF). */
694 if ((GET_CODE (op) == FLOAT_TRUNCATE
695 && flag_unsafe_math_optimizations)
696 || GET_CODE (op) == FLOAT_EXTEND)
697 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
698 0)))
699 > GET_MODE_SIZE (mode)
700 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
701 mode,
702 XEXP (op, 0), mode);
703
704 /* (float_truncate (float x)) is (float x) */
705 if (GET_CODE (op) == FLOAT
706 && (flag_unsafe_math_optimizations
707 || ((unsigned)significand_size (GET_MODE (op))
708 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
709 - num_sign_bit_copies (XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)))))))
711 return simplify_gen_unary (FLOAT, mode,
712 XEXP (op, 0),
713 GET_MODE (XEXP (op, 0)));
714
 715 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
716 (OP:SF foo:SF) if OP is NEG or ABS. */
717 if ((GET_CODE (op) == ABS
718 || GET_CODE (op) == NEG)
719 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
720 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
721 return simplify_gen_unary (GET_CODE (op), mode,
722 XEXP (XEXP (op, 0), 0), mode);
723
724 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
 725 is (float_truncate:SF X). */
726 if (GET_CODE (op) == SUBREG
727 && subreg_lowpart_p (op)
728 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
729 return SUBREG_REG (op);
730 break;
731
732 case FLOAT_EXTEND:
733 if (DECIMAL_FLOAT_MODE_P (mode))
734 break;
735
736 /* (float_extend (float_extend x)) is (float_extend x)
737
738 (float_extend (float x)) is (float x) assuming that double
739 rounding can't happen.
740 */
741 if (GET_CODE (op) == FLOAT_EXTEND
742 || (GET_CODE (op) == FLOAT
743 && ((unsigned)significand_size (GET_MODE (op))
744 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
745 - num_sign_bit_copies (XEXP (op, 0),
746 GET_MODE (XEXP (op, 0)))))))
747 return simplify_gen_unary (GET_CODE (op), mode,
748 XEXP (op, 0),
749 GET_MODE (XEXP (op, 0)));
750
751 break;
752
753 case ABS:
754 /* (abs (neg <foo>)) -> (abs <foo>) */
755 if (GET_CODE (op) == NEG)
756 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
757 GET_MODE (XEXP (op, 0)));
758
759 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
760 do nothing. */
761 if (GET_MODE (op) == VOIDmode)
762 break;
763
764 /* If operand is something known to be positive, ignore the ABS. */
765 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
766 || ((GET_MODE_BITSIZE (GET_MODE (op))
767 <= HOST_BITS_PER_WIDE_INT)
768 && ((nonzero_bits (op, GET_MODE (op))
769 & ((HOST_WIDE_INT) 1
770 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
771 == 0)))
772 return op;
773
774 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
775 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
776 return gen_rtx_NEG (mode, op);
777
778 break;
779
780 case FFS:
781 /* (ffs (*_extend <X>)) = (ffs <X>) */
782 if (GET_CODE (op) == SIGN_EXTEND
783 || GET_CODE (op) == ZERO_EXTEND)
784 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
785 GET_MODE (XEXP (op, 0)));
786 break;
787
788 case POPCOUNT:
789 case PARITY:
790 /* (pop* (zero_extend <X>)) = (pop* <X>) */
791 if (GET_CODE (op) == ZERO_EXTEND)
792 return simplify_gen_unary (code, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
794 break;
795
796 case FLOAT:
797 /* (float (sign_extend <X>)) = (float <X>). */
798 if (GET_CODE (op) == SIGN_EXTEND)
799 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
802
803 case SIGN_EXTEND:
804 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
805 becomes just the MINUS if its mode is MODE. This allows
806 folding switch statements on machines using casesi (such as
807 the VAX). */
808 if (GET_CODE (op) == TRUNCATE
809 && GET_MODE (XEXP (op, 0)) == mode
810 && GET_CODE (XEXP (op, 0)) == MINUS
811 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
812 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
813 return XEXP (op, 0);
814
815 /* Check for a sign extension of a subreg of a promoted
816 variable, where the promotion is sign-extended, and the
817 target mode is the same as the variable's promotion. */
818 if (GET_CODE (op) == SUBREG
819 && SUBREG_PROMOTED_VAR_P (op)
820 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
821 && GET_MODE (XEXP (op, 0)) == mode)
822 return XEXP (op, 0);
823
824 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
825 if (! POINTERS_EXTEND_UNSIGNED
826 && mode == Pmode && GET_MODE (op) == ptr_mode
827 && (CONSTANT_P (op)
828 || (GET_CODE (op) == SUBREG
829 && REG_P (SUBREG_REG (op))
830 && REG_POINTER (SUBREG_REG (op))
831 && GET_MODE (SUBREG_REG (op)) == Pmode)))
832 return convert_memory_address (Pmode, op);
833 #endif
834 break;
835
836 case ZERO_EXTEND:
837 /* Check for a zero extension of a subreg of a promoted
838 variable, where the promotion is zero-extended, and the
839 target mode is the same as the variable's promotion. */
840 if (GET_CODE (op) == SUBREG
841 && SUBREG_PROMOTED_VAR_P (op)
842 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
843 && GET_MODE (XEXP (op, 0)) == mode)
844 return XEXP (op, 0);
845
846 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
847 if (POINTERS_EXTEND_UNSIGNED > 0
848 && mode == Pmode && GET_MODE (op) == ptr_mode
849 && (CONSTANT_P (op)
850 || (GET_CODE (op) == SUBREG
851 && REG_P (SUBREG_REG (op))
852 && REG_POINTER (SUBREG_REG (op))
853 && GET_MODE (SUBREG_REG (op)) == Pmode)))
854 return convert_memory_address (Pmode, op);
855 #endif
856 break;
857
858 default:
859 break;
860 }
861
862 return 0;
863 }
864
865 /* Try to compute the value of a unary operation CODE whose output mode is to
866 be MODE with input operand OP whose mode was originally OP_MODE.
867 Return zero if the value cannot be computed. */
868 rtx
869 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
870 rtx op, enum machine_mode op_mode)
871 {
872 unsigned int width = GET_MODE_BITSIZE (mode);
873
874 if (code == VEC_DUPLICATE)
875 {
876 gcc_assert (VECTOR_MODE_P (mode));
877 if (GET_MODE (op) != VOIDmode)
878 {
879 if (!VECTOR_MODE_P (GET_MODE (op)))
880 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
881 else
882 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
883 (GET_MODE (op)));
884 }
885 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
886 || GET_CODE (op) == CONST_VECTOR)
887 {
888 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
889 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
890 rtvec v = rtvec_alloc (n_elts);
891 unsigned int i;
892
893 if (GET_CODE (op) != CONST_VECTOR)
894 for (i = 0; i < n_elts; i++)
895 RTVEC_ELT (v, i) = op;
896 else
897 {
898 enum machine_mode inmode = GET_MODE (op);
899 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
900 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
901
902 gcc_assert (in_n_elts < n_elts);
903 gcc_assert ((n_elts % in_n_elts) == 0);
904 for (i = 0; i < n_elts; i++)
905 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
906 }
907 return gen_rtx_CONST_VECTOR (mode, v);
908 }
909 }
910
911 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
912 {
913 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
914 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
915 enum machine_mode opmode = GET_MODE (op);
916 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
917 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
918 rtvec v = rtvec_alloc (n_elts);
919 unsigned int i;
920
921 gcc_assert (op_n_elts == n_elts);
922 for (i = 0; i < n_elts; i++)
923 {
924 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
925 CONST_VECTOR_ELT (op, i),
926 GET_MODE_INNER (opmode));
927 if (!x)
928 return 0;
929 RTVEC_ELT (v, i) = x;
930 }
931 return gen_rtx_CONST_VECTOR (mode, v);
932 }
933
934 /* The order of these tests is critical so that, for example, we don't
935 check the wrong mode (input vs. output) for a conversion operation,
936 such as FIX. At some point, this should be simplified. */
937
938 if (code == FLOAT && GET_MODE (op) == VOIDmode
939 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
940 {
941 HOST_WIDE_INT hv, lv;
942 REAL_VALUE_TYPE d;
943
944 if (GET_CODE (op) == CONST_INT)
945 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
946 else
947 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
948
949 REAL_VALUE_FROM_INT (d, lv, hv, mode);
950 d = real_value_truncate (mode, d);
951 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
952 }
953 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
954 && (GET_CODE (op) == CONST_DOUBLE
955 || GET_CODE (op) == CONST_INT))
956 {
957 HOST_WIDE_INT hv, lv;
958 REAL_VALUE_TYPE d;
959
960 if (GET_CODE (op) == CONST_INT)
961 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
962 else
963 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
964
965 if (op_mode == VOIDmode)
966 {
967 /* We don't know how to interpret negative-looking numbers in
968 this case, so don't try to fold those. */
969 if (hv < 0)
970 return 0;
971 }
972 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
973 ;
974 else
975 hv = 0, lv &= GET_MODE_MASK (op_mode);
976
977 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
978 d = real_value_truncate (mode, d);
979 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
980 }
981
982 if (GET_CODE (op) == CONST_INT
983 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
984 {
985 HOST_WIDE_INT arg0 = INTVAL (op);
986 HOST_WIDE_INT val;
987
988 switch (code)
989 {
990 case NOT:
991 val = ~ arg0;
992 break;
993
994 case NEG:
995 val = - arg0;
996 break;
997
998 case ABS:
999 val = (arg0 >= 0 ? arg0 : - arg0);
1000 break;
1001
1002 case FFS:
1003 /* Don't use ffs here. Instead, get low order bit and then its
1004 number. If arg0 is zero, this will return 0, as desired. */
1005 arg0 &= GET_MODE_MASK (mode);
1006 val = exact_log2 (arg0 & (- arg0)) + 1;
1007 break;
1008
1009 case CLZ:
1010 arg0 &= GET_MODE_MASK (mode);
1011 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1012 ;
1013 else
1014 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1015 break;
1016
1017 case CTZ:
1018 arg0 &= GET_MODE_MASK (mode);
1019 if (arg0 == 0)
1020 {
1021 /* Even if the value at zero is undefined, we have to come
1022 up with some replacement. Seems good enough. */
1023 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1024 val = GET_MODE_BITSIZE (mode);
1025 }
1026 else
1027 val = exact_log2 (arg0 & -arg0);
1028 break;
1029
1030 case POPCOUNT:
1031 arg0 &= GET_MODE_MASK (mode);
1032 val = 0;
1033 while (arg0)
1034 val++, arg0 &= arg0 - 1;
1035 break;
1036
1037 case PARITY:
1038 arg0 &= GET_MODE_MASK (mode);
1039 val = 0;
1040 while (arg0)
1041 val++, arg0 &= arg0 - 1;
1042 val &= 1;
1043 break;
1044
1045 case TRUNCATE:
1046 val = arg0;
1047 break;
1048
1049 case ZERO_EXTEND:
1050 /* When zero-extending a CONST_INT, we need to know its
1051 original mode. */
1052 gcc_assert (op_mode != VOIDmode);
1053 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1054 {
1055 /* If we were really extending the mode,
1056 we would have to distinguish between zero-extension
1057 and sign-extension. */
1058 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1059 val = arg0;
1060 }
1061 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1062 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1063 else
1064 return 0;
1065 break;
1066
1067 case SIGN_EXTEND:
1068 if (op_mode == VOIDmode)
1069 op_mode = mode;
1070 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1071 {
1072 /* If we were really extending the mode,
1073 we would have to distinguish between zero-extension
1074 and sign-extension. */
1075 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1076 val = arg0;
1077 }
1078 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1079 {
1080 val
1081 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1082 if (val
1083 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1084 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1085 }
1086 else
1087 return 0;
1088 break;
1089
1090 case SQRT:
1091 case FLOAT_EXTEND:
1092 case FLOAT_TRUNCATE:
1093 case SS_TRUNCATE:
1094 case US_TRUNCATE:
1095 case SS_NEG:
1096 return 0;
1097
1098 default:
1099 gcc_unreachable ();
1100 }
1101
1102 return gen_int_mode (val, mode);
1103 }
1104
1105 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1106 for a DImode operation on a CONST_INT. */
1107 else if (GET_MODE (op) == VOIDmode
1108 && width <= HOST_BITS_PER_WIDE_INT * 2
1109 && (GET_CODE (op) == CONST_DOUBLE
1110 || GET_CODE (op) == CONST_INT))
1111 {
1112 unsigned HOST_WIDE_INT l1, lv;
1113 HOST_WIDE_INT h1, hv;
1114
1115 if (GET_CODE (op) == CONST_DOUBLE)
1116 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1117 else
1118 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1119
1120 switch (code)
1121 {
1122 case NOT:
1123 lv = ~ l1;
1124 hv = ~ h1;
1125 break;
1126
1127 case NEG:
1128 neg_double (l1, h1, &lv, &hv);
1129 break;
1130
1131 case ABS:
1132 if (h1 < 0)
1133 neg_double (l1, h1, &lv, &hv);
1134 else
1135 lv = l1, hv = h1;
1136 break;
1137
1138 case FFS:
1139 hv = 0;
1140 if (l1 == 0)
1141 {
1142 if (h1 == 0)
1143 lv = 0;
1144 else
1145 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1146 }
1147 else
1148 lv = exact_log2 (l1 & -l1) + 1;
1149 break;
1150
1151 case CLZ:
1152 hv = 0;
1153 if (h1 != 0)
1154 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1155 - HOST_BITS_PER_WIDE_INT;
1156 else if (l1 != 0)
1157 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1158 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1159 lv = GET_MODE_BITSIZE (mode);
1160 break;
1161
1162 case CTZ:
1163 hv = 0;
1164 if (l1 != 0)
1165 lv = exact_log2 (l1 & -l1);
1166 else if (h1 != 0)
1167 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1168 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1169 lv = GET_MODE_BITSIZE (mode);
1170 break;
1171
1172 case POPCOUNT:
1173 hv = 0;
1174 lv = 0;
1175 while (l1)
1176 lv++, l1 &= l1 - 1;
1177 while (h1)
1178 lv++, h1 &= h1 - 1;
1179 break;
1180
1181 case PARITY:
1182 hv = 0;
1183 lv = 0;
1184 while (l1)
1185 lv++, l1 &= l1 - 1;
1186 while (h1)
1187 lv++, h1 &= h1 - 1;
1188 lv &= 1;
1189 break;
1190
1191 case TRUNCATE:
1192 /* This is just a change-of-mode, so do nothing. */
1193 lv = l1, hv = h1;
1194 break;
1195
1196 case ZERO_EXTEND:
1197 gcc_assert (op_mode != VOIDmode);
1198
1199 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1200 return 0;
1201
1202 hv = 0;
1203 lv = l1 & GET_MODE_MASK (op_mode);
1204 break;
1205
1206 case SIGN_EXTEND:
1207 if (op_mode == VOIDmode
1208 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1209 return 0;
1210 else
1211 {
1212 lv = l1 & GET_MODE_MASK (op_mode);
1213 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1214 && (lv & ((HOST_WIDE_INT) 1
1215 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1216 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1217
1218 hv = HWI_SIGN_EXTEND (lv);
1219 }
1220 break;
1221
1222 case SQRT:
1223 return 0;
1224
1225 default:
1226 return 0;
1227 }
1228
1229 return immed_double_const (lv, hv, mode);
1230 }
1231
1232 else if (GET_CODE (op) == CONST_DOUBLE
1233 && SCALAR_FLOAT_MODE_P (mode))
1234 {
1235 REAL_VALUE_TYPE d, t;
1236 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1237
1238 switch (code)
1239 {
1240 case SQRT:
1241 if (HONOR_SNANS (mode) && real_isnan (&d))
1242 return 0;
1243 real_sqrt (&t, mode, &d);
1244 d = t;
1245 break;
1246 case ABS:
1247 d = REAL_VALUE_ABS (d);
1248 break;
1249 case NEG:
1250 d = REAL_VALUE_NEGATE (d);
1251 break;
1252 case FLOAT_TRUNCATE:
1253 d = real_value_truncate (mode, d);
1254 break;
1255 case FLOAT_EXTEND:
1256 /* All this does is change the mode. */
1257 break;
1258 case FIX:
1259 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1260 break;
1261 case NOT:
1262 {
1263 long tmp[4];
1264 int i;
1265
1266 real_to_target (tmp, &d, GET_MODE (op));
1267 for (i = 0; i < 4; i++)
1268 tmp[i] = ~tmp[i];
1269 real_from_target (&d, tmp, mode);
1270 break;
1271 }
1272 default:
1273 gcc_unreachable ();
1274 }
1275 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1276 }
1277
1278 else if (GET_CODE (op) == CONST_DOUBLE
1279 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1280 && GET_MODE_CLASS (mode) == MODE_INT
1281 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1282 {
1283 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1284 operators are intentionally left unspecified (to ease implementation
1285 by target backends), for consistency, this routine implements the
1286 same semantics for constant folding as used by the middle-end. */
1287
1288 /* This was formerly used only for non-IEEE float.
1289 eggert@twinsun.com says it is safe for IEEE also. */
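/* Concretely: NaN folds to zero, and values outside the target mode's
   range saturate to its minimum or maximum representable integer
   (to zero or the unsigned maximum for UNSIGNED_FIX), as computed by
   the bound checks below.  */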
1290 HOST_WIDE_INT xh, xl, th, tl;
1291 REAL_VALUE_TYPE x, t;
1292 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1293 switch (code)
1294 {
1295 case FIX:
1296 if (REAL_VALUE_ISNAN (x))
1297 return const0_rtx;
1298
1299 /* Test against the signed upper bound. */
1300 if (width > HOST_BITS_PER_WIDE_INT)
1301 {
1302 th = ((unsigned HOST_WIDE_INT) 1
1303 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1304 tl = -1;
1305 }
1306 else
1307 {
1308 th = 0;
1309 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1310 }
1311 real_from_integer (&t, VOIDmode, tl, th, 0);
1312 if (REAL_VALUES_LESS (t, x))
1313 {
1314 xh = th;
1315 xl = tl;
1316 break;
1317 }
1318
1319 /* Test against the signed lower bound. */
1320 if (width > HOST_BITS_PER_WIDE_INT)
1321 {
1322 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1323 tl = 0;
1324 }
1325 else
1326 {
1327 th = -1;
1328 tl = (HOST_WIDE_INT) -1 << (width - 1);
1329 }
1330 real_from_integer (&t, VOIDmode, tl, th, 0);
1331 if (REAL_VALUES_LESS (x, t))
1332 {
1333 xh = th;
1334 xl = tl;
1335 break;
1336 }
1337 REAL_VALUE_TO_INT (&xl, &xh, x);
1338 break;
1339
1340 case UNSIGNED_FIX:
1341 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1342 return const0_rtx;
1343
1344 /* Test against the unsigned upper bound. */
1345 if (width == 2*HOST_BITS_PER_WIDE_INT)
1346 {
1347 th = -1;
1348 tl = -1;
1349 }
1350 else if (width >= HOST_BITS_PER_WIDE_INT)
1351 {
1352 th = ((unsigned HOST_WIDE_INT) 1
1353 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1354 tl = -1;
1355 }
1356 else
1357 {
1358 th = 0;
1359 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1360 }
1361 real_from_integer (&t, VOIDmode, tl, th, 1);
1362 if (REAL_VALUES_LESS (t, x))
1363 {
1364 xh = th;
1365 xl = tl;
1366 break;
1367 }
1368
1369 REAL_VALUE_TO_INT (&xl, &xh, x);
1370 break;
1371
1372 default:
1373 gcc_unreachable ();
1374 }
1375 return immed_double_const (xl, xh, mode);
1376 }
1377
1378 return NULL_RTX;
1379 }
1380 \f
1381 /* Subroutine of simplify_binary_operation to simplify a commutative,
1382 associative binary operation CODE with result mode MODE, operating
1383 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1384 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1385 canonicalization is possible. */
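/* For example, (plus (plus X 5) 7) is simplified here to (plus X 12),
   and (and (and X 12) 10) to (and X 8).  */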
1386
1387 static rtx
1388 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1389 rtx op0, rtx op1)
1390 {
1391 rtx tem;
1392
1393 /* Linearize the operator to the left. */
1394 if (GET_CODE (op1) == code)
1395 {
1396 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1397 if (GET_CODE (op0) == code)
1398 {
1399 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1400 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1401 }
1402
1403 /* "a op (b op c)" becomes "(b op c) op a". */
1404 if (! swap_commutative_operands_p (op1, op0))
1405 return simplify_gen_binary (code, mode, op1, op0);
1406
1407 tem = op0;
1408 op0 = op1;
1409 op1 = tem;
1410 }
1411
1412 if (GET_CODE (op0) == code)
1413 {
1414 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1415 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1416 {
1417 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1418 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1419 }
1420
1421 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1422 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1423 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1424 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1425 if (tem != 0)
1426 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1427
1428 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1429 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1430 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1431 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1432 if (tem != 0)
1433 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1434 }
1435
1436 return 0;
1437 }
1438
1439
1440 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1441 and OP1. Return 0 if no simplification is possible.
1442
1443 Don't use this for relational operations such as EQ or LT.
1444 Use simplify_relational_operation instead. */
1445 rtx
1446 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1447 rtx op0, rtx op1)
1448 {
1449 rtx trueop0, trueop1;
1450 rtx tem;
1451
1452 /* Relational operations don't work here. We must know the mode
1453 of the operands in order to do the comparison correctly.
1454 Assuming a full word can give incorrect results.
1455 Consider comparing 128 with -128 in QImode. */
1456 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1457 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1458
1459 /* Make sure the constant is second. */
1460 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1461 && swap_commutative_operands_p (op0, op1))
1462 {
1463 tem = op0, op0 = op1, op1 = tem;
1464 }
1465
1466 trueop0 = avoid_constant_pool_reference (op0);
1467 trueop1 = avoid_constant_pool_reference (op1);
1468
1469 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1470 if (tem)
1471 return tem;
1472 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1473 }
1474
1475 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1476 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1477 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1478 actual constants. */
1479
1480 static rtx
1481 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1482 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1483 {
1484 rtx tem, reversed, opleft, opright;
1485 HOST_WIDE_INT val;
1486 unsigned int width = GET_MODE_BITSIZE (mode);
1487
1488 /* Even if we can't compute a constant result,
1489 there are some cases worth simplifying. */
1490
1491 switch (code)
1492 {
1493 case PLUS:
1494 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1495 when x is NaN, infinite, or finite and nonzero. They aren't
1496 when x is -0 and the rounding mode is not towards -infinity,
1497 since (-0) + 0 is then 0. */
1498 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1499 return op0;
1500
1501 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1502 transformations are safe even for IEEE. */
1503 if (GET_CODE (op0) == NEG)
1504 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1505 else if (GET_CODE (op1) == NEG)
1506 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1507
1508 /* (~a) + 1 -> -a */
1509 if (INTEGRAL_MODE_P (mode)
1510 && GET_CODE (op0) == NOT
1511 && trueop1 == const1_rtx)
1512 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1513
1514 /* Handle both-operands-constant cases. We can only add
1515 CONST_INTs to constants since the sum of relocatable symbols
1516 can't be handled by most assemblers. Don't add CONST_INT
1517 to CONST_INT since overflow won't be computed properly if wider
1518 than HOST_BITS_PER_WIDE_INT. */
1519
1520 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1521 && GET_CODE (op1) == CONST_INT)
1522 return plus_constant (op0, INTVAL (op1));
1523 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1524 && GET_CODE (op0) == CONST_INT)
1525 return plus_constant (op1, INTVAL (op0));
1526
1527 /* See if this is something like X * C - X or vice versa or
1528 if the multiplication is written as a shift. If so, we can
1529 distribute and make a new multiply, shift, or maybe just
1530 have X (if C is 2 in the example above). But don't make
1531 something more expensive than we had before. */
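/* For example, (plus (mult X 3) X) can fold to (mult X 4), and
   (plus (ashift X 2) X) to (mult X 5), provided the multiply is not
   judged more expensive than the original expression.  */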
1532
1533 if (SCALAR_INT_MODE_P (mode))
1534 {
1535 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1536 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1537 rtx lhs = op0, rhs = op1;
1538
1539 if (GET_CODE (lhs) == NEG)
1540 {
1541 coeff0l = -1;
1542 coeff0h = -1;
1543 lhs = XEXP (lhs, 0);
1544 }
1545 else if (GET_CODE (lhs) == MULT
1546 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1547 {
1548 coeff0l = INTVAL (XEXP (lhs, 1));
1549 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1550 lhs = XEXP (lhs, 0);
1551 }
1552 else if (GET_CODE (lhs) == ASHIFT
1553 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1554 && INTVAL (XEXP (lhs, 1)) >= 0
1555 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1556 {
1557 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1558 coeff0h = 0;
1559 lhs = XEXP (lhs, 0);
1560 }
1561
1562 if (GET_CODE (rhs) == NEG)
1563 {
1564 coeff1l = -1;
1565 coeff1h = -1;
1566 rhs = XEXP (rhs, 0);
1567 }
1568 else if (GET_CODE (rhs) == MULT
1569 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1570 {
1571 coeff1l = INTVAL (XEXP (rhs, 1));
1572 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1573 rhs = XEXP (rhs, 0);
1574 }
1575 else if (GET_CODE (rhs) == ASHIFT
1576 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1577 && INTVAL (XEXP (rhs, 1)) >= 0
1578 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1579 {
1580 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1581 coeff1h = 0;
1582 rhs = XEXP (rhs, 0);
1583 }
1584
1585 if (rtx_equal_p (lhs, rhs))
1586 {
1587 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1588 rtx coeff;
1589 unsigned HOST_WIDE_INT l;
1590 HOST_WIDE_INT h;
1591
1592 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1593 coeff = immed_double_const (l, h, mode);
1594
1595 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1596 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1597 ? tem : 0;
1598 }
1599 }
1600
1601 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1602 if ((GET_CODE (op1) == CONST_INT
1603 || GET_CODE (op1) == CONST_DOUBLE)
1604 && GET_CODE (op0) == XOR
1605 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1606 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1607 && mode_signbit_p (mode, op1))
1608 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1609 simplify_gen_binary (XOR, mode, op1,
1610 XEXP (op0, 1)));
1611
1612 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1613 if (GET_CODE (op0) == MULT
1614 && GET_CODE (XEXP (op0, 0)) == NEG)
1615 {
1616 rtx in1, in2;
1617
1618 in1 = XEXP (XEXP (op0, 0), 0);
1619 in2 = XEXP (op0, 1);
1620 return simplify_gen_binary (MINUS, mode, op1,
1621 simplify_gen_binary (MULT, mode,
1622 in1, in2));
1623 }
1624
1625 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1626 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1627 is 1. */
1628 if (COMPARISON_P (op0)
1629 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1630 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1631 && (reversed = reversed_comparison (op0, mode)))
1632 return
1633 simplify_gen_unary (NEG, mode, reversed, mode);
1634
1635 /* If one of the operands is a PLUS or a MINUS, see if we can
1636 simplify this by the associative law.
1637 Don't use the associative law for floating point.
1638 The inaccuracy makes it nonassociative,
1639 and subtle programs can break if operations are associated. */
1640
1641 if (INTEGRAL_MODE_P (mode)
1642 && (plus_minus_operand_p (op0)
1643 || plus_minus_operand_p (op1))
1644 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1645 return tem;
1646
1647 /* Reassociate floating point addition only when the user
1648 specifies unsafe math optimizations. */
1649 if (FLOAT_MODE_P (mode)
1650 && flag_unsafe_math_optimizations)
1651 {
1652 tem = simplify_associative_operation (code, mode, op0, op1);
1653 if (tem)
1654 return tem;
1655 }
1656 break;
1657
1658 case COMPARE:
1659 #ifdef HAVE_cc0
 1660 /* Convert (compare FOO (const_int 0)) to FOO when using cc0; without
 1661 cc0 we want to leave it as a COMPARE so we can distinguish it
 1662 from a register-register copy.
1663
1664 In IEEE floating point, x-0 is not the same as x. */
1665
1666 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1667 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1668 && trueop1 == CONST0_RTX (mode))
1669 return op0;
1670 #endif
1671
1672 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1673 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1674 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1675 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1676 {
1677 rtx xop00 = XEXP (op0, 0);
1678 rtx xop10 = XEXP (op1, 0);
1679
1680 #ifdef HAVE_cc0
1681 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1682 #else
1683 if (REG_P (xop00) && REG_P (xop10)
1684 && GET_MODE (xop00) == GET_MODE (xop10)
1685 && REGNO (xop00) == REGNO (xop10)
1686 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1687 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1688 #endif
1689 return xop00;
1690 }
1691 break;
1692
1693 case MINUS:
1694 /* We can't assume x-x is 0 even with non-IEEE floating point,
1695 but since it is zero except in very strange circumstances, we
1696 will treat it as zero with -funsafe-math-optimizations. */
1697 if (rtx_equal_p (trueop0, trueop1)
1698 && ! side_effects_p (op0)
1699 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1700 return CONST0_RTX (mode);
1701
1702 /* Change subtraction from zero into negation. (0 - x) is the
1703 same as -x when x is NaN, infinite, or finite and nonzero.
1704 But if the mode has signed zeros, and does not round towards
1705 -infinity, then 0 - 0 is 0, not -0. */
1706 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1707 return simplify_gen_unary (NEG, mode, op1, mode);
1708
1709 /* (-1 - a) is ~a. */
1710 if (trueop0 == constm1_rtx)
1711 return simplify_gen_unary (NOT, mode, op1, mode);
1712
1713 /* Subtracting 0 has no effect unless the mode has signed zeros
1714 and supports rounding towards -infinity. In such a case,
1715 0 - 0 is -0. */
1716 if (!(HONOR_SIGNED_ZEROS (mode)
1717 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1718 && trueop1 == CONST0_RTX (mode))
1719 return op0;
1720
1721 /* See if this is something like X * C - X or vice versa or
1722 if the multiplication is written as a shift. If so, we can
1723 distribute and make a new multiply, shift, or maybe just
1724 have X (if C is 2 in the example above). But don't make
1725 something more expensive than we had before. */
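/* For example, (minus (mult X 3) X) can fold to (mult X 2) and
   (minus X (mult X 3)) to (mult X -2), subject to the same cost check
   as in the PLUS case above.  */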
1726
1727 if (SCALAR_INT_MODE_P (mode))
1728 {
1729 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1730 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1731 rtx lhs = op0, rhs = op1;
1732
1733 if (GET_CODE (lhs) == NEG)
1734 {
1735 coeff0l = -1;
1736 coeff0h = -1;
1737 lhs = XEXP (lhs, 0);
1738 }
1739 else if (GET_CODE (lhs) == MULT
1740 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1741 {
1742 coeff0l = INTVAL (XEXP (lhs, 1));
1743 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1744 lhs = XEXP (lhs, 0);
1745 }
1746 else if (GET_CODE (lhs) == ASHIFT
1747 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1748 && INTVAL (XEXP (lhs, 1)) >= 0
1749 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1750 {
1751 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1752 coeff0h = 0;
1753 lhs = XEXP (lhs, 0);
1754 }
1755
1756 if (GET_CODE (rhs) == NEG)
1757 {
1758 negcoeff1l = 1;
1759 negcoeff1h = 0;
1760 rhs = XEXP (rhs, 0);
1761 }
1762 else if (GET_CODE (rhs) == MULT
1763 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1764 {
1765 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1766 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1767 rhs = XEXP (rhs, 0);
1768 }
1769 else if (GET_CODE (rhs) == ASHIFT
1770 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1771 && INTVAL (XEXP (rhs, 1)) >= 0
1772 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1773 {
1774 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1775 negcoeff1h = -1;
1776 rhs = XEXP (rhs, 0);
1777 }
1778
1779 if (rtx_equal_p (lhs, rhs))
1780 {
1781 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1782 rtx coeff;
1783 unsigned HOST_WIDE_INT l;
1784 HOST_WIDE_INT h;
1785
1786 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1787 coeff = immed_double_const (l, h, mode);
1788
1789 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1790 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1791 ? tem : 0;
1792 }
1793 }
1794
1795 /* (a - (-b)) -> (a + b). True even for IEEE. */
1796 if (GET_CODE (op1) == NEG)
1797 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1798
1799 /* (-x - c) may be simplified as (-c - x). */
1800 if (GET_CODE (op0) == NEG
1801 && (GET_CODE (op1) == CONST_INT
1802 || GET_CODE (op1) == CONST_DOUBLE))
1803 {
1804 tem = simplify_unary_operation (NEG, mode, op1, mode);
1805 if (tem)
1806 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1807 }
1808
1809 /* Don't let a relocatable value get a negative coeff. */
1810 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1811 return simplify_gen_binary (PLUS, mode,
1812 op0,
1813 neg_const_int (mode, op1));
1814
1815 /* (x - (x & y)) -> (x & ~y) */
1816 if (GET_CODE (op1) == AND)
1817 {
1818 if (rtx_equal_p (op0, XEXP (op1, 0)))
1819 {
1820 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1821 GET_MODE (XEXP (op1, 1)));
1822 return simplify_gen_binary (AND, mode, op0, tem);
1823 }
1824 if (rtx_equal_p (op0, XEXP (op1, 1)))
1825 {
1826 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1827 GET_MODE (XEXP (op1, 0)));
1828 return simplify_gen_binary (AND, mode, op0, tem);
1829 }
1830 }
1831
1832 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1833 by reversing the comparison code if valid. */
1834 if (STORE_FLAG_VALUE == 1
1835 && trueop0 == const1_rtx
1836 && COMPARISON_P (op1)
1837 && (reversed = reversed_comparison (op1, mode)))
1838 return reversed;
1839
1840 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1841 if (GET_CODE (op1) == MULT
1842 && GET_CODE (XEXP (op1, 0)) == NEG)
1843 {
1844 rtx in1, in2;
1845
1846 in1 = XEXP (XEXP (op1, 0), 0);
1847 in2 = XEXP (op1, 1);
1848 return simplify_gen_binary (PLUS, mode,
1849 simplify_gen_binary (MULT, mode,
1850 in1, in2),
1851 op0);
1852 }
1853
1854 /* Canonicalize (minus (neg A) (mult B C)) to
1855 (minus (mult (neg B) C) A). */
1856 if (GET_CODE (op1) == MULT
1857 && GET_CODE (op0) == NEG)
1858 {
1859 rtx in1, in2;
1860
1861 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1862 in2 = XEXP (op1, 1);
1863 return simplify_gen_binary (MINUS, mode,
1864 simplify_gen_binary (MULT, mode,
1865 in1, in2),
1866 XEXP (op0, 0));
1867 }
1868
1869 /* If one of the operands is a PLUS or a MINUS, see if we can
1870 simplify this by the associative law. This will, for example,
1871 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1872 Don't use the associative law for floating point.
1873 The inaccuracy makes it nonassociative,
1874 and subtle programs can break if operations are associated. */
1875
1876 if (INTEGRAL_MODE_P (mode)
1877 && (plus_minus_operand_p (op0)
1878 || plus_minus_operand_p (op1))
1879 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1880 return tem;
1881 break;
1882
1883 case MULT:
1884 if (trueop1 == constm1_rtx)
1885 return simplify_gen_unary (NEG, mode, op0, mode);
1886
1887 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1888 x is NaN, since x * 0 is then also NaN. Nor is it valid
1889 when the mode has signed zeros, since multiplying a negative
1890 number by 0 will give -0, not 0. */
1891 if (!HONOR_NANS (mode)
1892 && !HONOR_SIGNED_ZEROS (mode)
1893 && trueop1 == CONST0_RTX (mode)
1894 && ! side_effects_p (op0))
1895 return op1;
1896
1897 /* In IEEE floating point, x*1 is not equivalent to x for
1898 signalling NaNs. */
1899 if (!HONOR_SNANS (mode)
1900 && trueop1 == CONST1_RTX (mode))
1901 return op0;
1902
1903 /* Convert a multiplication by a constant power of two into
1904 a shift. */
1905 if (GET_CODE (trueop1) == CONST_INT
1906 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1907 /* If the mode is larger than the host word size, and the
1908 uppermost bit is set, then this isn't a power of two due
1909 to implicit sign extension. */
1910 && (width <= HOST_BITS_PER_WIDE_INT
1911 || val != HOST_BITS_PER_WIDE_INT - 1))
1912 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1913
1914 /* Likewise for multipliers wider than a word. */
1915 if (GET_CODE (trueop1) == CONST_DOUBLE
1916 && (GET_MODE (trueop1) == VOIDmode
1917 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1918 && GET_MODE (op0) == mode
1919 && CONST_DOUBLE_LOW (trueop1) == 0
1920 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1921 return simplify_gen_binary (ASHIFT, mode, op0,
1922 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1923
1924 /* x*2 is x+x and x*(-1) is -x */
1925 if (GET_CODE (trueop1) == CONST_DOUBLE
1926 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1927 && GET_MODE (op0) == mode)
1928 {
1929 REAL_VALUE_TYPE d;
1930 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1931
1932 if (REAL_VALUES_EQUAL (d, dconst2))
1933 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1934
1935 if (!HONOR_SNANS (mode)
1936 && REAL_VALUES_EQUAL (d, dconstm1))
1937 return simplify_gen_unary (NEG, mode, op0, mode);
1938 }
1939
1940 /* Optimize -x * -x as x * x. */
1941 if (FLOAT_MODE_P (mode)
1942 && GET_CODE (op0) == NEG
1943 && GET_CODE (op1) == NEG
1944 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1945 && !side_effects_p (XEXP (op0, 0)))
1946 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1947
1948 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1949 if (SCALAR_FLOAT_MODE_P (mode)
1950 && GET_CODE (op0) == ABS
1951 && GET_CODE (op1) == ABS
1952 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1953 && !side_effects_p (XEXP (op0, 0)))
1954 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1955
1956 /* Reassociate multiplication, but for floating point MULTs
1957 only when the user specifies unsafe math optimizations. */
1958 if (! FLOAT_MODE_P (mode)
1959 || flag_unsafe_math_optimizations)
1960 {
1961 tem = simplify_associative_operation (code, mode, op0, op1);
1962 if (tem)
1963 return tem;
1964 }
1965 break;
1966
1967 case IOR:
1968 if (trueop1 == const0_rtx)
1969 return op0;
1970 if (GET_CODE (trueop1) == CONST_INT
1971 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1972 == GET_MODE_MASK (mode)))
1973 return op1;
1974 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1975 return op0;
1976 /* A | (~A) -> -1 */
1977 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1978 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1979 && ! side_effects_p (op0)
1980 && SCALAR_INT_MODE_P (mode))
1981 return constm1_rtx;
1982
1983 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1984 if (GET_CODE (op1) == CONST_INT
1985 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1986 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1987 return op1;
1988
1989 /* Convert (A & B) | A to A. */
1990 if (GET_CODE (op0) == AND
1991 && (rtx_equal_p (XEXP (op0, 0), op1)
1992 || rtx_equal_p (XEXP (op0, 1), op1))
1993 && ! side_effects_p (XEXP (op0, 0))
1994 && ! side_effects_p (XEXP (op0, 1)))
1995 return op1;
1996
1997 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1998 mode size to (rotate A CX). */
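/* For example, in SImode
   (ior (ashift x (const_int 8)) (lshiftrt x (const_int 24)))
   becomes (rotate x (const_int 8)), since 8 + 24 == 32.  */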
1999
2000 if (GET_CODE (op1) == ASHIFT
2001 || GET_CODE (op1) == SUBREG)
2002 {
2003 opleft = op1;
2004 opright = op0;
2005 }
2006 else
2007 {
2008 opright = op1;
2009 opleft = op0;
2010 }
2011
2012 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2013 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2014 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2015 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2016 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2017 == GET_MODE_BITSIZE (mode)))
2018 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2019
2020 /* Same, but for ashift that has been "simplified" to a wider mode
2021 by simplify_shift_const. */
2022
2023 if (GET_CODE (opleft) == SUBREG
2024 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2025 && GET_CODE (opright) == LSHIFTRT
2026 && GET_CODE (XEXP (opright, 0)) == SUBREG
2027 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2028 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2029 && (GET_MODE_SIZE (GET_MODE (opleft))
2030 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2031 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2032 SUBREG_REG (XEXP (opright, 0)))
2033 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2034 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2035 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2036 == GET_MODE_BITSIZE (mode)))
2037 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2038 XEXP (SUBREG_REG (opleft), 1));
2039
2040 /* If we have (ior (and X C1) C2), simplify this by making
2041 C1 as small as possible if C1 actually changes. */
2042 if (GET_CODE (op1) == CONST_INT
2043 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2044 || INTVAL (op1) > 0)
2045 && GET_CODE (op0) == AND
2046 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2047 && GET_CODE (op1) == CONST_INT
2048 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2049 return simplify_gen_binary (IOR, mode,
2050 simplify_gen_binary
2051 (AND, mode, XEXP (op0, 0),
2052 GEN_INT (INTVAL (XEXP (op0, 1))
2053 & ~INTVAL (op1))),
2054 op1);
2055
2056 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2057 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2058 the PLUS does not affect any of the bits in OP1: then we can do
2059 the IOR as a PLUS and we can associate. This is valid if OP1
2060 can be safely shifted left C bits. */
2061 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2062 && GET_CODE (XEXP (op0, 0)) == PLUS
2063 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2064 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2065 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2066 {
2067 int count = INTVAL (XEXP (op0, 1));
2068 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2069
2070 if (mask >> count == INTVAL (trueop1)
2071 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2072 return simplify_gen_binary (ASHIFTRT, mode,
2073 plus_constant (XEXP (op0, 0), mask),
2074 XEXP (op0, 1));
2075 }
2076
2077 tem = simplify_associative_operation (code, mode, op0, op1);
2078 if (tem)
2079 return tem;
2080 break;
2081
2082 case XOR:
2083 if (trueop1 == const0_rtx)
2084 return op0;
2085 if (GET_CODE (trueop1) == CONST_INT
2086 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2087 == GET_MODE_MASK (mode)))
2088 return simplify_gen_unary (NOT, mode, op0, mode);
2089 if (rtx_equal_p (trueop0, trueop1)
2090 && ! side_effects_p (op0)
2091 && GET_MODE_CLASS (mode) != MODE_CC)
2092 return CONST0_RTX (mode);
2093
2094 /* Canonicalize XOR of the most significant bit to PLUS. */
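/* E.g. in SImode, (xor x (const_int -2147483648)) becomes
   (plus x (const_int -2147483648)): flipping the sign bit is the
   same as adding it modulo 2**32.  */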
2095 if ((GET_CODE (op1) == CONST_INT
2096 || GET_CODE (op1) == CONST_DOUBLE)
2097 && mode_signbit_p (mode, op1))
2098 return simplify_gen_binary (PLUS, mode, op0, op1);
2099 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2100 if ((GET_CODE (op1) == CONST_INT
2101 || GET_CODE (op1) == CONST_DOUBLE)
2102 && GET_CODE (op0) == PLUS
2103 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2104 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2105 && mode_signbit_p (mode, XEXP (op0, 1)))
2106 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2107 simplify_gen_binary (XOR, mode, op1,
2108 XEXP (op0, 1)));
2109
2110 /* If we are XORing two things that have no bits in common,
2111 convert them into an IOR. This helps to detect rotation encoded
2112 using those methods and possibly other simplifications. */
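/* E.g. (xor (and x (const_int 255)) (ashift y (const_int 8))) in SImode
   becomes the corresponding IOR, since nonzero_bits shows the two
   operands have no nonzero bits in common.  */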
2113
2114 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2115 && (nonzero_bits (op0, mode)
2116 & nonzero_bits (op1, mode)) == 0)
2117 return (simplify_gen_binary (IOR, mode, op0, op1));
2118
2119 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2120 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2121 (NOT y). */
2122 {
2123 int num_negated = 0;
2124
2125 if (GET_CODE (op0) == NOT)
2126 num_negated++, op0 = XEXP (op0, 0);
2127 if (GET_CODE (op1) == NOT)
2128 num_negated++, op1 = XEXP (op1, 0);
2129
2130 if (num_negated == 2)
2131 return simplify_gen_binary (XOR, mode, op0, op1);
2132 else if (num_negated == 1)
2133 return simplify_gen_unary (NOT, mode,
2134 simplify_gen_binary (XOR, mode, op0, op1),
2135 mode);
2136 }
2137
2138 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2139 correspond to a machine insn or result in further simplifications
2140 if B is a constant. */
2141
2142 if (GET_CODE (op0) == AND
2143 && rtx_equal_p (XEXP (op0, 1), op1)
2144 && ! side_effects_p (op1))
2145 return simplify_gen_binary (AND, mode,
2146 simplify_gen_unary (NOT, mode,
2147 XEXP (op0, 0), mode),
2148 op1);
2149
2150 else if (GET_CODE (op0) == AND
2151 && rtx_equal_p (XEXP (op0, 0), op1)
2152 && ! side_effects_p (op1))
2153 return simplify_gen_binary (AND, mode,
2154 simplify_gen_unary (NOT, mode,
2155 XEXP (op0, 1), mode),
2156 op1);
2157
2158 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2159 comparison if STORE_FLAG_VALUE is 1. */
2160 if (STORE_FLAG_VALUE == 1
2161 && trueop1 == const1_rtx
2162 && COMPARISON_P (op0)
2163 && (reversed = reversed_comparison (op0, mode)))
2164 return reversed;
2165
2166 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2167 is (lt foo (const_int 0)), so we can perform the above
2168 simplification if STORE_FLAG_VALUE is 1. */
2169
2170 if (STORE_FLAG_VALUE == 1
2171 && trueop1 == const1_rtx
2172 && GET_CODE (op0) == LSHIFTRT
2173 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2174 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2175 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2176
2177 /* (xor (comparison foo bar) (const_int sign-bit))
2178 when STORE_FLAG_VALUE is the sign bit. */
2179 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2180 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2181 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2182 && trueop1 == const_true_rtx
2183 && COMPARISON_P (op0)
2184 && (reversed = reversed_comparison (op0, mode)))
2185 return reversed;
2186
2187 tem = simplify_associative_operation (code, mode, op0, op1);
2188 if (tem)
2189 return tem;
2190 break;
2191
2192
2193
2194 case AND:
2195 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2196 return trueop1;
2197 /* If we are turning off bits already known off in OP0, we need
2198 not do an AND. */
2199 if (GET_CODE (trueop1) == CONST_INT
2200 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2201 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2202 return op0;
2203 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2204 && GET_MODE_CLASS (mode) != MODE_CC)
2205 return op0;
2206 /* A & (~A) -> 0 */
2207 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2208 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2209 && ! side_effects_p (op0)
2210 && GET_MODE_CLASS (mode) != MODE_CC)
2211 return CONST0_RTX (mode);
2212
2213 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2214 there are no nonzero bits of C outside of X's mode. */
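/* E.g. (and:SI (zero_extend:SI (reg:QI x)) (const_int 0x7f)) becomes
   (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7f))), and likewise
   for SIGN_EXTEND, since the constant has no bits outside QImode.  */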
2215 if ((GET_CODE (op0) == SIGN_EXTEND
2216 || GET_CODE (op0) == ZERO_EXTEND)
2217 && GET_CODE (trueop1) == CONST_INT
2218 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2219 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2220 & INTVAL (trueop1)) == 0)
2221 {
2222 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2223 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2224 gen_int_mode (INTVAL (trueop1),
2225 imode));
2226 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2227 }
2228
2229 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2230 insn (and may simplify more). */
2231 if (GET_CODE (op0) == XOR
2232 && rtx_equal_p (XEXP (op0, 0), op1)
2233 && ! side_effects_p (op1))
2234 return simplify_gen_binary (AND, mode,
2235 simplify_gen_unary (NOT, mode,
2236 XEXP (op0, 1), mode),
2237 op1);
2238
2239 if (GET_CODE (op0) == XOR
2240 && rtx_equal_p (XEXP (op0, 1), op1)
2241 && ! side_effects_p (op1))
2242 return simplify_gen_binary (AND, mode,
2243 simplify_gen_unary (NOT, mode,
2244 XEXP (op0, 0), mode),
2245 op1);
2246
2247 /* Similarly for (~(A ^ B)) & A. */
2248 if (GET_CODE (op0) == NOT
2249 && GET_CODE (XEXP (op0, 0)) == XOR
2250 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2251 && ! side_effects_p (op1))
2252 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2253
2254 if (GET_CODE (op0) == NOT
2255 && GET_CODE (XEXP (op0, 0)) == XOR
2256 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2257 && ! side_effects_p (op1))
2258 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2259
2260 /* Convert (A | B) & A to A. */
2261 if (GET_CODE (op0) == IOR
2262 && (rtx_equal_p (XEXP (op0, 0), op1)
2263 || rtx_equal_p (XEXP (op0, 1), op1))
2264 && ! side_effects_p (XEXP (op0, 0))
2265 && ! side_effects_p (XEXP (op0, 1)))
2266 return op1;
2267
2268 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2269 ((A & N) + B) & M -> (A + B) & M
2270 Similarly if (N & M) == 0,
2271 ((A | N) + B) & M -> (A + B) & M
2272 and for - instead of + and/or ^ instead of |. */
2273 if (GET_CODE (trueop1) == CONST_INT
2274 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2275 && ~INTVAL (trueop1)
2276 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2277 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2278 {
2279 rtx pmop[2];
2280 int which;
2281
2282 pmop[0] = XEXP (op0, 0);
2283 pmop[1] = XEXP (op0, 1);
2284
2285 for (which = 0; which < 2; which++)
2286 {
2287 tem = pmop[which];
2288 switch (GET_CODE (tem))
2289 {
2290 case AND:
2291 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2292 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2293 == INTVAL (trueop1))
2294 pmop[which] = XEXP (tem, 0);
2295 break;
2296 case IOR:
2297 case XOR:
2298 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2299 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2300 pmop[which] = XEXP (tem, 0);
2301 break;
2302 default:
2303 break;
2304 }
2305 }
2306
2307 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2308 {
2309 tem = simplify_gen_binary (GET_CODE (op0), mode,
2310 pmop[0], pmop[1]);
2311 return simplify_gen_binary (code, mode, tem, op1);
2312 }
2313 }
2314 tem = simplify_associative_operation (code, mode, op0, op1);
2315 if (tem)
2316 return tem;
2317 break;
2318
2319 case UDIV:
2320 /* 0/x is 0 (or x&0 if x has side-effects). */
2321 if (trueop0 == CONST0_RTX (mode))
2322 {
2323 if (side_effects_p (op1))
2324 return simplify_gen_binary (AND, mode, op1, trueop0);
2325 return trueop0;
2326 }
2327 /* x/1 is x. */
2328 if (trueop1 == CONST1_RTX (mode))
2329 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2330 /* Convert divide by power of two into shift. */
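/* E.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).  */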
2331 if (GET_CODE (trueop1) == CONST_INT
2332 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2333 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2334 break;
2335
2336 case DIV:
2337 /* Handle floating point and integers separately. */
2338 if (SCALAR_FLOAT_MODE_P (mode))
2339 {
2340 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2341 safe for modes with NaNs, since 0.0 / 0.0 will then be
2342 NaN rather than 0.0. Nor is it safe for modes with signed
2343 zeros, since dividing 0 by a negative number gives -0.0 */
2344 if (trueop0 == CONST0_RTX (mode)
2345 && !HONOR_NANS (mode)
2346 && !HONOR_SIGNED_ZEROS (mode)
2347 && ! side_effects_p (op1))
2348 return op0;
2349 /* x/1.0 is x. */
2350 if (trueop1 == CONST1_RTX (mode)
2351 && !HONOR_SNANS (mode))
2352 return op0;
2353
2354 if (GET_CODE (trueop1) == CONST_DOUBLE
2355 && trueop1 != CONST0_RTX (mode))
2356 {
2357 REAL_VALUE_TYPE d;
2358 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2359
2360 /* x/-1.0 is -x. */
2361 if (REAL_VALUES_EQUAL (d, dconstm1)
2362 && !HONOR_SNANS (mode))
2363 return simplify_gen_unary (NEG, mode, op0, mode);
2364
2365 /* Change FP division by a constant into multiplication.
2366 Only do this with -funsafe-math-optimizations. */
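/* E.g. x/4.0 becomes x*0.25.  The reciprocal happens to be exact here,
   but in general it need not be, which is why this is guarded by
   flag_unsafe_math_optimizations.  */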
2367 if (flag_unsafe_math_optimizations
2368 && !REAL_VALUES_EQUAL (d, dconst0))
2369 {
2370 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2371 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2372 return simplify_gen_binary (MULT, mode, op0, tem);
2373 }
2374 }
2375 }
2376 else
2377 {
2378 /* 0/x is 0 (or x&0 if x has side-effects). */
2379 if (trueop0 == CONST0_RTX (mode))
2380 {
2381 if (side_effects_p (op1))
2382 return simplify_gen_binary (AND, mode, op1, trueop0);
2383 return trueop0;
2384 }
2385 /* x/1 is x. */
2386 if (trueop1 == CONST1_RTX (mode))
2387 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2388 /* x/-1 is -x. */
2389 if (trueop1 == constm1_rtx)
2390 {
2391 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2392 return simplify_gen_unary (NEG, mode, x, mode);
2393 }
2394 }
2395 break;
2396
2397 case UMOD:
2398 /* 0%x is 0 (or x&0 if x has side-effects). */
2399 if (trueop0 == CONST0_RTX (mode))
2400 {
2401 if (side_effects_p (op1))
2402 return simplify_gen_binary (AND, mode, op1, trueop0);
2403 return trueop0;
2404 }
2405 /* x%1 is 0 (or x&0 if x has side-effects). */
2406 if (trueop1 == CONST1_RTX (mode))
2407 {
2408 if (side_effects_p (op0))
2409 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2410 return CONST0_RTX (mode);
2411 }
2412 /* Implement modulus by power of two as AND. */
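/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)).  */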
2413 if (GET_CODE (trueop1) == CONST_INT
2414 && exact_log2 (INTVAL (trueop1)) > 0)
2415 return simplify_gen_binary (AND, mode, op0,
2416 GEN_INT (INTVAL (op1) - 1));
2417 break;
2418
2419 case MOD:
2420 /* 0%x is 0 (or x&0 if x has side-effects). */
2421 if (trueop0 == CONST0_RTX (mode))
2422 {
2423 if (side_effects_p (op1))
2424 return simplify_gen_binary (AND, mode, op1, trueop0);
2425 return trueop0;
2426 }
2427 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2428 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2429 {
2430 if (side_effects_p (op0))
2431 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2432 return CONST0_RTX (mode);
2433 }
2434 break;
2435
2436 case ROTATERT:
2437 case ROTATE:
2438 case ASHIFTRT:
2439 /* Rotating ~0 always results in ~0. */
2440 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2441 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2442 && ! side_effects_p (op1))
2443 return op0;
2444
2445 /* Fall through.... */
2446
2447 case ASHIFT:
2448 case SS_ASHIFT:
2449 case LSHIFTRT:
2450 if (trueop1 == CONST0_RTX (mode))
2451 return op0;
2452 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2453 return op0;
2454 break;
2455
2456 case SMIN:
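/* (smin x C) is C when C is the most negative value representable
   in the mode, and (smin x x) is x.  */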
2457 if (width <= HOST_BITS_PER_WIDE_INT
2458 && GET_CODE (trueop1) == CONST_INT
2459 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2460 && ! side_effects_p (op0))
2461 return op1;
2462 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2463 return op0;
2464 tem = simplify_associative_operation (code, mode, op0, op1);
2465 if (tem)
2466 return tem;
2467 break;
2468
2469 case SMAX:
2470 if (width <= HOST_BITS_PER_WIDE_INT
2471 && GET_CODE (trueop1) == CONST_INT
2472 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2473 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2474 && ! side_effects_p (op0))
2475 return op1;
2476 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2477 return op0;
2478 tem = simplify_associative_operation (code, mode, op0, op1);
2479 if (tem)
2480 return tem;
2481 break;
2482
2483 case UMIN:
2484 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2485 return op1;
2486 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2487 return op0;
2488 tem = simplify_associative_operation (code, mode, op0, op1);
2489 if (tem)
2490 return tem;
2491 break;
2492
2493 case UMAX:
2494 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2495 return op1;
2496 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2497 return op0;
2498 tem = simplify_associative_operation (code, mode, op0, op1);
2499 if (tem)
2500 return tem;
2501 break;
2502
2503 case SS_PLUS:
2504 case US_PLUS:
2505 case SS_MINUS:
2506 case US_MINUS:
2507 /* ??? There are simplifications that can be done. */
2508 return 0;
2509
2510 case VEC_SELECT:
2511 if (!VECTOR_MODE_P (mode))
2512 {
2513 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2514 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2515 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2516 gcc_assert (XVECLEN (trueop1, 0) == 1);
2517 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2518
2519 if (GET_CODE (trueop0) == CONST_VECTOR)
2520 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2521 (trueop1, 0, 0)));
2522 }
2523 else
2524 {
2525 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2526 gcc_assert (GET_MODE_INNER (mode)
2527 == GET_MODE_INNER (GET_MODE (trueop0)));
2528 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2529
2530 if (GET_CODE (trueop0) == CONST_VECTOR)
2531 {
2532 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2533 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2534 rtvec v = rtvec_alloc (n_elts);
2535 unsigned int i;
2536
2537 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2538 for (i = 0; i < n_elts; i++)
2539 {
2540 rtx x = XVECEXP (trueop1, 0, i);
2541
2542 gcc_assert (GET_CODE (x) == CONST_INT);
2543 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2544 INTVAL (x));
2545 }
2546
2547 return gen_rtx_CONST_VECTOR (mode, v);
2548 }
2549 }
2550
2551 if (XVECLEN (trueop1, 0) == 1
2552 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2553 && GET_CODE (trueop0) == VEC_CONCAT)
2554 {
2555 rtx vec = trueop0;
2556 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2557
2558 /* Try to find the element in the VEC_CONCAT. */
2559 while (GET_MODE (vec) != mode
2560 && GET_CODE (vec) == VEC_CONCAT)
2561 {
2562 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2563 if (offset < vec_size)
2564 vec = XEXP (vec, 0);
2565 else
2566 {
2567 offset -= vec_size;
2568 vec = XEXP (vec, 1);
2569 }
2570 vec = avoid_constant_pool_reference (vec);
2571 }
2572
2573 if (GET_MODE (vec) == mode)
2574 return vec;
2575 }
2576
2577 return 0;
2578 case VEC_CONCAT:
2579 {
2580 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2581 ? GET_MODE (trueop0)
2582 : GET_MODE_INNER (mode));
2583 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2584 ? GET_MODE (trueop1)
2585 : GET_MODE_INNER (mode));
2586
2587 gcc_assert (VECTOR_MODE_P (mode));
2588 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2589 == GET_MODE_SIZE (mode));
2590
2591 if (VECTOR_MODE_P (op0_mode))
2592 gcc_assert (GET_MODE_INNER (mode)
2593 == GET_MODE_INNER (op0_mode));
2594 else
2595 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2596
2597 if (VECTOR_MODE_P (op1_mode))
2598 gcc_assert (GET_MODE_INNER (mode)
2599 == GET_MODE_INNER (op1_mode));
2600 else
2601 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2602
2603 if ((GET_CODE (trueop0) == CONST_VECTOR
2604 || GET_CODE (trueop0) == CONST_INT
2605 || GET_CODE (trueop0) == CONST_DOUBLE)
2606 && (GET_CODE (trueop1) == CONST_VECTOR
2607 || GET_CODE (trueop1) == CONST_INT
2608 || GET_CODE (trueop1) == CONST_DOUBLE))
2609 {
2610 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2611 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2612 rtvec v = rtvec_alloc (n_elts);
2613 unsigned int i;
2614 unsigned in_n_elts = 1;
2615
2616 if (VECTOR_MODE_P (op0_mode))
2617 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2618 for (i = 0; i < n_elts; i++)
2619 {
2620 if (i < in_n_elts)
2621 {
2622 if (!VECTOR_MODE_P (op0_mode))
2623 RTVEC_ELT (v, i) = trueop0;
2624 else
2625 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2626 }
2627 else
2628 {
2629 if (!VECTOR_MODE_P (op1_mode))
2630 RTVEC_ELT (v, i) = trueop1;
2631 else
2632 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2633 i - in_n_elts);
2634 }
2635 }
2636
2637 return gen_rtx_CONST_VECTOR (mode, v);
2638 }
2639 }
2640 return 0;
2641
2642 default:
2643 gcc_unreachable ();
2644 }
2645
2646 return 0;
2647 }
2648
2649 rtx
2650 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2651 rtx op0, rtx op1)
2652 {
2653 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2654 HOST_WIDE_INT val;
2655 unsigned int width = GET_MODE_BITSIZE (mode);
2656
2657 if (VECTOR_MODE_P (mode)
2658 && code != VEC_CONCAT
2659 && GET_CODE (op0) == CONST_VECTOR
2660 && GET_CODE (op1) == CONST_VECTOR)
2661 {
2662 unsigned n_elts = GET_MODE_NUNITS (mode);
2663 enum machine_mode op0mode = GET_MODE (op0);
2664 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2665 enum machine_mode op1mode = GET_MODE (op1);
2666 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2667 rtvec v = rtvec_alloc (n_elts);
2668 unsigned int i;
2669
2670 gcc_assert (op0_n_elts == n_elts);
2671 gcc_assert (op1_n_elts == n_elts);
2672 for (i = 0; i < n_elts; i++)
2673 {
2674 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2675 CONST_VECTOR_ELT (op0, i),
2676 CONST_VECTOR_ELT (op1, i));
2677 if (!x)
2678 return 0;
2679 RTVEC_ELT (v, i) = x;
2680 }
2681
2682 return gen_rtx_CONST_VECTOR (mode, v);
2683 }
2684
2685 if (VECTOR_MODE_P (mode)
2686 && code == VEC_CONCAT
2687 && CONSTANT_P (op0) && CONSTANT_P (op1))
2688 {
2689 unsigned n_elts = GET_MODE_NUNITS (mode);
2690 rtvec v = rtvec_alloc (n_elts);
2691
2692 gcc_assert (n_elts >= 2);
2693 if (n_elts == 2)
2694 {
2695 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2696 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2697
2698 RTVEC_ELT (v, 0) = op0;
2699 RTVEC_ELT (v, 1) = op1;
2700 }
2701 else
2702 {
2703 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2704 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2705 unsigned i;
2706
2707 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2708 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2709 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2710
2711 for (i = 0; i < op0_n_elts; ++i)
2712 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2713 for (i = 0; i < op1_n_elts; ++i)
2714 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2715 }
2716
2717 return gen_rtx_CONST_VECTOR (mode, v);
2718 }
2719
2720 if (SCALAR_FLOAT_MODE_P (mode)
2721 && GET_CODE (op0) == CONST_DOUBLE
2722 && GET_CODE (op1) == CONST_DOUBLE
2723 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2724 {
2725 if (code == AND
2726 || code == IOR
2727 || code == XOR)
2728 {
2729 long tmp0[4];
2730 long tmp1[4];
2731 REAL_VALUE_TYPE r;
2732 int i;
2733
2734 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2735 GET_MODE (op0));
2736 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2737 GET_MODE (op1));
2738 for (i = 0; i < 4; i++)
2739 {
2740 switch (code)
2741 {
2742 case AND:
2743 tmp0[i] &= tmp1[i];
2744 break;
2745 case IOR:
2746 tmp0[i] |= tmp1[i];
2747 break;
2748 case XOR:
2749 tmp0[i] ^= tmp1[i];
2750 break;
2751 default:
2752 gcc_unreachable ();
2753 }
2754 }
2755 real_from_target (&r, tmp0, mode);
2756 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2757 }
2758 else
2759 {
2760 REAL_VALUE_TYPE f0, f1, value, result;
2761 bool inexact;
2762
2763 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2764 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2765 real_convert (&f0, mode, &f0);
2766 real_convert (&f1, mode, &f1);
2767
2768 if (HONOR_SNANS (mode)
2769 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2770 return 0;
2771
2772 if (code == DIV
2773 && REAL_VALUES_EQUAL (f1, dconst0)
2774 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2775 return 0;
2776
2777 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2778 && flag_trapping_math
2779 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2780 {
2781 int s0 = REAL_VALUE_NEGATIVE (f0);
2782 int s1 = REAL_VALUE_NEGATIVE (f1);
2783
2784 switch (code)
2785 {
2786 case PLUS:
2787 /* Inf + -Inf = NaN plus exception. */
2788 if (s0 != s1)
2789 return 0;
2790 break;
2791 case MINUS:
2792 /* Inf - Inf = NaN plus exception. */
2793 if (s0 == s1)
2794 return 0;
2795 break;
2796 case DIV:
2797 /* Inf / Inf = NaN plus exception. */
2798 return 0;
2799 default:
2800 break;
2801 }
2802 }
2803
2804 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2805 && flag_trapping_math
2806 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2807 || (REAL_VALUE_ISINF (f1)
2808 && REAL_VALUES_EQUAL (f0, dconst0))))
2809 /* Inf * 0 = NaN plus exception. */
2810 return 0;
2811
2812 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2813 &f0, &f1);
2814 real_convert (&result, mode, &value);
2815
2816 /* Don't constant fold this floating point operation if
2817 the result has overflowed and flag_trapping_math. */
2818
2819 if (flag_trapping_math
2820 && MODE_HAS_INFINITIES (mode)
2821 && REAL_VALUE_ISINF (result)
2822 && !REAL_VALUE_ISINF (f0)
2823 && !REAL_VALUE_ISINF (f1))
2824 /* Overflow plus exception. */
2825 return 0;
2826
2827 /* Don't constant fold this floating point operation if the
2828 result may depend upon the run-time rounding mode and
2829 flag_rounding_math is set, or if GCC's software emulation
2830 is unable to accurately represent the result. */
2831
2832 if ((flag_rounding_math
2833 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2834 && !flag_unsafe_math_optimizations))
2835 && (inexact || !real_identical (&result, &value)))
2836 return NULL_RTX;
2837
2838 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2839 }
2840 }
2841
2842 /* We can fold some multi-word operations. */
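/* That is, when the mode is exactly two HOST_WIDE_INTs wide (e.g.
   TImode on a 64-bit host), split each constant into a (low, high)
   pair of words and use the double-word helpers such as add_double
   and mul_double below.  */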
2843 if (GET_MODE_CLASS (mode) == MODE_INT
2844 && width == HOST_BITS_PER_WIDE_INT * 2
2845 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2846 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2847 {
2848 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2849 HOST_WIDE_INT h1, h2, hv, ht;
2850
2851 if (GET_CODE (op0) == CONST_DOUBLE)
2852 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2853 else
2854 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2855
2856 if (GET_CODE (op1) == CONST_DOUBLE)
2857 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2858 else
2859 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2860
2861 switch (code)
2862 {
2863 case MINUS:
2864 /* A - B == A + (-B). */
2865 neg_double (l2, h2, &lv, &hv);
2866 l2 = lv, h2 = hv;
2867
2868 /* Fall through.... */
2869
2870 case PLUS:
2871 add_double (l1, h1, l2, h2, &lv, &hv);
2872 break;
2873
2874 case MULT:
2875 mul_double (l1, h1, l2, h2, &lv, &hv);
2876 break;
2877
2878 case DIV:
2879 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2880 &lv, &hv, &lt, &ht))
2881 return 0;
2882 break;
2883
2884 case MOD:
2885 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2886 &lt, &ht, &lv, &hv))
2887 return 0;
2888 break;
2889
2890 case UDIV:
2891 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2892 &lv, &hv, &lt, &ht))
2893 return 0;
2894 break;
2895
2896 case UMOD:
2897 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2898 &lt, &ht, &lv, &hv))
2899 return 0;
2900 break;
2901
2902 case AND:
2903 lv = l1 & l2, hv = h1 & h2;
2904 break;
2905
2906 case IOR:
2907 lv = l1 | l2, hv = h1 | h2;
2908 break;
2909
2910 case XOR:
2911 lv = l1 ^ l2, hv = h1 ^ h2;
2912 break;
2913
2914 case SMIN:
2915 if (h1 < h2
2916 || (h1 == h2
2917 && ((unsigned HOST_WIDE_INT) l1
2918 < (unsigned HOST_WIDE_INT) l2)))
2919 lv = l1, hv = h1;
2920 else
2921 lv = l2, hv = h2;
2922 break;
2923
2924 case SMAX:
2925 if (h1 > h2
2926 || (h1 == h2
2927 && ((unsigned HOST_WIDE_INT) l1
2928 > (unsigned HOST_WIDE_INT) l2)))
2929 lv = l1, hv = h1;
2930 else
2931 lv = l2, hv = h2;
2932 break;
2933
2934 case UMIN:
2935 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2936 || (h1 == h2
2937 && ((unsigned HOST_WIDE_INT) l1
2938 < (unsigned HOST_WIDE_INT) l2)))
2939 lv = l1, hv = h1;
2940 else
2941 lv = l2, hv = h2;
2942 break;
2943
2944 case UMAX:
2945 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2946 || (h1 == h2
2947 && ((unsigned HOST_WIDE_INT) l1
2948 > (unsigned HOST_WIDE_INT) l2)))
2949 lv = l1, hv = h1;
2950 else
2951 lv = l2, hv = h2;
2952 break;
2953
2954 case LSHIFTRT: case ASHIFTRT:
2955 case ASHIFT:
2956 case ROTATE: case ROTATERT:
2957 if (SHIFT_COUNT_TRUNCATED)
2958 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2959
2960 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2961 return 0;
2962
2963 if (code == LSHIFTRT || code == ASHIFTRT)
2964 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2965 code == ASHIFTRT);
2966 else if (code == ASHIFT)
2967 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2968 else if (code == ROTATE)
2969 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2970 else /* code == ROTATERT */
2971 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2972 break;
2973
2974 default:
2975 return 0;
2976 }
2977
2978 return immed_double_const (lv, hv, mode);
2979 }
2980
2981 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2982 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2983 {
2984 /* Get the integer argument values in two forms:
2985 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2986
2987 arg0 = INTVAL (op0);
2988 arg1 = INTVAL (op1);
2989
2990 if (width < HOST_BITS_PER_WIDE_INT)
2991 {
2992 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2993 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2994
2995 arg0s = arg0;
2996 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2997 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2998
2999 arg1s = arg1;
3000 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3001 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3002 }
3003 else
3004 {
3005 arg0s = arg0;
3006 arg1s = arg1;
3007 }
3008
3009 /* Compute the value of the arithmetic. */
3010
3011 switch (code)
3012 {
3013 case PLUS:
3014 val = arg0s + arg1s;
3015 break;
3016
3017 case MINUS:
3018 val = arg0s - arg1s;
3019 break;
3020
3021 case MULT:
3022 val = arg0s * arg1s;
3023 break;
3024
3025 case DIV:
3026 if (arg1s == 0
3027 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3028 && arg1s == -1))
3029 return 0;
3030 val = arg0s / arg1s;
3031 break;
3032
3033 case MOD:
3034 if (arg1s == 0
3035 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3036 && arg1s == -1))
3037 return 0;
3038 val = arg0s % arg1s;
3039 break;
3040
3041 case UDIV:
3042 if (arg1 == 0
3043 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3044 && arg1s == -1))
3045 return 0;
3046 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3047 break;
3048
3049 case UMOD:
3050 if (arg1 == 0
3051 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3052 && arg1s == -1))
3053 return 0;
3054 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3055 break;
3056
3057 case AND:
3058 val = arg0 & arg1;
3059 break;
3060
3061 case IOR:
3062 val = arg0 | arg1;
3063 break;
3064
3065 case XOR:
3066 val = arg0 ^ arg1;
3067 break;
3068
3069 case LSHIFTRT:
3070 case ASHIFT:
3071 case ASHIFTRT:
3072 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3073 the value is in range. We can't return any old value for
3074 out-of-range arguments because either the middle-end (via
3075 shift_truncation_mask) or the back-end might be relying on
3076 target-specific knowledge. Nor can we rely on
3077 shift_truncation_mask, since the shift might not be part of an
3078 ashlM3, lshrM3 or ashrM3 instruction. */
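/* E.g. with SHIFT_COUNT_TRUNCATED and a 32-bit mode, a constant shift
   by 37 folds as a shift by 37 % 32 == 5; without it, the out-of-range
   count makes us return 0 and leave the expression alone.  */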
3079 if (SHIFT_COUNT_TRUNCATED)
3080 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3081 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3082 return 0;
3083
3084 val = (code == ASHIFT
3085 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3086 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3087
3088 /* Sign-extend the result for arithmetic right shifts. */
3089 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3090 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3091 break;
3092
3093 case ROTATERT:
3094 if (arg1 < 0)
3095 return 0;
3096
3097 arg1 %= width;
3098 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3099 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3100 break;
3101
3102 case ROTATE:
3103 if (arg1 < 0)
3104 return 0;
3105
3106 arg1 %= width;
3107 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3108 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3109 break;
3110
3111 case COMPARE:
3112 /* Do nothing here. */
3113 return 0;
3114
3115 case SMIN:
3116 val = arg0s <= arg1s ? arg0s : arg1s;
3117 break;
3118
3119 case UMIN:
3120 val = ((unsigned HOST_WIDE_INT) arg0
3121 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3122 break;
3123
3124 case SMAX:
3125 val = arg0s > arg1s ? arg0s : arg1s;
3126 break;
3127
3128 case UMAX:
3129 val = ((unsigned HOST_WIDE_INT) arg0
3130 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3131 break;
3132
3133 case SS_PLUS:
3134 case US_PLUS:
3135 case SS_MINUS:
3136 case US_MINUS:
3137 case SS_ASHIFT:
3138 /* ??? There are simplifications that can be done. */
3139 return 0;
3140
3141 default:
3142 gcc_unreachable ();
3143 }
3144
3145 return gen_int_mode (val, mode);
3146 }
3147
3148 return NULL_RTX;
3149 }
3150
3151
3152 \f
3153 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3154 PLUS or MINUS.
3155
3156 Rather than test for specific cases, we do this by a brute-force method
3157 and do all possible simplifications until no more changes occur. Then
3158 we rebuild the operation. */
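/* For example, (minus (plus a b) (minus c d)) is first flattened into
   the operand list a, b, -c, d; the entries are then simplified against
   each other where possible, and the survivors are rebuilt into a chain
   of PLUS and MINUS expressions.  */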
3159
3160 struct simplify_plus_minus_op_data
3161 {
3162 rtx op;
3163 short neg;
3164 short ix;
3165 };
3166
3167 static int
3168 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3169 {
3170 const struct simplify_plus_minus_op_data *d1 = p1;
3171 const struct simplify_plus_minus_op_data *d2 = p2;
3172 int result;
3173
3174 result = (commutative_operand_precedence (d2->op)
3175 - commutative_operand_precedence (d1->op));
3176 if (result)
3177 return result;
3178 return d1->ix - d2->ix;
3179 }
3180
3181 static rtx
3182 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3183 rtx op1)
3184 {
3185 struct simplify_plus_minus_op_data ops[8];
3186 rtx result, tem;
3187 int n_ops = 2, input_ops = 2;
3188 int first, changed, canonicalized = 0;
3189 int i, j;
3190
3191 memset (ops, 0, sizeof ops);
3192
3193 /* Set up the two operands and then expand them until nothing has been
3194 changed. If we run out of room in our array, give up; this should
3195 almost never happen. */
3196
3197 ops[0].op = op0;
3198 ops[0].neg = 0;
3199 ops[1].op = op1;
3200 ops[1].neg = (code == MINUS);
3201
3202 do
3203 {
3204 changed = 0;
3205
3206 for (i = 0; i < n_ops; i++)
3207 {
3208 rtx this_op = ops[i].op;
3209 int this_neg = ops[i].neg;
3210 enum rtx_code this_code = GET_CODE (this_op);
3211
3212 switch (this_code)
3213 {
3214 case PLUS:
3215 case MINUS:
3216 if (n_ops == 7)
3217 return NULL_RTX;
3218
3219 ops[n_ops].op = XEXP (this_op, 1);
3220 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3221 n_ops++;
3222
3223 ops[i].op = XEXP (this_op, 0);
3224 input_ops++;
3225 changed = 1;
3226 canonicalized |= this_neg;
3227 break;
3228
3229 case NEG:
3230 ops[i].op = XEXP (this_op, 0);
3231 ops[i].neg = ! this_neg;
3232 changed = 1;
3233 canonicalized = 1;
3234 break;
3235
3236 case CONST:
3237 if (n_ops < 7
3238 && GET_CODE (XEXP (this_op, 0)) == PLUS
3239 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3240 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3241 {
3242 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3243 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3244 ops[n_ops].neg = this_neg;
3245 n_ops++;
3246 changed = 1;
3247 canonicalized = 1;
3248 }
3249 break;
3250
3251 case NOT:
3252 /* ~a -> (-a - 1) */
3253 if (n_ops != 7)
3254 {
3255 ops[n_ops].op = constm1_rtx;
3256 ops[n_ops++].neg = this_neg;
3257 ops[i].op = XEXP (this_op, 0);
3258 ops[i].neg = !this_neg;
3259 changed = 1;
3260 canonicalized = 1;
3261 }
3262 break;
3263
3264 case CONST_INT:
3265 if (this_neg)
3266 {
3267 ops[i].op = neg_const_int (mode, this_op);
3268 ops[i].neg = 0;
3269 changed = 1;
3270 canonicalized = 1;
3271 }
3272 break;
3273
3274 default:
3275 break;
3276 }
3277 }
3278 }
3279 while (changed);
3280
3281 gcc_assert (n_ops >= 2);
3282 if (!canonicalized)
3283 {
3284 int n_constants = 0;
3285
3286 for (i = 0; i < n_ops; i++)
3287 if (GET_CODE (ops[i].op) == CONST_INT)
3288 n_constants++;
3289
3290 if (n_constants <= 1)
3291 return NULL_RTX;
3292 }
3293
3294 /* If we only have two operands, we can avoid the loops. */
3295 if (n_ops == 2)
3296 {
3297 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3298 rtx lhs, rhs;
3299
3300 /* Get the two operands. Be careful with the order, especially for
3301 the cases where code == MINUS. */
3302 if (ops[0].neg && ops[1].neg)
3303 {
3304 lhs = gen_rtx_NEG (mode, ops[0].op);
3305 rhs = ops[1].op;
3306 }
3307 else if (ops[0].neg)
3308 {
3309 lhs = ops[1].op;
3310 rhs = ops[0].op;
3311 }
3312 else
3313 {
3314 lhs = ops[0].op;
3315 rhs = ops[1].op;
3316 }
3317
3318 return simplify_const_binary_operation (code, mode, lhs, rhs);
3319 }
3320
3321 /* Now simplify each pair of operands until nothing changes. The first
3322 time through just simplify constants against each other. */
3323
3324 first = 1;
3325 do
3326 {
3327 changed = first;
3328
3329 for (i = 0; i < n_ops - 1; i++)
3330 for (j = i + 1; j < n_ops; j++)
3331 {
3332 rtx lhs = ops[i].op, rhs = ops[j].op;
3333 int lneg = ops[i].neg, rneg = ops[j].neg;
3334
3335 if (lhs != 0 && rhs != 0
3336 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3337 {
3338 enum rtx_code ncode = PLUS;
3339
3340 if (lneg != rneg)
3341 {
3342 ncode = MINUS;
3343 if (lneg)
3344 tem = lhs, lhs = rhs, rhs = tem;
3345 }
3346 else if (swap_commutative_operands_p (lhs, rhs))
3347 tem = lhs, lhs = rhs, rhs = tem;
3348
3349 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3350 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3351 {
3352 rtx tem_lhs, tem_rhs;
3353
3354 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3355 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3356 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3357
3358 if (tem && !CONSTANT_P (tem))
3359 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3360 }
3361 else
3362 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3363
3364 /* Reject "simplifications" that just wrap the two
3365 arguments in a CONST. Failure to do so can result
3366 in infinite recursion with simplify_binary_operation
3367 when it calls us to simplify CONST operations. */
3368 if (tem
3369 && ! (GET_CODE (tem) == CONST
3370 && GET_CODE (XEXP (tem, 0)) == ncode
3371 && XEXP (XEXP (tem, 0), 0) == lhs
3372 && XEXP (XEXP (tem, 0), 1) == rhs)
3373 /* Don't allow -x + -1 -> ~x simplifications in the
3374 first pass. This allows us the chance to combine
3375 the -1 with other constants. */
3376 && ! (first
3377 && GET_CODE (tem) == NOT
3378 && XEXP (tem, 0) == rhs))
3379 {
3380 lneg &= rneg;
3381 if (GET_CODE (tem) == NEG)
3382 tem = XEXP (tem, 0), lneg = !lneg;
3383 if (GET_CODE (tem) == CONST_INT && lneg)
3384 tem = neg_const_int (mode, tem), lneg = 0;
3385
3386 ops[i].op = tem;
3387 ops[i].neg = lneg;
3388 ops[j].op = NULL_RTX;
3389 changed = 1;
3390 }
3391 }
3392 }
3393
3394 first = 0;
3395 }
3396 while (changed);
3397
3398 /* Pack all the operands to the lower-numbered entries. */
3399 for (i = 0, j = 0; j < n_ops; j++)
3400 if (ops[j].op)
3401 {
3402 ops[i] = ops[j];
3403 /* Stabilize sort. */
3404 ops[i].ix = i;
3405 i++;
3406 }
3407 n_ops = i;
3408
3409 /* Sort the operations based on swap_commutative_operands_p. */
3410 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3411
3412 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3413 if (n_ops == 2
3414 && GET_CODE (ops[1].op) == CONST_INT
3415 && CONSTANT_P (ops[0].op)
3416 && ops[0].neg)
3417 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3418
3419 /* We suppressed creation of trivial CONST expressions in the
3420 combination loop to avoid recursion. Create one manually now.
3421 The combination loop should have ensured that there is exactly
3422 one CONST_INT, and the sort will have ensured that it is last
3423 in the array and that any other constant will be next-to-last. */
3424
3425 if (n_ops > 1
3426 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3427 && CONSTANT_P (ops[n_ops - 2].op))
3428 {
3429 rtx value = ops[n_ops - 1].op;
3430 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3431 value = neg_const_int (mode, value);
3432 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3433 n_ops--;
3434 }
3435
3436 /* Put a non-negated operand first, if possible. */
3437
3438 for (i = 0; i < n_ops && ops[i].neg; i++)
3439 continue;
3440 if (i == n_ops)
3441 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3442 else if (i != 0)
3443 {
3444 tem = ops[0].op;
3445 ops[0] = ops[i];
3446 ops[i].op = tem;
3447 ops[i].neg = 1;
3448 }
3449
3450 /* Now make the result by performing the requested operations. */
3451 result = ops[0].op;
3452 for (i = 1; i < n_ops; i++)
3453 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3454 mode, result, ops[i].op);
3455
3456 return result;
3457 }
3458
3459 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3460 static bool
3461 plus_minus_operand_p (rtx x)
3462 {
3463 return GET_CODE (x) == PLUS
3464 || GET_CODE (x) == MINUS
3465 || (GET_CODE (x) == CONST
3466 && GET_CODE (XEXP (x, 0)) == PLUS
3467 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3468 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3469 }
3470
3471 /* Like simplify_binary_operation except used for relational operators.
3472 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3473 not both be VOIDmode.
3474
3475 CMP_MODE specifies the mode in which the comparison is done, so it is
3476 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3477 the operands or, if both are VOIDmode, the operands are compared in
3478 "infinite precision". */
3479 rtx
3480 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3481 enum machine_mode cmp_mode, rtx op0, rtx op1)
3482 {
3483 rtx tem, trueop0, trueop1;
3484
3485 if (cmp_mode == VOIDmode)
3486 cmp_mode = GET_MODE (op0);
3487 if (cmp_mode == VOIDmode)
3488 cmp_mode = GET_MODE (op1);
3489
3490 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3491 if (tem)
3492 {
3493 if (SCALAR_FLOAT_MODE_P (mode))
3494 {
3495 if (tem == const0_rtx)
3496 return CONST0_RTX (mode);
3497 #ifdef FLOAT_STORE_FLAG_VALUE
3498 {
3499 REAL_VALUE_TYPE val;
3500 val = FLOAT_STORE_FLAG_VALUE (mode);
3501 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3502 }
3503 #else
3504 return NULL_RTX;
3505 #endif
3506 }
3507 if (VECTOR_MODE_P (mode))
3508 {
3509 if (tem == const0_rtx)
3510 return CONST0_RTX (mode);
3511 #ifdef VECTOR_STORE_FLAG_VALUE
3512 {
3513 int i, units;
3514 rtvec v;
3515
3516 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3517 if (val == NULL_RTX)
3518 return NULL_RTX;
3519 if (val == const1_rtx)
3520 return CONST1_RTX (mode);
3521
3522 units = GET_MODE_NUNITS (mode);
3523 v = rtvec_alloc (units);
3524 for (i = 0; i < units; i++)
3525 RTVEC_ELT (v, i) = val;
3526 return gen_rtx_raw_CONST_VECTOR (mode, v);
3527 }
3528 #else
3529 return NULL_RTX;
3530 #endif
3531 }
3532
3533 return tem;
3534 }
3535
3536 /* For the following tests, ensure const0_rtx is op1. */
3537 if (swap_commutative_operands_p (op0, op1)
3538 || (op0 == const0_rtx && op1 != const0_rtx))
3539 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3540
3541 /* If op0 is a compare, extract the comparison arguments from it. */
3542 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3543 return simplify_relational_operation (code, mode, VOIDmode,
3544 XEXP (op0, 0), XEXP (op0, 1));
3545
3546 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3547 || CC0_P (op0))
3548 return NULL_RTX;
3549
3550 trueop0 = avoid_constant_pool_reference (op0);
3551 trueop1 = avoid_constant_pool_reference (op1);
3552 return simplify_relational_operation_1 (code, mode, cmp_mode,
3553 trueop0, trueop1);
3554 }
3555
3556 /* This part of simplify_relational_operation is only used when CMP_MODE
3557 is not in class MODE_CC (i.e. it is a real comparison).
3558
3559 MODE is the mode of the result, while CMP_MODE specifies in which
3560 mode the comparison is done in, so it is the mode of the operands. */
3561
3562 static rtx
3563 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3564 enum machine_mode cmp_mode, rtx op0, rtx op1)
3565 {
3566 enum rtx_code op0code = GET_CODE (op0);
3567
3568 if (GET_CODE (op1) == CONST_INT)
3569 {
3570 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3571 {
3572 /* If op0 is a comparison, extract the comparison arguments
3573 from it. */
3574 if (code == NE)
3575 {
3576 if (GET_MODE (op0) == mode)
3577 return simplify_rtx (op0);
3578 else
3579 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3580 XEXP (op0, 0), XEXP (op0, 1));
3581 }
3582 else if (code == EQ)
3583 {
3584 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3585 if (new_code != UNKNOWN)
3586 return simplify_gen_relational (new_code, mode, VOIDmode,
3587 XEXP (op0, 0), XEXP (op0, 1));
3588 }
3589 }
3590 }
3591
3592 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
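/* E.g. (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)).  */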
3593 if ((code == EQ || code == NE)
3594 && (op0code == PLUS || op0code == MINUS)
3595 && CONSTANT_P (op1)
3596 && CONSTANT_P (XEXP (op0, 1))
3597 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3598 {
3599 rtx x = XEXP (op0, 0);
3600 rtx c = XEXP (op0, 1);
3601
3602 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3603 cmp_mode, op1, c);
3604 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3605 }
3606
3607 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3608 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3609 if (code == NE
3610 && op1 == const0_rtx
3611 && GET_MODE_CLASS (mode) == MODE_INT
3612 && cmp_mode != VOIDmode
3613 /* ??? Work-around BImode bugs in the ia64 backend. */
3614 && mode != BImode
3615 && cmp_mode != BImode
3616 && nonzero_bits (op0, cmp_mode) == 1
3617 && STORE_FLAG_VALUE == 1)
3618 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3619 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3620 : lowpart_subreg (mode, op0, cmp_mode);
3621
3622 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3623 if ((code == EQ || code == NE)
3624 && op1 == const0_rtx
3625 && op0code == XOR)
3626 return simplify_gen_relational (code, mode, cmp_mode,
3627 XEXP (op0, 0), XEXP (op0, 1));
3628
3629 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3630 if ((code == EQ || code == NE)
3631 && op0code == XOR
3632 && rtx_equal_p (XEXP (op0, 0), op1)
3633 && !side_effects_p (XEXP (op0, 0)))
3634 return simplify_gen_relational (code, mode, cmp_mode,
3635 XEXP (op0, 1), const0_rtx);
3636
3637 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3638 if ((code == EQ || code == NE)
3639 && op0code == XOR
3640 && rtx_equal_p (XEXP (op0, 1), op1)
3641 && !side_effects_p (XEXP (op0, 1)))
3642 return simplify_gen_relational (code, mode, cmp_mode,
3643 XEXP (op0, 0), const0_rtx);
3644
3645 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
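/* E.g. (eq (xor x (const_int 5)) (const_int 3)) becomes
   (eq x (const_int 6)).  */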
3646 if ((code == EQ || code == NE)
3647 && op0code == XOR
3648 && (GET_CODE (op1) == CONST_INT
3649 || GET_CODE (op1) == CONST_DOUBLE)
3650 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3651 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3652 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3653 simplify_gen_binary (XOR, cmp_mode,
3654 XEXP (op0, 1), op1));
3655
3656 return NULL_RTX;
3657 }
3658
3659 /* Check if the given comparison (done in the given MODE) is actually a
3660 tautology or a contradiction.
3661 If no simplification is possible, this function returns zero.
3662 Otherwise, it returns either const_true_rtx or const0_rtx. */
3663
3664 rtx
3665 simplify_const_relational_operation (enum rtx_code code,
3666 enum machine_mode mode,
3667 rtx op0, rtx op1)
3668 {
3669 int equal, op0lt, op0ltu, op1lt, op1ltu;
3670 rtx tem;
3671 rtx trueop0;
3672 rtx trueop1;
3673
3674 gcc_assert (mode != VOIDmode
3675 || (GET_MODE (op0) == VOIDmode
3676 && GET_MODE (op1) == VOIDmode));
3677
3678 /* If op0 is a compare, extract the comparison arguments from it. */
3679 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3680 {
3681 op1 = XEXP (op0, 1);
3682 op0 = XEXP (op0, 0);
3683
3684 if (GET_MODE (op0) != VOIDmode)
3685 mode = GET_MODE (op0);
3686 else if (GET_MODE (op1) != VOIDmode)
3687 mode = GET_MODE (op1);
3688 else
3689 return 0;
3690 }
3691
3692 /* We can't simplify MODE_CC values since we don't know what the
3693 actual comparison is. */
3694 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3695 return 0;
3696
3697 /* Make sure the constant is second. */
3698 if (swap_commutative_operands_p (op0, op1))
3699 {
3700 tem = op0, op0 = op1, op1 = tem;
3701 code = swap_condition (code);
3702 }
3703
3704 trueop0 = avoid_constant_pool_reference (op0);
3705 trueop1 = avoid_constant_pool_reference (op1);
3706
3707 /* For integer comparisons of A and B maybe we can simplify A - B and can
3708 then simplify a comparison of that with zero. If A and B are both either
3709 a register or a CONST_INT, this can't help; testing for these cases will
3710 prevent infinite recursion here and speed things up.
3711
3712 If CODE is an unsigned comparison, then we can never do this optimization,
3713 because it gives an incorrect result if the subtraction wraps around zero.
3714 ANSI C defines unsigned operations such that they never overflow, and
3715 thus such cases can not be ignored; but we cannot do it even for
3716 signed comparisons for languages such as Java, so test flag_wrapv. */
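/* For instance, for (gt (plus x (const_int 1)) x) the difference
   simplifies to (const_int 1), and comparing that against zero yields
   const_true_rtx; this is valid only for signed comparisons without
   -fwrapv, as explained above.  */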
3717
3718 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3719 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3720 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3721 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3722 /* We cannot do this for == or != if tem is a nonzero address. */
3723 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3724 && code != GTU && code != GEU && code != LTU && code != LEU)
3725 return simplify_const_relational_operation (signed_condition (code),
3726 mode, tem, const0_rtx);
3727
3728 if (flag_unsafe_math_optimizations && code == ORDERED)
3729 return const_true_rtx;
3730
3731 if (flag_unsafe_math_optimizations && code == UNORDERED)
3732 return const0_rtx;
3733
3734 /* For modes without NaNs, if the two operands are equal, we know the
3735 result except if they have side-effects. */
3736 if (! HONOR_NANS (GET_MODE (trueop0))
3737 && rtx_equal_p (trueop0, trueop1)
3738 && ! side_effects_p (trueop0))
3739 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3740
3741 /* If the operands are floating-point constants, see if we can fold
3742 the result. */
3743 else if (GET_CODE (trueop0) == CONST_DOUBLE
3744 && GET_CODE (trueop1) == CONST_DOUBLE
3745 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3746 {
3747 REAL_VALUE_TYPE d0, d1;
3748
3749 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3750 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3751
3752 /* Comparisons are unordered iff at least one of the values is NaN. */
3753 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3754 switch (code)
3755 {
3756 case UNEQ:
3757 case UNLT:
3758 case UNGT:
3759 case UNLE:
3760 case UNGE:
3761 case NE:
3762 case UNORDERED:
3763 return const_true_rtx;
3764 case EQ:
3765 case LT:
3766 case GT:
3767 case LE:
3768 case GE:
3769 case LTGT:
3770 case ORDERED:
3771 return const0_rtx;
3772 default:
3773 return 0;
3774 }
3775
3776 equal = REAL_VALUES_EQUAL (d0, d1);
3777 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3778 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3779 }
3780
3781 /* Otherwise, see if the operands are both integers. */
3782 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3783 && (GET_CODE (trueop0) == CONST_DOUBLE
3784 || GET_CODE (trueop0) == CONST_INT)
3785 && (GET_CODE (trueop1) == CONST_DOUBLE
3786 || GET_CODE (trueop1) == CONST_INT))
3787 {
3788 int width = GET_MODE_BITSIZE (mode);
3789 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3790 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3791
3792 /* Get the two words comprising each integer constant. */
3793 if (GET_CODE (trueop0) == CONST_DOUBLE)
3794 {
3795 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3796 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3797 }
3798 else
3799 {
3800 l0u = l0s = INTVAL (trueop0);
3801 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3802 }
3803
3804 if (GET_CODE (trueop1) == CONST_DOUBLE)
3805 {
3806 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3807 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3808 }
3809 else
3810 {
3811 l1u = l1s = INTVAL (trueop1);
3812 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3813 }
3814
3815 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3816 we have to sign or zero-extend the values. */
3817 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3818 {
3819 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3820 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3821
3822 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3823 l0s |= ((HOST_WIDE_INT) (-1) << width);
3824
3825 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3826 l1s |= ((HOST_WIDE_INT) (-1) << width);
3827 }
3828 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3829 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3830
3831 equal = (h0u == h1u && l0u == l1u);
3832 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3833 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3834 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3835 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3836 }
3837
3838 /* Otherwise, there are some code-specific tests we can make. */
3839 else
3840 {
3841 /* Optimize comparisons with upper and lower bounds. */
3842 if (SCALAR_INT_MODE_P (mode)
3843 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3844 {
3845 rtx mmin, mmax;
3846 int sign;
3847
3848 if (code == GEU
3849 || code == LEU
3850 || code == GTU
3851 || code == LTU)
3852 sign = 0;
3853 else
3854 sign = 1;
3855
3856 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3857
3858 tem = NULL_RTX;
3859 switch (code)
3860 {
3861 case GEU:
3862 case GE:
3863 /* x >= min is always true. */
3864 if (rtx_equal_p (trueop1, mmin))
3865 tem = const_true_rtx;
3866 else
3867 break;
3868
3869 case LEU:
3870 case LE:
3871 /* x <= max is always true. */
3872 if (rtx_equal_p (trueop1, mmax))
3873 tem = const_true_rtx;
3874 break;
3875
3876 case GTU:
3877 case GT:
3878 /* x > max is always false. */
3879 if (rtx_equal_p (trueop1, mmax))
3880 tem = const0_rtx;
3881 break;
3882
3883 case LTU:
3884 case LT:
3885 /* x < min is always false. */
3886 if (rtx_equal_p (trueop1, mmin))
3887 tem = const0_rtx;
3888 break;
3889
3890 default:
3891 break;
3892 }
3893 if (tem == const0_rtx
3894 || tem == const_true_rtx)
3895 return tem;
3896 }
3897
3898 switch (code)
3899 {
3900 case EQ:
3901 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3902 return const0_rtx;
3903 break;
3904
3905 case NE:
3906 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3907 return const_true_rtx;
3908 break;
3909
3910 case LT:
3911 /* Optimize abs(x) < 0.0. */
3912 if (trueop1 == CONST0_RTX (mode)
3913 && !HONOR_SNANS (mode)
3914 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3915 {
3916 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3917 : trueop0;
3918 if (GET_CODE (tem) == ABS)
3919 return const0_rtx;
3920 }
3921 break;
3922
3923 case GE:
3924 /* Optimize abs(x) >= 0.0. */
3925 if (trueop1 == CONST0_RTX (mode)
3926 && !HONOR_NANS (mode)
3927 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3928 {
3929 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3930 : trueop0;
3931 if (GET_CODE (tem) == ABS)
3932 return const_true_rtx;
3933 }
3934 break;
3935
3936 case UNGE:
3937 /* Optimize ! (abs(x) < 0.0). */
3938 if (trueop1 == CONST0_RTX (mode))
3939 {
3940 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3941 : trueop0;
3942 if (GET_CODE (tem) == ABS)
3943 return const_true_rtx;
3944 }
3945 break;
3946
3947 default:
3948 break;
3949 }
3950
3951 return 0;
3952 }
3953
3954 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3955 as appropriate. */
3956 switch (code)
3957 {
3958 case EQ:
3959 case UNEQ:
3960 return equal ? const_true_rtx : const0_rtx;
3961 case NE:
3962 case LTGT:
3963 return ! equal ? const_true_rtx : const0_rtx;
3964 case LT:
3965 case UNLT:
3966 return op0lt ? const_true_rtx : const0_rtx;
3967 case GT:
3968 case UNGT:
3969 return op1lt ? const_true_rtx : const0_rtx;
3970 case LTU:
3971 return op0ltu ? const_true_rtx : const0_rtx;
3972 case GTU:
3973 return op1ltu ? const_true_rtx : const0_rtx;
3974 case LE:
3975 case UNLE:
3976 return equal || op0lt ? const_true_rtx : const0_rtx;
3977 case GE:
3978 case UNGE:
3979 return equal || op1lt ? const_true_rtx : const0_rtx;
3980 case LEU:
3981 return equal || op0ltu ? const_true_rtx : const0_rtx;
3982 case GEU:
3983 return equal || op1ltu ? const_true_rtx : const0_rtx;
3984 case ORDERED:
3985 return const_true_rtx;
3986 case UNORDERED:
3987 return const0_rtx;
3988 default:
3989 gcc_unreachable ();
3990 }
3991 }
3992 \f
3993 /* Simplify CODE, an operation with result mode MODE and three operands,
3994 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3995 a constant. Return 0 if no simplification is possible. */
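   /* For example (illustrative only), (if_then_else (const_int 1) A B)
      folds to A, and a ZERO_EXTRACT whose three operands are all
      CONST_INTs folds to the extracted constant.  */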
3996
3997 rtx
3998 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3999 enum machine_mode op0_mode, rtx op0, rtx op1,
4000 rtx op2)
4001 {
4002 unsigned int width = GET_MODE_BITSIZE (mode);
4003
4004 /* VOIDmode means "infinite" precision. */
4005 if (width == 0)
4006 width = HOST_BITS_PER_WIDE_INT;
4007
4008 switch (code)
4009 {
4010 case SIGN_EXTRACT:
4011 case ZERO_EXTRACT:
4012 if (GET_CODE (op0) == CONST_INT
4013 && GET_CODE (op1) == CONST_INT
4014 && GET_CODE (op2) == CONST_INT
4015 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4016 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4017 {
4018 /* Extracting a bit-field from a constant */
4019 HOST_WIDE_INT val = INTVAL (op0);
4020
4021 if (BITS_BIG_ENDIAN)
4022 val >>= (GET_MODE_BITSIZE (op0_mode)
4023 - INTVAL (op2) - INTVAL (op1));
4024 else
4025 val >>= INTVAL (op2);
4026
4027 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4028 {
4029 /* First zero-extend. */
4030 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4031 /* If desired, propagate sign bit. */
4032 if (code == SIGN_EXTRACT
4033 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4034 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4035 }
4036
4037 /* Clear the bits that don't belong in our mode,
4038 unless they and our sign bit are all one.
4039 So we get either a reasonable negative value or a reasonable
4040 unsigned value for this mode. */
4041 if (width < HOST_BITS_PER_WIDE_INT
4042 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4043 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4044 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4045
4046 return gen_int_mode (val, mode);
4047 }
4048 break;
4049
4050 case IF_THEN_ELSE:
4051 if (GET_CODE (op0) == CONST_INT)
4052 return op0 != const0_rtx ? op1 : op2;
4053
4054 /* Convert c ? a : a into "a". */
4055 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4056 return op1;
4057
4058 /* Convert a != b ? a : b into "a". */
4059 if (GET_CODE (op0) == NE
4060 && ! side_effects_p (op0)
4061 && ! HONOR_NANS (mode)
4062 && ! HONOR_SIGNED_ZEROS (mode)
4063 && ((rtx_equal_p (XEXP (op0, 0), op1)
4064 && rtx_equal_p (XEXP (op0, 1), op2))
4065 || (rtx_equal_p (XEXP (op0, 0), op2)
4066 && rtx_equal_p (XEXP (op0, 1), op1))))
4067 return op1;
4068
4069 /* Convert a == b ? a : b into "b". */
4070 if (GET_CODE (op0) == EQ
4071 && ! side_effects_p (op0)
4072 && ! HONOR_NANS (mode)
4073 && ! HONOR_SIGNED_ZEROS (mode)
4074 && ((rtx_equal_p (XEXP (op0, 0), op1)
4075 && rtx_equal_p (XEXP (op0, 1), op2))
4076 || (rtx_equal_p (XEXP (op0, 0), op2)
4077 && rtx_equal_p (XEXP (op0, 1), op1))))
4078 return op2;
4079
4080 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4081 {
4082 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4083 ? GET_MODE (XEXP (op0, 1))
4084 : GET_MODE (XEXP (op0, 0)));
4085 rtx temp;
4086
4087 /* Look for happy constants in op1 and op2. */
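              /* E.g., assuming STORE_FLAG_VALUE == 1 (illustrative only),
                 (if_then_else (lt A B) (const_int 1) (const_int 0)) can
                 become (lt A B), and swapping the two arms yields the
                 reversed comparison instead.  */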
4088 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4089 {
4090 HOST_WIDE_INT t = INTVAL (op1);
4091 HOST_WIDE_INT f = INTVAL (op2);
4092
4093 if (t == STORE_FLAG_VALUE && f == 0)
4094 code = GET_CODE (op0);
4095 else if (t == 0 && f == STORE_FLAG_VALUE)
4096 {
4097 enum rtx_code tmp;
4098 tmp = reversed_comparison_code (op0, NULL_RTX);
4099 if (tmp == UNKNOWN)
4100 break;
4101 code = tmp;
4102 }
4103 else
4104 break;
4105
4106 return simplify_gen_relational (code, mode, cmp_mode,
4107 XEXP (op0, 0), XEXP (op0, 1));
4108 }
4109
4110 if (cmp_mode == VOIDmode)
4111 cmp_mode = op0_mode;
4112 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4113 cmp_mode, XEXP (op0, 0),
4114 XEXP (op0, 1));
4115
4116 /* See if any simplifications were possible. */
4117 if (temp)
4118 {
4119 if (GET_CODE (temp) == CONST_INT)
4120 return temp == const0_rtx ? op2 : op1;
4121 else if (temp)
4122 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4123 }
4124 }
4125 break;
4126
4127 case VEC_MERGE:
4128 gcc_assert (GET_MODE (op0) == mode);
4129 gcc_assert (GET_MODE (op1) == mode);
4130 gcc_assert (VECTOR_MODE_P (mode));
4131 op2 = avoid_constant_pool_reference (op2);
4132 if (GET_CODE (op2) == CONST_INT)
4133 {
4134 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4135 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4136 int mask = (1 << n_elts) - 1;
4137
4138 if (!(INTVAL (op2) & mask))
4139 return op1;
4140 if ((INTVAL (op2) & mask) == mask)
4141 return op0;
4142
4143 op0 = avoid_constant_pool_reference (op0);
4144 op1 = avoid_constant_pool_reference (op1);
4145 if (GET_CODE (op0) == CONST_VECTOR
4146 && GET_CODE (op1) == CONST_VECTOR)
4147 {
4148 rtvec v = rtvec_alloc (n_elts);
4149 unsigned int i;
4150
4151 for (i = 0; i < n_elts; i++)
4152 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4153 ? CONST_VECTOR_ELT (op0, i)
4154 : CONST_VECTOR_ELT (op1, i));
4155 return gen_rtx_CONST_VECTOR (mode, v);
4156 }
4157 }
4158 break;
4159
4160 default:
4161 gcc_unreachable ();
4162 }
4163
4164 return 0;
4165 }
4166
4167 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4168 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4169
4170 Works by unpacking OP into a collection of 8-bit values
4171 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4172 and then repacking them again for OUTERMODE. */
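   /* For instance (illustrative only), on a little-endian target a QImode
      subreg at byte 0 of (const_int 0x1234) unpacks the constant into
      bytes and repacks the byte at offset 0, yielding (const_int 0x34).  */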
4173
4174 static rtx
4175 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4176 enum machine_mode innermode, unsigned int byte)
4177 {
4178 /* We support up to 512-bit values (for V8DFmode). */
4179 enum {
4180 max_bitsize = 512,
4181 value_bit = 8,
4182 value_mask = (1 << value_bit) - 1
4183 };
4184 unsigned char value[max_bitsize / value_bit];
4185 int value_start;
4186 int i;
4187 int elem;
4188
4189 int num_elem;
4190 rtx * elems;
4191 int elem_bitsize;
4192 rtx result_s;
4193 rtvec result_v = NULL;
4194 enum mode_class outer_class;
4195 enum machine_mode outer_submode;
4196
4197 /* Some ports misuse CCmode. */
4198 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4199 return op;
4200
4201 /* We have no way to represent a complex constant at the rtl level. */
4202 if (COMPLEX_MODE_P (outermode))
4203 return NULL_RTX;
4204
4205 /* Unpack the value. */
4206
4207 if (GET_CODE (op) == CONST_VECTOR)
4208 {
4209 num_elem = CONST_VECTOR_NUNITS (op);
4210 elems = &CONST_VECTOR_ELT (op, 0);
4211 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4212 }
4213 else
4214 {
4215 num_elem = 1;
4216 elems = &op;
4217 elem_bitsize = max_bitsize;
4218 }
4219 /* If this asserts, it is too complicated; reducing value_bit may help. */
4220 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4221 /* I don't know how to handle endianness of sub-units. */
4222 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4223
4224 for (elem = 0; elem < num_elem; elem++)
4225 {
4226 unsigned char * vp;
4227 rtx el = elems[elem];
4228
4229 /* Vectors are kept in target memory order. (This is probably
4230 a mistake.) */
4231 {
4232 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4233 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4234 / BITS_PER_UNIT);
4235 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4236 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4237 unsigned bytele = (subword_byte % UNITS_PER_WORD
4238 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4239 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4240 }
4241
4242 switch (GET_CODE (el))
4243 {
4244 case CONST_INT:
4245 for (i = 0;
4246 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4247 i += value_bit)
4248 *vp++ = INTVAL (el) >> i;
4249 /* CONST_INTs are always logically sign-extended. */
4250 for (; i < elem_bitsize; i += value_bit)
4251 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4252 break;
4253
4254 case CONST_DOUBLE:
4255 if (GET_MODE (el) == VOIDmode)
4256 {
4257 /* If this triggers, someone should have generated a
4258 CONST_INT instead. */
4259 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4260
4261 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4262 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4263 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4264 {
4265 *vp++
4266 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4267 i += value_bit;
4268 }
4269 /* It shouldn't matter what's done here, so fill it with
4270 zero. */
4271 for (; i < elem_bitsize; i += value_bit)
4272 *vp++ = 0;
4273 }
4274 else
4275 {
4276 long tmp[max_bitsize / 32];
4277 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4278
4279 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4280 gcc_assert (bitsize <= elem_bitsize);
4281 gcc_assert (bitsize % value_bit == 0);
4282
4283 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4284 GET_MODE (el));
4285
4286 /* real_to_target produces its result in words affected by
4287 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4288 and use WORDS_BIG_ENDIAN instead; see the documentation
4289 of SUBREG in rtl.texi. */
4290 for (i = 0; i < bitsize; i += value_bit)
4291 {
4292 int ibase;
4293 if (WORDS_BIG_ENDIAN)
4294 ibase = bitsize - 1 - i;
4295 else
4296 ibase = i;
4297 *vp++ = tmp[ibase / 32] >> i % 32;
4298 }
4299
4300 /* It shouldn't matter what's done here, so fill it with
4301 zero. */
4302 for (; i < elem_bitsize; i += value_bit)
4303 *vp++ = 0;
4304 }
4305 break;
4306
4307 default:
4308 gcc_unreachable ();
4309 }
4310 }
4311
4312 /* Now, pick the right byte to start with. */
4313 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4314 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4315 will already have offset 0. */
4316 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4317 {
4318 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4319 - byte);
4320 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4321 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4322 byte = (subword_byte % UNITS_PER_WORD
4323 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4324 }
4325
4326 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4327 so if it's become negative it will instead be very large.) */
4328 gcc_assert (byte < GET_MODE_SIZE (innermode));
4329
4330 /* Convert from bytes to chunks of size value_bit. */
4331 value_start = byte * (BITS_PER_UNIT / value_bit);
4332
4333 /* Re-pack the value. */
4334
4335 if (VECTOR_MODE_P (outermode))
4336 {
4337 num_elem = GET_MODE_NUNITS (outermode);
4338 result_v = rtvec_alloc (num_elem);
4339 elems = &RTVEC_ELT (result_v, 0);
4340 outer_submode = GET_MODE_INNER (outermode);
4341 }
4342 else
4343 {
4344 num_elem = 1;
4345 elems = &result_s;
4346 outer_submode = outermode;
4347 }
4348
4349 outer_class = GET_MODE_CLASS (outer_submode);
4350 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4351
4352 gcc_assert (elem_bitsize % value_bit == 0);
4353 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4354
4355 for (elem = 0; elem < num_elem; elem++)
4356 {
4357 unsigned char *vp;
4358
4359 /* Vectors are stored in target memory order. (This is probably
4360 a mistake.) */
4361 {
4362 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4363 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4364 / BITS_PER_UNIT);
4365 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4366 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4367 unsigned bytele = (subword_byte % UNITS_PER_WORD
4368 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4369 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4370 }
4371
4372 switch (outer_class)
4373 {
4374 case MODE_INT:
4375 case MODE_PARTIAL_INT:
4376 {
4377 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4378
4379 for (i = 0;
4380 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4381 i += value_bit)
4382 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4383 for (; i < elem_bitsize; i += value_bit)
4384 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4385 << (i - HOST_BITS_PER_WIDE_INT));
4386
4387 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4388 know why. */
4389 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4390 elems[elem] = gen_int_mode (lo, outer_submode);
4391 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4392 elems[elem] = immed_double_const (lo, hi, outer_submode);
4393 else
4394 return NULL_RTX;
4395 }
4396 break;
4397
4398 case MODE_FLOAT:
4399 case MODE_DECIMAL_FLOAT:
4400 {
4401 REAL_VALUE_TYPE r;
4402 long tmp[max_bitsize / 32];
4403
4404 /* real_from_target wants its input in words affected by
4405 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4406 and use WORDS_BIG_ENDIAN instead; see the documentation
4407 of SUBREG in rtl.texi. */
4408 for (i = 0; i < max_bitsize / 32; i++)
4409 tmp[i] = 0;
4410 for (i = 0; i < elem_bitsize; i += value_bit)
4411 {
4412 int ibase;
4413 if (WORDS_BIG_ENDIAN)
4414 ibase = elem_bitsize - 1 - i;
4415 else
4416 ibase = i;
4417 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4418 }
4419
4420 real_from_target (&r, tmp, outer_submode);
4421 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4422 }
4423 break;
4424
4425 default:
4426 gcc_unreachable ();
4427 }
4428 }
4429 if (VECTOR_MODE_P (outermode))
4430 return gen_rtx_CONST_VECTOR (outermode, result_v);
4431 else
4432 return result_s;
4433 }
4434
4435 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4436 Return 0 if no simplifications are possible. */
4437 rtx
4438 simplify_subreg (enum machine_mode outermode, rtx op,
4439 enum machine_mode innermode, unsigned int byte)
4440 {
4441 /* Little bit of sanity checking. */
4442 gcc_assert (innermode != VOIDmode);
4443 gcc_assert (outermode != VOIDmode);
4444 gcc_assert (innermode != BLKmode);
4445 gcc_assert (outermode != BLKmode);
4446
4447 gcc_assert (GET_MODE (op) == innermode
4448 || GET_MODE (op) == VOIDmode);
4449
4450 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4451 gcc_assert (byte < GET_MODE_SIZE (innermode));
4452
4453 if (outermode == innermode && !byte)
4454 return op;
4455
4456 if (GET_CODE (op) == CONST_INT
4457 || GET_CODE (op) == CONST_DOUBLE
4458 || GET_CODE (op) == CONST_VECTOR)
4459 return simplify_immed_subreg (outermode, op, innermode, byte);
4460
4461 /* Changing mode twice with SUBREG => just change it once,
4462 or not at all if changing back to op's starting mode. */
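  /* For example (illustrative only),
     (subreg:SI (subreg:DI (reg:SI X) 0) 0) simplifies back to
     (reg:SI X).  */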
4463 if (GET_CODE (op) == SUBREG)
4464 {
4465 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4466 int final_offset = byte + SUBREG_BYTE (op);
4467 rtx newx;
4468
4469 if (outermode == innermostmode
4470 && byte == 0 && SUBREG_BYTE (op) == 0)
4471 return SUBREG_REG (op);
4472
4473 /* The SUBREG_BYTE represents the offset, as if the value were stored
4474 in memory.  An irritating exception is the paradoxical subreg, where
4475 we define SUBREG_BYTE to be 0.  On big-endian machines, this value
4476 should be negative.  For a moment, undo this exception. */
4477 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4478 {
4479 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4480 if (WORDS_BIG_ENDIAN)
4481 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4482 if (BYTES_BIG_ENDIAN)
4483 final_offset += difference % UNITS_PER_WORD;
4484 }
4485 if (SUBREG_BYTE (op) == 0
4486 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4487 {
4488 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4489 if (WORDS_BIG_ENDIAN)
4490 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4491 if (BYTES_BIG_ENDIAN)
4492 final_offset += difference % UNITS_PER_WORD;
4493 }
4494
4495 /* See whether resulting subreg will be paradoxical. */
4496 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4497 {
4498 /* In nonparadoxical subregs we can't handle negative offsets. */
4499 if (final_offset < 0)
4500 return NULL_RTX;
4501 /* Bail out in case resulting subreg would be incorrect. */
4502 if (final_offset % GET_MODE_SIZE (outermode)
4503 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4504 return NULL_RTX;
4505 }
4506 else
4507 {
4508 int offset = 0;
4509 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4510
4511 /* For a paradoxical subreg, see if we are still looking at the lower part.
4512 If so, our SUBREG_BYTE will be 0. */
4513 if (WORDS_BIG_ENDIAN)
4514 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4515 if (BYTES_BIG_ENDIAN)
4516 offset += difference % UNITS_PER_WORD;
4517 if (offset == final_offset)
4518 final_offset = 0;
4519 else
4520 return NULL_RTX;
4521 }
4522
4523 /* Recurse for further possible simplifications. */
4524 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4525 final_offset);
4526 if (newx)
4527 return newx;
4528 if (validate_subreg (outermode, innermostmode,
4529 SUBREG_REG (op), final_offset))
4530 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4531 return NULL_RTX;
4532 }
4533
4534 /* Merge implicit and explicit truncations. */
4535
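  /* For instance (illustrative only), on a little-endian target
     (subreg:QI (truncate:HI (reg:SI X)) 0) can become
     (truncate:QI (reg:SI X)).  */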
4536 if (GET_CODE (op) == TRUNCATE
4537 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4538 && subreg_lowpart_offset (outermode, innermode) == byte)
4539 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4540 GET_MODE (XEXP (op, 0)));
4541
4542 /* SUBREG of a hard register => just change the register number
4543 and/or mode. If the hard register is not valid in that mode,
4544 suppress this simplification. If the hard register is the stack,
4545 frame, or argument pointer, leave this as a SUBREG. */
4546
4547 if (REG_P (op)
4548 && REGNO (op) < FIRST_PSEUDO_REGISTER
4549 #ifdef CANNOT_CHANGE_MODE_CLASS
4550 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4551 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4552 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4553 #endif
4554 && ((reload_completed && !frame_pointer_needed)
4555 || (REGNO (op) != FRAME_POINTER_REGNUM
4556 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4557 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4558 #endif
4559 ))
4560 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4561 && REGNO (op) != ARG_POINTER_REGNUM
4562 #endif
4563 && REGNO (op) != STACK_POINTER_REGNUM
4564 && subreg_offset_representable_p (REGNO (op), innermode,
4565 byte, outermode))
4566 {
4567 unsigned int regno = REGNO (op);
4568 unsigned int final_regno
4569 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4570
4571 /* ??? We do allow it if the current REG is not valid for
4572 its mode. This is a kludge to work around how float/complex
4573 arguments are passed on 32-bit SPARC and should be fixed. */
4574 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4575 || ! HARD_REGNO_MODE_OK (regno, innermode))
4576 {
4577 rtx x;
4578 int final_offset = byte;
4579
4580 /* Adjust offset for paradoxical subregs. */
4581 if (byte == 0
4582 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4583 {
4584 int difference = (GET_MODE_SIZE (innermode)
4585 - GET_MODE_SIZE (outermode));
4586 if (WORDS_BIG_ENDIAN)
4587 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4588 if (BYTES_BIG_ENDIAN)
4589 final_offset += difference % UNITS_PER_WORD;
4590 }
4591
4592 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4593
4594 /* Propagate original regno. We don't have any way to specify
4595 the offset inside original regno, so do so only for lowpart.
4596 The information is used only by alias analysis, which cannot
4597 grok a partial register anyway. */
4598
4599 if (subreg_lowpart_offset (outermode, innermode) == byte)
4600 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4601 return x;
4602 }
4603 }
4604
4605 /* If we have a SUBREG of a register that we are replacing and we are
4606 replacing it with a MEM, make a new MEM and try replacing the
4607 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4608 or if we would be widening it. */
4609
4610 if (MEM_P (op)
4611 && ! mode_dependent_address_p (XEXP (op, 0))
4612 /* Allow splitting of volatile memory references in case we don't
4613 have an instruction to move the whole thing. */
4614 && (! MEM_VOLATILE_P (op)
4615 || ! have_insn_for (SET, innermode))
4616 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4617 return adjust_address_nv (op, outermode, byte);
4618
4619 /* Handle complex values represented as CONCAT
4620 of real and imaginary part. */
4621 if (GET_CODE (op) == CONCAT)
4622 {
4623 unsigned int inner_size, final_offset;
4624 rtx part, res;
4625
4626 inner_size = GET_MODE_UNIT_SIZE (innermode);
4627 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4628 final_offset = byte % inner_size;
4629 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4630 return NULL_RTX;
4631
4632 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4633 if (res)
4634 return res;
4635 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4636 return gen_rtx_SUBREG (outermode, part, final_offset);
4637 return NULL_RTX;
4638 }
4639
4640 /* Optimize SUBREG truncations of zero and sign extended values. */
4641 if ((GET_CODE (op) == ZERO_EXTEND
4642 || GET_CODE (op) == SIGN_EXTEND)
4643 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4644 {
4645 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4646
4647 /* If we're requesting the lowpart of a zero or sign extension,
4648 there are three possibilities. If the outermode is the same
4649 as the origmode, we can omit both the extension and the subreg.
4650 If the outermode is not larger than the origmode, we can apply
4651 the truncation without the extension. Finally, if the outermode
4652 is larger than the origmode, but both are integer modes, we
4653 can just extend to the appropriate mode. */
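         /* For example (illustrative only), on a little-endian target
            (subreg:HI (zero_extend:SI (reg:HI X)) 0) is just (reg:HI X),
            while a QImode subreg at byte 0 becomes the lowpart subreg of
            (reg:HI X).  */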
4654 if (bitpos == 0)
4655 {
4656 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4657 if (outermode == origmode)
4658 return XEXP (op, 0);
4659 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4660 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4661 subreg_lowpart_offset (outermode,
4662 origmode));
4663 if (SCALAR_INT_MODE_P (outermode))
4664 return simplify_gen_unary (GET_CODE (op), outermode,
4665 XEXP (op, 0), origmode);
4666 }
4667
4668 /* A SUBREG resulting from a zero extension may fold to zero if
4669 it extracts higher bits than the ZERO_EXTEND's source provides. */
4670 if (GET_CODE (op) == ZERO_EXTEND
4671 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4672 return CONST0_RTX (outermode);
4673 }
4674
4675 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4676 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4677 the outer subreg is effectively a truncation to the original mode. */
4678 if ((GET_CODE (op) == LSHIFTRT
4679 || GET_CODE (op) == ASHIFTRT)
4680 && SCALAR_INT_MODE_P (outermode)
4681 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4682 to avoid the possibility that an outer LSHIFTRT shifts by more
4683 than the sign extension's sign_bit_copies and introduces zeros
4684 into the high bits of the result. */
4685 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4686 && GET_CODE (XEXP (op, 1)) == CONST_INT
4687 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4688 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4689 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4690 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4691 return simplify_gen_binary (ASHIFTRT, outermode,
4692 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4693
4694 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4695 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4696 the outer subreg is effectively a truncation to the original mode. */
4697 if ((GET_CODE (op) == LSHIFTRT
4698 || GET_CODE (op) == ASHIFTRT)
4699 && SCALAR_INT_MODE_P (outermode)
4700 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4701 && GET_CODE (XEXP (op, 1)) == CONST_INT
4702 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4703 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4704 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4705 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4706 return simplify_gen_binary (LSHIFTRT, outermode,
4707 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4708
4709 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4710 (ashift:QI (x:QI) C), where C is a suitable small constant and
4711 the outer subreg is effectively a truncation to the original mode. */
4712 if (GET_CODE (op) == ASHIFT
4713 && SCALAR_INT_MODE_P (outermode)
4714 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4715 && GET_CODE (XEXP (op, 1)) == CONST_INT
4716 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4717 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4718 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4719 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4720 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4721 return simplify_gen_binary (ASHIFT, outermode,
4722 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4723
4724 return NULL_RTX;
4725 }
4726
4727 /* Make a SUBREG operation or equivalent if it folds. */
4728
4729 rtx
4730 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4731 enum machine_mode innermode, unsigned int byte)
4732 {
4733 rtx newx;
4734
4735 newx = simplify_subreg (outermode, op, innermode, byte);
4736 if (newx)
4737 return newx;
4738
4739 if (GET_CODE (op) == SUBREG
4740 || GET_CODE (op) == CONCAT
4741 || GET_MODE (op) == VOIDmode)
4742 return NULL_RTX;
4743
4744 if (validate_subreg (outermode, innermode, op, byte))
4745 return gen_rtx_SUBREG (outermode, op, byte);
4746
4747 return NULL_RTX;
4748 }
4749
4750 /* Simplify X, an rtx expression.
4751
4752 Return the simplified expression or NULL if no simplifications
4753 were possible.
4754
4755 This is the preferred entry point into the simplification routines;
4756 however, we still allow passes to call the more specific routines.
4757
4758 Right now GCC has three (yes, three) major bodies of RTL simplification
4759 code that need to be unified.
4760
4761 1. fold_rtx in cse.c. This code uses various CSE specific
4762 information to aid in RTL simplification.
4763
4764 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4765 it uses combine specific information to aid in RTL
4766 simplification.
4767
4768 3. The routines in this file.
4769
4770
4771 Long term we want to only have one body of simplification code; to
4772 get to that state I recommend the following steps:
4773
4774 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4775 which do not depend on pass-specific state into these routines.
4776
4777 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4778 use this routine whenever possible.
4779
4780 3. Allow for pass dependent state to be provided to these
4781 routines and add simplifications based on the pass dependent
4782 state. Remove code from cse.c & combine.c that becomes
4783 redundant/dead.
4784
4785 It will take time, but ultimately the compiler will be easier to
4786 maintain and improve. It's totally silly that when we add a
4787 simplification it needs to be added to 4 places (3 for RTL
4788 simplification and 1 for tree simplification). */
4789
4790 rtx
4791 simplify_rtx (rtx x)
4792 {
4793 enum rtx_code code = GET_CODE (x);
4794 enum machine_mode mode = GET_MODE (x);
4795
4796 switch (GET_RTX_CLASS (code))
4797 {
4798 case RTX_UNARY:
4799 return simplify_unary_operation (code, mode,
4800 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4801 case RTX_COMM_ARITH:
4802 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4803 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4804
4805 /* Fall through.... */
4806
4807 case RTX_BIN_ARITH:
4808 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4809
4810 case RTX_TERNARY:
4811 case RTX_BITFIELD_OPS:
4812 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4813 XEXP (x, 0), XEXP (x, 1),
4814 XEXP (x, 2));
4815
4816 case RTX_COMPARE:
4817 case RTX_COMM_COMPARE:
4818 return simplify_relational_operation (code, mode,
4819 ((GET_MODE (XEXP (x, 0))
4820 != VOIDmode)
4821 ? GET_MODE (XEXP (x, 0))
4822 : GET_MODE (XEXP (x, 1))),
4823 XEXP (x, 0),
4824 XEXP (x, 1));
4825
4826 case RTX_EXTRA:
4827 if (code == SUBREG)
4828 return simplify_gen_subreg (mode, SUBREG_REG (x),
4829 GET_MODE (SUBREG_REG (x)),
4830 SUBREG_BYTE (x));
4831 break;
4832
4833 case RTX_OBJ:
4834 if (code == LO_SUM)
4835 {
4836 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4837 if (GET_CODE (XEXP (x, 0)) == HIGH
4838 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4839 return XEXP (x, 1);
4840 }
4841 break;
4842
4843 default:
4844 break;
4845 }
4846 return NULL;
4847 }
4848