1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
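/* For example, if LOW has its most significant bit set (so it is
   negative when read as a signed HOST_WIDE_INT), the extended HIGH
   word is -1; for any other LOW it is 0.  */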
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
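/* For example, negating the QImode constant -128 overflows;
   gen_int_mode truncates the result back to QImode, yielding -128.  */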
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
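/* For example, for 32-bit SImode this is true only of a constant
   whose low 32 bits are 0x80000000, i.e. just the sign bit set.  */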
76
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
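/* For example, simplify_gen_binary (PLUS, SImode, X, const0_rtx)
   simply returns X, while (plus (const_int 4) (reg:SI 1)) is built
   with the constant second, as (plus (reg:SI 1) (const_int 4)).  */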
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 addr = XEXP (x, 0);
162
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
165
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
170 {
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
173 }
174
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
177
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
182 {
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
185
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
190 {
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
194 }
195 else
196 return c;
197 }
198
199 return x;
200 }
201
202 /* Return true if X is a MEM referencing the constant pool. */
203
204 bool
205 constant_pool_reference_p (rtx x)
206 {
207 return avoid_constant_pool_reference (x) != x;
208 }
209 \f
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
212
213 rtx
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
216 {
217 rtx tem;
218
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
222
223 return gen_rtx_fmt_e (code, mode, op);
224 }
225
226 /* Likewise for ternary operations. */
227
228 rtx
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
231 {
232 rtx tem;
233
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
238
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
240 }
241
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
244
245 rtx
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
248 {
249 rtx tem;
250
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
254
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
256 }
257 \f
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
260
261 rtx
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
263 {
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
268
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
272
273 if (x == old_rtx)
274 return new_rtx;
275
276 switch (GET_RTX_CLASS (code))
277 {
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
285
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
293
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
304
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
317
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
321 {
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
329 }
330 break;
331
332 case RTX_OBJ:
333 if (code == MEM)
334 {
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
339 }
340 else if (code == LO_SUM)
341 {
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
344
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
348
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
352 }
353 else if (code == REG)
354 {
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
357 }
358 break;
359
360 default:
361 break;
362 }
363 return x;
364 }
365 \f
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
369 rtx
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
372 {
373 rtx trueop, tem;
374
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
377
378 trueop = avoid_constant_pool_reference (op);
379
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
383
384 return simplify_unary_operation_1 (code, mode, op);
385 }
386
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
391 {
392 enum rtx_code reversed;
393 rtx temp;
394
395 switch (code)
396 {
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
401
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
409
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
414
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
418
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
425
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
433
434
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
439 bother with. */
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
442 {
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
445 }
446
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
450
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
457
458
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
465 {
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
467 rtx x;
468
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
471 inner_mode),
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
474 }
475
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
479 coded. */
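/* For example, (not (ior X Y)) becomes (and (not X) (not Y)),
   and (not (and X Y)) becomes (ior (not X) (not Y)).  */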
480
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
482 {
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
485
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
488
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
491 op_mode = mode;
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
493
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
495 {
496 rtx tem = in2;
497 in2 = in1; in1 = tem;
498 }
499
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
501 mode, in1, in2);
502 }
503 break;
504
505 case NEG:
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
508 return XEXP (op, 0);
509
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
514
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
518
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
521 both +0, (minus Y X) is the same as (minus X Y). If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
528
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
532 {
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
536 {
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
538 if (temp)
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
540 }
541
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
545 }
546
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
551 {
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
554 }
555
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
558 is a constant). */
559 if (GET_CODE (op) == ASHIFT)
560 {
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
562 if (temp)
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
564 }
565
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
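/* For example, in SImode both (neg (ashiftrt X (const_int 31)))
   and (lshiftrt X (const_int 31)) yield 0 or 1 according to the
   sign bit of X.  */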
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
573
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
581
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
587
588 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
589 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
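/* For example, in SImode with STORE_FLAG_VALUE == 1 this produces
   (ashiftrt:SI X (const_int 31)).  */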
590 if (GET_CODE (op) == LT
591 && XEXP (op, 1) == const0_rtx)
592 {
593 enum machine_mode inner = GET_MODE (XEXP (op, 0));
594 int isize = GET_MODE_BITSIZE (inner);
595 if (STORE_FLAG_VALUE == 1)
596 {
597 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
598 GEN_INT (isize - 1));
599 if (mode == inner)
600 return temp;
601 if (GET_MODE_BITSIZE (mode) > isize)
602 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
603 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
604 }
605 else if (STORE_FLAG_VALUE == -1)
606 {
607 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
608 GEN_INT (isize - 1));
609 if (mode == inner)
610 return temp;
611 if (GET_MODE_BITSIZE (mode) > isize)
612 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
613 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
614 }
615 }
616 break;
617
618 case TRUNCATE:
619 /* We can't handle truncation to a partial integer mode here
620 because we don't know the real bitsize of the partial
621 integer mode. */
622 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
623 break;
624
625 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
626 if ((GET_CODE (op) == SIGN_EXTEND
627 || GET_CODE (op) == ZERO_EXTEND)
628 && GET_MODE (XEXP (op, 0)) == mode)
629 return XEXP (op, 0);
630
631 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
632 (OP:SI foo:SI) if OP is NEG or ABS. */
633 if ((GET_CODE (op) == ABS
634 || GET_CODE (op) == NEG)
635 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
636 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
637 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (XEXP (op, 0), 0), mode);
640
641 /* (truncate:A (subreg:B (truncate:C X) 0)) is
642 (truncate:A X). */
643 if (GET_CODE (op) == SUBREG
644 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
645 && subreg_lowpart_p (op))
646 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
647 GET_MODE (XEXP (SUBREG_REG (op), 0)));
648
649 /* If we know that the value is already truncated, we can
650 replace the TRUNCATE with a SUBREG. Note that this is also
651 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
652 modes; we just have to apply a different definition for
653 truncation. But don't do this for an (LSHIFTRT (MULT ...))
654 since this will cause problems with the umulXi3_highpart
655 patterns. */
656 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
657 GET_MODE_BITSIZE (GET_MODE (op)))
658 ? (num_sign_bit_copies (op, GET_MODE (op))
659 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
660 - GET_MODE_BITSIZE (mode)))
661 : truncated_to_mode (mode, op))
662 && ! (GET_CODE (op) == LSHIFTRT
663 && GET_CODE (XEXP (op, 0)) == MULT))
664 return rtl_hooks.gen_lowpart_no_emit (mode, op);
665
666 /* A truncate of a comparison can be replaced with a subreg if
667 STORE_FLAG_VALUE permits. This is like the previous test,
668 but it works even if the comparison is done in a mode larger
669 than HOST_BITS_PER_WIDE_INT. */
670 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
671 && COMPARISON_P (op)
672 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
673 return rtl_hooks.gen_lowpart_no_emit (mode, op);
674 break;
675
676 case FLOAT_TRUNCATE:
677 if (DECIMAL_FLOAT_MODE_P (mode))
678 break;
679
680 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
681 if (GET_CODE (op) == FLOAT_EXTEND
682 && GET_MODE (XEXP (op, 0)) == mode)
683 return XEXP (op, 0);
684
685 /* (float_truncate:SF (float_truncate:DF foo:XF))
686 = (float_truncate:SF foo:XF).
687 This may eliminate double rounding, so it is unsafe and is only done with -funsafe-math-optimizations.
688
689 (float_truncate:SF (float_extend:XF foo:DF))
690 = (float_truncate:SF foo:DF).
691
692 (float_truncate:DF (float_extend:XF foo:SF))
693 = (float_extend:DF foo:SF). */
694 if ((GET_CODE (op) == FLOAT_TRUNCATE
695 && flag_unsafe_math_optimizations)
696 || GET_CODE (op) == FLOAT_EXTEND)
697 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
698 0)))
699 > GET_MODE_SIZE (mode)
700 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
701 mode,
702 XEXP (op, 0), mode);
703
704 /* (float_truncate (float x)) is (float x) */
705 if (GET_CODE (op) == FLOAT
706 && (flag_unsafe_math_optimizations
707 || ((unsigned)significand_size (GET_MODE (op))
708 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
709 - num_sign_bit_copies (XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)))))))
711 return simplify_gen_unary (FLOAT, mode,
712 XEXP (op, 0),
713 GET_MODE (XEXP (op, 0)));
714
715 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
716 (OP:SF foo:SF) if OP is NEG or ABS. */
717 if ((GET_CODE (op) == ABS
718 || GET_CODE (op) == NEG)
719 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
720 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
721 return simplify_gen_unary (GET_CODE (op), mode,
722 XEXP (XEXP (op, 0), 0), mode);
723
724 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
725 is (float_truncate:SF x). */
726 if (GET_CODE (op) == SUBREG
727 && subreg_lowpart_p (op)
728 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
729 return SUBREG_REG (op);
730 break;
731
732 case FLOAT_EXTEND:
733 if (DECIMAL_FLOAT_MODE_P (mode))
734 break;
735
736 /* (float_extend (float_extend x)) is (float_extend x)
737
738 (float_extend (float x)) is (float x) assuming that double
739 rounding can't happen.
740 */
741 if (GET_CODE (op) == FLOAT_EXTEND
742 || (GET_CODE (op) == FLOAT
743 && ((unsigned)significand_size (GET_MODE (op))
744 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
745 - num_sign_bit_copies (XEXP (op, 0),
746 GET_MODE (XEXP (op, 0)))))))
747 return simplify_gen_unary (GET_CODE (op), mode,
748 XEXP (op, 0),
749 GET_MODE (XEXP (op, 0)));
750
751 break;
752
753 case ABS:
754 /* (abs (neg <foo>)) -> (abs <foo>) */
755 if (GET_CODE (op) == NEG)
756 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
757 GET_MODE (XEXP (op, 0)));
758
759 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
760 do nothing. */
761 if (GET_MODE (op) == VOIDmode)
762 break;
763
764 /* If operand is something known to be positive, ignore the ABS. */
765 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
766 || ((GET_MODE_BITSIZE (GET_MODE (op))
767 <= HOST_BITS_PER_WIDE_INT)
768 && ((nonzero_bits (op, GET_MODE (op))
769 & ((HOST_WIDE_INT) 1
770 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
771 == 0)))
772 return op;
773
774 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
775 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
776 return gen_rtx_NEG (mode, op);
777
778 break;
779
780 case FFS:
781 /* (ffs (*_extend <X>)) = (ffs <X>) */
782 if (GET_CODE (op) == SIGN_EXTEND
783 || GET_CODE (op) == ZERO_EXTEND)
784 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
785 GET_MODE (XEXP (op, 0)));
786 break;
787
788 case POPCOUNT:
789 case PARITY:
790 /* (pop* (zero_extend <X>)) = (pop* <X>) */
791 if (GET_CODE (op) == ZERO_EXTEND)
792 return simplify_gen_unary (code, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
794 break;
795
796 case FLOAT:
797 /* (float (sign_extend <X>)) = (float <X>). */
798 if (GET_CODE (op) == SIGN_EXTEND)
799 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
802
803 case SIGN_EXTEND:
804 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
805 becomes just the MINUS if its mode is MODE. This allows
806 folding switch statements on machines using casesi (such as
807 the VAX). */
808 if (GET_CODE (op) == TRUNCATE
809 && GET_MODE (XEXP (op, 0)) == mode
810 && GET_CODE (XEXP (op, 0)) == MINUS
811 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
812 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
813 return XEXP (op, 0);
814
815 /* Check for a sign extension of a subreg of a promoted
816 variable, where the promotion is sign-extended, and the
817 target mode is the same as the variable's promotion. */
818 if (GET_CODE (op) == SUBREG
819 && SUBREG_PROMOTED_VAR_P (op)
820 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
821 && GET_MODE (XEXP (op, 0)) == mode)
822 return XEXP (op, 0);
823
824 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
825 if (! POINTERS_EXTEND_UNSIGNED
826 && mode == Pmode && GET_MODE (op) == ptr_mode
827 && (CONSTANT_P (op)
828 || (GET_CODE (op) == SUBREG
829 && REG_P (SUBREG_REG (op))
830 && REG_POINTER (SUBREG_REG (op))
831 && GET_MODE (SUBREG_REG (op)) == Pmode)))
832 return convert_memory_address (Pmode, op);
833 #endif
834 break;
835
836 case ZERO_EXTEND:
837 /* Check for a zero extension of a subreg of a promoted
838 variable, where the promotion is zero-extended, and the
839 target mode is the same as the variable's promotion. */
840 if (GET_CODE (op) == SUBREG
841 && SUBREG_PROMOTED_VAR_P (op)
842 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
843 && GET_MODE (XEXP (op, 0)) == mode)
844 return XEXP (op, 0);
845
846 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
847 if (POINTERS_EXTEND_UNSIGNED > 0
848 && mode == Pmode && GET_MODE (op) == ptr_mode
849 && (CONSTANT_P (op)
850 || (GET_CODE (op) == SUBREG
851 && REG_P (SUBREG_REG (op))
852 && REG_POINTER (SUBREG_REG (op))
853 && GET_MODE (SUBREG_REG (op)) == Pmode)))
854 return convert_memory_address (Pmode, op);
855 #endif
856 break;
857
858 default:
859 break;
860 }
861
862 return 0;
863 }
864
865 /* Try to compute the value of a unary operation CODE whose output mode is to
866 be MODE with input operand OP whose mode was originally OP_MODE.
867 Return zero if the value cannot be computed. */
868 rtx
869 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
870 rtx op, enum machine_mode op_mode)
871 {
872 unsigned int width = GET_MODE_BITSIZE (mode);
873
874 if (code == VEC_DUPLICATE)
875 {
876 gcc_assert (VECTOR_MODE_P (mode));
877 if (GET_MODE (op) != VOIDmode)
878 {
879 if (!VECTOR_MODE_P (GET_MODE (op)))
880 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
881 else
882 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
883 (GET_MODE (op)));
884 }
885 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
886 || GET_CODE (op) == CONST_VECTOR)
887 {
888 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
889 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
890 rtvec v = rtvec_alloc (n_elts);
891 unsigned int i;
892
893 if (GET_CODE (op) != CONST_VECTOR)
894 for (i = 0; i < n_elts; i++)
895 RTVEC_ELT (v, i) = op;
896 else
897 {
898 enum machine_mode inmode = GET_MODE (op);
899 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
900 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
901
902 gcc_assert (in_n_elts < n_elts);
903 gcc_assert ((n_elts % in_n_elts) == 0);
904 for (i = 0; i < n_elts; i++)
905 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
906 }
907 return gen_rtx_CONST_VECTOR (mode, v);
908 }
909 }
910
911 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
912 {
913 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
914 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
915 enum machine_mode opmode = GET_MODE (op);
916 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
917 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
918 rtvec v = rtvec_alloc (n_elts);
919 unsigned int i;
920
921 gcc_assert (op_n_elts == n_elts);
922 for (i = 0; i < n_elts; i++)
923 {
924 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
925 CONST_VECTOR_ELT (op, i),
926 GET_MODE_INNER (opmode));
927 if (!x)
928 return 0;
929 RTVEC_ELT (v, i) = x;
930 }
931 return gen_rtx_CONST_VECTOR (mode, v);
932 }
933
934 /* The order of these tests is critical so that, for example, we don't
935 check the wrong mode (input vs. output) for a conversion operation,
936 such as FIX. At some point, this should be simplified. */
937
938 if (code == FLOAT && GET_MODE (op) == VOIDmode
939 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
940 {
941 HOST_WIDE_INT hv, lv;
942 REAL_VALUE_TYPE d;
943
944 if (GET_CODE (op) == CONST_INT)
945 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
946 else
947 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
948
949 REAL_VALUE_FROM_INT (d, lv, hv, mode);
950 d = real_value_truncate (mode, d);
951 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
952 }
953 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
954 && (GET_CODE (op) == CONST_DOUBLE
955 || GET_CODE (op) == CONST_INT))
956 {
957 HOST_WIDE_INT hv, lv;
958 REAL_VALUE_TYPE d;
959
960 if (GET_CODE (op) == CONST_INT)
961 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
962 else
963 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
964
965 if (op_mode == VOIDmode)
966 {
967 /* We don't know how to interpret negative-looking numbers in
968 this case, so don't try to fold those. */
969 if (hv < 0)
970 return 0;
971 }
972 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
973 ;
974 else
975 hv = 0, lv &= GET_MODE_MASK (op_mode);
976
977 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
978 d = real_value_truncate (mode, d);
979 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
980 }
981
982 if (GET_CODE (op) == CONST_INT
983 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
984 {
985 HOST_WIDE_INT arg0 = INTVAL (op);
986 HOST_WIDE_INT val;
987
988 switch (code)
989 {
990 case NOT:
991 val = ~ arg0;
992 break;
993
994 case NEG:
995 val = - arg0;
996 break;
997
998 case ABS:
999 val = (arg0 >= 0 ? arg0 : - arg0);
1000 break;
1001
1002 case FFS:
1003 /* Don't use ffs here. Instead, get low order bit and then its
1004 number. If arg0 is zero, this will return 0, as desired. */
1005 arg0 &= GET_MODE_MASK (mode);
1006 val = exact_log2 (arg0 & (- arg0)) + 1;
1007 break;
1008
1009 case CLZ:
1010 arg0 &= GET_MODE_MASK (mode);
1011 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1012 ;
1013 else
1014 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1015 break;
1016
1017 case CTZ:
1018 arg0 &= GET_MODE_MASK (mode);
1019 if (arg0 == 0)
1020 {
1021 /* Even if the value at zero is undefined, we have to come
1022 up with some replacement. Seems good enough. */
1023 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1024 val = GET_MODE_BITSIZE (mode);
1025 }
1026 else
1027 val = exact_log2 (arg0 & -arg0);
1028 break;
1029
1030 case POPCOUNT:
1031 arg0 &= GET_MODE_MASK (mode);
1032 val = 0;
1033 while (arg0)
1034 val++, arg0 &= arg0 - 1;
1035 break;
1036
1037 case PARITY:
1038 arg0 &= GET_MODE_MASK (mode);
1039 val = 0;
1040 while (arg0)
1041 val++, arg0 &= arg0 - 1;
1042 val &= 1;
1043 break;
1044
1045 case TRUNCATE:
1046 val = arg0;
1047 break;
1048
1049 case ZERO_EXTEND:
1050 /* When zero-extending a CONST_INT, we need to know its
1051 original mode. */
1052 gcc_assert (op_mode != VOIDmode);
1053 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1054 {
1055 /* If we were really extending the mode,
1056 we would have to distinguish between zero-extension
1057 and sign-extension. */
1058 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1059 val = arg0;
1060 }
1061 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1062 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1063 else
1064 return 0;
1065 break;
1066
1067 case SIGN_EXTEND:
1068 if (op_mode == VOIDmode)
1069 op_mode = mode;
1070 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1071 {
1072 /* If we were really extending the mode,
1073 we would have to distinguish between zero-extension
1074 and sign-extension. */
1075 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1076 val = arg0;
1077 }
1078 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1079 {
1080 val
1081 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1082 if (val
1083 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1084 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1085 }
1086 else
1087 return 0;
1088 break;
1089
1090 case SQRT:
1091 case FLOAT_EXTEND:
1092 case FLOAT_TRUNCATE:
1093 case SS_TRUNCATE:
1094 case US_TRUNCATE:
1095 case SS_NEG:
1096 return 0;
1097
1098 default:
1099 gcc_unreachable ();
1100 }
1101
1102 return gen_int_mode (val, mode);
1103 }
1104
1105 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1106 for a DImode operation on a CONST_INT. */
1107 else if (GET_MODE (op) == VOIDmode
1108 && width <= HOST_BITS_PER_WIDE_INT * 2
1109 && (GET_CODE (op) == CONST_DOUBLE
1110 || GET_CODE (op) == CONST_INT))
1111 {
1112 unsigned HOST_WIDE_INT l1, lv;
1113 HOST_WIDE_INT h1, hv;
1114
1115 if (GET_CODE (op) == CONST_DOUBLE)
1116 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1117 else
1118 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1119
1120 switch (code)
1121 {
1122 case NOT:
1123 lv = ~ l1;
1124 hv = ~ h1;
1125 break;
1126
1127 case NEG:
1128 neg_double (l1, h1, &lv, &hv);
1129 break;
1130
1131 case ABS:
1132 if (h1 < 0)
1133 neg_double (l1, h1, &lv, &hv);
1134 else
1135 lv = l1, hv = h1;
1136 break;
1137
1138 case FFS:
1139 hv = 0;
1140 if (l1 == 0)
1141 {
1142 if (h1 == 0)
1143 lv = 0;
1144 else
1145 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1146 }
1147 else
1148 lv = exact_log2 (l1 & -l1) + 1;
1149 break;
1150
1151 case CLZ:
1152 hv = 0;
1153 if (h1 != 0)
1154 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1155 - HOST_BITS_PER_WIDE_INT;
1156 else if (l1 != 0)
1157 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1158 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1159 lv = GET_MODE_BITSIZE (mode);
1160 break;
1161
1162 case CTZ:
1163 hv = 0;
1164 if (l1 != 0)
1165 lv = exact_log2 (l1 & -l1);
1166 else if (h1 != 0)
1167 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1168 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1169 lv = GET_MODE_BITSIZE (mode);
1170 break;
1171
1172 case POPCOUNT:
1173 hv = 0;
1174 lv = 0;
1175 while (l1)
1176 lv++, l1 &= l1 - 1;
1177 while (h1)
1178 lv++, h1 &= h1 - 1;
1179 break;
1180
1181 case PARITY:
1182 hv = 0;
1183 lv = 0;
1184 while (l1)
1185 lv++, l1 &= l1 - 1;
1186 while (h1)
1187 lv++, h1 &= h1 - 1;
1188 lv &= 1;
1189 break;
1190
1191 case TRUNCATE:
1192 /* This is just a change-of-mode, so do nothing. */
1193 lv = l1, hv = h1;
1194 break;
1195
1196 case ZERO_EXTEND:
1197 gcc_assert (op_mode != VOIDmode);
1198
1199 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1200 return 0;
1201
1202 hv = 0;
1203 lv = l1 & GET_MODE_MASK (op_mode);
1204 break;
1205
1206 case SIGN_EXTEND:
1207 if (op_mode == VOIDmode
1208 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1209 return 0;
1210 else
1211 {
1212 lv = l1 & GET_MODE_MASK (op_mode);
1213 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1214 && (lv & ((HOST_WIDE_INT) 1
1215 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1216 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1217
1218 hv = HWI_SIGN_EXTEND (lv);
1219 }
1220 break;
1221
1222 case SQRT:
1223 return 0;
1224
1225 default:
1226 return 0;
1227 }
1228
1229 return immed_double_const (lv, hv, mode);
1230 }
1231
1232 else if (GET_CODE (op) == CONST_DOUBLE
1233 && SCALAR_FLOAT_MODE_P (mode))
1234 {
1235 REAL_VALUE_TYPE d, t;
1236 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1237
1238 switch (code)
1239 {
1240 case SQRT:
1241 if (HONOR_SNANS (mode) && real_isnan (&d))
1242 return 0;
1243 real_sqrt (&t, mode, &d);
1244 d = t;
1245 break;
1246 case ABS:
1247 d = REAL_VALUE_ABS (d);
1248 break;
1249 case NEG:
1250 d = REAL_VALUE_NEGATE (d);
1251 break;
1252 case FLOAT_TRUNCATE:
1253 d = real_value_truncate (mode, d);
1254 break;
1255 case FLOAT_EXTEND:
1256 /* All this does is change the mode. */
1257 break;
1258 case FIX:
1259 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1260 break;
1261 case NOT:
1262 {
1263 long tmp[4];
1264 int i;
1265
1266 real_to_target (tmp, &d, GET_MODE (op));
1267 for (i = 0; i < 4; i++)
1268 tmp[i] = ~tmp[i];
1269 real_from_target (&d, tmp, mode);
1270 break;
1271 }
1272 default:
1273 gcc_unreachable ();
1274 }
1275 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1276 }
1277
1278 else if (GET_CODE (op) == CONST_DOUBLE
1279 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1280 && GET_MODE_CLASS (mode) == MODE_INT
1281 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1282 {
1283 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1284 operators are intentionally left unspecified (to ease implementation
1285 by target backends), for consistency, this routine implements the
1286 same semantics for constant folding as used by the middle-end. */
1287
1288 /* This was formerly used only for non-IEEE float.
1289 eggert@twinsun.com says it is safe for IEEE also. */
1290 HOST_WIDE_INT xh, xl, th, tl;
1291 REAL_VALUE_TYPE x, t;
1292 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1293 switch (code)
1294 {
1295 case FIX:
1296 if (REAL_VALUE_ISNAN (x))
1297 return const0_rtx;
1298
1299 /* Test against the signed upper bound. */
1300 if (width > HOST_BITS_PER_WIDE_INT)
1301 {
1302 th = ((unsigned HOST_WIDE_INT) 1
1303 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1304 tl = -1;
1305 }
1306 else
1307 {
1308 th = 0;
1309 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1310 }
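/* For example, for a 32-bit target mode the signed upper bound
   computed here is 0x7fffffff; larger values are clamped to it.  */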
1311 real_from_integer (&t, VOIDmode, tl, th, 0);
1312 if (REAL_VALUES_LESS (t, x))
1313 {
1314 xh = th;
1315 xl = tl;
1316 break;
1317 }
1318
1319 /* Test against the signed lower bound. */
1320 if (width > HOST_BITS_PER_WIDE_INT)
1321 {
1322 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1323 tl = 0;
1324 }
1325 else
1326 {
1327 th = -1;
1328 tl = (HOST_WIDE_INT) -1 << (width - 1);
1329 }
1330 real_from_integer (&t, VOIDmode, tl, th, 0);
1331 if (REAL_VALUES_LESS (x, t))
1332 {
1333 xh = th;
1334 xl = tl;
1335 break;
1336 }
1337 REAL_VALUE_TO_INT (&xl, &xh, x);
1338 break;
1339
1340 case UNSIGNED_FIX:
1341 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1342 return const0_rtx;
1343
1344 /* Test against the unsigned upper bound. */
1345 if (width == 2*HOST_BITS_PER_WIDE_INT)
1346 {
1347 th = -1;
1348 tl = -1;
1349 }
1350 else if (width >= HOST_BITS_PER_WIDE_INT)
1351 {
1352 th = ((unsigned HOST_WIDE_INT) 1
1353 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1354 tl = -1;
1355 }
1356 else
1357 {
1358 th = 0;
1359 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1360 }
1361 real_from_integer (&t, VOIDmode, tl, th, 1);
1362 if (REAL_VALUES_LESS (t, x))
1363 {
1364 xh = th;
1365 xl = tl;
1366 break;
1367 }
1368
1369 REAL_VALUE_TO_INT (&xl, &xh, x);
1370 break;
1371
1372 default:
1373 gcc_unreachable ();
1374 }
1375 return immed_double_const (xl, xh, mode);
1376 }
1377
1378 return NULL_RTX;
1379 }
1380 \f
1381 /* Subroutine of simplify_binary_operation to simplify a commutative,
1382 associative binary operation CODE with result mode MODE, operating
1383 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1384 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1385 canonicalization is possible. */
1386
1387 static rtx
1388 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1389 rtx op0, rtx op1)
1390 {
1391 rtx tem;
1392
1393 /* Linearize the operator to the left. */
1394 if (GET_CODE (op1) == code)
1395 {
1396 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1397 if (GET_CODE (op0) == code)
1398 {
1399 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1400 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1401 }
1402
1403 /* "a op (b op c)" becomes "(b op c) op a". */
1404 if (! swap_commutative_operands_p (op1, op0))
1405 return simplify_gen_binary (code, mode, op1, op0);
1406
1407 tem = op0;
1408 op0 = op1;
1409 op1 = tem;
1410 }
1411
1412 if (GET_CODE (op0) == code)
1413 {
1414 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1415 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1416 {
1417 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1418 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1419 }
1420
1421 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1422 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1423 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1424 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1425 if (tem != 0)
1426 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1427
1428 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1429 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1430 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1431 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1432 if (tem != 0)
1433 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1434 }
1435
1436 return 0;
1437 }
1438
1439
1440 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1441 and OP1. Return 0 if no simplification is possible.
1442
1443 Don't use this for relational operations such as EQ or LT.
1444 Use simplify_relational_operation instead. */
1445 rtx
1446 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1447 rtx op0, rtx op1)
1448 {
1449 rtx trueop0, trueop1;
1450 rtx tem;
1451
1452 /* Relational operations don't work here. We must know the mode
1453 of the operands in order to do the comparison correctly.
1454 Assuming a full word can give incorrect results.
1455 Consider comparing 128 with -128 in QImode. */
1456 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1457 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1458
1459 /* Make sure the constant is second. */
1460 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1461 && swap_commutative_operands_p (op0, op1))
1462 {
1463 tem = op0, op0 = op1, op1 = tem;
1464 }
1465
1466 trueop0 = avoid_constant_pool_reference (op0);
1467 trueop1 = avoid_constant_pool_reference (op1);
1468
1469 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1470 if (tem)
1471 return tem;
1472 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1473 }
1474
1475 static rtx
1476 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1477 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1478 {
1479 rtx tem, reversed, opleft, opright;
1480 HOST_WIDE_INT val;
1481 unsigned int width = GET_MODE_BITSIZE (mode);
1482
1483 /* Even if we can't compute a constant result,
1484 there are some cases worth simplifying. */
1485
1486 switch (code)
1487 {
1488 case PLUS:
1489 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1490 when x is NaN, infinite, or finite and nonzero. They aren't
1491 when x is -0 and the rounding mode is not towards -infinity,
1492 since (-0) + 0 is then 0. */
1493 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1494 return op0;
1495
1496 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1497 transformations are safe even for IEEE. */
1498 if (GET_CODE (op0) == NEG)
1499 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1500 else if (GET_CODE (op1) == NEG)
1501 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1502
1503 /* (~a) + 1 -> -a */
1504 if (INTEGRAL_MODE_P (mode)
1505 && GET_CODE (op0) == NOT
1506 && trueop1 == const1_rtx)
1507 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1508
1509 /* Handle both-operands-constant cases. We can only add
1510 CONST_INTs to constants since the sum of relocatable symbols
1511 can't be handled by most assemblers. Don't add CONST_INT
1512 to CONST_INT since overflow won't be computed properly if wider
1513 than HOST_BITS_PER_WIDE_INT. */
1514
1515 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1516 && GET_CODE (op1) == CONST_INT)
1517 return plus_constant (op0, INTVAL (op1));
1518 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1519 && GET_CODE (op0) == CONST_INT)
1520 return plus_constant (op1, INTVAL (op0));
1521
1522 /* See if this is something like X * C - X or vice versa or
1523 if the multiplication is written as a shift. If so, we can
1524 distribute and make a new multiply, shift, or maybe just
1525 have X (if C is 2 in the example above). But don't make
1526 something more expensive than we had before. */
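/* For example, (plus (mult X (const_int 2)) X) can become
   (mult X (const_int 3)), and (plus (ashift X (const_int 2)) X)
   can become (mult X (const_int 5)), provided the result is not
   more expensive than the original.  */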
1527
1528 if (SCALAR_INT_MODE_P (mode))
1529 {
1530 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1531 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1532 rtx lhs = op0, rhs = op1;
1533
1534 if (GET_CODE (lhs) == NEG)
1535 {
1536 coeff0l = -1;
1537 coeff0h = -1;
1538 lhs = XEXP (lhs, 0);
1539 }
1540 else if (GET_CODE (lhs) == MULT
1541 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1542 {
1543 coeff0l = INTVAL (XEXP (lhs, 1));
1544 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1545 lhs = XEXP (lhs, 0);
1546 }
1547 else if (GET_CODE (lhs) == ASHIFT
1548 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1549 && INTVAL (XEXP (lhs, 1)) >= 0
1550 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1551 {
1552 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1553 coeff0h = 0;
1554 lhs = XEXP (lhs, 0);
1555 }
1556
1557 if (GET_CODE (rhs) == NEG)
1558 {
1559 coeff1l = -1;
1560 coeff1h = -1;
1561 rhs = XEXP (rhs, 0);
1562 }
1563 else if (GET_CODE (rhs) == MULT
1564 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1565 {
1566 coeff1l = INTVAL (XEXP (rhs, 1));
1567 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1568 rhs = XEXP (rhs, 0);
1569 }
1570 else if (GET_CODE (rhs) == ASHIFT
1571 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1572 && INTVAL (XEXP (rhs, 1)) >= 0
1573 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1574 {
1575 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1576 coeff1h = 0;
1577 rhs = XEXP (rhs, 0);
1578 }
1579
1580 if (rtx_equal_p (lhs, rhs))
1581 {
1582 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1583 rtx coeff;
1584 unsigned HOST_WIDE_INT l;
1585 HOST_WIDE_INT h;
1586
1587 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1588 coeff = immed_double_const (l, h, mode);
1589
1590 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1591 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1592 ? tem : 0;
1593 }
1594 }
1595
1596 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1597 if ((GET_CODE (op1) == CONST_INT
1598 || GET_CODE (op1) == CONST_DOUBLE)
1599 && GET_CODE (op0) == XOR
1600 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1601 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1602 && mode_signbit_p (mode, op1))
1603 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1604 simplify_gen_binary (XOR, mode, op1,
1605 XEXP (op0, 1)));
1606
1607 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1608 if (GET_CODE (op0) == MULT
1609 && GET_CODE (XEXP (op0, 0)) == NEG)
1610 {
1611 rtx in1, in2;
1612
1613 in1 = XEXP (XEXP (op0, 0), 0);
1614 in2 = XEXP (op0, 1);
1615 return simplify_gen_binary (MINUS, mode, op1,
1616 simplify_gen_binary (MULT, mode,
1617 in1, in2));
1618 }
1619
1620 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1621 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1622 is 1. */
1623 if (COMPARISON_P (op0)
1624 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1625 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1626 && (reversed = reversed_comparison (op0, mode)))
1627 return
1628 simplify_gen_unary (NEG, mode, reversed, mode);
1629
1630 /* If one of the operands is a PLUS or a MINUS, see if we can
1631 simplify this by the associative law.
1632 Don't use the associative law for floating point.
1633 The inaccuracy makes it nonassociative,
1634 and subtle programs can break if operations are associated. */
1635
1636 if (INTEGRAL_MODE_P (mode)
1637 && (plus_minus_operand_p (op0)
1638 || plus_minus_operand_p (op1))
1639 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1640 return tem;
1641
1642 /* Reassociate floating point addition only when the user
1643 specifies unsafe math optimizations. */
1644 if (FLOAT_MODE_P (mode)
1645 && flag_unsafe_math_optimizations)
1646 {
1647 tem = simplify_associative_operation (code, mode, op0, op1);
1648 if (tem)
1649 return tem;
1650 }
1651 break;
1652
1653 case COMPARE:
1654 #ifdef HAVE_cc0
1655 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1656 using cc0, in which case we want to leave it as a COMPARE
1657 so we can distinguish it from a register-register-copy.
1658
1659 In IEEE floating point, x-0 is not the same as x. */
1660
1661 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1662 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1663 && trueop1 == CONST0_RTX (mode))
1664 return op0;
1665 #endif
1666
1667 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1668 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1669 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1670 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1671 {
1672 rtx xop00 = XEXP (op0, 0);
1673 rtx xop10 = XEXP (op1, 0);
1674
1675 #ifdef HAVE_cc0
1676 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1677 #else
1678 if (REG_P (xop00) && REG_P (xop10)
1679 && GET_MODE (xop00) == GET_MODE (xop10)
1680 && REGNO (xop00) == REGNO (xop10)
1681 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1682 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1683 #endif
1684 return xop00;
1685 }
1686 break;
1687
1688 case MINUS:
1689 /* We can't assume x-x is 0 even with non-IEEE floating point,
1690 but since it is zero except in very strange circumstances, we
1691 will treat it as zero with -funsafe-math-optimizations. */
1692 if (rtx_equal_p (trueop0, trueop1)
1693 && ! side_effects_p (op0)
1694 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1695 return CONST0_RTX (mode);
1696
1697 /* Change subtraction from zero into negation. (0 - x) is the
1698 same as -x when x is NaN, infinite, or finite and nonzero.
1699 But if the mode has signed zeros, and does not round towards
1700 -infinity, then 0 - 0 is 0, not -0. */
1701 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1702 return simplify_gen_unary (NEG, mode, op1, mode);
1703
1704 /* (-1 - a) is ~a. */
1705 if (trueop0 == constm1_rtx)
1706 return simplify_gen_unary (NOT, mode, op1, mode);
1707
1708 /* Subtracting 0 has no effect unless the mode has signed zeros
1709 and supports rounding towards -infinity. In such a case,
1710 0 - 0 is -0. */
1711 if (!(HONOR_SIGNED_ZEROS (mode)
1712 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1713 && trueop1 == CONST0_RTX (mode))
1714 return op0;
1715
1716 /* See if this is something like X * C - X or vice versa or
1717 if the multiplication is written as a shift. If so, we can
1718 distribute and make a new multiply, shift, or maybe just
1719 have X (if C is 2 in the example above). But don't make
1720 something more expensive than we had before. */
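/* For example, (minus (mult X (const_int 4)) X) can become
   (mult X (const_int 3)), subject to the same cost check.  */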
1721
1722 if (SCALAR_INT_MODE_P (mode))
1723 {
1724 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1725 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1726 rtx lhs = op0, rhs = op1;
1727
1728 if (GET_CODE (lhs) == NEG)
1729 {
1730 coeff0l = -1;
1731 coeff0h = -1;
1732 lhs = XEXP (lhs, 0);
1733 }
1734 else if (GET_CODE (lhs) == MULT
1735 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1736 {
1737 coeff0l = INTVAL (XEXP (lhs, 1));
1738 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1739 lhs = XEXP (lhs, 0);
1740 }
1741 else if (GET_CODE (lhs) == ASHIFT
1742 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1743 && INTVAL (XEXP (lhs, 1)) >= 0
1744 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1745 {
1746 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1747 coeff0h = 0;
1748 lhs = XEXP (lhs, 0);
1749 }
1750
1751 if (GET_CODE (rhs) == NEG)
1752 {
1753 negcoeff1l = 1;
1754 negcoeff1h = 0;
1755 rhs = XEXP (rhs, 0);
1756 }
1757 else if (GET_CODE (rhs) == MULT
1758 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1759 {
1760 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1761 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1762 rhs = XEXP (rhs, 0);
1763 }
1764 else if (GET_CODE (rhs) == ASHIFT
1765 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1766 && INTVAL (XEXP (rhs, 1)) >= 0
1767 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1768 {
1769 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1770 negcoeff1h = -1;
1771 rhs = XEXP (rhs, 0);
1772 }
1773
1774 if (rtx_equal_p (lhs, rhs))
1775 {
1776 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1777 rtx coeff;
1778 unsigned HOST_WIDE_INT l;
1779 HOST_WIDE_INT h;
1780
1781 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1782 coeff = immed_double_const (l, h, mode);
1783
1784 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1785 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1786 ? tem : 0;
1787 }
1788 }
1789
1790 /* (a - (-b)) -> (a + b). True even for IEEE. */
1791 if (GET_CODE (op1) == NEG)
1792 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1793
1794 /* (-x - c) may be simplified as (-c - x). */
1795 if (GET_CODE (op0) == NEG
1796 && (GET_CODE (op1) == CONST_INT
1797 || GET_CODE (op1) == CONST_DOUBLE))
1798 {
1799 tem = simplify_unary_operation (NEG, mode, op1, mode);
1800 if (tem)
1801 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1802 }
1803
1804 /* Don't let a relocatable value get a negative coeff. */
1805 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1806 return simplify_gen_binary (PLUS, mode,
1807 op0,
1808 neg_const_int (mode, op1));
1809
1810 /* (x - (x & y)) -> (x & ~y) */
1811 if (GET_CODE (op1) == AND)
1812 {
1813 if (rtx_equal_p (op0, XEXP (op1, 0)))
1814 {
1815 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1816 GET_MODE (XEXP (op1, 1)));
1817 return simplify_gen_binary (AND, mode, op0, tem);
1818 }
1819 if (rtx_equal_p (op0, XEXP (op1, 1)))
1820 {
1821 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1822 GET_MODE (XEXP (op1, 0)));
1823 return simplify_gen_binary (AND, mode, op0, tem);
1824 }
1825 }
1826
1827 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1828 by reversing the comparison code if valid. */
1829 if (STORE_FLAG_VALUE == 1
1830 && trueop0 == const1_rtx
1831 && COMPARISON_P (op1)
1832 && (reversed = reversed_comparison (op1, mode)))
1833 return reversed;
1834
1835 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1836 if (GET_CODE (op1) == MULT
1837 && GET_CODE (XEXP (op1, 0)) == NEG)
1838 {
1839 rtx in1, in2;
1840
1841 in1 = XEXP (XEXP (op1, 0), 0);
1842 in2 = XEXP (op1, 1);
1843 return simplify_gen_binary (PLUS, mode,
1844 simplify_gen_binary (MULT, mode,
1845 in1, in2),
1846 op0);
1847 }
1848
1849 /* Canonicalize (minus (neg A) (mult B C)) to
1850 (minus (mult (neg B) C) A). */
1851 if (GET_CODE (op1) == MULT
1852 && GET_CODE (op0) == NEG)
1853 {
1854 rtx in1, in2;
1855
1856 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1857 in2 = XEXP (op1, 1);
1858 return simplify_gen_binary (MINUS, mode,
1859 simplify_gen_binary (MULT, mode,
1860 in1, in2),
1861 XEXP (op0, 0));
1862 }
1863
1864 /* If one of the operands is a PLUS or a MINUS, see if we can
1865 simplify this by the associative law. This will, for example,
1866 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1867 Don't use the associative law for floating point.
1868 The inaccuracy makes it nonassociative,
1869 and subtle programs can break if operations are associated. */
1870
1871 if (INTEGRAL_MODE_P (mode)
1872 && (plus_minus_operand_p (op0)
1873 || plus_minus_operand_p (op1))
1874 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1875 return tem;
1876 break;
1877
1878 case MULT:
1879 if (trueop1 == constm1_rtx)
1880 return simplify_gen_unary (NEG, mode, op0, mode);
1881
1882 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1883 x is NaN, since x * 0 is then also NaN. Nor is it valid
1884 when the mode has signed zeros, since multiplying a negative
1885 number by 0 will give -0, not 0. */
1886 if (!HONOR_NANS (mode)
1887 && !HONOR_SIGNED_ZEROS (mode)
1888 && trueop1 == CONST0_RTX (mode)
1889 && ! side_effects_p (op0))
1890 return op1;
1891
1892 /* In IEEE floating point, x*1 is not equivalent to x for
1893 signalling NaNs. */
1894 if (!HONOR_SNANS (mode)
1895 && trueop1 == CONST1_RTX (mode))
1896 return op0;
1897
 1898 /* Convert a multiply by a constant power of two into a left
 1899 shift. */
1900 if (GET_CODE (trueop1) == CONST_INT
1901 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1902 /* If the mode is larger than the host word size, and the
1903 uppermost bit is set, then this isn't a power of two due
1904 to implicit sign extension. */
1905 && (width <= HOST_BITS_PER_WIDE_INT
1906 || val != HOST_BITS_PER_WIDE_INT - 1))
1907 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1908
1909 /* Likewise for multipliers wider than a word. */
1910 else if (GET_CODE (trueop1) == CONST_DOUBLE
1911 && (GET_MODE (trueop1) == VOIDmode
1912 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1913 && GET_MODE (op0) == mode
1914 && CONST_DOUBLE_LOW (trueop1) == 0
1915 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1916 return simplify_gen_binary (ASHIFT, mode, op0,
1917 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1918
1919 /* x*2 is x+x and x*(-1) is -x */
1920 if (GET_CODE (trueop1) == CONST_DOUBLE
1921 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1922 && GET_MODE (op0) == mode)
1923 {
1924 REAL_VALUE_TYPE d;
1925 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1926
1927 if (REAL_VALUES_EQUAL (d, dconst2))
1928 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1929
1930 if (REAL_VALUES_EQUAL (d, dconstm1))
1931 return simplify_gen_unary (NEG, mode, op0, mode);
1932 }
1933
1934 /* Reassociate multiplication, but for floating point MULTs
1935 only when the user specifies unsafe math optimizations. */
1936 if (! FLOAT_MODE_P (mode)
1937 || flag_unsafe_math_optimizations)
1938 {
1939 tem = simplify_associative_operation (code, mode, op0, op1);
1940 if (tem)
1941 return tem;
1942 }
1943 break;
1944
1945 case IOR:
1946 if (trueop1 == const0_rtx)
1947 return op0;
1948 if (GET_CODE (trueop1) == CONST_INT
1949 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1950 == GET_MODE_MASK (mode)))
1951 return op1;
1952 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1953 return op0;
1954 /* A | (~A) -> -1 */
1955 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1956 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1957 && ! side_effects_p (op0)
1958 && SCALAR_INT_MODE_P (mode))
1959 return constm1_rtx;
1960
1961 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1962 if (GET_CODE (op1) == CONST_INT
1963 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1964 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1965 return op1;
1966
1967 /* Convert (A & B) | A to A. */
1968 if (GET_CODE (op0) == AND
1969 && (rtx_equal_p (XEXP (op0, 0), op1)
1970 || rtx_equal_p (XEXP (op0, 1), op1))
1971 && ! side_effects_p (XEXP (op0, 0))
1972 && ! side_effects_p (XEXP (op0, 1)))
1973 return op1;
1974
1975 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1976 mode size to (rotate A CX). */
1977
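	  /* Put the ASHIFT (or the SUBREG that may wrap a widened ASHIFT)
	     in OPLEFT and the other operand in OPRIGHT, so the two rotate
	     checks below only have to handle one operand order.  */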
1978 if (GET_CODE (op1) == ASHIFT
1979 || GET_CODE (op1) == SUBREG)
1980 {
1981 opleft = op1;
1982 opright = op0;
1983 }
1984 else
1985 {
1986 opright = op1;
1987 opleft = op0;
1988 }
1989
1990 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
1991 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
1992 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
1993 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1994 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
1995 == GET_MODE_BITSIZE (mode)))
1996 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
1997
1998 /* Same, but for ashift that has been "simplified" to a wider mode
1999 by simplify_shift_const. */
2000
2001 if (GET_CODE (opleft) == SUBREG
2002 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2003 && GET_CODE (opright) == LSHIFTRT
2004 && GET_CODE (XEXP (opright, 0)) == SUBREG
2005 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2006 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2007 && (GET_MODE_SIZE (GET_MODE (opleft))
2008 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2009 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2010 SUBREG_REG (XEXP (opright, 0)))
2011 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2012 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2013 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2014 == GET_MODE_BITSIZE (mode)))
2015 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2016 XEXP (SUBREG_REG (opleft), 1));
2017
 2018 /* If we have (ior (and X C1) C2), simplify this by making
 2019 C1 as small as possible if C1 actually changes. */
2020 if (GET_CODE (op1) == CONST_INT
2021 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2022 || INTVAL (op1) > 0)
2023 && GET_CODE (op0) == AND
2024 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2026 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2027 return simplify_gen_binary (IOR, mode,
2028 simplify_gen_binary
2029 (AND, mode, XEXP (op0, 0),
2030 GEN_INT (INTVAL (XEXP (op0, 1))
2031 & ~INTVAL (op1))),
2032 op1);
2033
2034 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2035 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2036 the PLUS does not affect any of the bits in OP1: then we can do
2037 the IOR as a PLUS and we can associate. This is valid if OP1
2038 can be safely shifted left C bits. */
2039 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2040 && GET_CODE (XEXP (op0, 0)) == PLUS
2041 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2042 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2043 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2044 {
2045 int count = INTVAL (XEXP (op0, 1));
2046 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2047
2048 if (mask >> count == INTVAL (trueop1)
2049 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2050 return simplify_gen_binary (ASHIFTRT, mode,
2051 plus_constant (XEXP (op0, 0), mask),
2052 XEXP (op0, 1));
2053 }
2054
2055 tem = simplify_associative_operation (code, mode, op0, op1);
2056 if (tem)
2057 return tem;
2058 break;
2059
2060 case XOR:
2061 if (trueop1 == const0_rtx)
2062 return op0;
2063 if (GET_CODE (trueop1) == CONST_INT
2064 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2065 == GET_MODE_MASK (mode)))
2066 return simplify_gen_unary (NOT, mode, op0, mode);
2067 if (rtx_equal_p (trueop0, trueop1)
2068 && ! side_effects_p (op0)
2069 && GET_MODE_CLASS (mode) != MODE_CC)
2070 return CONST0_RTX (mode);
2071
2072 /* Canonicalize XOR of the most significant bit to PLUS. */
2073 if ((GET_CODE (op1) == CONST_INT
2074 || GET_CODE (op1) == CONST_DOUBLE)
2075 && mode_signbit_p (mode, op1))
2076 return simplify_gen_binary (PLUS, mode, op0, op1);
2077 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2078 if ((GET_CODE (op1) == CONST_INT
2079 || GET_CODE (op1) == CONST_DOUBLE)
2080 && GET_CODE (op0) == PLUS
2081 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2082 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2083 && mode_signbit_p (mode, XEXP (op0, 1)))
2084 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2085 simplify_gen_binary (XOR, mode, op1,
2086 XEXP (op0, 1)));
2087
2088 /* If we are XORing two things that have no bits in common,
2089 convert them into an IOR. This helps to detect rotation encoded
2090 using those methods and possibly other simplifications. */
2091
2092 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2093 && (nonzero_bits (op0, mode)
2094 & nonzero_bits (op1, mode)) == 0)
2095 return (simplify_gen_binary (IOR, mode, op0, op1));
2096
2097 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2098 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2099 (NOT y). */
2100 {
2101 int num_negated = 0;
2102
2103 if (GET_CODE (op0) == NOT)
2104 num_negated++, op0 = XEXP (op0, 0);
2105 if (GET_CODE (op1) == NOT)
2106 num_negated++, op1 = XEXP (op1, 0);
2107
2108 if (num_negated == 2)
2109 return simplify_gen_binary (XOR, mode, op0, op1);
2110 else if (num_negated == 1)
2111 return simplify_gen_unary (NOT, mode,
2112 simplify_gen_binary (XOR, mode, op0, op1),
2113 mode);
2114 }
2115
2116 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2117 correspond to a machine insn or result in further simplifications
2118 if B is a constant. */
2119
2120 if (GET_CODE (op0) == AND
2121 && rtx_equal_p (XEXP (op0, 1), op1)
2122 && ! side_effects_p (op1))
2123 return simplify_gen_binary (AND, mode,
2124 simplify_gen_unary (NOT, mode,
2125 XEXP (op0, 0), mode),
2126 op1);
2127
2128 else if (GET_CODE (op0) == AND
2129 && rtx_equal_p (XEXP (op0, 0), op1)
2130 && ! side_effects_p (op1))
2131 return simplify_gen_binary (AND, mode,
2132 simplify_gen_unary (NOT, mode,
2133 XEXP (op0, 1), mode),
2134 op1);
2135
2136 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2137 comparison if STORE_FLAG_VALUE is 1. */
2138 if (STORE_FLAG_VALUE == 1
2139 && trueop1 == const1_rtx
2140 && COMPARISON_P (op0)
2141 && (reversed = reversed_comparison (op0, mode)))
2142 return reversed;
2143
2144 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2145 is (lt foo (const_int 0)), so we can perform the above
2146 simplification if STORE_FLAG_VALUE is 1. */
2147
2148 if (STORE_FLAG_VALUE == 1
2149 && trueop1 == const1_rtx
2150 && GET_CODE (op0) == LSHIFTRT
2151 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2152 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2153 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2154
 2155 /* Likewise, (xor (comparison foo bar) (const_int sign-bit)) can become
 2156 the reversed comparison when STORE_FLAG_VALUE is the sign bit. */
2157 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2158 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2159 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2160 && trueop1 == const_true_rtx
2161 && COMPARISON_P (op0)
2162 && (reversed = reversed_comparison (op0, mode)))
2163 return reversed;
2164
2167 tem = simplify_associative_operation (code, mode, op0, op1);
2168 if (tem)
2169 return tem;
2170 break;
2171
2172 case AND:
2173 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2174 return trueop1;
2175 /* If we are turning off bits already known off in OP0, we need
2176 not do an AND. */
2177 if (GET_CODE (trueop1) == CONST_INT
2178 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2179 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2180 return op0;
2181 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2182 && GET_MODE_CLASS (mode) != MODE_CC)
2183 return op0;
2184 /* A & (~A) -> 0 */
2185 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2186 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2187 && ! side_effects_p (op0)
2188 && GET_MODE_CLASS (mode) != MODE_CC)
2189 return CONST0_RTX (mode);
2190
2191 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2192 there are no nonzero bits of C outside of X's mode. */
2193 if ((GET_CODE (op0) == SIGN_EXTEND
2194 || GET_CODE (op0) == ZERO_EXTEND)
2195 && GET_CODE (trueop1) == CONST_INT
2196 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2197 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2198 & INTVAL (trueop1)) == 0)
2199 {
2200 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2201 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2202 gen_int_mode (INTVAL (trueop1),
2203 imode));
2204 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2205 }
2206
2207 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2208 insn (and may simplify more). */
2209 if (GET_CODE (op0) == XOR
2210 && rtx_equal_p (XEXP (op0, 0), op1)
2211 && ! side_effects_p (op1))
2212 return simplify_gen_binary (AND, mode,
2213 simplify_gen_unary (NOT, mode,
2214 XEXP (op0, 1), mode),
2215 op1);
2216
2217 if (GET_CODE (op0) == XOR
2218 && rtx_equal_p (XEXP (op0, 1), op1)
2219 && ! side_effects_p (op1))
2220 return simplify_gen_binary (AND, mode,
2221 simplify_gen_unary (NOT, mode,
2222 XEXP (op0, 0), mode),
2223 op1);
2224
2225 /* Similarly for (~(A ^ B)) & A. */
2226 if (GET_CODE (op0) == NOT
2227 && GET_CODE (XEXP (op0, 0)) == XOR
2228 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2229 && ! side_effects_p (op1))
2230 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2231
2232 if (GET_CODE (op0) == NOT
2233 && GET_CODE (XEXP (op0, 0)) == XOR
2234 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2235 && ! side_effects_p (op1))
2236 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2237
2238 /* Convert (A | B) & A to A. */
2239 if (GET_CODE (op0) == IOR
2240 && (rtx_equal_p (XEXP (op0, 0), op1)
2241 || rtx_equal_p (XEXP (op0, 1), op1))
2242 && ! side_effects_p (XEXP (op0, 0))
2243 && ! side_effects_p (XEXP (op0, 1)))
2244 return op1;
2245
2246 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2247 ((A & N) + B) & M -> (A + B) & M
2248 Similarly if (N & M) == 0,
2249 ((A | N) + B) & M -> (A + B) & M
2250 and for - instead of + and/or ^ instead of |. */
2251 if (GET_CODE (trueop1) == CONST_INT
2252 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2253 && ~INTVAL (trueop1)
2254 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2255 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2256 {
2257 rtx pmop[2];
2258 int which;
2259
2260 pmop[0] = XEXP (op0, 0);
2261 pmop[1] = XEXP (op0, 1);
2262
2263 for (which = 0; which < 2; which++)
2264 {
2265 tem = pmop[which];
2266 switch (GET_CODE (tem))
2267 {
2268 case AND:
2269 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2270 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2271 == INTVAL (trueop1))
2272 pmop[which] = XEXP (tem, 0);
2273 break;
2274 case IOR:
2275 case XOR:
2276 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2277 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2278 pmop[which] = XEXP (tem, 0);
2279 break;
2280 default:
2281 break;
2282 }
2283 }
2284
2285 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2286 {
2287 tem = simplify_gen_binary (GET_CODE (op0), mode,
2288 pmop[0], pmop[1]);
2289 return simplify_gen_binary (code, mode, tem, op1);
2290 }
2291 }
2292 tem = simplify_associative_operation (code, mode, op0, op1);
2293 if (tem)
2294 return tem;
2295 break;
2296
2297 case UDIV:
2298 /* 0/x is 0 (or x&0 if x has side-effects). */
2299 if (trueop0 == CONST0_RTX (mode))
2300 {
2301 if (side_effects_p (op1))
2302 return simplify_gen_binary (AND, mode, op1, trueop0);
2303 return trueop0;
2304 }
2305 /* x/1 is x. */
2306 if (trueop1 == CONST1_RTX (mode))
2307 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2308 /* Convert divide by power of two into shift. */
2309 if (GET_CODE (trueop1) == CONST_INT
2310 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2311 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2312 break;
2313
2314 case DIV:
2315 /* Handle floating point and integers separately. */
2316 if (SCALAR_FLOAT_MODE_P (mode))
2317 {
2318 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2319 safe for modes with NaNs, since 0.0 / 0.0 will then be
2320 NaN rather than 0.0. Nor is it safe for modes with signed
 2321 zeros, since dividing 0 by a negative number gives -0.0. */
2322 if (trueop0 == CONST0_RTX (mode)
2323 && !HONOR_NANS (mode)
2324 && !HONOR_SIGNED_ZEROS (mode)
2325 && ! side_effects_p (op1))
2326 return op0;
2327 /* x/1.0 is x. */
2328 if (trueop1 == CONST1_RTX (mode)
2329 && !HONOR_SNANS (mode))
2330 return op0;
2331
2332 if (GET_CODE (trueop1) == CONST_DOUBLE
2333 && trueop1 != CONST0_RTX (mode))
2334 {
2335 REAL_VALUE_TYPE d;
2336 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2337
2338 /* x/-1.0 is -x. */
2339 if (REAL_VALUES_EQUAL (d, dconstm1)
2340 && !HONOR_SNANS (mode))
2341 return simplify_gen_unary (NEG, mode, op0, mode);
2342
2343 /* Change FP division by a constant into multiplication.
2344 Only do this with -funsafe-math-optimizations. */
2345 if (flag_unsafe_math_optimizations
2346 && !REAL_VALUES_EQUAL (d, dconst0))
2347 {
2348 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2349 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2350 return simplify_gen_binary (MULT, mode, op0, tem);
2351 }
2352 }
2353 }
2354 else
2355 {
2356 /* 0/x is 0 (or x&0 if x has side-effects). */
2357 if (trueop0 == CONST0_RTX (mode))
2358 {
2359 if (side_effects_p (op1))
2360 return simplify_gen_binary (AND, mode, op1, trueop0);
2361 return trueop0;
2362 }
2363 /* x/1 is x. */
2364 if (trueop1 == CONST1_RTX (mode))
2365 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2366 /* x/-1 is -x. */
2367 if (trueop1 == constm1_rtx)
2368 {
2369 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2370 return simplify_gen_unary (NEG, mode, x, mode);
2371 }
2372 }
2373 break;
2374
2375 case UMOD:
2376 /* 0%x is 0 (or x&0 if x has side-effects). */
2377 if (trueop0 == CONST0_RTX (mode))
2378 {
2379 if (side_effects_p (op1))
2380 return simplify_gen_binary (AND, mode, op1, trueop0);
2381 return trueop0;
2382 }
 2383 /* x%1 is 0 (or x&0 if x has side-effects). */
2384 if (trueop1 == CONST1_RTX (mode))
2385 {
2386 if (side_effects_p (op0))
2387 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2388 return CONST0_RTX (mode);
2389 }
2390 /* Implement modulus by power of two as AND. */
2391 if (GET_CODE (trueop1) == CONST_INT
2392 && exact_log2 (INTVAL (trueop1)) > 0)
2393 return simplify_gen_binary (AND, mode, op0,
2394 GEN_INT (INTVAL (op1) - 1));
2395 break;
2396
2397 case MOD:
2398 /* 0%x is 0 (or x&0 if x has side-effects). */
2399 if (trueop0 == CONST0_RTX (mode))
2400 {
2401 if (side_effects_p (op1))
2402 return simplify_gen_binary (AND, mode, op1, trueop0);
2403 return trueop0;
2404 }
 2405 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2406 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2407 {
2408 if (side_effects_p (op0))
2409 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2410 return CONST0_RTX (mode);
2411 }
2412 break;
2413
2414 case ROTATERT:
2415 case ROTATE:
2416 case ASHIFTRT:
 2417 /* Rotating ~0 always results in ~0, as does shifting it right arithmetically. */
2418 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2419 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2420 && ! side_effects_p (op1))
2421 return op0;
2422
2423 /* Fall through.... */
2424
2425 case ASHIFT:
2426 case SS_ASHIFT:
2427 case LSHIFTRT:
2428 if (trueop1 == CONST0_RTX (mode))
2429 return op0;
2430 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2431 return op0;
2432 break;
2433
2434 case SMIN:
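	  /* (smin X C) is C when C is the most negative value of MODE.  */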
2435 if (width <= HOST_BITS_PER_WIDE_INT
2436 && GET_CODE (trueop1) == CONST_INT
 2437 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2438 && ! side_effects_p (op0))
2439 return op1;
2440 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2441 return op0;
2442 tem = simplify_associative_operation (code, mode, op0, op1);
2443 if (tem)
2444 return tem;
2445 break;
2446
2447 case SMAX:
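	  /* (smax X C) is C when C is the largest signed value of MODE.  */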
2448 if (width <= HOST_BITS_PER_WIDE_INT
2449 && GET_CODE (trueop1) == CONST_INT
2450 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2451 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2452 && ! side_effects_p (op0))
2453 return op1;
2454 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2455 return op0;
2456 tem = simplify_associative_operation (code, mode, op0, op1);
2457 if (tem)
2458 return tem;
2459 break;
2460
2461 case UMIN:
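	  /* (umin X 0) is 0.  */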
2462 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2463 return op1;
2464 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2465 return op0;
2466 tem = simplify_associative_operation (code, mode, op0, op1);
2467 if (tem)
2468 return tem;
2469 break;
2470
2471 case UMAX:
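	  /* (umax X -1) is -1, i.e. the all-ones value of MODE.  */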
2472 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2473 return op1;
2474 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2475 return op0;
2476 tem = simplify_associative_operation (code, mode, op0, op1);
2477 if (tem)
2478 return tem;
2479 break;
2480
2481 case SS_PLUS:
2482 case US_PLUS:
2483 case SS_MINUS:
2484 case US_MINUS:
2485 /* ??? There are simplifications that can be done. */
2486 return 0;
2487
2488 case VEC_SELECT:
2489 if (!VECTOR_MODE_P (mode))
2490 {
2491 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2492 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2493 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2494 gcc_assert (XVECLEN (trueop1, 0) == 1);
2495 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2496
2497 if (GET_CODE (trueop0) == CONST_VECTOR)
2498 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2499 (trueop1, 0, 0)));
2500 }
2501 else
2502 {
2503 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2504 gcc_assert (GET_MODE_INNER (mode)
2505 == GET_MODE_INNER (GET_MODE (trueop0)));
2506 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2507
2508 if (GET_CODE (trueop0) == CONST_VECTOR)
2509 {
2510 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2511 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2512 rtvec v = rtvec_alloc (n_elts);
2513 unsigned int i;
2514
2515 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2516 for (i = 0; i < n_elts; i++)
2517 {
2518 rtx x = XVECEXP (trueop1, 0, i);
2519
2520 gcc_assert (GET_CODE (x) == CONST_INT);
2521 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2522 INTVAL (x));
2523 }
2524
2525 return gen_rtx_CONST_VECTOR (mode, v);
2526 }
2527 }
2528
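	  /* A single-element VEC_SELECT of a VEC_CONCAT can sometimes be
	     reduced to one of the concatenated operands: walk down the
	     VEC_CONCAT by byte offset and return the operand whose mode
	     matches.  */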
2529 if (XVECLEN (trueop1, 0) == 1
2530 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2531 && GET_CODE (trueop0) == VEC_CONCAT)
2532 {
2533 rtx vec = trueop0;
2534 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2535
2536 /* Try to find the element in the VEC_CONCAT. */
2537 while (GET_MODE (vec) != mode
2538 && GET_CODE (vec) == VEC_CONCAT)
2539 {
2540 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2541 if (offset < vec_size)
2542 vec = XEXP (vec, 0);
2543 else
2544 {
2545 offset -= vec_size;
2546 vec = XEXP (vec, 1);
2547 }
2548 vec = avoid_constant_pool_reference (vec);
2549 }
2550
2551 if (GET_MODE (vec) == mode)
2552 return vec;
2553 }
2554
2555 return 0;
2556 case VEC_CONCAT:
2557 {
2558 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2559 ? GET_MODE (trueop0)
2560 : GET_MODE_INNER (mode));
2561 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2562 ? GET_MODE (trueop1)
2563 : GET_MODE_INNER (mode));
2564
2565 gcc_assert (VECTOR_MODE_P (mode));
2566 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2567 == GET_MODE_SIZE (mode));
2568
2569 if (VECTOR_MODE_P (op0_mode))
2570 gcc_assert (GET_MODE_INNER (mode)
2571 == GET_MODE_INNER (op0_mode));
2572 else
2573 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2574
2575 if (VECTOR_MODE_P (op1_mode))
2576 gcc_assert (GET_MODE_INNER (mode)
2577 == GET_MODE_INNER (op1_mode));
2578 else
2579 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2580
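	  /* If both operands are constants, fold the VEC_CONCAT into a
	     single CONST_VECTOR built element by element.  */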
2581 if ((GET_CODE (trueop0) == CONST_VECTOR
2582 || GET_CODE (trueop0) == CONST_INT
2583 || GET_CODE (trueop0) == CONST_DOUBLE)
2584 && (GET_CODE (trueop1) == CONST_VECTOR
2585 || GET_CODE (trueop1) == CONST_INT
2586 || GET_CODE (trueop1) == CONST_DOUBLE))
2587 {
2588 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2589 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2590 rtvec v = rtvec_alloc (n_elts);
2591 unsigned int i;
2592 unsigned in_n_elts = 1;
2593
2594 if (VECTOR_MODE_P (op0_mode))
2595 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2596 for (i = 0; i < n_elts; i++)
2597 {
2598 if (i < in_n_elts)
2599 {
2600 if (!VECTOR_MODE_P (op0_mode))
2601 RTVEC_ELT (v, i) = trueop0;
2602 else
2603 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2604 }
2605 else
2606 {
2607 if (!VECTOR_MODE_P (op1_mode))
2608 RTVEC_ELT (v, i) = trueop1;
2609 else
2610 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2611 i - in_n_elts);
2612 }
2613 }
2614
2615 return gen_rtx_CONST_VECTOR (mode, v);
2616 }
2617 }
2618 return 0;
2619
2620 default:
2621 gcc_unreachable ();
2622 }
2623
2624 return 0;
2625 }
2626
2627 rtx
2628 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2629 rtx op0, rtx op1)
2630 {
2631 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2632 HOST_WIDE_INT val;
2633 unsigned int width = GET_MODE_BITSIZE (mode);
2634
2635 if (VECTOR_MODE_P (mode)
2636 && code != VEC_CONCAT
2637 && GET_CODE (op0) == CONST_VECTOR
2638 && GET_CODE (op1) == CONST_VECTOR)
2639 {
2640 unsigned n_elts = GET_MODE_NUNITS (mode);
2641 enum machine_mode op0mode = GET_MODE (op0);
2642 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2643 enum machine_mode op1mode = GET_MODE (op1);
2644 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2645 rtvec v = rtvec_alloc (n_elts);
2646 unsigned int i;
2647
2648 gcc_assert (op0_n_elts == n_elts);
2649 gcc_assert (op1_n_elts == n_elts);
2650 for (i = 0; i < n_elts; i++)
2651 {
2652 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2653 CONST_VECTOR_ELT (op0, i),
2654 CONST_VECTOR_ELT (op1, i));
2655 if (!x)
2656 return 0;
2657 RTVEC_ELT (v, i) = x;
2658 }
2659
2660 return gen_rtx_CONST_VECTOR (mode, v);
2661 }
2662
2663 if (VECTOR_MODE_P (mode)
2664 && code == VEC_CONCAT
2665 && CONSTANT_P (op0) && CONSTANT_P (op1))
2666 {
2667 unsigned n_elts = GET_MODE_NUNITS (mode);
2668 rtvec v = rtvec_alloc (n_elts);
2669
2670 gcc_assert (n_elts >= 2);
2671 if (n_elts == 2)
2672 {
2673 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2674 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2675
2676 RTVEC_ELT (v, 0) = op0;
2677 RTVEC_ELT (v, 1) = op1;
2678 }
2679 else
2680 {
2681 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2682 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2683 unsigned i;
2684
2685 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2686 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2687 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2688
2689 for (i = 0; i < op0_n_elts; ++i)
2690 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2691 for (i = 0; i < op1_n_elts; ++i)
2692 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2693 }
2694
2695 return gen_rtx_CONST_VECTOR (mode, v);
2696 }
2697
2698 if (SCALAR_FLOAT_MODE_P (mode)
2699 && GET_CODE (op0) == CONST_DOUBLE
2700 && GET_CODE (op1) == CONST_DOUBLE
2701 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2702 {
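      /* Bit-wise AND, IOR and XOR on floating-point constants are applied
	 to the raw target representation of the values.  */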
2703 if (code == AND
2704 || code == IOR
2705 || code == XOR)
2706 {
2707 long tmp0[4];
2708 long tmp1[4];
2709 REAL_VALUE_TYPE r;
2710 int i;
2711
2712 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2713 GET_MODE (op0));
2714 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2715 GET_MODE (op1));
2716 for (i = 0; i < 4; i++)
2717 {
2718 switch (code)
2719 {
2720 case AND:
2721 tmp0[i] &= tmp1[i];
2722 break;
2723 case IOR:
2724 tmp0[i] |= tmp1[i];
2725 break;
2726 case XOR:
2727 tmp0[i] ^= tmp1[i];
2728 break;
2729 default:
2730 gcc_unreachable ();
2731 }
2732 }
2733 real_from_target (&r, tmp0, mode);
2734 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2735 }
2736 else
2737 {
2738 REAL_VALUE_TYPE f0, f1, value, result;
2739 bool inexact;
2740
2741 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2742 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2743 real_convert (&f0, mode, &f0);
2744 real_convert (&f1, mode, &f1);
2745
2746 if (HONOR_SNANS (mode)
2747 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2748 return 0;
2749
2750 if (code == DIV
2751 && REAL_VALUES_EQUAL (f1, dconst0)
2752 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2753 return 0;
2754
2755 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2756 && flag_trapping_math
2757 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2758 {
2759 int s0 = REAL_VALUE_NEGATIVE (f0);
2760 int s1 = REAL_VALUE_NEGATIVE (f1);
2761
2762 switch (code)
2763 {
2764 case PLUS:
2765 /* Inf + -Inf = NaN plus exception. */
2766 if (s0 != s1)
2767 return 0;
2768 break;
2769 case MINUS:
2770 /* Inf - Inf = NaN plus exception. */
2771 if (s0 == s1)
2772 return 0;
2773 break;
2774 case DIV:
2775 /* Inf / Inf = NaN plus exception. */
2776 return 0;
2777 default:
2778 break;
2779 }
2780 }
2781
2782 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2783 && flag_trapping_math
2784 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2785 || (REAL_VALUE_ISINF (f1)
2786 && REAL_VALUES_EQUAL (f0, dconst0))))
2787 /* Inf * 0 = NaN plus exception. */
2788 return 0;
2789
2790 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2791 &f0, &f1);
2792 real_convert (&result, mode, &value);
2793
 2794 /* Don't constant fold this floating point operation if
 2795 the result has overflowed and flag_trapping_math is set. */
2796
2797 if (flag_trapping_math
2798 && MODE_HAS_INFINITIES (mode)
2799 && REAL_VALUE_ISINF (result)
2800 && !REAL_VALUE_ISINF (f0)
2801 && !REAL_VALUE_ISINF (f1))
2802 /* Overflow plus exception. */
2803 return 0;
2804
2805 /* Don't constant fold this floating point operation if the
 2806 result may depend upon the run-time rounding mode and
2807 flag_rounding_math is set, or if GCC's software emulation
2808 is unable to accurately represent the result. */
2809
2810 if ((flag_rounding_math
2811 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2812 && !flag_unsafe_math_optimizations))
2813 && (inexact || !real_identical (&result, &value)))
2814 return NULL_RTX;
2815
2816 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2817 }
2818 }
2819
2820 /* We can fold some multi-word operations. */
2821 if (GET_MODE_CLASS (mode) == MODE_INT
2822 && width == HOST_BITS_PER_WIDE_INT * 2
2823 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2824 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2825 {
2826 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2827 HOST_WIDE_INT h1, h2, hv, ht;
2828
2829 if (GET_CODE (op0) == CONST_DOUBLE)
2830 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2831 else
2832 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2833
2834 if (GET_CODE (op1) == CONST_DOUBLE)
2835 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2836 else
2837 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2838
2839 switch (code)
2840 {
2841 case MINUS:
2842 /* A - B == A + (-B). */
2843 neg_double (l2, h2, &lv, &hv);
2844 l2 = lv, h2 = hv;
2845
2846 /* Fall through.... */
2847
2848 case PLUS:
2849 add_double (l1, h1, l2, h2, &lv, &hv);
2850 break;
2851
2852 case MULT:
2853 mul_double (l1, h1, l2, h2, &lv, &hv);
2854 break;
2855
2856 case DIV:
2857 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2858 &lv, &hv, &lt, &ht))
2859 return 0;
2860 break;
2861
2862 case MOD:
2863 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2864 &lt, &ht, &lv, &hv))
2865 return 0;
2866 break;
2867
2868 case UDIV:
2869 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2870 &lv, &hv, &lt, &ht))
2871 return 0;
2872 break;
2873
2874 case UMOD:
2875 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2876 &lt, &ht, &lv, &hv))
2877 return 0;
2878 break;
2879
2880 case AND:
2881 lv = l1 & l2, hv = h1 & h2;
2882 break;
2883
2884 case IOR:
2885 lv = l1 | l2, hv = h1 | h2;
2886 break;
2887
2888 case XOR:
2889 lv = l1 ^ l2, hv = h1 ^ h2;
2890 break;
2891
2892 case SMIN:
2893 if (h1 < h2
2894 || (h1 == h2
2895 && ((unsigned HOST_WIDE_INT) l1
2896 < (unsigned HOST_WIDE_INT) l2)))
2897 lv = l1, hv = h1;
2898 else
2899 lv = l2, hv = h2;
2900 break;
2901
2902 case SMAX:
2903 if (h1 > h2
2904 || (h1 == h2
2905 && ((unsigned HOST_WIDE_INT) l1
2906 > (unsigned HOST_WIDE_INT) l2)))
2907 lv = l1, hv = h1;
2908 else
2909 lv = l2, hv = h2;
2910 break;
2911
2912 case UMIN:
2913 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2914 || (h1 == h2
2915 && ((unsigned HOST_WIDE_INT) l1
2916 < (unsigned HOST_WIDE_INT) l2)))
2917 lv = l1, hv = h1;
2918 else
2919 lv = l2, hv = h2;
2920 break;
2921
2922 case UMAX:
2923 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2924 || (h1 == h2
2925 && ((unsigned HOST_WIDE_INT) l1
2926 > (unsigned HOST_WIDE_INT) l2)))
2927 lv = l1, hv = h1;
2928 else
2929 lv = l2, hv = h2;
2930 break;
2931
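	/* For double-word shifts and rotates, optionally truncate the count,
	   reject counts that are still out of range, and dispatch to the
	   appropriate double-word helper.  */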
2932 case LSHIFTRT: case ASHIFTRT:
2933 case ASHIFT:
2934 case ROTATE: case ROTATERT:
2935 if (SHIFT_COUNT_TRUNCATED)
2936 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2937
2938 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2939 return 0;
2940
2941 if (code == LSHIFTRT || code == ASHIFTRT)
2942 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2943 code == ASHIFTRT);
2944 else if (code == ASHIFT)
2945 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2946 else if (code == ROTATE)
2947 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2948 else /* code == ROTATERT */
2949 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2950 break;
2951
2952 default:
2953 return 0;
2954 }
2955
2956 return immed_double_const (lv, hv, mode);
2957 }
2958
2959 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2960 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2961 {
2962 /* Get the integer argument values in two forms:
2963 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2964
2965 arg0 = INTVAL (op0);
2966 arg1 = INTVAL (op1);
2967
2968 if (width < HOST_BITS_PER_WIDE_INT)
2969 {
2970 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2971 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2972
2973 arg0s = arg0;
2974 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2975 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2976
2977 arg1s = arg1;
2978 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2979 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2980 }
2981 else
2982 {
2983 arg0s = arg0;
2984 arg1s = arg1;
2985 }
2986
2987 /* Compute the value of the arithmetic. */
2988
2989 switch (code)
2990 {
2991 case PLUS:
2992 val = arg0s + arg1s;
2993 break;
2994
2995 case MINUS:
2996 val = arg0s - arg1s;
2997 break;
2998
2999 case MULT:
3000 val = arg0s * arg1s;
3001 break;
3002
3003 case DIV:
3004 if (arg1s == 0
3005 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3006 && arg1s == -1))
3007 return 0;
3008 val = arg0s / arg1s;
3009 break;
3010
3011 case MOD:
3012 if (arg1s == 0
3013 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3014 && arg1s == -1))
3015 return 0;
3016 val = arg0s % arg1s;
3017 break;
3018
3019 case UDIV:
3020 if (arg1 == 0
3021 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3022 && arg1s == -1))
3023 return 0;
3024 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3025 break;
3026
3027 case UMOD:
3028 if (arg1 == 0
3029 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3030 && arg1s == -1))
3031 return 0;
3032 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3033 break;
3034
3035 case AND:
3036 val = arg0 & arg1;
3037 break;
3038
3039 case IOR:
3040 val = arg0 | arg1;
3041 break;
3042
3043 case XOR:
3044 val = arg0 ^ arg1;
3045 break;
3046
3047 case LSHIFTRT:
3048 case ASHIFT:
3049 case ASHIFTRT:
3050 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3051 the value is in range. We can't return any old value for
3052 out-of-range arguments because either the middle-end (via
3053 shift_truncation_mask) or the back-end might be relying on
3054 target-specific knowledge. Nor can we rely on
3055 shift_truncation_mask, since the shift might not be part of an
3056 ashlM3, lshrM3 or ashrM3 instruction. */
3057 if (SHIFT_COUNT_TRUNCATED)
3058 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3059 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3060 return 0;
3061
3062 val = (code == ASHIFT
3063 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3064 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3065
3066 /* Sign-extend the result for arithmetic right shifts. */
3067 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3068 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3069 break;
3070
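	/* Rotates: reject a negative count, reduce it modulo the width,
	   and combine the two shifted halves of the value.  */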
3071 case ROTATERT:
3072 if (arg1 < 0)
3073 return 0;
3074
3075 arg1 %= width;
3076 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3077 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3078 break;
3079
3080 case ROTATE:
3081 if (arg1 < 0)
3082 return 0;
3083
3084 arg1 %= width;
3085 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3086 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3087 break;
3088
3089 case COMPARE:
3090 /* Do nothing here. */
3091 return 0;
3092
3093 case SMIN:
3094 val = arg0s <= arg1s ? arg0s : arg1s;
3095 break;
3096
3097 case UMIN:
3098 val = ((unsigned HOST_WIDE_INT) arg0
3099 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3100 break;
3101
3102 case SMAX:
3103 val = arg0s > arg1s ? arg0s : arg1s;
3104 break;
3105
3106 case UMAX:
3107 val = ((unsigned HOST_WIDE_INT) arg0
3108 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3109 break;
3110
3111 case SS_PLUS:
3112 case US_PLUS:
3113 case SS_MINUS:
3114 case US_MINUS:
3115 case SS_ASHIFT:
3116 /* ??? There are simplifications that can be done. */
3117 return 0;
3118
3119 default:
3120 gcc_unreachable ();
3121 }
3122
3123 return gen_int_mode (val, mode);
3124 }
3125
3126 return NULL_RTX;
3127 }
3128
3129
3130 \f
3131 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3132 PLUS or MINUS.
3133
 3134 Rather than testing for specific cases, we do this by a brute-force method
3135 and do all possible simplifications until no more changes occur. Then
3136 we rebuild the operation. */
3137
3138 struct simplify_plus_minus_op_data
3139 {
3140 rtx op;
3141 short neg;
3142 short ix;
3143 };
3144
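 /* qsort comparison function for simplify_plus_minus: order the operands by
    decreasing commutative_operand_precedence, using the original index IX
    to keep the sort stable.  */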
3145 static int
3146 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3147 {
3148 const struct simplify_plus_minus_op_data *d1 = p1;
3149 const struct simplify_plus_minus_op_data *d2 = p2;
3150 int result;
3151
3152 result = (commutative_operand_precedence (d2->op)
3153 - commutative_operand_precedence (d1->op));
3154 if (result)
3155 return result;
3156 return d1->ix - d2->ix;
3157 }
3158
3159 static rtx
3160 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3161 rtx op1)
3162 {
3163 struct simplify_plus_minus_op_data ops[8];
3164 rtx result, tem;
3165 int n_ops = 2, input_ops = 2;
3166 int first, changed, canonicalized = 0;
3167 int i, j;
3168
3169 memset (ops, 0, sizeof ops);
3170
3171 /* Set up the two operands and then expand them until nothing has been
3172 changed. If we run out of room in our array, give up; this should
3173 almost never happen. */
3174
3175 ops[0].op = op0;
3176 ops[0].neg = 0;
3177 ops[1].op = op1;
3178 ops[1].neg = (code == MINUS);
3179
3180 do
3181 {
3182 changed = 0;
3183
3184 for (i = 0; i < n_ops; i++)
3185 {
3186 rtx this_op = ops[i].op;
3187 int this_neg = ops[i].neg;
3188 enum rtx_code this_code = GET_CODE (this_op);
3189
3190 switch (this_code)
3191 {
3192 case PLUS:
3193 case MINUS:
3194 if (n_ops == 7)
3195 return NULL_RTX;
3196
3197 ops[n_ops].op = XEXP (this_op, 1);
3198 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3199 n_ops++;
3200
3201 ops[i].op = XEXP (this_op, 0);
3202 input_ops++;
3203 changed = 1;
3204 canonicalized |= this_neg;
3205 break;
3206
3207 case NEG:
3208 ops[i].op = XEXP (this_op, 0);
3209 ops[i].neg = ! this_neg;
3210 changed = 1;
3211 canonicalized = 1;
3212 break;
3213
3214 case CONST:
3215 if (n_ops < 7
3216 && GET_CODE (XEXP (this_op, 0)) == PLUS
3217 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3218 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3219 {
3220 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3221 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3222 ops[n_ops].neg = this_neg;
3223 n_ops++;
3224 changed = 1;
3225 canonicalized = 1;
3226 }
3227 break;
3228
3229 case NOT:
3230 /* ~a -> (-a - 1) */
3231 if (n_ops != 7)
3232 {
3233 ops[n_ops].op = constm1_rtx;
3234 ops[n_ops++].neg = this_neg;
3235 ops[i].op = XEXP (this_op, 0);
3236 ops[i].neg = !this_neg;
3237 changed = 1;
3238 canonicalized = 1;
3239 }
3240 break;
3241
3242 case CONST_INT:
3243 if (this_neg)
3244 {
3245 ops[i].op = neg_const_int (mode, this_op);
3246 ops[i].neg = 0;
3247 changed = 1;
3248 canonicalized = 1;
3249 }
3250 break;
3251
3252 default:
3253 break;
3254 }
3255 }
3256 }
3257 while (changed);
3258
3259 gcc_assert (n_ops >= 2);
3260 if (!canonicalized)
3261 {
3262 int n_constants = 0;
3263
3264 for (i = 0; i < n_ops; i++)
3265 if (GET_CODE (ops[i].op) == CONST_INT)
3266 n_constants++;
3267
3268 if (n_constants <= 1)
3269 return NULL_RTX;
3270 }
3271
3272 /* If we only have two operands, we can avoid the loops. */
3273 if (n_ops == 2)
3274 {
3275 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3276 rtx lhs, rhs;
3277
3278 /* Get the two operands. Be careful with the order, especially for
3279 the cases where code == MINUS. */
3280 if (ops[0].neg && ops[1].neg)
3281 {
3282 lhs = gen_rtx_NEG (mode, ops[0].op);
3283 rhs = ops[1].op;
3284 }
3285 else if (ops[0].neg)
3286 {
3287 lhs = ops[1].op;
3288 rhs = ops[0].op;
3289 }
3290 else
3291 {
3292 lhs = ops[0].op;
3293 rhs = ops[1].op;
3294 }
3295
3296 return simplify_const_binary_operation (code, mode, lhs, rhs);
3297 }
3298
3299 /* Now simplify each pair of operands until nothing changes. The first
3300 time through just simplify constants against each other. */
3301
3302 first = 1;
3303 do
3304 {
3305 changed = first;
3306
3307 for (i = 0; i < n_ops - 1; i++)
3308 for (j = i + 1; j < n_ops; j++)
3309 {
3310 rtx lhs = ops[i].op, rhs = ops[j].op;
3311 int lneg = ops[i].neg, rneg = ops[j].neg;
3312
3313 if (lhs != 0 && rhs != 0
3314 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3315 {
3316 enum rtx_code ncode = PLUS;
3317
3318 if (lneg != rneg)
3319 {
3320 ncode = MINUS;
3321 if (lneg)
3322 tem = lhs, lhs = rhs, rhs = tem;
3323 }
3324 else if (swap_commutative_operands_p (lhs, rhs))
3325 tem = lhs, lhs = rhs, rhs = tem;
3326
3327 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3328 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3329 {
3330 rtx tem_lhs, tem_rhs;
3331
3332 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3333 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3334 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3335
3336 if (tem && !CONSTANT_P (tem))
3337 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3338 }
3339 else
3340 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3341
3342 /* Reject "simplifications" that just wrap the two
3343 arguments in a CONST. Failure to do so can result
3344 in infinite recursion with simplify_binary_operation
3345 when it calls us to simplify CONST operations. */
3346 if (tem
3347 && ! (GET_CODE (tem) == CONST
3348 && GET_CODE (XEXP (tem, 0)) == ncode
3349 && XEXP (XEXP (tem, 0), 0) == lhs
3350 && XEXP (XEXP (tem, 0), 1) == rhs)
3351 /* Don't allow -x + -1 -> ~x simplifications in the
3352 first pass. This allows us the chance to combine
3353 the -1 with other constants. */
3354 && ! (first
3355 && GET_CODE (tem) == NOT
3356 && XEXP (tem, 0) == rhs))
3357 {
3358 lneg &= rneg;
3359 if (GET_CODE (tem) == NEG)
3360 tem = XEXP (tem, 0), lneg = !lneg;
3361 if (GET_CODE (tem) == CONST_INT && lneg)
3362 tem = neg_const_int (mode, tem), lneg = 0;
3363
3364 ops[i].op = tem;
3365 ops[i].neg = lneg;
3366 ops[j].op = NULL_RTX;
3367 changed = 1;
3368 }
3369 }
3370 }
3371
3372 first = 0;
3373 }
3374 while (changed);
3375
3376 /* Pack all the operands to the lower-numbered entries. */
3377 for (i = 0, j = 0; j < n_ops; j++)
3378 if (ops[j].op)
3379 {
3380 ops[i] = ops[j];
3381 /* Stabilize sort. */
3382 ops[i].ix = i;
3383 i++;
3384 }
3385 n_ops = i;
3386
3387 /* Sort the operations based on swap_commutative_operands_p. */
3388 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3389
3390 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3391 if (n_ops == 2
3392 && GET_CODE (ops[1].op) == CONST_INT
3393 && CONSTANT_P (ops[0].op)
3394 && ops[0].neg)
3395 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3396
3397 /* We suppressed creation of trivial CONST expressions in the
3398 combination loop to avoid recursion. Create one manually now.
3399 The combination loop should have ensured that there is exactly
3400 one CONST_INT, and the sort will have ensured that it is last
3401 in the array and that any other constant will be next-to-last. */
3402
3403 if (n_ops > 1
3404 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3405 && CONSTANT_P (ops[n_ops - 2].op))
3406 {
3407 rtx value = ops[n_ops - 1].op;
3408 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3409 value = neg_const_int (mode, value);
3410 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3411 n_ops--;
3412 }
3413
3414 /* Put a non-negated operand first, if possible. */
3415
3416 for (i = 0; i < n_ops && ops[i].neg; i++)
3417 continue;
3418 if (i == n_ops)
3419 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3420 else if (i != 0)
3421 {
3422 tem = ops[0].op;
3423 ops[0] = ops[i];
3424 ops[i].op = tem;
3425 ops[i].neg = 1;
3426 }
3427
3428 /* Now make the result by performing the requested operations. */
3429 result = ops[0].op;
3430 for (i = 1; i < n_ops; i++)
3431 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3432 mode, result, ops[i].op);
3433
3434 return result;
3435 }
3436
3437 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3438 static bool
3439 plus_minus_operand_p (rtx x)
3440 {
3441 return GET_CODE (x) == PLUS
3442 || GET_CODE (x) == MINUS
3443 || (GET_CODE (x) == CONST
3444 && GET_CODE (XEXP (x, 0)) == PLUS
3445 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3446 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3447 }
3448
3449 /* Like simplify_binary_operation except used for relational operators.
 3450 MODE is the mode of the result. If MODE is VOIDmode, the operands must
 3451 not both be VOIDmode as well.
3452
 3453 CMP_MODE specifies the mode in which the comparison is done, so it is
3454 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3455 the operands or, if both are VOIDmode, the operands are compared in
3456 "infinite precision". */
3457 rtx
3458 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3459 enum machine_mode cmp_mode, rtx op0, rtx op1)
3460 {
3461 rtx tem, trueop0, trueop1;
3462
3463 if (cmp_mode == VOIDmode)
3464 cmp_mode = GET_MODE (op0);
3465 if (cmp_mode == VOIDmode)
3466 cmp_mode = GET_MODE (op1);
3467
3468 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3469 if (tem)
3470 {
3471 if (SCALAR_FLOAT_MODE_P (mode))
3472 {
3473 if (tem == const0_rtx)
3474 return CONST0_RTX (mode);
3475 #ifdef FLOAT_STORE_FLAG_VALUE
3476 {
3477 REAL_VALUE_TYPE val;
3478 val = FLOAT_STORE_FLAG_VALUE (mode);
3479 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3480 }
3481 #else
3482 return NULL_RTX;
3483 #endif
3484 }
3485 if (VECTOR_MODE_P (mode))
3486 {
3487 if (tem == const0_rtx)
3488 return CONST0_RTX (mode);
3489 #ifdef VECTOR_STORE_FLAG_VALUE
3490 {
3491 int i, units;
3492 rtvec v;
3493
3494 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3495 if (val == NULL_RTX)
3496 return NULL_RTX;
3497 if (val == const1_rtx)
3498 return CONST1_RTX (mode);
3499
3500 units = GET_MODE_NUNITS (mode);
3501 v = rtvec_alloc (units);
3502 for (i = 0; i < units; i++)
3503 RTVEC_ELT (v, i) = val;
3504 return gen_rtx_raw_CONST_VECTOR (mode, v);
3505 }
3506 #else
3507 return NULL_RTX;
3508 #endif
3509 }
3510
3511 return tem;
3512 }
3513
3514 /* For the following tests, ensure const0_rtx is op1. */
3515 if (swap_commutative_operands_p (op0, op1)
3516 || (op0 == const0_rtx && op1 != const0_rtx))
3517 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3518
3519 /* If op0 is a compare, extract the comparison arguments from it. */
3520 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3521 return simplify_relational_operation (code, mode, VOIDmode,
3522 XEXP (op0, 0), XEXP (op0, 1));
3523
3524 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3525 || CC0_P (op0))
3526 return NULL_RTX;
3527
3528 trueop0 = avoid_constant_pool_reference (op0);
3529 trueop1 = avoid_constant_pool_reference (op1);
3530 return simplify_relational_operation_1 (code, mode, cmp_mode,
3531 trueop0, trueop1);
3532 }
3533
3534 /* This part of simplify_relational_operation is only used when CMP_MODE
3535 is not in class MODE_CC (i.e. it is a real comparison).
3536
 3537 MODE is the mode of the result, while CMP_MODE specifies the mode
 3538 in which the comparison is done, so it is the mode of the operands. */
3539
3540 static rtx
3541 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3542 enum machine_mode cmp_mode, rtx op0, rtx op1)
3543 {
3544 enum rtx_code op0code = GET_CODE (op0);
3545
3546 if (GET_CODE (op1) == CONST_INT)
3547 {
3548 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3549 {
3550 /* If op0 is a comparison, extract the comparison arguments
3551 from it. */
3552 if (code == NE)
3553 {
3554 if (GET_MODE (op0) == mode)
3555 return simplify_rtx (op0);
3556 else
3557 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3558 XEXP (op0, 0), XEXP (op0, 1));
3559 }
3560 else if (code == EQ)
3561 {
3562 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3563 if (new_code != UNKNOWN)
3564 return simplify_gen_relational (new_code, mode, VOIDmode,
3565 XEXP (op0, 0), XEXP (op0, 1));
3566 }
3567 }
3568 }
3569
3570 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3571 if ((code == EQ || code == NE)
3572 && (op0code == PLUS || op0code == MINUS)
3573 && CONSTANT_P (op1)
3574 && CONSTANT_P (XEXP (op0, 1))
3575 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3576 {
3577 rtx x = XEXP (op0, 0);
3578 rtx c = XEXP (op0, 1);
3579
3580 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3581 cmp_mode, op1, c);
3582 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3583 }
3584
 3585 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
 3586 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3587 if (code == NE
3588 && op1 == const0_rtx
3589 && GET_MODE_CLASS (mode) == MODE_INT
3590 && cmp_mode != VOIDmode
3591 /* ??? Work-around BImode bugs in the ia64 backend. */
3592 && mode != BImode
3593 && cmp_mode != BImode
3594 && nonzero_bits (op0, cmp_mode) == 1
3595 && STORE_FLAG_VALUE == 1)
3596 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3597 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3598 : lowpart_subreg (mode, op0, cmp_mode);
3599
3600 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3601 if ((code == EQ || code == NE)
3602 && op1 == const0_rtx
3603 && op0code == XOR)
3604 return simplify_gen_relational (code, mode, cmp_mode,
3605 XEXP (op0, 0), XEXP (op0, 1));
3606
3607 /* (eq/ne (xor x y) x) simplifies to (eq/ne x 0). */
3608 if ((code == EQ || code == NE)
3609 && op0code == XOR
3610 && rtx_equal_p (XEXP (op0, 0), op1)
3611 && !side_effects_p (XEXP (op0, 1)))
3612 return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
3613 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne y 0). */
3614 if ((code == EQ || code == NE)
3615 && op0code == XOR
3616 && rtx_equal_p (XEXP (op0, 1), op1)
3617 && !side_effects_p (XEXP (op0, 0)))
3618 return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
3619
3620 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3621 if ((code == EQ || code == NE)
3622 && op0code == XOR
3623 && (GET_CODE (op1) == CONST_INT
3624 || GET_CODE (op1) == CONST_DOUBLE)
3625 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3626 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3627 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3628 simplify_gen_binary (XOR, cmp_mode,
3629 XEXP (op0, 1), op1));
3630
3631 return NULL_RTX;
3632 }
3633
3634 /* Check if the given comparison (done in the given MODE) is actually a
3635 tautology or a contradiction.
3636 If no simplification is possible, this function returns zero.
3637 Otherwise, it returns either const_true_rtx or const0_rtx. */
3638
3639 rtx
3640 simplify_const_relational_operation (enum rtx_code code,
3641 enum machine_mode mode,
3642 rtx op0, rtx op1)
3643 {
3644 int equal, op0lt, op0ltu, op1lt, op1ltu;
3645 rtx tem;
3646 rtx trueop0;
3647 rtx trueop1;
3648
3649 gcc_assert (mode != VOIDmode
3650 || (GET_MODE (op0) == VOIDmode
3651 && GET_MODE (op1) == VOIDmode));
3652
3653 /* If op0 is a compare, extract the comparison arguments from it. */
3654 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3655 {
3656 op1 = XEXP (op0, 1);
3657 op0 = XEXP (op0, 0);
3658
3659 if (GET_MODE (op0) != VOIDmode)
3660 mode = GET_MODE (op0);
3661 else if (GET_MODE (op1) != VOIDmode)
3662 mode = GET_MODE (op1);
3663 else
3664 return 0;
3665 }
3666
3667 /* We can't simplify MODE_CC values since we don't know what the
3668 actual comparison is. */
3669 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3670 return 0;
3671
3672 /* Make sure the constant is second. */
3673 if (swap_commutative_operands_p (op0, op1))
3674 {
3675 tem = op0, op0 = op1, op1 = tem;
3676 code = swap_condition (code);
3677 }
3678
3679 trueop0 = avoid_constant_pool_reference (op0);
3680 trueop1 = avoid_constant_pool_reference (op1);
3681
3682 /* For integer comparisons of A and B maybe we can simplify A - B and can
3683 then simplify a comparison of that with zero. If A and B are both either
3684 a register or a CONST_INT, this can't help; testing for these cases will
3685 prevent infinite recursion here and speed things up.
3686
3687 If CODE is an unsigned comparison, then we can never do this optimization,
3688 because it gives an incorrect result if the subtraction wraps around zero.
3689 ANSI C defines unsigned operations such that they never overflow, and
 3690 thus such cases cannot be ignored; but we cannot do it even for
3691 signed comparisons for languages such as Java, so test flag_wrapv. */
3692
3693 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3694 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3695 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3696 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3697 /* We cannot do this for == or != if tem is a nonzero address. */
3698 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3699 && code != GTU && code != GEU && code != LTU && code != LEU)
3700 return simplify_const_relational_operation (signed_condition (code),
3701 mode, tem, const0_rtx);
3702
3703 if (flag_unsafe_math_optimizations && code == ORDERED)
3704 return const_true_rtx;
3705
3706 if (flag_unsafe_math_optimizations && code == UNORDERED)
3707 return const0_rtx;
3708
3709 /* For modes without NaNs, if the two operands are equal, we know the
3710 result except if they have side-effects. */
3711 if (! HONOR_NANS (GET_MODE (trueop0))
3712 && rtx_equal_p (trueop0, trueop1)
3713 && ! side_effects_p (trueop0))
3714 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3715
3716 /* If the operands are floating-point constants, see if we can fold
3717 the result. */
3718 else if (GET_CODE (trueop0) == CONST_DOUBLE
3719 && GET_CODE (trueop1) == CONST_DOUBLE
3720 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3721 {
3722 REAL_VALUE_TYPE d0, d1;
3723
3724 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3725 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3726
3727 /* Comparisons are unordered iff at least one of the values is NaN. */
3728 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3729 switch (code)
3730 {
3731 case UNEQ:
3732 case UNLT:
3733 case UNGT:
3734 case UNLE:
3735 case UNGE:
3736 case NE:
3737 case UNORDERED:
3738 return const_true_rtx;
3739 case EQ:
3740 case LT:
3741 case GT:
3742 case LE:
3743 case GE:
3744 case LTGT:
3745 case ORDERED:
3746 return const0_rtx;
3747 default:
3748 return 0;
3749 }
3750
3751 equal = REAL_VALUES_EQUAL (d0, d1);
3752 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3753 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3754 }
3755
3756 /* Otherwise, see if the operands are both integers. */
3757 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3758 && (GET_CODE (trueop0) == CONST_DOUBLE
3759 || GET_CODE (trueop0) == CONST_INT)
3760 && (GET_CODE (trueop1) == CONST_DOUBLE
3761 || GET_CODE (trueop1) == CONST_INT))
3762 {
3763 int width = GET_MODE_BITSIZE (mode);
3764 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3765 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3766
3767 /* Get the two words comprising each integer constant. */
3768 if (GET_CODE (trueop0) == CONST_DOUBLE)
3769 {
3770 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3771 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3772 }
3773 else
3774 {
3775 l0u = l0s = INTVAL (trueop0);
3776 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3777 }
3778
3779 if (GET_CODE (trueop1) == CONST_DOUBLE)
3780 {
3781 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3782 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3783 }
3784 else
3785 {
3786 l1u = l1s = INTVAL (trueop1);
3787 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3788 }
3789
3790 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3791 we have to sign or zero-extend the values. */
3792 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3793 {
3794 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3795 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3796
3797 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3798 l0s |= ((HOST_WIDE_INT) (-1) << width);
3799
3800 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3801 l1s |= ((HOST_WIDE_INT) (-1) << width);
3802 }
3803 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3804 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
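/* For example (an illustrative sketch, not from the original source):
   for QImode, width is 8; an operand whose low byte is 0x80 ends up with
   l0u == 0x80 (128) but l0s == -128 after the sign extension above, so
   compared against (const_int 0) it satisfies LT but not LTU.  */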
3805
3806 equal = (h0u == h1u && l0u == l1u);
3807 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3808 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3809 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3810 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3811 }
3812
3813 /* Otherwise, there are some code-specific tests we can make. */
3814 else
3815 {
3816 /* Optimize comparisons with upper and lower bounds. */
3817 if (SCALAR_INT_MODE_P (mode)
3818 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3819 {
3820 rtx mmin, mmax;
3821 int sign;
3822
3823 if (code == GEU
3824 || code == LEU
3825 || code == GTU
3826 || code == LTU)
3827 sign = 0;
3828 else
3829 sign = 1;
3830
3831 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3832
3833 tem = NULL_RTX;
3834 switch (code)
3835 {
3836 case GEU:
3837 case GE:
3838 /* x >= min is always true. */
3839 if (rtx_equal_p (trueop1, mmin))
3840 tem = const_true_rtx;
3841 else
3842 break;
3843
3844 case LEU:
3845 case LE:
3846 /* x <= max is always true. */
3847 if (rtx_equal_p (trueop1, mmax))
3848 tem = const_true_rtx;
3849 break;
3850
3851 case GTU:
3852 case GT:
3853 /* x > max is always false. */
3854 if (rtx_equal_p (trueop1, mmax))
3855 tem = const0_rtx;
3856 break;
3857
3858 case LTU:
3859 case LT:
3860 /* x < min is always false. */
3861 if (rtx_equal_p (trueop1, mmin))
3862 tem = const0_rtx;
3863 break;
3864
3865 default:
3866 break;
3867 }
3868 if (tem == const0_rtx
3869 || tem == const_true_rtx)
3870 return tem;
3871 }
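/* An illustrative case (hypothetical operands, not from the original
   source): for an unsigned QImode comparison, get_mode_bounds yields
   mmin == (const_int 0), so (GEU x (const_int 0)) is a tautology and
   folds to const_true_rtx, while (LTU x (const_int 0)) folds to
   const0_rtx.  */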
3872
3873 switch (code)
3874 {
3875 case EQ:
3876 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3877 return const0_rtx;
3878 break;
3879
3880 case NE:
3881 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3882 return const_true_rtx;
3883 break;
3884
3885 case LT:
3886 /* Optimize abs(x) < 0.0. */
3887 if (trueop1 == CONST0_RTX (mode)
3888 && !HONOR_SNANS (mode)
3889 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3890 {
3891 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3892 : trueop0;
3893 if (GET_CODE (tem) == ABS)
3894 return const0_rtx;
3895 }
3896 break;
3897
3898 case GE:
3899 /* Optimize abs(x) >= 0.0. */
3900 if (trueop1 == CONST0_RTX (mode)
3901 && !HONOR_NANS (mode)
3902 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3903 {
3904 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3905 : trueop0;
3906 if (GET_CODE (tem) == ABS)
3907 return const_true_rtx;
3908 }
3909 break;
3910
3911 case UNGE:
3912 /* Optimize ! (abs(x) < 0.0). */
3913 if (trueop1 == CONST0_RTX (mode))
3914 {
3915 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3916 : trueop0;
3917 if (GET_CODE (tem) == ABS)
3918 return const_true_rtx;
3919 }
3920 break;
3921
3922 default:
3923 break;
3924 }
3925
3926 return 0;
3927 }
3928
3929 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3930 as appropriate. */
3931 switch (code)
3932 {
3933 case EQ:
3934 case UNEQ:
3935 return equal ? const_true_rtx : const0_rtx;
3936 case NE:
3937 case LTGT:
3938 return ! equal ? const_true_rtx : const0_rtx;
3939 case LT:
3940 case UNLT:
3941 return op0lt ? const_true_rtx : const0_rtx;
3942 case GT:
3943 case UNGT:
3944 return op1lt ? const_true_rtx : const0_rtx;
3945 case LTU:
3946 return op0ltu ? const_true_rtx : const0_rtx;
3947 case GTU:
3948 return op1ltu ? const_true_rtx : const0_rtx;
3949 case LE:
3950 case UNLE:
3951 return equal || op0lt ? const_true_rtx : const0_rtx;
3952 case GE:
3953 case UNGE:
3954 return equal || op1lt ? const_true_rtx : const0_rtx;
3955 case LEU:
3956 return equal || op0ltu ? const_true_rtx : const0_rtx;
3957 case GEU:
3958 return equal || op1ltu ? const_true_rtx : const0_rtx;
3959 case ORDERED:
3960 return const_true_rtx;
3961 case UNORDERED:
3962 return const0_rtx;
3963 default:
3964 gcc_unreachable ();
3965 }
3966 }
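/* A brief illustrative sketch of a call (hypothetical operands, not part
   of the original source): a pass such as CSE might constant-fold

       simplify_const_relational_operation (LTU, SImode,
                                            GEN_INT (3), GEN_INT (7))

   which returns const_true_rtx here, since 3 < 7 as unsigned values;
   comparisons it cannot decide return 0.  */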
3967 \f
3968 /* Simplify CODE, an operation with result mode MODE and three operands,
3969 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3970 a constant. Return 0 if no simplification is possible. */
3971
3972 rtx
3973 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3974 enum machine_mode op0_mode, rtx op0, rtx op1,
3975 rtx op2)
3976 {
3977 unsigned int width = GET_MODE_BITSIZE (mode);
3978
3979 /* VOIDmode means "infinite" precision. */
3980 if (width == 0)
3981 width = HOST_BITS_PER_WIDE_INT;
3982
3983 switch (code)
3984 {
3985 case SIGN_EXTRACT:
3986 case ZERO_EXTRACT:
3987 if (GET_CODE (op0) == CONST_INT
3988 && GET_CODE (op1) == CONST_INT
3989 && GET_CODE (op2) == CONST_INT
3990 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3991 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3992 {
3993 /* Extracting a bit-field from a constant */
3994 HOST_WIDE_INT val = INTVAL (op0);
3995
3996 if (BITS_BIG_ENDIAN)
3997 val >>= (GET_MODE_BITSIZE (op0_mode)
3998 - INTVAL (op2) - INTVAL (op1));
3999 else
4000 val >>= INTVAL (op2);
4001
4002 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4003 {
4004 /* First zero-extend. */
4005 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4006 /* If desired, propagate sign bit. */
4007 if (code == SIGN_EXTRACT
4008 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4009 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4010 }
4011
4012 /* Clear the bits that don't belong in our mode,
4013 unless they and our sign bit are all one.
4014 So we get either a reasonable negative value or a reasonable
4015 unsigned value for this mode. */
4016 if (width < HOST_BITS_PER_WIDE_INT
4017 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4018 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4019 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4020
4021 return gen_int_mode (val, mode);
4022 }
4023 break;
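/* An illustrative instance (hypothetical operands, not from the original
   source, and assuming BITS_BIG_ENDIAN is 0):
   (zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 4))
   shifts the constant right by 4 and masks off 4 bits, giving
   (const_int 3).  */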
4024
4025 case IF_THEN_ELSE:
4026 if (GET_CODE (op0) == CONST_INT)
4027 return op0 != const0_rtx ? op1 : op2;
4028
4029 /* Convert c ? a : a into "a". */
4030 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4031 return op1;
4032
4033 /* Convert a != b ? a : b into "a". */
4034 if (GET_CODE (op0) == NE
4035 && ! side_effects_p (op0)
4036 && ! HONOR_NANS (mode)
4037 && ! HONOR_SIGNED_ZEROS (mode)
4038 && ((rtx_equal_p (XEXP (op0, 0), op1)
4039 && rtx_equal_p (XEXP (op0, 1), op2))
4040 || (rtx_equal_p (XEXP (op0, 0), op2)
4041 && rtx_equal_p (XEXP (op0, 1), op1))))
4042 return op1;
4043
4044 /* Convert a == b ? a : b into "b". */
4045 if (GET_CODE (op0) == EQ
4046 && ! side_effects_p (op0)
4047 && ! HONOR_NANS (mode)
4048 && ! HONOR_SIGNED_ZEROS (mode)
4049 && ((rtx_equal_p (XEXP (op0, 0), op1)
4050 && rtx_equal_p (XEXP (op0, 1), op2))
4051 || (rtx_equal_p (XEXP (op0, 0), op2)
4052 && rtx_equal_p (XEXP (op0, 1), op1))))
4053 return op2;
4054
4055 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4056 {
4057 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4058 ? GET_MODE (XEXP (op0, 1))
4059 : GET_MODE (XEXP (op0, 0)));
4060 rtx temp;
4061
4062 /* Look for constants in op1 and op2 that let the whole IF_THEN_ELSE collapse into a plain (possibly reversed) comparison. */
4063 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4064 {
4065 HOST_WIDE_INT t = INTVAL (op1);
4066 HOST_WIDE_INT f = INTVAL (op2);
4067
4068 if (t == STORE_FLAG_VALUE && f == 0)
4069 code = GET_CODE (op0);
4070 else if (t == 0 && f == STORE_FLAG_VALUE)
4071 {
4072 enum rtx_code tmp;
4073 tmp = reversed_comparison_code (op0, NULL_RTX);
4074 if (tmp == UNKNOWN)
4075 break;
4076 code = tmp;
4077 }
4078 else
4079 break;
4080
4081 return simplify_gen_relational (code, mode, cmp_mode,
4082 XEXP (op0, 0), XEXP (op0, 1));
4083 }
4084
4085 if (cmp_mode == VOIDmode)
4086 cmp_mode = op0_mode;
4087 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4088 cmp_mode, XEXP (op0, 0),
4089 XEXP (op0, 1));
4090
4091 /* See if any simplifications were possible. */
4092 if (temp)
4093 {
4094 if (GET_CODE (temp) == CONST_INT)
4095 return temp == const0_rtx ? op2 : op1;
4096 else if (temp)
4097 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4098 }
4099 }
4100 break;
4101
4102 case VEC_MERGE:
4103 gcc_assert (GET_MODE (op0) == mode);
4104 gcc_assert (GET_MODE (op1) == mode);
4105 gcc_assert (VECTOR_MODE_P (mode));
4106 op2 = avoid_constant_pool_reference (op2);
4107 if (GET_CODE (op2) == CONST_INT)
4108 {
4109 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4110 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4111 int mask = (1 << n_elts) - 1;
4112
4113 if (!(INTVAL (op2) & mask))
4114 return op1;
4115 if ((INTVAL (op2) & mask) == mask)
4116 return op0;
4117
4118 op0 = avoid_constant_pool_reference (op0);
4119 op1 = avoid_constant_pool_reference (op1);
4120 if (GET_CODE (op0) == CONST_VECTOR
4121 && GET_CODE (op1) == CONST_VECTOR)
4122 {
4123 rtvec v = rtvec_alloc (n_elts);
4124 unsigned int i;
4125
4126 for (i = 0; i < n_elts; i++)
4127 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4128 ? CONST_VECTOR_ELT (op0, i)
4129 : CONST_VECTOR_ELT (op1, i));
4130 return gen_rtx_CONST_VECTOR (mode, v);
4131 }
4132 }
4133 break;
4134
4135 default:
4136 gcc_unreachable ();
4137 }
4138
4139 return 0;
4140 }
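/* Illustrative examples of the IF_THEN_ELSE folding above (hypothetical
   operands, not from the original source):
     (if_then_else (const_int 1) A B)  ->  A
     (if_then_else (ne X Y) X Y)       ->  X
     (if_then_else (eq X Y) X Y)       ->  Y
   provided the condition has no side effects and, for the last two,
   NaNs and signed zeros need not be honored in MODE.  */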
4141
4142 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4143 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4144
4145 Works by unpacking OP into a collection of 8-bit values
4146 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4147 and then repacking them again for OUTERMODE. */
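/* For example (a hedged illustration, assuming a little-endian target;
   not part of the original comment):
   simplify_immed_subreg (QImode, GEN_INT (0x12345678), SImode, 0)
   unpacks the constant into the bytes 78 56 34 12 and repacks byte 0,
   yielding (const_int 0x78); BYTE == 3 would yield (const_int 0x12).  */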
4148
4149 static rtx
4150 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4151 enum machine_mode innermode, unsigned int byte)
4152 {
4153 /* We support up to 512-bit values (for V8DFmode). */
4154 enum {
4155 max_bitsize = 512,
4156 value_bit = 8,
4157 value_mask = (1 << value_bit) - 1
4158 };
4159 unsigned char value[max_bitsize / value_bit];
4160 int value_start;
4161 int i;
4162 int elem;
4163
4164 int num_elem;
4165 rtx * elems;
4166 int elem_bitsize;
4167 rtx result_s;
4168 rtvec result_v = NULL;
4169 enum mode_class outer_class;
4170 enum machine_mode outer_submode;
4171
4172 /* Some ports misuse CCmode. */
4173 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4174 return op;
4175
4176 /* We have no way to represent a complex constant at the rtl level. */
4177 if (COMPLEX_MODE_P (outermode))
4178 return NULL_RTX;
4179
4180 /* Unpack the value. */
4181
4182 if (GET_CODE (op) == CONST_VECTOR)
4183 {
4184 num_elem = CONST_VECTOR_NUNITS (op);
4185 elems = &CONST_VECTOR_ELT (op, 0);
4186 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4187 }
4188 else
4189 {
4190 num_elem = 1;
4191 elems = &op;
4192 elem_bitsize = max_bitsize;
4193 }
4194 /* If this asserts, it is too complicated; reducing value_bit may help. */
4195 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4196 /* I don't know how to handle endianness of sub-units. */
4197 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4198
4199 for (elem = 0; elem < num_elem; elem++)
4200 {
4201 unsigned char * vp;
4202 rtx el = elems[elem];
4203
4204 /* Vectors are kept in target memory order. (This is probably
4205 a mistake.) */
4206 {
4207 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4208 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4209 / BITS_PER_UNIT);
4210 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4211 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4212 unsigned bytele = (subword_byte % UNITS_PER_WORD
4213 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4214 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4215 }
4216
4217 switch (GET_CODE (el))
4218 {
4219 case CONST_INT:
4220 for (i = 0;
4221 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4222 i += value_bit)
4223 *vp++ = INTVAL (el) >> i;
4224 /* CONST_INTs are always logically sign-extended. */
4225 for (; i < elem_bitsize; i += value_bit)
4226 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4227 break;
4228
4229 case CONST_DOUBLE:
4230 if (GET_MODE (el) == VOIDmode)
4231 {
4232 /* If this triggers, someone should have generated a
4233 CONST_INT instead. */
4234 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4235
4236 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4237 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4238 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4239 {
4240 *vp++
4241 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4242 i += value_bit;
4243 }
4244 /* It shouldn't matter what's done here, so fill it with
4245 zero. */
4246 for (; i < elem_bitsize; i += value_bit)
4247 *vp++ = 0;
4248 }
4249 else
4250 {
4251 long tmp[max_bitsize / 32];
4252 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4253
4254 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4255 gcc_assert (bitsize <= elem_bitsize);
4256 gcc_assert (bitsize % value_bit == 0);
4257
4258 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4259 GET_MODE (el));
4260
4261 /* real_to_target produces its result in words affected by
4262 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4263 and use WORDS_BIG_ENDIAN instead; see the documentation
4264 of SUBREG in rtl.texi. */
4265 for (i = 0; i < bitsize; i += value_bit)
4266 {
4267 int ibase;
4268 if (WORDS_BIG_ENDIAN)
4269 ibase = bitsize - 1 - i;
4270 else
4271 ibase = i;
4272 *vp++ = tmp[ibase / 32] >> i % 32;
4273 }
4274
4275 /* It shouldn't matter what's done here, so fill it with
4276 zero. */
4277 for (; i < elem_bitsize; i += value_bit)
4278 *vp++ = 0;
4279 }
4280 break;
4281
4282 default:
4283 gcc_unreachable ();
4284 }
4285 }
4286
4287 /* Now, pick the right byte to start with. */
4288 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4289 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4290 will already have offset 0. */
4291 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4292 {
4293 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4294 - byte);
4295 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4296 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4297 byte = (subword_byte % UNITS_PER_WORD
4298 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4299 }
4300
4301 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4302 so if it's become negative it will instead be very large.) */
4303 gcc_assert (byte < GET_MODE_SIZE (innermode));
4304
4305 /* Convert from bytes to chunks of size value_bit. */
4306 value_start = byte * (BITS_PER_UNIT / value_bit);
4307
4308 /* Re-pack the value. */
4309
4310 if (VECTOR_MODE_P (outermode))
4311 {
4312 num_elem = GET_MODE_NUNITS (outermode);
4313 result_v = rtvec_alloc (num_elem);
4314 elems = &RTVEC_ELT (result_v, 0);
4315 outer_submode = GET_MODE_INNER (outermode);
4316 }
4317 else
4318 {
4319 num_elem = 1;
4320 elems = &result_s;
4321 outer_submode = outermode;
4322 }
4323
4324 outer_class = GET_MODE_CLASS (outer_submode);
4325 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4326
4327 gcc_assert (elem_bitsize % value_bit == 0);
4328 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4329
4330 for (elem = 0; elem < num_elem; elem++)
4331 {
4332 unsigned char *vp;
4333
4334 /* Vectors are stored in target memory order. (This is probably
4335 a mistake.) */
4336 {
4337 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4338 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4339 / BITS_PER_UNIT);
4340 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4341 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4342 unsigned bytele = (subword_byte % UNITS_PER_WORD
4343 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4344 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4345 }
4346
4347 switch (outer_class)
4348 {
4349 case MODE_INT:
4350 case MODE_PARTIAL_INT:
4351 {
4352 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4353
4354 for (i = 0;
4355 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4356 i += value_bit)
4357 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4358 for (; i < elem_bitsize; i += value_bit)
4359 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4360 << (i - HOST_BITS_PER_WIDE_INT));
4361
4362 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4363 know why. */
4364 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4365 elems[elem] = gen_int_mode (lo, outer_submode);
4366 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4367 elems[elem] = immed_double_const (lo, hi, outer_submode);
4368 else
4369 return NULL_RTX;
4370 }
4371 break;
4372
4373 case MODE_FLOAT:
4374 case MODE_DECIMAL_FLOAT:
4375 {
4376 REAL_VALUE_TYPE r;
4377 long tmp[max_bitsize / 32];
4378
4379 /* real_from_target wants its input in words affected by
4380 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4381 and use WORDS_BIG_ENDIAN instead; see the documentation
4382 of SUBREG in rtl.texi. */
4383 for (i = 0; i < max_bitsize / 32; i++)
4384 tmp[i] = 0;
4385 for (i = 0; i < elem_bitsize; i += value_bit)
4386 {
4387 int ibase;
4388 if (WORDS_BIG_ENDIAN)
4389 ibase = elem_bitsize - 1 - i;
4390 else
4391 ibase = i;
4392 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4393 }
4394
4395 real_from_target (&r, tmp, outer_submode);
4396 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4397 }
4398 break;
4399
4400 default:
4401 gcc_unreachable ();
4402 }
4403 }
4404 if (VECTOR_MODE_P (outermode))
4405 return gen_rtx_CONST_VECTOR (outermode, result_v);
4406 else
4407 return result_s;
4408 }
4409
4410 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4411 Return 0 if no simplifications are possible. */
4412 rtx
4413 simplify_subreg (enum machine_mode outermode, rtx op,
4414 enum machine_mode innermode, unsigned int byte)
4415 {
4416 /* Little bit of sanity checking. */
4417 gcc_assert (innermode != VOIDmode);
4418 gcc_assert (outermode != VOIDmode);
4419 gcc_assert (innermode != BLKmode);
4420 gcc_assert (outermode != BLKmode);
4421
4422 gcc_assert (GET_MODE (op) == innermode
4423 || GET_MODE (op) == VOIDmode);
4424
4425 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4426 gcc_assert (byte < GET_MODE_SIZE (innermode));
4427
4428 if (outermode == innermode && !byte)
4429 return op;
4430
4431 if (GET_CODE (op) == CONST_INT
4432 || GET_CODE (op) == CONST_DOUBLE
4433 || GET_CODE (op) == CONST_VECTOR)
4434 return simplify_immed_subreg (outermode, op, innermode, byte);
4435
4436 /* Changing mode twice with SUBREG => just change it once,
4437 or not at all if changing back to op's starting mode. */
4438 if (GET_CODE (op) == SUBREG)
4439 {
4440 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4441 int final_offset = byte + SUBREG_BYTE (op);
4442 rtx newx;
4443
4444 if (outermode == innermostmode
4445 && byte == 0 && SUBREG_BYTE (op) == 0)
4446 return SUBREG_REG (op);
4447
4448 /* The SUBREG_BYTE represents offset, as if the value were stored
4449 in memory. The irritating exception is a paradoxical subreg, where
4450 we define SUBREG_BYTE to be 0. On big-endian machines, this
4451 value should be negative. For a moment, undo this exception. */
4452 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4453 {
4454 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4455 if (WORDS_BIG_ENDIAN)
4456 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4457 if (BYTES_BIG_ENDIAN)
4458 final_offset += difference % UNITS_PER_WORD;
4459 }
4460 if (SUBREG_BYTE (op) == 0
4461 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4462 {
4463 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4464 if (WORDS_BIG_ENDIAN)
4465 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4466 if (BYTES_BIG_ENDIAN)
4467 final_offset += difference % UNITS_PER_WORD;
4468 }
4469
4470 /* See whether resulting subreg will be paradoxical. */
4471 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4472 {
4473 /* In nonparadoxical subregs we can't handle negative offsets. */
4474 if (final_offset < 0)
4475 return NULL_RTX;
4476 /* Bail out in case resulting subreg would be incorrect. */
4477 if (final_offset % GET_MODE_SIZE (outermode)
4478 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4479 return NULL_RTX;
4480 }
4481 else
4482 {
4483 int offset = 0;
4484 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4485
4486 /* In a paradoxical subreg, see if we are still looking at the lower part.
4487 If so, our SUBREG_BYTE will be 0. */
4488 if (WORDS_BIG_ENDIAN)
4489 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4490 if (BYTES_BIG_ENDIAN)
4491 offset += difference % UNITS_PER_WORD;
4492 if (offset == final_offset)
4493 final_offset = 0;
4494 else
4495 return NULL_RTX;
4496 }
4497
4498 /* Recurse for further possible simplifications. */
4499 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4500 final_offset);
4501 if (newx)
4502 return newx;
4503 if (validate_subreg (outermode, innermostmode,
4504 SUBREG_REG (op), final_offset))
4505 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4506 return NULL_RTX;
4507 }
4508
4509 /* Merge implicit and explicit truncations. */
4510
4511 if (GET_CODE (op) == TRUNCATE
4512 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4513 && subreg_lowpart_offset (outermode, innermode) == byte)
4514 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4515 GET_MODE (XEXP (op, 0)));
4516
4517 /* SUBREG of a hard register => just change the register number
4518 and/or mode. If the hard register is not valid in that mode,
4519 suppress this simplification. If the hard register is the stack,
4520 frame, or argument pointer, leave this as a SUBREG. */
4521
4522 if (REG_P (op)
4523 && REGNO (op) < FIRST_PSEUDO_REGISTER
4524 #ifdef CANNOT_CHANGE_MODE_CLASS
4525 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4526 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4527 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4528 #endif
4529 && ((reload_completed && !frame_pointer_needed)
4530 || (REGNO (op) != FRAME_POINTER_REGNUM
4531 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4532 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4533 #endif
4534 ))
4535 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4536 && REGNO (op) != ARG_POINTER_REGNUM
4537 #endif
4538 && REGNO (op) != STACK_POINTER_REGNUM
4539 && subreg_offset_representable_p (REGNO (op), innermode,
4540 byte, outermode))
4541 {
4542 unsigned int regno = REGNO (op);
4543 unsigned int final_regno
4544 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4545
4546 /* ??? We do allow it if the current REG is not valid for
4547 its mode. This is a kludge to work around how float/complex
4548 arguments are passed on 32-bit SPARC and should be fixed. */
4549 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4550 || ! HARD_REGNO_MODE_OK (regno, innermode))
4551 {
4552 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
4553
4554 /* Propagate original regno. We don't have any way to specify
4555 the offset inside original regno, so do so only for lowpart.
4556 The information is used only by alias analysis, which cannot
4557 grok partial registers anyway. */
4558
4559 if (subreg_lowpart_offset (outermode, innermode) == byte)
4560 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4561 return x;
4562 }
4563 }
4564
4565 /* If we have a SUBREG of a register that we are replacing and we are
4566 replacing it with a MEM, make a new MEM and try replacing the
4567 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4568 or if we would be widening it. */
4569
4570 if (MEM_P (op)
4571 && ! mode_dependent_address_p (XEXP (op, 0))
4572 /* Allow splitting of volatile memory references in case we don't
4573 have an instruction to move the whole thing. */
4574 && (! MEM_VOLATILE_P (op)
4575 || ! have_insn_for (SET, innermode))
4576 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4577 return adjust_address_nv (op, outermode, byte);
4578
4579 /* Handle complex values represented as CONCAT
4580 of real and imaginary part. */
4581 if (GET_CODE (op) == CONCAT)
4582 {
4583 unsigned int inner_size, final_offset;
4584 rtx part, res;
4585
4586 inner_size = GET_MODE_UNIT_SIZE (innermode);
4587 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4588 final_offset = byte % inner_size;
4589 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4590 return NULL_RTX;
4591
4592 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4593 if (res)
4594 return res;
4595 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4596 return gen_rtx_SUBREG (outermode, part, final_offset);
4597 return NULL_RTX;
4598 }
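/* For instance (an illustrative sketch, not from the original source,
   assuming an 8-byte DFmode): for op == (concat:DC (reg:DF 100)
   (reg:DF 101)), a DFmode subreg at byte 0 returns (reg:DF 100) and one
   at byte 8 returns (reg:DF 101), i.e. the real and imaginary parts.  */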
4599
4600 /* Optimize SUBREG truncations of zero and sign extended values. */
4601 if ((GET_CODE (op) == ZERO_EXTEND
4602 || GET_CODE (op) == SIGN_EXTEND)
4603 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4604 {
4605 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4606
4607 /* If we're requesting the lowpart of a zero or sign extension,
4608 there are three possibilities. If the outermode is the same
4609 as the origmode, we can omit both the extension and the subreg.
4610 If the outermode is not larger than the origmode, we can apply
4611 the truncation without the extension. Finally, if the outermode
4612 is larger than the origmode, but both are integer modes, we
4613 can just extend to the appropriate mode. */
4614 if (bitpos == 0)
4615 {
4616 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4617 if (outermode == origmode)
4618 return XEXP (op, 0);
4619 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4620 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4621 subreg_lowpart_offset (outermode,
4622 origmode));
4623 if (SCALAR_INT_MODE_P (outermode))
4624 return simplify_gen_unary (GET_CODE (op), outermode,
4625 XEXP (op, 0), origmode);
4626 }
4627
4628 /* A SUBREG resulting from a zero extension may fold to zero if
4629 it extracts higher bits than the ZERO_EXTEND's source provides. */
4630 if (GET_CODE (op) == ZERO_EXTEND
4631 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4632 return CONST0_RTX (outermode);
4633 }
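/* For instance (an illustrative sketch, not from the original source):
   the lowpart QImode subreg of (zero_extend:SI (reg:QI 100)) reduces to
   (reg:QI 100) by the first case above, while the lowpart HImode subreg
   of the same value becomes (zero_extend:HI (reg:QI 100)) by the last
   case.  */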
4634
4635 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4636 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4637 the outer subreg is effectively a truncation to the original mode. */
4638 if ((GET_CODE (op) == LSHIFTRT
4639 || GET_CODE (op) == ASHIFTRT)
4640 && SCALAR_INT_MODE_P (outermode)
4641 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
4642 to avoid the possibility that an outer LSHIFTRT shifts by more
4643 than the sign extension's sign_bit_copies and introduces zeros
4644 into the high bits of the result. */
4645 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4646 && GET_CODE (XEXP (op, 1)) == CONST_INT
4647 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4648 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4649 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4650 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4651 return simplify_gen_binary (ASHIFTRT, outermode,
4652 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
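/* For example (hypothetical operands, not from the original source):
   the lowpart QImode subreg of
   (lshiftrt:SI (sign_extend:SI (reg:QI 100)) (const_int 2))
   becomes (ashiftrt:QI (reg:QI 100) (const_int 2)) here.  */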
4653
4654 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4655 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4656 the outer subreg is effectively a truncation to the original mode. */
4657 if ((GET_CODE (op) == LSHIFTRT
4658 || GET_CODE (op) == ASHIFTRT)
4659 && SCALAR_INT_MODE_P (outermode)
4660 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4661 && GET_CODE (XEXP (op, 1)) == CONST_INT
4662 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4663 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4664 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4665 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4666 return simplify_gen_binary (LSHIFTRT, outermode,
4667 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4668
4669 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4670 (ashift:QI (x:QI) C), where C is a suitable small constant and
4671 the outer subreg is effectively a truncation to the original mode. */
4672 if (GET_CODE (op) == ASHIFT
4673 && SCALAR_INT_MODE_P (outermode)
4674 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4675 && GET_CODE (XEXP (op, 1)) == CONST_INT
4676 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4677 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4678 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4679 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4680 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4681 return simplify_gen_binary (ASHIFT, outermode,
4682 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4683
4684 return NULL_RTX;
4685 }
4686
4687 /* Make a SUBREG operation or equivalent if it folds. */
4688
4689 rtx
4690 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4691 enum machine_mode innermode, unsigned int byte)
4692 {
4693 rtx newx;
4694
4695 newx = simplify_subreg (outermode, op, innermode, byte);
4696 if (newx)
4697 return newx;
4698
4699 if (GET_CODE (op) == SUBREG
4700 || GET_CODE (op) == CONCAT
4701 || GET_MODE (op) == VOIDmode)
4702 return NULL_RTX;
4703
4704 if (validate_subreg (outermode, innermode, op, byte))
4705 return gen_rtx_SUBREG (outermode, op, byte);
4706
4707 return NULL_RTX;
4708 }
4709
4710 /* Simplify X, an rtx expression.
4711
4712 Return the simplified expression or NULL if no simplifications
4713 were possible.
4714
4715 This is the preferred entry point into the simplification routines;
4716 however, we still allow passes to call the more specific routines.
4717
4718 Right now GCC has three (yes, three) major bodies of RTL simplification
4719 code that need to be unified.
4720
4721 1. fold_rtx in cse.c. This code uses various CSE specific
4722 information to aid in RTL simplification.
4723
4724 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4725 it uses combine specific information to aid in RTL
4726 simplification.
4727
4728 3. The routines in this file.
4729
4730
4731 Long term we want to only have one body of simplification code; to
4732 get to that state I recommend the following steps:
4733
4734 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4735 which do not depend on pass-specific state into these routines.
4736
4737 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4738 use this routine whenever possible.
4739
4740 3. Allow for pass dependent state to be provided to these
4741 routines and add simplifications based on the pass dependent
4742 state. Remove code from cse.c & combine.c that becomes
4743 redundant/dead.
4744
4745 It will take time, but ultimately the compiler will be easier to
4746 maintain and improve. It's totally silly that when we add a
4747 simplification it needs to be added to 4 places (3 for RTL
4748 simplification and 1 for tree simplification). */
4749
4750 rtx
4751 simplify_rtx (rtx x)
4752 {
4753 enum rtx_code code = GET_CODE (x);
4754 enum machine_mode mode = GET_MODE (x);
4755
4756 switch (GET_RTX_CLASS (code))
4757 {
4758 case RTX_UNARY:
4759 return simplify_unary_operation (code, mode,
4760 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4761 case RTX_COMM_ARITH:
4762 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4763 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4764
4765 /* Fall through.... */
4766
4767 case RTX_BIN_ARITH:
4768 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4769
4770 case RTX_TERNARY:
4771 case RTX_BITFIELD_OPS:
4772 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4773 XEXP (x, 0), XEXP (x, 1),
4774 XEXP (x, 2));
4775
4776 case RTX_COMPARE:
4777 case RTX_COMM_COMPARE:
4778 return simplify_relational_operation (code, mode,
4779 ((GET_MODE (XEXP (x, 0))
4780 != VOIDmode)
4781 ? GET_MODE (XEXP (x, 0))
4782 : GET_MODE (XEXP (x, 1))),
4783 XEXP (x, 0),
4784 XEXP (x, 1));
4785
4786 case RTX_EXTRA:
4787 if (code == SUBREG)
4788 return simplify_gen_subreg (mode, SUBREG_REG (x),
4789 GET_MODE (SUBREG_REG (x)),
4790 SUBREG_BYTE (x));
4791 break;
4792
4793 case RTX_OBJ:
4794 if (code == LO_SUM)
4795 {
4796 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4797 if (GET_CODE (XEXP (x, 0)) == HIGH
4798 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4799 return XEXP (x, 1);
4800 }
4801 break;
4802
4803 default:
4804 break;
4805 }
4806 return NULL;
4807 }
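/* A minimal usage sketch (hypothetical, not part of the original file):

       rtx sum = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));
       rtx folded = simplify_rtx (sum);

   FOLDED is (const_int 5) here; when nothing can be simplified,
   simplify_rtx returns NULL and callers keep the original rtx.  */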
4808