1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 rtx, int);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
66 \f
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
71 {
72 return gen_int_mode (- INTVAL (i), mode);
73 }
74
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
77
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
80 {
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
83
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
86
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
90
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
97 {
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
100 }
101 else
102 return false;
103
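/* Only the low WIDTH bits are significant; mask off anything above them before testing for the sign bit.  */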
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
107 }
108 \f
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
111
112 rtx
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
115 {
116 rtx tem;
117
118 /* Put complex operands first and constants second if commutative. */
119 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
120 && swap_commutative_operands_p (op0, op1))
121 tem = op0, op0 = op1, op1 = tem;
122
123 /* If this simplifies, do it. */
124 tem = simplify_binary_operation (code, mode, op0, op1);
125 if (tem)
126 return tem;
127
128 /* Handle addition and subtraction specially. Otherwise, just form
129 the operation. */
130
131 if (code == PLUS || code == MINUS)
132 {
133 tem = simplify_plus_minus (code, mode, op0, op1, 1);
134 if (tem)
135 return tem;
136 }
137
138 return gen_rtx_fmt_ee (code, mode, op0, op1);
139 }
140 \f
141 /* If X is a MEM referencing the constant pool, return the real value.
142 Otherwise return X. */
143 rtx
144 avoid_constant_pool_reference (rtx x)
145 {
146 rtx c, tmp, addr;
147 enum machine_mode cmode;
148 HOST_WIDE_INT offset = 0;
149
150 switch (GET_CODE (x))
151 {
152 case MEM:
153 break;
154
155 case FLOAT_EXTEND:
156 /* Handle float extensions of constant pool references. */
157 tmp = XEXP (x, 0);
158 c = avoid_constant_pool_reference (tmp);
159 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
160 {
161 REAL_VALUE_TYPE d;
162
163 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
164 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
165 }
166 return x;
167
168 default:
169 return x;
170 }
171
172 addr = XEXP (x, 0);
173
174 /* Call target hook to avoid the effects of -fpic etc.... */
175 addr = targetm.delegitimize_address (addr);
176
177 /* Split the address into a base and integer offset. */
178 if (GET_CODE (addr) == CONST
179 && GET_CODE (XEXP (addr, 0)) == PLUS
180 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
181 {
182 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
183 addr = XEXP (XEXP (addr, 0), 0);
184 }
185
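/* For a LO_SUM address, the part that may be a constant-pool SYMBOL_REF is the second operand.  */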
186 if (GET_CODE (addr) == LO_SUM)
187 addr = XEXP (addr, 1);
188
189 /* If this is a constant pool reference, we can turn it into its
190 constant and hope that simplifications happen. */
191 if (GET_CODE (addr) == SYMBOL_REF
192 && CONSTANT_POOL_ADDRESS_P (addr))
193 {
194 c = get_pool_constant (addr);
195 cmode = get_pool_mode (addr);
196
197 /* If we're accessing the constant in a different mode than it was
198 originally stored, attempt to fix that up via subreg simplifications.
199 If that fails we have no choice but to return the original memory. */
200 if (offset != 0 || cmode != GET_MODE (x))
201 {
202 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
203 if (tem && CONSTANT_P (tem))
204 return tem;
205 }
206 else
207 return c;
208 }
209
210 return x;
211 }
212 \f
213 /* Make a unary operation by first seeing if it folds and otherwise making
214 the specified operation. */
215
216 rtx
217 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
218 enum machine_mode op_mode)
219 {
220 rtx tem;
221
222 /* If this simplifies, use it. */
223 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
224 return tem;
225
226 return gen_rtx_fmt_e (code, mode, op);
227 }
228
229 /* Likewise for ternary operations. */
230
231 rtx
232 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
233 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
234 {
235 rtx tem;
236
237 /* If this simplifies, use it. */
238 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
239 op0, op1, op2)))
240 return tem;
241
242 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
243 }
244
245 /* Likewise, for relational operations.
246 CMP_MODE specifies mode comparison is done in. */
247
248 rtx
249 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
250 enum machine_mode cmp_mode, rtx op0, rtx op1)
251 {
252 rtx tem;
253
254 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
255 op0, op1)))
256 return tem;
257
258 return gen_rtx_fmt_ee (code, mode, op0, op1);
259 }
260 \f
261 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
262 resulting RTX. Return a new RTX which is as simplified as possible. */
263
264 rtx
265 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
266 {
267 enum rtx_code code = GET_CODE (x);
268 enum machine_mode mode = GET_MODE (x);
269 enum machine_mode op_mode;
270 rtx op0, op1, op2;
271
272 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
273 to build a new expression substituting recursively. If we can't do
274 anything, return our input. */
275
276 if (x == old_rtx)
277 return new_rtx;
278
279 switch (GET_RTX_CLASS (code))
280 {
281 case RTX_UNARY:
282 op0 = XEXP (x, 0);
283 op_mode = GET_MODE (op0);
284 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0))
286 return x;
287 return simplify_gen_unary (code, mode, op0, op_mode);
288
289 case RTX_BIN_ARITH:
290 case RTX_COMM_ARITH:
291 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
292 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
293 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
294 return x;
295 return simplify_gen_binary (code, mode, op0, op1);
296
297 case RTX_COMPARE:
298 case RTX_COMM_COMPARE:
299 op0 = XEXP (x, 0);
300 op1 = XEXP (x, 1);
301 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
302 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
303 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
304 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
305 return x;
306 return simplify_gen_relational (code, mode, op_mode, op0, op1);
307
308 case RTX_TERNARY:
309 case RTX_BITFIELD_OPS:
310 op0 = XEXP (x, 0);
311 op_mode = GET_MODE (op0);
312 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
313 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
314 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
315 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
316 return x;
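/* If operand 0 originally had VOIDmode (e.g. it was a constant), take the mode from the replacement operand instead.  */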
317 if (op_mode == VOIDmode)
318 op_mode = GET_MODE (op0);
319 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
320
321 case RTX_EXTRA:
322 /* The only case we try to handle is a SUBREG. */
323 if (code == SUBREG)
324 {
325 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
326 if (op0 == SUBREG_REG (x))
327 return x;
328 op0 = simplify_gen_subreg (GET_MODE (x), op0,
329 GET_MODE (SUBREG_REG (x)),
330 SUBREG_BYTE (x));
331 return op0 ? op0 : x;
332 }
333 break;
334
335 case RTX_OBJ:
336 if (code == MEM)
337 {
338 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
339 if (op0 == XEXP (x, 0))
340 return x;
341 return replace_equiv_address_nv (x, op0);
342 }
343 else if (code == LO_SUM)
344 {
345 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
346 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
347
348 /* (lo_sum (high x) x) -> x */
349 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
350 return op1;
351
352 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
353 return x;
354 return gen_rtx_LO_SUM (mode, op0, op1);
355 }
356 else if (code == REG)
357 {
358 if (rtx_equal_p (x, old_rtx))
359 return new_rtx;
360 }
361 break;
362
363 default:
364 break;
365 }
366 return x;
367 }
368 \f
369 /* Try to simplify a unary operation CODE whose output mode is to be
370 MODE with input operand OP whose mode was originally OP_MODE.
371 Return zero if no simplification can be made. */
372 rtx
373 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
374 rtx op, enum machine_mode op_mode)
375 {
376 rtx trueop, tem;
377
378 if (GET_CODE (op) == CONST)
379 op = XEXP (op, 0);
380
381 trueop = avoid_constant_pool_reference (op);
382
383 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
384 if (tem)
385 return tem;
386
387 return simplify_unary_operation_1 (code, mode, op);
388 }
389
390 /* Perform some simplifications we can do even if the operands
391 aren't constant. */
392 static rtx
393 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
394 {
395 enum rtx_code reversed;
396 rtx temp;
397
398 switch (code)
399 {
400 case NOT:
401 /* (not (not X)) == X. */
402 if (GET_CODE (op) == NOT)
403 return XEXP (op, 0);
404
405 /* (not (eq X Y)) == (ne X Y), etc. */
406 if (COMPARISON_P (op)
407 && (mode == BImode || STORE_FLAG_VALUE == -1)
408 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
409 return simplify_gen_relational (reversed, mode, VOIDmode,
410 XEXP (op, 0), XEXP (op, 1));
411
412 /* (not (plus X -1)) can become (neg X). */
413 if (GET_CODE (op) == PLUS
414 && XEXP (op, 1) == constm1_rtx)
415 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
416
417 /* Similarly, (not (neg X)) is (plus X -1). */
418 if (GET_CODE (op) == NEG)
419 return plus_constant (XEXP (op, 0), -1);
420
421 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
422 if (GET_CODE (op) == XOR
423 && GET_CODE (XEXP (op, 1)) == CONST_INT
424 && (temp = simplify_unary_operation (NOT, mode,
425 XEXP (op, 1), mode)) != 0)
426 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
427
428 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
429 if (GET_CODE (op) == PLUS
430 && GET_CODE (XEXP (op, 1)) == CONST_INT
431 && mode_signbit_p (mode, XEXP (op, 1))
432 && (temp = simplify_unary_operation (NOT, mode,
433 XEXP (op, 1), mode)) != 0)
434 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
435
436
437 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
438 operands other than 1, but that is not valid. We could do a
439 similar simplification for (not (lshiftrt C X)) where C is
440 just the sign bit, but this doesn't seem common enough to
441 bother with. */
442 if (GET_CODE (op) == ASHIFT
443 && XEXP (op, 0) == const1_rtx)
444 {
445 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
446 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
447 }
448
449 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
450 by reversing the comparison code if valid. */
451 if (STORE_FLAG_VALUE == -1
452 && COMPARISON_P (op)
453 && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
454 return simplify_gen_relational (reversed, mode, VOIDmode,
455 XEXP (op, 0), XEXP (op, 1));
456
457 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
458 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
459 so we can perform the above simplification. */
460
461 if (STORE_FLAG_VALUE == -1
462 && GET_CODE (op) == ASHIFTRT
463 && GET_CODE (XEXP (op, 1)) == CONST_INT
464 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
465 return simplify_gen_relational (GE, mode, VOIDmode,
466 XEXP (op, 0), const0_rtx);
467
468 break;
469
470 case NEG:
471 /* (neg (neg X)) == X. */
472 if (GET_CODE (op) == NEG)
473 return XEXP (op, 0);
474
475 /* (neg (plus X 1)) can become (not X). */
476 if (GET_CODE (op) == PLUS
477 && XEXP (op, 1) == const1_rtx)
478 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
479
480 /* Similarly, (neg (not X)) is (plus X 1). */
481 if (GET_CODE (op) == NOT)
482 return plus_constant (XEXP (op, 0), 1);
483
484 /* (neg (minus X Y)) can become (minus Y X). This transformation
485 isn't safe for modes with signed zeros, since if X and Y are
486 both +0, (minus Y X) is the same as (minus X Y). If the
487 rounding mode is towards +infinity (or -infinity) then the two
488 expressions will be rounded differently. */
489 if (GET_CODE (op) == MINUS
490 && !HONOR_SIGNED_ZEROS (mode)
491 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
492 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
493
494 if (GET_CODE (op) == PLUS
495 && !HONOR_SIGNED_ZEROS (mode)
496 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
497 {
498 /* (neg (plus A C)) is simplified to (minus -C A). */
499 if (GET_CODE (XEXP (op, 1)) == CONST_INT
500 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
501 {
502 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
503 if (temp)
504 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
505 }
506
507 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
508 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
509 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
510 }
511
512 /* (neg (mult A B)) becomes (mult (neg A) B).
513 This works even for floating-point values. */
514 if (GET_CODE (op) == MULT
515 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
516 {
517 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
518 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
519 }
520
521 /* NEG commutes with ASHIFT since it is multiplication. Only do
522 this if we can then eliminate the NEG (e.g., if the operand
523 is a constant). */
524 if (GET_CODE (op) == ASHIFT)
525 {
526 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
527 if (temp)
528 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
529 }
530
531 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
532 C is equal to the width of MODE minus 1. */
533 if (GET_CODE (op) == ASHIFTRT
534 && GET_CODE (XEXP (op, 1)) == CONST_INT
535 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
536 return simplify_gen_binary (LSHIFTRT, mode,
537 XEXP (op, 0), XEXP (op, 1));
538
539 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
540 C is equal to the width of MODE minus 1. */
541 if (GET_CODE (op) == LSHIFTRT
542 && GET_CODE (XEXP (op, 1)) == CONST_INT
543 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
544 return simplify_gen_binary (ASHIFTRT, mode,
545 XEXP (op, 0), XEXP (op, 1));
546
547 break;
548
549 case SIGN_EXTEND:
550 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
551 becomes just the MINUS if its mode is MODE. This allows
552 folding switch statements on machines using casesi (such as
553 the VAX). */
554 if (GET_CODE (op) == TRUNCATE
555 && GET_MODE (XEXP (op, 0)) == mode
556 && GET_CODE (XEXP (op, 0)) == MINUS
557 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
558 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
559 return XEXP (op, 0);
560
561 /* Check for a sign extension of a subreg of a promoted
562 variable, where the promotion is sign-extended, and the
563 target mode is the same as the variable's promotion. */
564 if (GET_CODE (op) == SUBREG
565 && SUBREG_PROMOTED_VAR_P (op)
566 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
567 && GET_MODE (XEXP (op, 0)) == mode)
568 return XEXP (op, 0);
569
570 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
571 if (! POINTERS_EXTEND_UNSIGNED
572 && mode == Pmode && GET_MODE (op) == ptr_mode
573 && (CONSTANT_P (op)
574 || (GET_CODE (op) == SUBREG
575 && REG_P (SUBREG_REG (op))
576 && REG_POINTER (SUBREG_REG (op))
577 && GET_MODE (SUBREG_REG (op)) == Pmode)))
578 return convert_memory_address (Pmode, op);
579 #endif
580 break;
581
582 case ZERO_EXTEND:
583 /* Check for a zero extension of a subreg of a promoted
584 variable, where the promotion is zero-extended, and the
585 target mode is the same as the variable's promotion. */
586 if (GET_CODE (op) == SUBREG
587 && SUBREG_PROMOTED_VAR_P (op)
588 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
589 && GET_MODE (XEXP (op, 0)) == mode)
590 return XEXP (op, 0);
591
592 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
593 if (POINTERS_EXTEND_UNSIGNED > 0
594 && mode == Pmode && GET_MODE (op) == ptr_mode
595 && (CONSTANT_P (op)
596 || (GET_CODE (op) == SUBREG
597 && REG_P (SUBREG_REG (op))
598 && REG_POINTER (SUBREG_REG (op))
599 && GET_MODE (SUBREG_REG (op)) == Pmode)))
600 return convert_memory_address (Pmode, op);
601 #endif
602 break;
603
604 default:
605 break;
606 }
607
608 return 0;
609 }
610
611 /* Try to compute the value of a unary operation CODE whose output mode is to
612 be MODE with input operand OP whose mode was originally OP_MODE.
613 Return zero if the value cannot be computed. */
614 rtx
615 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
616 rtx op, enum machine_mode op_mode)
617 {
618 unsigned int width = GET_MODE_BITSIZE (mode);
619
620 if (code == VEC_DUPLICATE)
621 {
622 gcc_assert (VECTOR_MODE_P (mode));
623 if (GET_MODE (op) != VOIDmode)
624 {
625 if (!VECTOR_MODE_P (GET_MODE (op)))
626 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
627 else
628 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
629 (GET_MODE (op)));
630 }
631 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
632 || GET_CODE (op) == CONST_VECTOR)
633 {
634 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
635 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
636 rtvec v = rtvec_alloc (n_elts);
637 unsigned int i;
638
639 if (GET_CODE (op) != CONST_VECTOR)
640 for (i = 0; i < n_elts; i++)
641 RTVEC_ELT (v, i) = op;
642 else
643 {
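/* OP is itself a CONST_VECTOR; repeat its elements to fill the wider result vector.  */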
644 enum machine_mode inmode = GET_MODE (op);
645 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
646 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
647
648 gcc_assert (in_n_elts < n_elts);
649 gcc_assert ((n_elts % in_n_elts) == 0);
650 for (i = 0; i < n_elts; i++)
651 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
652 }
653 return gen_rtx_CONST_VECTOR (mode, v);
654 }
655 }
656
657 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
658 {
659 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
660 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
661 enum machine_mode opmode = GET_MODE (op);
662 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
663 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
664 rtvec v = rtvec_alloc (n_elts);
665 unsigned int i;
666
667 gcc_assert (op_n_elts == n_elts);
668 for (i = 0; i < n_elts; i++)
669 {
670 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
671 CONST_VECTOR_ELT (op, i),
672 GET_MODE_INNER (opmode));
673 if (!x)
674 return 0;
675 RTVEC_ELT (v, i) = x;
676 }
677 return gen_rtx_CONST_VECTOR (mode, v);
678 }
679
680 /* The order of these tests is critical so that, for example, we don't
681 check the wrong mode (input vs. output) for a conversion operation,
682 such as FIX. At some point, this should be simplified. */
683
684 if (code == FLOAT && GET_MODE (op) == VOIDmode
685 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
686 {
687 HOST_WIDE_INT hv, lv;
688 REAL_VALUE_TYPE d;
689
690 if (GET_CODE (op) == CONST_INT)
691 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
692 else
693 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
694
695 REAL_VALUE_FROM_INT (d, lv, hv, mode);
696 d = real_value_truncate (mode, d);
697 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
698 }
699 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
700 && (GET_CODE (op) == CONST_DOUBLE
701 || GET_CODE (op) == CONST_INT))
702 {
703 HOST_WIDE_INT hv, lv;
704 REAL_VALUE_TYPE d;
705
706 if (GET_CODE (op) == CONST_INT)
707 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
708 else
709 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
710
711 if (op_mode == VOIDmode)
712 {
713 /* We don't know how to interpret negative-looking numbers in
714 this case, so don't try to fold those. */
715 if (hv < 0)
716 return 0;
717 }
718 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
719 ;
720 else
721 hv = 0, lv &= GET_MODE_MASK (op_mode);
722
723 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
724 d = real_value_truncate (mode, d);
725 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
726 }
727
728 if (GET_CODE (op) == CONST_INT
729 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
730 {
731 HOST_WIDE_INT arg0 = INTVAL (op);
732 HOST_WIDE_INT val;
733
734 switch (code)
735 {
736 case NOT:
737 val = ~ arg0;
738 break;
739
740 case NEG:
741 val = - arg0;
742 break;
743
744 case ABS:
745 val = (arg0 >= 0 ? arg0 : - arg0);
746 break;
747
748 case FFS:
749 /* Don't use ffs here. Instead, get low order bit and then its
750 number. If arg0 is zero, this will return 0, as desired. */
751 arg0 &= GET_MODE_MASK (mode);
752 val = exact_log2 (arg0 & (- arg0)) + 1;
753 break;
754
755 case CLZ:
756 arg0 &= GET_MODE_MASK (mode);
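/* When ARG0 is zero, CLZ_DEFINED_VALUE_AT_ZERO stores the target's defined value (if any) in VAL.  */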
757 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
758 ;
759 else
760 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
761 break;
762
763 case CTZ:
764 arg0 &= GET_MODE_MASK (mode);
765 if (arg0 == 0)
766 {
767 /* Even if the value at zero is undefined, we have to come
768 up with some replacement. Seems good enough. */
769 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
770 val = GET_MODE_BITSIZE (mode);
771 }
772 else
773 val = exact_log2 (arg0 & -arg0);
774 break;
775
776 case POPCOUNT:
777 arg0 &= GET_MODE_MASK (mode);
778 val = 0;
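/* Count the set bits by clearing the least significant one on each iteration.  */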
779 while (arg0)
780 val++, arg0 &= arg0 - 1;
781 break;
782
783 case PARITY:
784 arg0 &= GET_MODE_MASK (mode);
785 val = 0;
786 while (arg0)
787 val++, arg0 &= arg0 - 1;
788 val &= 1;
789 break;
790
791 case TRUNCATE:
792 val = arg0;
793 break;
794
795 case ZERO_EXTEND:
796 /* When zero-extending a CONST_INT, we need to know its
797 original mode. */
798 gcc_assert (op_mode != VOIDmode);
799 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
800 {
801 /* If we were really extending the mode,
802 we would have to distinguish between zero-extension
803 and sign-extension. */
804 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
805 val = arg0;
806 }
807 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
808 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
809 else
810 return 0;
811 break;
812
813 case SIGN_EXTEND:
814 if (op_mode == VOIDmode)
815 op_mode = mode;
816 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
817 {
818 /* If we were really extending the mode,
819 we would have to distinguish between zero-extension
820 and sign-extension. */
821 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
822 val = arg0;
823 }
824 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
825 {
826 val
827 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
828 if (val
829 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
830 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
831 }
832 else
833 return 0;
834 break;
835
836 case SQRT:
837 case FLOAT_EXTEND:
838 case FLOAT_TRUNCATE:
839 case SS_TRUNCATE:
840 case US_TRUNCATE:
841 return 0;
842
843 default:
844 gcc_unreachable ();
845 }
846
847 return gen_int_mode (val, mode);
848 }
849
850 /* We can do some operations on integer CONST_DOUBLEs. Also allow
851 for a DImode operation on a CONST_INT. */
852 else if (GET_MODE (op) == VOIDmode
853 && width <= HOST_BITS_PER_WIDE_INT * 2
854 && (GET_CODE (op) == CONST_DOUBLE
855 || GET_CODE (op) == CONST_INT))
856 {
857 unsigned HOST_WIDE_INT l1, lv;
858 HOST_WIDE_INT h1, hv;
859
860 if (GET_CODE (op) == CONST_DOUBLE)
861 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
862 else
863 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
864
865 switch (code)
866 {
867 case NOT:
868 lv = ~ l1;
869 hv = ~ h1;
870 break;
871
872 case NEG:
873 neg_double (l1, h1, &lv, &hv);
874 break;
875
876 case ABS:
877 if (h1 < 0)
878 neg_double (l1, h1, &lv, &hv);
879 else
880 lv = l1, hv = h1;
881 break;
882
883 case FFS:
884 hv = 0;
885 if (l1 == 0)
886 {
887 if (h1 == 0)
888 lv = 0;
889 else
890 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
891 }
892 else
893 lv = exact_log2 (l1 & -l1) + 1;
894 break;
895
896 case CLZ:
897 hv = 0;
898 if (h1 != 0)
899 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
900 - HOST_BITS_PER_WIDE_INT;
901 else if (l1 != 0)
902 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
903 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
904 lv = GET_MODE_BITSIZE (mode);
905 break;
906
907 case CTZ:
908 hv = 0;
909 if (l1 != 0)
910 lv = exact_log2 (l1 & -l1);
911 else if (h1 != 0)
912 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
913 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
914 lv = GET_MODE_BITSIZE (mode);
915 break;
916
917 case POPCOUNT:
918 hv = 0;
919 lv = 0;
920 while (l1)
921 lv++, l1 &= l1 - 1;
922 while (h1)
923 lv++, h1 &= h1 - 1;
924 break;
925
926 case PARITY:
927 hv = 0;
928 lv = 0;
929 while (l1)
930 lv++, l1 &= l1 - 1;
931 while (h1)
932 lv++, h1 &= h1 - 1;
933 lv &= 1;
934 break;
935
936 case TRUNCATE:
937 /* This is just a change-of-mode, so do nothing. */
938 lv = l1, hv = h1;
939 break;
940
941 case ZERO_EXTEND:
942 gcc_assert (op_mode != VOIDmode);
943
944 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
945 return 0;
946
947 hv = 0;
948 lv = l1 & GET_MODE_MASK (op_mode);
949 break;
950
951 case SIGN_EXTEND:
952 if (op_mode == VOIDmode
953 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
954 return 0;
955 else
956 {
957 lv = l1 & GET_MODE_MASK (op_mode);
958 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
959 && (lv & ((HOST_WIDE_INT) 1
960 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
961 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
962
963 hv = HWI_SIGN_EXTEND (lv);
964 }
965 break;
966
967 case SQRT:
968 return 0;
969
970 default:
971 return 0;
972 }
973
974 return immed_double_const (lv, hv, mode);
975 }
976
977 else if (GET_CODE (op) == CONST_DOUBLE
978 && GET_MODE_CLASS (mode) == MODE_FLOAT)
979 {
980 REAL_VALUE_TYPE d, t;
981 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
982
983 switch (code)
984 {
985 case SQRT:
986 if (HONOR_SNANS (mode) && real_isnan (&d))
987 return 0;
988 real_sqrt (&t, mode, &d);
989 d = t;
990 break;
991 case ABS:
992 d = REAL_VALUE_ABS (d);
993 break;
994 case NEG:
995 d = REAL_VALUE_NEGATE (d);
996 break;
997 case FLOAT_TRUNCATE:
998 d = real_value_truncate (mode, d);
999 break;
1000 case FLOAT_EXTEND:
1001 /* All this does is change the mode. */
1002 break;
1003 case FIX:
1004 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1005 break;
1006 case NOT:
1007 {
1008 long tmp[4];
1009 int i;
1010
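/* Complement the bits of the value's target-format image and convert the result back to a REAL_VALUE_TYPE.  */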
1011 real_to_target (tmp, &d, GET_MODE (op));
1012 for (i = 0; i < 4; i++)
1013 tmp[i] = ~tmp[i];
1014 real_from_target (&d, tmp, mode);
1015 break;
1016 }
1017 default:
1018 gcc_unreachable ();
1019 }
1020 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1021 }
1022
1023 else if (GET_CODE (op) == CONST_DOUBLE
1024 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
1025 && GET_MODE_CLASS (mode) == MODE_INT
1026 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1027 {
1028 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1029 operators are intentionally left unspecified (to ease implementation
1030 by target backends), for consistency, this routine implements the
1031 same semantics for constant folding as used by the middle-end. */
1032
1033 /* This was formerly used only for non-IEEE float.
1034 eggert@twinsun.com says it is safe for IEEE also. */
1035 HOST_WIDE_INT xh, xl, th, tl;
1036 REAL_VALUE_TYPE x, t;
1037 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1038 switch (code)
1039 {
1040 case FIX:
1041 if (REAL_VALUE_ISNAN (x))
1042 return const0_rtx;
1043
1044 /* Test against the signed upper bound. */
1045 if (width > HOST_BITS_PER_WIDE_INT)
1046 {
1047 th = ((unsigned HOST_WIDE_INT) 1
1048 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1049 tl = -1;
1050 }
1051 else
1052 {
1053 th = 0;
1054 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1055 }
1056 real_from_integer (&t, VOIDmode, tl, th, 0);
1057 if (REAL_VALUES_LESS (t, x))
1058 {
1059 xh = th;
1060 xl = tl;
1061 break;
1062 }
1063
1064 /* Test against the signed lower bound. */
1065 if (width > HOST_BITS_PER_WIDE_INT)
1066 {
1067 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1068 tl = 0;
1069 }
1070 else
1071 {
1072 th = -1;
1073 tl = (HOST_WIDE_INT) -1 << (width - 1);
1074 }
1075 real_from_integer (&t, VOIDmode, tl, th, 0);
1076 if (REAL_VALUES_LESS (x, t))
1077 {
1078 xh = th;
1079 xl = tl;
1080 break;
1081 }
1082 REAL_VALUE_TO_INT (&xl, &xh, x);
1083 break;
1084
1085 case UNSIGNED_FIX:
1086 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1087 return const0_rtx;
1088
1089 /* Test against the unsigned upper bound. */
1090 if (width == 2*HOST_BITS_PER_WIDE_INT)
1091 {
1092 th = -1;
1093 tl = -1;
1094 }
1095 else if (width >= HOST_BITS_PER_WIDE_INT)
1096 {
1097 th = ((unsigned HOST_WIDE_INT) 1
1098 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1099 tl = -1;
1100 }
1101 else
1102 {
1103 th = 0;
1104 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1105 }
1106 real_from_integer (&t, VOIDmode, tl, th, 1);
1107 if (REAL_VALUES_LESS (t, x))
1108 {
1109 xh = th;
1110 xl = tl;
1111 break;
1112 }
1113
1114 REAL_VALUE_TO_INT (&xl, &xh, x);
1115 break;
1116
1117 default:
1118 gcc_unreachable ();
1119 }
1120 return immed_double_const (xl, xh, mode);
1121 }
1122
1123 return NULL_RTX;
1124 }
1125 \f
1126 /* Subroutine of simplify_binary_operation to simplify a commutative,
1127 associative binary operation CODE with result mode MODE, operating
1128 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1129 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1130 canonicalization is possible. */
1131
1132 static rtx
1133 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1134 rtx op0, rtx op1)
1135 {
1136 rtx tem;
1137
1138 /* Linearize the operator to the left. */
1139 if (GET_CODE (op1) == code)
1140 {
1141 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1142 if (GET_CODE (op0) == code)
1143 {
1144 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1145 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1146 }
1147
1148 /* "a op (b op c)" becomes "(b op c) op a". */
1149 if (! swap_commutative_operands_p (op1, op0))
1150 return simplify_gen_binary (code, mode, op1, op0);
1151
1152 tem = op0;
1153 op0 = op1;
1154 op1 = tem;
1155 }
1156
1157 if (GET_CODE (op0) == code)
1158 {
1159 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1160 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1161 {
1162 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1163 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1164 }
1165
1166 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1167 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1168 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1169 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1170 if (tem != 0)
1171 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1172
1173 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1174 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1175 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1176 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1177 if (tem != 0)
1178 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1179 }
1180
1181 return 0;
1182 }
1183
1184
1185 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1186 and OP1. Return 0 if no simplification is possible.
1187
1188 Don't use this for relational operations such as EQ or LT.
1189 Use simplify_relational_operation instead. */
1190 rtx
1191 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1192 rtx op0, rtx op1)
1193 {
1194 rtx trueop0, trueop1;
1195 rtx tem;
1196
1197 /* Relational operations don't work here. We must know the mode
1198 of the operands in order to do the comparison correctly.
1199 Assuming a full word can give incorrect results.
1200 Consider comparing 128 with -128 in QImode. */
1201 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1202 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1203
1204 /* Make sure the constant is second. */
1205 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1206 && swap_commutative_operands_p (op0, op1))
1207 {
1208 tem = op0, op0 = op1, op1 = tem;
1209 }
1210
1211 trueop0 = avoid_constant_pool_reference (op0);
1212 trueop1 = avoid_constant_pool_reference (op1);
1213
1214 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1215 if (tem)
1216 return tem;
1217 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1218 }
1219
1220 static rtx
1221 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1222 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1223 {
1224 rtx tem;
1225 HOST_WIDE_INT val;
1226 unsigned int width = GET_MODE_BITSIZE (mode);
1227
1228 /* Even if we can't compute a constant result,
1229 there are some cases worth simplifying. */
1230
1231 switch (code)
1232 {
1233 case PLUS:
1234 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1235 when x is NaN, infinite, or finite and nonzero. They aren't
1236 when x is -0 and the rounding mode is not towards -infinity,
1237 since (-0) + 0 is then 0. */
1238 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1239 return op0;
1240
1241 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1242 transformations are safe even for IEEE. */
1243 if (GET_CODE (op0) == NEG)
1244 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1245 else if (GET_CODE (op1) == NEG)
1246 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1247
1248 /* (~a) + 1 -> -a */
1249 if (INTEGRAL_MODE_P (mode)
1250 && GET_CODE (op0) == NOT
1251 && trueop1 == const1_rtx)
1252 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1253
1254 /* Handle both-operands-constant cases. We can only add
1255 CONST_INTs to constants since the sum of relocatable symbols
1256 can't be handled by most assemblers. Don't add CONST_INT
1257 to CONST_INT since overflow won't be computed properly if wider
1258 than HOST_BITS_PER_WIDE_INT. */
1259
1260 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1261 && GET_CODE (op1) == CONST_INT)
1262 return plus_constant (op0, INTVAL (op1));
1263 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1264 && GET_CODE (op0) == CONST_INT)
1265 return plus_constant (op1, INTVAL (op0));
1266
1267 /* See if this is something like X * C - X or vice versa or
1268 if the multiplication is written as a shift. If so, we can
1269 distribute and make a new multiply, shift, or maybe just
1270 have X (if C is 2 in the example above). But don't make
1271 something more expensive than we had before. */
1272
1273 if (SCALAR_INT_MODE_P (mode))
1274 {
1275 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1276 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1277 rtx lhs = op0, rhs = op1;
1278
1279 if (GET_CODE (lhs) == NEG)
1280 {
1281 coeff0l = -1;
1282 coeff0h = -1;
1283 lhs = XEXP (lhs, 0);
1284 }
1285 else if (GET_CODE (lhs) == MULT
1286 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1287 {
1288 coeff0l = INTVAL (XEXP (lhs, 1));
1289 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1290 lhs = XEXP (lhs, 0);
1291 }
1292 else if (GET_CODE (lhs) == ASHIFT
1293 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1294 && INTVAL (XEXP (lhs, 1)) >= 0
1295 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1296 {
1297 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1298 coeff0h = 0;
1299 lhs = XEXP (lhs, 0);
1300 }
1301
1302 if (GET_CODE (rhs) == NEG)
1303 {
1304 coeff1l = -1;
1305 coeff1h = -1;
1306 rhs = XEXP (rhs, 0);
1307 }
1308 else if (GET_CODE (rhs) == MULT
1309 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1310 {
1311 coeff1l = INTVAL (XEXP (rhs, 1));
1312 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1313 rhs = XEXP (rhs, 0);
1314 }
1315 else if (GET_CODE (rhs) == ASHIFT
1316 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1317 && INTVAL (XEXP (rhs, 1)) >= 0
1318 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1319 {
1320 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1321 coeff1h = 0;
1322 rhs = XEXP (rhs, 0);
1323 }
1324
1325 if (rtx_equal_p (lhs, rhs))
1326 {
1327 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1328 rtx coeff;
1329 unsigned HOST_WIDE_INT l;
1330 HOST_WIDE_INT h;
1331
1332 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1333 coeff = immed_double_const (l, h, mode);
1334
1335 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1336 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1337 ? tem : 0;
1338 }
1339 }
1340
1341 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1342 if ((GET_CODE (op1) == CONST_INT
1343 || GET_CODE (op1) == CONST_DOUBLE)
1344 && GET_CODE (op0) == XOR
1345 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1346 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1347 && mode_signbit_p (mode, op1))
1348 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1349 simplify_gen_binary (XOR, mode, op1,
1350 XEXP (op0, 1)));
1351
1352 /* If one of the operands is a PLUS or a MINUS, see if we can
1353 simplify this by the associative law.
1354 Don't use the associative law for floating point.
1355 The inaccuracy makes it nonassociative,
1356 and subtle programs can break if operations are associated. */
1357
1358 if (INTEGRAL_MODE_P (mode)
1359 && (plus_minus_operand_p (op0)
1360 || plus_minus_operand_p (op1))
1361 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1362 return tem;
1363
1364 /* Reassociate floating point addition only when the user
1365 specifies unsafe math optimizations. */
1366 if (FLOAT_MODE_P (mode)
1367 && flag_unsafe_math_optimizations)
1368 {
1369 tem = simplify_associative_operation (code, mode, op0, op1);
1370 if (tem)
1371 return tem;
1372 }
1373 break;
1374
1375 case COMPARE:
1376 #ifdef HAVE_cc0
1377 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1378 using cc0, in which case we want to leave it as a COMPARE
1379 so we can distinguish it from a register-register-copy.
1380
1381 In IEEE floating point, x-0 is not the same as x. */
1382
1383 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1384 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1385 && trueop1 == CONST0_RTX (mode))
1386 return op0;
1387 #endif
1388
1389 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1390 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1391 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1392 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1393 {
1394 rtx xop00 = XEXP (op0, 0);
1395 rtx xop10 = XEXP (op1, 0);
1396
1397 #ifdef HAVE_cc0
1398 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1399 #else
1400 if (REG_P (xop00) && REG_P (xop10)
1401 && GET_MODE (xop00) == GET_MODE (xop10)
1402 && REGNO (xop00) == REGNO (xop10)
1403 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1404 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1405 #endif
1406 return xop00;
1407 }
1408 break;
1409
1410 case MINUS:
1411 /* We can't assume x-x is 0 even with non-IEEE floating point,
1412 but since it is zero except in very strange circumstances, we
1413 will treat it as zero with -funsafe-math-optimizations. */
1414 if (rtx_equal_p (trueop0, trueop1)
1415 && ! side_effects_p (op0)
1416 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1417 return CONST0_RTX (mode);
1418
1419 /* Change subtraction from zero into negation. (0 - x) is the
1420 same as -x when x is NaN, infinite, or finite and nonzero.
1421 But if the mode has signed zeros, and does not round towards
1422 -infinity, then 0 - 0 is 0, not -0. */
1423 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1424 return simplify_gen_unary (NEG, mode, op1, mode);
1425
1426 /* (-1 - a) is ~a. */
1427 if (trueop0 == constm1_rtx)
1428 return simplify_gen_unary (NOT, mode, op1, mode);
1429
1430 /* Subtracting 0 has no effect unless the mode has signed zeros
1431 and supports rounding towards -infinity. In such a case,
1432 0 - 0 is -0. */
1433 if (!(HONOR_SIGNED_ZEROS (mode)
1434 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1435 && trueop1 == CONST0_RTX (mode))
1436 return op0;
1437
1438 /* See if this is something like X * C - X or vice versa or
1439 if the multiplication is written as a shift. If so, we can
1440 distribute and make a new multiply, shift, or maybe just
1441 have X (if C is 2 in the example above). But don't make
1442 something more expensive than we had before. */
1443
1444 if (SCALAR_INT_MODE_P (mode))
1445 {
1446 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1447 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1448 rtx lhs = op0, rhs = op1;
1449
1450 if (GET_CODE (lhs) == NEG)
1451 {
1452 coeff0l = -1;
1453 coeff0h = -1;
1454 lhs = XEXP (lhs, 0);
1455 }
1456 else if (GET_CODE (lhs) == MULT
1457 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1458 {
1459 coeff0l = INTVAL (XEXP (lhs, 1));
1460 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1461 lhs = XEXP (lhs, 0);
1462 }
1463 else if (GET_CODE (lhs) == ASHIFT
1464 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1465 && INTVAL (XEXP (lhs, 1)) >= 0
1466 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1467 {
1468 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1469 coeff0h = 0;
1470 lhs = XEXP (lhs, 0);
1471 }
1472
1473 if (GET_CODE (rhs) == NEG)
1474 {
1475 negcoeff1l = 1;
1476 negcoeff1h = 0;
1477 rhs = XEXP (rhs, 0);
1478 }
1479 else if (GET_CODE (rhs) == MULT
1480 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1481 {
1482 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1483 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1484 rhs = XEXP (rhs, 0);
1485 }
1486 else if (GET_CODE (rhs) == ASHIFT
1487 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1488 && INTVAL (XEXP (rhs, 1)) >= 0
1489 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1490 {
1491 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1492 negcoeff1h = -1;
1493 rhs = XEXP (rhs, 0);
1494 }
1495
1496 if (rtx_equal_p (lhs, rhs))
1497 {
1498 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1499 rtx coeff;
1500 unsigned HOST_WIDE_INT l;
1501 HOST_WIDE_INT h;
1502
1503 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1504 coeff = immed_double_const (l, h, mode);
1505
1506 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1507 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1508 ? tem : 0;
1509 }
1510 }
1511
1512 /* (a - (-b)) -> (a + b). True even for IEEE. */
1513 if (GET_CODE (op1) == NEG)
1514 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1515
1516 /* (-x - c) may be simplified as (-c - x). */
1517 if (GET_CODE (op0) == NEG
1518 && (GET_CODE (op1) == CONST_INT
1519 || GET_CODE (op1) == CONST_DOUBLE))
1520 {
1521 tem = simplify_unary_operation (NEG, mode, op1, mode);
1522 if (tem)
1523 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1524 }
1525
1526 /* If one of the operands is a PLUS or a MINUS, see if we can
1527 simplify this by the associative law.
1528 Don't use the associative law for floating point.
1529 The inaccuracy makes it nonassociative,
1530 and subtle programs can break if operations are associated. */
1531
1532 if (INTEGRAL_MODE_P (mode)
1533 && (plus_minus_operand_p (op0)
1534 || plus_minus_operand_p (op1))
1535 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1536 return tem;
1537
1538 /* Don't let a relocatable value get a negative coeff. */
1539 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1540 return simplify_gen_binary (PLUS, mode,
1541 op0,
1542 neg_const_int (mode, op1));
1543
1544 /* (x - (x & y)) -> (x & ~y) */
1545 if (GET_CODE (op1) == AND)
1546 {
1547 if (rtx_equal_p (op0, XEXP (op1, 0)))
1548 {
1549 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1550 GET_MODE (XEXP (op1, 1)));
1551 return simplify_gen_binary (AND, mode, op0, tem);
1552 }
1553 if (rtx_equal_p (op0, XEXP (op1, 1)))
1554 {
1555 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1556 GET_MODE (XEXP (op1, 0)));
1557 return simplify_gen_binary (AND, mode, op0, tem);
1558 }
1559 }
1560 break;
1561
1562 case MULT:
1563 if (trueop1 == constm1_rtx)
1564 return simplify_gen_unary (NEG, mode, op0, mode);
1565
1566 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1567 x is NaN, since x * 0 is then also NaN. Nor is it valid
1568 when the mode has signed zeros, since multiplying a negative
1569 number by 0 will give -0, not 0. */
1570 if (!HONOR_NANS (mode)
1571 && !HONOR_SIGNED_ZEROS (mode)
1572 && trueop1 == CONST0_RTX (mode)
1573 && ! side_effects_p (op0))
1574 return op1;
1575
1576 /* In IEEE floating point, x*1 is not equivalent to x for
1577 signalling NaNs. */
1578 if (!HONOR_SNANS (mode)
1579 && trueop1 == CONST1_RTX (mode))
1580 return op0;
1581
1582 /* Convert multiply by constant power of two into shift unless
1583 we are still generating RTL. This test is a kludge. */
1584 if (GET_CODE (trueop1) == CONST_INT
1585 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1586 /* If the mode is larger than the host word size, and the
1587 uppermost bit is set, then this isn't a power of two due
1588 to implicit sign extension. */
1589 && (width <= HOST_BITS_PER_WIDE_INT
1590 || val != HOST_BITS_PER_WIDE_INT - 1))
1591 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1592
1593 /* Likewise for multipliers wider than a word. */
1594 else if (GET_CODE (trueop1) == CONST_DOUBLE
1595 && (GET_MODE (trueop1) == VOIDmode
1596 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1597 && GET_MODE (op0) == mode
1598 && CONST_DOUBLE_LOW (trueop1) == 0
1599 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1600 return simplify_gen_binary (ASHIFT, mode, op0,
1601 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1602
1603 /* x*2 is x+x and x*(-1) is -x */
1604 if (GET_CODE (trueop1) == CONST_DOUBLE
1605 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1606 && GET_MODE (op0) == mode)
1607 {
1608 REAL_VALUE_TYPE d;
1609 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1610
1611 if (REAL_VALUES_EQUAL (d, dconst2))
1612 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1613
1614 if (REAL_VALUES_EQUAL (d, dconstm1))
1615 return simplify_gen_unary (NEG, mode, op0, mode);
1616 }
1617
1618 /* Reassociate multiplication, but for floating point MULTs
1619 only when the user specifies unsafe math optimizations. */
1620 if (! FLOAT_MODE_P (mode)
1621 || flag_unsafe_math_optimizations)
1622 {
1623 tem = simplify_associative_operation (code, mode, op0, op1);
1624 if (tem)
1625 return tem;
1626 }
1627 break;
1628
1629 case IOR:
1630 if (trueop1 == const0_rtx)
1631 return op0;
1632 if (GET_CODE (trueop1) == CONST_INT
1633 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1634 == GET_MODE_MASK (mode)))
1635 return op1;
1636 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1637 return op0;
1638 /* A | (~A) -> -1 */
1639 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1640 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1641 && ! side_effects_p (op0)
1642 && SCALAR_INT_MODE_P (mode))
1643 return constm1_rtx;
1644 tem = simplify_associative_operation (code, mode, op0, op1);
1645 if (tem)
1646 return tem;
1647 break;
1648
1649 case XOR:
1650 if (trueop1 == const0_rtx)
1651 return op0;
1652 if (GET_CODE (trueop1) == CONST_INT
1653 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1654 == GET_MODE_MASK (mode)))
1655 return simplify_gen_unary (NOT, mode, op0, mode);
1656 if (trueop0 == trueop1
1657 && ! side_effects_p (op0)
1658 && GET_MODE_CLASS (mode) != MODE_CC)
1659 return CONST0_RTX (mode);
1660
1661 /* Canonicalize XOR of the most significant bit to PLUS. */
1662 if ((GET_CODE (op1) == CONST_INT
1663 || GET_CODE (op1) == CONST_DOUBLE)
1664 && mode_signbit_p (mode, op1))
1665 return simplify_gen_binary (PLUS, mode, op0, op1);
1666 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1667 if ((GET_CODE (op1) == CONST_INT
1668 || GET_CODE (op1) == CONST_DOUBLE)
1669 && GET_CODE (op0) == PLUS
1670 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1671 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1672 && mode_signbit_p (mode, XEXP (op0, 1)))
1673 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1674 simplify_gen_binary (XOR, mode, op1,
1675 XEXP (op0, 1)));
1676
1677 tem = simplify_associative_operation (code, mode, op0, op1);
1678 if (tem)
1679 return tem;
1680 break;
1681
1682 case AND:
1683 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1684 return trueop1;
1685 /* If we are turning off bits already known off in OP0, we need
1686 not do an AND. */
1687 if (GET_CODE (trueop1) == CONST_INT
1688 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1689 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1690 return op0;
1691 if (trueop0 == trueop1 && ! side_effects_p (op0)
1692 && GET_MODE_CLASS (mode) != MODE_CC)
1693 return op0;
1694 /* A & (~A) -> 0 */
1695 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1696 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1697 && ! side_effects_p (op0)
1698 && GET_MODE_CLASS (mode) != MODE_CC)
1699 return CONST0_RTX (mode);
1700
1701 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1702 there are no nonzero bits of C outside of X's mode. */
1703 if ((GET_CODE (op0) == SIGN_EXTEND
1704 || GET_CODE (op0) == ZERO_EXTEND)
1705 && GET_CODE (trueop1) == CONST_INT
1706 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1707 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
1708 & INTVAL (trueop1)) == 0)
1709 {
1710 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
1711 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
1712 gen_int_mode (INTVAL (trueop1),
1713 imode));
1714 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
1715 }
1716
1717 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1718 ((A & N) + B) & M -> (A + B) & M
1719 Similarly if (N & M) == 0,
1720 ((A | N) + B) & M -> (A + B) & M
1721 and for - instead of + and/or ^ instead of |. */
1722 if (GET_CODE (trueop1) == CONST_INT
1723 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1724 && ~INTVAL (trueop1)
1725 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1726 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1727 {
1728 rtx pmop[2];
1729 int which;
1730
1731 pmop[0] = XEXP (op0, 0);
1732 pmop[1] = XEXP (op0, 1);
1733
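/* See whether the inner AND, IOR or XOR constant is redundant under the outer mask, in which case that inner operation can be dropped.  */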
1734 for (which = 0; which < 2; which++)
1735 {
1736 tem = pmop[which];
1737 switch (GET_CODE (tem))
1738 {
1739 case AND:
1740 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1741 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1742 == INTVAL (trueop1))
1743 pmop[which] = XEXP (tem, 0);
1744 break;
1745 case IOR:
1746 case XOR:
1747 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1748 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1749 pmop[which] = XEXP (tem, 0);
1750 break;
1751 default:
1752 break;
1753 }
1754 }
1755
1756 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1757 {
1758 tem = simplify_gen_binary (GET_CODE (op0), mode,
1759 pmop[0], pmop[1]);
1760 return simplify_gen_binary (code, mode, tem, op1);
1761 }
1762 }
1763 tem = simplify_associative_operation (code, mode, op0, op1);
1764 if (tem)
1765 return tem;
1766 break;
1767
1768 case UDIV:
1769 /* 0/x is 0 (or x&0 if x has side-effects). */
1770 if (trueop0 == CONST0_RTX (mode))
1771 {
1772 if (side_effects_p (op1))
1773 return simplify_gen_binary (AND, mode, op1, trueop0);
1774 return trueop0;
1775 }
1776 /* x/1 is x. */
1777 if (trueop1 == CONST1_RTX (mode))
1778 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
1779 /* Convert divide by power of two into shift. */
1780 if (GET_CODE (trueop1) == CONST_INT
1781 && (val = exact_log2 (INTVAL (trueop1))) > 0)
1782 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
1783 break;
1784
1785 case DIV:
1786 /* Handle floating point and integers separately. */
1787 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1788 {
1789 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1790 safe for modes with NaNs, since 0.0 / 0.0 will then be
1791 NaN rather than 0.0. Nor is it safe for modes with signed
1792 zeros, since dividing 0 by a negative number gives -0.0 */
1793 if (trueop0 == CONST0_RTX (mode)
1794 && !HONOR_NANS (mode)
1795 && !HONOR_SIGNED_ZEROS (mode)
1796 && ! side_effects_p (op1))
1797 return op0;
1798 /* x/1.0 is x. */
1799 if (trueop1 == CONST1_RTX (mode)
1800 && !HONOR_SNANS (mode))
1801 return op0;
1802
1803 if (GET_CODE (trueop1) == CONST_DOUBLE
1804 && trueop1 != CONST0_RTX (mode))
1805 {
1806 REAL_VALUE_TYPE d;
1807 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1808
1809 /* x/-1.0 is -x. */
1810 if (REAL_VALUES_EQUAL (d, dconstm1)
1811 && !HONOR_SNANS (mode))
1812 return simplify_gen_unary (NEG, mode, op0, mode);
1813
1814 /* Change FP division by a constant into multiplication.
1815 Only do this with -funsafe-math-optimizations. */
1816 if (flag_unsafe_math_optimizations
1817 && !REAL_VALUES_EQUAL (d, dconst0))
1818 {
1819 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1820 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1821 return simplify_gen_binary (MULT, mode, op0, tem);
1822 }
1823 }
1824 }
1825 else
1826 {
1827 /* 0/x is 0 (or x&0 if x has side-effects). */
1828 if (trueop0 == CONST0_RTX (mode))
1829 {
1830 if (side_effects_p (op1))
1831 return simplify_gen_binary (AND, mode, op1, trueop0);
1832 return trueop0;
1833 }
1834 /* x/1 is x. */
1835 if (trueop1 == CONST1_RTX (mode))
1836 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
1837 /* x/-1 is -x. */
1838 if (trueop1 == constm1_rtx)
1839 {
1840 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
1841 return simplify_gen_unary (NEG, mode, x, mode);
1842 }
1843 }
1844 break;
1845
1846 case UMOD:
1847 /* 0%x is 0 (or x&0 if x has side-effects). */
1848 if (trueop0 == CONST0_RTX (mode))
1849 {
1850 if (side_effects_p (op1))
1851 return simplify_gen_binary (AND, mode, op1, trueop0);
1852 return trueop0;
1853 }
1854 /* x%1 is 0 (or x&0 if x has side-effects). */
1855 if (trueop1 == CONST1_RTX (mode))
1856 {
1857 if (side_effects_p (op0))
1858 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
1859 return CONST0_RTX (mode);
1860 }
1861 /* Implement modulus by power of two as AND. */
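/* For instance, (umod:SI x (const_int 8)) becomes (and:SI x (const_int 7)). */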
1862 if (GET_CODE (trueop1) == CONST_INT
1863 && exact_log2 (INTVAL (trueop1)) > 0)
1864 return simplify_gen_binary (AND, mode, op0,
1865 GEN_INT (INTVAL (op1) - 1));
1866 break;
1867
1868 case MOD:
1869 /* 0%x is 0 (or x&0 if x has side-effects). */
1870 if (trueop0 == CONST0_RTX (mode))
1871 {
1872 if (side_effects_p (op1))
1873 return simplify_gen_binary (AND, mode, op1, trueop0);
1874 return trueop0;
1875 }
1876 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
1877 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
1878 {
1879 if (side_effects_p (op0))
1880 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
1881 return CONST0_RTX (mode);
1882 }
1883 break;
1884
1885 case ROTATERT:
1886 case ROTATE:
1887 case ASHIFTRT:
1888 /* Rotating ~0 always results in ~0. */
1889 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1890 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1891 && ! side_effects_p (op1))
1892 return op0;
1893
1894 /* Fall through.... */
1895
1896 case ASHIFT:
1897 case LSHIFTRT:
1898 if (trueop1 == CONST0_RTX (mode))
1899 return op0;
1900 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
1901 return op0;
1902 break;
1903
1904 case SMIN:
1905 if (width <= HOST_BITS_PER_WIDE_INT
1906 && GET_CODE (trueop1) == CONST_INT
1907 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1908 && ! side_effects_p (op0))
1909 return op1;
1910 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1911 return op0;
1912 tem = simplify_associative_operation (code, mode, op0, op1);
1913 if (tem)
1914 return tem;
1915 break;
1916
1917 case SMAX:
1918 if (width <= HOST_BITS_PER_WIDE_INT
1919 && GET_CODE (trueop1) == CONST_INT
1920 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1921 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1922 && ! side_effects_p (op0))
1923 return op1;
1924 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1925 return op0;
1926 tem = simplify_associative_operation (code, mode, op0, op1);
1927 if (tem)
1928 return tem;
1929 break;
1930
1931 case UMIN:
1932 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1933 return op1;
1934 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1935 return op0;
1936 tem = simplify_associative_operation (code, mode, op0, op1);
1937 if (tem)
1938 return tem;
1939 break;
1940
1941 case UMAX:
1942 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1943 return op1;
1944 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1945 return op0;
1946 tem = simplify_associative_operation (code, mode, op0, op1);
1947 if (tem)
1948 return tem;
1949 break;
1950
1951 case SS_PLUS:
1952 case US_PLUS:
1953 case SS_MINUS:
1954 case US_MINUS:
1955 /* ??? There are simplifications that can be done. */
1956 return 0;
1957
1958 case VEC_SELECT:
1959 if (!VECTOR_MODE_P (mode))
1960 {
1961 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1962 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
1963 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1964 gcc_assert (XVECLEN (trueop1, 0) == 1);
1965 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
1966
1967 if (GET_CODE (trueop0) == CONST_VECTOR)
1968 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
1969 (trueop1, 0, 0)));
1970 }
1971 else
1972 {
1973 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1974 gcc_assert (GET_MODE_INNER (mode)
1975 == GET_MODE_INNER (GET_MODE (trueop0)));
1976 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1977
1978 if (GET_CODE (trueop0) == CONST_VECTOR)
1979 {
1980 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1981 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1982 rtvec v = rtvec_alloc (n_elts);
1983 unsigned int i;
1984
1985 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
1986 for (i = 0; i < n_elts; i++)
1987 {
1988 rtx x = XVECEXP (trueop1, 0, i);
1989
1990 gcc_assert (GET_CODE (x) == CONST_INT);
1991 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
1992 INTVAL (x));
1993 }
1994
1995 return gen_rtx_CONST_VECTOR (mode, v);
1996 }
1997 }
1998 return 0;
1999 case VEC_CONCAT:
2000 {
2001 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2002 ? GET_MODE (trueop0)
2003 : GET_MODE_INNER (mode));
2004 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2005 ? GET_MODE (trueop1)
2006 : GET_MODE_INNER (mode));
2007
2008 gcc_assert (VECTOR_MODE_P (mode));
2009 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2010 == GET_MODE_SIZE (mode));
2011
2012 if (VECTOR_MODE_P (op0_mode))
2013 gcc_assert (GET_MODE_INNER (mode)
2014 == GET_MODE_INNER (op0_mode));
2015 else
2016 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2017
2018 if (VECTOR_MODE_P (op1_mode))
2019 gcc_assert (GET_MODE_INNER (mode)
2020 == GET_MODE_INNER (op1_mode));
2021 else
2022 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2023
2024 if ((GET_CODE (trueop0) == CONST_VECTOR
2025 || GET_CODE (trueop0) == CONST_INT
2026 || GET_CODE (trueop0) == CONST_DOUBLE)
2027 && (GET_CODE (trueop1) == CONST_VECTOR
2028 || GET_CODE (trueop1) == CONST_INT
2029 || GET_CODE (trueop1) == CONST_DOUBLE))
2030 {
2031 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2032 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2033 rtvec v = rtvec_alloc (n_elts);
2034 unsigned int i;
2035 unsigned in_n_elts = 1;
2036
2037 if (VECTOR_MODE_P (op0_mode))
2038 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2039 for (i = 0; i < n_elts; i++)
2040 {
2041 if (i < in_n_elts)
2042 {
2043 if (!VECTOR_MODE_P (op0_mode))
2044 RTVEC_ELT (v, i) = trueop0;
2045 else
2046 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2047 }
2048 else
2049 {
2050 if (!VECTOR_MODE_P (op1_mode))
2051 RTVEC_ELT (v, i) = trueop1;
2052 else
2053 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2054 i - in_n_elts);
2055 }
2056 }
2057
2058 return gen_rtx_CONST_VECTOR (mode, v);
2059 }
2060 }
2061 return 0;
2062
2063 default:
2064 gcc_unreachable ();
2065 }
2066
2067 return 0;
2068 }
2069
2070 rtx
2071 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2072 rtx op0, rtx op1)
2073 {
2074 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2075 HOST_WIDE_INT val;
2076 unsigned int width = GET_MODE_BITSIZE (mode);
2077
2078 if (VECTOR_MODE_P (mode)
2079 && code != VEC_CONCAT
2080 && GET_CODE (op0) == CONST_VECTOR
2081 && GET_CODE (op1) == CONST_VECTOR)
2082 {
2083 unsigned n_elts = GET_MODE_NUNITS (mode);
2084 enum machine_mode op0mode = GET_MODE (op0);
2085 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2086 enum machine_mode op1mode = GET_MODE (op1);
2087 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2088 rtvec v = rtvec_alloc (n_elts);
2089 unsigned int i;
2090
2091 gcc_assert (op0_n_elts == n_elts);
2092 gcc_assert (op1_n_elts == n_elts);
2093 for (i = 0; i < n_elts; i++)
2094 {
2095 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2096 CONST_VECTOR_ELT (op0, i),
2097 CONST_VECTOR_ELT (op1, i));
2098 if (!x)
2099 return 0;
2100 RTVEC_ELT (v, i) = x;
2101 }
2102
2103 return gen_rtx_CONST_VECTOR (mode, v);
2104 }
2105
2106 if (VECTOR_MODE_P (mode)
2107 && code == VEC_CONCAT
2108 && CONSTANT_P (op0) && CONSTANT_P (op1))
2109 {
2110 unsigned n_elts = GET_MODE_NUNITS (mode);
2111 rtvec v = rtvec_alloc (n_elts);
2112
2113 gcc_assert (n_elts >= 2);
2114 if (n_elts == 2)
2115 {
2116 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2117 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2118
2119 RTVEC_ELT (v, 0) = op0;
2120 RTVEC_ELT (v, 1) = op1;
2121 }
2122 else
2123 {
2124 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2125 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2126 unsigned i;
2127
2128 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2129 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2130 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2131
2132 for (i = 0; i < op0_n_elts; ++i)
2133 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2134 for (i = 0; i < op1_n_elts; ++i)
2135 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2136 }
2137
2138 return gen_rtx_CONST_VECTOR (mode, v);
2139 }
2140
2141 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2142 && GET_CODE (op0) == CONST_DOUBLE
2143 && GET_CODE (op1) == CONST_DOUBLE
2144 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2145 {
2146 if (code == AND
2147 || code == IOR
2148 || code == XOR)
2149 {
2150 long tmp0[4];
2151 long tmp1[4];
2152 REAL_VALUE_TYPE r;
2153 int i;
2154
2155 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2156 GET_MODE (op0));
2157 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2158 GET_MODE (op1));
2159 for (i = 0; i < 4; i++)
2160 {
2161 switch (code)
2162 {
2163 case AND:
2164 tmp0[i] &= tmp1[i];
2165 break;
2166 case IOR:
2167 tmp0[i] |= tmp1[i];
2168 break;
2169 case XOR:
2170 tmp0[i] ^= tmp1[i];
2171 break;
2172 default:
2173 gcc_unreachable ();
2174 }
2175 }
2176 real_from_target (&r, tmp0, mode);
2177 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2178 }
2179 else
2180 {
2181 REAL_VALUE_TYPE f0, f1, value, result;
2182 bool inexact;
2183
2184 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2185 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2186 real_convert (&f0, mode, &f0);
2187 real_convert (&f1, mode, &f1);
2188
2189 if (HONOR_SNANS (mode)
2190 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2191 return 0;
2192
2193 if (code == DIV
2194 && REAL_VALUES_EQUAL (f1, dconst0)
2195 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2196 return 0;
2197
2198 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2199 && flag_trapping_math
2200 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2201 {
2202 int s0 = REAL_VALUE_NEGATIVE (f0);
2203 int s1 = REAL_VALUE_NEGATIVE (f1);
2204
2205 switch (code)
2206 {
2207 case PLUS:
2208 /* Inf + -Inf = NaN plus exception. */
2209 if (s0 != s1)
2210 return 0;
2211 break;
2212 case MINUS:
2213 /* Inf - Inf = NaN plus exception. */
2214 if (s0 == s1)
2215 return 0;
2216 break;
2217 case DIV:
2218 /* Inf / Inf = NaN plus exception. */
2219 return 0;
2220 default:
2221 break;
2222 }
2223 }
2224
2225 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2226 && flag_trapping_math
2227 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2228 || (REAL_VALUE_ISINF (f1)
2229 && REAL_VALUES_EQUAL (f0, dconst0))))
2230 /* Inf * 0 = NaN plus exception. */
2231 return 0;
2232
2233 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2234 &f0, &f1);
2235 real_convert (&result, mode, &value);
2236
2237 /* Don't constant fold this floating point operation if the
2238 result may depend upon the run-time rounding mode and
2239 flag_rounding_math is set, or if GCC's software emulation
2240 is unable to accurately represent the result. */
2241
2242 if ((flag_rounding_math
2243 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2244 && !flag_unsafe_math_optimizations))
2245 && (inexact || !real_identical (&result, &value)))
2246 return NULL_RTX;
2247
2248 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2249 }
2250 }
2251
2252 /* We can fold some multi-word operations. */
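/* For example, on a host whose HOST_WIDE_INT is 32 bits wide, this folds
   DImode PLUS, MULT, shifts and the like on CONST_INT/CONST_DOUBLE operands,
   using the (low, high) word-pair representation. */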
2253 if (GET_MODE_CLASS (mode) == MODE_INT
2254 && width == HOST_BITS_PER_WIDE_INT * 2
2255 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2256 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2257 {
2258 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2259 HOST_WIDE_INT h1, h2, hv, ht;
2260
2261 if (GET_CODE (op0) == CONST_DOUBLE)
2262 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2263 else
2264 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2265
2266 if (GET_CODE (op1) == CONST_DOUBLE)
2267 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2268 else
2269 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2270
2271 switch (code)
2272 {
2273 case MINUS:
2274 /* A - B == A + (-B). */
2275 neg_double (l2, h2, &lv, &hv);
2276 l2 = lv, h2 = hv;
2277
2278 /* Fall through.... */
2279
2280 case PLUS:
2281 add_double (l1, h1, l2, h2, &lv, &hv);
2282 break;
2283
2284 case MULT:
2285 mul_double (l1, h1, l2, h2, &lv, &hv);
2286 break;
2287
2288 case DIV:
2289 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2290 &lv, &hv, &lt, &ht))
2291 return 0;
2292 break;
2293
2294 case MOD:
2295 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2296 &lt, &ht, &lv, &hv))
2297 return 0;
2298 break;
2299
2300 case UDIV:
2301 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2302 &lv, &hv, &lt, &ht))
2303 return 0;
2304 break;
2305
2306 case UMOD:
2307 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2308 &lt, &ht, &lv, &hv))
2309 return 0;
2310 break;
2311
2312 case AND:
2313 lv = l1 & l2, hv = h1 & h2;
2314 break;
2315
2316 case IOR:
2317 lv = l1 | l2, hv = h1 | h2;
2318 break;
2319
2320 case XOR:
2321 lv = l1 ^ l2, hv = h1 ^ h2;
2322 break;
2323
2324 case SMIN:
2325 if (h1 < h2
2326 || (h1 == h2
2327 && ((unsigned HOST_WIDE_INT) l1
2328 < (unsigned HOST_WIDE_INT) l2)))
2329 lv = l1, hv = h1;
2330 else
2331 lv = l2, hv = h2;
2332 break;
2333
2334 case SMAX:
2335 if (h1 > h2
2336 || (h1 == h2
2337 && ((unsigned HOST_WIDE_INT) l1
2338 > (unsigned HOST_WIDE_INT) l2)))
2339 lv = l1, hv = h1;
2340 else
2341 lv = l2, hv = h2;
2342 break;
2343
2344 case UMIN:
2345 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2346 || (h1 == h2
2347 && ((unsigned HOST_WIDE_INT) l1
2348 < (unsigned HOST_WIDE_INT) l2)))
2349 lv = l1, hv = h1;
2350 else
2351 lv = l2, hv = h2;
2352 break;
2353
2354 case UMAX:
2355 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2356 || (h1 == h2
2357 && ((unsigned HOST_WIDE_INT) l1
2358 > (unsigned HOST_WIDE_INT) l2)))
2359 lv = l1, hv = h1;
2360 else
2361 lv = l2, hv = h2;
2362 break;
2363
2364 case LSHIFTRT: case ASHIFTRT:
2365 case ASHIFT:
2366 case ROTATE: case ROTATERT:
2367 if (SHIFT_COUNT_TRUNCATED)
2368 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2369
2370 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2371 return 0;
2372
2373 if (code == LSHIFTRT || code == ASHIFTRT)
2374 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2375 code == ASHIFTRT);
2376 else if (code == ASHIFT)
2377 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2378 else if (code == ROTATE)
2379 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2380 else /* code == ROTATERT */
2381 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2382 break;
2383
2384 default:
2385 return 0;
2386 }
2387
2388 return immed_double_const (lv, hv, mode);
2389 }
2390
2391 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2392 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2393 {
2394 /* Get the integer argument values in two forms:
2395 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2396
2397 arg0 = INTVAL (op0);
2398 arg1 = INTVAL (op1);
2399
2400 if (width < HOST_BITS_PER_WIDE_INT)
2401 {
2402 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2403 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2404
2405 arg0s = arg0;
2406 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2407 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2408
2409 arg1s = arg1;
2410 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2411 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2412 }
2413 else
2414 {
2415 arg0s = arg0;
2416 arg1s = arg1;
2417 }
2418
2419 /* Compute the value of the arithmetic. */
2420
2421 switch (code)
2422 {
2423 case PLUS:
2424 val = arg0s + arg1s;
2425 break;
2426
2427 case MINUS:
2428 val = arg0s - arg1s;
2429 break;
2430
2431 case MULT:
2432 val = arg0s * arg1s;
2433 break;
2434
2435 case DIV:
2436 if (arg1s == 0
2437 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2438 && arg1s == -1))
2439 return 0;
2440 val = arg0s / arg1s;
2441 break;
2442
2443 case MOD:
2444 if (arg1s == 0
2445 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2446 && arg1s == -1))
2447 return 0;
2448 val = arg0s % arg1s;
2449 break;
2450
2451 case UDIV:
2452 if (arg1 == 0
2453 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2454 && arg1s == -1))
2455 return 0;
2456 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2457 break;
2458
2459 case UMOD:
2460 if (arg1 == 0
2461 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2462 && arg1s == -1))
2463 return 0;
2464 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2465 break;
2466
2467 case AND:
2468 val = arg0 & arg1;
2469 break;
2470
2471 case IOR:
2472 val = arg0 | arg1;
2473 break;
2474
2475 case XOR:
2476 val = arg0 ^ arg1;
2477 break;
2478
2479 case LSHIFTRT:
2480 case ASHIFT:
2481 case ASHIFTRT:
2482 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2483 the value is in range. We can't return any old value for
2484 out-of-range arguments because either the middle-end (via
2485 shift_truncation_mask) or the back-end might be relying on
2486 target-specific knowledge. Nor can we rely on
2487 shift_truncation_mask, since the shift might not be part of an
2488 ashlM3, lshrM3 or ashrM3 instruction. */
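/* For example, with SHIFT_COUNT_TRUNCATED and a 32-bit mode, a shift count
   of 33 is reduced to 1; without it, the out-of-range count makes us give
   up and return 0. */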
2489 if (SHIFT_COUNT_TRUNCATED)
2490 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2491 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2492 return 0;
2493
2494 val = (code == ASHIFT
2495 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2496 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2497
2498 /* Sign-extend the result for arithmetic right shifts. */
2499 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2500 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2501 break;
2502
2503 case ROTATERT:
2504 if (arg1 < 0)
2505 return 0;
2506
2507 arg1 %= width;
2508 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2509 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2510 break;
2511
2512 case ROTATE:
2513 if (arg1 < 0)
2514 return 0;
2515
2516 arg1 %= width;
2517 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2518 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2519 break;
2520
2521 case COMPARE:
2522 /* Do nothing here. */
2523 return 0;
2524
2525 case SMIN:
2526 val = arg0s <= arg1s ? arg0s : arg1s;
2527 break;
2528
2529 case UMIN:
2530 val = ((unsigned HOST_WIDE_INT) arg0
2531 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2532 break;
2533
2534 case SMAX:
2535 val = arg0s > arg1s ? arg0s : arg1s;
2536 break;
2537
2538 case UMAX:
2539 val = ((unsigned HOST_WIDE_INT) arg0
2540 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2541 break;
2542
2543 case SS_PLUS:
2544 case US_PLUS:
2545 case SS_MINUS:
2546 case US_MINUS:
2547 /* ??? There are simplifications that can be done. */
2548 return 0;
2549
2550 default:
2551 gcc_unreachable ();
2552 }
2553
2554 return gen_int_mode (val, mode);
2555 }
2556
2557 return NULL_RTX;
2558 }
2559
2560
2561 \f
2562 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2563 PLUS or MINUS.
2564
2565 Rather than test for specific cases, we do this by a brute-force method
2566 and do all possible simplifications until no more changes occur. Then
2567 we rebuild the operation.
2568
2569 If FORCE is true, then always generate the rtx. This is used to
2570 canonicalize stuff emitted from simplify_gen_binary. Note that this
2571 can still fail if the rtx is too complex. It won't fail just because
2572 the result is not 'simpler' than the input, however. */
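/* For example, (plus (minus x (const_int 3)) (const_int 5)) is decomposed
   into the operand list { +x, -3, +5 }; the two constants combine to 2 and
   the expression is rebuilt as (plus x (const_int 2)). */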
2573
2574 struct simplify_plus_minus_op_data
2575 {
2576 rtx op;
2577 int neg;
2578 };
2579
2580 static int
2581 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2582 {
2583 const struct simplify_plus_minus_op_data *d1 = p1;
2584 const struct simplify_plus_minus_op_data *d2 = p2;
2585
2586 return (commutative_operand_precedence (d2->op)
2587 - commutative_operand_precedence (d1->op));
2588 }
2589
2590 static rtx
2591 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2592 rtx op1, int force)
2593 {
2594 struct simplify_plus_minus_op_data ops[8];
2595 rtx result, tem;
2596 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2597 int first, changed;
2598 int i, j;
2599
2600 memset (ops, 0, sizeof ops);
2601
2602 /* Set up the two operands and then expand them until nothing has been
2603 changed. If we run out of room in our array, give up; this should
2604 almost never happen. */
2605
2606 ops[0].op = op0;
2607 ops[0].neg = 0;
2608 ops[1].op = op1;
2609 ops[1].neg = (code == MINUS);
2610
2611 do
2612 {
2613 changed = 0;
2614
2615 for (i = 0; i < n_ops; i++)
2616 {
2617 rtx this_op = ops[i].op;
2618 int this_neg = ops[i].neg;
2619 enum rtx_code this_code = GET_CODE (this_op);
2620
2621 switch (this_code)
2622 {
2623 case PLUS:
2624 case MINUS:
2625 if (n_ops == 7)
2626 return NULL_RTX;
2627
2628 ops[n_ops].op = XEXP (this_op, 1);
2629 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2630 n_ops++;
2631
2632 ops[i].op = XEXP (this_op, 0);
2633 input_ops++;
2634 changed = 1;
2635 break;
2636
2637 case NEG:
2638 ops[i].op = XEXP (this_op, 0);
2639 ops[i].neg = ! this_neg;
2640 changed = 1;
2641 break;
2642
2643 case CONST:
2644 if (n_ops < 7
2645 && GET_CODE (XEXP (this_op, 0)) == PLUS
2646 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2647 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2648 {
2649 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2650 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2651 ops[n_ops].neg = this_neg;
2652 n_ops++;
2653 input_consts++;
2654 changed = 1;
2655 }
2656 break;
2657
2658 case NOT:
2659 /* ~a -> (-a - 1) */
2660 if (n_ops != 7)
2661 {
2662 ops[n_ops].op = constm1_rtx;
2663 ops[n_ops++].neg = this_neg;
2664 ops[i].op = XEXP (this_op, 0);
2665 ops[i].neg = !this_neg;
2666 changed = 1;
2667 }
2668 break;
2669
2670 case CONST_INT:
2671 if (this_neg)
2672 {
2673 ops[i].op = neg_const_int (mode, this_op);
2674 ops[i].neg = 0;
2675 changed = 1;
2676 }
2677 break;
2678
2679 default:
2680 break;
2681 }
2682 }
2683 }
2684 while (changed);
2685
2686 /* If we only have two operands, we can't do anything. */
2687 if (n_ops <= 2 && !force)
2688 return NULL_RTX;
2689
2690 /* Count the number of CONSTs we didn't split above. */
2691 for (i = 0; i < n_ops; i++)
2692 if (GET_CODE (ops[i].op) == CONST)
2693 input_consts++;
2694
2695 /* Now simplify each pair of operands until nothing changes. The first
2696 time through just simplify constants against each other. */
2697
2698 first = 1;
2699 do
2700 {
2701 changed = first;
2702
2703 for (i = 0; i < n_ops - 1; i++)
2704 for (j = i + 1; j < n_ops; j++)
2705 {
2706 rtx lhs = ops[i].op, rhs = ops[j].op;
2707 int lneg = ops[i].neg, rneg = ops[j].neg;
2708
2709 if (lhs != 0 && rhs != 0
2710 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2711 {
2712 enum rtx_code ncode = PLUS;
2713
2714 if (lneg != rneg)
2715 {
2716 ncode = MINUS;
2717 if (lneg)
2718 tem = lhs, lhs = rhs, rhs = tem;
2719 }
2720 else if (swap_commutative_operands_p (lhs, rhs))
2721 tem = lhs, lhs = rhs, rhs = tem;
2722
2723 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2724
2725 /* Reject "simplifications" that just wrap the two
2726 arguments in a CONST. Failure to do so can result
2727 in infinite recursion with simplify_binary_operation
2728 when it calls us to simplify CONST operations. */
2729 if (tem
2730 && ! (GET_CODE (tem) == CONST
2731 && GET_CODE (XEXP (tem, 0)) == ncode
2732 && XEXP (XEXP (tem, 0), 0) == lhs
2733 && XEXP (XEXP (tem, 0), 1) == rhs)
2734 /* Don't allow -x + -1 -> ~x simplifications in the
2735 first pass. This gives us the chance to combine
2736 the -1 with other constants. */
2737 && ! (first
2738 && GET_CODE (tem) == NOT
2739 && XEXP (tem, 0) == rhs))
2740 {
2741 lneg &= rneg;
2742 if (GET_CODE (tem) == NEG)
2743 tem = XEXP (tem, 0), lneg = !lneg;
2744 if (GET_CODE (tem) == CONST_INT && lneg)
2745 tem = neg_const_int (mode, tem), lneg = 0;
2746
2747 ops[i].op = tem;
2748 ops[i].neg = lneg;
2749 ops[j].op = NULL_RTX;
2750 changed = 1;
2751 }
2752 }
2753 }
2754
2755 first = 0;
2756 }
2757 while (changed);
2758
2759 /* Pack all the operands to the lower-numbered entries. */
2760 for (i = 0, j = 0; j < n_ops; j++)
2761 if (ops[j].op)
2762 ops[i++] = ops[j];
2763 n_ops = i;
2764
2765 /* Sort the operations based on swap_commutative_operands_p. */
2766 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2767
2768 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2769 if (n_ops == 2
2770 && GET_CODE (ops[1].op) == CONST_INT
2771 && CONSTANT_P (ops[0].op)
2772 && ops[0].neg)
2773 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2774
2775 /* We suppressed creation of trivial CONST expressions in the
2776 combination loop to avoid recursion. Create one manually now.
2777 The combination loop should have ensured that there is exactly
2778 one CONST_INT, and the sort will have ensured that it is last
2779 in the array and that any other constant will be next-to-last. */
2780
2781 if (n_ops > 1
2782 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2783 && CONSTANT_P (ops[n_ops - 2].op))
2784 {
2785 rtx value = ops[n_ops - 1].op;
2786 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2787 value = neg_const_int (mode, value);
2788 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2789 n_ops--;
2790 }
2791
2792 /* Count the number of CONSTs that we generated. */
2793 n_consts = 0;
2794 for (i = 0; i < n_ops; i++)
2795 if (GET_CODE (ops[i].op) == CONST)
2796 n_consts++;
2797
2798 /* Give up if we didn't reduce the number of operands we had. Make
2799 sure we count a CONST as two operands. If we have the same
2800 number of operands, but have made more CONSTs than before, this
2801 is also an improvement, so accept it. */
2802 if (!force
2803 && (n_ops + n_consts > input_ops
2804 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2805 return NULL_RTX;
2806
2807 /* Put a non-negated operand first, if possible. */
2808
2809 for (i = 0; i < n_ops && ops[i].neg; i++)
2810 continue;
2811 if (i == n_ops)
2812 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2813 else if (i != 0)
2814 {
2815 tem = ops[0].op;
2816 ops[0] = ops[i];
2817 ops[i].op = tem;
2818 ops[i].neg = 1;
2819 }
2820
2821 /* Now make the result by performing the requested operations. */
2822 result = ops[0].op;
2823 for (i = 1; i < n_ops; i++)
2824 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2825 mode, result, ops[i].op);
2826
2827 return result;
2828 }
2829
2830 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2831 static bool
2832 plus_minus_operand_p (rtx x)
2833 {
2834 return GET_CODE (x) == PLUS
2835 || GET_CODE (x) == MINUS
2836 || (GET_CODE (x) == CONST
2837 && GET_CODE (XEXP (x, 0)) == PLUS
2838 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2839 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2840 }
2841
2842 /* Like simplify_binary_operation except used for relational operators.
2843 MODE is the mode of the result. If MODE is VOIDmode, the two operands
2844 must not both be VOIDmode as well.
2845
2846 CMP_MODE specifies the mode in which the comparison is done, so it is
2847 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2848 the operands or, if both are VOIDmode, the operands are compared in
2849 "infinite precision". */
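/* A minimal usage sketch (operands are placeholders):

     rtx res = simplify_relational_operation (EQ, SImode, SImode, op0, op1);

   which returns NULL_RTX when nothing simplifies; for instance, comparing
   a register with itself in an integer mode folds to const_true_rtx. */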
2850 rtx
2851 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2852 enum machine_mode cmp_mode, rtx op0, rtx op1)
2853 {
2854 rtx tem, trueop0, trueop1;
2855
2856 if (cmp_mode == VOIDmode)
2857 cmp_mode = GET_MODE (op0);
2858 if (cmp_mode == VOIDmode)
2859 cmp_mode = GET_MODE (op1);
2860
2861 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2862 if (tem)
2863 {
2864 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2865 {
2866 if (tem == const0_rtx)
2867 return CONST0_RTX (mode);
2868 #ifdef FLOAT_STORE_FLAG_VALUE
2869 {
2870 REAL_VALUE_TYPE val;
2871 val = FLOAT_STORE_FLAG_VALUE (mode);
2872 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2873 }
2874 #else
2875 return NULL_RTX;
2876 #endif
2877 }
2878 if (VECTOR_MODE_P (mode))
2879 {
2880 if (tem == const0_rtx)
2881 return CONST0_RTX (mode);
2882 #ifdef VECTOR_STORE_FLAG_VALUE
2883 {
2884 int i, units;
2885 rtvec v;
2886
2887 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2888 if (val == NULL_RTX)
2889 return NULL_RTX;
2890 if (val == const1_rtx)
2891 return CONST1_RTX (mode);
2892
2893 units = GET_MODE_NUNITS (mode);
2894 v = rtvec_alloc (units);
2895 for (i = 0; i < units; i++)
2896 RTVEC_ELT (v, i) = val;
2897 return gen_rtx_raw_CONST_VECTOR (mode, v);
2898 }
2899 #else
2900 return NULL_RTX;
2901 #endif
2902 }
2903
2904 return tem;
2905 }
2906
2907 /* For the following tests, ensure const0_rtx is op1. */
2908 if (swap_commutative_operands_p (op0, op1)
2909 || (op0 == const0_rtx && op1 != const0_rtx))
2910 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2911
2912 /* If op0 is a compare, extract the comparison arguments from it. */
2913 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2914 return simplify_relational_operation (code, mode, VOIDmode,
2915 XEXP (op0, 0), XEXP (op0, 1));
2916
2917 if (mode == VOIDmode
2918 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2919 || CC0_P (op0))
2920 return NULL_RTX;
2921
2922 trueop0 = avoid_constant_pool_reference (op0);
2923 trueop1 = avoid_constant_pool_reference (op1);
2924 return simplify_relational_operation_1 (code, mode, cmp_mode,
2925 trueop0, trueop1);
2926 }
2927
2928 /* This part of simplify_relational_operation is only used when CMP_MODE
2929 is not in class MODE_CC (i.e. it is a real comparison).
2930
2931 MODE is the mode of the result, while CMP_MODE specifies the mode
2932 in which the comparison is done, so it is the mode of the operands. */
2933
2934 static rtx
2935 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2936 enum machine_mode cmp_mode, rtx op0, rtx op1)
2937 {
2938 enum rtx_code op0code = GET_CODE (op0);
2939
2940 if (GET_CODE (op1) == CONST_INT)
2941 {
2942 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2943 {
2944 /* If op0 is a comparison, extract the comparison arguments from it. */
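/* E.g. (ne:SI (lt:SI x y) (const_int 0)) is just (lt:SI x y). */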
2945 if (code == NE)
2946 {
2947 if (GET_MODE (op0) == mode)
2948 return simplify_rtx (op0);
2949 else
2950 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2951 XEXP (op0, 0), XEXP (op0, 1));
2952 }
2953 else if (code == EQ)
2954 {
2955 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2956 if (new_code != UNKNOWN)
2957 return simplify_gen_relational (new_code, mode, VOIDmode,
2958 XEXP (op0, 0), XEXP (op0, 1));
2959 }
2960 }
2961 }
2962
2963 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
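/* For example, (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)). */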
2964 if ((code == EQ || code == NE)
2965 && (op0code == PLUS || op0code == MINUS)
2966 && CONSTANT_P (op1)
2967 && CONSTANT_P (XEXP (op0, 1))
2968 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2969 {
2970 rtx x = XEXP (op0, 0);
2971 rtx c = XEXP (op0, 1);
2972
2973 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2974 cmp_mode, op1, c);
2975 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2976 }
2977
2978 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
2979 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2980 if (code == NE
2981 && op1 == const0_rtx
2982 && GET_MODE_CLASS (mode) == MODE_INT
2983 && cmp_mode != VOIDmode
2984 /* ??? Work-around BImode bugs in the ia64 backend. */
2985 && mode != BImode
2986 && cmp_mode != BImode
2987 && nonzero_bits (op0, cmp_mode) == 1
2988 && STORE_FLAG_VALUE == 1)
2989 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
2990 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
2991 : lowpart_subreg (mode, op0, cmp_mode);
2992
2993 return NULL_RTX;
2994 }
2995
2996 /* Check if the given comparison (done in the given MODE) is actually a
2997 tautology or a contradiction.
2998 If no simplification is possible, this function returns zero.
2999 Otherwise, it returns either const_true_rtx or const0_rtx. */
3000
3001 rtx
3002 simplify_const_relational_operation (enum rtx_code code,
3003 enum machine_mode mode,
3004 rtx op0, rtx op1)
3005 {
3006 int equal, op0lt, op0ltu, op1lt, op1ltu;
3007 rtx tem;
3008 rtx trueop0;
3009 rtx trueop1;
3010
3011 gcc_assert (mode != VOIDmode
3012 || (GET_MODE (op0) == VOIDmode
3013 && GET_MODE (op1) == VOIDmode));
3014
3015 /* If op0 is a compare, extract the comparison arguments from it. */
3016 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3017 {
3018 op1 = XEXP (op0, 1);
3019 op0 = XEXP (op0, 0);
3020
3021 if (GET_MODE (op0) != VOIDmode)
3022 mode = GET_MODE (op0);
3023 else if (GET_MODE (op1) != VOIDmode)
3024 mode = GET_MODE (op1);
3025 else
3026 return 0;
3027 }
3028
3029 /* We can't simplify MODE_CC values since we don't know what the
3030 actual comparison is. */
3031 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3032 return 0;
3033
3034 /* Make sure the constant is second. */
3035 if (swap_commutative_operands_p (op0, op1))
3036 {
3037 tem = op0, op0 = op1, op1 = tem;
3038 code = swap_condition (code);
3039 }
3040
3041 trueop0 = avoid_constant_pool_reference (op0);
3042 trueop1 = avoid_constant_pool_reference (op1);
3043
3044 /* For integer comparisons of A and B maybe we can simplify A - B and can
3045 then simplify a comparison of that with zero. If A and B are both either
3046 a register or a CONST_INT, this can't help; testing for these cases will
3047 prevent infinite recursion here and speed things up.
3048
3049 If CODE is an unsigned comparison, then we can never do this optimization,
3050 because it gives an incorrect result if the subtraction wraps around zero.
3051 ANSI C defines unsigned operations such that they never overflow, and
3052 thus such cases can not be ignored; but we cannot do it even for
3053 signed comparisons for languages such as Java, so test flag_wrapv. */
3054
3055 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3056 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3057 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3058 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3059 /* We cannot do this for == or != if tem is a nonzero address. */
3060 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3061 && code != GTU && code != GEU && code != LTU && code != LEU)
3062 return simplify_const_relational_operation (signed_condition (code),
3063 mode, tem, const0_rtx);
3064
3065 if (flag_unsafe_math_optimizations && code == ORDERED)
3066 return const_true_rtx;
3067
3068 if (flag_unsafe_math_optimizations && code == UNORDERED)
3069 return const0_rtx;
3070
3071 /* For modes without NaNs, if the two operands are equal, we know the
3072 result except if they have side-effects. */
3073 if (! HONOR_NANS (GET_MODE (trueop0))
3074 && rtx_equal_p (trueop0, trueop1)
3075 && ! side_effects_p (trueop0))
3076 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3077
3078 /* If the operands are floating-point constants, see if we can fold
3079 the result. */
3080 else if (GET_CODE (trueop0) == CONST_DOUBLE
3081 && GET_CODE (trueop1) == CONST_DOUBLE
3082 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
3083 {
3084 REAL_VALUE_TYPE d0, d1;
3085
3086 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3087 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3088
3089 /* Comparisons are unordered iff at least one of the values is NaN. */
3090 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3091 switch (code)
3092 {
3093 case UNEQ:
3094 case UNLT:
3095 case UNGT:
3096 case UNLE:
3097 case UNGE:
3098 case NE:
3099 case UNORDERED:
3100 return const_true_rtx;
3101 case EQ:
3102 case LT:
3103 case GT:
3104 case LE:
3105 case GE:
3106 case LTGT:
3107 case ORDERED:
3108 return const0_rtx;
3109 default:
3110 return 0;
3111 }
3112
3113 equal = REAL_VALUES_EQUAL (d0, d1);
3114 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3115 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3116 }
3117
3118 /* Otherwise, see if the operands are both integers. */
3119 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3120 && (GET_CODE (trueop0) == CONST_DOUBLE
3121 || GET_CODE (trueop0) == CONST_INT)
3122 && (GET_CODE (trueop1) == CONST_DOUBLE
3123 || GET_CODE (trueop1) == CONST_INT))
3124 {
3125 int width = GET_MODE_BITSIZE (mode);
3126 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3127 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3128
3129 /* Get the two words comprising each integer constant. */
3130 if (GET_CODE (trueop0) == CONST_DOUBLE)
3131 {
3132 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3133 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3134 }
3135 else
3136 {
3137 l0u = l0s = INTVAL (trueop0);
3138 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3139 }
3140
3141 if (GET_CODE (trueop1) == CONST_DOUBLE)
3142 {
3143 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3144 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3145 }
3146 else
3147 {
3148 l1u = l1s = INTVAL (trueop1);
3149 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3150 }
3151
3152 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3153 we have to sign or zero-extend the values. */
3154 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3155 {
3156 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3157 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3158
3159 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3160 l0s |= ((HOST_WIDE_INT) (-1) << width);
3161
3162 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3163 l1s |= ((HOST_WIDE_INT) (-1) << width);
3164 }
3165 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3166 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3167
3168 equal = (h0u == h1u && l0u == l1u);
3169 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3170 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3171 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3172 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3173 }
3174
3175 /* Otherwise, there are some code-specific tests we can make. */
3176 else
3177 {
3178 /* Optimize comparisons with upper and lower bounds. */
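/* E.g. (geu x (const_int 0)) is always true and (gtu x (const_int -1)) is
   always false, since 0 and the mode mask are the unsigned bounds. */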
3179 if (SCALAR_INT_MODE_P (mode)
3180 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3181 {
3182 rtx mmin, mmax;
3183 int sign;
3184
3185 if (code == GEU
3186 || code == LEU
3187 || code == GTU
3188 || code == LTU)
3189 sign = 0;
3190 else
3191 sign = 1;
3192
3193 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3194
3195 tem = NULL_RTX;
3196 switch (code)
3197 {
3198 case GEU:
3199 case GE:
3200 /* x >= min is always true. */
3201 if (rtx_equal_p (trueop1, mmin))
3202 tem = const_true_rtx;
3203 else
3204 break;
3205
3206 case LEU:
3207 case LE:
3208 /* x <= max is always true. */
3209 if (rtx_equal_p (trueop1, mmax))
3210 tem = const_true_rtx;
3211 break;
3212
3213 case GTU:
3214 case GT:
3215 /* x > max is always false. */
3216 if (rtx_equal_p (trueop1, mmax))
3217 tem = const0_rtx;
3218 break;
3219
3220 case LTU:
3221 case LT:
3222 /* x < min is always false. */
3223 if (rtx_equal_p (trueop1, mmin))
3224 tem = const0_rtx;
3225 break;
3226
3227 default:
3228 break;
3229 }
3230 if (tem == const0_rtx
3231 || tem == const_true_rtx)
3232 return tem;
3233 }
3234
3235 switch (code)
3236 {
3237 case EQ:
3238 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3239 return const0_rtx;
3240 break;
3241
3242 case NE:
3243 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3244 return const_true_rtx;
3245 break;
3246
3247 case LT:
3248 /* Optimize abs(x) < 0.0. */
3249 if (trueop1 == CONST0_RTX (mode)
3250 && !HONOR_SNANS (mode)
3251 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3252 {
3253 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3254 : trueop0;
3255 if (GET_CODE (tem) == ABS)
3256 return const0_rtx;
3257 }
3258 break;
3259
3260 case GE:
3261 /* Optimize abs(x) >= 0.0. */
3262 if (trueop1 == CONST0_RTX (mode)
3263 && !HONOR_NANS (mode)
3264 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3265 {
3266 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3267 : trueop0;
3268 if (GET_CODE (tem) == ABS)
3269 return const_true_rtx;
3270 }
3271 break;
3272
3273 case UNGE:
3274 /* Optimize ! (abs(x) < 0.0). */
3275 if (trueop1 == CONST0_RTX (mode))
3276 {
3277 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3278 : trueop0;
3279 if (GET_CODE (tem) == ABS)
3280 return const_true_rtx;
3281 }
3282 break;
3283
3284 default:
3285 break;
3286 }
3287
3288 return 0;
3289 }
3290
3291 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3292 as appropriate. */
3293 switch (code)
3294 {
3295 case EQ:
3296 case UNEQ:
3297 return equal ? const_true_rtx : const0_rtx;
3298 case NE:
3299 case LTGT:
3300 return ! equal ? const_true_rtx : const0_rtx;
3301 case LT:
3302 case UNLT:
3303 return op0lt ? const_true_rtx : const0_rtx;
3304 case GT:
3305 case UNGT:
3306 return op1lt ? const_true_rtx : const0_rtx;
3307 case LTU:
3308 return op0ltu ? const_true_rtx : const0_rtx;
3309 case GTU:
3310 return op1ltu ? const_true_rtx : const0_rtx;
3311 case LE:
3312 case UNLE:
3313 return equal || op0lt ? const_true_rtx : const0_rtx;
3314 case GE:
3315 case UNGE:
3316 return equal || op1lt ? const_true_rtx : const0_rtx;
3317 case LEU:
3318 return equal || op0ltu ? const_true_rtx : const0_rtx;
3319 case GEU:
3320 return equal || op1ltu ? const_true_rtx : const0_rtx;
3321 case ORDERED:
3322 return const_true_rtx;
3323 case UNORDERED:
3324 return const0_rtx;
3325 default:
3326 gcc_unreachable ();
3327 }
3328 }
3329 \f
3330 /* Simplify CODE, an operation with result mode MODE and three operands,
3331 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3332 a constant. Return 0 if no simplification is possible. */
3333
3334 rtx
3335 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3336 enum machine_mode op0_mode, rtx op0, rtx op1,
3337 rtx op2)
3338 {
3339 unsigned int width = GET_MODE_BITSIZE (mode);
3340
3341 /* VOIDmode means "infinite" precision. */
3342 if (width == 0)
3343 width = HOST_BITS_PER_WIDE_INT;
3344
3345 switch (code)
3346 {
3347 case SIGN_EXTRACT:
3348 case ZERO_EXTRACT:
3349 if (GET_CODE (op0) == CONST_INT
3350 && GET_CODE (op1) == CONST_INT
3351 && GET_CODE (op2) == CONST_INT
3352 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3353 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3354 {
3355 /* Extracting a bit-field from a constant */
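/* For instance, with !BITS_BIG_ENDIAN, extracting 4 bits at position 4
   from (const_int 0xab) gives 0xa for ZERO_EXTRACT and -6 for SIGN_EXTRACT. */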
3356 HOST_WIDE_INT val = INTVAL (op0);
3357
3358 if (BITS_BIG_ENDIAN)
3359 val >>= (GET_MODE_BITSIZE (op0_mode)
3360 - INTVAL (op2) - INTVAL (op1));
3361 else
3362 val >>= INTVAL (op2);
3363
3364 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3365 {
3366 /* First zero-extend. */
3367 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3368 /* If desired, propagate sign bit. */
3369 if (code == SIGN_EXTRACT
3370 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3371 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3372 }
3373
3374 /* Clear the bits that don't belong in our mode,
3375 unless they and our sign bit are all one.
3376 So we get either a reasonable negative value or a reasonable
3377 unsigned value for this mode. */
3378 if (width < HOST_BITS_PER_WIDE_INT
3379 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3380 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3381 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3382
3383 return gen_int_mode (val, mode);
3384 }
3385 break;
3386
3387 case IF_THEN_ELSE:
3388 if (GET_CODE (op0) == CONST_INT)
3389 return op0 != const0_rtx ? op1 : op2;
3390
3391 /* Convert c ? a : a into "a". */
3392 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3393 return op1;
3394
3395 /* Convert a != b ? a : b into "a". */
3396 if (GET_CODE (op0) == NE
3397 && ! side_effects_p (op0)
3398 && ! HONOR_NANS (mode)
3399 && ! HONOR_SIGNED_ZEROS (mode)
3400 && ((rtx_equal_p (XEXP (op0, 0), op1)
3401 && rtx_equal_p (XEXP (op0, 1), op2))
3402 || (rtx_equal_p (XEXP (op0, 0), op2)
3403 && rtx_equal_p (XEXP (op0, 1), op1))))
3404 return op1;
3405
3406 /* Convert a == b ? a : b into "b". */
3407 if (GET_CODE (op0) == EQ
3408 && ! side_effects_p (op0)
3409 && ! HONOR_NANS (mode)
3410 && ! HONOR_SIGNED_ZEROS (mode)
3411 && ((rtx_equal_p (XEXP (op0, 0), op1)
3412 && rtx_equal_p (XEXP (op0, 1), op2))
3413 || (rtx_equal_p (XEXP (op0, 0), op2)
3414 && rtx_equal_p (XEXP (op0, 1), op1))))
3415 return op2;
3416
3417 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3418 {
3419 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3420 ? GET_MODE (XEXP (op0, 1))
3421 : GET_MODE (XEXP (op0, 0)));
3422 rtx temp;
3423
3424 /* Look for happy constants in op1 and op2. */
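/* E.g. with STORE_FLAG_VALUE == 1, (if_then_else (lt x y) (const_int 1)
   (const_int 0)) collapses to (lt x y). */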
3425 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3426 {
3427 HOST_WIDE_INT t = INTVAL (op1);
3428 HOST_WIDE_INT f = INTVAL (op2);
3429
3430 if (t == STORE_FLAG_VALUE && f == 0)
3431 code = GET_CODE (op0);
3432 else if (t == 0 && f == STORE_FLAG_VALUE)
3433 {
3434 enum rtx_code tmp;
3435 tmp = reversed_comparison_code (op0, NULL_RTX);
3436 if (tmp == UNKNOWN)
3437 break;
3438 code = tmp;
3439 }
3440 else
3441 break;
3442
3443 return simplify_gen_relational (code, mode, cmp_mode,
3444 XEXP (op0, 0), XEXP (op0, 1));
3445 }
3446
3447 if (cmp_mode == VOIDmode)
3448 cmp_mode = op0_mode;
3449 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3450 cmp_mode, XEXP (op0, 0),
3451 XEXP (op0, 1));
3452
3453 /* See if any simplifications were possible. */
3454 if (temp)
3455 {
3456 if (GET_CODE (temp) == CONST_INT)
3457 return temp == const0_rtx ? op2 : op1;
3458 else if (temp)
3459 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3460 }
3461 }
3462 break;
3463
3464 case VEC_MERGE:
3465 gcc_assert (GET_MODE (op0) == mode);
3466 gcc_assert (GET_MODE (op1) == mode);
3467 gcc_assert (VECTOR_MODE_P (mode));
3468 op2 = avoid_constant_pool_reference (op2);
3469 if (GET_CODE (op2) == CONST_INT)
3470 {
3471 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3472 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3473 int mask = (1 << n_elts) - 1;
3474
3475 if (!(INTVAL (op2) & mask))
3476 return op1;
3477 if ((INTVAL (op2) & mask) == mask)
3478 return op0;
3479
3480 op0 = avoid_constant_pool_reference (op0);
3481 op1 = avoid_constant_pool_reference (op1);
3482 if (GET_CODE (op0) == CONST_VECTOR
3483 && GET_CODE (op1) == CONST_VECTOR)
3484 {
3485 rtvec v = rtvec_alloc (n_elts);
3486 unsigned int i;
3487
3488 for (i = 0; i < n_elts; i++)
3489 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3490 ? CONST_VECTOR_ELT (op0, i)
3491 : CONST_VECTOR_ELT (op1, i));
3492 return gen_rtx_CONST_VECTOR (mode, v);
3493 }
3494 }
3495 break;
3496
3497 default:
3498 gcc_unreachable ();
3499 }
3500
3501 return 0;
3502 }
3503
3504 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3505 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3506
3507 Works by unpacking OP into a collection of 8-bit values
3508 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3509 and then repacking them again for OUTERMODE. */
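/* For example, extracting the low SImode word of a DImode CONST_DOUBLE
   unpacks the 64-bit value into eight byte-sized chunks, selects the four
   indicated by BYTE (adjusted for endianness), and repacks them as a
   CONST_INT. */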
3510
3511 static rtx
3512 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3513 enum machine_mode innermode, unsigned int byte)
3514 {
3515 /* We support up to 512-bit values (for V8DFmode). */
3516 enum {
3517 max_bitsize = 512,
3518 value_bit = 8,
3519 value_mask = (1 << value_bit) - 1
3520 };
3521 unsigned char value[max_bitsize / value_bit];
3522 int value_start;
3523 int i;
3524 int elem;
3525
3526 int num_elem;
3527 rtx * elems;
3528 int elem_bitsize;
3529 rtx result_s;
3530 rtvec result_v = NULL;
3531 enum mode_class outer_class;
3532 enum machine_mode outer_submode;
3533
3534 /* Some ports misuse CCmode. */
3535 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3536 return op;
3537
3538 /* We have no way to represent a complex constant at the rtl level. */
3539 if (COMPLEX_MODE_P (outermode))
3540 return NULL_RTX;
3541
3542 /* Unpack the value. */
3543
3544 if (GET_CODE (op) == CONST_VECTOR)
3545 {
3546 num_elem = CONST_VECTOR_NUNITS (op);
3547 elems = &CONST_VECTOR_ELT (op, 0);
3548 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3549 }
3550 else
3551 {
3552 num_elem = 1;
3553 elems = &op;
3554 elem_bitsize = max_bitsize;
3555 }
3556 /* If this asserts, it is too complicated; reducing value_bit may help. */
3557 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3558 /* I don't know how to handle endianness of sub-units. */
3559 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3560
3561 for (elem = 0; elem < num_elem; elem++)
3562 {
3563 unsigned char * vp;
3564 rtx el = elems[elem];
3565
3566 /* Vectors are kept in target memory order. (This is probably
3567 a mistake.) */
3568 {
3569 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3570 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3571 / BITS_PER_UNIT);
3572 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3573 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3574 unsigned bytele = (subword_byte % UNITS_PER_WORD
3575 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3576 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3577 }
3578
3579 switch (GET_CODE (el))
3580 {
3581 case CONST_INT:
3582 for (i = 0;
3583 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3584 i += value_bit)
3585 *vp++ = INTVAL (el) >> i;
3586 /* CONST_INTs are always logically sign-extended. */
3587 for (; i < elem_bitsize; i += value_bit)
3588 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3589 break;
3590
3591 case CONST_DOUBLE:
3592 if (GET_MODE (el) == VOIDmode)
3593 {
3594 /* If this triggers, someone should have generated a
3595 CONST_INT instead. */
3596 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3597
3598 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3599 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3600 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3601 {
3602 *vp++
3603 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3604 i += value_bit;
3605 }
3606 /* It shouldn't matter what's done here, so fill it with
3607 zero. */
3608 for (; i < elem_bitsize; i += value_bit)
3609 *vp++ = 0;
3610 }
3611 else
3612 {
3613 long tmp[max_bitsize / 32];
3614 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3615
3616 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3617 gcc_assert (bitsize <= elem_bitsize);
3618 gcc_assert (bitsize % value_bit == 0);
3619
3620 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3621 GET_MODE (el));
3622
3623 /* real_to_target produces its result in words affected by
3624 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3625 and use WORDS_BIG_ENDIAN instead; see the documentation
3626 of SUBREG in rtl.texi. */
3627 for (i = 0; i < bitsize; i += value_bit)
3628 {
3629 int ibase;
3630 if (WORDS_BIG_ENDIAN)
3631 ibase = bitsize - 1 - i;
3632 else
3633 ibase = i;
3634 *vp++ = tmp[ibase / 32] >> i % 32;
3635 }
3636
3637 /* It shouldn't matter what's done here, so fill it with
3638 zero. */
3639 for (; i < elem_bitsize; i += value_bit)
3640 *vp++ = 0;
3641 }
3642 break;
3643
3644 default:
3645 gcc_unreachable ();
3646 }
3647 }
3648
3649 /* Now, pick the right byte to start with. */
3650 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3651 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3652 will already have offset 0. */
3653 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3654 {
3655 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3656 - byte);
3657 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3658 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3659 byte = (subword_byte % UNITS_PER_WORD
3660 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3661 }
3662
3663 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3664 so if it's become negative it will instead be very large.) */
3665 gcc_assert (byte < GET_MODE_SIZE (innermode));
3666
3667 /* Convert from bytes to chunks of size value_bit. */
3668 value_start = byte * (BITS_PER_UNIT / value_bit);
3669
3670 /* Re-pack the value. */
3671
3672 if (VECTOR_MODE_P (outermode))
3673 {
3674 num_elem = GET_MODE_NUNITS (outermode);
3675 result_v = rtvec_alloc (num_elem);
3676 elems = &RTVEC_ELT (result_v, 0);
3677 outer_submode = GET_MODE_INNER (outermode);
3678 }
3679 else
3680 {
3681 num_elem = 1;
3682 elems = &result_s;
3683 outer_submode = outermode;
3684 }
3685
3686 outer_class = GET_MODE_CLASS (outer_submode);
3687 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3688
3689 gcc_assert (elem_bitsize % value_bit == 0);
3690 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3691
3692 for (elem = 0; elem < num_elem; elem++)
3693 {
3694 unsigned char *vp;
3695
3696 /* Vectors are stored in target memory order. (This is probably
3697 a mistake.) */
3698 {
3699 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3700 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3701 / BITS_PER_UNIT);
3702 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3703 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3704 unsigned bytele = (subword_byte % UNITS_PER_WORD
3705 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3706 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3707 }
3708
3709 switch (outer_class)
3710 {
3711 case MODE_INT:
3712 case MODE_PARTIAL_INT:
3713 {
3714 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3715
3716 for (i = 0;
3717 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3718 i += value_bit)
3719 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3720 for (; i < elem_bitsize; i += value_bit)
3721 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3722 << (i - HOST_BITS_PER_WIDE_INT));
3723
3724 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3725 know why. */
3726 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3727 elems[elem] = gen_int_mode (lo, outer_submode);
3728 else
3729 elems[elem] = immed_double_const (lo, hi, outer_submode);
3730 }
3731 break;
3732
3733 case MODE_FLOAT:
3734 {
3735 REAL_VALUE_TYPE r;
3736 long tmp[max_bitsize / 32];
3737
3738 /* real_from_target wants its input in words affected by
3739 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3740 and use WORDS_BIG_ENDIAN instead; see the documentation
3741 of SUBREG in rtl.texi. */
3742 for (i = 0; i < max_bitsize / 32; i++)
3743 tmp[i] = 0;
3744 for (i = 0; i < elem_bitsize; i += value_bit)
3745 {
3746 int ibase;
3747 if (WORDS_BIG_ENDIAN)
3748 ibase = elem_bitsize - 1 - i;
3749 else
3750 ibase = i;
3751 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3752 }
3753
3754 real_from_target (&r, tmp, outer_submode);
3755 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3756 }
3757 break;
3758
3759 default:
3760 gcc_unreachable ();
3761 }
3762 }
3763 if (VECTOR_MODE_P (outermode))
3764 return gen_rtx_CONST_VECTOR (outermode, result_v);
3765 else
3766 return result_s;
3767 }
3768
3769 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3770 Return 0 if no simplifications are possible. */
3771 rtx
3772 simplify_subreg (enum machine_mode outermode, rtx op,
3773 enum machine_mode innermode, unsigned int byte)
3774 {
3775 /* Little bit of sanity checking. */
3776 gcc_assert (innermode != VOIDmode);
3777 gcc_assert (outermode != VOIDmode);
3778 gcc_assert (innermode != BLKmode);
3779 gcc_assert (outermode != BLKmode);
3780
3781 gcc_assert (GET_MODE (op) == innermode
3782 || GET_MODE (op) == VOIDmode);
3783
3784 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3785 gcc_assert (byte < GET_MODE_SIZE (innermode));
3786
3787 if (outermode == innermode && !byte)
3788 return op;
3789
3790 if (GET_CODE (op) == CONST_INT
3791 || GET_CODE (op) == CONST_DOUBLE
3792 || GET_CODE (op) == CONST_VECTOR)
3793 return simplify_immed_subreg (outermode, op, innermode, byte);
3794
3795 /* Changing mode twice with SUBREG => just change it once,
3796 or not at all if changing back to the operand's starting mode. */
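/* For instance, (subreg:QI (subreg:HI (reg:SI 100) 0) 0) simplifies to
   (subreg:QI (reg:SI 100) 0). */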
3797 if (GET_CODE (op) == SUBREG)
3798 {
3799 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3800 int final_offset = byte + SUBREG_BYTE (op);
3801 rtx newx;
3802
3803 if (outermode == innermostmode
3804 && byte == 0 && SUBREG_BYTE (op) == 0)
3805 return SUBREG_REG (op);
3806
3807 /* The SUBREG_BYTE represents the offset, as if the value were stored
3808 in memory. An irritating exception is a paradoxical subreg, where
3809 we define SUBREG_BYTE to be 0. On big-endian machines this
3810 value should be negative. For a moment, undo this exception. */
3811 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3812 {
3813 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3814 if (WORDS_BIG_ENDIAN)
3815 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3816 if (BYTES_BIG_ENDIAN)
3817 final_offset += difference % UNITS_PER_WORD;
3818 }
3819 if (SUBREG_BYTE (op) == 0
3820 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3821 {
3822 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3823 if (WORDS_BIG_ENDIAN)
3824 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3825 if (BYTES_BIG_ENDIAN)
3826 final_offset += difference % UNITS_PER_WORD;
3827 }
3828
3829 /* See whether resulting subreg will be paradoxical. */
3830 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3831 {
3832 /* In nonparadoxical subregs we can't handle negative offsets. */
3833 if (final_offset < 0)
3834 return NULL_RTX;
3835 /* Bail out in case the resulting subreg would be incorrect. */
3836 if (final_offset % GET_MODE_SIZE (outermode)
3837 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3838 return NULL_RTX;
3839 }
3840 else
3841 {
3842 int offset = 0;
3843 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3844
3845 /* In a paradoxical subreg, see if we are still looking at the lower part.
3846 If so, our SUBREG_BYTE will be 0. */
3847 if (WORDS_BIG_ENDIAN)
3848 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3849 if (BYTES_BIG_ENDIAN)
3850 offset += difference % UNITS_PER_WORD;
3851 if (offset == final_offset)
3852 final_offset = 0;
3853 else
3854 return NULL_RTX;
3855 }
3856
3857 /* Recurse for further possible simplifications. */
3858 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3859 final_offset);
3860 if (newx)
3861 return newx;
3862 if (validate_subreg (outermode, innermostmode,
3863 SUBREG_REG (op), final_offset))
3864 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3865 return NULL_RTX;
3866 }
3867
3868 /* SUBREG of a hard register => just change the register number
3869 and/or mode. If the hard register is not valid in that mode,
3870 suppress this simplification. If the hard register is the stack,
3871 frame, or argument pointer, leave this as a SUBREG. */
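/* For instance (hypothetical register numbering, little-endian target
   with 4-byte registers), (subreg:SI (reg:DI 2) 4) may become (reg:SI 3)
   when register 3 is valid in SImode; the new register number comes from
   subreg_regno_offset.  */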
3872
3873 if (REG_P (op)
3874 && REGNO (op) < FIRST_PSEUDO_REGISTER
3875 #ifdef CANNOT_CHANGE_MODE_CLASS
3876 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3877 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3878 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3879 #endif
3880 && ((reload_completed && !frame_pointer_needed)
3881 || (REGNO (op) != FRAME_POINTER_REGNUM
3882 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3883 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3884 #endif
3885 ))
3886 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3887 && REGNO (op) != ARG_POINTER_REGNUM
3888 #endif
3889 && REGNO (op) != STACK_POINTER_REGNUM
3890 && subreg_offset_representable_p (REGNO (op), innermode,
3891 byte, outermode))
3892 {
3893 unsigned int regno = REGNO (op);
3894 unsigned int final_regno
3895 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3896
3897 /* ??? We do allow it if the current REG is not valid for
3898 its mode. This is a kludge to work around how float/complex
3899 arguments are passed on 32-bit SPARC and should be fixed. */
3900 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3901 || ! HARD_REGNO_MODE_OK (regno, innermode))
3902 {
3903 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3904
3905 /* Propagate the original regno.  We don't have any way to specify
3906 the offset inside the original regno, so do so only for the lowpart.
3907 The information is used only by alias analysis, which cannot
3908 grok a partial register anyway. */
3909
3910 if (subreg_lowpart_offset (outermode, innermode) == byte)
3911 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3912 return x;
3913 }
3914 }
3915
3916 /* If we have a SUBREG of a register that we are replacing and we are
3917 replacing it with a MEM, make a new MEM and try replacing the
3918 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3919 or if we would be widening it. */
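/* E.g., (subreg:QI (mem:SI addr) 3) can become a QImode MEM at
   addr + 3 via adjust_address_nv (illustrative; the exact address
   arithmetic is target-dependent).  */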
3920
3921 if (MEM_P (op)
3922 && ! mode_dependent_address_p (XEXP (op, 0))
3923 /* Allow splitting of volatile memory references in case we don't
3924 have an instruction to move the whole thing. */
3925 && (! MEM_VOLATILE_P (op)
3926 || ! have_insn_for (SET, innermode))
3927 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3928 return adjust_address_nv (op, outermode, byte);
3929
3930 /* Handle complex values represented as CONCAT
3931 of real and imaginary part. */
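/* E.g., (subreg:SF (concat:SC re im) 4) selects IM when SFmode is
   4 bytes wide (illustrative).  */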
3932 if (GET_CODE (op) == CONCAT)
3933 {
3934 unsigned int inner_size, final_offset;
3935 rtx part, res;
3936
3937 inner_size = GET_MODE_UNIT_SIZE (innermode);
3938 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3939 final_offset = byte % inner_size;
3940 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3941 return NULL_RTX;
3942
3943 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3944 if (res)
3945 return res;
3946 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3947 return gen_rtx_SUBREG (outermode, part, final_offset);
3948 return NULL_RTX;
3949 }
3950
3951 /* Optimize SUBREG truncations of zero and sign extended values. */
3952 if ((GET_CODE (op) == ZERO_EXTEND
3953 || GET_CODE (op) == SIGN_EXTEND)
3954 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3955 {
3956 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3957
3958 /* If we're requesting the lowpart of a zero or sign extension,
3959 there are three possibilities. If the outermode is the same
3960 as the origmode, we can omit both the extension and the subreg.
3961 If the outermode is not larger than the origmode, we can apply
3962 the truncation without the extension. Finally, if the outermode
3963 is larger than the origmode, but both are integer modes, we
3964 can just extend to the appropriate mode. */
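      /* E.g., (subreg:QI (zero_extend:SI (reg:QI x)) 0) reduces to
	 (reg:QI x) on a little-endian target (illustrative).  */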
3965 if (bitpos == 0)
3966 {
3967 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3968 if (outermode == origmode)
3969 return XEXP (op, 0);
3970 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3971 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3972 subreg_lowpart_offset (outermode,
3973 origmode));
3974 if (SCALAR_INT_MODE_P (outermode))
3975 return simplify_gen_unary (GET_CODE (op), outermode,
3976 XEXP (op, 0), origmode);
3977 }
3978
3979 /* A SUBREG resulting from a zero extension may fold to zero if
3980 it extracts higher bits than the ZERO_EXTEND's source provides. */
3981 if (GET_CODE (op) == ZERO_EXTEND
3982 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3983 return CONST0_RTX (outermode);
3984 }
3985
3986 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
3987 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3988 the outer subreg is effectively a truncation to the original mode. */
3989 if ((GET_CODE (op) == LSHIFTRT
3990 || GET_CODE (op) == ASHIFTRT)
3991 && SCALAR_INT_MODE_P (outermode)
3992 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
3993 to avoid the possibility that an outer LSHIFTRT shifts by more
3994 than the sign extension's sign_bit_copies and introduces zeros
3995 into the high bits of the result. */
3996 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3997 && GET_CODE (XEXP (op, 1)) == CONST_INT
3998 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3999 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4000 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4001 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4002 return simplify_gen_binary (ASHIFTRT, outermode,
4003 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4004
4005 /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4006 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4007 the outer subreg is effectively a truncation to the original mode. */
4008 if ((GET_CODE (op) == LSHIFTRT
4009 || GET_CODE (op) == ASHIFTRT)
4010 && SCALAR_INT_MODE_P (outermode)
4011 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4012 && GET_CODE (XEXP (op, 1)) == CONST_INT
4013 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4014 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4015 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4016 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4017 return simplify_gen_binary (LSHIFTRT, outermode,
4018 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4019
4020 /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4021 into (ashift:QI (x:QI) C), where C is a suitable small constant and
4022 the outer subreg is effectively a truncation to the original mode. */
4023 if (GET_CODE (op) == ASHIFT
4024 && SCALAR_INT_MODE_P (outermode)
4025 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4026 && GET_CODE (XEXP (op, 1)) == CONST_INT
4027 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4028 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4029 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4030 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4031 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4032 return simplify_gen_binary (ASHIFT, outermode,
4033 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4034
4035 return NULL_RTX;
4036 }
4037
4038 /* Make a SUBREG operation or equivalent if it folds. */
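/* Note (descriptive of the code below): unlike simplify_subreg, this
   falls back to creating an explicit (subreg ...) rtx when no
   simplification applies, provided such a subreg would be valid.  */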
4039
4040 rtx
4041 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4042 enum machine_mode innermode, unsigned int byte)
4043 {
4044 rtx newx;
4045
4046 newx = simplify_subreg (outermode, op, innermode, byte);
4047 if (newx)
4048 return newx;
4049
4050 if (GET_CODE (op) == SUBREG
4051 || GET_CODE (op) == CONCAT
4052 || GET_MODE (op) == VOIDmode)
4053 return NULL_RTX;
4054
4055 if (validate_subreg (outermode, innermode, op, byte))
4056 return gen_rtx_SUBREG (outermode, op, byte);
4057
4058 return NULL_RTX;
4059 }
4060
4061 /* Simplify X, an rtx expression.
4062
4063 Return the simplified expression or NULL if no simplifications
4064 were possible.
4065
4066 This is the preferred entry point into the simplification routines;
4067 however, we still allow passes to call the more specific routines.
4068
4069 Right now GCC has three (yes, three) major bodies of RTL simplification
4070 code that need to be unified.
4071
4072 1. fold_rtx in cse.c. This code uses various CSE specific
4073 information to aid in RTL simplification.
4074
4075 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4076 it uses combine specific information to aid in RTL
4077 simplification.
4078
4079 3. The routines in this file.
4080
4081
4082 Long term we want to only have one body of simplification code; to
4083 get to that state I recommend the following steps:
4084
4085 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4086 which do not depend on pass-specific state into these routines.
4087
4088 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4089 use this routine whenever possible.
4090
4091 3. Allow for pass dependent state to be provided to these
4092 routines and add simplifications based on the pass dependent
4093 state. Remove code from cse.c & combine.c that becomes
4094 redundant/dead.
4095
4096 It will take time, but ultimately the compiler will be easier to
4097 maintain and improve. It's totally silly that when we add a
4098 simplification it needs to be added to 4 places (3 for RTL
4099 simplification and 1 for tree simplification). */
4100
4101 rtx
4102 simplify_rtx (rtx x)
4103 {
4104 enum rtx_code code = GET_CODE (x);
4105 enum machine_mode mode = GET_MODE (x);
4106
4107 switch (GET_RTX_CLASS (code))
4108 {
4109 case RTX_UNARY:
4110 return simplify_unary_operation (code, mode,
4111 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4112 case RTX_COMM_ARITH:
4113 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4114 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4115
4116 /* Fall through.... */
4117
4118 case RTX_BIN_ARITH:
4119 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4120
4121 case RTX_TERNARY:
4122 case RTX_BITFIELD_OPS:
4123 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4124 XEXP (x, 0), XEXP (x, 1),
4125 XEXP (x, 2));
4126
4127 case RTX_COMPARE:
4128 case RTX_COMM_COMPARE:
4129 return simplify_relational_operation (code, mode,
4130 ((GET_MODE (XEXP (x, 0))
4131 != VOIDmode)
4132 ? GET_MODE (XEXP (x, 0))
4133 : GET_MODE (XEXP (x, 1))),
4134 XEXP (x, 0),
4135 XEXP (x, 1));
4136
4137 case RTX_EXTRA:
4138 if (code == SUBREG)
4139 return simplify_gen_subreg (mode, SUBREG_REG (x),
4140 GET_MODE (SUBREG_REG (x)),
4141 SUBREG_BYTE (x));
4142 break;
4143
4144 case RTX_OBJ:
4145 if (code == LO_SUM)
4146 {
4147 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4148 if (GET_CODE (XEXP (x, 0)) == HIGH
4149 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4150 return XEXP (x, 1);
4151 }
4152 break;
4153
4154 default:
4155 break;
4156 }
4157 return NULL;
4158 }