1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
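/* For illustration (assuming a 32-bit HOST_WIDE_INT): the 64-bit value -5
   is held as low = 0xfffffffb, high = 0xffffffff.  Since the low half is
   negative when viewed as signed, HWI_SIGN_EXTEND (0xfffffffb) yields -1,
   reconstructing the correct high half.  */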
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 rtx, int);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
66 \f
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
71 {
72 return gen_int_mode (- INTVAL (i), mode);
73 }
74
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
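/* For example, with 32-bit SImode the only accepted values are those
   whose low 32 bits are 0x80000000, i.e. the constant 1 << 31.  */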
77
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
80 {
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
83
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
86
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
90
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
97 {
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
100 }
101 else
102 return false;
103
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
107 }
108 \f
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
111
112 rtx
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
115 {
116 rtx tem;
117
118 /* Put complex operands first and constants second if commutative. */
119 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
120 && swap_commutative_operands_p (op0, op1))
121 tem = op0, op0 = op1, op1 = tem;
122
123 /* If this simplifies, do it. */
124 tem = simplify_binary_operation (code, mode, op0, op1);
125 if (tem)
126 return tem;
127
128 /* Handle addition and subtraction specially. Otherwise, just form
129 the operation. */
130
131 if (code == PLUS || code == MINUS)
132 {
133 tem = simplify_plus_minus (code, mode, op0, op1, 1);
134 if (tem)
135 return tem;
136 }
137
138 return gen_rtx_fmt_ee (code, mode, op0, op1);
139 }
140 \f
141 /* If X is a MEM referencing the constant pool, return the real value.
142 Otherwise return X. */
143 rtx
144 avoid_constant_pool_reference (rtx x)
145 {
146 rtx c, tmp, addr;
147 enum machine_mode cmode;
148
149 switch (GET_CODE (x))
150 {
151 case MEM:
152 break;
153
154 case FLOAT_EXTEND:
155 /* Handle float extensions of constant pool references. */
156 tmp = XEXP (x, 0);
157 c = avoid_constant_pool_reference (tmp);
158 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
159 {
160 REAL_VALUE_TYPE d;
161
162 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
163 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
164 }
165 return x;
166
167 default:
168 return x;
169 }
170
171 addr = XEXP (x, 0);
172
173 /* Call target hook to avoid the effects of -fpic etc.... */
174 addr = targetm.delegitimize_address (addr);
175
176 if (GET_CODE (addr) == LO_SUM)
177 addr = XEXP (addr, 1);
178
179 if (GET_CODE (addr) != SYMBOL_REF
180 || ! CONSTANT_POOL_ADDRESS_P (addr))
181 return x;
182
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
185
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (cmode != GET_MODE (x))
190 {
191 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
192 return c ? c : x;
193 }
194
195 return c;
196 }
197 \f
198 /* Make a unary operation by first seeing if it folds and otherwise making
199 the specified operation. */
200
201 rtx
202 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
203 enum machine_mode op_mode)
204 {
205 rtx tem;
206
207 /* If this simplifies, use it. */
208 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
209 return tem;
210
211 return gen_rtx_fmt_e (code, mode, op);
212 }
213
214 /* Likewise for ternary operations. */
215
216 rtx
217 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
218 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
219 {
220 rtx tem;
221
222 /* If this simplifies, use it. */
223 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
224 op0, op1, op2)))
225 return tem;
226
227 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
228 }
229
230 /* Likewise, for relational operations.
231 CMP_MODE specifies mode comparison is done in. */
232
233 rtx
234 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
235 enum machine_mode cmp_mode, rtx op0, rtx op1)
236 {
237 rtx tem;
238
239 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
240 op0, op1)))
241 return tem;
242
243 return gen_rtx_fmt_ee (code, mode, op0, op1);
244 }
245 \f
246 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
247 resulting RTX. Return a new RTX which is as simplified as possible. */
248
249 rtx
250 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
251 {
252 enum rtx_code code = GET_CODE (x);
253 enum machine_mode mode = GET_MODE (x);
254 enum machine_mode op_mode;
255 rtx op0, op1, op2;
256
257 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
258 to build a new expression substituting recursively. If we can't do
259 anything, return our input. */
260
261 if (x == old_rtx)
262 return new_rtx;
263
264 switch (GET_RTX_CLASS (code))
265 {
266 case RTX_UNARY:
267 op0 = XEXP (x, 0);
268 op_mode = GET_MODE (op0);
269 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
270 if (op0 == XEXP (x, 0))
271 return x;
272 return simplify_gen_unary (code, mode, op0, op_mode);
273
274 case RTX_BIN_ARITH:
275 case RTX_COMM_ARITH:
276 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
277 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
278 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
279 return x;
280 return simplify_gen_binary (code, mode, op0, op1);
281
282 case RTX_COMPARE:
283 case RTX_COMM_COMPARE:
284 op0 = XEXP (x, 0);
285 op1 = XEXP (x, 1);
286 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
287 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
288 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
289 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
290 return x;
291 return simplify_gen_relational (code, mode, op_mode, op0, op1);
292
293 case RTX_TERNARY:
294 case RTX_BITFIELD_OPS:
295 op0 = XEXP (x, 0);
296 op_mode = GET_MODE (op0);
297 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
298 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
299 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
300 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
301 return x;
302 if (op_mode == VOIDmode)
303 op_mode = GET_MODE (op0);
304 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
305
306 case RTX_EXTRA:
307 /* The only case we try to handle is a SUBREG. */
308 if (code == SUBREG)
309 {
310 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
311 if (op0 == SUBREG_REG (x))
312 return x;
313 op0 = simplify_gen_subreg (GET_MODE (x), op0,
314 GET_MODE (SUBREG_REG (x)),
315 SUBREG_BYTE (x));
316 return op0 ? op0 : x;
317 }
318 break;
319
320 case RTX_OBJ:
321 if (code == MEM)
322 {
323 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
324 if (op0 == XEXP (x, 0))
325 return x;
326 return replace_equiv_address_nv (x, op0);
327 }
328 else if (code == LO_SUM)
329 {
330 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
331 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
332
333 /* (lo_sum (high x) x) -> x */
334 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
335 return op1;
336
337 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
338 return x;
339 return gen_rtx_LO_SUM (mode, op0, op1);
340 }
341 else if (code == REG)
342 {
343 if (rtx_equal_p (x, old_rtx))
344 return new_rtx;
345 }
346 break;
347
348 default:
349 break;
350 }
351 return x;
352 }
353 \f
354 /* Try to simplify a unary operation CODE whose output mode is to be
355 MODE with input operand OP whose mode was originally OP_MODE.
356 Return zero if no simplification can be made. */
357 rtx
358 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
359 rtx op, enum machine_mode op_mode)
360 {
361 rtx trueop, tem;
362
363 if (GET_CODE (op) == CONST)
364 op = XEXP (op, 0);
365
366 trueop = avoid_constant_pool_reference (op);
367
368 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
369 if (tem)
370 return tem;
371
372 return simplify_unary_operation_1 (code, mode, op);
373 }
374
375 /* Perform some simplifications we can do even if the operands
376 aren't constant. */
377 static rtx
378 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
379 {
380 enum rtx_code reversed;
381 rtx temp;
382
383 switch (code)
384 {
385 case NOT:
386 /* (not (not X)) == X. */
387 if (GET_CODE (op) == NOT)
388 return XEXP (op, 0);
389
390 /* (not (eq X Y)) == (ne X Y), etc. */
391 if (COMPARISON_P (op)
392 && (mode == BImode || STORE_FLAG_VALUE == -1)
393 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
394 return simplify_gen_relational (reversed, mode, VOIDmode,
395 XEXP (op, 0), XEXP (op, 1));
396
397 /* (not (plus X -1)) can become (neg X). */
398 if (GET_CODE (op) == PLUS
399 && XEXP (op, 1) == constm1_rtx)
400 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
401
402 /* Similarly, (not (neg X)) is (plus X -1). */
403 if (GET_CODE (op) == NEG)
404 return plus_constant (XEXP (op, 0), -1);
405
406 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
407 if (GET_CODE (op) == XOR
408 && GET_CODE (XEXP (op, 1)) == CONST_INT
409 && (temp = simplify_unary_operation (NOT, mode,
410 XEXP (op, 1), mode)) != 0)
411 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
412
413 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
414 if (GET_CODE (op) == PLUS
415 && GET_CODE (XEXP (op, 1)) == CONST_INT
416 && mode_signbit_p (mode, XEXP (op, 1))
417 && (temp = simplify_unary_operation (NOT, mode,
418 XEXP (op, 1), mode)) != 0)
419 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
420
421
422 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
423 operands other than 1, but that is not valid. We could do a
424 similar simplification for (not (lshiftrt C X)) where C is
425 just the sign bit, but this doesn't seem common enough to
426 bother with. */
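/* For instance, in QImode (not (ashift 1 3)) is ~0x08 == 0xf7, and
   rotating ~1 == 0xfe left by 3 also gives 0xf7: the single zero bit
   simply moves to the shifted position.  */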
427 if (GET_CODE (op) == ASHIFT
428 && XEXP (op, 0) == const1_rtx)
429 {
430 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
431 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
432 }
433
434 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
435 by reversing the comparison code if valid. */
436 if (STORE_FLAG_VALUE == -1
437 && COMPARISON_P (op)
438 && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
439 return simplify_gen_relational (reversed, mode, VOIDmode,
440 XEXP (op, 0), XEXP (op, 1));
441
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
445
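/* Concretely, (ashiftrt foo (width - 1)) is -1 when foo is negative and 0
   otherwise; its NOT is therefore -1 exactly when foo >= 0, which is what
   (ge foo 0) yields when STORE_FLAG_VALUE is -1.  */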
446 if (STORE_FLAG_VALUE == -1
447 && GET_CODE (op) == ASHIFTRT
448 && GET_CODE (XEXP (op, 1)) == CONST_INT
449 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
450 return simplify_gen_relational (GE, mode, VOIDmode,
451 XEXP (op, 0), const0_rtx);
452
453 break;
454
455 case NEG:
456 /* (neg (neg X)) == X. */
457 if (GET_CODE (op) == NEG)
458 return XEXP (op, 0);
459
460 /* (neg (plus X 1)) can become (not X). */
461 if (GET_CODE (op) == PLUS
462 && XEXP (op, 1) == const1_rtx)
463 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
464
465 /* Similarly, (neg (not X)) is (plus X 1). */
466 if (GET_CODE (op) == NOT)
467 return plus_constant (XEXP (op, 0), 1);
468
469 /* (neg (minus X Y)) can become (minus Y X). This transformation
470 isn't safe for modes with signed zeros, since if X and Y are
471 both +0, (minus Y X) is the same as (minus X Y). If the
472 rounding mode is towards +infinity (or -infinity) then the two
473 expressions will be rounded differently. */
474 if (GET_CODE (op) == MINUS
475 && !HONOR_SIGNED_ZEROS (mode)
476 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
477 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
478
479 if (GET_CODE (op) == PLUS
480 && !HONOR_SIGNED_ZEROS (mode)
481 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
482 {
483 /* (neg (plus A C)) is simplified to (minus -C A). */
484 if (GET_CODE (XEXP (op, 1)) == CONST_INT
485 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
486 {
487 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
488 if (temp)
489 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
490 }
491
492 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
493 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
494 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
495 }
496
497 /* (neg (mult A B)) becomes (mult (neg A) B).
498 This works even for floating-point values. */
499 if (GET_CODE (op) == MULT
500 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
501 {
502 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
503 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
504 }
505
506 /* NEG commutes with ASHIFT since it is multiplication. Only do
507 this if we can then eliminate the NEG (e.g., if the operand
508 is a constant). */
509 if (GET_CODE (op) == ASHIFT)
510 {
511 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
512 if (temp)
513 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
514 }
515
516 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
517 C is equal to the width of MODE minus 1. */
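/* Both forms extract the sign bit into bit 0: (ashiftrt X (width - 1)) is
   0 or -1, whose negation is 0 or 1, exactly the value produced by
   (lshiftrt X (width - 1)).  */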
518 if (GET_CODE (op) == ASHIFTRT
519 && GET_CODE (XEXP (op, 1)) == CONST_INT
520 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
521 return simplify_gen_binary (LSHIFTRT, mode,
522 XEXP (op, 0), XEXP (op, 1));
523
524 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
525 C is equal to the width of MODE minus 1. */
526 if (GET_CODE (op) == LSHIFTRT
527 && GET_CODE (XEXP (op, 1)) == CONST_INT
528 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
529 return simplify_gen_binary (ASHIFTRT, mode,
530 XEXP (op, 0), XEXP (op, 1));
531
532 break;
533
534 case SIGN_EXTEND:
535 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
536 becomes just the MINUS if its mode is MODE. This allows
537 folding switch statements on machines using casesi (such as
538 the VAX). */
539 if (GET_CODE (op) == TRUNCATE
540 && GET_MODE (XEXP (op, 0)) == mode
541 && GET_CODE (XEXP (op, 0)) == MINUS
542 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
543 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
544 return XEXP (op, 0);
545
546 /* Check for a sign extension of a subreg of a promoted
547 variable, where the promotion is sign-extended, and the
548 target mode is the same as the variable's promotion. */
549 if (GET_CODE (op) == SUBREG
550 && SUBREG_PROMOTED_VAR_P (op)
551 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
552 && GET_MODE (XEXP (op, 0)) == mode)
553 return XEXP (op, 0);
554
555 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
556 if (! POINTERS_EXTEND_UNSIGNED
557 && mode == Pmode && GET_MODE (op) == ptr_mode
558 && (CONSTANT_P (op)
559 || (GET_CODE (op) == SUBREG
560 && REG_P (SUBREG_REG (op))
561 && REG_POINTER (SUBREG_REG (op))
562 && GET_MODE (SUBREG_REG (op)) == Pmode)))
563 return convert_memory_address (Pmode, op);
564 #endif
565 break;
566
567 case ZERO_EXTEND:
568 /* Check for a zero extension of a subreg of a promoted
569 variable, where the promotion is zero-extended, and the
570 target mode is the same as the variable's promotion. */
571 if (GET_CODE (op) == SUBREG
572 && SUBREG_PROMOTED_VAR_P (op)
573 && SUBREG_PROMOTED_UNSIGNED_P (op)
574 && GET_MODE (XEXP (op, 0)) == mode)
575 return XEXP (op, 0);
576
577 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
578 if (POINTERS_EXTEND_UNSIGNED > 0
579 && mode == Pmode && GET_MODE (op) == ptr_mode
580 && (CONSTANT_P (op)
581 || (GET_CODE (op) == SUBREG
582 && REG_P (SUBREG_REG (op))
583 && REG_POINTER (SUBREG_REG (op))
584 && GET_MODE (SUBREG_REG (op)) == Pmode)))
585 return convert_memory_address (Pmode, op);
586 #endif
587 break;
588
589 default:
590 break;
591 }
592
593 return 0;
594 }
595
596 /* Try to compute the value of a unary operation CODE whose output mode is to
597 be MODE with input operand OP whose mode was originally OP_MODE.
598 Return zero if the value cannot be computed. */
599 rtx
600 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
601 rtx op, enum machine_mode op_mode)
602 {
603 unsigned int width = GET_MODE_BITSIZE (mode);
604
605 if (code == VEC_DUPLICATE)
606 {
607 gcc_assert (VECTOR_MODE_P (mode));
608 if (GET_MODE (op) != VOIDmode)
609 {
610 if (!VECTOR_MODE_P (GET_MODE (op)))
611 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
612 else
613 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
614 (GET_MODE (op)));
615 }
616 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
617 || GET_CODE (op) == CONST_VECTOR)
618 {
619 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
620 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
621 rtvec v = rtvec_alloc (n_elts);
622 unsigned int i;
623
624 if (GET_CODE (op) != CONST_VECTOR)
625 for (i = 0; i < n_elts; i++)
626 RTVEC_ELT (v, i) = op;
627 else
628 {
629 enum machine_mode inmode = GET_MODE (op);
630 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
631 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
632
633 gcc_assert (in_n_elts < n_elts);
634 gcc_assert ((n_elts % in_n_elts) == 0);
635 for (i = 0; i < n_elts; i++)
636 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
637 }
638 return gen_rtx_CONST_VECTOR (mode, v);
639 }
640 }
641
642 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
643 {
644 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
645 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
646 enum machine_mode opmode = GET_MODE (op);
647 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
648 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
649 rtvec v = rtvec_alloc (n_elts);
650 unsigned int i;
651
652 gcc_assert (op_n_elts == n_elts);
653 for (i = 0; i < n_elts; i++)
654 {
655 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
656 CONST_VECTOR_ELT (op, i),
657 GET_MODE_INNER (opmode));
658 if (!x)
659 return 0;
660 RTVEC_ELT (v, i) = x;
661 }
662 return gen_rtx_CONST_VECTOR (mode, v);
663 }
664
665 /* The order of these tests is critical so that, for example, we don't
666 check the wrong mode (input vs. output) for a conversion operation,
667 such as FIX. At some point, this should be simplified. */
668
669 if (code == FLOAT && GET_MODE (op) == VOIDmode
670 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
671 {
672 HOST_WIDE_INT hv, lv;
673 REAL_VALUE_TYPE d;
674
675 if (GET_CODE (op) == CONST_INT)
676 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
677 else
678 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
679
680 REAL_VALUE_FROM_INT (d, lv, hv, mode);
681 d = real_value_truncate (mode, d);
682 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
683 }
684 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
685 && (GET_CODE (op) == CONST_DOUBLE
686 || GET_CODE (op) == CONST_INT))
687 {
688 HOST_WIDE_INT hv, lv;
689 REAL_VALUE_TYPE d;
690
691 if (GET_CODE (op) == CONST_INT)
692 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
693 else
694 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
695
696 if (op_mode == VOIDmode)
697 {
698 /* We don't know how to interpret negative-looking numbers in
699 this case, so don't try to fold those. */
700 if (hv < 0)
701 return 0;
702 }
703 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
704 ;
705 else
706 hv = 0, lv &= GET_MODE_MASK (op_mode);
707
708 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
709 d = real_value_truncate (mode, d);
710 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
711 }
712
713 if (GET_CODE (op) == CONST_INT
714 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
715 {
716 HOST_WIDE_INT arg0 = INTVAL (op);
717 HOST_WIDE_INT val;
718
719 switch (code)
720 {
721 case NOT:
722 val = ~ arg0;
723 break;
724
725 case NEG:
726 val = - arg0;
727 break;
728
729 case ABS:
730 val = (arg0 >= 0 ? arg0 : - arg0);
731 break;
732
733 case FFS:
734 /* Don't use ffs here. Instead, get low order bit and then its
735 number. If arg0 is zero, this will return 0, as desired. */
736 arg0 &= GET_MODE_MASK (mode);
737 val = exact_log2 (arg0 & (- arg0)) + 1;
738 break;
739
740 case CLZ:
741 arg0 &= GET_MODE_MASK (mode);
742 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
743 ;
744 else
745 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
746 break;
747
748 case CTZ:
749 arg0 &= GET_MODE_MASK (mode);
750 if (arg0 == 0)
751 {
752 /* Even if the value at zero is undefined, we have to come
753 up with some replacement. Seems good enough. */
754 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
755 val = GET_MODE_BITSIZE (mode);
756 }
757 else
758 val = exact_log2 (arg0 & -arg0);
759 break;
760
761 case POPCOUNT:
762 arg0 &= GET_MODE_MASK (mode);
763 val = 0;
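/* arg0 &= arg0 - 1 clears the lowest set bit, so the loop below runs
   once per set bit.  */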
764 while (arg0)
765 val++, arg0 &= arg0 - 1;
766 break;
767
768 case PARITY:
769 arg0 &= GET_MODE_MASK (mode);
770 val = 0;
771 while (arg0)
772 val++, arg0 &= arg0 - 1;
773 val &= 1;
774 break;
775
776 case TRUNCATE:
777 val = arg0;
778 break;
779
780 case ZERO_EXTEND:
781 /* When zero-extending a CONST_INT, we need to know its
782 original mode. */
783 gcc_assert (op_mode != VOIDmode);
784 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
785 {
786 /* If we were really extending the mode,
787 we would have to distinguish between zero-extension
788 and sign-extension. */
789 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
790 val = arg0;
791 }
792 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
793 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
794 else
795 return 0;
796 break;
797
798 case SIGN_EXTEND:
799 if (op_mode == VOIDmode)
800 op_mode = mode;
801 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
802 {
803 /* If we were really extending the mode,
804 we would have to distinguish between zero-extension
805 and sign-extension. */
806 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
807 val = arg0;
808 }
809 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
810 {
811 val
812 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
813 if (val
814 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
815 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
816 }
817 else
818 return 0;
819 break;
820
821 case SQRT:
822 case FLOAT_EXTEND:
823 case FLOAT_TRUNCATE:
824 case SS_TRUNCATE:
825 case US_TRUNCATE:
826 return 0;
827
828 default:
829 gcc_unreachable ();
830 }
831
832 return gen_int_mode (val, mode);
833 }
834
835 /* We can do some operations on integer CONST_DOUBLEs. Also allow
836 for a DImode operation on a CONST_INT. */
837 else if (GET_MODE (op) == VOIDmode
838 && width <= HOST_BITS_PER_WIDE_INT * 2
839 && (GET_CODE (op) == CONST_DOUBLE
840 || GET_CODE (op) == CONST_INT))
841 {
842 unsigned HOST_WIDE_INT l1, lv;
843 HOST_WIDE_INT h1, hv;
844
845 if (GET_CODE (op) == CONST_DOUBLE)
846 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
847 else
848 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
849
850 switch (code)
851 {
852 case NOT:
853 lv = ~ l1;
854 hv = ~ h1;
855 break;
856
857 case NEG:
858 neg_double (l1, h1, &lv, &hv);
859 break;
860
861 case ABS:
862 if (h1 < 0)
863 neg_double (l1, h1, &lv, &hv);
864 else
865 lv = l1, hv = h1;
866 break;
867
868 case FFS:
869 hv = 0;
870 if (l1 == 0)
871 {
872 if (h1 == 0)
873 lv = 0;
874 else
875 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
876 }
877 else
878 lv = exact_log2 (l1 & -l1) + 1;
879 break;
880
881 case CLZ:
882 hv = 0;
883 if (h1 != 0)
884 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
885 - HOST_BITS_PER_WIDE_INT;
886 else if (l1 != 0)
887 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
888 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
889 lv = GET_MODE_BITSIZE (mode);
890 break;
891
892 case CTZ:
893 hv = 0;
894 if (l1 != 0)
895 lv = exact_log2 (l1 & -l1);
896 else if (h1 != 0)
897 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
898 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
899 lv = GET_MODE_BITSIZE (mode);
900 break;
901
902 case POPCOUNT:
903 hv = 0;
904 lv = 0;
905 while (l1)
906 lv++, l1 &= l1 - 1;
907 while (h1)
908 lv++, h1 &= h1 - 1;
909 break;
910
911 case PARITY:
912 hv = 0;
913 lv = 0;
914 while (l1)
915 lv++, l1 &= l1 - 1;
916 while (h1)
917 lv++, h1 &= h1 - 1;
918 lv &= 1;
919 break;
920
921 case TRUNCATE:
922 /* This is just a change-of-mode, so do nothing. */
923 lv = l1, hv = h1;
924 break;
925
926 case ZERO_EXTEND:
927 gcc_assert (op_mode != VOIDmode);
928
929 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
930 return 0;
931
932 hv = 0;
933 lv = l1 & GET_MODE_MASK (op_mode);
934 break;
935
936 case SIGN_EXTEND:
937 if (op_mode == VOIDmode
938 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
939 return 0;
940 else
941 {
942 lv = l1 & GET_MODE_MASK (op_mode);
943 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
944 && (lv & ((HOST_WIDE_INT) 1
945 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
946 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
947
948 hv = HWI_SIGN_EXTEND (lv);
949 }
950 break;
951
952 case SQRT:
953 return 0;
954
955 default:
956 return 0;
957 }
958
959 return immed_double_const (lv, hv, mode);
960 }
961
962 else if (GET_CODE (op) == CONST_DOUBLE
963 && GET_MODE_CLASS (mode) == MODE_FLOAT)
964 {
965 REAL_VALUE_TYPE d, t;
966 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
967
968 switch (code)
969 {
970 case SQRT:
971 if (HONOR_SNANS (mode) && real_isnan (&d))
972 return 0;
973 real_sqrt (&t, mode, &d);
974 d = t;
975 break;
976 case ABS:
977 d = REAL_VALUE_ABS (d);
978 break;
979 case NEG:
980 d = REAL_VALUE_NEGATE (d);
981 break;
982 case FLOAT_TRUNCATE:
983 d = real_value_truncate (mode, d);
984 break;
985 case FLOAT_EXTEND:
986 /* All this does is change the mode. */
987 break;
988 case FIX:
989 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
990 break;
991 case NOT:
992 {
993 long tmp[4];
994 int i;
995
996 real_to_target (tmp, &d, GET_MODE (op));
997 for (i = 0; i < 4; i++)
998 tmp[i] = ~tmp[i];
999 real_from_target (&d, tmp, mode);
1000 break;
1001 }
1002 default:
1003 gcc_unreachable ();
1004 }
1005 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1006 }
1007
1008 else if (GET_CODE (op) == CONST_DOUBLE
1009 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
1010 && GET_MODE_CLASS (mode) == MODE_INT
1011 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1012 {
1013 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1014 operators are intentionally left unspecified (to ease implementation
1015 by target backends), for consistency, this routine implements the
1016 same semantics for constant folding as used by the middle-end. */
1017
1018 /* This was formerly used only for non-IEEE float.
1019 eggert@twinsun.com says it is safe for IEEE also. */
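/* For example, folding (fix:SI X) for X == 3e9 saturates to the SImode
   maximum 0x7fffffff, while (unsigned_fix:SI X) for negative or NaN X
   folds to zero.  */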
1020 HOST_WIDE_INT xh, xl, th, tl;
1021 REAL_VALUE_TYPE x, t;
1022 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1023 switch (code)
1024 {
1025 case FIX:
1026 if (REAL_VALUE_ISNAN (x))
1027 return const0_rtx;
1028
1029 /* Test against the signed upper bound. */
1030 if (width > HOST_BITS_PER_WIDE_INT)
1031 {
1032 th = ((unsigned HOST_WIDE_INT) 1
1033 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1034 tl = -1;
1035 }
1036 else
1037 {
1038 th = 0;
1039 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1040 }
1041 real_from_integer (&t, VOIDmode, tl, th, 0);
1042 if (REAL_VALUES_LESS (t, x))
1043 {
1044 xh = th;
1045 xl = tl;
1046 break;
1047 }
1048
1049 /* Test against the signed lower bound. */
1050 if (width > HOST_BITS_PER_WIDE_INT)
1051 {
1052 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1053 tl = 0;
1054 }
1055 else
1056 {
1057 th = -1;
1058 tl = (HOST_WIDE_INT) -1 << (width - 1);
1059 }
1060 real_from_integer (&t, VOIDmode, tl, th, 0);
1061 if (REAL_VALUES_LESS (x, t))
1062 {
1063 xh = th;
1064 xl = tl;
1065 break;
1066 }
1067 REAL_VALUE_TO_INT (&xl, &xh, x);
1068 break;
1069
1070 case UNSIGNED_FIX:
1071 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1072 return const0_rtx;
1073
1074 /* Test against the unsigned upper bound. */
1075 if (width == 2*HOST_BITS_PER_WIDE_INT)
1076 {
1077 th = -1;
1078 tl = -1;
1079 }
1080 else if (width >= HOST_BITS_PER_WIDE_INT)
1081 {
1082 th = ((unsigned HOST_WIDE_INT) 1
1083 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1084 tl = -1;
1085 }
1086 else
1087 {
1088 th = 0;
1089 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1090 }
1091 real_from_integer (&t, VOIDmode, tl, th, 1);
1092 if (REAL_VALUES_LESS (t, x))
1093 {
1094 xh = th;
1095 xl = tl;
1096 break;
1097 }
1098
1099 REAL_VALUE_TO_INT (&xl, &xh, x);
1100 break;
1101
1102 default:
1103 gcc_unreachable ();
1104 }
1105 return immed_double_const (xl, xh, mode);
1106 }
1107
1108 return NULL_RTX;
1109 }
1110 \f
1111 /* Subroutine of simplify_binary_operation to simplify a commutative,
1112 associative binary operation CODE with result mode MODE, operating
1113 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1114 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1115 canonicalization is possible. */
1116
1117 static rtx
1118 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1119 rtx op0, rtx op1)
1120 {
1121 rtx tem;
1122
1123 /* Linearize the operator to the left. */
1124 if (GET_CODE (op1) == code)
1125 {
 1126 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
1127 if (GET_CODE (op0) == code)
1128 {
1129 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1130 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1131 }
1132
1133 /* "a op (b op c)" becomes "(b op c) op a". */
1134 if (! swap_commutative_operands_p (op1, op0))
1135 return simplify_gen_binary (code, mode, op1, op0);
1136
1137 tem = op0;
1138 op0 = op1;
1139 op1 = tem;
1140 }
1141
1142 if (GET_CODE (op0) == code)
1143 {
1144 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1145 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1146 {
1147 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1148 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1149 }
1150
1151 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1152 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1153 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1154 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1155 if (tem != 0)
1156 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1157
1158 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1159 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1160 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1161 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1162 if (tem != 0)
1163 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1164 }
1165
1166 return 0;
1167 }
1168
1169
1170 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1171 and OP1. Return 0 if no simplification is possible.
1172
1173 Don't use this for relational operations such as EQ or LT.
1174 Use simplify_relational_operation instead. */
1175 rtx
1176 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1177 rtx op0, rtx op1)
1178 {
1179 rtx trueop0, trueop1;
1180 rtx tem;
1181
1182 /* Relational operations don't work here. We must know the mode
1183 of the operands in order to do the comparison correctly.
1184 Assuming a full word can give incorrect results.
1185 Consider comparing 128 with -128 in QImode. */
1186 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1187 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1188
1189 /* Make sure the constant is second. */
1190 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1191 && swap_commutative_operands_p (op0, op1))
1192 {
1193 tem = op0, op0 = op1, op1 = tem;
1194 }
1195
1196 trueop0 = avoid_constant_pool_reference (op0);
1197 trueop1 = avoid_constant_pool_reference (op1);
1198
1199 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1200 if (tem)
1201 return tem;
1202 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1203 }
1204
1205 static rtx
1206 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1207 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1208 {
1209 rtx tem;
1210 HOST_WIDE_INT val;
1211 unsigned int width = GET_MODE_BITSIZE (mode);
1212
1213 /* Even if we can't compute a constant result,
1214 there are some cases worth simplifying. */
1215
1216 switch (code)
1217 {
1218 case PLUS:
1219 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1220 when x is NaN, infinite, or finite and nonzero. They aren't
1221 when x is -0 and the rounding mode is not towards -infinity,
1222 since (-0) + 0 is then 0. */
1223 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1224 return op0;
1225
1226 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1227 transformations are safe even for IEEE. */
1228 if (GET_CODE (op0) == NEG)
1229 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1230 else if (GET_CODE (op1) == NEG)
1231 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1232
1233 /* (~a) + 1 -> -a */
1234 if (INTEGRAL_MODE_P (mode)
1235 && GET_CODE (op0) == NOT
1236 && trueop1 == const1_rtx)
1237 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1238
1239 /* Handle both-operands-constant cases. We can only add
1240 CONST_INTs to constants since the sum of relocatable symbols
1241 can't be handled by most assemblers. Don't add CONST_INT
1242 to CONST_INT since overflow won't be computed properly if wider
1243 than HOST_BITS_PER_WIDE_INT. */
1244
1245 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1246 && GET_CODE (op1) == CONST_INT)
1247 return plus_constant (op0, INTVAL (op1));
1248 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1249 && GET_CODE (op0) == CONST_INT)
1250 return plus_constant (op1, INTVAL (op0));
1251
1252 /* See if this is something like X * C - X or vice versa or
1253 if the multiplication is written as a shift. If so, we can
1254 distribute and make a new multiply, shift, or maybe just
1255 have X (if C is 2 in the example above). But don't make
1256 something more expensive than we had before. */
1257
1258 if (! FLOAT_MODE_P (mode))
1259 {
1260 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1261 rtx lhs = op0, rhs = op1;
1262
1263 if (GET_CODE (lhs) == NEG)
1264 coeff0 = -1, lhs = XEXP (lhs, 0);
1265 else if (GET_CODE (lhs) == MULT
1266 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1267 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1268 else if (GET_CODE (lhs) == ASHIFT
1269 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1270 && INTVAL (XEXP (lhs, 1)) >= 0
1271 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1272 {
1273 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1274 lhs = XEXP (lhs, 0);
1275 }
1276
1277 if (GET_CODE (rhs) == NEG)
1278 coeff1 = -1, rhs = XEXP (rhs, 0);
1279 else if (GET_CODE (rhs) == MULT
1280 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1281 {
1282 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1283 }
1284 else if (GET_CODE (rhs) == ASHIFT
1285 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1286 && INTVAL (XEXP (rhs, 1)) >= 0
1287 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1288 {
1289 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1290 rhs = XEXP (rhs, 0);
1291 }
1292
1293 if (rtx_equal_p (lhs, rhs))
1294 {
1295 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1296 tem = simplify_gen_binary (MULT, mode, lhs,
1297 GEN_INT (coeff0 + coeff1));
1298 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1299 ? tem : 0;
1300 }
1301 }
1302
1303 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
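/* Adding the sign bit can only carry out of the mode, so adding C2 is
   equivalent to XORing it in, and the two XORs then combine.  */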
1304 if ((GET_CODE (op1) == CONST_INT
1305 || GET_CODE (op1) == CONST_DOUBLE)
1306 && GET_CODE (op0) == XOR
1307 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1308 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1309 && mode_signbit_p (mode, op1))
1310 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1311 simplify_gen_binary (XOR, mode, op1,
1312 XEXP (op0, 1)));
1313
1314 /* If one of the operands is a PLUS or a MINUS, see if we can
1315 simplify this by the associative law.
1316 Don't use the associative law for floating point.
1317 The inaccuracy makes it nonassociative,
1318 and subtle programs can break if operations are associated. */
1319
1320 if (INTEGRAL_MODE_P (mode)
1321 && (plus_minus_operand_p (op0)
1322 || plus_minus_operand_p (op1))
1323 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1324 return tem;
1325
1326 /* Reassociate floating point addition only when the user
1327 specifies unsafe math optimizations. */
1328 if (FLOAT_MODE_P (mode)
1329 && flag_unsafe_math_optimizations)
1330 {
1331 tem = simplify_associative_operation (code, mode, op0, op1);
1332 if (tem)
1333 return tem;
1334 }
1335 break;
1336
1337 case COMPARE:
1338 #ifdef HAVE_cc0
1339 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1340 using cc0, in which case we want to leave it as a COMPARE
1341 so we can distinguish it from a register-register-copy.
1342
1343 In IEEE floating point, x-0 is not the same as x. */
1344
1345 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1346 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1347 && trueop1 == CONST0_RTX (mode))
1348 return op0;
1349 #endif
1350
1351 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1352 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1353 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1354 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1355 {
1356 rtx xop00 = XEXP (op0, 0);
1357 rtx xop10 = XEXP (op1, 0);
1358
1359 #ifdef HAVE_cc0
1360 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1361 #else
1362 if (REG_P (xop00) && REG_P (xop10)
1363 && GET_MODE (xop00) == GET_MODE (xop10)
1364 && REGNO (xop00) == REGNO (xop10)
1365 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1366 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1367 #endif
1368 return xop00;
1369 }
1370 break;
1371
1372 case MINUS:
1373 /* We can't assume x-x is 0 even with non-IEEE floating point,
1374 but since it is zero except in very strange circumstances, we
1375 will treat it as zero with -funsafe-math-optimizations. */
1376 if (rtx_equal_p (trueop0, trueop1)
1377 && ! side_effects_p (op0)
1378 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1379 return CONST0_RTX (mode);
1380
1381 /* Change subtraction from zero into negation. (0 - x) is the
1382 same as -x when x is NaN, infinite, or finite and nonzero.
1383 But if the mode has signed zeros, and does not round towards
1384 -infinity, then 0 - 0 is 0, not -0. */
1385 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1386 return simplify_gen_unary (NEG, mode, op1, mode);
1387
1388 /* (-1 - a) is ~a. */
1389 if (trueop0 == constm1_rtx)
1390 return simplify_gen_unary (NOT, mode, op1, mode);
1391
1392 /* Subtracting 0 has no effect unless the mode has signed zeros
1393 and supports rounding towards -infinity. In such a case,
1394 0 - 0 is -0. */
1395 if (!(HONOR_SIGNED_ZEROS (mode)
1396 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1397 && trueop1 == CONST0_RTX (mode))
1398 return op0;
1399
1400 /* See if this is something like X * C - X or vice versa or
1401 if the multiplication is written as a shift. If so, we can
1402 distribute and make a new multiply, shift, or maybe just
1403 have X (if C is 2 in the example above). But don't make
1404 something more expensive than we had before. */
1405
1406 if (! FLOAT_MODE_P (mode))
1407 {
1408 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1409 rtx lhs = op0, rhs = op1;
1410
1411 if (GET_CODE (lhs) == NEG)
1412 coeff0 = -1, lhs = XEXP (lhs, 0);
1413 else if (GET_CODE (lhs) == MULT
1414 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1415 {
1416 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1417 }
1418 else if (GET_CODE (lhs) == ASHIFT
1419 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1420 && INTVAL (XEXP (lhs, 1)) >= 0
1421 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1422 {
1423 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1424 lhs = XEXP (lhs, 0);
1425 }
1426
1427 if (GET_CODE (rhs) == NEG)
 1428 coeff1 = -1, rhs = XEXP (rhs, 0);
1429 else if (GET_CODE (rhs) == MULT
1430 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1431 {
1432 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1433 }
1434 else if (GET_CODE (rhs) == ASHIFT
1435 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1436 && INTVAL (XEXP (rhs, 1)) >= 0
1437 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1438 {
1439 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1440 rhs = XEXP (rhs, 0);
1441 }
1442
1443 if (rtx_equal_p (lhs, rhs))
1444 {
1445 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1446 tem = simplify_gen_binary (MULT, mode, lhs,
1447 GEN_INT (coeff0 - coeff1));
1448 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1449 ? tem : 0;
1450 }
1451 }
1452
1453 /* (a - (-b)) -> (a + b). True even for IEEE. */
1454 if (GET_CODE (op1) == NEG)
1455 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1456
1457 /* (-x - c) may be simplified as (-c - x). */
1458 if (GET_CODE (op0) == NEG
1459 && (GET_CODE (op1) == CONST_INT
1460 || GET_CODE (op1) == CONST_DOUBLE))
1461 {
1462 tem = simplify_unary_operation (NEG, mode, op1, mode);
1463 if (tem)
1464 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1465 }
1466
1467 /* If one of the operands is a PLUS or a MINUS, see if we can
1468 simplify this by the associative law.
1469 Don't use the associative law for floating point.
1470 The inaccuracy makes it nonassociative,
1471 and subtle programs can break if operations are associated. */
1472
1473 if (INTEGRAL_MODE_P (mode)
1474 && (plus_minus_operand_p (op0)
1475 || plus_minus_operand_p (op1))
1476 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1477 return tem;
1478
1479 /* Don't let a relocatable value get a negative coeff. */
1480 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1481 return simplify_gen_binary (PLUS, mode,
1482 op0,
1483 neg_const_int (mode, op1));
1484
1485 /* (x - (x & y)) -> (x & ~y) */
1486 if (GET_CODE (op1) == AND)
1487 {
1488 if (rtx_equal_p (op0, XEXP (op1, 0)))
1489 {
1490 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1491 GET_MODE (XEXP (op1, 1)));
1492 return simplify_gen_binary (AND, mode, op0, tem);
1493 }
1494 if (rtx_equal_p (op0, XEXP (op1, 1)))
1495 {
1496 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1497 GET_MODE (XEXP (op1, 0)));
1498 return simplify_gen_binary (AND, mode, op0, tem);
1499 }
1500 }
1501 break;
1502
1503 case MULT:
1504 if (trueop1 == constm1_rtx)
1505 return simplify_gen_unary (NEG, mode, op0, mode);
1506
1507 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1508 x is NaN, since x * 0 is then also NaN. Nor is it valid
1509 when the mode has signed zeros, since multiplying a negative
1510 number by 0 will give -0, not 0. */
1511 if (!HONOR_NANS (mode)
1512 && !HONOR_SIGNED_ZEROS (mode)
1513 && trueop1 == CONST0_RTX (mode)
1514 && ! side_effects_p (op0))
1515 return op1;
1516
1517 /* In IEEE floating point, x*1 is not equivalent to x for
1518 signalling NaNs. */
1519 if (!HONOR_SNANS (mode)
1520 && trueop1 == CONST1_RTX (mode))
1521 return op0;
1522
1523 /* Convert multiply by constant power of two into shift unless
1524 we are still generating RTL. This test is a kludge. */
1525 if (GET_CODE (trueop1) == CONST_INT
1526 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1527 /* If the mode is larger than the host word size, and the
1528 uppermost bit is set, then this isn't a power of two due
1529 to implicit sign extension. */
1530 && (width <= HOST_BITS_PER_WIDE_INT
1531 || val != HOST_BITS_PER_WIDE_INT - 1))
1532 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1533
1534 /* x*2 is x+x and x*(-1) is -x */
1535 if (GET_CODE (trueop1) == CONST_DOUBLE
1536 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1537 && GET_MODE (op0) == mode)
1538 {
1539 REAL_VALUE_TYPE d;
1540 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1541
1542 if (REAL_VALUES_EQUAL (d, dconst2))
1543 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1544
1545 if (REAL_VALUES_EQUAL (d, dconstm1))
1546 return simplify_gen_unary (NEG, mode, op0, mode);
1547 }
1548
1549 /* Reassociate multiplication, but for floating point MULTs
1550 only when the user specifies unsafe math optimizations. */
1551 if (! FLOAT_MODE_P (mode)
1552 || flag_unsafe_math_optimizations)
1553 {
1554 tem = simplify_associative_operation (code, mode, op0, op1);
1555 if (tem)
1556 return tem;
1557 }
1558 break;
1559
1560 case IOR:
1561 if (trueop1 == const0_rtx)
1562 return op0;
1563 if (GET_CODE (trueop1) == CONST_INT
1564 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1565 == GET_MODE_MASK (mode)))
1566 return op1;
1567 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1568 return op0;
1569 /* A | (~A) -> -1 */
1570 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1571 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1572 && ! side_effects_p (op0)
1573 && GET_MODE_CLASS (mode) != MODE_CC)
1574 return constm1_rtx;
1575 tem = simplify_associative_operation (code, mode, op0, op1);
1576 if (tem)
1577 return tem;
1578 break;
1579
1580 case XOR:
1581 if (trueop1 == const0_rtx)
1582 return op0;
1583 if (GET_CODE (trueop1) == CONST_INT
1584 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1585 == GET_MODE_MASK (mode)))
1586 return simplify_gen_unary (NOT, mode, op0, mode);
1587 if (trueop0 == trueop1
1588 && ! side_effects_p (op0)
1589 && GET_MODE_CLASS (mode) != MODE_CC)
1590 return const0_rtx;
1591
1592 /* Canonicalize XOR of the most significant bit to PLUS. */
1593 if ((GET_CODE (op1) == CONST_INT
1594 || GET_CODE (op1) == CONST_DOUBLE)
1595 && mode_signbit_p (mode, op1))
1596 return simplify_gen_binary (PLUS, mode, op0, op1);
1597 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1598 if ((GET_CODE (op1) == CONST_INT
1599 || GET_CODE (op1) == CONST_DOUBLE)
1600 && GET_CODE (op0) == PLUS
1601 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1602 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1603 && mode_signbit_p (mode, XEXP (op0, 1)))
1604 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1605 simplify_gen_binary (XOR, mode, op1,
1606 XEXP (op0, 1)));
1607
1608 tem = simplify_associative_operation (code, mode, op0, op1);
1609 if (tem)
1610 return tem;
1611 break;
1612
1613 case AND:
1614 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1615 return const0_rtx;
1616 /* If we are turning off bits already known off in OP0, we need
1617 not do an AND. */
1618 if (GET_CODE (trueop1) == CONST_INT
1619 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1620 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1621 return op0;
1622 if (trueop0 == trueop1 && ! side_effects_p (op0)
1623 && GET_MODE_CLASS (mode) != MODE_CC)
1624 return op0;
1625 /* A & (~A) -> 0 */
1626 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1627 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1628 && ! side_effects_p (op0)
1629 && GET_MODE_CLASS (mode) != MODE_CC)
1630 return const0_rtx;
1631
1632 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1633 there are no nonzero bits of C outside of X's mode. */
1634 if ((GET_CODE (op0) == SIGN_EXTEND
1635 || GET_CODE (op0) == ZERO_EXTEND)
1636 && GET_CODE (trueop1) == CONST_INT
1637 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1638 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
1639 & INTVAL (trueop1)) == 0)
1640 {
1641 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
1642 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
1643 gen_int_mode (INTVAL (trueop1),
1644 imode));
1645 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
1646 }
1647
1648 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1649 ((A & N) + B) & M -> (A + B) & M
1650 Similarly if (N & M) == 0,
1651 ((A | N) + B) & M -> (A + B) & M
1652 and for - instead of + and/or ^ instead of |. */
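/* For instance, with M == 0xff and N == 0x1ff, ((A & 0x1ff) + B) & 0xff
   equals (A + B) & 0xff: the bits cleared in A lie above the mask and,
   since carries only propagate upward, cannot affect the retained bits.  */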
1653 if (GET_CODE (trueop1) == CONST_INT
1654 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1655 && ~INTVAL (trueop1)
1656 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1657 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1658 {
1659 rtx pmop[2];
1660 int which;
1661
1662 pmop[0] = XEXP (op0, 0);
1663 pmop[1] = XEXP (op0, 1);
1664
1665 for (which = 0; which < 2; which++)
1666 {
1667 tem = pmop[which];
1668 switch (GET_CODE (tem))
1669 {
1670 case AND:
1671 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1672 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1673 == INTVAL (trueop1))
1674 pmop[which] = XEXP (tem, 0);
1675 break;
1676 case IOR:
1677 case XOR:
1678 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1679 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1680 pmop[which] = XEXP (tem, 0);
1681 break;
1682 default:
1683 break;
1684 }
1685 }
1686
1687 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1688 {
1689 tem = simplify_gen_binary (GET_CODE (op0), mode,
1690 pmop[0], pmop[1]);
1691 return simplify_gen_binary (code, mode, tem, op1);
1692 }
1693 }
1694 tem = simplify_associative_operation (code, mode, op0, op1);
1695 if (tem)
1696 return tem;
1697 break;
1698
1699 case UDIV:
1700 /* 0/x is 0 (or x&0 if x has side-effects). */
1701 if (trueop0 == const0_rtx)
1702 return side_effects_p (op1)
1703 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1704 : const0_rtx;
1705 /* x/1 is x. */
1706 if (trueop1 == const1_rtx)
1707 {
1708 /* Handle narrowing UDIV. */
1709 rtx x = gen_lowpart_common (mode, op0);
1710 if (x)
1711 return x;
1712 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1713 return gen_lowpart_SUBREG (mode, op0);
1714 return op0;
1715 }
1716 /* Convert divide by power of two into shift. */
1717 if (GET_CODE (trueop1) == CONST_INT
1718 && (val = exact_log2 (INTVAL (trueop1))) > 0)
1719 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
1720 break;
1721
1722 case DIV:
1723 /* Handle floating point and integers separately. */
1724 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1725 {
1726 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1727 safe for modes with NaNs, since 0.0 / 0.0 will then be
1728 NaN rather than 0.0. Nor is it safe for modes with signed
1729 zeros, since dividing 0 by a negative number gives -0.0 */
1730 if (trueop0 == CONST0_RTX (mode)
1731 && !HONOR_NANS (mode)
1732 && !HONOR_SIGNED_ZEROS (mode)
1733 && ! side_effects_p (op1))
1734 return op0;
1735 /* x/1.0 is x. */
1736 if (trueop1 == CONST1_RTX (mode)
1737 && !HONOR_SNANS (mode))
1738 return op0;
1739
1740 if (GET_CODE (trueop1) == CONST_DOUBLE
1741 && trueop1 != CONST0_RTX (mode))
1742 {
1743 REAL_VALUE_TYPE d;
1744 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1745
1746 /* x/-1.0 is -x. */
1747 if (REAL_VALUES_EQUAL (d, dconstm1)
1748 && !HONOR_SNANS (mode))
1749 return simplify_gen_unary (NEG, mode, op0, mode);
1750
1751 /* Change FP division by a constant into multiplication.
1752 Only do this with -funsafe-math-optimizations. */
1753 if (flag_unsafe_math_optimizations
1754 && !REAL_VALUES_EQUAL (d, dconst0))
1755 {
1756 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1757 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1758 return simplify_gen_binary (MULT, mode, op0, tem);
1759 }
1760 }
1761 }
1762 else
1763 {
1764 /* 0/x is 0 (or x&0 if x has side-effects). */
1765 if (trueop0 == const0_rtx)
1766 return side_effects_p (op1)
1767 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1768 : const0_rtx;
1769 /* x/1 is x. */
1770 if (trueop1 == const1_rtx)
1771 {
1772 /* Handle narrowing DIV. */
1773 rtx x = gen_lowpart_common (mode, op0);
1774 if (x)
1775 return x;
1776 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1777 return gen_lowpart_SUBREG (mode, op0);
1778 return op0;
1779 }
1780 /* x/-1 is -x. */
1781 if (trueop1 == constm1_rtx)
1782 {
1783 rtx x = gen_lowpart_common (mode, op0);
1784 if (!x)
1785 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1786 ? gen_lowpart_SUBREG (mode, op0) : op0;
1787 return simplify_gen_unary (NEG, mode, x, mode);
1788 }
1789 }
1790 break;
1791
1792 case UMOD:
1793 /* 0%x is 0 (or x&0 if x has side-effects). */
1794 if (trueop0 == const0_rtx)
1795 return side_effects_p (op1)
1796 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1797 : const0_rtx;
 1798 /* x%1 is 0 (or x&0 if x has side-effects). */
1799 if (trueop1 == const1_rtx)
1800 return side_effects_p (op0)
1801 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
1802 : const0_rtx;
1803 /* Implement modulus by power of two as AND. */
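/* E.g. an unsigned x % 8 becomes x & 7.  */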
1804 if (GET_CODE (trueop1) == CONST_INT
1805 && exact_log2 (INTVAL (trueop1)) > 0)
1806 return simplify_gen_binary (AND, mode, op0,
1807 GEN_INT (INTVAL (op1) - 1));
1808 break;
1809
1810 case MOD:
1811 /* 0%x is 0 (or x&0 if x has side-effects). */
1812 if (trueop0 == const0_rtx)
1813 return side_effects_p (op1)
1814 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1815 : const0_rtx;
 1816 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
1817 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
1818 return side_effects_p (op0)
1819 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
1820 : const0_rtx;
1821 break;
1822
1823 case ROTATERT:
1824 case ROTATE:
1825 case ASHIFTRT:
1826 /* Rotating ~0 always results in ~0. */
1827 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1828 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1829 && ! side_effects_p (op1))
1830 return op0;
1831
1832 /* Fall through.... */
1833
1834 case ASHIFT:
1835 case LSHIFTRT:
1836 if (trueop1 == const0_rtx)
1837 return op0;
1838 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1839 return op0;
1840 break;
1841
1842 case SMIN:
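/* (smin X C) is C when C is the smallest value representable in MODE,
   and (smin X X) is X; the SMAX, UMIN and UMAX cases below follow the
   analogous rules.  */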
1843 if (width <= HOST_BITS_PER_WIDE_INT
1844 && GET_CODE (trueop1) == CONST_INT
1845 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1846 && ! side_effects_p (op0))
1847 return op1;
1848 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1849 return op0;
1850 tem = simplify_associative_operation (code, mode, op0, op1);
1851 if (tem)
1852 return tem;
1853 break;
1854
1855 case SMAX:
1856 if (width <= HOST_BITS_PER_WIDE_INT
1857 && GET_CODE (trueop1) == CONST_INT
1858 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1859 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1860 && ! side_effects_p (op0))
1861 return op1;
1862 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1863 return op0;
1864 tem = simplify_associative_operation (code, mode, op0, op1);
1865 if (tem)
1866 return tem;
1867 break;
1868
1869 case UMIN:
1870 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1871 return op1;
1872 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1873 return op0;
1874 tem = simplify_associative_operation (code, mode, op0, op1);
1875 if (tem)
1876 return tem;
1877 break;
1878
1879 case UMAX:
1880 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1881 return op1;
1882 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1883 return op0;
1884 tem = simplify_associative_operation (code, mode, op0, op1);
1885 if (tem)
1886 return tem;
1887 break;
1888
1889 case SS_PLUS:
1890 case US_PLUS:
1891 case SS_MINUS:
1892 case US_MINUS:
1893 /* ??? There are simplifications that can be done. */
1894 return 0;
1895
1896 case VEC_SELECT:
1897 if (!VECTOR_MODE_P (mode))
1898 {
1899 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1900 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
1901 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1902 gcc_assert (XVECLEN (trueop1, 0) == 1);
1903 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
1904
1905 if (GET_CODE (trueop0) == CONST_VECTOR)
1906 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
1907 (trueop1, 0, 0)));
1908 }
1909 else
1910 {
1911 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1912 gcc_assert (GET_MODE_INNER (mode)
1913 == GET_MODE_INNER (GET_MODE (trueop0)));
1914 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1915
1916 if (GET_CODE (trueop0) == CONST_VECTOR)
1917 {
1918 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1919 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1920 rtvec v = rtvec_alloc (n_elts);
1921 unsigned int i;
1922
1923 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
1924 for (i = 0; i < n_elts; i++)
1925 {
1926 rtx x = XVECEXP (trueop1, 0, i);
1927
1928 gcc_assert (GET_CODE (x) == CONST_INT);
1929 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
1930 INTVAL (x));
1931 }
1932
1933 return gen_rtx_CONST_VECTOR (mode, v);
1934 }
1935 }
1936 return 0;
1937 case VEC_CONCAT:
1938 {
1939 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1940 ? GET_MODE (trueop0)
1941 : GET_MODE_INNER (mode));
1942 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1943 ? GET_MODE (trueop1)
1944 : GET_MODE_INNER (mode));
1945
1946 gcc_assert (VECTOR_MODE_P (mode));
1947 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1948 == GET_MODE_SIZE (mode));
1949
1950 if (VECTOR_MODE_P (op0_mode))
1951 gcc_assert (GET_MODE_INNER (mode)
1952 == GET_MODE_INNER (op0_mode));
1953 else
1954 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
1955
1956 if (VECTOR_MODE_P (op1_mode))
1957 gcc_assert (GET_MODE_INNER (mode)
1958 == GET_MODE_INNER (op1_mode));
1959 else
1960 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
1961
1962 if ((GET_CODE (trueop0) == CONST_VECTOR
1963 || GET_CODE (trueop0) == CONST_INT
1964 || GET_CODE (trueop0) == CONST_DOUBLE)
1965 && (GET_CODE (trueop1) == CONST_VECTOR
1966 || GET_CODE (trueop1) == CONST_INT
1967 || GET_CODE (trueop1) == CONST_DOUBLE))
1968 {
1969 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1970 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1971 rtvec v = rtvec_alloc (n_elts);
1972 unsigned int i;
1973 unsigned in_n_elts = 1;
1974
1975 if (VECTOR_MODE_P (op0_mode))
1976 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1977 for (i = 0; i < n_elts; i++)
1978 {
1979 if (i < in_n_elts)
1980 {
1981 if (!VECTOR_MODE_P (op0_mode))
1982 RTVEC_ELT (v, i) = trueop0;
1983 else
1984 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1985 }
1986 else
1987 {
1988 if (!VECTOR_MODE_P (op1_mode))
1989 RTVEC_ELT (v, i) = trueop1;
1990 else
1991 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1992 i - in_n_elts);
1993 }
1994 }
1995
1996 return gen_rtx_CONST_VECTOR (mode, v);
1997 }
1998 }
1999 return 0;
2000
2001 default:
2002 gcc_unreachable ();
2003 }
2004
2005 return 0;
2006 }
2007
2008 rtx
2009 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2010 rtx op0, rtx op1)
2011 {
2012 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2013 HOST_WIDE_INT val;
2014 unsigned int width = GET_MODE_BITSIZE (mode);
2015
2016 if (VECTOR_MODE_P (mode)
2017 && code != VEC_CONCAT
2018 && GET_CODE (op0) == CONST_VECTOR
2019 && GET_CODE (op1) == CONST_VECTOR)
2020 {
2021 unsigned n_elts = GET_MODE_NUNITS (mode);
2022 enum machine_mode op0mode = GET_MODE (op0);
2023 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2024 enum machine_mode op1mode = GET_MODE (op1);
2025 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2026 rtvec v = rtvec_alloc (n_elts);
2027 unsigned int i;
2028
2029 gcc_assert (op0_n_elts == n_elts);
2030 gcc_assert (op1_n_elts == n_elts);
2031 for (i = 0; i < n_elts; i++)
2032 {
2033 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2034 CONST_VECTOR_ELT (op0, i),
2035 CONST_VECTOR_ELT (op1, i));
2036 if (!x)
2037 return 0;
2038 RTVEC_ELT (v, i) = x;
2039 }
2040
2041 return gen_rtx_CONST_VECTOR (mode, v);
2042 }
2043
2044 if (VECTOR_MODE_P (mode)
2045 && code == VEC_CONCAT
2046 && CONSTANT_P (op0) && CONSTANT_P (op1))
2047 {
2048 unsigned n_elts = GET_MODE_NUNITS (mode);
2049 rtvec v = rtvec_alloc (n_elts);
2050
2051 gcc_assert (n_elts >= 2);
2052 if (n_elts == 2)
2053 {
2054 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2055 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2056
2057 RTVEC_ELT (v, 0) = op0;
2058 RTVEC_ELT (v, 1) = op1;
2059 }
2060 else
2061 {
2062 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2063 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2064 unsigned i;
2065
2066 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2067 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2068 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2069
2070 for (i = 0; i < op0_n_elts; ++i)
2071 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2072 for (i = 0; i < op1_n_elts; ++i)
2073 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2074 }
2075
2076 return gen_rtx_CONST_VECTOR (mode, v);
2077 }
2078
2079 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2080 && GET_CODE (op0) == CONST_DOUBLE
2081 && GET_CODE (op1) == CONST_DOUBLE
2082 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2083 {
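/* Bitwise AND, IOR and XOR of floating-point constants are folded by
   operating directly on the target bit patterns of the values.  */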
2084 if (code == AND
2085 || code == IOR
2086 || code == XOR)
2087 {
2088 long tmp0[4];
2089 long tmp1[4];
2090 REAL_VALUE_TYPE r;
2091 int i;
2092
2093 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2094 GET_MODE (op0));
2095 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2096 GET_MODE (op1));
2097 for (i = 0; i < 4; i++)
2098 {
2099 switch (code)
2100 {
2101 case AND:
2102 tmp0[i] &= tmp1[i];
2103 break;
2104 case IOR:
2105 tmp0[i] |= tmp1[i];
2106 break;
2107 case XOR:
2108 tmp0[i] ^= tmp1[i];
2109 break;
2110 default:
2111 gcc_unreachable ();
2112 }
2113 }
2114 real_from_target (&r, tmp0, mode);
2115 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2116 }
2117 else
2118 {
2119 REAL_VALUE_TYPE f0, f1, value, result;
2120 bool inexact;
2121
2122 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2123 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2124 real_convert (&f0, mode, &f0);
2125 real_convert (&f1, mode, &f1);
2126
2127 if (HONOR_SNANS (mode)
2128 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2129 return 0;
2130
2131 if (code == DIV
2132 && REAL_VALUES_EQUAL (f1, dconst0)
2133 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2134 return 0;
2135
2136 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2137 && flag_trapping_math
2138 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2139 {
2140 int s0 = REAL_VALUE_NEGATIVE (f0);
2141 int s1 = REAL_VALUE_NEGATIVE (f1);
2142
2143 switch (code)
2144 {
2145 case PLUS:
2146 /* Inf + -Inf = NaN plus exception. */
2147 if (s0 != s1)
2148 return 0;
2149 break;
2150 case MINUS:
2151 /* Inf - Inf = NaN plus exception. */
2152 if (s0 == s1)
2153 return 0;
2154 break;
2155 case DIV:
2156 /* Inf / Inf = NaN plus exception. */
2157 return 0;
2158 default:
2159 break;
2160 }
2161 }
2162
2163 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2164 && flag_trapping_math
2165 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2166 || (REAL_VALUE_ISINF (f1)
2167 && REAL_VALUES_EQUAL (f0, dconst0))))
2168 /* Inf * 0 = NaN plus exception. */
2169 return 0;
2170
2171 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2172 &f0, &f1);
2173 real_convert (&result, mode, &value);
2174
2175 /* Don't constant fold this floating point operation if the
2176 result may depend upon the run-time rounding mode and
2177 flag_rounding_math is set, or if GCC's software emulation
2178 is unable to accurately represent the result. */
2179
2180 if ((flag_rounding_math
2181 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2182 && !flag_unsafe_math_optimizations))
2183 && (inexact || !real_identical (&result, &value)))
2184 return NULL_RTX;
2185
2186 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2187 }
2188 }
2189
2190 /* We can fold some multi-word operations. */
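/* Each constant is held as a (low, high) pair of host words, so e.g.
   two DImode constants can be added on a host whose HOST_WIDE_INT is
   only 32 bits wide.  */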
2191 if (GET_MODE_CLASS (mode) == MODE_INT
2192 && width == HOST_BITS_PER_WIDE_INT * 2
2193 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2194 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2195 {
2196 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2197 HOST_WIDE_INT h1, h2, hv, ht;
2198
2199 if (GET_CODE (op0) == CONST_DOUBLE)
2200 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2201 else
2202 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2203
2204 if (GET_CODE (op1) == CONST_DOUBLE)
2205 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2206 else
2207 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2208
2209 switch (code)
2210 {
2211 case MINUS:
2212 /* A - B == A + (-B). */
2213 neg_double (l2, h2, &lv, &hv);
2214 l2 = lv, h2 = hv;
2215
2216 /* Fall through.... */
2217
2218 case PLUS:
2219 add_double (l1, h1, l2, h2, &lv, &hv);
2220 break;
2221
2222 case MULT:
2223 mul_double (l1, h1, l2, h2, &lv, &hv);
2224 break;
2225
2226 case DIV:
2227 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2228 &lv, &hv, &lt, &ht))
2229 return 0;
2230 break;
2231
2232 case MOD:
2233 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2234 &lt, &ht, &lv, &hv))
2235 return 0;
2236 break;
2237
2238 case UDIV:
2239 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2240 &lv, &hv, &lt, &ht))
2241 return 0;
2242 break;
2243
2244 case UMOD:
2245 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2246 &lt, &ht, &lv, &hv))
2247 return 0;
2248 break;
2249
2250 case AND:
2251 lv = l1 & l2, hv = h1 & h2;
2252 break;
2253
2254 case IOR:
2255 lv = l1 | l2, hv = h1 | h2;
2256 break;
2257
2258 case XOR:
2259 lv = l1 ^ l2, hv = h1 ^ h2;
2260 break;
2261
2262 case SMIN:
2263 if (h1 < h2
2264 || (h1 == h2
2265 && ((unsigned HOST_WIDE_INT) l1
2266 < (unsigned HOST_WIDE_INT) l2)))
2267 lv = l1, hv = h1;
2268 else
2269 lv = l2, hv = h2;
2270 break;
2271
2272 case SMAX:
2273 if (h1 > h2
2274 || (h1 == h2
2275 && ((unsigned HOST_WIDE_INT) l1
2276 > (unsigned HOST_WIDE_INT) l2)))
2277 lv = l1, hv = h1;
2278 else
2279 lv = l2, hv = h2;
2280 break;
2281
2282 case UMIN:
2283 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2284 || (h1 == h2
2285 && ((unsigned HOST_WIDE_INT) l1
2286 < (unsigned HOST_WIDE_INT) l2)))
2287 lv = l1, hv = h1;
2288 else
2289 lv = l2, hv = h2;
2290 break;
2291
2292 case UMAX:
2293 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2294 || (h1 == h2
2295 && ((unsigned HOST_WIDE_INT) l1
2296 > (unsigned HOST_WIDE_INT) l2)))
2297 lv = l1, hv = h1;
2298 else
2299 lv = l2, hv = h2;
2300 break;
2301
2302 case LSHIFTRT: case ASHIFTRT:
2303 case ASHIFT:
2304 case ROTATE: case ROTATERT:
2305 if (SHIFT_COUNT_TRUNCATED)
2306 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2307
2308 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2309 return 0;
2310
2311 if (code == LSHIFTRT || code == ASHIFTRT)
2312 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2313 code == ASHIFTRT);
2314 else if (code == ASHIFT)
2315 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2316 else if (code == ROTATE)
2317 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2318 else /* code == ROTATERT */
2319 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2320 break;
2321
2322 default:
2323 return 0;
2324 }
2325
2326 return immed_double_const (lv, hv, mode);
2327 }
2328
2329 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2330 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2331 {
2332 /* Get the integer argument values in two forms:
2333 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
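/* For example, in QImode the constant (const_int -1) yields
   ARG0 == 255 and ARG0S == -1.  */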
2334
2335 arg0 = INTVAL (op0);
2336 arg1 = INTVAL (op1);
2337
2338 if (width < HOST_BITS_PER_WIDE_INT)
2339 {
2340 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2341 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2342
2343 arg0s = arg0;
2344 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2345 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2346
2347 arg1s = arg1;
2348 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2349 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2350 }
2351 else
2352 {
2353 arg0s = arg0;
2354 arg1s = arg1;
2355 }
2356
2357 /* Compute the value of the arithmetic. */
2358
2359 switch (code)
2360 {
2361 case PLUS:
2362 val = arg0s + arg1s;
2363 break;
2364
2365 case MINUS:
2366 val = arg0s - arg1s;
2367 break;
2368
2369 case MULT:
2370 val = arg0s * arg1s;
2371 break;
2372
2373 case DIV:
2374 if (arg1s == 0
2375 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2376 && arg1s == -1))
2377 return 0;
2378 val = arg0s / arg1s;
2379 break;
2380
2381 case MOD:
2382 if (arg1s == 0
2383 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2384 && arg1s == -1))
2385 return 0;
2386 val = arg0s % arg1s;
2387 break;
2388
2389 case UDIV:
2390 if (arg1 == 0
2391 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2392 && arg1s == -1))
2393 return 0;
2394 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2395 break;
2396
2397 case UMOD:
2398 if (arg1 == 0
2399 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2400 && arg1s == -1))
2401 return 0;
2402 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2403 break;
2404
2405 case AND:
2406 val = arg0 & arg1;
2407 break;
2408
2409 case IOR:
2410 val = arg0 | arg1;
2411 break;
2412
2413 case XOR:
2414 val = arg0 ^ arg1;
2415 break;
2416
2417 case LSHIFTRT:
2418 case ASHIFT:
2419 case ASHIFTRT:
2420 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2421 the value is in range. We can't return any old value for
2422 out-of-range arguments because either the middle-end (via
2423 shift_truncation_mask) or the back-end might be relying on
2424 target-specific knowledge. Nor can we rely on
2425 shift_truncation_mask, since the shift might not be part of an
2426 ashlM3, lshrM3 or ashrM3 instruction. */
2427 if (SHIFT_COUNT_TRUNCATED)
2428 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2429 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2430 return 0;
2431
2432 val = (code == ASHIFT
2433 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2434 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2435
2436 /* Sign-extend the result for arithmetic right shifts. */
2437 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2438 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2439 break;
2440
2441 case ROTATERT:
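/* Rotate ARG0 right by ARG1 bits; e.g. the SImode value 0x12345678
   rotated right by 8 is 0x78123456.  */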
2442 if (arg1 < 0)
2443 return 0;
2444
2445 arg1 %= width;
2446 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2447 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2448 break;
2449
2450 case ROTATE:
2451 if (arg1 < 0)
2452 return 0;
2453
2454 arg1 %= width;
2455 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2456 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2457 break;
2458
2459 case COMPARE:
2460 /* Do nothing here. */
2461 return 0;
2462
2463 case SMIN:
2464 val = arg0s <= arg1s ? arg0s : arg1s;
2465 break;
2466
2467 case UMIN:
2468 val = ((unsigned HOST_WIDE_INT) arg0
2469 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2470 break;
2471
2472 case SMAX:
2473 val = arg0s > arg1s ? arg0s : arg1s;
2474 break;
2475
2476 case UMAX:
2477 val = ((unsigned HOST_WIDE_INT) arg0
2478 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2479 break;
2480
2481 case SS_PLUS:
2482 case US_PLUS:
2483 case SS_MINUS:
2484 case US_MINUS:
2485 /* ??? There are simplifications that can be done. */
2486 return 0;
2487
2488 default:
2489 gcc_unreachable ();
2490 }
2491
2492 return gen_int_mode (val, mode);
2493 }
2494
2495 return NULL_RTX;
2496 }
2497
2498
2499 \f
2500 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2501 PLUS or MINUS.
2502
2503 Rather than test for specific cases, we do this by a brute-force method
2504 and do all possible simplifications until no more changes occur. Then
2505 we rebuild the operation.
2506
2507 If FORCE is true, then always generate the rtx. This is used to
2508 canonicalize stuff emitted from simplify_gen_binary. Note that this
2509 can still fail if the rtx is too complex. It won't fail just because
2510 the result is not 'simpler' than the input, however. */
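/* For example, in an integer mode (plus (plus X (const_int 1))
   (minus Y X)) expands to the operand list X, Y, +1, -X; the X and -X
   terms cancel and the list reduces to Y, +1, which is rebuilt as
   (plus Y (const_int 1)).  */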
2511
2512 struct simplify_plus_minus_op_data
2513 {
2514 rtx op;
2515 int neg;
2516 };
2517
2518 static int
2519 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2520 {
2521 const struct simplify_plus_minus_op_data *d1 = p1;
2522 const struct simplify_plus_minus_op_data *d2 = p2;
2523
2524 return (commutative_operand_precedence (d2->op)
2525 - commutative_operand_precedence (d1->op));
2526 }
2527
2528 static rtx
2529 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2530 rtx op1, int force)
2531 {
2532 struct simplify_plus_minus_op_data ops[8];
2533 rtx result, tem;
2534 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2535 int first, changed;
2536 int i, j;
2537
2538 memset (ops, 0, sizeof ops);
2539
2540 /* Set up the two operands and then expand them until nothing has been
2541 changed. If we run out of room in our array, give up; this should
2542 almost never happen. */
2543
2544 ops[0].op = op0;
2545 ops[0].neg = 0;
2546 ops[1].op = op1;
2547 ops[1].neg = (code == MINUS);
2548
2549 do
2550 {
2551 changed = 0;
2552
2553 for (i = 0; i < n_ops; i++)
2554 {
2555 rtx this_op = ops[i].op;
2556 int this_neg = ops[i].neg;
2557 enum rtx_code this_code = GET_CODE (this_op);
2558
2559 switch (this_code)
2560 {
2561 case PLUS:
2562 case MINUS:
2563 if (n_ops == 7)
2564 return NULL_RTX;
2565
2566 ops[n_ops].op = XEXP (this_op, 1);
2567 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2568 n_ops++;
2569
2570 ops[i].op = XEXP (this_op, 0);
2571 input_ops++;
2572 changed = 1;
2573 break;
2574
2575 case NEG:
2576 ops[i].op = XEXP (this_op, 0);
2577 ops[i].neg = ! this_neg;
2578 changed = 1;
2579 break;
2580
2581 case CONST:
2582 if (n_ops < 7
2583 && GET_CODE (XEXP (this_op, 0)) == PLUS
2584 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2585 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2586 {
2587 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2588 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2589 ops[n_ops].neg = this_neg;
2590 n_ops++;
2591 input_consts++;
2592 changed = 1;
2593 }
2594 break;
2595
2596 case NOT:
2597 /* ~a -> (-a - 1) */
2598 if (n_ops != 7)
2599 {
2600 ops[n_ops].op = constm1_rtx;
2601 ops[n_ops++].neg = this_neg;
2602 ops[i].op = XEXP (this_op, 0);
2603 ops[i].neg = !this_neg;
2604 changed = 1;
2605 }
2606 break;
2607
2608 case CONST_INT:
2609 if (this_neg)
2610 {
2611 ops[i].op = neg_const_int (mode, this_op);
2612 ops[i].neg = 0;
2613 changed = 1;
2614 }
2615 break;
2616
2617 default:
2618 break;
2619 }
2620 }
2621 }
2622 while (changed);
2623
2624 /* If we only have two operands, we can't do anything. */
2625 if (n_ops <= 2 && !force)
2626 return NULL_RTX;
2627
2628 /* Count the number of CONSTs we didn't split above. */
2629 for (i = 0; i < n_ops; i++)
2630 if (GET_CODE (ops[i].op) == CONST)
2631 input_consts++;
2632
2633 /* Now simplify each pair of operands until nothing changes. The first
2634 time through just simplify constants against each other. */
2635
2636 first = 1;
2637 do
2638 {
2639 changed = first;
2640
2641 for (i = 0; i < n_ops - 1; i++)
2642 for (j = i + 1; j < n_ops; j++)
2643 {
2644 rtx lhs = ops[i].op, rhs = ops[j].op;
2645 int lneg = ops[i].neg, rneg = ops[j].neg;
2646
2647 if (lhs != 0 && rhs != 0
2648 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2649 {
2650 enum rtx_code ncode = PLUS;
2651
2652 if (lneg != rneg)
2653 {
2654 ncode = MINUS;
2655 if (lneg)
2656 tem = lhs, lhs = rhs, rhs = tem;
2657 }
2658 else if (swap_commutative_operands_p (lhs, rhs))
2659 tem = lhs, lhs = rhs, rhs = tem;
2660
2661 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2662
2663 /* Reject "simplifications" that just wrap the two
2664 arguments in a CONST. Failure to do so can result
2665 in infinite recursion with simplify_binary_operation
2666 when it calls us to simplify CONST operations. */
2667 if (tem
2668 && ! (GET_CODE (tem) == CONST
2669 && GET_CODE (XEXP (tem, 0)) == ncode
2670 && XEXP (XEXP (tem, 0), 0) == lhs
2671 && XEXP (XEXP (tem, 0), 1) == rhs)
2672 /* Don't allow -x + -1 -> ~x simplifications in the
2673 first pass. This allows us the chance to combine
2674 the -1 with other constants. */
2675 && ! (first
2676 && GET_CODE (tem) == NOT
2677 && XEXP (tem, 0) == rhs))
2678 {
2679 lneg &= rneg;
2680 if (GET_CODE (tem) == NEG)
2681 tem = XEXP (tem, 0), lneg = !lneg;
2682 if (GET_CODE (tem) == CONST_INT && lneg)
2683 tem = neg_const_int (mode, tem), lneg = 0;
2684
2685 ops[i].op = tem;
2686 ops[i].neg = lneg;
2687 ops[j].op = NULL_RTX;
2688 changed = 1;
2689 }
2690 }
2691 }
2692
2693 first = 0;
2694 }
2695 while (changed);
2696
2697 /* Pack all the operands to the lower-numbered entries. */
2698 for (i = 0, j = 0; j < n_ops; j++)
2699 if (ops[j].op)
2700 ops[i++] = ops[j];
2701 n_ops = i;
2702
2703 /* Sort the operations based on swap_commutative_operands_p. */
2704 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2705
2706 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2707 if (n_ops == 2
2708 && GET_CODE (ops[1].op) == CONST_INT
2709 && CONSTANT_P (ops[0].op)
2710 && ops[0].neg)
2711 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2712
2713 /* We suppressed creation of trivial CONST expressions in the
2714 combination loop to avoid recursion. Create one manually now.
2715 The combination loop should have ensured that there is exactly
2716 one CONST_INT, and the sort will have ensured that it is last
2717 in the array and that any other constant will be next-to-last. */
2718
2719 if (n_ops > 1
2720 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2721 && CONSTANT_P (ops[n_ops - 2].op))
2722 {
2723 rtx value = ops[n_ops - 1].op;
2724 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2725 value = neg_const_int (mode, value);
2726 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2727 n_ops--;
2728 }
2729
2730 /* Count the number of CONSTs that we generated. */
2731 n_consts = 0;
2732 for (i = 0; i < n_ops; i++)
2733 if (GET_CODE (ops[i].op) == CONST)
2734 n_consts++;
2735
2736 /* Give up if we didn't reduce the number of operands we had. Make
2737 sure we count a CONST as two operands. If we have the same
2738 number of operands, but have made more CONSTs than before, this
2739 is also an improvement, so accept it. */
2740 if (!force
2741 && (n_ops + n_consts > input_ops
2742 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2743 return NULL_RTX;
2744
2745 /* Put a non-negated operand first, if possible. */
2746
2747 for (i = 0; i < n_ops && ops[i].neg; i++)
2748 continue;
2749 if (i == n_ops)
2750 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2751 else if (i != 0)
2752 {
2753 tem = ops[0].op;
2754 ops[0] = ops[i];
2755 ops[i].op = tem;
2756 ops[i].neg = 1;
2757 }
2758
2759 /* Now make the result by performing the requested operations. */
2760 result = ops[0].op;
2761 for (i = 1; i < n_ops; i++)
2762 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2763 mode, result, ops[i].op);
2764
2765 return result;
2766 }
2767
2768 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2769 static bool
2770 plus_minus_operand_p (rtx x)
2771 {
2772 return GET_CODE (x) == PLUS
2773 || GET_CODE (x) == MINUS
2774 || (GET_CODE (x) == CONST
2775 && GET_CODE (XEXP (x, 0)) == PLUS
2776 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2777 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2778 }
2779
2780 /* Like simplify_binary_operation except used for relational operators.
2781 MODE is the mode of the result. If MODE is VOIDmode, the operands must
2782 not both be VOIDmode.
2783
2784 CMP_MODE specifies the mode in which the comparison is done, so it is
2785 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2786 the operands or, if both are VOIDmode, the operands are compared in
2787 "infinite precision". */
2788 rtx
2789 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2790 enum machine_mode cmp_mode, rtx op0, rtx op1)
2791 {
2792 rtx tem, trueop0, trueop1;
2793
2794 if (cmp_mode == VOIDmode)
2795 cmp_mode = GET_MODE (op0);
2796 if (cmp_mode == VOIDmode)
2797 cmp_mode = GET_MODE (op1);
2798
2799 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2800 if (tem)
2801 {
2802 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2803 {
2804 if (tem == const0_rtx)
2805 return CONST0_RTX (mode);
2806 #ifdef FLOAT_STORE_FLAG_VALUE
2807 {
2808 REAL_VALUE_TYPE val;
2809 val = FLOAT_STORE_FLAG_VALUE (mode);
2810 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2811 }
2812 #else
2813 return NULL_RTX;
2814 #endif
2815 }
2816 if (VECTOR_MODE_P (mode))
2817 {
2818 if (tem == const0_rtx)
2819 return CONST0_RTX (mode);
2820 #ifdef VECTOR_STORE_FLAG_VALUE
2821 {
2822 int i, units;
2823 rtvec v;
2824
2825 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2826 if (val == NULL_RTX)
2827 return NULL_RTX;
2828 if (val == const1_rtx)
2829 return CONST1_RTX (mode);
2830
2831 units = GET_MODE_NUNITS (mode);
2832 v = rtvec_alloc (units);
2833 for (i = 0; i < units; i++)
2834 RTVEC_ELT (v, i) = val;
2835 return gen_rtx_raw_CONST_VECTOR (mode, v);
2836 }
2837 #else
2838 return NULL_RTX;
2839 #endif
2840 }
2841
2842 return tem;
2843 }
2844
2845 /* For the following tests, ensure const0_rtx is op1. */
2846 if (swap_commutative_operands_p (op0, op1)
2847 || (op0 == const0_rtx && op1 != const0_rtx))
2848 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2849
2850 /* If op0 is a compare, extract the comparison arguments from it. */
2851 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2852 return simplify_relational_operation (code, mode, VOIDmode,
2853 XEXP (op0, 0), XEXP (op0, 1));
2854
2855 if (mode == VOIDmode
2856 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2857 || CC0_P (op0))
2858 return NULL_RTX;
2859
2860 trueop0 = avoid_constant_pool_reference (op0);
2861 trueop1 = avoid_constant_pool_reference (op1);
2862 return simplify_relational_operation_1 (code, mode, cmp_mode,
2863 trueop0, trueop1);
2864 }
2865
2866 /* This part of simplify_relational_operation is only used when CMP_MODE
2867 is not in class MODE_CC (i.e. it is a real comparison).
2868
2869 MODE is the mode of the result, while CMP_MODE specifies the mode in
2870 which the comparison is done, so it is the mode of the operands. */
2871
2872 static rtx
2873 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2874 enum machine_mode cmp_mode, rtx op0, rtx op1)
2875 {
2876 enum rtx_code op0code = GET_CODE (op0);
2877
2878 if (GET_CODE (op1) == CONST_INT)
2879 {
2880 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2881 {
2882 /* If op0 is a comparison, extract the comparison arguments from it. */
2883 if (code == NE)
2884 {
2885 if (GET_MODE (op0) == cmp_mode)
2886 return simplify_rtx (op0);
2887 else
2888 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2889 XEXP (op0, 0), XEXP (op0, 1));
2890 }
2891 else if (code == EQ)
2892 {
2893 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2894 if (new_code != UNKNOWN)
2895 return simplify_gen_relational (new_code, mode, VOIDmode,
2896 XEXP (op0, 0), XEXP (op0, 1));
2897 }
2898 }
2899 }
2900
2901 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2902 if ((code == EQ || code == NE)
2903 && (op0code == PLUS || op0code == MINUS)
2904 && CONSTANT_P (op1)
2905 && CONSTANT_P (XEXP (op0, 1))
2906 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2907 {
2908 rtx x = XEXP (op0, 0);
2909 rtx c = XEXP (op0, 1);
2910
2911 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2912 cmp_mode, op1, c);
2913 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2914 }
2915
2916 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
2917 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2918 if (code == NE
2919 && op1 == const0_rtx
2920 && GET_MODE_CLASS (mode) == MODE_INT
2921 && cmp_mode != VOIDmode
2922 /* ??? Work-around BImode bugs in the ia64 backend. */
2923 && mode != BImode
2924 && cmp_mode != BImode
2925 && nonzero_bits (op0, cmp_mode) == 1
2926 && STORE_FLAG_VALUE == 1)
2927 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
2928 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
2929 : lowpart_subreg (mode, op0, cmp_mode);
2930
2931 return NULL_RTX;
2932 }
2933
2934 /* Check if the given comparison (done in the given MODE) is actually a
2935 tautology or a contradiction.
2936 If no simplification is possible, this function returns zero.
2937 Otherwise, it returns either const_true_rtx or const0_rtx. */
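/* For example, (ltu X (const_int 0)) can never hold, so it folds to
   const0_rtx.  */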
2938
2939 rtx
2940 simplify_const_relational_operation (enum rtx_code code,
2941 enum machine_mode mode,
2942 rtx op0, rtx op1)
2943 {
2944 int equal, op0lt, op0ltu, op1lt, op1ltu;
2945 rtx tem;
2946 rtx trueop0;
2947 rtx trueop1;
2948
2949 gcc_assert (mode != VOIDmode
2950 || (GET_MODE (op0) == VOIDmode
2951 && GET_MODE (op1) == VOIDmode));
2952
2953 /* If op0 is a compare, extract the comparison arguments from it. */
2954 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2955 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2956
2957 /* We can't simplify MODE_CC values since we don't know what the
2958 actual comparison is. */
2959 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2960 return 0;
2961
2962 /* Make sure the constant is second. */
2963 if (swap_commutative_operands_p (op0, op1))
2964 {
2965 tem = op0, op0 = op1, op1 = tem;
2966 code = swap_condition (code);
2967 }
2968
2969 trueop0 = avoid_constant_pool_reference (op0);
2970 trueop1 = avoid_constant_pool_reference (op1);
2971
2972 /* For integer comparisons of A and B maybe we can simplify A - B and can
2973 then simplify a comparison of that with zero. If A and B are both either
2974 a register or a CONST_INT, this can't help; testing for these cases will
2975 prevent infinite recursion here and speed things up.
2976
2977 If CODE is an unsigned comparison, then we can never do this optimization,
2978 because it gives an incorrect result if the subtraction wraps around zero.
2979 ANSI C defines unsigned operations such that they never overflow, and
2980 thus such cases cannot be ignored; but we cannot do it even for
2981 signed comparisons for languages such as Java, so test flag_wrapv. */
2982
2983 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2984 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2985 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2986 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2987 /* We cannot do this for == or != if tem is a nonzero address. */
2988 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2989 && code != GTU && code != GEU && code != LTU && code != LEU)
2990 return simplify_const_relational_operation (signed_condition (code),
2991 mode, tem, const0_rtx);
2992
2993 if (flag_unsafe_math_optimizations && code == ORDERED)
2994 return const_true_rtx;
2995
2996 if (flag_unsafe_math_optimizations && code == UNORDERED)
2997 return const0_rtx;
2998
2999 /* For modes without NaNs, if the two operands are equal, we know the
3000 result except if they have side-effects. */
3001 if (! HONOR_NANS (GET_MODE (trueop0))
3002 && rtx_equal_p (trueop0, trueop1)
3003 && ! side_effects_p (trueop0))
3004 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3005
3006 /* If the operands are floating-point constants, see if we can fold
3007 the result. */
3008 else if (GET_CODE (trueop0) == CONST_DOUBLE
3009 && GET_CODE (trueop1) == CONST_DOUBLE
3010 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
3011 {
3012 REAL_VALUE_TYPE d0, d1;
3013
3014 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3015 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3016
3017 /* Comparisons are unordered iff at least one of the values is NaN. */
3018 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3019 switch (code)
3020 {
3021 case UNEQ:
3022 case UNLT:
3023 case UNGT:
3024 case UNLE:
3025 case UNGE:
3026 case NE:
3027 case UNORDERED:
3028 return const_true_rtx;
3029 case EQ:
3030 case LT:
3031 case GT:
3032 case LE:
3033 case GE:
3034 case LTGT:
3035 case ORDERED:
3036 return const0_rtx;
3037 default:
3038 return 0;
3039 }
3040
3041 equal = REAL_VALUES_EQUAL (d0, d1);
3042 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3043 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3044 }
3045
3046 /* Otherwise, see if the operands are both integers. */
3047 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3048 && (GET_CODE (trueop0) == CONST_DOUBLE
3049 || GET_CODE (trueop0) == CONST_INT)
3050 && (GET_CODE (trueop1) == CONST_DOUBLE
3051 || GET_CODE (trueop1) == CONST_INT))
3052 {
3053 int width = GET_MODE_BITSIZE (mode);
3054 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3055 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3056
3057 /* Get the two words comprising each integer constant. */
3058 if (GET_CODE (trueop0) == CONST_DOUBLE)
3059 {
3060 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3061 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3062 }
3063 else
3064 {
3065 l0u = l0s = INTVAL (trueop0);
3066 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3067 }
3068
3069 if (GET_CODE (trueop1) == CONST_DOUBLE)
3070 {
3071 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3072 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3073 }
3074 else
3075 {
3076 l1u = l1s = INTVAL (trueop1);
3077 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3078 }
3079
3080 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3081 we have to sign or zero-extend the values. */
3082 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3083 {
3084 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3085 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3086
3087 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3088 l0s |= ((HOST_WIDE_INT) (-1) << width);
3089
3090 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3091 l1s |= ((HOST_WIDE_INT) (-1) << width);
3092 }
3093 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3094 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3095
3096 equal = (h0u == h1u && l0u == l1u);
3097 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3098 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3099 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3100 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3101 }
3102
3103 /* Otherwise, there are some code-specific tests we can make. */
3104 else
3105 {
3106 /* Optimize comparisons with upper and lower bounds. */
3107 if (SCALAR_INT_MODE_P (mode)
3108 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3109 {
3110 rtx mmin, mmax;
3111 int sign;
3112
3113 if (code == GEU
3114 || code == LEU
3115 || code == GTU
3116 || code == LTU)
3117 sign = 0;
3118 else
3119 sign = 1;
3120
3121 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3122
3123 tem = NULL_RTX;
3124 switch (code)
3125 {
3126 case GEU:
3127 case GE:
3128 /* x >= min is always true. */
3129 if (rtx_equal_p (trueop1, mmin))
3130 tem = const_true_rtx;
3131 else
3132 break;
3133
3134 case LEU:
3135 case LE:
3136 /* x <= max is always true. */
3137 if (rtx_equal_p (trueop1, mmax))
3138 tem = const_true_rtx;
3139 break;
3140
3141 case GTU:
3142 case GT:
3143 /* x > max is always false. */
3144 if (rtx_equal_p (trueop1, mmax))
3145 tem = const0_rtx;
3146 break;
3147
3148 case LTU:
3149 case LT:
3150 /* x < min is always false. */
3151 if (rtx_equal_p (trueop1, mmin))
3152 tem = const0_rtx;
3153 break;
3154
3155 default:
3156 break;
3157 }
3158 if (tem == const0_rtx
3159 || tem == const_true_rtx)
3160 return tem;
3161 }
3162
3163 switch (code)
3164 {
3165 case EQ:
3166 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3167 return const0_rtx;
3168 break;
3169
3170 case NE:
3171 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3172 return const_true_rtx;
3173 break;
3174
3175 case LT:
3176 /* Optimize abs(x) < 0.0. */
3177 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3178 {
3179 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3180 : trueop0;
3181 if (GET_CODE (tem) == ABS)
3182 return const0_rtx;
3183 }
3184 break;
3185
3186 case GE:
3187 /* Optimize abs(x) >= 0.0. */
3188 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3189 {
3190 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3191 : trueop0;
3192 if (GET_CODE (tem) == ABS)
3193 return const_true_rtx;
3194 }
3195 break;
3196
3197 case UNGE:
3198 /* Optimize ! (abs(x) < 0.0). */
3199 if (trueop1 == CONST0_RTX (mode))
3200 {
3201 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3202 : trueop0;
3203 if (GET_CODE (tem) == ABS)
3204 return const_true_rtx;
3205 }
3206 break;
3207
3208 default:
3209 break;
3210 }
3211
3212 return 0;
3213 }
3214
3215 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3216 as appropriate. */
3217 switch (code)
3218 {
3219 case EQ:
3220 case UNEQ:
3221 return equal ? const_true_rtx : const0_rtx;
3222 case NE:
3223 case LTGT:
3224 return ! equal ? const_true_rtx : const0_rtx;
3225 case LT:
3226 case UNLT:
3227 return op0lt ? const_true_rtx : const0_rtx;
3228 case GT:
3229 case UNGT:
3230 return op1lt ? const_true_rtx : const0_rtx;
3231 case LTU:
3232 return op0ltu ? const_true_rtx : const0_rtx;
3233 case GTU:
3234 return op1ltu ? const_true_rtx : const0_rtx;
3235 case LE:
3236 case UNLE:
3237 return equal || op0lt ? const_true_rtx : const0_rtx;
3238 case GE:
3239 case UNGE:
3240 return equal || op1lt ? const_true_rtx : const0_rtx;
3241 case LEU:
3242 return equal || op0ltu ? const_true_rtx : const0_rtx;
3243 case GEU:
3244 return equal || op1ltu ? const_true_rtx : const0_rtx;
3245 case ORDERED:
3246 return const_true_rtx;
3247 case UNORDERED:
3248 return const0_rtx;
3249 default:
3250 gcc_unreachable ();
3251 }
3252 }
3253 \f
3254 /* Simplify CODE, an operation with result mode MODE and three operands,
3255 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3256 a constant. Return 0 if no simplification is possible. */
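/* For example, (if_then_else (const_int 1) A B) simplifies to A, and a
   SIGN_EXTRACT or ZERO_EXTRACT with constant operands folds to a
   constant.  */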
3257
3258 rtx
3259 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3260 enum machine_mode op0_mode, rtx op0, rtx op1,
3261 rtx op2)
3262 {
3263 unsigned int width = GET_MODE_BITSIZE (mode);
3264
3265 /* VOIDmode means "infinite" precision. */
3266 if (width == 0)
3267 width = HOST_BITS_PER_WIDE_INT;
3268
3269 switch (code)
3270 {
3271 case SIGN_EXTRACT:
3272 case ZERO_EXTRACT:
3273 if (GET_CODE (op0) == CONST_INT
3274 && GET_CODE (op1) == CONST_INT
3275 && GET_CODE (op2) == CONST_INT
3276 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3277 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3278 {
3279 /* Extracting a bit-field from a constant. */
3280 HOST_WIDE_INT val = INTVAL (op0);
3281
3282 if (BITS_BIG_ENDIAN)
3283 val >>= (GET_MODE_BITSIZE (op0_mode)
3284 - INTVAL (op2) - INTVAL (op1));
3285 else
3286 val >>= INTVAL (op2);
3287
3288 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3289 {
3290 /* First zero-extend. */
3291 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3292 /* If desired, propagate sign bit. */
3293 if (code == SIGN_EXTRACT
3294 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3295 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3296 }
3297
3298 /* Clear the bits that don't belong in our mode,
3299 unless they and our sign bit are all one.
3300 So we get either a reasonable negative value or a reasonable
3301 unsigned value for this mode. */
3302 if (width < HOST_BITS_PER_WIDE_INT
3303 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3304 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3305 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3306
3307 return gen_int_mode (val, mode);
3308 }
3309 break;
3310
3311 case IF_THEN_ELSE:
3312 if (GET_CODE (op0) == CONST_INT)
3313 return op0 != const0_rtx ? op1 : op2;
3314
3315 /* Convert c ? a : a into "a". */
3316 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3317 return op1;
3318
3319 /* Convert a != b ? a : b into "a". */
3320 if (GET_CODE (op0) == NE
3321 && ! side_effects_p (op0)
3322 && ! HONOR_NANS (mode)
3323 && ! HONOR_SIGNED_ZEROS (mode)
3324 && ((rtx_equal_p (XEXP (op0, 0), op1)
3325 && rtx_equal_p (XEXP (op0, 1), op2))
3326 || (rtx_equal_p (XEXP (op0, 0), op2)
3327 && rtx_equal_p (XEXP (op0, 1), op1))))
3328 return op1;
3329
3330 /* Convert a == b ? a : b into "b". */
3331 if (GET_CODE (op0) == EQ
3332 && ! side_effects_p (op0)
3333 && ! HONOR_NANS (mode)
3334 && ! HONOR_SIGNED_ZEROS (mode)
3335 && ((rtx_equal_p (XEXP (op0, 0), op1)
3336 && rtx_equal_p (XEXP (op0, 1), op2))
3337 || (rtx_equal_p (XEXP (op0, 0), op2)
3338 && rtx_equal_p (XEXP (op0, 1), op1))))
3339 return op2;
3340
3341 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3342 {
3343 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3344 ? GET_MODE (XEXP (op0, 1))
3345 : GET_MODE (XEXP (op0, 0)));
3346 rtx temp;
3347
3348 /* Look for happy constants in op1 and op2. */
3349 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3350 {
3351 HOST_WIDE_INT t = INTVAL (op1);
3352 HOST_WIDE_INT f = INTVAL (op2);
3353
3354 if (t == STORE_FLAG_VALUE && f == 0)
3355 code = GET_CODE (op0);
3356 else if (t == 0 && f == STORE_FLAG_VALUE)
3357 {
3358 enum rtx_code tmp;
3359 tmp = reversed_comparison_code (op0, NULL_RTX);
3360 if (tmp == UNKNOWN)
3361 break;
3362 code = tmp;
3363 }
3364 else
3365 break;
3366
3367 return simplify_gen_relational (code, mode, cmp_mode,
3368 XEXP (op0, 0), XEXP (op0, 1));
3369 }
3370
3371 if (cmp_mode == VOIDmode)
3372 cmp_mode = op0_mode;
3373 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3374 cmp_mode, XEXP (op0, 0),
3375 XEXP (op0, 1));
3376
3377 /* See if any simplifications were possible. */
3378 if (temp)
3379 {
3380 if (GET_CODE (temp) == CONST_INT)
3381 return temp == const0_rtx ? op2 : op1;
3382 else if (temp)
3383 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3384 }
3385 }
3386 break;
3387
3388 case VEC_MERGE:
3389 gcc_assert (GET_MODE (op0) == mode);
3390 gcc_assert (GET_MODE (op1) == mode);
3391 gcc_assert (VECTOR_MODE_P (mode));
3392 op2 = avoid_constant_pool_reference (op2);
3393 if (GET_CODE (op2) == CONST_INT)
3394 {
3395 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3396 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3397 int mask = (1 << n_elts) - 1;
3398
3399 if (!(INTVAL (op2) & mask))
3400 return op1;
3401 if ((INTVAL (op2) & mask) == mask)
3402 return op0;
3403
3404 op0 = avoid_constant_pool_reference (op0);
3405 op1 = avoid_constant_pool_reference (op1);
3406 if (GET_CODE (op0) == CONST_VECTOR
3407 && GET_CODE (op1) == CONST_VECTOR)
3408 {
3409 rtvec v = rtvec_alloc (n_elts);
3410 unsigned int i;
3411
3412 for (i = 0; i < n_elts; i++)
3413 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3414 ? CONST_VECTOR_ELT (op0, i)
3415 : CONST_VECTOR_ELT (op1, i));
3416 return gen_rtx_CONST_VECTOR (mode, v);
3417 }
3418 }
3419 break;
3420
3421 default:
3422 gcc_unreachable ();
3423 }
3424
3425 return 0;
3426 }
3427
3428 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3429 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3430
3431 Works by unpacking OP into a collection of 8-bit values
3432 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3433 and then repacking them again for OUTERMODE. */
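/* For example, on a little-endian target the SImode subreg at byte 0
   of the DImode constant 0x1122334455667788 repacks the four low-order
   bytes and yields (const_int 0x55667788).  */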
3434
3435 static rtx
3436 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3437 enum machine_mode innermode, unsigned int byte)
3438 {
3439 /* We support up to 512-bit values (for V8DFmode). */
3440 enum {
3441 max_bitsize = 512,
3442 value_bit = 8,
3443 value_mask = (1 << value_bit) - 1
3444 };
3445 unsigned char value[max_bitsize / value_bit];
3446 int value_start;
3447 int i;
3448 int elem;
3449
3450 int num_elem;
3451 rtx * elems;
3452 int elem_bitsize;
3453 rtx result_s;
3454 rtvec result_v = NULL;
3455 enum mode_class outer_class;
3456 enum machine_mode outer_submode;
3457
3458 /* Some ports misuse CCmode. */
3459 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3460 return op;
3461
3462 /* We have no way to represent a complex constant at the rtl level. */
3463 if (COMPLEX_MODE_P (outermode))
3464 return NULL_RTX;
3465
3466 /* Unpack the value. */
3467
3468 if (GET_CODE (op) == CONST_VECTOR)
3469 {
3470 num_elem = CONST_VECTOR_NUNITS (op);
3471 elems = &CONST_VECTOR_ELT (op, 0);
3472 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3473 }
3474 else
3475 {
3476 num_elem = 1;
3477 elems = &op;
3478 elem_bitsize = max_bitsize;
3479 }
3480 /* If this assertion fails, the value is too complicated; reducing value_bit may help. */
3481 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3482 /* I don't know how to handle endianness of sub-units. */
3483 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3484
3485 for (elem = 0; elem < num_elem; elem++)
3486 {
3487 unsigned char * vp;
3488 rtx el = elems[elem];
3489
3490 /* Vectors are kept in target memory order. (This is probably
3491 a mistake.) */
3492 {
3493 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3494 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3495 / BITS_PER_UNIT);
3496 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3497 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3498 unsigned bytele = (subword_byte % UNITS_PER_WORD
3499 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3500 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3501 }
3502
3503 switch (GET_CODE (el))
3504 {
3505 case CONST_INT:
3506 for (i = 0;
3507 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3508 i += value_bit)
3509 *vp++ = INTVAL (el) >> i;
3510 /* CONST_INTs are always logically sign-extended. */
3511 for (; i < elem_bitsize; i += value_bit)
3512 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3513 break;
3514
3515 case CONST_DOUBLE:
3516 if (GET_MODE (el) == VOIDmode)
3517 {
3518 /* If this triggers, someone should have generated a
3519 CONST_INT instead. */
3520 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3521
3522 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3523 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3524 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3525 {
3526 *vp++
3527 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3528 i += value_bit;
3529 }
3530 /* It shouldn't matter what's done here, so fill it with
3531 zero. */
3532 for (; i < max_bitsize; i += value_bit)
3533 *vp++ = 0;
3534 }
3535 else
3536 {
3537 long tmp[max_bitsize / 32];
3538 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3539
3540 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3541 gcc_assert (bitsize <= elem_bitsize);
3542 gcc_assert (bitsize % value_bit == 0);
3543
3544 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3545 GET_MODE (el));
3546
3547 /* real_to_target produces its result in words affected by
3548 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3549 and use WORDS_BIG_ENDIAN instead; see the documentation
3550 of SUBREG in rtl.texi. */
3551 for (i = 0; i < bitsize; i += value_bit)
3552 {
3553 int ibase;
3554 if (WORDS_BIG_ENDIAN)
3555 ibase = bitsize - 1 - i;
3556 else
3557 ibase = i;
3558 *vp++ = tmp[ibase / 32] >> i % 32;
3559 }
3560
3561 /* It shouldn't matter what's done here, so fill it with
3562 zero. */
3563 for (; i < elem_bitsize; i += value_bit)
3564 *vp++ = 0;
3565 }
3566 break;
3567
3568 default:
3569 gcc_unreachable ();
3570 }
3571 }
3572
3573 /* Now, pick the right byte to start with. */
3574 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3575 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3576 will already have offset 0. */
3577 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3578 {
3579 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3580 - byte);
3581 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3582 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3583 byte = (subword_byte % UNITS_PER_WORD
3584 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3585 }
3586
3587 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3588 so if it's become negative it will instead be very large.) */
3589 gcc_assert (byte < GET_MODE_SIZE (innermode));
3590
3591 /* Convert from bytes to chunks of size value_bit. */
3592 value_start = byte * (BITS_PER_UNIT / value_bit);
3593
3594 /* Re-pack the value. */
3595
3596 if (VECTOR_MODE_P (outermode))
3597 {
3598 num_elem = GET_MODE_NUNITS (outermode);
3599 result_v = rtvec_alloc (num_elem);
3600 elems = &RTVEC_ELT (result_v, 0);
3601 outer_submode = GET_MODE_INNER (outermode);
3602 }
3603 else
3604 {
3605 num_elem = 1;
3606 elems = &result_s;
3607 outer_submode = outermode;
3608 }
3609
3610 outer_class = GET_MODE_CLASS (outer_submode);
3611 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3612
3613 gcc_assert (elem_bitsize % value_bit == 0);
3614 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3615
3616 for (elem = 0; elem < num_elem; elem++)
3617 {
3618 unsigned char *vp;
3619
3620 /* Vectors are stored in target memory order. (This is probably
3621 a mistake.) */
3622 {
3623 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3624 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3625 / BITS_PER_UNIT);
3626 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3627 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3628 unsigned bytele = (subword_byte % UNITS_PER_WORD
3629 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3630 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3631 }
3632
3633 switch (outer_class)
3634 {
3635 case MODE_INT:
3636 case MODE_PARTIAL_INT:
3637 {
3638 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3639
3640 for (i = 0;
3641 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3642 i += value_bit)
3643 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3644 for (; i < elem_bitsize; i += value_bit)
3645 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3646 << (i - HOST_BITS_PER_WIDE_INT));
3647
3648 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3649 know why. */
3650 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3651 elems[elem] = gen_int_mode (lo, outer_submode);
3652 else
3653 elems[elem] = immed_double_const (lo, hi, outer_submode);
3654 }
3655 break;
3656
3657 case MODE_FLOAT:
3658 {
3659 REAL_VALUE_TYPE r;
3660 long tmp[max_bitsize / 32];
3661
3662 /* real_from_target wants its input in words affected by
3663 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3664 and use WORDS_BIG_ENDIAN instead; see the documentation
3665 of SUBREG in rtl.texi. */
3666 for (i = 0; i < max_bitsize / 32; i++)
3667 tmp[i] = 0;
3668 for (i = 0; i < elem_bitsize; i += value_bit)
3669 {
3670 int ibase;
3671 if (WORDS_BIG_ENDIAN)
3672 ibase = elem_bitsize - 1 - i;
3673 else
3674 ibase = i;
3675 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3676 }
3677
3678 real_from_target (&r, tmp, outer_submode);
3679 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3680 }
3681 break;
3682
3683 default:
3684 gcc_unreachable ();
3685 }
3686 }
3687 if (VECTOR_MODE_P (outermode))
3688 return gen_rtx_CONST_VECTOR (outermode, result_v);
3689 else
3690 return result_s;
3691 }
3692
3693 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3694 Return 0 if no simplifications are possible. */
3695 rtx
3696 simplify_subreg (enum machine_mode outermode, rtx op,
3697 enum machine_mode innermode, unsigned int byte)
3698 {
3699 /* Little bit of sanity checking. */
3700 gcc_assert (innermode != VOIDmode);
3701 gcc_assert (outermode != VOIDmode);
3702 gcc_assert (innermode != BLKmode);
3703 gcc_assert (outermode != BLKmode);
3704
3705 gcc_assert (GET_MODE (op) == innermode
3706 || GET_MODE (op) == VOIDmode);
3707
3708 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3709 gcc_assert (byte < GET_MODE_SIZE (innermode));
3710
3711 if (outermode == innermode && !byte)
3712 return op;
3713
3714 if (GET_CODE (op) == CONST_INT
3715 || GET_CODE (op) == CONST_DOUBLE
3716 || GET_CODE (op) == CONST_VECTOR)
3717 return simplify_immed_subreg (outermode, op, innermode, byte);
3718
3719 /* Changing mode twice with SUBREG => just change it once,
3720 or not at all if changing back to the starting mode of op. */
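/* For example, on a little-endian target
   (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes
   (subreg:QI (reg:SI R) 0), and a subreg that restores the starting
   mode at offset 0 collapses to (reg:SI R) itself.  */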
3721 if (GET_CODE (op) == SUBREG)
3722 {
3723 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3724 int final_offset = byte + SUBREG_BYTE (op);
3725 rtx newx;
3726
3727 if (outermode == innermostmode
3728 && byte == 0 && SUBREG_BYTE (op) == 0)
3729 return SUBREG_REG (op);
3730
3731 /* The SUBREG_BYTE represents the offset, as if the value were stored
3732 in memory. The irritating exception is the paradoxical subreg, where
3733 we define SUBREG_BYTE to be 0. On big-endian machines, this
3734 value should be negative. For the moment, undo this exception. */
3735 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3736 {
3737 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3738 if (WORDS_BIG_ENDIAN)
3739 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3740 if (BYTES_BIG_ENDIAN)
3741 final_offset += difference % UNITS_PER_WORD;
3742 }
3743 if (SUBREG_BYTE (op) == 0
3744 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3745 {
3746 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3747 if (WORDS_BIG_ENDIAN)
3748 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3749 if (BYTES_BIG_ENDIAN)
3750 final_offset += difference % UNITS_PER_WORD;
3751 }
3752
3753 /* See whether resulting subreg will be paradoxical. */
3754 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3755 {
3756 /* In nonparadoxical subregs we can't handle negative offsets. */
3757 if (final_offset < 0)
3758 return NULL_RTX;
3759 /* Bail out in case resulting subreg would be incorrect. */
3760 if (final_offset % GET_MODE_SIZE (outermode)
3761 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3762 return NULL_RTX;
3763 }
3764 else
3765 {
3766 int offset = 0;
3767 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3768
3769 /* In a paradoxical subreg, see if we are still looking at the lower part.
3770 If so, our SUBREG_BYTE will be 0. */
3771 if (WORDS_BIG_ENDIAN)
3772 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3773 if (BYTES_BIG_ENDIAN)
3774 offset += difference % UNITS_PER_WORD;
3775 if (offset == final_offset)
3776 final_offset = 0;
3777 else
3778 return NULL_RTX;
3779 }
3780
3781 /* Recurse for further possible simplifications. */
3782 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3783 final_offset);
3784 if (newx)
3785 return newx;
3786 if (validate_subreg (outermode, innermostmode,
3787 SUBREG_REG (op), final_offset))
3788 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3789 return NULL_RTX;
3790 }
3791
3792 /* SUBREG of a hard register => just change the register number
3793 and/or mode. If the hard register is not valid in that mode,
3794 suppress this simplification. If the hard register is the stack,
3795 frame, or argument pointer, leave this as a SUBREG. */
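/* Hedged example (assumes a target with 32-bit hard registers where a
   DImode value in (reg:DI 0) occupies the pair 0/1 and SImode is valid in
   both): (subreg:SI (reg:DI 0) 4) would typically fold to (reg:SI 1).  */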
3796
3797 if (REG_P (op)
3798 && REGNO (op) < FIRST_PSEUDO_REGISTER
3799 #ifdef CANNOT_CHANGE_MODE_CLASS
3800 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3801 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3802 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3803 #endif
3804 && ((reload_completed && !frame_pointer_needed)
3805 || (REGNO (op) != FRAME_POINTER_REGNUM
3806 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3807 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3808 #endif
3809 ))
3810 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3811 && REGNO (op) != ARG_POINTER_REGNUM
3812 #endif
3813 && REGNO (op) != STACK_POINTER_REGNUM
3814 && subreg_offset_representable_p (REGNO (op), innermode,
3815 byte, outermode))
3816 {
3817 unsigned int regno = REGNO (op);
3818 unsigned int final_regno
3819 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3820
3821 /* ??? We do allow it if the current REG is not valid for
3822 its mode. This is a kludge to work around how float/complex
3823 arguments are passed on 32-bit SPARC and should be fixed. */
3824 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3825 || ! HARD_REGNO_MODE_OK (regno, innermode))
3826 {
3827 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3828
3829 /* Propagate the original regno. We don't have any way to specify
3830 an offset inside the original regno, so do so only for the lowpart.
3831 The information is used only by alias analysis, which cannot
3832 grok a partial register anyway. */
3833
3834 if (subreg_lowpart_offset (outermode, innermode) == byte)
3835 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3836 return x;
3837 }
3838 }
3839
3840 /* If we have a SUBREG of a register that we are replacing and we are
3841 replacing it with a MEM, make a new MEM and try replacing the
3842 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3843 or if we would be widening it. */
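/* Illustrative example: (subreg:HI (mem:SI A) 2) is narrowed via
   adjust_address_nv to roughly (mem:HI (plus A (const_int 2))), provided
   the address A is not mode-dependent and the access is not being
   widened.  */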
3844
3845 if (MEM_P (op)
3846 && ! mode_dependent_address_p (XEXP (op, 0))
3847 /* Allow splitting of volatile memory references in case we don't
3848 have an instruction to move the whole thing. */
3849 && (! MEM_VOLATILE_P (op)
3850 || ! have_insn_for (SET, innermode))
3851 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3852 return adjust_address_nv (op, outermode, byte);
3853
3854 /* Handle complex values represented as CONCAT
3855 of real and imaginary part. */
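/* For example (assuming 4-byte SFmode): with
   OP = (concat:SC (reg:SF REAL) (reg:SF IMAG)), a BYTE of 4 selects the
   second element, so (subreg:SF OP 4) simplifies to (reg:SF IMAG).  */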
3856 if (GET_CODE (op) == CONCAT)
3857 {
3858 unsigned int inner_size, final_offset;
3859 rtx part, res;
3860
3861 inner_size = GET_MODE_UNIT_SIZE (innermode);
3862 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3863 final_offset = byte % inner_size;
3864 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3865 return NULL_RTX;
3866
3867 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3868 if (res)
3869 return res;
3870 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3871 return gen_rtx_SUBREG (outermode, part, final_offset);
3872 return NULL_RTX;
3873 }
3874
3875 /* Optimize SUBREG truncations of zero and sign extended values. */
3876 if ((GET_CODE (op) == ZERO_EXTEND
3877 || GET_CODE (op) == SIGN_EXTEND)
3878 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3879 {
3880 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3881
3882 /* If we're requesting the lowpart of a zero or sign extension,
3883 there are three possibilities. If the outermode is the same
3884 as the origmode, we can omit both the extension and the subreg.
3885 If the outermode is not larger than the origmode, we can apply
3886 the truncation without the extension. Finally, if the outermode
3887 is larger than the origmode, but both are integer modes, we
3888 can just extend to the appropriate mode. */
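/* Illustrative examples (assuming a little-endian lowpart at byte 0):
     (subreg:HI (zero_extend:SI (reg:HI X)) 0) -> (reg:HI X)
     (subreg:QI (zero_extend:SI (reg:HI X)) 0) -> (subreg:QI (reg:HI X) 0)
     (subreg:SI (zero_extend:DI (reg:QI X)) 0) -> (zero_extend:SI (reg:QI X))  */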
3889 if (bitpos == 0)
3890 {
3891 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3892 if (outermode == origmode)
3893 return XEXP (op, 0);
3894 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3895 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3896 subreg_lowpart_offset (outermode,
3897 origmode));
3898 if (SCALAR_INT_MODE_P (outermode))
3899 return simplify_gen_unary (GET_CODE (op), outermode,
3900 XEXP (op, 0), origmode);
3901 }
3902
3903 /* A SUBREG resulting from a zero extension may fold to zero if
3904 it extracts higher bits than the ZERO_EXTEND's source provides. */
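/* E.g. on a little-endian target, (subreg:SI (zero_extend:DI (reg:SI X)) 4)
   reads only the zero-filled upper half and folds to (const_int 0).  */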
3905 if (GET_CODE (op) == ZERO_EXTEND
3906 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3907 return CONST0_RTX (outermode);
3908 }
3909
3910 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
3911 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3912 the outer subreg is effectively a truncation to the original mode. */
3913 if ((GET_CODE (op) == LSHIFTRT
3914 || GET_CODE (op) == ASHIFTRT)
3915 && SCALAR_INT_MODE_P (outermode)
3916 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
3917 to avoid the possibility that an outer LSHIFTRT shifts by more
3918 than the sign extension's sign_bit_copies and introduces zeros
3919 into the high bits of the result. */
3920 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3921 && GET_CODE (XEXP (op, 1)) == CONST_INT
3922 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3923 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3924 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3925 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3926 return simplify_gen_binary (ASHIFTRT, outermode,
3927 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3928
3929 /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
3930 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3931 the outer subreg is effectively a truncation to the original mode. */
3932 if ((GET_CODE (op) == LSHIFTRT
3933 || GET_CODE (op) == ASHIFTRT)
3934 && SCALAR_INT_MODE_P (outermode)
3935 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3936 && GET_CODE (XEXP (op, 1)) == CONST_INT
3937 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3938 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3939 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3940 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3941 return simplify_gen_binary (LSHIFTRT, outermode,
3942 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3943
3944 /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
3945 to (ashift:QI (x:QI) C), where C is a suitable small constant and
3946 the outer subreg is effectively a truncation to the original mode. */
3947 if (GET_CODE (op) == ASHIFT
3948 && SCALAR_INT_MODE_P (outermode)
3949 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3950 && GET_CODE (XEXP (op, 1)) == CONST_INT
3951 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3952 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
3953 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3954 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3955 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3956 return simplify_gen_binary (ASHIFT, outermode,
3957 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3958
3959 return NULL_RTX;
3960 }
3961
3962 /* Make a SUBREG operation or equivalent if it folds. */
3963
3964 rtx
3965 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3966 enum machine_mode innermode, unsigned int byte)
3967 {
3968 rtx newx;
3969
3970 newx = simplify_subreg (outermode, op, innermode, byte);
3971 if (newx)
3972 return newx;
3973
3974 if (GET_CODE (op) == SUBREG
3975 || GET_CODE (op) == CONCAT
3976 || GET_MODE (op) == VOIDmode)
3977 return NULL_RTX;
3978
3979 if (validate_subreg (outermode, innermode, op, byte))
3980 return gen_rtx_SUBREG (outermode, op, byte);
3981
3982 return NULL_RTX;
3983 }
3984
3985 /* Simplify X, an rtx expression.
3986
3987 Return the simplified expression or NULL if no simplifications
3988 were possible.
3989
3990 This is the preferred entry point into the simplification routines;
3991 however, we still allow passes to call the more specific routines.
3992
3993 Right now GCC has three (yes, three) major bodies of RTL simplification
3994 code that need to be unified.
3995
3996 1. fold_rtx in cse.c. This code uses various CSE specific
3997 information to aid in RTL simplification.
3998
3999 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4000 it uses combine specific information to aid in RTL
4001 simplification.
4002
4003 3. The routines in this file.
4004
4005
4006 Long term we want to only have one body of simplification code; to
4007 get to that state I recommend the following steps:
4008
4009 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4010 that do not depend on pass-specific state into these routines.
4011
4012 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4013 use this routine whenever possible.
4014
4015 3. Allow for pass dependent state to be provided to these
4016 routines and add simplifications based on the pass dependent
4017 state. Remove code from cse.c & combine.c that becomes
4018 redundant/dead.
4019
4020 It will take time, but ultimately the compiler will be easier to
4021 maintain and improve. It's totally silly that when we add a
4022 simplification it needs to be added to 4 places (3 for RTL
4023 simplification and 1 for tree simplification). */
4024
4025 rtx
4026 simplify_rtx (rtx x)
4027 {
4028 enum rtx_code code = GET_CODE (x);
4029 enum machine_mode mode = GET_MODE (x);
4030
4031 switch (GET_RTX_CLASS (code))
4032 {
4033 case RTX_UNARY:
4034 return simplify_unary_operation (code, mode,
4035 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4036 case RTX_COMM_ARITH:
4037 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4038 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4039
4040 /* Fall through.... */
4041
4042 case RTX_BIN_ARITH:
4043 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4044
4045 case RTX_TERNARY:
4046 case RTX_BITFIELD_OPS:
4047 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4048 XEXP (x, 0), XEXP (x, 1),
4049 XEXP (x, 2));
4050
4051 case RTX_COMPARE:
4052 case RTX_COMM_COMPARE:
4053 return simplify_relational_operation (code, mode,
4054 ((GET_MODE (XEXP (x, 0))
4055 != VOIDmode)
4056 ? GET_MODE (XEXP (x, 0))
4057 : GET_MODE (XEXP (x, 1))),
4058 XEXP (x, 0),
4059 XEXP (x, 1));
4060
4061 case RTX_EXTRA:
4062 if (code == SUBREG)
4063 return simplify_gen_subreg (mode, SUBREG_REG (x),
4064 GET_MODE (SUBREG_REG (x)),
4065 SUBREG_BYTE (x));
4066 break;
4067
4068 case RTX_OBJ:
4069 if (code == LO_SUM)
4070 {
4071 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4072 if (GET_CODE (XEXP (x, 0)) == HIGH
4073 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4074 return XEXP (x, 1);
4075 }
4076 break;
4077
4078 default:
4079 break;
4080 }
4081 return NULL;
4082 }