Add new fp flags: -fassociative-math and -freciprocal-math
[gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
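/* For illustration (assuming a 64-bit HOST_WIDE_INT): a low word of
   0x8000000000000000 has its top bit set, so HWI_SIGN_EXTEND yields
   (HOST_WIDE_INT) -1 as the high word of the sign-extended pair,
   whereas a low word of 5 yields 0.  */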
51
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
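/* A rough example of why the truncation matters: negating (const_int -128)
   in QImode would give +128, which does not fit in a signed 8-bit value;
   gen_int_mode truncates it back, so the result is again (const_int -128).  */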
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
76
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
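/* For example, on a host with a 64-bit HOST_WIDE_INT, mode_signbit_p
   (SImode, x) holds only when x is a CONST_INT whose low 32 bits are
   0x80000000, i.e. exactly the SImode sign bit.  */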
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
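/* A small usage sketch: for an integral mode,
   simplify_gen_binary (PLUS, SImode, x, const0_rtx) returns X unchanged
   via simplify_binary_operation, while a commutative expression whose
   first operand is the constant is merely reordered so that the constant
   ends up second in the rtx that is built.  */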
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 if (GET_MODE (x) == BLKmode)
162 return x;
163
164 addr = XEXP (x, 0);
165
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
168
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
173 {
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
176 }
177
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
180
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
185 {
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
188
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
193 {
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
197 }
198 else
199 return c;
200 }
201
202 return x;
203 }
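/* For illustration: if X is (mem:DF addr) and the address resolves to a
   constant-pool SYMBOL_REF holding a DFmode constant, the pooled
   CONST_DOUBLE is returned directly.  If the access mode or offset
   differs, a constant is returned only when simplify_subreg can produce
   one; otherwise the original MEM is kept.  */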
204 \f
205 /* Make a unary operation by first seeing if it folds and otherwise making
206 the specified operation. */
207
208 rtx
209 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
210 enum machine_mode op_mode)
211 {
212 rtx tem;
213
214 /* If this simplifies, use it. */
215 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
216 return tem;
217
218 return gen_rtx_fmt_e (code, mode, op);
219 }
220
221 /* Likewise for ternary operations. */
222
223 rtx
224 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
225 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
226 {
227 rtx tem;
228
229 /* If this simplifies, use it. */
230 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
231 op0, op1, op2)))
232 return tem;
233
234 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
235 }
236
237 /* Likewise, for relational operations.
238 CMP_MODE specifies mode comparison is done in. */
239
240 rtx
241 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
242 enum machine_mode cmp_mode, rtx op0, rtx op1)
243 {
244 rtx tem;
245
246 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
247 op0, op1)))
248 return tem;
249
250 return gen_rtx_fmt_ee (code, mode, op0, op1);
251 }
252 \f
253 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
254 resulting RTX. Return a new RTX which is as simplified as possible. */
255
256 rtx
257 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
258 {
259 enum rtx_code code = GET_CODE (x);
260 enum machine_mode mode = GET_MODE (x);
261 enum machine_mode op_mode;
262 rtx op0, op1, op2;
263
264 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
265 to build a new expression substituting recursively. If we can't do
266 anything, return our input. */
267
268 if (x == old_rtx)
269 return new_rtx;
270
271 switch (GET_RTX_CLASS (code))
272 {
273 case RTX_UNARY:
274 op0 = XEXP (x, 0);
275 op_mode = GET_MODE (op0);
276 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
277 if (op0 == XEXP (x, 0))
278 return x;
279 return simplify_gen_unary (code, mode, op0, op_mode);
280
281 case RTX_BIN_ARITH:
282 case RTX_COMM_ARITH:
283 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
284 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
286 return x;
287 return simplify_gen_binary (code, mode, op0, op1);
288
289 case RTX_COMPARE:
290 case RTX_COMM_COMPARE:
291 op0 = XEXP (x, 0);
292 op1 = XEXP (x, 1);
293 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
296 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
297 return x;
298 return simplify_gen_relational (code, mode, op_mode, op0, op1);
299
300 case RTX_TERNARY:
301 case RTX_BITFIELD_OPS:
302 op0 = XEXP (x, 0);
303 op_mode = GET_MODE (op0);
304 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
305 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
306 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
307 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
308 return x;
309 if (op_mode == VOIDmode)
310 op_mode = GET_MODE (op0);
311 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
312
313 case RTX_EXTRA:
314 /* The only case we try to handle is a SUBREG. */
315 if (code == SUBREG)
316 {
317 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
318 if (op0 == SUBREG_REG (x))
319 return x;
320 op0 = simplify_gen_subreg (GET_MODE (x), op0,
321 GET_MODE (SUBREG_REG (x)),
322 SUBREG_BYTE (x));
323 return op0 ? op0 : x;
324 }
325 break;
326
327 case RTX_OBJ:
328 if (code == MEM)
329 {
330 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
331 if (op0 == XEXP (x, 0))
332 return x;
333 return replace_equiv_address_nv (x, op0);
334 }
335 else if (code == LO_SUM)
336 {
337 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
338 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
339
340 /* (lo_sum (high x) x) -> x */
341 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
342 return op1;
343
344 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
345 return x;
346 return gen_rtx_LO_SUM (mode, op0, op1);
347 }
348 else if (code == REG)
349 {
350 if (rtx_equal_p (x, old_rtx))
351 return new_rtx;
352 }
353 break;
354
355 default:
356 break;
357 }
358 return x;
359 }
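/* For example, replacing (reg R) with (const_int 4) in
   (plus:SI (reg R) (const_int 1)) rebuilds the PLUS through
   simplify_gen_binary and therefore yields (const_int 5).  */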
360 \f
361 /* Try to simplify a unary operation CODE whose output mode is to be
362 MODE with input operand OP whose mode was originally OP_MODE.
363 Return zero if no simplification can be made. */
364 rtx
365 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
366 rtx op, enum machine_mode op_mode)
367 {
368 rtx trueop, tem;
369
370 if (GET_CODE (op) == CONST)
371 op = XEXP (op, 0);
372
373 trueop = avoid_constant_pool_reference (op);
374
375 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
376 if (tem)
377 return tem;
378
379 return simplify_unary_operation_1 (code, mode, op);
380 }
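/* For instance, (neg:SI (const_int 5)) is folded to (const_int -5) by
   simplify_const_unary_operation, while a non-constant case such as
   (not (not X)) is handled by simplify_unary_operation_1 below and
   simplifies to X.  */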
381
382 /* Perform some simplifications we can do even if the operands
383 aren't constant. */
384 static rtx
385 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
386 {
387 enum rtx_code reversed;
388 rtx temp;
389
390 switch (code)
391 {
392 case NOT:
393 /* (not (not X)) == X. */
394 if (GET_CODE (op) == NOT)
395 return XEXP (op, 0);
396
397 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
398 comparison is all ones. */
399 if (COMPARISON_P (op)
400 && (mode == BImode || STORE_FLAG_VALUE == -1)
401 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
402 return simplify_gen_relational (reversed, mode, VOIDmode,
403 XEXP (op, 0), XEXP (op, 1));
404
405 /* (not (plus X -1)) can become (neg X). */
406 if (GET_CODE (op) == PLUS
407 && XEXP (op, 1) == constm1_rtx)
408 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
409
410 /* Similarly, (not (neg X)) is (plus X -1). */
411 if (GET_CODE (op) == NEG)
412 return plus_constant (XEXP (op, 0), -1);
413
414 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
415 if (GET_CODE (op) == XOR
416 && GET_CODE (XEXP (op, 1)) == CONST_INT
417 && (temp = simplify_unary_operation (NOT, mode,
418 XEXP (op, 1), mode)) != 0)
419 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
420
421 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
422 if (GET_CODE (op) == PLUS
423 && GET_CODE (XEXP (op, 1)) == CONST_INT
424 && mode_signbit_p (mode, XEXP (op, 1))
425 && (temp = simplify_unary_operation (NOT, mode,
426 XEXP (op, 1), mode)) != 0)
427 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
428
429
430 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
431 operands other than 1, but that is not valid. We could do a
432 similar simplification for (not (lshiftrt C X)) where C is
433 just the sign bit, but this doesn't seem common enough to
434 bother with. */
435 if (GET_CODE (op) == ASHIFT
436 && XEXP (op, 0) == const1_rtx)
437 {
438 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
439 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
440 }
441
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
445
446 if (STORE_FLAG_VALUE == -1
447 && GET_CODE (op) == ASHIFTRT
448 && GET_CODE (XEXP (op, 1)) == CONST_INT
449 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
450 return simplify_gen_relational (GE, mode, VOIDmode,
451 XEXP (op, 0), const0_rtx);
452
453
454 if (GET_CODE (op) == SUBREG
455 && subreg_lowpart_p (op)
456 && (GET_MODE_SIZE (GET_MODE (op))
457 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
458 && GET_CODE (SUBREG_REG (op)) == ASHIFT
459 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
460 {
461 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
462 rtx x;
463
464 x = gen_rtx_ROTATE (inner_mode,
465 simplify_gen_unary (NOT, inner_mode, const1_rtx,
466 inner_mode),
467 XEXP (SUBREG_REG (op), 1));
468 return rtl_hooks.gen_lowpart_no_emit (mode, x);
469 }
470
471 /* Apply De Morgan's laws to reduce number of patterns for machines
472 with negating logical insns (and-not, nand, etc.). If result has
473 only one NOT, put it first, since that is how the patterns are
474 coded. */
475
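/* For example, (not (and X Y)) becomes (ior (not X) (not Y)) here,
   and (not (ior X Y)) becomes (and (not X) (not Y)).  */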
476 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
477 {
478 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
479 enum machine_mode op_mode;
480
481 op_mode = GET_MODE (in1);
482 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
483
484 op_mode = GET_MODE (in2);
485 if (op_mode == VOIDmode)
486 op_mode = mode;
487 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
488
489 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
490 {
491 rtx tem = in2;
492 in2 = in1; in1 = tem;
493 }
494
495 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
496 mode, in1, in2);
497 }
498 break;
499
500 case NEG:
501 /* (neg (neg X)) == X. */
502 if (GET_CODE (op) == NEG)
503 return XEXP (op, 0);
504
505 /* (neg (plus X 1)) can become (not X). */
506 if (GET_CODE (op) == PLUS
507 && XEXP (op, 1) == const1_rtx)
508 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
509
510 /* Similarly, (neg (not X)) is (plus X 1). */
511 if (GET_CODE (op) == NOT)
512 return plus_constant (XEXP (op, 0), 1);
513
514 /* (neg (minus X Y)) can become (minus Y X). This transformation
515 isn't safe for modes with signed zeros, since if X and Y are
516 both +0, (minus Y X) is the same as (minus X Y). If the
517 rounding mode is towards +infinity (or -infinity) then the two
518 expressions will be rounded differently. */
519 if (GET_CODE (op) == MINUS
520 && !HONOR_SIGNED_ZEROS (mode)
521 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
522 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
523
524 if (GET_CODE (op) == PLUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 {
528 /* (neg (plus A C)) is simplified to (minus -C A). */
529 if (GET_CODE (XEXP (op, 1)) == CONST_INT
530 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
531 {
532 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
533 if (temp)
534 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
535 }
536
537 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
538 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
540 }
541
542 /* (neg (mult A B)) becomes (mult (neg A) B).
543 This works even for floating-point values. */
544 if (GET_CODE (op) == MULT
545 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
546 {
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
549 }
550
551 /* NEG commutes with ASHIFT since it is multiplication. Only do
552 this if we can then eliminate the NEG (e.g., if the operand
553 is a constant). */
554 if (GET_CODE (op) == ASHIFT)
555 {
556 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
557 if (temp)
558 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
559 }
560
561 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
562 C is equal to the width of MODE minus 1. */
563 if (GET_CODE (op) == ASHIFTRT
564 && GET_CODE (XEXP (op, 1)) == CONST_INT
565 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
566 return simplify_gen_binary (LSHIFTRT, mode,
567 XEXP (op, 0), XEXP (op, 1));
568
569 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
570 C is equal to the width of MODE minus 1. */
571 if (GET_CODE (op) == LSHIFTRT
572 && GET_CODE (XEXP (op, 1)) == CONST_INT
573 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
574 return simplify_gen_binary (ASHIFTRT, mode,
575 XEXP (op, 0), XEXP (op, 1));
576
577 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
578 if (GET_CODE (op) == XOR
579 && XEXP (op, 1) == const1_rtx
580 && nonzero_bits (XEXP (op, 0), mode) == 1)
581 return plus_constant (XEXP (op, 0), -1);
582
583 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
584 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
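/* E.g. on a target where STORE_FLAG_VALUE is 1,
   (neg:SI (lt:SI X (const_int 0))) becomes
   (ashiftrt:SI X (const_int 31)).  */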
585 if (GET_CODE (op) == LT
586 && XEXP (op, 1) == const0_rtx
587 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
588 {
589 enum machine_mode inner = GET_MODE (XEXP (op, 0));
590 int isize = GET_MODE_BITSIZE (inner);
591 if (STORE_FLAG_VALUE == 1)
592 {
593 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
594 GEN_INT (isize - 1));
595 if (mode == inner)
596 return temp;
597 if (GET_MODE_BITSIZE (mode) > isize)
598 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
599 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
600 }
601 else if (STORE_FLAG_VALUE == -1)
602 {
603 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
604 GEN_INT (isize - 1));
605 if (mode == inner)
606 return temp;
607 if (GET_MODE_BITSIZE (mode) > isize)
608 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
609 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
610 }
611 }
612 break;
613
614 case TRUNCATE:
615 /* We can't handle truncation to a partial integer mode here
616 because we don't know the real bitsize of the partial
617 integer mode. */
618 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
619 break;
620
621 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
622 if ((GET_CODE (op) == SIGN_EXTEND
623 || GET_CODE (op) == ZERO_EXTEND)
624 && GET_MODE (XEXP (op, 0)) == mode)
625 return XEXP (op, 0);
626
627 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
628 (OP:SI foo:SI) if OP is NEG or ABS. */
629 if ((GET_CODE (op) == ABS
630 || GET_CODE (op) == NEG)
631 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
632 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
633 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
634 return simplify_gen_unary (GET_CODE (op), mode,
635 XEXP (XEXP (op, 0), 0), mode);
636
637 /* (truncate:A (subreg:B (truncate:C X) 0)) is
638 (truncate:A X). */
639 if (GET_CODE (op) == SUBREG
640 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
641 && subreg_lowpart_p (op))
642 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
643 GET_MODE (XEXP (SUBREG_REG (op), 0)));
644
645 /* If we know that the value is already truncated, we can
646 replace the TRUNCATE with a SUBREG. Note that this is also
647 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
648 modes we just have to apply a different definition for
649 truncation. But don't do this for an (LSHIFTRT (MULT ...))
650 since this will cause problems with the umulXi3_highpart
651 patterns. */
652 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
653 GET_MODE_BITSIZE (GET_MODE (op)))
654 ? (num_sign_bit_copies (op, GET_MODE (op))
655 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
656 - GET_MODE_BITSIZE (mode)))
657 : truncated_to_mode (mode, op))
658 && ! (GET_CODE (op) == LSHIFTRT
659 && GET_CODE (XEXP (op, 0)) == MULT))
660 return rtl_hooks.gen_lowpart_no_emit (mode, op);
661
662 /* A truncate of a comparison can be replaced with a subreg if
663 STORE_FLAG_VALUE permits. This is like the previous test,
664 but it works even if the comparison is done in a mode larger
665 than HOST_BITS_PER_WIDE_INT. */
666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
667 && COMPARISON_P (op)
668 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
669 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 break;
671
672 case FLOAT_TRUNCATE:
673 if (DECIMAL_FLOAT_MODE_P (mode))
674 break;
675
676 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
677 if (GET_CODE (op) == FLOAT_EXTEND
678 && GET_MODE (XEXP (op, 0)) == mode)
679 return XEXP (op, 0);
680
681 /* (float_truncate:SF (float_truncate:DF foo:XF))
682 = (float_truncate:SF foo:XF).
683 This may eliminate double rounding, so it is unsafe.
684
685 (float_truncate:SF (float_extend:XF foo:DF))
686 = (float_truncate:SF foo:DF).
687
688 (float_truncate:DF (float_extend:XF foo:SF))
 689 	 = (float_extend:DF foo:SF).  */
690 if ((GET_CODE (op) == FLOAT_TRUNCATE
691 && flag_unsafe_math_optimizations)
692 || GET_CODE (op) == FLOAT_EXTEND)
693 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
694 0)))
695 > GET_MODE_SIZE (mode)
696 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
697 mode,
698 XEXP (op, 0), mode);
699
700 /* (float_truncate (float x)) is (float x) */
701 if (GET_CODE (op) == FLOAT
702 && (flag_unsafe_math_optimizations
703 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
704 && ((unsigned)significand_size (GET_MODE (op))
705 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
706 - num_sign_bit_copies (XEXP (op, 0),
707 GET_MODE (XEXP (op, 0))))))))
708 return simplify_gen_unary (FLOAT, mode,
709 XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)));
711
712 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
713 (OP:SF foo:SF) if OP is NEG or ABS. */
714 if ((GET_CODE (op) == ABS
715 || GET_CODE (op) == NEG)
716 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
718 return simplify_gen_unary (GET_CODE (op), mode,
719 XEXP (XEXP (op, 0), 0), mode);
720
721 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
722 is (float_truncate:SF x). */
723 if (GET_CODE (op) == SUBREG
724 && subreg_lowpart_p (op)
725 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
726 return SUBREG_REG (op);
727 break;
728
729 case FLOAT_EXTEND:
730 if (DECIMAL_FLOAT_MODE_P (mode))
731 break;
732
733 /* (float_extend (float_extend x)) is (float_extend x)
734
735 (float_extend (float x)) is (float x) assuming that double
736 rounding can't happen.
737 */
738 if (GET_CODE (op) == FLOAT_EXTEND
739 || (GET_CODE (op) == FLOAT
740 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
741 && ((unsigned)significand_size (GET_MODE (op))
742 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
743 - num_sign_bit_copies (XEXP (op, 0),
744 GET_MODE (XEXP (op, 0)))))))
745 return simplify_gen_unary (GET_CODE (op), mode,
746 XEXP (op, 0),
747 GET_MODE (XEXP (op, 0)));
748
749 break;
750
751 case ABS:
752 /* (abs (neg <foo>)) -> (abs <foo>) */
753 if (GET_CODE (op) == NEG)
754 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
755 GET_MODE (XEXP (op, 0)));
756
757 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
758 do nothing. */
759 if (GET_MODE (op) == VOIDmode)
760 break;
761
762 /* If operand is something known to be positive, ignore the ABS. */
763 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
764 || ((GET_MODE_BITSIZE (GET_MODE (op))
765 <= HOST_BITS_PER_WIDE_INT)
766 && ((nonzero_bits (op, GET_MODE (op))
767 & ((HOST_WIDE_INT) 1
768 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
769 == 0)))
770 return op;
771
772 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
773 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
774 return gen_rtx_NEG (mode, op);
775
776 break;
777
778 case FFS:
779 /* (ffs (*_extend <X>)) = (ffs <X>) */
780 if (GET_CODE (op) == SIGN_EXTEND
781 || GET_CODE (op) == ZERO_EXTEND)
782 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
783 GET_MODE (XEXP (op, 0)));
784 break;
785
786 case POPCOUNT:
787 switch (GET_CODE (op))
788 {
789 case BSWAP:
790 case ZERO_EXTEND:
791 /* (popcount (zero_extend <X>)) = (popcount <X>) */
792 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
794
795 case ROTATE:
796 case ROTATERT:
797 /* Rotations don't affect popcount. */
798 if (!side_effects_p (XEXP (op, 1)))
799 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
802
803 default:
804 break;
805 }
806 break;
807
808 case PARITY:
809 switch (GET_CODE (op))
810 {
811 case NOT:
812 case BSWAP:
813 case ZERO_EXTEND:
814 case SIGN_EXTEND:
815 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
816 GET_MODE (XEXP (op, 0)));
817
818 case ROTATE:
819 case ROTATERT:
820 /* Rotations don't affect parity. */
821 if (!side_effects_p (XEXP (op, 1)))
822 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
823 GET_MODE (XEXP (op, 0)));
824 break;
825
826 default:
827 break;
828 }
829 break;
830
831 case BSWAP:
832 /* (bswap (bswap x)) -> x. */
833 if (GET_CODE (op) == BSWAP)
834 return XEXP (op, 0);
835 break;
836
837 case FLOAT:
838 /* (float (sign_extend <X>)) = (float <X>). */
839 if (GET_CODE (op) == SIGN_EXTEND)
840 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
841 GET_MODE (XEXP (op, 0)));
842 break;
843
844 case SIGN_EXTEND:
845 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
846 becomes just the MINUS if its mode is MODE. This allows
847 folding switch statements on machines using casesi (such as
848 the VAX). */
849 if (GET_CODE (op) == TRUNCATE
850 && GET_MODE (XEXP (op, 0)) == mode
851 && GET_CODE (XEXP (op, 0)) == MINUS
852 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
853 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
854 return XEXP (op, 0);
855
856 /* Check for a sign extension of a subreg of a promoted
857 variable, where the promotion is sign-extended, and the
858 target mode is the same as the variable's promotion. */
859 if (GET_CODE (op) == SUBREG
860 && SUBREG_PROMOTED_VAR_P (op)
861 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
862 && GET_MODE (XEXP (op, 0)) == mode)
863 return XEXP (op, 0);
864
865 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
866 if (! POINTERS_EXTEND_UNSIGNED
867 && mode == Pmode && GET_MODE (op) == ptr_mode
868 && (CONSTANT_P (op)
869 || (GET_CODE (op) == SUBREG
870 && REG_P (SUBREG_REG (op))
871 && REG_POINTER (SUBREG_REG (op))
872 && GET_MODE (SUBREG_REG (op)) == Pmode)))
873 return convert_memory_address (Pmode, op);
874 #endif
875 break;
876
877 case ZERO_EXTEND:
878 /* Check for a zero extension of a subreg of a promoted
879 variable, where the promotion is zero-extended, and the
880 target mode is the same as the variable's promotion. */
881 if (GET_CODE (op) == SUBREG
882 && SUBREG_PROMOTED_VAR_P (op)
883 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
884 && GET_MODE (XEXP (op, 0)) == mode)
885 return XEXP (op, 0);
886
887 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
888 if (POINTERS_EXTEND_UNSIGNED > 0
889 && mode == Pmode && GET_MODE (op) == ptr_mode
890 && (CONSTANT_P (op)
891 || (GET_CODE (op) == SUBREG
892 && REG_P (SUBREG_REG (op))
893 && REG_POINTER (SUBREG_REG (op))
894 && GET_MODE (SUBREG_REG (op)) == Pmode)))
895 return convert_memory_address (Pmode, op);
896 #endif
897 break;
898
899 default:
900 break;
901 }
902
903 return 0;
904 }
905
906 /* Try to compute the value of a unary operation CODE whose output mode is to
907 be MODE with input operand OP whose mode was originally OP_MODE.
908 Return zero if the value cannot be computed. */
909 rtx
910 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
911 rtx op, enum machine_mode op_mode)
912 {
913 unsigned int width = GET_MODE_BITSIZE (mode);
914
915 if (code == VEC_DUPLICATE)
916 {
917 gcc_assert (VECTOR_MODE_P (mode));
918 if (GET_MODE (op) != VOIDmode)
919 {
920 if (!VECTOR_MODE_P (GET_MODE (op)))
921 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
922 else
923 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
924 (GET_MODE (op)));
925 }
926 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
927 || GET_CODE (op) == CONST_VECTOR)
928 {
929 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
930 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
931 rtvec v = rtvec_alloc (n_elts);
932 unsigned int i;
933
934 if (GET_CODE (op) != CONST_VECTOR)
935 for (i = 0; i < n_elts; i++)
936 RTVEC_ELT (v, i) = op;
937 else
938 {
939 enum machine_mode inmode = GET_MODE (op);
940 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
941 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
942
943 gcc_assert (in_n_elts < n_elts);
944 gcc_assert ((n_elts % in_n_elts) == 0);
945 for (i = 0; i < n_elts; i++)
946 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
947 }
948 return gen_rtx_CONST_VECTOR (mode, v);
949 }
950 }
951
952 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
953 {
954 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
955 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
956 enum machine_mode opmode = GET_MODE (op);
957 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
958 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
959 rtvec v = rtvec_alloc (n_elts);
960 unsigned int i;
961
962 gcc_assert (op_n_elts == n_elts);
963 for (i = 0; i < n_elts; i++)
964 {
965 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
966 CONST_VECTOR_ELT (op, i),
967 GET_MODE_INNER (opmode));
968 if (!x)
969 return 0;
970 RTVEC_ELT (v, i) = x;
971 }
972 return gen_rtx_CONST_VECTOR (mode, v);
973 }
974
975 /* The order of these tests is critical so that, for example, we don't
976 check the wrong mode (input vs. output) for a conversion operation,
977 such as FIX. At some point, this should be simplified. */
978
979 if (code == FLOAT && GET_MODE (op) == VOIDmode
980 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
981 {
982 HOST_WIDE_INT hv, lv;
983 REAL_VALUE_TYPE d;
984
985 if (GET_CODE (op) == CONST_INT)
986 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
987 else
988 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
989
990 REAL_VALUE_FROM_INT (d, lv, hv, mode);
991 d = real_value_truncate (mode, d);
992 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
993 }
994 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
995 && (GET_CODE (op) == CONST_DOUBLE
996 || GET_CODE (op) == CONST_INT))
997 {
998 HOST_WIDE_INT hv, lv;
999 REAL_VALUE_TYPE d;
1000
1001 if (GET_CODE (op) == CONST_INT)
1002 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1003 else
1004 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1005
1006 if (op_mode == VOIDmode)
1007 {
1008 /* We don't know how to interpret negative-looking numbers in
1009 this case, so don't try to fold those. */
1010 if (hv < 0)
1011 return 0;
1012 }
1013 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1014 ;
1015 else
1016 hv = 0, lv &= GET_MODE_MASK (op_mode);
1017
1018 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1019 d = real_value_truncate (mode, d);
1020 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1021 }
1022
1023 if (GET_CODE (op) == CONST_INT
1024 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1025 {
1026 HOST_WIDE_INT arg0 = INTVAL (op);
1027 HOST_WIDE_INT val;
1028
1029 switch (code)
1030 {
1031 case NOT:
1032 val = ~ arg0;
1033 break;
1034
1035 case NEG:
1036 val = - arg0;
1037 break;
1038
1039 case ABS:
1040 val = (arg0 >= 0 ? arg0 : - arg0);
1041 break;
1042
1043 case FFS:
1044 /* Don't use ffs here. Instead, get low order bit and then its
1045 number. If arg0 is zero, this will return 0, as desired. */
1046 arg0 &= GET_MODE_MASK (mode);
1047 val = exact_log2 (arg0 & (- arg0)) + 1;
1048 break;
1049
1050 case CLZ:
1051 arg0 &= GET_MODE_MASK (mode);
1052 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1053 ;
1054 else
1055 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1056 break;
1057
1058 case CTZ:
1059 arg0 &= GET_MODE_MASK (mode);
1060 if (arg0 == 0)
1061 {
1062 /* Even if the value at zero is undefined, we have to come
1063 up with some replacement. Seems good enough. */
1064 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1065 val = GET_MODE_BITSIZE (mode);
1066 }
1067 else
1068 val = exact_log2 (arg0 & -arg0);
1069 break;
1070
1071 case POPCOUNT:
1072 arg0 &= GET_MODE_MASK (mode);
1073 val = 0;
1074 while (arg0)
1075 val++, arg0 &= arg0 - 1;
1076 break;
1077
1078 case PARITY:
1079 arg0 &= GET_MODE_MASK (mode);
1080 val = 0;
1081 while (arg0)
1082 val++, arg0 &= arg0 - 1;
1083 val &= 1;
1084 break;
1085
1086 case BSWAP:
1087 {
1088 unsigned int s;
1089
1090 val = 0;
1091 for (s = 0; s < width; s += 8)
1092 {
1093 unsigned int d = width - s - 8;
1094 unsigned HOST_WIDE_INT byte;
1095 byte = (arg0 >> s) & 0xff;
1096 val |= byte << d;
1097 }
1098 }
1099 break;
1100
1101 case TRUNCATE:
1102 val = arg0;
1103 break;
1104
1105 case ZERO_EXTEND:
1106 /* When zero-extending a CONST_INT, we need to know its
1107 original mode. */
1108 gcc_assert (op_mode != VOIDmode);
1109 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1110 {
1111 /* If we were really extending the mode,
1112 we would have to distinguish between zero-extension
1113 and sign-extension. */
1114 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1115 val = arg0;
1116 }
1117 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1118 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1119 else
1120 return 0;
1121 break;
1122
1123 case SIGN_EXTEND:
1124 if (op_mode == VOIDmode)
1125 op_mode = mode;
1126 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1127 {
1128 /* If we were really extending the mode,
1129 we would have to distinguish between zero-extension
1130 and sign-extension. */
1131 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1132 val = arg0;
1133 }
1134 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1135 {
1136 val
1137 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1138 if (val
1139 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1140 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1141 }
1142 else
1143 return 0;
1144 break;
1145
1146 case SQRT:
1147 case FLOAT_EXTEND:
1148 case FLOAT_TRUNCATE:
1149 case SS_TRUNCATE:
1150 case US_TRUNCATE:
1151 case SS_NEG:
1152 case US_NEG:
1153 return 0;
1154
1155 default:
1156 gcc_unreachable ();
1157 }
1158
1159 return gen_int_mode (val, mode);
1160 }
1161
1162 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1163 for a DImode operation on a CONST_INT. */
1164 else if (GET_MODE (op) == VOIDmode
1165 && width <= HOST_BITS_PER_WIDE_INT * 2
1166 && (GET_CODE (op) == CONST_DOUBLE
1167 || GET_CODE (op) == CONST_INT))
1168 {
1169 unsigned HOST_WIDE_INT l1, lv;
1170 HOST_WIDE_INT h1, hv;
1171
1172 if (GET_CODE (op) == CONST_DOUBLE)
1173 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1174 else
1175 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1176
1177 switch (code)
1178 {
1179 case NOT:
1180 lv = ~ l1;
1181 hv = ~ h1;
1182 break;
1183
1184 case NEG:
1185 neg_double (l1, h1, &lv, &hv);
1186 break;
1187
1188 case ABS:
1189 if (h1 < 0)
1190 neg_double (l1, h1, &lv, &hv);
1191 else
1192 lv = l1, hv = h1;
1193 break;
1194
1195 case FFS:
1196 hv = 0;
1197 if (l1 == 0)
1198 {
1199 if (h1 == 0)
1200 lv = 0;
1201 else
1202 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1203 }
1204 else
1205 lv = exact_log2 (l1 & -l1) + 1;
1206 break;
1207
1208 case CLZ:
1209 hv = 0;
1210 if (h1 != 0)
1211 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1212 - HOST_BITS_PER_WIDE_INT;
1213 else if (l1 != 0)
1214 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1215 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1216 lv = GET_MODE_BITSIZE (mode);
1217 break;
1218
1219 case CTZ:
1220 hv = 0;
1221 if (l1 != 0)
1222 lv = exact_log2 (l1 & -l1);
1223 else if (h1 != 0)
1224 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1225 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1226 lv = GET_MODE_BITSIZE (mode);
1227 break;
1228
1229 case POPCOUNT:
1230 hv = 0;
1231 lv = 0;
1232 while (l1)
1233 lv++, l1 &= l1 - 1;
1234 while (h1)
1235 lv++, h1 &= h1 - 1;
1236 break;
1237
1238 case PARITY:
1239 hv = 0;
1240 lv = 0;
1241 while (l1)
1242 lv++, l1 &= l1 - 1;
1243 while (h1)
1244 lv++, h1 &= h1 - 1;
1245 lv &= 1;
1246 break;
1247
1248 case BSWAP:
1249 {
1250 unsigned int s;
1251
1252 hv = 0;
1253 lv = 0;
1254 for (s = 0; s < width; s += 8)
1255 {
1256 unsigned int d = width - s - 8;
1257 unsigned HOST_WIDE_INT byte;
1258
1259 if (s < HOST_BITS_PER_WIDE_INT)
1260 byte = (l1 >> s) & 0xff;
1261 else
1262 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1263
1264 if (d < HOST_BITS_PER_WIDE_INT)
1265 lv |= byte << d;
1266 else
1267 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1268 }
1269 }
1270 break;
1271
1272 case TRUNCATE:
1273 /* This is just a change-of-mode, so do nothing. */
1274 lv = l1, hv = h1;
1275 break;
1276
1277 case ZERO_EXTEND:
1278 gcc_assert (op_mode != VOIDmode);
1279
1280 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1281 return 0;
1282
1283 hv = 0;
1284 lv = l1 & GET_MODE_MASK (op_mode);
1285 break;
1286
1287 case SIGN_EXTEND:
1288 if (op_mode == VOIDmode
1289 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1290 return 0;
1291 else
1292 {
1293 lv = l1 & GET_MODE_MASK (op_mode);
1294 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1295 && (lv & ((HOST_WIDE_INT) 1
1296 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1297 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1298
1299 hv = HWI_SIGN_EXTEND (lv);
1300 }
1301 break;
1302
1303 case SQRT:
1304 return 0;
1305
1306 default:
1307 return 0;
1308 }
1309
1310 return immed_double_const (lv, hv, mode);
1311 }
1312
1313 else if (GET_CODE (op) == CONST_DOUBLE
1314 && SCALAR_FLOAT_MODE_P (mode))
1315 {
1316 REAL_VALUE_TYPE d, t;
1317 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1318
1319 switch (code)
1320 {
1321 case SQRT:
1322 if (HONOR_SNANS (mode) && real_isnan (&d))
1323 return 0;
1324 real_sqrt (&t, mode, &d);
1325 d = t;
1326 break;
1327 case ABS:
1328 d = REAL_VALUE_ABS (d);
1329 break;
1330 case NEG:
1331 d = REAL_VALUE_NEGATE (d);
1332 break;
1333 case FLOAT_TRUNCATE:
1334 d = real_value_truncate (mode, d);
1335 break;
1336 case FLOAT_EXTEND:
1337 /* All this does is change the mode. */
1338 break;
1339 case FIX:
1340 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1341 break;
1342 case NOT:
1343 {
1344 long tmp[4];
1345 int i;
1346
1347 real_to_target (tmp, &d, GET_MODE (op));
1348 for (i = 0; i < 4; i++)
1349 tmp[i] = ~tmp[i];
1350 real_from_target (&d, tmp, mode);
1351 break;
1352 }
1353 default:
1354 gcc_unreachable ();
1355 }
1356 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1357 }
1358
1359 else if (GET_CODE (op) == CONST_DOUBLE
1360 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1361 && GET_MODE_CLASS (mode) == MODE_INT
1362 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1363 {
1364 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1365 operators are intentionally left unspecified (to ease implementation
1366 by target backends), for consistency, this routine implements the
1367 same semantics for constant folding as used by the middle-end. */
1368
1369 /* This was formerly used only for non-IEEE float.
1370 eggert@twinsun.com says it is safe for IEEE also. */
1371 HOST_WIDE_INT xh, xl, th, tl;
1372 REAL_VALUE_TYPE x, t;
1373 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1374 switch (code)
1375 {
1376 case FIX:
1377 if (REAL_VALUE_ISNAN (x))
1378 return const0_rtx;
1379
1380 /* Test against the signed upper bound. */
1381 if (width > HOST_BITS_PER_WIDE_INT)
1382 {
1383 th = ((unsigned HOST_WIDE_INT) 1
1384 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1385 tl = -1;
1386 }
1387 else
1388 {
1389 th = 0;
1390 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1391 }
1392 real_from_integer (&t, VOIDmode, tl, th, 0);
1393 if (REAL_VALUES_LESS (t, x))
1394 {
1395 xh = th;
1396 xl = tl;
1397 break;
1398 }
1399
1400 /* Test against the signed lower bound. */
1401 if (width > HOST_BITS_PER_WIDE_INT)
1402 {
1403 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1404 tl = 0;
1405 }
1406 else
1407 {
1408 th = -1;
1409 tl = (HOST_WIDE_INT) -1 << (width - 1);
1410 }
1411 real_from_integer (&t, VOIDmode, tl, th, 0);
1412 if (REAL_VALUES_LESS (x, t))
1413 {
1414 xh = th;
1415 xl = tl;
1416 break;
1417 }
1418 REAL_VALUE_TO_INT (&xl, &xh, x);
1419 break;
1420
1421 case UNSIGNED_FIX:
1422 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1423 return const0_rtx;
1424
1425 /* Test against the unsigned upper bound. */
1426 if (width == 2*HOST_BITS_PER_WIDE_INT)
1427 {
1428 th = -1;
1429 tl = -1;
1430 }
1431 else if (width >= HOST_BITS_PER_WIDE_INT)
1432 {
1433 th = ((unsigned HOST_WIDE_INT) 1
1434 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1435 tl = -1;
1436 }
1437 else
1438 {
1439 th = 0;
1440 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1441 }
1442 real_from_integer (&t, VOIDmode, tl, th, 1);
1443 if (REAL_VALUES_LESS (t, x))
1444 {
1445 xh = th;
1446 xl = tl;
1447 break;
1448 }
1449
1450 REAL_VALUE_TO_INT (&xl, &xh, x);
1451 break;
1452
1453 default:
1454 gcc_unreachable ();
1455 }
1456 return immed_double_const (xl, xh, mode);
1457 }
1458
1459 return NULL_RTX;
1460 }
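/* Two illustrative cases: zero-extending (const_int -1) from QImode to
   SImode yields (const_int 255), while sign-extending the same value
   yields (const_int -1).  */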
1461 \f
1462 /* Subroutine of simplify_binary_operation to simplify a commutative,
1463 associative binary operation CODE with result mode MODE, operating
1464 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1465 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1466 canonicalization is possible. */
1467
1468 static rtx
1469 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1470 rtx op0, rtx op1)
1471 {
1472 rtx tem;
1473
1474 /* Linearize the operator to the left. */
1475 if (GET_CODE (op1) == code)
1476 {
1477 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1478 if (GET_CODE (op0) == code)
1479 {
1480 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1481 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1482 }
1483
1484 /* "a op (b op c)" becomes "(b op c) op a". */
1485 if (! swap_commutative_operands_p (op1, op0))
1486 return simplify_gen_binary (code, mode, op1, op0);
1487
1488 tem = op0;
1489 op0 = op1;
1490 op1 = tem;
1491 }
1492
1493 if (GET_CODE (op0) == code)
1494 {
1495 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1496 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1497 {
1498 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1499 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1500 }
1501
1502 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1503 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1504 if (tem != 0)
1505 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1506
1507 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1508 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1509 if (tem != 0)
1510 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1511 }
1512
1513 return 0;
1514 }
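/* As an illustration, for CODE == AND the canonicalizations above turn
   (and (and X (const_int 12)) (const_int 10)) into (and X (const_int 8))
   by folding the two constants together.  */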
1515
1516
1517 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1518 and OP1. Return 0 if no simplification is possible.
1519
1520 Don't use this for relational operations such as EQ or LT.
1521 Use simplify_relational_operation instead. */
1522 rtx
1523 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1524 rtx op0, rtx op1)
1525 {
1526 rtx trueop0, trueop1;
1527 rtx tem;
1528
1529 /* Relational operations don't work here. We must know the mode
1530 of the operands in order to do the comparison correctly.
1531 Assuming a full word can give incorrect results.
1532 Consider comparing 128 with -128 in QImode. */
1533 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1534 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1535
1536 /* Make sure the constant is second. */
1537 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1538 && swap_commutative_operands_p (op0, op1))
1539 {
1540 tem = op0, op0 = op1, op1 = tem;
1541 }
1542
1543 trueop0 = avoid_constant_pool_reference (op0);
1544 trueop1 = avoid_constant_pool_reference (op1);
1545
1546 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1547 if (tem)
1548 return tem;
1549 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1550 }
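/* For example, simplify_binary_operation (MULT, SImode, GEN_INT (6),
   GEN_INT (7)) folds to (const_int 42) through the constant path, and
   (minus:SI X X) yields (const_int 0) when X has no side effects.  */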
1551
1552 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1553 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1554 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1555 actual constants. */
1556
1557 static rtx
1558 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1559 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1560 {
1561 rtx tem, reversed, opleft, opright;
1562 HOST_WIDE_INT val;
1563 unsigned int width = GET_MODE_BITSIZE (mode);
1564
1565 /* Even if we can't compute a constant result,
1566 there are some cases worth simplifying. */
1567
1568 switch (code)
1569 {
1570 case PLUS:
1571 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1572 when x is NaN, infinite, or finite and nonzero. They aren't
1573 when x is -0 and the rounding mode is not towards -infinity,
1574 since (-0) + 0 is then 0. */
1575 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1576 return op0;
1577
1578 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1579 transformations are safe even for IEEE. */
1580 if (GET_CODE (op0) == NEG)
1581 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1582 else if (GET_CODE (op1) == NEG)
1583 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1584
1585 /* (~a) + 1 -> -a */
1586 if (INTEGRAL_MODE_P (mode)
1587 && GET_CODE (op0) == NOT
1588 && trueop1 == const1_rtx)
1589 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1590
1591 /* Handle both-operands-constant cases. We can only add
1592 CONST_INTs to constants since the sum of relocatable symbols
1593 can't be handled by most assemblers. Don't add CONST_INT
1594 to CONST_INT since overflow won't be computed properly if wider
1595 than HOST_BITS_PER_WIDE_INT. */
1596
1597 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1598 && GET_CODE (op1) == CONST_INT)
1599 return plus_constant (op0, INTVAL (op1));
1600 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1601 && GET_CODE (op0) == CONST_INT)
1602 return plus_constant (op1, INTVAL (op0));
1603
1604 /* See if this is something like X * C - X or vice versa or
1605 if the multiplication is written as a shift. If so, we can
1606 distribute and make a new multiply, shift, or maybe just
1607 have X (if C is 2 in the example above). But don't make
1608 something more expensive than we had before. */
1609
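/* For instance, (plus (mult X (const_int 3)) X) can become
   (mult X (const_int 4)), and (plus (ashift X (const_int 2)) X) can
   become (mult X (const_int 5)), provided rtx_cost does not consider
   the result more expensive.  */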
1610 if (SCALAR_INT_MODE_P (mode))
1611 {
1612 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1613 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1614 rtx lhs = op0, rhs = op1;
1615
1616 if (GET_CODE (lhs) == NEG)
1617 {
1618 coeff0l = -1;
1619 coeff0h = -1;
1620 lhs = XEXP (lhs, 0);
1621 }
1622 else if (GET_CODE (lhs) == MULT
1623 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1624 {
1625 coeff0l = INTVAL (XEXP (lhs, 1));
1626 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1627 lhs = XEXP (lhs, 0);
1628 }
1629 else if (GET_CODE (lhs) == ASHIFT
1630 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1631 && INTVAL (XEXP (lhs, 1)) >= 0
1632 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1633 {
1634 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1635 coeff0h = 0;
1636 lhs = XEXP (lhs, 0);
1637 }
1638
1639 if (GET_CODE (rhs) == NEG)
1640 {
1641 coeff1l = -1;
1642 coeff1h = -1;
1643 rhs = XEXP (rhs, 0);
1644 }
1645 else if (GET_CODE (rhs) == MULT
1646 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1647 {
1648 coeff1l = INTVAL (XEXP (rhs, 1));
1649 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1650 rhs = XEXP (rhs, 0);
1651 }
1652 else if (GET_CODE (rhs) == ASHIFT
1653 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1654 && INTVAL (XEXP (rhs, 1)) >= 0
1655 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1656 {
1657 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1658 coeff1h = 0;
1659 rhs = XEXP (rhs, 0);
1660 }
1661
1662 if (rtx_equal_p (lhs, rhs))
1663 {
1664 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1665 rtx coeff;
1666 unsigned HOST_WIDE_INT l;
1667 HOST_WIDE_INT h;
1668
1669 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1670 coeff = immed_double_const (l, h, mode);
1671
1672 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1673 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1674 ? tem : 0;
1675 }
1676 }
1677
1678 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1679 if ((GET_CODE (op1) == CONST_INT
1680 || GET_CODE (op1) == CONST_DOUBLE)
1681 && GET_CODE (op0) == XOR
1682 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1683 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1684 && mode_signbit_p (mode, op1))
1685 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1686 simplify_gen_binary (XOR, mode, op1,
1687 XEXP (op0, 1)));
1688
1689 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1690 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1691 && GET_CODE (op0) == MULT
1692 && GET_CODE (XEXP (op0, 0)) == NEG)
1693 {
1694 rtx in1, in2;
1695
1696 in1 = XEXP (XEXP (op0, 0), 0);
1697 in2 = XEXP (op0, 1);
1698 return simplify_gen_binary (MINUS, mode, op1,
1699 simplify_gen_binary (MULT, mode,
1700 in1, in2));
1701 }
1702
1703 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1704 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1705 is 1. */
1706 if (COMPARISON_P (op0)
1707 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1708 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1709 && (reversed = reversed_comparison (op0, mode)))
1710 return
1711 simplify_gen_unary (NEG, mode, reversed, mode);
1712
1713 /* If one of the operands is a PLUS or a MINUS, see if we can
1714 simplify this by the associative law.
1715 Don't use the associative law for floating point.
1716 The inaccuracy makes it nonassociative,
1717 and subtle programs can break if operations are associated. */
1718
1719 if (INTEGRAL_MODE_P (mode)
1720 && (plus_minus_operand_p (op0)
1721 || plus_minus_operand_p (op1))
1722 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1723 return tem;
1724
1725 /* Reassociate floating point addition only when the user
1726 specifies associative math operations. */
1727 if (FLOAT_MODE_P (mode)
1728 && flag_associative_math)
1729 {
1730 tem = simplify_associative_operation (code, mode, op0, op1);
1731 if (tem)
1732 return tem;
1733 }
1734 break;
1735
1736 case COMPARE:
1737 #ifdef HAVE_cc0
1738 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1739 using cc0, in which case we want to leave it as a COMPARE
1740 so we can distinguish it from a register-register-copy.
1741
1742 In IEEE floating point, x-0 is not the same as x. */
1743
1744 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1745 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1746 && trueop1 == CONST0_RTX (mode))
1747 return op0;
1748 #endif
1749
1750 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1751 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1752 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1753 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1754 {
1755 rtx xop00 = XEXP (op0, 0);
1756 rtx xop10 = XEXP (op1, 0);
1757
1758 #ifdef HAVE_cc0
1759 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1760 #else
1761 if (REG_P (xop00) && REG_P (xop10)
1762 && GET_MODE (xop00) == GET_MODE (xop10)
1763 && REGNO (xop00) == REGNO (xop10)
1764 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1765 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1766 #endif
1767 return xop00;
1768 }
1769 break;
1770
1771 case MINUS:
1772 /* We can't assume x-x is 0 even with non-IEEE floating point,
1773 but since it is zero except in very strange circumstances, we
1774 will treat it as zero with -ffinite-math-only. */
1775 if (rtx_equal_p (trueop0, trueop1)
1776 && ! side_effects_p (op0)
1777 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1778 return CONST0_RTX (mode);
1779
1780 /* Change subtraction from zero into negation. (0 - x) is the
1781 same as -x when x is NaN, infinite, or finite and nonzero.
1782 But if the mode has signed zeros, and does not round towards
1783 -infinity, then 0 - 0 is 0, not -0. */
1784 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1785 return simplify_gen_unary (NEG, mode, op1, mode);
1786
1787 /* (-1 - a) is ~a. */
1788 if (trueop0 == constm1_rtx)
1789 return simplify_gen_unary (NOT, mode, op1, mode);
1790
1791 /* Subtracting 0 has no effect unless the mode has signed zeros
1792 and supports rounding towards -infinity. In such a case,
1793 0 - 0 is -0. */
1794 if (!(HONOR_SIGNED_ZEROS (mode)
1795 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1796 && trueop1 == CONST0_RTX (mode))
1797 return op0;
1798
1799 /* See if this is something like X * C - X or vice versa or
1800 if the multiplication is written as a shift. If so, we can
1801 distribute and make a new multiply, shift, or maybe just
1802 have X (if C is 2 in the example above). But don't make
1803 something more expensive than we had before. */
1804
1805 if (SCALAR_INT_MODE_P (mode))
1806 {
1807 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1808 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1809 rtx lhs = op0, rhs = op1;
1810
1811 if (GET_CODE (lhs) == NEG)
1812 {
1813 coeff0l = -1;
1814 coeff0h = -1;
1815 lhs = XEXP (lhs, 0);
1816 }
1817 else if (GET_CODE (lhs) == MULT
1818 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1819 {
1820 coeff0l = INTVAL (XEXP (lhs, 1));
1821 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1822 lhs = XEXP (lhs, 0);
1823 }
1824 else if (GET_CODE (lhs) == ASHIFT
1825 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1826 && INTVAL (XEXP (lhs, 1)) >= 0
1827 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1828 {
1829 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1830 coeff0h = 0;
1831 lhs = XEXP (lhs, 0);
1832 }
1833
1834 if (GET_CODE (rhs) == NEG)
1835 {
1836 negcoeff1l = 1;
1837 negcoeff1h = 0;
1838 rhs = XEXP (rhs, 0);
1839 }
1840 else if (GET_CODE (rhs) == MULT
1841 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1842 {
1843 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1844 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1845 rhs = XEXP (rhs, 0);
1846 }
1847 else if (GET_CODE (rhs) == ASHIFT
1848 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1849 && INTVAL (XEXP (rhs, 1)) >= 0
1850 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1851 {
1852 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1853 negcoeff1h = -1;
1854 rhs = XEXP (rhs, 0);
1855 }
1856
1857 if (rtx_equal_p (lhs, rhs))
1858 {
1859 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1860 rtx coeff;
1861 unsigned HOST_WIDE_INT l;
1862 HOST_WIDE_INT h;
1863
1864 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1865 coeff = immed_double_const (l, h, mode);
1866
1867 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1868 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1869 ? tem : 0;
1870 }
1871 }
1872
1873 /* (a - (-b)) -> (a + b). True even for IEEE. */
1874 if (GET_CODE (op1) == NEG)
1875 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1876
1877 /* (-x - c) may be simplified as (-c - x). */
1878 if (GET_CODE (op0) == NEG
1879 && (GET_CODE (op1) == CONST_INT
1880 || GET_CODE (op1) == CONST_DOUBLE))
1881 {
1882 tem = simplify_unary_operation (NEG, mode, op1, mode);
1883 if (tem)
1884 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1885 }
1886
1887 /* Don't let a relocatable value get a negative coeff. */
1888 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1889 return simplify_gen_binary (PLUS, mode,
1890 op0,
1891 neg_const_int (mode, op1));
1892
1893 /* (x - (x & y)) -> (x & ~y) */
1894 if (GET_CODE (op1) == AND)
1895 {
1896 if (rtx_equal_p (op0, XEXP (op1, 0)))
1897 {
1898 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1899 GET_MODE (XEXP (op1, 1)));
1900 return simplify_gen_binary (AND, mode, op0, tem);
1901 }
1902 if (rtx_equal_p (op0, XEXP (op1, 1)))
1903 {
1904 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1905 GET_MODE (XEXP (op1, 0)));
1906 return simplify_gen_binary (AND, mode, op0, tem);
1907 }
1908 }
1909
1910 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1911 by reversing the comparison code if valid. */
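      /* For example, with 0/1 flag values, (minus (const_int 1) (lt A B))
	 is the same as (ge A B), provided the comparison can be reversed
	 in this mode.  */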
1912 if (STORE_FLAG_VALUE == 1
1913 && trueop0 == const1_rtx
1914 && COMPARISON_P (op1)
1915 && (reversed = reversed_comparison (op1, mode)))
1916 return reversed;
1917
1918 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1919 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1920 && GET_CODE (op1) == MULT
1921 && GET_CODE (XEXP (op1, 0)) == NEG)
1922 {
1923 rtx in1, in2;
1924
1925 in1 = XEXP (XEXP (op1, 0), 0);
1926 in2 = XEXP (op1, 1);
1927 return simplify_gen_binary (PLUS, mode,
1928 simplify_gen_binary (MULT, mode,
1929 in1, in2),
1930 op0);
1931 }
1932
1933 /* Canonicalize (minus (neg A) (mult B C)) to
1934 (minus (mult (neg B) C) A). */
1935 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1936 && GET_CODE (op1) == MULT
1937 && GET_CODE (op0) == NEG)
1938 {
1939 rtx in1, in2;
1940
1941 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1942 in2 = XEXP (op1, 1);
1943 return simplify_gen_binary (MINUS, mode,
1944 simplify_gen_binary (MULT, mode,
1945 in1, in2),
1946 XEXP (op0, 0));
1947 }
1948
1949 /* If one of the operands is a PLUS or a MINUS, see if we can
1950 simplify this by the associative law. This will, for example,
1951 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1952 Don't use the associative law for floating point.
1953 The inaccuracy makes it nonassociative,
1954 and subtle programs can break if operations are associated. */
1955
1956 if (INTEGRAL_MODE_P (mode)
1957 && (plus_minus_operand_p (op0)
1958 || plus_minus_operand_p (op1))
1959 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1960 return tem;
1961 break;
1962
1963 case MULT:
1964 if (trueop1 == constm1_rtx)
1965 return simplify_gen_unary (NEG, mode, op0, mode);
1966
1967 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1968 x is NaN, since x * 0 is then also NaN. Nor is it valid
1969 when the mode has signed zeros, since multiplying a negative
1970 number by 0 will give -0, not 0. */
1971 if (!HONOR_NANS (mode)
1972 && !HONOR_SIGNED_ZEROS (mode)
1973 && trueop1 == CONST0_RTX (mode)
1974 && ! side_effects_p (op0))
1975 return op1;
1976
1977 /* In IEEE floating point, x*1 is not equivalent to x for
1978 signalling NaNs. */
1979 if (!HONOR_SNANS (mode)
1980 && trueop1 == CONST1_RTX (mode))
1981 return op0;
1982
1983 /* Convert multiply by constant power of two into shift unless
1984 we are still generating RTL. This test is a kludge. */
1985 if (GET_CODE (trueop1) == CONST_INT
1986 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1987 /* If the mode is larger than the host word size, and the
1988 uppermost bit is set, then this isn't a power of two due
1989 to implicit sign extension. */
1990 && (width <= HOST_BITS_PER_WIDE_INT
1991 || val != HOST_BITS_PER_WIDE_INT - 1))
1992 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1993
1994 /* Likewise for multipliers wider than a word. */
1995 if (GET_CODE (trueop1) == CONST_DOUBLE
1996 && (GET_MODE (trueop1) == VOIDmode
1997 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1998 && GET_MODE (op0) == mode
1999 && CONST_DOUBLE_LOW (trueop1) == 0
2000 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2001 return simplify_gen_binary (ASHIFT, mode, op0,
2002 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2003
2004 /* x*2 is x+x and x*(-1) is -x */
2005 if (GET_CODE (trueop1) == CONST_DOUBLE
2006 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2007 && GET_MODE (op0) == mode)
2008 {
2009 REAL_VALUE_TYPE d;
2010 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2011
2012 if (REAL_VALUES_EQUAL (d, dconst2))
2013 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2014
2015 if (!HONOR_SNANS (mode)
2016 && REAL_VALUES_EQUAL (d, dconstm1))
2017 return simplify_gen_unary (NEG, mode, op0, mode);
2018 }
2019
2020 /* Optimize -x * -x as x * x. */
2021 if (FLOAT_MODE_P (mode)
2022 && GET_CODE (op0) == NEG
2023 && GET_CODE (op1) == NEG
2024 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2025 && !side_effects_p (XEXP (op0, 0)))
2026 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2027
2028 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2029 if (SCALAR_FLOAT_MODE_P (mode)
2030 && GET_CODE (op0) == ABS
2031 && GET_CODE (op1) == ABS
2032 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2033 && !side_effects_p (XEXP (op0, 0)))
2034 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2035
2036 /* Reassociate multiplication, but for floating point MULTs
2037 only when the user specifies unsafe math optimizations. */
2038 if (! FLOAT_MODE_P (mode)
2039 || flag_unsafe_math_optimizations)
2040 {
2041 tem = simplify_associative_operation (code, mode, op0, op1);
2042 if (tem)
2043 return tem;
2044 }
2045 break;
2046
2047 case IOR:
2048 if (trueop1 == const0_rtx)
2049 return op0;
2050 if (GET_CODE (trueop1) == CONST_INT
2051 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2052 == GET_MODE_MASK (mode)))
2053 return op1;
2054 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2055 return op0;
2056 /* A | (~A) -> -1 */
2057 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2058 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2059 && ! side_effects_p (op0)
2060 && SCALAR_INT_MODE_P (mode))
2061 return constm1_rtx;
2062
2063 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2064 if (GET_CODE (op1) == CONST_INT
2065 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2066 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2067 return op1;
2068
2069 /* Canonicalize (X & C1) | C2. */
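      /* For example, ((X & 0x0f) | 0x03) can drop the bits of C1 that C2
	 already supplies, giving ((X & 0x0c) | 0x03); and when C2 covers
	 C1 entirely, as in ((X & 0x03) | 0x0f), the result is just 0x0f.  */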
2070 if (GET_CODE (op0) == AND
2071 && GET_CODE (trueop1) == CONST_INT
2072 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2073 {
2074 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2075 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2076 HOST_WIDE_INT c2 = INTVAL (trueop1);
2077
2078 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2079 if ((c1 & c2) == c1
2080 && !side_effects_p (XEXP (op0, 0)))
2081 return trueop1;
2082
2083 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2084 if (((c1|c2) & mask) == mask)
2085 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2086
2087 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2088 if (((c1 & ~c2) & mask) != (c1 & mask))
2089 {
2090 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2091 gen_int_mode (c1 & ~c2, mode));
2092 return simplify_gen_binary (IOR, mode, tem, op1);
2093 }
2094 }
2095
2096 /* Convert (A & B) | A to A. */
2097 if (GET_CODE (op0) == AND
2098 && (rtx_equal_p (XEXP (op0, 0), op1)
2099 || rtx_equal_p (XEXP (op0, 1), op1))
2100 && ! side_effects_p (XEXP (op0, 0))
2101 && ! side_effects_p (XEXP (op0, 1)))
2102 return op1;
2103
2104 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2105 mode size to (rotate A CX). */
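      /* For example, in SImode (ior (ashift A (const_int 8))
	 (lshiftrt A (const_int 24))) becomes (rotate A (const_int 8)),
	 since 8 + 24 == 32.  */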
2106
2107 if (GET_CODE (op1) == ASHIFT
2108 || GET_CODE (op1) == SUBREG)
2109 {
2110 opleft = op1;
2111 opright = op0;
2112 }
2113 else
2114 {
2115 opright = op1;
2116 opleft = op0;
2117 }
2118
2119 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2120 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2121 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2122 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2123 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2124 == GET_MODE_BITSIZE (mode)))
2125 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2126
2127 /* Same, but for ashift that has been "simplified" to a wider mode
2128 by simplify_shift_const. */
2129
2130 if (GET_CODE (opleft) == SUBREG
2131 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2132 && GET_CODE (opright) == LSHIFTRT
2133 && GET_CODE (XEXP (opright, 0)) == SUBREG
2134 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2135 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2136 && (GET_MODE_SIZE (GET_MODE (opleft))
2137 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2138 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2139 SUBREG_REG (XEXP (opright, 0)))
2140 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2141 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2142 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2143 == GET_MODE_BITSIZE (mode)))
2144 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2145 XEXP (SUBREG_REG (opleft), 1));
2146
2147 	      /* If we have (ior (and X C1) C2), simplify this by making
2148 C1 as small as possible if C1 actually changes. */
2149 if (GET_CODE (op1) == CONST_INT
2150 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2151 || INTVAL (op1) > 0)
2152 && GET_CODE (op0) == AND
2153 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2154 && GET_CODE (op1) == CONST_INT
2155 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2156 return simplify_gen_binary (IOR, mode,
2157 simplify_gen_binary
2158 (AND, mode, XEXP (op0, 0),
2159 GEN_INT (INTVAL (XEXP (op0, 1))
2160 & ~INTVAL (op1))),
2161 op1);
2162
2163 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2164 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2165 the PLUS does not affect any of the bits in OP1: then we can do
2166 the IOR as a PLUS and we can associate. This is valid if OP1
2167 can be safely shifted left C bits. */
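      /* For example, with illustrative constants, (ior (ashiftrt (plus X
	 0x100) 4) 7) becomes (ashiftrt (plus X 0x170) 4) when nonzero_bits
	 shows the PLUS can never have any of the 0x70 bits set.  */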
2168 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2169 && GET_CODE (XEXP (op0, 0)) == PLUS
2170 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2171 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2172 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2173 {
2174 int count = INTVAL (XEXP (op0, 1));
2175 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2176
2177 if (mask >> count == INTVAL (trueop1)
2178 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2179 return simplify_gen_binary (ASHIFTRT, mode,
2180 plus_constant (XEXP (op0, 0), mask),
2181 XEXP (op0, 1));
2182 }
2183
2184 tem = simplify_associative_operation (code, mode, op0, op1);
2185 if (tem)
2186 return tem;
2187 break;
2188
2189 case XOR:
2190 if (trueop1 == const0_rtx)
2191 return op0;
2192 if (GET_CODE (trueop1) == CONST_INT
2193 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2194 == GET_MODE_MASK (mode)))
2195 return simplify_gen_unary (NOT, mode, op0, mode);
2196 if (rtx_equal_p (trueop0, trueop1)
2197 && ! side_effects_p (op0)
2198 && GET_MODE_CLASS (mode) != MODE_CC)
2199 return CONST0_RTX (mode);
2200
2201 /* Canonicalize XOR of the most significant bit to PLUS. */
2202 if ((GET_CODE (op1) == CONST_INT
2203 || GET_CODE (op1) == CONST_DOUBLE)
2204 && mode_signbit_p (mode, op1))
2205 return simplify_gen_binary (PLUS, mode, op0, op1);
2206 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2207 if ((GET_CODE (op1) == CONST_INT
2208 || GET_CODE (op1) == CONST_DOUBLE)
2209 && GET_CODE (op0) == PLUS
2210 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2211 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2212 && mode_signbit_p (mode, XEXP (op0, 1)))
2213 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2214 simplify_gen_binary (XOR, mode, op1,
2215 XEXP (op0, 1)));
2216
2217 /* If we are XORing two things that have no bits in common,
2218 convert them into an IOR. This helps to detect rotation encoded
2219 using those methods and possibly other simplifications. */
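      /* For example, if nonzero_bits shows OP0 can only have bits within
	 0x0f and OP1 only within 0xf0, then OP0 ^ OP1 and OP0 | OP1 are
	 the same value.  */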
2220
2221 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2222 && (nonzero_bits (op0, mode)
2223 & nonzero_bits (op1, mode)) == 0)
2224 return (simplify_gen_binary (IOR, mode, op0, op1));
2225
2226 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2227 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2228 (NOT y). */
2229 {
2230 int num_negated = 0;
2231
2232 if (GET_CODE (op0) == NOT)
2233 num_negated++, op0 = XEXP (op0, 0);
2234 if (GET_CODE (op1) == NOT)
2235 num_negated++, op1 = XEXP (op1, 0);
2236
2237 if (num_negated == 2)
2238 return simplify_gen_binary (XOR, mode, op0, op1);
2239 else if (num_negated == 1)
2240 return simplify_gen_unary (NOT, mode,
2241 simplify_gen_binary (XOR, mode, op0, op1),
2242 mode);
2243 }
2244
2245 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2246 correspond to a machine insn or result in further simplifications
2247 if B is a constant. */
2248
2249 if (GET_CODE (op0) == AND
2250 && rtx_equal_p (XEXP (op0, 1), op1)
2251 && ! side_effects_p (op1))
2252 return simplify_gen_binary (AND, mode,
2253 simplify_gen_unary (NOT, mode,
2254 XEXP (op0, 0), mode),
2255 op1);
2256
2257 else if (GET_CODE (op0) == AND
2258 && rtx_equal_p (XEXP (op0, 0), op1)
2259 && ! side_effects_p (op1))
2260 return simplify_gen_binary (AND, mode,
2261 simplify_gen_unary (NOT, mode,
2262 XEXP (op0, 1), mode),
2263 op1);
2264
2265 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2266 comparison if STORE_FLAG_VALUE is 1. */
2267 if (STORE_FLAG_VALUE == 1
2268 && trueop1 == const1_rtx
2269 && COMPARISON_P (op0)
2270 && (reversed = reversed_comparison (op0, mode)))
2271 return reversed;
2272
2273 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2274 is (lt foo (const_int 0)), so we can perform the above
2275 simplification if STORE_FLAG_VALUE is 1. */
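      /* For example, in SImode (xor (lshiftrt X (const_int 31))
	 (const_int 1)) isolates the sign bit and inverts it, which is
	 (ge X (const_int 0)) when flag values are 0/1.  */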
2276
2277 if (STORE_FLAG_VALUE == 1
2278 && trueop1 == const1_rtx
2279 && GET_CODE (op0) == LSHIFTRT
2280 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2281 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2282 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2283
2284 /* (xor (comparison foo bar) (const_int sign-bit))
2285 	 when STORE_FLAG_VALUE is the sign bit can become the reversed comparison.  */
2286 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2287 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2288 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2289 && trueop1 == const_true_rtx
2290 && COMPARISON_P (op0)
2291 && (reversed = reversed_comparison (op0, mode)))
2292 return reversed;
2293
2294 tem = simplify_associative_operation (code, mode, op0, op1);
2295 if (tem)
2296 return tem;
2297 break;
2298
2299 case AND:
2300 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2301 return trueop1;
2302 /* If we are turning off bits already known off in OP0, we need
2303 not do an AND. */
2304 if (GET_CODE (trueop1) == CONST_INT
2305 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2306 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2307 return op0;
2308 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2309 && GET_MODE_CLASS (mode) != MODE_CC)
2310 return op0;
2311 /* A & (~A) -> 0 */
2312 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2313 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2314 && ! side_effects_p (op0)
2315 && GET_MODE_CLASS (mode) != MODE_CC)
2316 return CONST0_RTX (mode);
2317
2318 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2319 there are no nonzero bits of C outside of X's mode. */
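      /* For example, if X has QImode, (and (sign_extend X) 0x7f) becomes
	 (zero_extend (and X 0x7f)), because 0x7f lies entirely within the
	 QImode mask.  */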
2320 if ((GET_CODE (op0) == SIGN_EXTEND
2321 || GET_CODE (op0) == ZERO_EXTEND)
2322 && GET_CODE (trueop1) == CONST_INT
2323 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2324 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2325 & INTVAL (trueop1)) == 0)
2326 {
2327 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2328 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2329 gen_int_mode (INTVAL (trueop1),
2330 imode));
2331 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2332 }
2333
2334 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
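      /* For example, (and (ior X 0x0f) 0xfc) becomes
	 (ior (and X 0xfc) 0x0c).  */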
2335 if (GET_CODE (op0) == IOR
2336 && GET_CODE (trueop1) == CONST_INT
2337 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2338 {
2339 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2340 return simplify_gen_binary (IOR, mode,
2341 simplify_gen_binary (AND, mode,
2342 XEXP (op0, 0), op1),
2343 gen_int_mode (tmp, mode));
2344 }
2345
2346 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2347 insn (and may simplify more). */
2348 if (GET_CODE (op0) == XOR
2349 && rtx_equal_p (XEXP (op0, 0), op1)
2350 && ! side_effects_p (op1))
2351 return simplify_gen_binary (AND, mode,
2352 simplify_gen_unary (NOT, mode,
2353 XEXP (op0, 1), mode),
2354 op1);
2355
2356 if (GET_CODE (op0) == XOR
2357 && rtx_equal_p (XEXP (op0, 1), op1)
2358 && ! side_effects_p (op1))
2359 return simplify_gen_binary (AND, mode,
2360 simplify_gen_unary (NOT, mode,
2361 XEXP (op0, 0), mode),
2362 op1);
2363
2364 /* Similarly for (~(A ^ B)) & A. */
2365 if (GET_CODE (op0) == NOT
2366 && GET_CODE (XEXP (op0, 0)) == XOR
2367 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2368 && ! side_effects_p (op1))
2369 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2370
2371 if (GET_CODE (op0) == NOT
2372 && GET_CODE (XEXP (op0, 0)) == XOR
2373 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2374 && ! side_effects_p (op1))
2375 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2376
2377 /* Convert (A | B) & A to A. */
2378 if (GET_CODE (op0) == IOR
2379 && (rtx_equal_p (XEXP (op0, 0), op1)
2380 || rtx_equal_p (XEXP (op0, 1), op1))
2381 && ! side_effects_p (XEXP (op0, 0))
2382 && ! side_effects_p (XEXP (op0, 1)))
2383 return op1;
2384
2385 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2386 ((A & N) + B) & M -> (A + B) & M
2387 Similarly if (N & M) == 0,
2388 ((A | N) + B) & M -> (A + B) & M
2389 and for - instead of + and/or ^ instead of |. */
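      /* For instance, with M == 0xff and N == 0xff,
	 ((A & 0xff) + B) & 0xff is (A + B) & 0xff, since bits of A above
	 the mask can only influence bits of the sum that the outer AND
	 discards anyway.  */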
2390 if (GET_CODE (trueop1) == CONST_INT
2391 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2392 && ~INTVAL (trueop1)
2393 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2394 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2395 {
2396 rtx pmop[2];
2397 int which;
2398
2399 pmop[0] = XEXP (op0, 0);
2400 pmop[1] = XEXP (op0, 1);
2401
2402 for (which = 0; which < 2; which++)
2403 {
2404 tem = pmop[which];
2405 switch (GET_CODE (tem))
2406 {
2407 case AND:
2408 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2409 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2410 == INTVAL (trueop1))
2411 pmop[which] = XEXP (tem, 0);
2412 break;
2413 case IOR:
2414 case XOR:
2415 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2416 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2417 pmop[which] = XEXP (tem, 0);
2418 break;
2419 default:
2420 break;
2421 }
2422 }
2423
2424 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2425 {
2426 tem = simplify_gen_binary (GET_CODE (op0), mode,
2427 pmop[0], pmop[1]);
2428 return simplify_gen_binary (code, mode, tem, op1);
2429 }
2430 }
2431 tem = simplify_associative_operation (code, mode, op0, op1);
2432 if (tem)
2433 return tem;
2434 break;
2435
2436 case UDIV:
2437 /* 0/x is 0 (or x&0 if x has side-effects). */
2438 if (trueop0 == CONST0_RTX (mode))
2439 {
2440 if (side_effects_p (op1))
2441 return simplify_gen_binary (AND, mode, op1, trueop0);
2442 return trueop0;
2443 }
2444 /* x/1 is x. */
2445 if (trueop1 == CONST1_RTX (mode))
2446 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2447 /* Convert divide by power of two into shift. */
2448 if (GET_CODE (trueop1) == CONST_INT
2449 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2450 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2451 break;
2452
2453 case DIV:
2454 /* Handle floating point and integers separately. */
2455 if (SCALAR_FLOAT_MODE_P (mode))
2456 {
2457 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2458 safe for modes with NaNs, since 0.0 / 0.0 will then be
2459 NaN rather than 0.0. Nor is it safe for modes with signed
2460 zeros, since dividing 0 by a negative number gives -0.0 */
2461 if (trueop0 == CONST0_RTX (mode)
2462 && !HONOR_NANS (mode)
2463 && !HONOR_SIGNED_ZEROS (mode)
2464 && ! side_effects_p (op1))
2465 return op0;
2466 /* x/1.0 is x. */
2467 if (trueop1 == CONST1_RTX (mode)
2468 && !HONOR_SNANS (mode))
2469 return op0;
2470
2471 if (GET_CODE (trueop1) == CONST_DOUBLE
2472 && trueop1 != CONST0_RTX (mode))
2473 {
2474 REAL_VALUE_TYPE d;
2475 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2476
2477 /* x/-1.0 is -x. */
2478 if (REAL_VALUES_EQUAL (d, dconstm1)
2479 && !HONOR_SNANS (mode))
2480 return simplify_gen_unary (NEG, mode, op0, mode);
2481
2482 /* Change FP division by a constant into multiplication.
2483 Only do this with -freciprocal-math. */
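	      /* For example, X / 4.0 becomes X * 0.25 exactly, whereas
		 rewriting X / 3.0 as X * (1.0/3.0) rounds the reciprocal,
		 which is why this is only done under -freciprocal-math.  */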
2484 if (flag_reciprocal_math
2485 && !REAL_VALUES_EQUAL (d, dconst0))
2486 {
2487 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2488 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2489 return simplify_gen_binary (MULT, mode, op0, tem);
2490 }
2491 }
2492 }
2493 else
2494 {
2495 /* 0/x is 0 (or x&0 if x has side-effects). */
2496 if (trueop0 == CONST0_RTX (mode))
2497 {
2498 if (side_effects_p (op1))
2499 return simplify_gen_binary (AND, mode, op1, trueop0);
2500 return trueop0;
2501 }
2502 /* x/1 is x. */
2503 if (trueop1 == CONST1_RTX (mode))
2504 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2505 /* x/-1 is -x. */
2506 if (trueop1 == constm1_rtx)
2507 {
2508 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2509 return simplify_gen_unary (NEG, mode, x, mode);
2510 }
2511 }
2512 break;
2513
2514 case UMOD:
2515 /* 0%x is 0 (or x&0 if x has side-effects). */
2516 if (trueop0 == CONST0_RTX (mode))
2517 {
2518 if (side_effects_p (op1))
2519 return simplify_gen_binary (AND, mode, op1, trueop0);
2520 return trueop0;
2521 }
2522 	      /* x%1 is 0 (or x&0 if x has side-effects).  */
2523 if (trueop1 == CONST1_RTX (mode))
2524 {
2525 if (side_effects_p (op0))
2526 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2527 return CONST0_RTX (mode);
2528 }
2529 /* Implement modulus by power of two as AND. */
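      /* For example, an unsigned X % 16 is X & 15.  */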
2530 if (GET_CODE (trueop1) == CONST_INT
2531 && exact_log2 (INTVAL (trueop1)) > 0)
2532 return simplify_gen_binary (AND, mode, op0,
2533 GEN_INT (INTVAL (op1) - 1));
2534 break;
2535
2536 case MOD:
2537 /* 0%x is 0 (or x&0 if x has side-effects). */
2538 if (trueop0 == CONST0_RTX (mode))
2539 {
2540 if (side_effects_p (op1))
2541 return simplify_gen_binary (AND, mode, op1, trueop0);
2542 return trueop0;
2543 }
2544 	      /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
2545 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2546 {
2547 if (side_effects_p (op0))
2548 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2549 return CONST0_RTX (mode);
2550 }
2551 break;
2552
2553 case ROTATERT:
2554 case ROTATE:
2555 case ASHIFTRT:
2556 if (trueop1 == CONST0_RTX (mode))
2557 return op0;
2558 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2559 return op0;
2560 /* Rotating ~0 always results in ~0. */
2561 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2562 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2563 && ! side_effects_p (op1))
2564 return op0;
2565 break;
2566
2567 case ASHIFT:
2568 case SS_ASHIFT:
2569 case US_ASHIFT:
2570 if (trueop1 == CONST0_RTX (mode))
2571 return op0;
2572 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2573 return op0;
2574 break;
2575
2576 case LSHIFTRT:
2577 if (trueop1 == CONST0_RTX (mode))
2578 return op0;
2579 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2580 return op0;
2581 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
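      /* For example, in a 32-bit mode where CLZ of zero is defined to be
	 32, (lshiftrt (clz X) 5) is 1 exactly when X is zero, so it is
	 equivalent to (eq X 0).  */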
2582 if (GET_CODE (op0) == CLZ
2583 && GET_CODE (trueop1) == CONST_INT
2584 && STORE_FLAG_VALUE == 1
2585 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2586 {
2587 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2588 unsigned HOST_WIDE_INT zero_val = 0;
2589
2590 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2591 && zero_val == GET_MODE_BITSIZE (imode)
2592 && INTVAL (trueop1) == exact_log2 (zero_val))
2593 return simplify_gen_relational (EQ, mode, imode,
2594 XEXP (op0, 0), const0_rtx);
2595 }
2596 break;
2597
2598 case SMIN:
2599 if (width <= HOST_BITS_PER_WIDE_INT
2600 && GET_CODE (trueop1) == CONST_INT
2601 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2602 && ! side_effects_p (op0))
2603 return op1;
2604 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2605 return op0;
2606 tem = simplify_associative_operation (code, mode, op0, op1);
2607 if (tem)
2608 return tem;
2609 break;
2610
2611 case SMAX:
2612 if (width <= HOST_BITS_PER_WIDE_INT
2613 && GET_CODE (trueop1) == CONST_INT
2614 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2615 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2616 && ! side_effects_p (op0))
2617 return op1;
2618 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2619 return op0;
2620 tem = simplify_associative_operation (code, mode, op0, op1);
2621 if (tem)
2622 return tem;
2623 break;
2624
2625 case UMIN:
2626 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2627 return op1;
2628 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2629 return op0;
2630 tem = simplify_associative_operation (code, mode, op0, op1);
2631 if (tem)
2632 return tem;
2633 break;
2634
2635 case UMAX:
2636 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2637 return op1;
2638 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2639 return op0;
2640 tem = simplify_associative_operation (code, mode, op0, op1);
2641 if (tem)
2642 return tem;
2643 break;
2644
2645 case SS_PLUS:
2646 case US_PLUS:
2647 case SS_MINUS:
2648 case US_MINUS:
2649 case SS_MULT:
2650 case US_MULT:
2651 case SS_DIV:
2652 case US_DIV:
2653 /* ??? There are simplifications that can be done. */
2654 return 0;
2655
2656 case VEC_SELECT:
2657 if (!VECTOR_MODE_P (mode))
2658 {
2659 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2660 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2661 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2662 gcc_assert (XVECLEN (trueop1, 0) == 1);
2663 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2664
2665 if (GET_CODE (trueop0) == CONST_VECTOR)
2666 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2667 (trueop1, 0, 0)));
2668
2669 	  /* Extract a scalar element from a nested VEC_SELECT expression
2670 	     (with an optional nested VEC_CONCAT expression).  Some targets
2671 	     (i386) extract a scalar element from a vector using a chain of
2672 	     nested VEC_SELECT expressions.  When the input operand is a
2673 	     memory operand, this operation can be simplified to a simple
2674 	     scalar load from an appropriately offset memory address.  */
2675 if (GET_CODE (trueop0) == VEC_SELECT)
2676 {
2677 rtx op0 = XEXP (trueop0, 0);
2678 rtx op1 = XEXP (trueop0, 1);
2679
2680 enum machine_mode opmode = GET_MODE (op0);
2681 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2682 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2683
2684 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2685 int elem;
2686
2687 rtvec vec;
2688 rtx tmp_op, tmp;
2689
2690 gcc_assert (GET_CODE (op1) == PARALLEL);
2691 gcc_assert (i < n_elts);
2692
2693 	      /* Select the element pointed to by the nested selector.  */
2694 elem = INTVAL (XVECEXP (op1, 0, i));
2695
2696 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2697 if (GET_CODE (op0) == VEC_CONCAT)
2698 {
2699 rtx op00 = XEXP (op0, 0);
2700 rtx op01 = XEXP (op0, 1);
2701
2702 enum machine_mode mode00, mode01;
2703 int n_elts00, n_elts01;
2704
2705 mode00 = GET_MODE (op00);
2706 mode01 = GET_MODE (op01);
2707
2708 /* Find out number of elements of each operand. */
2709 if (VECTOR_MODE_P (mode00))
2710 {
2711 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2712 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2713 }
2714 else
2715 n_elts00 = 1;
2716
2717 if (VECTOR_MODE_P (mode01))
2718 {
2719 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2720 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2721 }
2722 else
2723 n_elts01 = 1;
2724
2725 gcc_assert (n_elts == n_elts00 + n_elts01);
2726
2727 /* Select correct operand of VEC_CONCAT
2728 and adjust selector. */
2729 		  if (elem < n_elts00)
2730 tmp_op = op00;
2731 else
2732 {
2733 tmp_op = op01;
2734 elem -= n_elts00;
2735 }
2736 }
2737 else
2738 tmp_op = op0;
2739
2740 vec = rtvec_alloc (1);
2741 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2742
2743 tmp = gen_rtx_fmt_ee (code, mode,
2744 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2745 return tmp;
2746 }
2747 }
2748 else
2749 {
2750 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2751 gcc_assert (GET_MODE_INNER (mode)
2752 == GET_MODE_INNER (GET_MODE (trueop0)));
2753 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2754
2755 if (GET_CODE (trueop0) == CONST_VECTOR)
2756 {
2757 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2758 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2759 rtvec v = rtvec_alloc (n_elts);
2760 unsigned int i;
2761
2762 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2763 for (i = 0; i < n_elts; i++)
2764 {
2765 rtx x = XVECEXP (trueop1, 0, i);
2766
2767 gcc_assert (GET_CODE (x) == CONST_INT);
2768 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2769 INTVAL (x));
2770 }
2771
2772 return gen_rtx_CONST_VECTOR (mode, v);
2773 }
2774 }
2775
2776 if (XVECLEN (trueop1, 0) == 1
2777 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2778 && GET_CODE (trueop0) == VEC_CONCAT)
2779 {
2780 rtx vec = trueop0;
2781 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2782
2783 /* Try to find the element in the VEC_CONCAT. */
2784 while (GET_MODE (vec) != mode
2785 && GET_CODE (vec) == VEC_CONCAT)
2786 {
2787 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2788 if (offset < vec_size)
2789 vec = XEXP (vec, 0);
2790 else
2791 {
2792 offset -= vec_size;
2793 vec = XEXP (vec, 1);
2794 }
2795 vec = avoid_constant_pool_reference (vec);
2796 }
2797
2798 if (GET_MODE (vec) == mode)
2799 return vec;
2800 }
2801
2802 return 0;
2803 case VEC_CONCAT:
2804 {
2805 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2806 ? GET_MODE (trueop0)
2807 : GET_MODE_INNER (mode));
2808 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2809 ? GET_MODE (trueop1)
2810 : GET_MODE_INNER (mode));
2811
2812 gcc_assert (VECTOR_MODE_P (mode));
2813 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2814 == GET_MODE_SIZE (mode));
2815
2816 if (VECTOR_MODE_P (op0_mode))
2817 gcc_assert (GET_MODE_INNER (mode)
2818 == GET_MODE_INNER (op0_mode));
2819 else
2820 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2821
2822 if (VECTOR_MODE_P (op1_mode))
2823 gcc_assert (GET_MODE_INNER (mode)
2824 == GET_MODE_INNER (op1_mode));
2825 else
2826 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2827
2828 if ((GET_CODE (trueop0) == CONST_VECTOR
2829 || GET_CODE (trueop0) == CONST_INT
2830 || GET_CODE (trueop0) == CONST_DOUBLE)
2831 && (GET_CODE (trueop1) == CONST_VECTOR
2832 || GET_CODE (trueop1) == CONST_INT
2833 || GET_CODE (trueop1) == CONST_DOUBLE))
2834 {
2835 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2836 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2837 rtvec v = rtvec_alloc (n_elts);
2838 unsigned int i;
2839 unsigned in_n_elts = 1;
2840
2841 if (VECTOR_MODE_P (op0_mode))
2842 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2843 for (i = 0; i < n_elts; i++)
2844 {
2845 if (i < in_n_elts)
2846 {
2847 if (!VECTOR_MODE_P (op0_mode))
2848 RTVEC_ELT (v, i) = trueop0;
2849 else
2850 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2851 }
2852 else
2853 {
2854 if (!VECTOR_MODE_P (op1_mode))
2855 RTVEC_ELT (v, i) = trueop1;
2856 else
2857 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2858 i - in_n_elts);
2859 }
2860 }
2861
2862 return gen_rtx_CONST_VECTOR (mode, v);
2863 }
2864 }
2865 return 0;
2866
2867 default:
2868 gcc_unreachable ();
2869 }
2870
2871 return 0;
2872 }
2873
2874 rtx
2875 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2876 rtx op0, rtx op1)
2877 {
2878 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2879 HOST_WIDE_INT val;
2880 unsigned int width = GET_MODE_BITSIZE (mode);
2881
2882 if (VECTOR_MODE_P (mode)
2883 && code != VEC_CONCAT
2884 && GET_CODE (op0) == CONST_VECTOR
2885 && GET_CODE (op1) == CONST_VECTOR)
2886 {
2887 unsigned n_elts = GET_MODE_NUNITS (mode);
2888 enum machine_mode op0mode = GET_MODE (op0);
2889 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2890 enum machine_mode op1mode = GET_MODE (op1);
2891 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2892 rtvec v = rtvec_alloc (n_elts);
2893 unsigned int i;
2894
2895 gcc_assert (op0_n_elts == n_elts);
2896 gcc_assert (op1_n_elts == n_elts);
2897 for (i = 0; i < n_elts; i++)
2898 {
2899 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2900 CONST_VECTOR_ELT (op0, i),
2901 CONST_VECTOR_ELT (op1, i));
2902 if (!x)
2903 return 0;
2904 RTVEC_ELT (v, i) = x;
2905 }
2906
2907 return gen_rtx_CONST_VECTOR (mode, v);
2908 }
2909
2910 if (VECTOR_MODE_P (mode)
2911 && code == VEC_CONCAT
2912 && CONSTANT_P (op0) && CONSTANT_P (op1))
2913 {
2914 unsigned n_elts = GET_MODE_NUNITS (mode);
2915 rtvec v = rtvec_alloc (n_elts);
2916
2917 gcc_assert (n_elts >= 2);
2918 if (n_elts == 2)
2919 {
2920 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2921 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2922
2923 RTVEC_ELT (v, 0) = op0;
2924 RTVEC_ELT (v, 1) = op1;
2925 }
2926 else
2927 {
2928 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2929 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2930 unsigned i;
2931
2932 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2933 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2934 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2935
2936 for (i = 0; i < op0_n_elts; ++i)
2937 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2938 for (i = 0; i < op1_n_elts; ++i)
2939 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2940 }
2941
2942 return gen_rtx_CONST_VECTOR (mode, v);
2943 }
2944
2945 if (SCALAR_FLOAT_MODE_P (mode)
2946 && GET_CODE (op0) == CONST_DOUBLE
2947 && GET_CODE (op1) == CONST_DOUBLE
2948 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2949 {
2950 if (code == AND
2951 || code == IOR
2952 || code == XOR)
2953 {
2954 long tmp0[4];
2955 long tmp1[4];
2956 REAL_VALUE_TYPE r;
2957 int i;
2958
2959 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2960 GET_MODE (op0));
2961 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2962 GET_MODE (op1));
2963 for (i = 0; i < 4; i++)
2964 {
2965 switch (code)
2966 {
2967 case AND:
2968 tmp0[i] &= tmp1[i];
2969 break;
2970 case IOR:
2971 tmp0[i] |= tmp1[i];
2972 break;
2973 case XOR:
2974 tmp0[i] ^= tmp1[i];
2975 break;
2976 default:
2977 gcc_unreachable ();
2978 }
2979 }
2980 real_from_target (&r, tmp0, mode);
2981 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2982 }
2983 else
2984 {
2985 REAL_VALUE_TYPE f0, f1, value, result;
2986 bool inexact;
2987
2988 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2989 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2990 real_convert (&f0, mode, &f0);
2991 real_convert (&f1, mode, &f1);
2992
2993 if (HONOR_SNANS (mode)
2994 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2995 return 0;
2996
2997 if (code == DIV
2998 && REAL_VALUES_EQUAL (f1, dconst0)
2999 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3000 return 0;
3001
3002 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3003 && flag_trapping_math
3004 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3005 {
3006 int s0 = REAL_VALUE_NEGATIVE (f0);
3007 int s1 = REAL_VALUE_NEGATIVE (f1);
3008
3009 switch (code)
3010 {
3011 case PLUS:
3012 /* Inf + -Inf = NaN plus exception. */
3013 if (s0 != s1)
3014 return 0;
3015 break;
3016 case MINUS:
3017 /* Inf - Inf = NaN plus exception. */
3018 if (s0 == s1)
3019 return 0;
3020 break;
3021 case DIV:
3022 /* Inf / Inf = NaN plus exception. */
3023 return 0;
3024 default:
3025 break;
3026 }
3027 }
3028
3029 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3030 && flag_trapping_math
3031 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3032 || (REAL_VALUE_ISINF (f1)
3033 && REAL_VALUES_EQUAL (f0, dconst0))))
3034 /* Inf * 0 = NaN plus exception. */
3035 return 0;
3036
3037 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3038 &f0, &f1);
3039 real_convert (&result, mode, &value);
3040
3041 /* Don't constant fold this floating point operation if
3042 	 the result has overflowed and flag_trapping_math is set.  */
3043
3044 if (flag_trapping_math
3045 && MODE_HAS_INFINITIES (mode)
3046 && REAL_VALUE_ISINF (result)
3047 && !REAL_VALUE_ISINF (f0)
3048 && !REAL_VALUE_ISINF (f1))
3049 /* Overflow plus exception. */
3050 return 0;
3051
3052 /* Don't constant fold this floating point operation if the
3053 	 result may depend upon the run-time rounding mode and
3054 flag_rounding_math is set, or if GCC's software emulation
3055 is unable to accurately represent the result. */
3056
3057 if ((flag_rounding_math
3058 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
3059 && !flag_unsafe_math_optimizations))
3060 && (inexact || !real_identical (&result, &value)))
3061 return NULL_RTX;
3062
3063 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3064 }
3065 }
3066
3067 /* We can fold some multi-word operations. */
3068 if (GET_MODE_CLASS (mode) == MODE_INT
3069 && width == HOST_BITS_PER_WIDE_INT * 2
3070 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3071 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3072 {
3073 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3074 HOST_WIDE_INT h1, h2, hv, ht;
3075
3076 if (GET_CODE (op0) == CONST_DOUBLE)
3077 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3078 else
3079 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3080
3081 if (GET_CODE (op1) == CONST_DOUBLE)
3082 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3083 else
3084 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3085
3086 switch (code)
3087 {
3088 case MINUS:
3089 /* A - B == A + (-B). */
3090 neg_double (l2, h2, &lv, &hv);
3091 l2 = lv, h2 = hv;
3092
3093 /* Fall through.... */
3094
3095 case PLUS:
3096 add_double (l1, h1, l2, h2, &lv, &hv);
3097 break;
3098
3099 case MULT:
3100 mul_double (l1, h1, l2, h2, &lv, &hv);
3101 break;
3102
3103 case DIV:
3104 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3105 &lv, &hv, &lt, &ht))
3106 return 0;
3107 break;
3108
3109 case MOD:
3110 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3111 &lt, &ht, &lv, &hv))
3112 return 0;
3113 break;
3114
3115 case UDIV:
3116 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3117 &lv, &hv, &lt, &ht))
3118 return 0;
3119 break;
3120
3121 case UMOD:
3122 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3123 &lt, &ht, &lv, &hv))
3124 return 0;
3125 break;
3126
3127 case AND:
3128 lv = l1 & l2, hv = h1 & h2;
3129 break;
3130
3131 case IOR:
3132 lv = l1 | l2, hv = h1 | h2;
3133 break;
3134
3135 case XOR:
3136 lv = l1 ^ l2, hv = h1 ^ h2;
3137 break;
3138
3139 case SMIN:
3140 if (h1 < h2
3141 || (h1 == h2
3142 && ((unsigned HOST_WIDE_INT) l1
3143 < (unsigned HOST_WIDE_INT) l2)))
3144 lv = l1, hv = h1;
3145 else
3146 lv = l2, hv = h2;
3147 break;
3148
3149 case SMAX:
3150 if (h1 > h2
3151 || (h1 == h2
3152 && ((unsigned HOST_WIDE_INT) l1
3153 > (unsigned HOST_WIDE_INT) l2)))
3154 lv = l1, hv = h1;
3155 else
3156 lv = l2, hv = h2;
3157 break;
3158
3159 case UMIN:
3160 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3161 || (h1 == h2
3162 && ((unsigned HOST_WIDE_INT) l1
3163 < (unsigned HOST_WIDE_INT) l2)))
3164 lv = l1, hv = h1;
3165 else
3166 lv = l2, hv = h2;
3167 break;
3168
3169 case UMAX:
3170 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3171 || (h1 == h2
3172 && ((unsigned HOST_WIDE_INT) l1
3173 > (unsigned HOST_WIDE_INT) l2)))
3174 lv = l1, hv = h1;
3175 else
3176 lv = l2, hv = h2;
3177 break;
3178
3179 case LSHIFTRT: case ASHIFTRT:
3180 case ASHIFT:
3181 case ROTATE: case ROTATERT:
3182 if (SHIFT_COUNT_TRUNCATED)
3183 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3184
3185 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3186 return 0;
3187
3188 if (code == LSHIFTRT || code == ASHIFTRT)
3189 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3190 code == ASHIFTRT);
3191 else if (code == ASHIFT)
3192 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3193 else if (code == ROTATE)
3194 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3195 else /* code == ROTATERT */
3196 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3197 break;
3198
3199 default:
3200 return 0;
3201 }
3202
3203 return immed_double_const (lv, hv, mode);
3204 }
3205
3206 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3207 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3208 {
3209 /* Get the integer argument values in two forms:
3210 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3211
3212 arg0 = INTVAL (op0);
3213 arg1 = INTVAL (op1);
3214
3215 if (width < HOST_BITS_PER_WIDE_INT)
3216 {
3217 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3218 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3219
3220 arg0s = arg0;
3221 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3222 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3223
3224 arg1s = arg1;
3225 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3226 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3227 }
3228 else
3229 {
3230 arg0s = arg0;
3231 arg1s = arg1;
3232 }
3233
3234 /* Compute the value of the arithmetic. */
3235
3236 switch (code)
3237 {
3238 case PLUS:
3239 val = arg0s + arg1s;
3240 break;
3241
3242 case MINUS:
3243 val = arg0s - arg1s;
3244 break;
3245
3246 case MULT:
3247 val = arg0s * arg1s;
3248 break;
3249
3250 case DIV:
3251 if (arg1s == 0
3252 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3253 && arg1s == -1))
3254 return 0;
3255 val = arg0s / arg1s;
3256 break;
3257
3258 case MOD:
3259 if (arg1s == 0
3260 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3261 && arg1s == -1))
3262 return 0;
3263 val = arg0s % arg1s;
3264 break;
3265
3266 case UDIV:
3267 if (arg1 == 0
3268 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3269 && arg1s == -1))
3270 return 0;
3271 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3272 break;
3273
3274 case UMOD:
3275 if (arg1 == 0
3276 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3277 && arg1s == -1))
3278 return 0;
3279 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3280 break;
3281
3282 case AND:
3283 val = arg0 & arg1;
3284 break;
3285
3286 case IOR:
3287 val = arg0 | arg1;
3288 break;
3289
3290 case XOR:
3291 val = arg0 ^ arg1;
3292 break;
3293
3294 case LSHIFTRT:
3295 case ASHIFT:
3296 case ASHIFTRT:
3297 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3298 the value is in range. We can't return any old value for
3299 out-of-range arguments because either the middle-end (via
3300 shift_truncation_mask) or the back-end might be relying on
3301 target-specific knowledge. Nor can we rely on
3302 shift_truncation_mask, since the shift might not be part of an
3303 ashlM3, lshrM3 or ashrM3 instruction. */
3304 if (SHIFT_COUNT_TRUNCATED)
3305 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3306 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3307 return 0;
3308
3309 val = (code == ASHIFT
3310 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3311 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3312
3313 /* Sign-extend the result for arithmetic right shifts. */
3314 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3315 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3316 break;
3317
3318 case ROTATERT:
3319 if (arg1 < 0)
3320 return 0;
3321
3322 arg1 %= width;
3323 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3324 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3325 break;
3326
3327 case ROTATE:
3328 if (arg1 < 0)
3329 return 0;
3330
3331 arg1 %= width;
3332 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3333 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3334 break;
3335
3336 case COMPARE:
3337 /* Do nothing here. */
3338 return 0;
3339
3340 case SMIN:
3341 val = arg0s <= arg1s ? arg0s : arg1s;
3342 break;
3343
3344 case UMIN:
3345 val = ((unsigned HOST_WIDE_INT) arg0
3346 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3347 break;
3348
3349 case SMAX:
3350 val = arg0s > arg1s ? arg0s : arg1s;
3351 break;
3352
3353 case UMAX:
3354 val = ((unsigned HOST_WIDE_INT) arg0
3355 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3356 break;
3357
3358 case SS_PLUS:
3359 case US_PLUS:
3360 case SS_MINUS:
3361 case US_MINUS:
3362 case SS_MULT:
3363 case US_MULT:
3364 case SS_DIV:
3365 case US_DIV:
3366 case SS_ASHIFT:
3367 case US_ASHIFT:
3368 /* ??? There are simplifications that can be done. */
3369 return 0;
3370
3371 default:
3372 gcc_unreachable ();
3373 }
3374
3375 return gen_int_mode (val, mode);
3376 }
3377
3378 return NULL_RTX;
3379 }
3380
3381
3382 \f
3383 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3384 PLUS or MINUS.
3385
3386 	 Rather than testing for specific cases, we do this by a brute-force method
3387 and do all possible simplifications until no more changes occur. Then
3388 we rebuild the operation. */
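/* For example, (minus (plus A B) (plus A C)) is expanded into the terms
   A, B, -A, -C; the A and -A terms cancel in the pairwise simplification
   loop, and the survivors are rebuilt as (minus B C).  */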
3389
3390 struct simplify_plus_minus_op_data
3391 {
3392 rtx op;
3393 short neg;
3394 };
3395
3396 static bool
3397 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3398 {
3399 int result;
3400
3401 result = (commutative_operand_precedence (y)
3402 - commutative_operand_precedence (x));
3403 if (result)
3404 return result > 0;
3405
3406 /* Group together equal REGs to do more simplification. */
3407 if (REG_P (x) && REG_P (y))
3408 return REGNO (x) > REGNO (y);
3409 else
3410 return false;
3411 }
3412
3413 static rtx
3414 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3415 rtx op1)
3416 {
3417 struct simplify_plus_minus_op_data ops[8];
3418 rtx result, tem;
3419 int n_ops = 2, input_ops = 2;
3420 int changed, n_constants = 0, canonicalized = 0;
3421 int i, j;
3422
3423 memset (ops, 0, sizeof ops);
3424
3425 /* Set up the two operands and then expand them until nothing has been
3426 changed. If we run out of room in our array, give up; this should
3427 almost never happen. */
3428
3429 ops[0].op = op0;
3430 ops[0].neg = 0;
3431 ops[1].op = op1;
3432 ops[1].neg = (code == MINUS);
3433
3434 do
3435 {
3436 changed = 0;
3437
3438 for (i = 0; i < n_ops; i++)
3439 {
3440 rtx this_op = ops[i].op;
3441 int this_neg = ops[i].neg;
3442 enum rtx_code this_code = GET_CODE (this_op);
3443
3444 switch (this_code)
3445 {
3446 case PLUS:
3447 case MINUS:
3448 if (n_ops == 7)
3449 return NULL_RTX;
3450
3451 ops[n_ops].op = XEXP (this_op, 1);
3452 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3453 n_ops++;
3454
3455 ops[i].op = XEXP (this_op, 0);
3456 input_ops++;
3457 changed = 1;
3458 canonicalized |= this_neg;
3459 break;
3460
3461 case NEG:
3462 ops[i].op = XEXP (this_op, 0);
3463 ops[i].neg = ! this_neg;
3464 changed = 1;
3465 canonicalized = 1;
3466 break;
3467
3468 case CONST:
3469 if (n_ops < 7
3470 && GET_CODE (XEXP (this_op, 0)) == PLUS
3471 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3472 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3473 {
3474 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3475 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3476 ops[n_ops].neg = this_neg;
3477 n_ops++;
3478 changed = 1;
3479 canonicalized = 1;
3480 }
3481 break;
3482
3483 case NOT:
3484 /* ~a -> (-a - 1) */
3485 if (n_ops != 7)
3486 {
3487 ops[n_ops].op = constm1_rtx;
3488 ops[n_ops++].neg = this_neg;
3489 ops[i].op = XEXP (this_op, 0);
3490 ops[i].neg = !this_neg;
3491 changed = 1;
3492 canonicalized = 1;
3493 }
3494 break;
3495
3496 case CONST_INT:
3497 n_constants++;
3498 if (this_neg)
3499 {
3500 ops[i].op = neg_const_int (mode, this_op);
3501 ops[i].neg = 0;
3502 changed = 1;
3503 canonicalized = 1;
3504 }
3505 break;
3506
3507 default:
3508 break;
3509 }
3510 }
3511 }
3512 while (changed);
3513
3514 if (n_constants > 1)
3515 canonicalized = 1;
3516
3517 gcc_assert (n_ops >= 2);
3518
3519 /* If we only have two operands, we can avoid the loops. */
3520 if (n_ops == 2)
3521 {
3522 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3523 rtx lhs, rhs;
3524
3525 /* Get the two operands. Be careful with the order, especially for
3526 the cases where code == MINUS. */
3527 if (ops[0].neg && ops[1].neg)
3528 {
3529 lhs = gen_rtx_NEG (mode, ops[0].op);
3530 rhs = ops[1].op;
3531 }
3532 else if (ops[0].neg)
3533 {
3534 lhs = ops[1].op;
3535 rhs = ops[0].op;
3536 }
3537 else
3538 {
3539 lhs = ops[0].op;
3540 rhs = ops[1].op;
3541 }
3542
3543 return simplify_const_binary_operation (code, mode, lhs, rhs);
3544 }
3545
3546 /* Now simplify each pair of operands until nothing changes. */
3547 do
3548 {
3549 /* Insertion sort is good enough for an eight-element array. */
3550 for (i = 1; i < n_ops; i++)
3551 {
3552 struct simplify_plus_minus_op_data save;
3553 j = i - 1;
3554 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3555 continue;
3556
3557 canonicalized = 1;
3558 save = ops[i];
3559 do
3560 ops[j + 1] = ops[j];
3561 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3562 ops[j + 1] = save;
3563 }
3564
3565 /* This is only useful the first time through. */
3566 if (!canonicalized)
3567 return NULL_RTX;
3568
3569 changed = 0;
3570 for (i = n_ops - 1; i > 0; i--)
3571 for (j = i - 1; j >= 0; j--)
3572 {
3573 rtx lhs = ops[j].op, rhs = ops[i].op;
3574 int lneg = ops[j].neg, rneg = ops[i].neg;
3575
3576 if (lhs != 0 && rhs != 0)
3577 {
3578 enum rtx_code ncode = PLUS;
3579
3580 if (lneg != rneg)
3581 {
3582 ncode = MINUS;
3583 if (lneg)
3584 tem = lhs, lhs = rhs, rhs = tem;
3585 }
3586 else if (swap_commutative_operands_p (lhs, rhs))
3587 tem = lhs, lhs = rhs, rhs = tem;
3588
3589 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3590 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3591 {
3592 rtx tem_lhs, tem_rhs;
3593
3594 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3595 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3596 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3597
3598 if (tem && !CONSTANT_P (tem))
3599 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3600 }
3601 else
3602 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3603
3604 /* Reject "simplifications" that just wrap the two
3605 arguments in a CONST. Failure to do so can result
3606 in infinite recursion with simplify_binary_operation
3607 when it calls us to simplify CONST operations. */
3608 if (tem
3609 && ! (GET_CODE (tem) == CONST
3610 && GET_CODE (XEXP (tem, 0)) == ncode
3611 && XEXP (XEXP (tem, 0), 0) == lhs
3612 && XEXP (XEXP (tem, 0), 1) == rhs))
3613 {
3614 lneg &= rneg;
3615 if (GET_CODE (tem) == NEG)
3616 tem = XEXP (tem, 0), lneg = !lneg;
3617 if (GET_CODE (tem) == CONST_INT && lneg)
3618 tem = neg_const_int (mode, tem), lneg = 0;
3619
3620 ops[i].op = tem;
3621 ops[i].neg = lneg;
3622 ops[j].op = NULL_RTX;
3623 changed = 1;
3624 }
3625 }
3626 }
3627
3628 /* Pack all the operands to the lower-numbered entries. */
3629 for (i = 0, j = 0; j < n_ops; j++)
3630 if (ops[j].op)
3631 {
3632 ops[i] = ops[j];
3633 i++;
3634 }
3635 n_ops = i;
3636 }
3637 while (changed);
3638
3639 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3640 if (n_ops == 2
3641 && GET_CODE (ops[1].op) == CONST_INT
3642 && CONSTANT_P (ops[0].op)
3643 && ops[0].neg)
3644 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3645
3646 /* We suppressed creation of trivial CONST expressions in the
3647 combination loop to avoid recursion. Create one manually now.
3648 The combination loop should have ensured that there is exactly
3649 one CONST_INT, and the sort will have ensured that it is last
3650 in the array and that any other constant will be next-to-last. */
3651
3652 if (n_ops > 1
3653 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3654 && CONSTANT_P (ops[n_ops - 2].op))
3655 {
3656 rtx value = ops[n_ops - 1].op;
3657 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3658 value = neg_const_int (mode, value);
3659 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3660 n_ops--;
3661 }
3662
3663 /* Put a non-negated operand first, if possible. */
3664
3665 for (i = 0; i < n_ops && ops[i].neg; i++)
3666 continue;
3667 if (i == n_ops)
3668 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3669 else if (i != 0)
3670 {
3671 tem = ops[0].op;
3672 ops[0] = ops[i];
3673 ops[i].op = tem;
3674 ops[i].neg = 1;
3675 }
3676
3677 /* Now make the result by performing the requested operations. */
3678 result = ops[0].op;
3679 for (i = 1; i < n_ops; i++)
3680 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3681 mode, result, ops[i].op);
3682
3683 return result;
3684 }
3685
3686 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3687 static bool
3688 plus_minus_operand_p (const_rtx x)
3689 {
3690 return GET_CODE (x) == PLUS
3691 || GET_CODE (x) == MINUS
3692 || (GET_CODE (x) == CONST
3693 && GET_CODE (XEXP (x, 0)) == PLUS
3694 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3695 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3696 }
3697
3698 /* Like simplify_binary_operation except used for relational operators.
3699 	 MODE is the mode of the result.  If MODE is VOIDmode, the operands must
3700 	 not both be VOIDmode as well.
3701
3702 	 CMP_MODE specifies the mode in which the comparison is done, so it is
3703 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3704 the operands or, if both are VOIDmode, the operands are compared in
3705 "infinite precision". */
3706 rtx
3707 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3708 enum machine_mode cmp_mode, rtx op0, rtx op1)
3709 {
3710 rtx tem, trueop0, trueop1;
3711
3712 if (cmp_mode == VOIDmode)
3713 cmp_mode = GET_MODE (op0);
3714 if (cmp_mode == VOIDmode)
3715 cmp_mode = GET_MODE (op1);
3716
3717 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3718 if (tem)
3719 {
3720 if (SCALAR_FLOAT_MODE_P (mode))
3721 {
3722 if (tem == const0_rtx)
3723 return CONST0_RTX (mode);
3724 #ifdef FLOAT_STORE_FLAG_VALUE
3725 {
3726 REAL_VALUE_TYPE val;
3727 val = FLOAT_STORE_FLAG_VALUE (mode);
3728 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3729 }
3730 #else
3731 return NULL_RTX;
3732 #endif
3733 }
3734 if (VECTOR_MODE_P (mode))
3735 {
3736 if (tem == const0_rtx)
3737 return CONST0_RTX (mode);
3738 #ifdef VECTOR_STORE_FLAG_VALUE
3739 {
3740 int i, units;
3741 rtvec v;
3742
3743 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3744 if (val == NULL_RTX)
3745 return NULL_RTX;
3746 if (val == const1_rtx)
3747 return CONST1_RTX (mode);
3748
3749 units = GET_MODE_NUNITS (mode);
3750 v = rtvec_alloc (units);
3751 for (i = 0; i < units; i++)
3752 RTVEC_ELT (v, i) = val;
3753 return gen_rtx_raw_CONST_VECTOR (mode, v);
3754 }
3755 #else
3756 return NULL_RTX;
3757 #endif
3758 }
3759
3760 return tem;
3761 }
3762
3763 /* For the following tests, ensure const0_rtx is op1. */
3764 if (swap_commutative_operands_p (op0, op1)
3765 || (op0 == const0_rtx && op1 != const0_rtx))
3766 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3767
3768 /* If op0 is a compare, extract the comparison arguments from it. */
3769 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3770 return simplify_relational_operation (code, mode, VOIDmode,
3771 XEXP (op0, 0), XEXP (op0, 1));
3772
3773 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3774 || CC0_P (op0))
3775 return NULL_RTX;
3776
3777 trueop0 = avoid_constant_pool_reference (op0);
3778 trueop1 = avoid_constant_pool_reference (op1);
3779 return simplify_relational_operation_1 (code, mode, cmp_mode,
3780 trueop0, trueop1);
3781 }
3782
3783 /* This part of simplify_relational_operation is only used when CMP_MODE
3784 is not in class MODE_CC (i.e. it is a real comparison).
3785
3786 	 MODE is the mode of the result, while CMP_MODE specifies the mode
3787 	 in which the comparison is done, so it is the mode of the operands.  */
3788
3789 static rtx
3790 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3791 enum machine_mode cmp_mode, rtx op0, rtx op1)
3792 {
3793 enum rtx_code op0code = GET_CODE (op0);
3794
3795 if (op1 == const0_rtx && COMPARISON_P (op0))
3796 {
3797 /* If op0 is a comparison, extract the comparison arguments
3798 from it. */
3799 if (code == NE)
3800 {
3801 if (GET_MODE (op0) == mode)
3802 return simplify_rtx (op0);
3803 else
3804 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3805 XEXP (op0, 0), XEXP (op0, 1));
3806 }
3807 else if (code == EQ)
3808 {
3809 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3810 if (new_code != UNKNOWN)
3811 return simplify_gen_relational (new_code, mode, VOIDmode,
3812 XEXP (op0, 0), XEXP (op0, 1));
3813 }
3814 }
3815
3816 if (op1 == const0_rtx)
3817 {
3818 /* Canonicalize (GTU x 0) as (NE x 0). */
3819 if (code == GTU)
3820 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3821 /* Canonicalize (LEU x 0) as (EQ x 0). */
3822 if (code == LEU)
3823 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3824 }
3825 else if (op1 == const1_rtx)
3826 {
3827 switch (code)
3828 {
3829 case GE:
3830 /* Canonicalize (GE x 1) as (GT x 0). */
3831 return simplify_gen_relational (GT, mode, cmp_mode,
3832 op0, const0_rtx);
3833 case GEU:
3834 /* Canonicalize (GEU x 1) as (NE x 0). */
3835 return simplify_gen_relational (NE, mode, cmp_mode,
3836 op0, const0_rtx);
3837 case LT:
3838 /* Canonicalize (LT x 1) as (LE x 0). */
3839 return simplify_gen_relational (LE, mode, cmp_mode,
3840 op0, const0_rtx);
3841 case LTU:
3842 /* Canonicalize (LTU x 1) as (EQ x 0). */
3843 return simplify_gen_relational (EQ, mode, cmp_mode,
3844 op0, const0_rtx);
3845 default:
3846 break;
3847 }
3848 }
3849 else if (op1 == constm1_rtx)
3850 {
3851 /* Canonicalize (LE x -1) as (LT x 0). */
3852 if (code == LE)
3853 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3854 /* Canonicalize (GT x -1) as (GE x 0). */
3855 if (code == GT)
3856 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3857 }
3858
3859 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)). */
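/* For instance, (eq:SI (plus:SI x (const_int 3)) (const_int 7))
   becomes (eq:SI x (const_int 4)). */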
3860 if ((code == EQ || code == NE)
3861 && (op0code == PLUS || op0code == MINUS)
3862 && CONSTANT_P (op1)
3863 && CONSTANT_P (XEXP (op0, 1))
3864 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3865 {
3866 rtx x = XEXP (op0, 0);
3867 rtx c = XEXP (op0, 1);
3868
3869 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3870 cmp_mode, op1, c);
3871 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3872 }
3873
3874 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3875 the same as (zero_extract:SI FOO (const_int 1) BAR). */
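/* When the only bit of OP0 that can be nonzero is bit zero, OP0 is
   either 0 or 1, so with STORE_FLAG_VALUE == 1 the NE test has the
   same value as OP0 itself; only the mode needs adjusting. */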
3876 if (code == NE
3877 && op1 == const0_rtx
3878 && GET_MODE_CLASS (mode) == MODE_INT
3879 && cmp_mode != VOIDmode
3880 /* ??? Work around BImode bugs in the ia64 backend. */
3881 && mode != BImode
3882 && cmp_mode != BImode
3883 && nonzero_bits (op0, cmp_mode) == 1
3884 && STORE_FLAG_VALUE == 1)
3885 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3886 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3887 : lowpart_subreg (mode, op0, cmp_mode);
3888
3889 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3890 if ((code == EQ || code == NE)
3891 && op1 == const0_rtx
3892 && op0code == XOR)
3893 return simplify_gen_relational (code, mode, cmp_mode,
3894 XEXP (op0, 0), XEXP (op0, 1));
3895
3896 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3897 if ((code == EQ || code == NE)
3898 && op0code == XOR
3899 && rtx_equal_p (XEXP (op0, 0), op1)
3900 && !side_effects_p (XEXP (op0, 0)))
3901 return simplify_gen_relational (code, mode, cmp_mode,
3902 XEXP (op0, 1), const0_rtx);
3903
3904 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3905 if ((code == EQ || code == NE)
3906 && op0code == XOR
3907 && rtx_equal_p (XEXP (op0, 1), op1)
3908 && !side_effects_p (XEXP (op0, 1)))
3909 return simplify_gen_relational (code, mode, cmp_mode,
3910 XEXP (op0, 0), const0_rtx);
3911
3912 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
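/* For instance, (eq:SI (xor:SI x (const_int 5)) (const_int 3))
   becomes (eq:SI x (const_int 6)). */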
3913 if ((code == EQ || code == NE)
3914 && op0code == XOR
3915 && (GET_CODE (op1) == CONST_INT
3916 || GET_CODE (op1) == CONST_DOUBLE)
3917 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3918 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3919 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3920 simplify_gen_binary (XOR, cmp_mode,
3921 XEXP (op0, 1), op1));
3922
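/* POPCOUNT is never negative, and it is zero only when its operand is
   zero, so comparisons of (popcount x) against zero reduce to tests
   of x itself. */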
3923 if (op0code == POPCOUNT && op1 == const0_rtx)
3924 switch (code)
3925 {
3926 case EQ:
3927 case LE:
3928 case LEU:
3929 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3930 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3931 XEXP (op0, 0), const0_rtx);
3932
3933 case NE:
3934 case GT:
3935 case GTU:
3936 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3937 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3938 XEXP (op0, 0), const0_rtx);
3939
3940 default:
3941 break;
3942 }
3943
3944 return NULL_RTX;
3945 }
3946
3947 /* Check if the given comparison (done in the given MODE) is actually a
3948 tautology or a contradiction.
3949 If no simplification is possible, this function returns zero.
3950 Otherwise, it returns either const_true_rtx or const0_rtx. */
3951
3952 rtx
3953 simplify_const_relational_operation (enum rtx_code code,
3954 enum machine_mode mode,
3955 rtx op0, rtx op1)
3956 {
3957 int equal, op0lt, op0ltu, op1lt, op1ltu;
3958 rtx tem;
3959 rtx trueop0;
3960 rtx trueop1;
3961
3962 gcc_assert (mode != VOIDmode
3963 || (GET_MODE (op0) == VOIDmode
3964 && GET_MODE (op1) == VOIDmode));
3965
3966 /* If op0 is a compare, extract the comparison arguments from it. */
3967 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3968 {
3969 op1 = XEXP (op0, 1);
3970 op0 = XEXP (op0, 0);
3971
3972 if (GET_MODE (op0) != VOIDmode)
3973 mode = GET_MODE (op0);
3974 else if (GET_MODE (op1) != VOIDmode)
3975 mode = GET_MODE (op1);
3976 else
3977 return 0;
3978 }
3979
3980 /* We can't simplify MODE_CC values since we don't know what the
3981 actual comparison is. */
3982 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3983 return 0;
3984
3985 /* Make sure the constant is second. */
3986 if (swap_commutative_operands_p (op0, op1))
3987 {
3988 tem = op0, op0 = op1, op1 = tem;
3989 code = swap_condition (code);
3990 }
3991
3992 trueop0 = avoid_constant_pool_reference (op0);
3993 trueop1 = avoid_constant_pool_reference (op1);
3994
3995 /* For integer comparisons of A and B maybe we can simplify A - B and can
3996 then simplify a comparison of that with zero. If A and B are both either
3997 a register or a CONST_INT, this can't help; testing for these cases will
3998 prevent infinite recursion here and speed things up.
3999
4000 We can only do this for EQ and NE comparisons, since otherwise we may
4001 lose or introduce overflow that we cannot disregard as undefined,
4002 because we do not know the signedness of the operation on either the
4003 left or the right hand side of the comparison. */
4004
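/* For instance, (eq:SI (plus:SI r (const_int 4)) (plus:SI r (const_int 2)))
   becomes a comparison of (const_int 2) against zero and thus folds
   to const0_rtx. */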
4005 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4006 && (code == EQ || code == NE)
4007 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
4008 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
4009 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4010 /* We cannot do this if tem is a nonzero address. */
4011 && ! nonzero_address_p (tem))
4012 return simplify_const_relational_operation (signed_condition (code),
4013 mode, tem, const0_rtx);
4014
4015 if (! HONOR_NANS (mode) && code == ORDERED)
4016 return const_true_rtx;
4017
4018 if (! HONOR_NANS (mode) && code == UNORDERED)
4019 return const0_rtx;
4020
4021 /* For modes without NaNs, if the two operands are equal, we know the
4022 result except if they have side-effects. */
4023 if (! HONOR_NANS (GET_MODE (trueop0))
4024 && rtx_equal_p (trueop0, trueop1)
4025 && ! side_effects_p (trueop0))
4026 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
4027
4028 /* If the operands are floating-point constants, see if we can fold
4029 the result. */
4030 else if (GET_CODE (trueop0) == CONST_DOUBLE
4031 && GET_CODE (trueop1) == CONST_DOUBLE
4032 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4033 {
4034 REAL_VALUE_TYPE d0, d1;
4035
4036 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4037 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4038
4039 /* Comparisons are unordered iff at least one of the values is NaN. */
4040 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4041 switch (code)
4042 {
4043 case UNEQ:
4044 case UNLT:
4045 case UNGT:
4046 case UNLE:
4047 case UNGE:
4048 case NE:
4049 case UNORDERED:
4050 return const_true_rtx;
4051 case EQ:
4052 case LT:
4053 case GT:
4054 case LE:
4055 case GE:
4056 case LTGT:
4057 case ORDERED:
4058 return const0_rtx;
4059 default:
4060 return 0;
4061 }
4062
4063 equal = REAL_VALUES_EQUAL (d0, d1);
4064 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
4065 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
4066 }
4067
4068 /* Otherwise, see if the operands are both integers. */
4069 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4070 && (GET_CODE (trueop0) == CONST_DOUBLE
4071 || GET_CODE (trueop0) == CONST_INT)
4072 && (GET_CODE (trueop1) == CONST_DOUBLE
4073 || GET_CODE (trueop1) == CONST_INT))
4074 {
4075 int width = GET_MODE_BITSIZE (mode);
4076 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4077 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4078
4079 /* Get the two words comprising each integer constant. */
4080 if (GET_CODE (trueop0) == CONST_DOUBLE)
4081 {
4082 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4083 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4084 }
4085 else
4086 {
4087 l0u = l0s = INTVAL (trueop0);
4088 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4089 }
4090
4091 if (GET_CODE (trueop1) == CONST_DOUBLE)
4092 {
4093 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4094 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4095 }
4096 else
4097 {
4098 l1u = l1s = INTVAL (trueop1);
4099 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4100 }
4101
4102 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4103 we have to sign or zero-extend the values. */
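/* E.g. in QImode, (const_int 255) and (const_int -1) denote the same
   value and therefore compare equal here. */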
4104 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4105 {
4106 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4107 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4108
4109 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4110 l0s |= ((HOST_WIDE_INT) (-1) << width);
4111
4112 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4113 l1s |= ((HOST_WIDE_INT) (-1) << width);
4114 }
4115 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4116 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4117
4118 equal = (h0u == h1u && l0u == l1u);
4119 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4120 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4121 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4122 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4123 }
4124
4125 /* Otherwise, there are some code-specific tests we can make. */
4126 else
4127 {
4128 /* Optimize comparisons with upper and lower bounds. */
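/* For example, (geu:SI x (const_int 0)) is always true, and
   (gtu:SI x (const_int -1)), i.e. an unsigned comparison against
   0xffffffff, is always false. */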
4129 if (SCALAR_INT_MODE_P (mode)
4130 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4131 {
4132 rtx mmin, mmax;
4133 int sign;
4134
4135 if (code == GEU
4136 || code == LEU
4137 || code == GTU
4138 || code == LTU)
4139 sign = 0;
4140 else
4141 sign = 1;
4142
4143 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4144
4145 tem = NULL_RTX;
4146 switch (code)
4147 {
4148 case GEU:
4149 case GE:
4150 /* x >= min is always true. */
4151 if (rtx_equal_p (trueop1, mmin))
4152 tem = const_true_rtx;
4153 else
4154 break;
4155
4156 case LEU:
4157 case LE:
4158 /* x <= max is always true. */
4159 if (rtx_equal_p (trueop1, mmax))
4160 tem = const_true_rtx;
4161 break;
4162
4163 case GTU:
4164 case GT:
4165 /* x > max is always false. */
4166 if (rtx_equal_p (trueop1, mmax))
4167 tem = const0_rtx;
4168 break;
4169
4170 case LTU:
4171 case LT:
4172 /* x < min is always false. */
4173 if (rtx_equal_p (trueop1, mmin))
4174 tem = const0_rtx;
4175 break;
4176
4177 default:
4178 break;
4179 }
4180 if (tem == const0_rtx
4181 || tem == const_true_rtx)
4182 return tem;
4183 }
4184
4185 switch (code)
4186 {
4187 case EQ:
4188 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4189 return const0_rtx;
4190 break;
4191
4192 case NE:
4193 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4194 return const_true_rtx;
4195 break;
4196
4197 case LT:
4198 /* Optimize abs(x) < 0.0. */
4199 if (trueop1 == CONST0_RTX (mode)
4200 && !HONOR_SNANS (mode)
4201 && (!INTEGRAL_MODE_P (mode)
4202 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4203 {
4204 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4205 : trueop0;
4206 if (GET_CODE (tem) == ABS)
4207 {
4208 if (INTEGRAL_MODE_P (mode)
4209 && (issue_strict_overflow_warning
4210 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4211 warning (OPT_Wstrict_overflow,
4212 ("assuming signed overflow does not occur when "
4213 "assuming abs (x) < 0 is false"));
4214 return const0_rtx;
4215 }
4216 }
4217
4218 /* Optimize popcount (x) < 0. */
4219 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4220 return const_true_rtx;
4221 break;
4222
4223 case GE:
4224 /* Optimize abs(x) >= 0.0. */
4225 if (trueop1 == CONST0_RTX (mode)
4226 && !HONOR_NANS (mode)
4227 && (!INTEGRAL_MODE_P (mode)
4228 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4229 {
4230 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4231 : trueop0;
4232 if (GET_CODE (tem) == ABS)
4233 {
4234 if (INTEGRAL_MODE_P (mode)
4235 && (issue_strict_overflow_warning
4236 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4237 warning (OPT_Wstrict_overflow,
4238 ("assuming signed overflow does not occur when "
4239 "assuming abs (x) >= 0 is true"));
4240 return const_true_rtx;
4241 }
4242 }
4243
4244 /* Optimize popcount (x) >= 0. */
4245 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4246 return const_true_rtx;
4247 break;
4248
4249 case UNGE:
4250 /* Optimize ! (abs(x) < 0.0). */
4251 if (trueop1 == CONST0_RTX (mode))
4252 {
4253 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4254 : trueop0;
4255 if (GET_CODE (tem) == ABS)
4256 return const_true_rtx;
4257 }
4258 break;
4259
4260 default:
4261 break;
4262 }
4263
4264 return 0;
4265 }
4266
4267 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4268 as appropriate. */
4269 switch (code)
4270 {
4271 case EQ:
4272 case UNEQ:
4273 return equal ? const_true_rtx : const0_rtx;
4274 case NE:
4275 case LTGT:
4276 return ! equal ? const_true_rtx : const0_rtx;
4277 case LT:
4278 case UNLT:
4279 return op0lt ? const_true_rtx : const0_rtx;
4280 case GT:
4281 case UNGT:
4282 return op1lt ? const_true_rtx : const0_rtx;
4283 case LTU:
4284 return op0ltu ? const_true_rtx : const0_rtx;
4285 case GTU:
4286 return op1ltu ? const_true_rtx : const0_rtx;
4287 case LE:
4288 case UNLE:
4289 return equal || op0lt ? const_true_rtx : const0_rtx;
4290 case GE:
4291 case UNGE:
4292 return equal || op1lt ? const_true_rtx : const0_rtx;
4293 case LEU:
4294 return equal || op0ltu ? const_true_rtx : const0_rtx;
4295 case GEU:
4296 return equal || op1ltu ? const_true_rtx : const0_rtx;
4297 case ORDERED:
4298 return const_true_rtx;
4299 case UNORDERED:
4300 return const0_rtx;
4301 default:
4302 gcc_unreachable ();
4303 }
4304 }
4305 \f
4306 /* Simplify CODE, an operation with result mode MODE and three operands,
4307 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4308 a constant. Return 0 if no simplification is possible. */
4309
4310 rtx
4311 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4312 enum machine_mode op0_mode, rtx op0, rtx op1,
4313 rtx op2)
4314 {
4315 unsigned int width = GET_MODE_BITSIZE (mode);
4316
4317 /* VOIDmode means "infinite" precision. */
4318 if (width == 0)
4319 width = HOST_BITS_PER_WIDE_INT;
4320
4321 switch (code)
4322 {
4323 case SIGN_EXTRACT:
4324 case ZERO_EXTRACT:
4325 if (GET_CODE (op0) == CONST_INT
4326 && GET_CODE (op1) == CONST_INT
4327 && GET_CODE (op2) == CONST_INT
4328 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4329 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4330 {
4331 /* Extracting a bit-field from a constant. */
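/* E.g. on a !BITS_BIG_ENDIAN target,
   (zero_extract:SI (const_int 0x123) (const_int 4) (const_int 4))
   extracts bits 4..7 of 0x123 and yields (const_int 2). */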
4332 HOST_WIDE_INT val = INTVAL (op0);
4333
4334 if (BITS_BIG_ENDIAN)
4335 val >>= (GET_MODE_BITSIZE (op0_mode)
4336 - INTVAL (op2) - INTVAL (op1));
4337 else
4338 val >>= INTVAL (op2);
4339
4340 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4341 {
4342 /* First zero-extend. */
4343 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4344 /* If desired, propagate sign bit. */
4345 if (code == SIGN_EXTRACT
4346 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4347 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4348 }
4349
4350 /* Clear the bits that don't belong in our mode,
4351 unless they and our sign bit are all one.
4352 So we get either a reasonable negative value or a reasonable
4353 unsigned value for this mode. */
4354 if (width < HOST_BITS_PER_WIDE_INT
4355 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4356 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4357 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4358
4359 return gen_int_mode (val, mode);
4360 }
4361 break;
4362
4363 case IF_THEN_ELSE:
4364 if (GET_CODE (op0) == CONST_INT)
4365 return op0 != const0_rtx ? op1 : op2;
4366
4367 /* Convert c ? a : a into "a". */
4368 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4369 return op1;
4370
4371 /* Convert a != b ? a : b into "a". */
4372 if (GET_CODE (op0) == NE
4373 && ! side_effects_p (op0)
4374 && ! HONOR_NANS (mode)
4375 && ! HONOR_SIGNED_ZEROS (mode)
4376 && ((rtx_equal_p (XEXP (op0, 0), op1)
4377 && rtx_equal_p (XEXP (op0, 1), op2))
4378 || (rtx_equal_p (XEXP (op0, 0), op2)
4379 && rtx_equal_p (XEXP (op0, 1), op1))))
4380 return op1;
4381
4382 /* Convert a == b ? a : b into "b". */
4383 if (GET_CODE (op0) == EQ
4384 && ! side_effects_p (op0)
4385 && ! HONOR_NANS (mode)
4386 && ! HONOR_SIGNED_ZEROS (mode)
4387 && ((rtx_equal_p (XEXP (op0, 0), op1)
4388 && rtx_equal_p (XEXP (op0, 1), op2))
4389 || (rtx_equal_p (XEXP (op0, 0), op2)
4390 && rtx_equal_p (XEXP (op0, 1), op1))))
4391 return op2;
4392
4393 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4394 {
4395 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4396 ? GET_MODE (XEXP (op0, 1))
4397 : GET_MODE (XEXP (op0, 0)));
4398 rtx temp;
4399
4400 /* If op1 and op2 are STORE_FLAG_VALUE and zero, in either order, the IF_THEN_ELSE collapses to a bare comparison. */
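/* E.g. with STORE_FLAG_VALUE == 1,
   (if_then_else (lt a b) (const_int 1) (const_int 0))
   becomes (lt a b) in the result mode. */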
4401 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4402 {
4403 HOST_WIDE_INT t = INTVAL (op1);
4404 HOST_WIDE_INT f = INTVAL (op2);
4405
4406 if (t == STORE_FLAG_VALUE && f == 0)
4407 code = GET_CODE (op0);
4408 else if (t == 0 && f == STORE_FLAG_VALUE)
4409 {
4410 enum rtx_code tmp;
4411 tmp = reversed_comparison_code (op0, NULL_RTX);
4412 if (tmp == UNKNOWN)
4413 break;
4414 code = tmp;
4415 }
4416 else
4417 break;
4418
4419 return simplify_gen_relational (code, mode, cmp_mode,
4420 XEXP (op0, 0), XEXP (op0, 1));
4421 }
4422
4423 if (cmp_mode == VOIDmode)
4424 cmp_mode = op0_mode;
4425 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4426 cmp_mode, XEXP (op0, 0),
4427 XEXP (op0, 1));
4428
4429 /* See if any simplifications were possible. */
4430 if (temp)
4431 {
4432 if (GET_CODE (temp) == CONST_INT)
4433 return temp == const0_rtx ? op2 : op1;
4434 else
4435 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4436 }
4437 }
4438 break;
4439
4440 case VEC_MERGE:
4441 gcc_assert (GET_MODE (op0) == mode);
4442 gcc_assert (GET_MODE (op1) == mode);
4443 gcc_assert (VECTOR_MODE_P (mode));
4444 op2 = avoid_constant_pool_reference (op2);
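/* Bit I of the mask selects element I from op0 when set and from op1
   when clear; e.g. for a four-element vector, a mask of (const_int 5)
   takes elements 0 and 2 from op0 and elements 1 and 3 from op1. */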
4445 if (GET_CODE (op2) == CONST_INT)
4446 {
4447 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4448 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4449 int mask = (1 << n_elts) - 1;
4450
4451 if (!(INTVAL (op2) & mask))
4452 return op1;
4453 if ((INTVAL (op2) & mask) == mask)
4454 return op0;
4455
4456 op0 = avoid_constant_pool_reference (op0);
4457 op1 = avoid_constant_pool_reference (op1);
4458 if (GET_CODE (op0) == CONST_VECTOR
4459 && GET_CODE (op1) == CONST_VECTOR)
4460 {
4461 rtvec v = rtvec_alloc (n_elts);
4462 unsigned int i;
4463
4464 for (i = 0; i < n_elts; i++)
4465 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4466 ? CONST_VECTOR_ELT (op0, i)
4467 : CONST_VECTOR_ELT (op1, i));
4468 return gen_rtx_CONST_VECTOR (mode, v);
4469 }
4470 }
4471 break;
4472
4473 default:
4474 gcc_unreachable ();
4475 }
4476
4477 return 0;
4478 }
4479
4480 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4481 or CONST_VECTOR,
4482 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4483
4484 Works by unpacking OP into a collection of 8-bit values
4485 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4486 and then repacking them again for OUTERMODE. */
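/* For example, on a little-endian target a QImode subreg at byte 0 of
   the HImode constant 0x1234 yields (const_int 0x34), while byte 1
   yields (const_int 0x12). */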
4487
4488 static rtx
4489 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4490 enum machine_mode innermode, unsigned int byte)
4491 {
4492 /* We support up to 512-bit values (for V8DFmode). */
4493 enum {
4494 max_bitsize = 512,
4495 value_bit = 8,
4496 value_mask = (1 << value_bit) - 1
4497 };
4498 unsigned char value[max_bitsize / value_bit];
4499 int value_start;
4500 int i;
4501 int elem;
4502
4503 int num_elem;
4504 rtx * elems;
4505 int elem_bitsize;
4506 rtx result_s;
4507 rtvec result_v = NULL;
4508 enum mode_class outer_class;
4509 enum machine_mode outer_submode;
4510
4511 /* Some ports misuse CCmode. */
4512 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4513 return op;
4514
4515 /* We have no way to represent a complex constant at the rtl level. */
4516 if (COMPLEX_MODE_P (outermode))
4517 return NULL_RTX;
4518
4519 /* Unpack the value. */
4520
4521 if (GET_CODE (op) == CONST_VECTOR)
4522 {
4523 num_elem = CONST_VECTOR_NUNITS (op);
4524 elems = &CONST_VECTOR_ELT (op, 0);
4525 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4526 }
4527 else
4528 {
4529 num_elem = 1;
4530 elems = &op;
4531 elem_bitsize = max_bitsize;
4532 }
4533 /* If this asserts, it is too complicated; reducing value_bit may help. */
4534 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4535 /* I don't know how to handle endianness of sub-units. */
4536 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4537
4538 for (elem = 0; elem < num_elem; elem++)
4539 {
4540 unsigned char * vp;
4541 rtx el = elems[elem];
4542
4543 /* Vectors are kept in target memory order. (This is probably
4544 a mistake.) */
4545 {
4546 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4547 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4548 / BITS_PER_UNIT);
4549 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4550 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4551 unsigned bytele = (subword_byte % UNITS_PER_WORD
4552 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4553 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4554 }
4555
4556 switch (GET_CODE (el))
4557 {
4558 case CONST_INT:
4559 for (i = 0;
4560 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4561 i += value_bit)
4562 *vp++ = INTVAL (el) >> i;
4563 /* CONST_INTs are always logically sign-extended. */
4564 for (; i < elem_bitsize; i += value_bit)
4565 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4566 break;
4567
4568 case CONST_DOUBLE:
4569 if (GET_MODE (el) == VOIDmode)
4570 {
4571 /* If this triggers, someone should have generated a
4572 CONST_INT instead. */
4573 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4574
4575 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4576 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4577 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4578 {
4579 *vp++
4580 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4581 i += value_bit;
4582 }
4583 /* It shouldn't matter what's done here, so fill it with
4584 zero. */
4585 for (; i < elem_bitsize; i += value_bit)
4586 *vp++ = 0;
4587 }
4588 else
4589 {
4590 long tmp[max_bitsize / 32];
4591 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4592
4593 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4594 gcc_assert (bitsize <= elem_bitsize);
4595 gcc_assert (bitsize % value_bit == 0);
4596
4597 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4598 GET_MODE (el));
4599
4600 /* real_to_target produces its result in words affected by
4601 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4602 and use WORDS_BIG_ENDIAN instead; see the documentation
4603 of SUBREG in rtl.texi. */
4604 for (i = 0; i < bitsize; i += value_bit)
4605 {
4606 int ibase;
4607 if (WORDS_BIG_ENDIAN)
4608 ibase = bitsize - 1 - i;
4609 else
4610 ibase = i;
4611 *vp++ = tmp[ibase / 32] >> i % 32;
4612 }
4613
4614 /* It shouldn't matter what's done here, so fill it with
4615 zero. */
4616 for (; i < elem_bitsize; i += value_bit)
4617 *vp++ = 0;
4618 }
4619 break;
4620
4621 case CONST_FIXED:
4622 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4623 {
4624 for (i = 0; i < elem_bitsize; i += value_bit)
4625 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4626 }
4627 else
4628 {
4629 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4630 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4631 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4632 i += value_bit)
4633 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4634 >> (i - HOST_BITS_PER_WIDE_INT);
4635 for (; i < elem_bitsize; i += value_bit)
4636 *vp++ = 0;
4637 }
4638 break;
4639
4640 default:
4641 gcc_unreachable ();
4642 }
4643 }
4644
4645 /* Now, pick the right byte to start with. */
4646 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4647 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4648 will already have offset 0. */
4649 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4650 {
4651 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4652 - byte);
4653 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4654 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4655 byte = (subword_byte % UNITS_PER_WORD
4656 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4657 }
4658
4659 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4660 so if it's become negative it will instead be very large.) */
4661 gcc_assert (byte < GET_MODE_SIZE (innermode));
4662
4663 /* Convert from bytes to chunks of size value_bit. */
4664 value_start = byte * (BITS_PER_UNIT / value_bit);
4665
4666 /* Re-pack the value. */
4667
4668 if (VECTOR_MODE_P (outermode))
4669 {
4670 num_elem = GET_MODE_NUNITS (outermode);
4671 result_v = rtvec_alloc (num_elem);
4672 elems = &RTVEC_ELT (result_v, 0);
4673 outer_submode = GET_MODE_INNER (outermode);
4674 }
4675 else
4676 {
4677 num_elem = 1;
4678 elems = &result_s;
4679 outer_submode = outermode;
4680 }
4681
4682 outer_class = GET_MODE_CLASS (outer_submode);
4683 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4684
4685 gcc_assert (elem_bitsize % value_bit == 0);
4686 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4687
4688 for (elem = 0; elem < num_elem; elem++)
4689 {
4690 unsigned char *vp;
4691
4692 /* Vectors are stored in target memory order. (This is probably
4693 a mistake.) */
4694 {
4695 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4696 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4697 / BITS_PER_UNIT);
4698 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4699 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4700 unsigned bytele = (subword_byte % UNITS_PER_WORD
4701 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4702 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4703 }
4704
4705 switch (outer_class)
4706 {
4707 case MODE_INT:
4708 case MODE_PARTIAL_INT:
4709 {
4710 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4711
4712 for (i = 0;
4713 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4714 i += value_bit)
4715 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4716 for (; i < elem_bitsize; i += value_bit)
4717 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4718 << (i - HOST_BITS_PER_WIDE_INT));
4719
4720 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4721 know why. */
4722 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4723 elems[elem] = gen_int_mode (lo, outer_submode);
4724 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4725 elems[elem] = immed_double_const (lo, hi, outer_submode);
4726 else
4727 return NULL_RTX;
4728 }
4729 break;
4730
4731 case MODE_FLOAT:
4732 case MODE_DECIMAL_FLOAT:
4733 {
4734 REAL_VALUE_TYPE r;
4735 long tmp[max_bitsize / 32];
4736
4737 /* real_from_target wants its input in words affected by
4738 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4739 and use WORDS_BIG_ENDIAN instead; see the documentation
4740 of SUBREG in rtl.texi. */
4741 for (i = 0; i < max_bitsize / 32; i++)
4742 tmp[i] = 0;
4743 for (i = 0; i < elem_bitsize; i += value_bit)
4744 {
4745 int ibase;
4746 if (WORDS_BIG_ENDIAN)
4747 ibase = elem_bitsize - 1 - i;
4748 else
4749 ibase = i;
4750 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4751 }
4752
4753 real_from_target (&r, tmp, outer_submode);
4754 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4755 }
4756 break;
4757
4758 case MODE_FRACT:
4759 case MODE_UFRACT:
4760 case MODE_ACCUM:
4761 case MODE_UACCUM:
4762 {
4763 FIXED_VALUE_TYPE f;
4764 f.data.low = 0;
4765 f.data.high = 0;
4766 f.mode = outer_submode;
4767
4768 for (i = 0;
4769 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4770 i += value_bit)
4771 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4772 for (; i < elem_bitsize; i += value_bit)
4773 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4774 << (i - HOST_BITS_PER_WIDE_INT));
4775
4776 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
4777 }
4778 break;
4779
4780 default:
4781 gcc_unreachable ();
4782 }
4783 }
4784 if (VECTOR_MODE_P (outermode))
4785 return gen_rtx_CONST_VECTOR (outermode, result_v);
4786 else
4787 return result_s;
4788 }
4789
4790 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4791 Return 0 if no simplifications are possible. */
4792 rtx
4793 simplify_subreg (enum machine_mode outermode, rtx op,
4794 enum machine_mode innermode, unsigned int byte)
4795 {
4796 /* Little bit of sanity checking. */
4797 gcc_assert (innermode != VOIDmode);
4798 gcc_assert (outermode != VOIDmode);
4799 gcc_assert (innermode != BLKmode);
4800 gcc_assert (outermode != BLKmode);
4801
4802 gcc_assert (GET_MODE (op) == innermode
4803 || GET_MODE (op) == VOIDmode);
4804
4805 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4806 gcc_assert (byte < GET_MODE_SIZE (innermode));
4807
4808 if (outermode == innermode && !byte)
4809 return op;
4810
4811 if (GET_CODE (op) == CONST_INT
4812 || GET_CODE (op) == CONST_DOUBLE
4813 || GET_CODE (op) == CONST_FIXED
4814 || GET_CODE (op) == CONST_VECTOR)
4815 return simplify_immed_subreg (outermode, op, innermode, byte);
4816
4817 /* Changing mode twice with SUBREG => just change it once,
4818 or not at all if changing back to op's starting mode. */
4819 if (GET_CODE (op) == SUBREG)
4820 {
4821 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4822 int final_offset = byte + SUBREG_BYTE (op);
4823 rtx newx;
4824
4825 if (outermode == innermostmode
4826 && byte == 0 && SUBREG_BYTE (op) == 0)
4827 return SUBREG_REG (op);
4828
4829 /* The SUBREG_BYTE represents the offset, as if the value were stored
4830 in memory. The irritating exception is a paradoxical subreg, where
4831 we define SUBREG_BYTE to be 0; on big-endian machines this value
4832 would otherwise be negative. For a moment, undo this exception. */
4833 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4834 {
4835 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4836 if (WORDS_BIG_ENDIAN)
4837 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4838 if (BYTES_BIG_ENDIAN)
4839 final_offset += difference % UNITS_PER_WORD;
4840 }
4841 if (SUBREG_BYTE (op) == 0
4842 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4843 {
4844 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4845 if (WORDS_BIG_ENDIAN)
4846 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4847 if (BYTES_BIG_ENDIAN)
4848 final_offset += difference % UNITS_PER_WORD;
4849 }
4850
4851 /* See whether resulting subreg will be paradoxical. */
4852 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4853 {
4854 /* In nonparadoxical subregs we can't handle negative offsets. */
4855 if (final_offset < 0)
4856 return NULL_RTX;
4857 /* Bail out in case resulting subreg would be incorrect. */
4858 if (final_offset % GET_MODE_SIZE (outermode)
4859 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4860 return NULL_RTX;
4861 }
4862 else
4863 {
4864 int offset = 0;
4865 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4866
4867 /* In a paradoxical subreg, see if we are still looking at the lower
4868 part. If so, our SUBREG_BYTE will be 0. */
4869 if (WORDS_BIG_ENDIAN)
4870 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4871 if (BYTES_BIG_ENDIAN)
4872 offset += difference % UNITS_PER_WORD;
4873 if (offset == final_offset)
4874 final_offset = 0;
4875 else
4876 return NULL_RTX;
4877 }
4878
4879 /* Recurse for further possible simplifications. */
4880 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4881 final_offset);
4882 if (newx)
4883 return newx;
4884 if (validate_subreg (outermode, innermostmode,
4885 SUBREG_REG (op), final_offset))
4886 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4887 return NULL_RTX;
4888 }
4889
4890 /* Merge implicit and explicit truncations. */
4891
4892 if (GET_CODE (op) == TRUNCATE
4893 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4894 && subreg_lowpart_offset (outermode, innermode) == byte)
4895 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4896 GET_MODE (XEXP (op, 0)));
4897
4898 /* SUBREG of a hard register => just change the register number
4899 and/or mode. If the hard register is not valid in that mode,
4900 suppress this simplification. If the hard register is the stack,
4901 frame, or argument pointer, leave this as a SUBREG. */
4902
4903 if (REG_P (op)
4904 && REGNO (op) < FIRST_PSEUDO_REGISTER
4905 #ifdef CANNOT_CHANGE_MODE_CLASS
4906 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4907 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4908 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4909 #endif
4910 && ((reload_completed && !frame_pointer_needed)
4911 || (REGNO (op) != FRAME_POINTER_REGNUM
4912 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4913 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4914 #endif
4915 ))
4916 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4917 && REGNO (op) != ARG_POINTER_REGNUM
4918 #endif
4919 && REGNO (op) != STACK_POINTER_REGNUM
4920 && subreg_offset_representable_p (REGNO (op), innermode,
4921 byte, outermode))
4922 {
4923 unsigned int regno = REGNO (op);
4924 unsigned int final_regno
4925 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4926
4927 /* ??? We do allow it if the current REG is not valid for
4928 its mode. This is a kludge to work around how float/complex
4929 arguments are passed on 32-bit SPARC and should be fixed. */
4930 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4931 || ! HARD_REGNO_MODE_OK (regno, innermode))
4932 {
4933 rtx x;
4934 int final_offset = byte;
4935
4936 /* Adjust offset for paradoxical subregs. */
4937 if (byte == 0
4938 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4939 {
4940 int difference = (GET_MODE_SIZE (innermode)
4941 - GET_MODE_SIZE (outermode));
4942 if (WORDS_BIG_ENDIAN)
4943 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4944 if (BYTES_BIG_ENDIAN)
4945 final_offset += difference % UNITS_PER_WORD;
4946 }
4947
4948 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4949
4950 /* Propagate the original regno. We don't have any way to specify
4951 the offset inside the original regno, so do so only for the lowpart.
4952 The information is used only by alias analysis, which cannot
4953 grok partial registers anyway. */
4954
4955 if (subreg_lowpart_offset (outermode, innermode) == byte)
4956 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4957 return x;
4958 }
4959 }
4960
4961 /* If we have a SUBREG of a register that we are replacing with a
4962 MEM, make a new MEM and try replacing the SUBREG with it. Don't
4963 do this if the MEM has a mode-dependent address or if we would be
4964 widening it. */
4965
4966 if (MEM_P (op)
4967 && ! mode_dependent_address_p (XEXP (op, 0))
4968 /* Allow splitting of volatile memory references in case we don't
4969 have instruction to move the whole thing. */
4970 && (! MEM_VOLATILE_P (op)
4971 || ! have_insn_for (SET, innermode))
4972 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4973 return adjust_address_nv (op, outermode, byte);
4974
4975 /* Handle complex values represented as CONCAT
4976 of real and imaginary part. */
4977 if (GET_CODE (op) == CONCAT)
4978 {
4979 unsigned int part_size, final_offset;
4980 rtx part, res;
4981
4982 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4983 if (byte < part_size)
4984 {
4985 part = XEXP (op, 0);
4986 final_offset = byte;
4987 }
4988 else
4989 {
4990 part = XEXP (op, 1);
4991 final_offset = byte - part_size;
4992 }
4993
4994 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4995 return NULL_RTX;
4996
4997 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4998 if (res)
4999 return res;
5000 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5001 return gen_rtx_SUBREG (outermode, part, final_offset);
5002 return NULL_RTX;
5003 }
5004
5005 /* Optimize SUBREG truncations of zero and sign extended values. */
5006 if ((GET_CODE (op) == ZERO_EXTEND
5007 || GET_CODE (op) == SIGN_EXTEND)
5008 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5009 {
5010 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5011
5012 /* If we're requesting the lowpart of a zero or sign extension,
5013 there are three possibilities. If the outermode is the same
5014 as the origmode, we can omit both the extension and the subreg.
5015 If the outermode is not larger than the origmode, we can apply
5016 the truncation without the extension. Finally, if the outermode
5017 is larger than the origmode, but both are integer modes, we
5018 can just extend to the appropriate mode. */
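/* For instance, when BYTE selects the lowpart, the lowpart subreg of
   (zero_extend:SI (reg:QI x)) taken in QImode is just (reg:QI x),
   while taken in HImode it becomes (zero_extend:HI (reg:QI x)). */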
5019 if (bitpos == 0)
5020 {
5021 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5022 if (outermode == origmode)
5023 return XEXP (op, 0);
5024 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5025 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5026 subreg_lowpart_offset (outermode,
5027 origmode));
5028 if (SCALAR_INT_MODE_P (outermode))
5029 return simplify_gen_unary (GET_CODE (op), outermode,
5030 XEXP (op, 0), origmode);
5031 }
5032
5033 /* A SUBREG resulting from a zero extension may fold to zero if
5034 it extracts higher bits than the ZERO_EXTEND's source bits. */
5035 if (GET_CODE (op) == ZERO_EXTEND
5036 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5037 return CONST0_RTX (outermode);
5038 }
5039
5040 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5041 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5042 the outer subreg is effectively a truncation to the original mode. */
5043 if ((GET_CODE (op) == LSHIFTRT
5044 || GET_CODE (op) == ASHIFTRT)
5045 && SCALAR_INT_MODE_P (outermode)
5046 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
5047 to avoid the possibility that an outer LSHIFTRT shifts by more
5048 than the sign extension's sign_bit_copies and introduces zeros
5049 into the high bits of the result. */
5050 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5051 && GET_CODE (XEXP (op, 1)) == CONST_INT
5052 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5053 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5054 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5055 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5056 return simplify_gen_binary (ASHIFTRT, outermode,
5057 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5058
5059 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5060 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5061 the outer subreg is effectively a truncation to the original mode. */
5062 if ((GET_CODE (op) == LSHIFTRT
5063 || GET_CODE (op) == ASHIFTRT)
5064 && SCALAR_INT_MODE_P (outermode)
5065 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5066 && GET_CODE (XEXP (op, 1)) == CONST_INT
5067 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5068 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5069 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5070 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5071 return simplify_gen_binary (LSHIFTRT, outermode,
5072 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5073
5074 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5075 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5076 the outer subreg is effectively a truncation to the original mode. */
5077 if (GET_CODE (op) == ASHIFT
5078 && SCALAR_INT_MODE_P (outermode)
5079 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5080 && GET_CODE (XEXP (op, 1)) == CONST_INT
5081 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5082 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5083 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5084 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5085 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5086 return simplify_gen_binary (ASHIFT, outermode,
5087 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5088
5089 return NULL_RTX;
5090 }
5091
5092 /* Make a SUBREG operation or equivalent if it folds. */
5093
5094 rtx
5095 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5096 enum machine_mode innermode, unsigned int byte)
5097 {
5098 rtx newx;
5099
5100 newx = simplify_subreg (outermode, op, innermode, byte);
5101 if (newx)
5102 return newx;
5103
5104 if (GET_CODE (op) == SUBREG
5105 || GET_CODE (op) == CONCAT
5106 || GET_MODE (op) == VOIDmode)
5107 return NULL_RTX;
5108
5109 if (validate_subreg (outermode, innermode, op, byte))
5110 return gen_rtx_SUBREG (outermode, op, byte);
5111
5112 return NULL_RTX;
5113 }
5114
5115 /* Simplify X, an rtx expression.
5116
5117 Return the simplified expression or NULL if no simplifications
5118 were possible.
5119
5120 This is the preferred entry point into the simplification routines;
5121 however, we still allow passes to call the more specific routines.
5122
5123 Right now GCC has three (yes, three) major bodies of RTL simplification
5124 code that need to be unified.
5125
5126 1. fold_rtx in cse.c. This code uses various CSE specific
5127 information to aid in RTL simplification.
5128
5129 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5130 it uses combine specific information to aid in RTL
5131 simplification.
5132
5133 3. The routines in this file.
5134
5135
5136 Long term we want to only have one body of simplification code; to
5137 get to that state I recommend the following steps:
5138
5139 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5140 which do not rely on pass-dependent state into these routines.
5141
5142 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5143 use this routine whenever possible.
5144
5145 3. Allow for pass dependent state to be provided to these
5146 routines and add simplifications based on the pass dependent
5147 state. Remove code from cse.c & combine.c that becomes
5148 redundant/dead.
5149
5150 It will take time, but ultimately the compiler will be easier to
5151 maintain and improve. It's totally silly that when we add a
5152 simplification it needs to be added to 4 places (3 for RTL
5153 simplification and 1 for tree simplification). */
5154
5155 rtx
5156 simplify_rtx (const_rtx x)
5157 {
5158 const enum rtx_code code = GET_CODE (x);
5159 const enum machine_mode mode = GET_MODE (x);
5160
5161 switch (GET_RTX_CLASS (code))
5162 {
5163 case RTX_UNARY:
5164 return simplify_unary_operation (code, mode,
5165 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5166 case RTX_COMM_ARITH:
5167 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5168 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5169
5170 /* Fall through.... */
5171
5172 case RTX_BIN_ARITH:
5173 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5174
5175 case RTX_TERNARY:
5176 case RTX_BITFIELD_OPS:
5177 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5178 XEXP (x, 0), XEXP (x, 1),
5179 XEXP (x, 2));
5180
5181 case RTX_COMPARE:
5182 case RTX_COMM_COMPARE:
5183 return simplify_relational_operation (code, mode,
5184 ((GET_MODE (XEXP (x, 0))
5185 != VOIDmode)
5186 ? GET_MODE (XEXP (x, 0))
5187 : GET_MODE (XEXP (x, 1))),
5188 XEXP (x, 0),
5189 XEXP (x, 1));
5190
5191 case RTX_EXTRA:
5192 if (code == SUBREG)
5193 return simplify_subreg (mode, SUBREG_REG (x),
5194 GET_MODE (SUBREG_REG (x)),
5195 SUBREG_BYTE (x));
5196 break;
5197
5198 case RTX_OBJ:
5199 if (code == LO_SUM)
5200 {
5201 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5202 if (GET_CODE (XEXP (x, 0)) == HIGH
5203 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5204 return XEXP (x, 1);
5205 }
5206 break;
5207
5208 default:
5209 break;
5210 }
5211 return NULL;
5212 }