gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
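/* Illustrative note (not part of the original source): with a 64-bit
   HOST_WIDE_INT, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is -1 and
   HWI_SIGN_EXTEND (7) is 0, so a (low, high) pair whose low word looks
   negative gets an all-ones high word, exactly as two's-complement sign
   extension requires.  */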
51
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
76
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
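/* Worked example (illustrative, not from the original file): for SImode on
   a host with a 64-bit HOST_WIDE_INT, a CONST_INT holding the sign-extended
   value 0xffffffff80000000 is masked down to 0x80000000, which equals
   (unsigned HOST_WIDE_INT) 1 << 31, so mode_signbit_p returns true; any
   other properly sign-extended SImode CONST_INT returns false.  */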
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
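/* Minimal usage sketch (illustrative only; the pseudo-register number and
   mode are made up):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (4), reg);

   PLUS is commutative and a constant should come second, so the result is
   (plus:SI (reg:SI 100) (const_int 4)); had both operands been constants,
   the addition would have been folded to a single CONST_INT instead.  */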
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 if (GET_MODE (x) == BLKmode)
162 return x;
163
164 addr = XEXP (x, 0);
165
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
168
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
173 {
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
176 }
177
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
180
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
185 {
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
188
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
193 {
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
197 }
198 else
199 return c;
200 }
201
202 return x;
203 }
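/* Illustrative example (not part of the original source): if X is
   (mem:SF (symbol_ref ...)) and the SYMBOL_REF addresses a constant pool
   entry created for an SFmode constant such as 1.0, the function returns
   that CONST_DOUBLE directly.  If the access uses a different mode or a
   nonzero offset, it falls back to simplify_subreg, and if even that fails
   the original MEM is returned unchanged.  */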
204 \f
205 /* Make a unary operation by first seeing if it folds and otherwise making
206 the specified operation. */
207
208 rtx
209 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
210 enum machine_mode op_mode)
211 {
212 rtx tem;
213
214 /* If this simplifies, use it. */
215 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
216 return tem;
217
218 return gen_rtx_fmt_e (code, mode, op);
219 }
220
221 /* Likewise for ternary operations. */
222
223 rtx
224 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
225 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
226 {
227 rtx tem;
228
229 /* If this simplifies, use it. */
230 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
231 op0, op1, op2)))
232 return tem;
233
234 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
235 }
236
237 /* Likewise, for relational operations.
238 CMP_MODE specifies mode comparison is done in. */
239
240 rtx
241 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
242 enum machine_mode cmp_mode, rtx op0, rtx op1)
243 {
244 rtx tem;
245
246 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
247 op0, op1)))
248 return tem;
249
250 return gen_rtx_fmt_ee (code, mode, op0, op1);
251 }
252 \f
253 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
254 resulting RTX. Return a new RTX which is as simplified as possible. */
255
256 rtx
257 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
258 {
259 enum rtx_code code = GET_CODE (x);
260 enum machine_mode mode = GET_MODE (x);
261 enum machine_mode op_mode;
262 rtx op0, op1, op2;
263
264 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
265 to build a new expression substituting recursively. If we can't do
266 anything, return our input. */
267
268 if (x == old_rtx)
269 return new_rtx;
270
271 switch (GET_RTX_CLASS (code))
272 {
273 case RTX_UNARY:
274 op0 = XEXP (x, 0);
275 op_mode = GET_MODE (op0);
276 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
277 if (op0 == XEXP (x, 0))
278 return x;
279 return simplify_gen_unary (code, mode, op0, op_mode);
280
281 case RTX_BIN_ARITH:
282 case RTX_COMM_ARITH:
283 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
284 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
286 return x;
287 return simplify_gen_binary (code, mode, op0, op1);
288
289 case RTX_COMPARE:
290 case RTX_COMM_COMPARE:
291 op0 = XEXP (x, 0);
292 op1 = XEXP (x, 1);
293 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
296 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
297 return x;
298 return simplify_gen_relational (code, mode, op_mode, op0, op1);
299
300 case RTX_TERNARY:
301 case RTX_BITFIELD_OPS:
302 op0 = XEXP (x, 0);
303 op_mode = GET_MODE (op0);
304 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
305 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
306 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
307 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
308 return x;
309 if (op_mode == VOIDmode)
310 op_mode = GET_MODE (op0);
311 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
312
313 case RTX_EXTRA:
314 /* The only case we try to handle is a SUBREG. */
315 if (code == SUBREG)
316 {
317 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
318 if (op0 == SUBREG_REG (x))
319 return x;
320 op0 = simplify_gen_subreg (GET_MODE (x), op0,
321 GET_MODE (SUBREG_REG (x)),
322 SUBREG_BYTE (x));
323 return op0 ? op0 : x;
324 }
325 break;
326
327 case RTX_OBJ:
328 if (code == MEM)
329 {
330 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
331 if (op0 == XEXP (x, 0))
332 return x;
333 return replace_equiv_address_nv (x, op0);
334 }
335 else if (code == LO_SUM)
336 {
337 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
338 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
339
340 /* (lo_sum (high x) x) -> x */
341 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
342 return op1;
343
344 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
345 return x;
346 return gen_rtx_LO_SUM (mode, op0, op1);
347 }
348 else if (code == REG)
349 {
350 if (rtx_equal_p (x, old_rtx))
351 return new_rtx;
352 }
353 break;
354
355 default:
356 break;
357 }
358 return x;
359 }
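/* Usage sketch (illustrative; the RTXes are made up):

     rtx reg = gen_rtx_REG (SImode, 101);
     rtx x   = gen_rtx_PLUS (SImode, reg, GEN_INT (1));
     rtx y   = simplify_replace_rtx (x, reg, GEN_INT (6));

   Substituting 6 for the register gives (plus 6 1), which
   simplify_gen_binary folds, so Y ends up as (const_int 7).  */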
360 \f
361 /* Try to simplify a unary operation CODE whose output mode is to be
362 MODE with input operand OP whose mode was originally OP_MODE.
363 Return zero if no simplification can be made. */
364 rtx
365 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
366 rtx op, enum machine_mode op_mode)
367 {
368 rtx trueop, tem;
369
370 if (GET_CODE (op) == CONST)
371 op = XEXP (op, 0);
372
373 trueop = avoid_constant_pool_reference (op);
374
375 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
376 if (tem)
377 return tem;
378
379 return simplify_unary_operation_1 (code, mode, op);
380 }
381
382 /* Perform some simplifications we can do even if the operands
383 aren't constant. */
384 static rtx
385 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
386 {
387 enum rtx_code reversed;
388 rtx temp;
389
390 switch (code)
391 {
392 case NOT:
393 /* (not (not X)) == X. */
394 if (GET_CODE (op) == NOT)
395 return XEXP (op, 0);
396
397 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
398 comparison is all ones. */
399 if (COMPARISON_P (op)
400 && (mode == BImode || STORE_FLAG_VALUE == -1)
401 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
402 return simplify_gen_relational (reversed, mode, VOIDmode,
403 XEXP (op, 0), XEXP (op, 1));
404
405 /* (not (plus X -1)) can become (neg X). */
406 if (GET_CODE (op) == PLUS
407 && XEXP (op, 1) == constm1_rtx)
408 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
409
410 /* Similarly, (not (neg X)) is (plus X -1). */
411 if (GET_CODE (op) == NEG)
412 return plus_constant (XEXP (op, 0), -1);
413
414 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
415 if (GET_CODE (op) == XOR
416 && GET_CODE (XEXP (op, 1)) == CONST_INT
417 && (temp = simplify_unary_operation (NOT, mode,
418 XEXP (op, 1), mode)) != 0)
419 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
420
421 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
422 if (GET_CODE (op) == PLUS
423 && GET_CODE (XEXP (op, 1)) == CONST_INT
424 && mode_signbit_p (mode, XEXP (op, 1))
425 && (temp = simplify_unary_operation (NOT, mode,
426 XEXP (op, 1), mode)) != 0)
427 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
428
429
430 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
431 operands other than 1, but that is not valid. We could do a
432 similar simplification for (not (lshiftrt C X)) where C is
433 just the sign bit, but this doesn't seem common enough to
434 bother with. */
435 if (GET_CODE (op) == ASHIFT
436 && XEXP (op, 0) == const1_rtx)
437 {
438 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
439 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
440 }
441
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
445
446 if (STORE_FLAG_VALUE == -1
447 && GET_CODE (op) == ASHIFTRT
448 && GET_CODE (XEXP (op, 1)) == CONST_INT
449 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
450 return simplify_gen_relational (GE, mode, VOIDmode,
451 XEXP (op, 0), const0_rtx);
452
453
454 if (GET_CODE (op) == SUBREG
455 && subreg_lowpart_p (op)
456 && (GET_MODE_SIZE (GET_MODE (op))
457 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
458 && GET_CODE (SUBREG_REG (op)) == ASHIFT
459 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
460 {
461 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
462 rtx x;
463
464 x = gen_rtx_ROTATE (inner_mode,
465 simplify_gen_unary (NOT, inner_mode, const1_rtx,
466 inner_mode),
467 XEXP (SUBREG_REG (op), 1));
468 return rtl_hooks.gen_lowpart_no_emit (mode, x);
469 }
470
471 /* Apply De Morgan's laws to reduce number of patterns for machines
472 with negating logical insns (and-not, nand, etc.). If result has
473 only one NOT, put it first, since that is how the patterns are
474 coded. */
475
476 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
477 {
478 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
479 enum machine_mode op_mode;
480
481 op_mode = GET_MODE (in1);
482 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
483
484 op_mode = GET_MODE (in2);
485 if (op_mode == VOIDmode)
486 op_mode = mode;
487 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
488
489 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
490 {
491 rtx tem = in2;
492 in2 = in1; in1 = tem;
493 }
494
495 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
496 mode, in1, in2);
497 }
498 break;
499
500 case NEG:
501 /* (neg (neg X)) == X. */
502 if (GET_CODE (op) == NEG)
503 return XEXP (op, 0);
504
505 /* (neg (plus X 1)) can become (not X). */
506 if (GET_CODE (op) == PLUS
507 && XEXP (op, 1) == const1_rtx)
508 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
509
510 /* Similarly, (neg (not X)) is (plus X 1). */
511 if (GET_CODE (op) == NOT)
512 return plus_constant (XEXP (op, 0), 1);
513
514 /* (neg (minus X Y)) can become (minus Y X). This transformation
515 isn't safe for modes with signed zeros, since if X and Y are
516 both +0, (minus Y X) is the same as (minus X Y). If the
517 rounding mode is towards +infinity (or -infinity) then the two
518 expressions will be rounded differently. */
519 if (GET_CODE (op) == MINUS
520 && !HONOR_SIGNED_ZEROS (mode)
521 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
522 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
523
524 if (GET_CODE (op) == PLUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 {
528 /* (neg (plus A C)) is simplified to (minus -C A). */
529 if (GET_CODE (XEXP (op, 1)) == CONST_INT
530 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
531 {
532 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
533 if (temp)
534 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
535 }
536
537 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
538 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
540 }
541
542 /* (neg (mult A B)) becomes (mult (neg A) B).
543 This works even for floating-point values. */
544 if (GET_CODE (op) == MULT
545 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
546 {
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
549 }
550
551 /* NEG commutes with ASHIFT since it is multiplication. Only do
552 this if we can then eliminate the NEG (e.g., if the operand
553 is a constant). */
554 if (GET_CODE (op) == ASHIFT)
555 {
556 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
557 if (temp)
558 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
559 }
560
561 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
562 C is equal to the width of MODE minus 1. */
563 if (GET_CODE (op) == ASHIFTRT
564 && GET_CODE (XEXP (op, 1)) == CONST_INT
565 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
566 return simplify_gen_binary (LSHIFTRT, mode,
567 XEXP (op, 0), XEXP (op, 1));
568
569 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
570 C is equal to the width of MODE minus 1. */
571 if (GET_CODE (op) == LSHIFTRT
572 && GET_CODE (XEXP (op, 1)) == CONST_INT
573 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
574 return simplify_gen_binary (ASHIFTRT, mode,
575 XEXP (op, 0), XEXP (op, 1));
576
577 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
578 if (GET_CODE (op) == XOR
579 && XEXP (op, 1) == const1_rtx
580 && nonzero_bits (XEXP (op, 0), mode) == 1)
581 return plus_constant (XEXP (op, 0), -1);
582
583 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
584 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
585 if (GET_CODE (op) == LT
586 && XEXP (op, 1) == const0_rtx
587 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
588 {
589 enum machine_mode inner = GET_MODE (XEXP (op, 0));
590 int isize = GET_MODE_BITSIZE (inner);
591 if (STORE_FLAG_VALUE == 1)
592 {
593 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
594 GEN_INT (isize - 1));
595 if (mode == inner)
596 return temp;
597 if (GET_MODE_BITSIZE (mode) > isize)
598 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
599 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
600 }
601 else if (STORE_FLAG_VALUE == -1)
602 {
603 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
604 GEN_INT (isize - 1));
605 if (mode == inner)
606 return temp;
607 if (GET_MODE_BITSIZE (mode) > isize)
608 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
609 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
610 }
611 }
612 break;
613
614 case TRUNCATE:
615 /* We can't handle truncation to a partial integer mode here
616 because we don't know the real bitsize of the partial
617 integer mode. */
618 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
619 break;
620
621 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
622 if ((GET_CODE (op) == SIGN_EXTEND
623 || GET_CODE (op) == ZERO_EXTEND)
624 && GET_MODE (XEXP (op, 0)) == mode)
625 return XEXP (op, 0);
626
627 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
628 (OP:SI foo:SI) if OP is NEG or ABS. */
629 if ((GET_CODE (op) == ABS
630 || GET_CODE (op) == NEG)
631 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
632 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
633 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
634 return simplify_gen_unary (GET_CODE (op), mode,
635 XEXP (XEXP (op, 0), 0), mode);
636
637 /* (truncate:A (subreg:B (truncate:C X) 0)) is
638 (truncate:A X). */
639 if (GET_CODE (op) == SUBREG
640 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
641 && subreg_lowpart_p (op))
642 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
643 GET_MODE (XEXP (SUBREG_REG (op), 0)));
644
645 /* If we know that the value is already truncated, we can
646 replace the TRUNCATE with a SUBREG. Note that this is also
647 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
648 modes we just have to apply a different definition for
649 truncation. But don't do this for an (LSHIFTRT (MULT ...))
650 since this will cause problems with the umulXi3_highpart
651 patterns. */
652 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
653 GET_MODE_BITSIZE (GET_MODE (op)))
654 ? (num_sign_bit_copies (op, GET_MODE (op))
655 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
656 - GET_MODE_BITSIZE (mode)))
657 : truncated_to_mode (mode, op))
658 && ! (GET_CODE (op) == LSHIFTRT
659 && GET_CODE (XEXP (op, 0)) == MULT))
660 return rtl_hooks.gen_lowpart_no_emit (mode, op);
661
662 /* A truncate of a comparison can be replaced with a subreg if
663 STORE_FLAG_VALUE permits. This is like the previous test,
664 but it works even if the comparison is done in a mode larger
665 than HOST_BITS_PER_WIDE_INT. */
666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
667 && COMPARISON_P (op)
668 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
669 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 break;
671
672 case FLOAT_TRUNCATE:
673 if (DECIMAL_FLOAT_MODE_P (mode))
674 break;
675
676 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
677 if (GET_CODE (op) == FLOAT_EXTEND
678 && GET_MODE (XEXP (op, 0)) == mode)
679 return XEXP (op, 0);
680
681 /* (float_truncate:SF (float_truncate:DF foo:XF))
682 = (float_truncate:SF foo:XF).
683 This may eliminate double rounding, so it is unsafe.
684
685 (float_truncate:SF (float_extend:XF foo:DF))
686 = (float_truncate:SF foo:DF).
687
688 (float_truncate:DF (float_extend:XF foo:SF))
689 = (float_extend:DF foo:SF).  */
690 if ((GET_CODE (op) == FLOAT_TRUNCATE
691 && flag_unsafe_math_optimizations)
692 || GET_CODE (op) == FLOAT_EXTEND)
693 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
694 0)))
695 > GET_MODE_SIZE (mode)
696 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
697 mode,
698 XEXP (op, 0), mode);
699
700 /* (float_truncate (float x)) is (float x) */
701 if (GET_CODE (op) == FLOAT
702 && (flag_unsafe_math_optimizations
703 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
704 && ((unsigned)significand_size (GET_MODE (op))
705 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
706 - num_sign_bit_copies (XEXP (op, 0),
707 GET_MODE (XEXP (op, 0))))))))
708 return simplify_gen_unary (FLOAT, mode,
709 XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)));
711
712 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
713 (OP:SF foo:SF) if OP is NEG or ABS. */
714 if ((GET_CODE (op) == ABS
715 || GET_CODE (op) == NEG)
716 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
718 return simplify_gen_unary (GET_CODE (op), mode,
719 XEXP (XEXP (op, 0), 0), mode);
720
721 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
722 is (float_truncate:SF x). */
723 if (GET_CODE (op) == SUBREG
724 && subreg_lowpart_p (op)
725 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
726 return SUBREG_REG (op);
727 break;
728
729 case FLOAT_EXTEND:
730 if (DECIMAL_FLOAT_MODE_P (mode))
731 break;
732
733 /* (float_extend (float_extend x)) is (float_extend x)
734
735 (float_extend (float x)) is (float x) assuming that double
736 rounding can't happen.
737 */
738 if (GET_CODE (op) == FLOAT_EXTEND
739 || (GET_CODE (op) == FLOAT
740 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
741 && ((unsigned)significand_size (GET_MODE (op))
742 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
743 - num_sign_bit_copies (XEXP (op, 0),
744 GET_MODE (XEXP (op, 0)))))))
745 return simplify_gen_unary (GET_CODE (op), mode,
746 XEXP (op, 0),
747 GET_MODE (XEXP (op, 0)));
748
749 break;
750
751 case ABS:
752 /* (abs (neg <foo>)) -> (abs <foo>) */
753 if (GET_CODE (op) == NEG)
754 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
755 GET_MODE (XEXP (op, 0)));
756
757 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
758 do nothing. */
759 if (GET_MODE (op) == VOIDmode)
760 break;
761
762 /* If operand is something known to be positive, ignore the ABS. */
763 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
764 || ((GET_MODE_BITSIZE (GET_MODE (op))
765 <= HOST_BITS_PER_WIDE_INT)
766 && ((nonzero_bits (op, GET_MODE (op))
767 & ((HOST_WIDE_INT) 1
768 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
769 == 0)))
770 return op;
771
772 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
773 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
774 return gen_rtx_NEG (mode, op);
775
776 break;
777
778 case FFS:
779 /* (ffs (*_extend <X>)) = (ffs <X>) */
780 if (GET_CODE (op) == SIGN_EXTEND
781 || GET_CODE (op) == ZERO_EXTEND)
782 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
783 GET_MODE (XEXP (op, 0)));
784 break;
785
786 case POPCOUNT:
787 switch (GET_CODE (op))
788 {
789 case BSWAP:
790 case ZERO_EXTEND:
791 /* (popcount (zero_extend <X>)) = (popcount <X>) */
792 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
794
795 case ROTATE:
796 case ROTATERT:
797 /* Rotations don't affect popcount. */
798 if (!side_effects_p (XEXP (op, 1)))
799 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
802
803 default:
804 break;
805 }
806 break;
807
808 case PARITY:
809 switch (GET_CODE (op))
810 {
811 case NOT:
812 case BSWAP:
813 case ZERO_EXTEND:
814 case SIGN_EXTEND:
815 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
816 GET_MODE (XEXP (op, 0)));
817
818 case ROTATE:
819 case ROTATERT:
820 /* Rotations don't affect parity. */
821 if (!side_effects_p (XEXP (op, 1)))
822 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
823 GET_MODE (XEXP (op, 0)));
824 break;
825
826 default:
827 break;
828 }
829 break;
830
831 case BSWAP:
832 /* (bswap (bswap x)) -> x. */
833 if (GET_CODE (op) == BSWAP)
834 return XEXP (op, 0);
835 break;
836
837 case FLOAT:
838 /* (float (sign_extend <X>)) = (float <X>). */
839 if (GET_CODE (op) == SIGN_EXTEND)
840 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
841 GET_MODE (XEXP (op, 0)));
842 break;
843
844 case SIGN_EXTEND:
845 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
846 becomes just the MINUS if its mode is MODE. This allows
847 folding switch statements on machines using casesi (such as
848 the VAX). */
849 if (GET_CODE (op) == TRUNCATE
850 && GET_MODE (XEXP (op, 0)) == mode
851 && GET_CODE (XEXP (op, 0)) == MINUS
852 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
853 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
854 return XEXP (op, 0);
855
856 /* Check for a sign extension of a subreg of a promoted
857 variable, where the promotion is sign-extended, and the
858 target mode is the same as the variable's promotion. */
859 if (GET_CODE (op) == SUBREG
860 && SUBREG_PROMOTED_VAR_P (op)
861 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
862 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
863 return rtl_hooks.gen_lowpart_no_emit (mode, op);
864
865 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
866 if (! POINTERS_EXTEND_UNSIGNED
867 && mode == Pmode && GET_MODE (op) == ptr_mode
868 && (CONSTANT_P (op)
869 || (GET_CODE (op) == SUBREG
870 && REG_P (SUBREG_REG (op))
871 && REG_POINTER (SUBREG_REG (op))
872 && GET_MODE (SUBREG_REG (op)) == Pmode)))
873 return convert_memory_address (Pmode, op);
874 #endif
875 break;
876
877 case ZERO_EXTEND:
878 /* Check for a zero extension of a subreg of a promoted
879 variable, where the promotion is zero-extended, and the
880 target mode is the same as the variable's promotion. */
881 if (GET_CODE (op) == SUBREG
882 && SUBREG_PROMOTED_VAR_P (op)
883 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
884 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
885 return rtl_hooks.gen_lowpart_no_emit (mode, op);
886
887 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
888 if (POINTERS_EXTEND_UNSIGNED > 0
889 && mode == Pmode && GET_MODE (op) == ptr_mode
890 && (CONSTANT_P (op)
891 || (GET_CODE (op) == SUBREG
892 && REG_P (SUBREG_REG (op))
893 && REG_POINTER (SUBREG_REG (op))
894 && GET_MODE (SUBREG_REG (op)) == Pmode)))
895 return convert_memory_address (Pmode, op);
896 #endif
897 break;
898
899 default:
900 break;
901 }
902
903 return 0;
904 }
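/* Illustrative example (not from the original file): given REG rtxes A and
   B, asking for (not (ior A B)) in SImode goes through the De Morgan case
   above and comes back as (and (not A) (not B)); when only one of the inner
   operands ends up under a NOT, that operand is placed first, matching how
   the machine patterns are written.  */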
905
906 /* Try to compute the value of a unary operation CODE whose output mode is to
907 be MODE with input operand OP whose mode was originally OP_MODE.
908 Return zero if the value cannot be computed. */
909 rtx
910 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
911 rtx op, enum machine_mode op_mode)
912 {
913 unsigned int width = GET_MODE_BITSIZE (mode);
914
915 if (code == VEC_DUPLICATE)
916 {
917 gcc_assert (VECTOR_MODE_P (mode));
918 if (GET_MODE (op) != VOIDmode)
919 {
920 if (!VECTOR_MODE_P (GET_MODE (op)))
921 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
922 else
923 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
924 (GET_MODE (op)));
925 }
926 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
927 || GET_CODE (op) == CONST_VECTOR)
928 {
929 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
930 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
931 rtvec v = rtvec_alloc (n_elts);
932 unsigned int i;
933
934 if (GET_CODE (op) != CONST_VECTOR)
935 for (i = 0; i < n_elts; i++)
936 RTVEC_ELT (v, i) = op;
937 else
938 {
939 enum machine_mode inmode = GET_MODE (op);
940 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
941 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
942
943 gcc_assert (in_n_elts < n_elts);
944 gcc_assert ((n_elts % in_n_elts) == 0);
945 for (i = 0; i < n_elts; i++)
946 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
947 }
948 return gen_rtx_CONST_VECTOR (mode, v);
949 }
950 }
951
952 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
953 {
954 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
955 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
956 enum machine_mode opmode = GET_MODE (op);
957 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
958 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
959 rtvec v = rtvec_alloc (n_elts);
960 unsigned int i;
961
962 gcc_assert (op_n_elts == n_elts);
963 for (i = 0; i < n_elts; i++)
964 {
965 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
966 CONST_VECTOR_ELT (op, i),
967 GET_MODE_INNER (opmode));
968 if (!x)
969 return 0;
970 RTVEC_ELT (v, i) = x;
971 }
972 return gen_rtx_CONST_VECTOR (mode, v);
973 }
974
975 /* The order of these tests is critical so that, for example, we don't
976 check the wrong mode (input vs. output) for a conversion operation,
977 such as FIX. At some point, this should be simplified. */
978
979 if (code == FLOAT && GET_MODE (op) == VOIDmode
980 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
981 {
982 HOST_WIDE_INT hv, lv;
983 REAL_VALUE_TYPE d;
984
985 if (GET_CODE (op) == CONST_INT)
986 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
987 else
988 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
989
990 REAL_VALUE_FROM_INT (d, lv, hv, mode);
991 d = real_value_truncate (mode, d);
992 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
993 }
994 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
995 && (GET_CODE (op) == CONST_DOUBLE
996 || GET_CODE (op) == CONST_INT))
997 {
998 HOST_WIDE_INT hv, lv;
999 REAL_VALUE_TYPE d;
1000
1001 if (GET_CODE (op) == CONST_INT)
1002 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1003 else
1004 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1005
1006 if (op_mode == VOIDmode)
1007 {
1008 /* We don't know how to interpret negative-looking numbers in
1009 this case, so don't try to fold those. */
1010 if (hv < 0)
1011 return 0;
1012 }
1013 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1014 ;
1015 else
1016 hv = 0, lv &= GET_MODE_MASK (op_mode);
1017
1018 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1019 d = real_value_truncate (mode, d);
1020 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1021 }
1022
1023 if (GET_CODE (op) == CONST_INT
1024 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1025 {
1026 HOST_WIDE_INT arg0 = INTVAL (op);
1027 HOST_WIDE_INT val;
1028
1029 switch (code)
1030 {
1031 case NOT:
1032 val = ~ arg0;
1033 break;
1034
1035 case NEG:
1036 val = - arg0;
1037 break;
1038
1039 case ABS:
1040 val = (arg0 >= 0 ? arg0 : - arg0);
1041 break;
1042
1043 case FFS:
1044 /* Don't use ffs here. Instead, get low order bit and then its
1045 number. If arg0 is zero, this will return 0, as desired. */
1046 arg0 &= GET_MODE_MASK (mode);
1047 val = exact_log2 (arg0 & (- arg0)) + 1;
1048 break;
1049
1050 case CLZ:
1051 arg0 &= GET_MODE_MASK (mode);
1052 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1053 ;
1054 else
1055 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1056 break;
1057
1058 case CTZ:
1059 arg0 &= GET_MODE_MASK (mode);
1060 if (arg0 == 0)
1061 {
1062 /* Even if the value at zero is undefined, we have to come
1063 up with some replacement. Seems good enough. */
1064 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1065 val = GET_MODE_BITSIZE (mode);
1066 }
1067 else
1068 val = exact_log2 (arg0 & -arg0);
1069 break;
1070
1071 case POPCOUNT:
1072 arg0 &= GET_MODE_MASK (mode);
1073 val = 0;
1074 while (arg0)
1075 val++, arg0 &= arg0 - 1;
1076 break;
1077
1078 case PARITY:
1079 arg0 &= GET_MODE_MASK (mode);
1080 val = 0;
1081 while (arg0)
1082 val++, arg0 &= arg0 - 1;
1083 val &= 1;
1084 break;
1085
1086 case BSWAP:
1087 {
1088 unsigned int s;
1089
1090 val = 0;
1091 for (s = 0; s < width; s += 8)
1092 {
1093 unsigned int d = width - s - 8;
1094 unsigned HOST_WIDE_INT byte;
1095 byte = (arg0 >> s) & 0xff;
1096 val |= byte << d;
1097 }
1098 }
1099 break;
1100
1101 case TRUNCATE:
1102 val = arg0;
1103 break;
1104
1105 case ZERO_EXTEND:
1106 /* When zero-extending a CONST_INT, we need to know its
1107 original mode. */
1108 gcc_assert (op_mode != VOIDmode);
1109 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1110 {
1111 /* If we were really extending the mode,
1112 we would have to distinguish between zero-extension
1113 and sign-extension. */
1114 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1115 val = arg0;
1116 }
1117 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1118 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1119 else
1120 return 0;
1121 break;
1122
1123 case SIGN_EXTEND:
1124 if (op_mode == VOIDmode)
1125 op_mode = mode;
1126 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1127 {
1128 /* If we were really extending the mode,
1129 we would have to distinguish between zero-extension
1130 and sign-extension. */
1131 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1132 val = arg0;
1133 }
1134 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1135 {
1136 val
1137 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1138 if (val
1139 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1140 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1141 }
1142 else
1143 return 0;
1144 break;
1145
1146 case SQRT:
1147 case FLOAT_EXTEND:
1148 case FLOAT_TRUNCATE:
1149 case SS_TRUNCATE:
1150 case US_TRUNCATE:
1151 case SS_NEG:
1152 case US_NEG:
1153 return 0;
1154
1155 default:
1156 gcc_unreachable ();
1157 }
1158
1159 return gen_int_mode (val, mode);
1160 }
1161
1162 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1163 for a DImode operation on a CONST_INT. */
1164 else if (GET_MODE (op) == VOIDmode
1165 && width <= HOST_BITS_PER_WIDE_INT * 2
1166 && (GET_CODE (op) == CONST_DOUBLE
1167 || GET_CODE (op) == CONST_INT))
1168 {
1169 unsigned HOST_WIDE_INT l1, lv;
1170 HOST_WIDE_INT h1, hv;
1171
1172 if (GET_CODE (op) == CONST_DOUBLE)
1173 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1174 else
1175 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1176
1177 switch (code)
1178 {
1179 case NOT:
1180 lv = ~ l1;
1181 hv = ~ h1;
1182 break;
1183
1184 case NEG:
1185 neg_double (l1, h1, &lv, &hv);
1186 break;
1187
1188 case ABS:
1189 if (h1 < 0)
1190 neg_double (l1, h1, &lv, &hv);
1191 else
1192 lv = l1, hv = h1;
1193 break;
1194
1195 case FFS:
1196 hv = 0;
1197 if (l1 == 0)
1198 {
1199 if (h1 == 0)
1200 lv = 0;
1201 else
1202 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1203 }
1204 else
1205 lv = exact_log2 (l1 & -l1) + 1;
1206 break;
1207
1208 case CLZ:
1209 hv = 0;
1210 if (h1 != 0)
1211 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1212 - HOST_BITS_PER_WIDE_INT;
1213 else if (l1 != 0)
1214 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1215 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1216 lv = GET_MODE_BITSIZE (mode);
1217 break;
1218
1219 case CTZ:
1220 hv = 0;
1221 if (l1 != 0)
1222 lv = exact_log2 (l1 & -l1);
1223 else if (h1 != 0)
1224 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1225 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1226 lv = GET_MODE_BITSIZE (mode);
1227 break;
1228
1229 case POPCOUNT:
1230 hv = 0;
1231 lv = 0;
1232 while (l1)
1233 lv++, l1 &= l1 - 1;
1234 while (h1)
1235 lv++, h1 &= h1 - 1;
1236 break;
1237
1238 case PARITY:
1239 hv = 0;
1240 lv = 0;
1241 while (l1)
1242 lv++, l1 &= l1 - 1;
1243 while (h1)
1244 lv++, h1 &= h1 - 1;
1245 lv &= 1;
1246 break;
1247
1248 case BSWAP:
1249 {
1250 unsigned int s;
1251
1252 hv = 0;
1253 lv = 0;
1254 for (s = 0; s < width; s += 8)
1255 {
1256 unsigned int d = width - s - 8;
1257 unsigned HOST_WIDE_INT byte;
1258
1259 if (s < HOST_BITS_PER_WIDE_INT)
1260 byte = (l1 >> s) & 0xff;
1261 else
1262 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1263
1264 if (d < HOST_BITS_PER_WIDE_INT)
1265 lv |= byte << d;
1266 else
1267 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1268 }
1269 }
1270 break;
1271
1272 case TRUNCATE:
1273 /* This is just a change-of-mode, so do nothing. */
1274 lv = l1, hv = h1;
1275 break;
1276
1277 case ZERO_EXTEND:
1278 gcc_assert (op_mode != VOIDmode);
1279
1280 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1281 return 0;
1282
1283 hv = 0;
1284 lv = l1 & GET_MODE_MASK (op_mode);
1285 break;
1286
1287 case SIGN_EXTEND:
1288 if (op_mode == VOIDmode
1289 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1290 return 0;
1291 else
1292 {
1293 lv = l1 & GET_MODE_MASK (op_mode);
1294 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1295 && (lv & ((HOST_WIDE_INT) 1
1296 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1297 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1298
1299 hv = HWI_SIGN_EXTEND (lv);
1300 }
1301 break;
1302
1303 case SQRT:
1304 return 0;
1305
1306 default:
1307 return 0;
1308 }
1309
1310 return immed_double_const (lv, hv, mode);
1311 }
1312
1313 else if (GET_CODE (op) == CONST_DOUBLE
1314 && SCALAR_FLOAT_MODE_P (mode))
1315 {
1316 REAL_VALUE_TYPE d, t;
1317 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1318
1319 switch (code)
1320 {
1321 case SQRT:
1322 if (HONOR_SNANS (mode) && real_isnan (&d))
1323 return 0;
1324 real_sqrt (&t, mode, &d);
1325 d = t;
1326 break;
1327 case ABS:
1328 d = REAL_VALUE_ABS (d);
1329 break;
1330 case NEG:
1331 d = REAL_VALUE_NEGATE (d);
1332 break;
1333 case FLOAT_TRUNCATE:
1334 d = real_value_truncate (mode, d);
1335 break;
1336 case FLOAT_EXTEND:
1337 /* All this does is change the mode. */
1338 break;
1339 case FIX:
1340 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1341 break;
1342 case NOT:
1343 {
1344 long tmp[4];
1345 int i;
1346
1347 real_to_target (tmp, &d, GET_MODE (op));
1348 for (i = 0; i < 4; i++)
1349 tmp[i] = ~tmp[i];
1350 real_from_target (&d, tmp, mode);
1351 break;
1352 }
1353 default:
1354 gcc_unreachable ();
1355 }
1356 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1357 }
1358
1359 else if (GET_CODE (op) == CONST_DOUBLE
1360 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1361 && GET_MODE_CLASS (mode) == MODE_INT
1362 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1363 {
1364 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1365 operators are intentionally left unspecified (to ease implementation
1366 by target backends), for consistency, this routine implements the
1367 same semantics for constant folding as used by the middle-end. */
1368
1369 /* This was formerly used only for non-IEEE float.
1370 eggert@twinsun.com says it is safe for IEEE also. */
1371 HOST_WIDE_INT xh, xl, th, tl;
1372 REAL_VALUE_TYPE x, t;
1373 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1374 switch (code)
1375 {
1376 case FIX:
1377 if (REAL_VALUE_ISNAN (x))
1378 return const0_rtx;
1379
1380 /* Test against the signed upper bound. */
1381 if (width > HOST_BITS_PER_WIDE_INT)
1382 {
1383 th = ((unsigned HOST_WIDE_INT) 1
1384 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1385 tl = -1;
1386 }
1387 else
1388 {
1389 th = 0;
1390 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1391 }
1392 real_from_integer (&t, VOIDmode, tl, th, 0);
1393 if (REAL_VALUES_LESS (t, x))
1394 {
1395 xh = th;
1396 xl = tl;
1397 break;
1398 }
1399
1400 /* Test against the signed lower bound. */
1401 if (width > HOST_BITS_PER_WIDE_INT)
1402 {
1403 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1404 tl = 0;
1405 }
1406 else
1407 {
1408 th = -1;
1409 tl = (HOST_WIDE_INT) -1 << (width - 1);
1410 }
1411 real_from_integer (&t, VOIDmode, tl, th, 0);
1412 if (REAL_VALUES_LESS (x, t))
1413 {
1414 xh = th;
1415 xl = tl;
1416 break;
1417 }
1418 REAL_VALUE_TO_INT (&xl, &xh, x);
1419 break;
1420
1421 case UNSIGNED_FIX:
1422 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1423 return const0_rtx;
1424
1425 /* Test against the unsigned upper bound. */
1426 if (width == 2*HOST_BITS_PER_WIDE_INT)
1427 {
1428 th = -1;
1429 tl = -1;
1430 }
1431 else if (width >= HOST_BITS_PER_WIDE_INT)
1432 {
1433 th = ((unsigned HOST_WIDE_INT) 1
1434 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1435 tl = -1;
1436 }
1437 else
1438 {
1439 th = 0;
1440 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1441 }
1442 real_from_integer (&t, VOIDmode, tl, th, 1);
1443 if (REAL_VALUES_LESS (t, x))
1444 {
1445 xh = th;
1446 xl = tl;
1447 break;
1448 }
1449
1450 REAL_VALUE_TO_INT (&xl, &xh, x);
1451 break;
1452
1453 default:
1454 gcc_unreachable ();
1455 }
1456 return immed_double_const (xl, xh, mode);
1457 }
1458
1459 return NULL_RTX;
1460 }
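/* Worked example (illustrative, not part of the original source):

     rtx v = simplify_const_unary_operation (NEG, SImode,
                                             GEN_INT (7), SImode);

   takes the CONST_INT branch above, computes val = -7 and returns
   gen_int_mode (-7, SImode), i.e. (const_int -7).  A non-constant operand
   would instead fall through and make the function return NULL_RTX.  */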
1461 \f
1462 /* Subroutine of simplify_binary_operation to simplify a commutative,
1463 associative binary operation CODE with result mode MODE, operating
1464 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1465 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1466 canonicalization is possible. */
1467
1468 static rtx
1469 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1470 rtx op0, rtx op1)
1471 {
1472 rtx tem;
1473
1474 /* Linearize the operator to the left. */
1475 if (GET_CODE (op1) == code)
1476 {
1477 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1478 if (GET_CODE (op0) == code)
1479 {
1480 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1481 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1482 }
1483
1484 /* "a op (b op c)" becomes "(b op c) op a". */
1485 if (! swap_commutative_operands_p (op1, op0))
1486 return simplify_gen_binary (code, mode, op1, op0);
1487
1488 tem = op0;
1489 op0 = op1;
1490 op1 = tem;
1491 }
1492
1493 if (GET_CODE (op0) == code)
1494 {
1495 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1496 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1497 {
1498 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1499 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1500 }
1501
1502 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1503 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1504 if (tem != 0)
1505 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1506
1507 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1508 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1509 if (tem != 0)
1510 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1511 }
1512
1513 return 0;
1514 }
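/* Reassociation sketch (illustrative; OP0 and OP1 are made up): with
   OP0 = (plus (reg X) (const_int 1)) and OP1 = (const_int 2), the
   "(a op b) op c" -> "a op (b op c)" step folds 1 + 2, so the whole
   expression becomes (plus (reg X) (const_int 3)) instead of a nested
   PLUS.  */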
1515
1516
1517 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1518 and OP1. Return 0 if no simplification is possible.
1519
1520 Don't use this for relational operations such as EQ or LT.
1521 Use simplify_relational_operation instead. */
1522 rtx
1523 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1524 rtx op0, rtx op1)
1525 {
1526 rtx trueop0, trueop1;
1527 rtx tem;
1528
1529 /* Relational operations don't work here. We must know the mode
1530 of the operands in order to do the comparison correctly.
1531 Assuming a full word can give incorrect results.
1532 Consider comparing 128 with -128 in QImode. */
1533 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1534 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1535
1536 /* Make sure the constant is second. */
1537 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1538 && swap_commutative_operands_p (op0, op1))
1539 {
1540 tem = op0, op0 = op1, op1 = tem;
1541 }
1542
1543 trueop0 = avoid_constant_pool_reference (op0);
1544 trueop1 = avoid_constant_pool_reference (op1);
1545
1546 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1547 if (tem)
1548 return tem;
1549 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1550 }
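/* Usage sketch (illustrative; values made up):

     rtx prod = simplify_binary_operation (MULT, SImode,
                                           GEN_INT (6), GEN_INT (7));

   Both operands are constants, so simplify_const_binary_operation is
   expected to fold this to (const_int 42); with a non-constant operand the
   call falls through to simplify_binary_operation_1 instead.  */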
1551
1552 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1553 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1554 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1555 actual constants. */
1556
1557 static rtx
1558 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1559 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1560 {
1561 rtx tem, reversed, opleft, opright;
1562 HOST_WIDE_INT val;
1563 unsigned int width = GET_MODE_BITSIZE (mode);
1564
1565 /* Even if we can't compute a constant result,
1566 there are some cases worth simplifying. */
1567
1568 switch (code)
1569 {
1570 case PLUS:
1571 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1572 when x is NaN, infinite, or finite and nonzero. They aren't
1573 when x is -0 and the rounding mode is not towards -infinity,
1574 since (-0) + 0 is then 0. */
1575 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1576 return op0;
1577
1578 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1579 transformations are safe even for IEEE. */
1580 if (GET_CODE (op0) == NEG)
1581 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1582 else if (GET_CODE (op1) == NEG)
1583 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1584
1585 /* (~a) + 1 -> -a */
1586 if (INTEGRAL_MODE_P (mode)
1587 && GET_CODE (op0) == NOT
1588 && trueop1 == const1_rtx)
1589 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1590
1591 /* Handle both-operands-constant cases. We can only add
1592 CONST_INTs to constants since the sum of relocatable symbols
1593 can't be handled by most assemblers. Don't add CONST_INT
1594 to CONST_INT since overflow won't be computed properly if wider
1595 than HOST_BITS_PER_WIDE_INT. */
1596
1597 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1598 && GET_CODE (op1) == CONST_INT)
1599 return plus_constant (op0, INTVAL (op1));
1600 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1601 && GET_CODE (op0) == CONST_INT)
1602 return plus_constant (op1, INTVAL (op0));
1603
1604 /* See if this is something like X * C - X or vice versa or
1605 if the multiplication is written as a shift. If so, we can
1606 distribute and make a new multiply, shift, or maybe just
1607 have X (if C is 2 in the example above). But don't make
1608 something more expensive than we had before. */
1609
1610 if (SCALAR_INT_MODE_P (mode))
1611 {
1612 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1613 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1614 rtx lhs = op0, rhs = op1;
1615
1616 if (GET_CODE (lhs) == NEG)
1617 {
1618 coeff0l = -1;
1619 coeff0h = -1;
1620 lhs = XEXP (lhs, 0);
1621 }
1622 else if (GET_CODE (lhs) == MULT
1623 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1624 {
1625 coeff0l = INTVAL (XEXP (lhs, 1));
1626 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1627 lhs = XEXP (lhs, 0);
1628 }
1629 else if (GET_CODE (lhs) == ASHIFT
1630 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1631 && INTVAL (XEXP (lhs, 1)) >= 0
1632 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1633 {
1634 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1635 coeff0h = 0;
1636 lhs = XEXP (lhs, 0);
1637 }
1638
1639 if (GET_CODE (rhs) == NEG)
1640 {
1641 coeff1l = -1;
1642 coeff1h = -1;
1643 rhs = XEXP (rhs, 0);
1644 }
1645 else if (GET_CODE (rhs) == MULT
1646 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1647 {
1648 coeff1l = INTVAL (XEXP (rhs, 1));
1649 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1650 rhs = XEXP (rhs, 0);
1651 }
1652 else if (GET_CODE (rhs) == ASHIFT
1653 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1654 && INTVAL (XEXP (rhs, 1)) >= 0
1655 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1656 {
1657 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1658 coeff1h = 0;
1659 rhs = XEXP (rhs, 0);
1660 }
1661
1662 if (rtx_equal_p (lhs, rhs))
1663 {
1664 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1665 rtx coeff;
1666 unsigned HOST_WIDE_INT l;
1667 HOST_WIDE_INT h;
1668
1669 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1670 coeff = immed_double_const (l, h, mode);
1671
1672 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1673 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1674 ? tem : 0;
1675 }
1676 }
1677
1678 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1679 if ((GET_CODE (op1) == CONST_INT
1680 || GET_CODE (op1) == CONST_DOUBLE)
1681 && GET_CODE (op0) == XOR
1682 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1683 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1684 && mode_signbit_p (mode, op1))
1685 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1686 simplify_gen_binary (XOR, mode, op1,
1687 XEXP (op0, 1)));
1688
1689 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1690 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1691 && GET_CODE (op0) == MULT
1692 && GET_CODE (XEXP (op0, 0)) == NEG)
1693 {
1694 rtx in1, in2;
1695
1696 in1 = XEXP (XEXP (op0, 0), 0);
1697 in2 = XEXP (op0, 1);
1698 return simplify_gen_binary (MINUS, mode, op1,
1699 simplify_gen_binary (MULT, mode,
1700 in1, in2));
1701 }
1702
1703 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1704 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1705 is 1. */
1706 if (COMPARISON_P (op0)
1707 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1708 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1709 && (reversed = reversed_comparison (op0, mode)))
1710 return
1711 simplify_gen_unary (NEG, mode, reversed, mode);
1712
1713 /* If one of the operands is a PLUS or a MINUS, see if we can
1714 simplify this by the associative law.
1715 Don't use the associative law for floating point.
1716 The inaccuracy makes it nonassociative,
1717 and subtle programs can break if operations are associated. */
1718
1719 if (INTEGRAL_MODE_P (mode)
1720 && (plus_minus_operand_p (op0)
1721 || plus_minus_operand_p (op1))
1722 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1723 return tem;
1724
1725 /* Reassociate floating point addition only when the user
1726 specifies associative math operations. */
1727 if (FLOAT_MODE_P (mode)
1728 && flag_associative_math)
1729 {
1730 tem = simplify_associative_operation (code, mode, op0, op1);
1731 if (tem)
1732 return tem;
1733 }
1734 break;
1735
1736 case COMPARE:
1737 #ifdef HAVE_cc0
1738 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1739 using cc0, in which case we want to leave it as a COMPARE
1740 so we can distinguish it from a register-register-copy.
1741
1742 In IEEE floating point, x-0 is not the same as x. */
1743
1744 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1745 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1746 && trueop1 == CONST0_RTX (mode))
1747 return op0;
1748 #endif
1749
1750 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1751 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1752 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1753 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1754 {
1755 rtx xop00 = XEXP (op0, 0);
1756 rtx xop10 = XEXP (op1, 0);
1757
1758 #ifdef HAVE_cc0
1759 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1760 #else
1761 if (REG_P (xop00) && REG_P (xop10)
1762 && GET_MODE (xop00) == GET_MODE (xop10)
1763 && REGNO (xop00) == REGNO (xop10)
1764 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1765 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1766 #endif
1767 return xop00;
1768 }
1769 break;
1770
1771 case MINUS:
1772 /* We can't assume x-x is 0 even with non-IEEE floating point,
1773 but since it is zero except in very strange circumstances, we
1774 will treat it as zero with -ffinite-math-only. */
1775 if (rtx_equal_p (trueop0, trueop1)
1776 && ! side_effects_p (op0)
1777 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1778 return CONST0_RTX (mode);
1779
1780 /* Change subtraction from zero into negation. (0 - x) is the
1781 same as -x when x is NaN, infinite, or finite and nonzero.
1782 But if the mode has signed zeros, and does not round towards
1783 -infinity, then 0 - 0 is 0, not -0. */
1784 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1785 return simplify_gen_unary (NEG, mode, op1, mode);
1786
1787 /* (-1 - a) is ~a. */
1788 if (trueop0 == constm1_rtx)
1789 return simplify_gen_unary (NOT, mode, op1, mode);
1790
1791 /* Subtracting 0 has no effect unless the mode has signed zeros
1792 and supports rounding towards -infinity. In such a case,
1793 0 - 0 is -0. */
1794 if (!(HONOR_SIGNED_ZEROS (mode)
1795 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1796 && trueop1 == CONST0_RTX (mode))
1797 return op0;
1798
1799 /* See if this is something like X * C - X or vice versa or
1800 if the multiplication is written as a shift. If so, we can
1801 distribute and make a new multiply, shift, or maybe just
1802 have X (if C is 2 in the example above). But don't make
1803 something more expensive than we had before. */
1804
1805 if (SCALAR_INT_MODE_P (mode))
1806 {
1807 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1808 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1809 rtx lhs = op0, rhs = op1;
1810
1811 if (GET_CODE (lhs) == NEG)
1812 {
1813 coeff0l = -1;
1814 coeff0h = -1;
1815 lhs = XEXP (lhs, 0);
1816 }
1817 else if (GET_CODE (lhs) == MULT
1818 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1819 {
1820 coeff0l = INTVAL (XEXP (lhs, 1));
1821 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1822 lhs = XEXP (lhs, 0);
1823 }
1824 else if (GET_CODE (lhs) == ASHIFT
1825 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1826 && INTVAL (XEXP (lhs, 1)) >= 0
1827 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1828 {
1829 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1830 coeff0h = 0;
1831 lhs = XEXP (lhs, 0);
1832 }
1833
1834 if (GET_CODE (rhs) == NEG)
1835 {
1836 negcoeff1l = 1;
1837 negcoeff1h = 0;
1838 rhs = XEXP (rhs, 0);
1839 }
1840 else if (GET_CODE (rhs) == MULT
1841 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1842 {
1843 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1844 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1845 rhs = XEXP (rhs, 0);
1846 }
1847 else if (GET_CODE (rhs) == ASHIFT
1848 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1849 && INTVAL (XEXP (rhs, 1)) >= 0
1850 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1851 {
1852 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1853 negcoeff1h = -1;
1854 rhs = XEXP (rhs, 0);
1855 }
1856
1857 if (rtx_equal_p (lhs, rhs))
1858 {
1859 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1860 rtx coeff;
1861 unsigned HOST_WIDE_INT l;
1862 HOST_WIDE_INT h;
1863
1864 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1865 coeff = immed_double_const (l, h, mode);
1866
1867 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1868 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1869 ? tem : 0;
1870 }
1871 }
1872
1873 /* (a - (-b)) -> (a + b). True even for IEEE. */
1874 if (GET_CODE (op1) == NEG)
1875 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1876
1877 /* (-x - c) may be simplified as (-c - x). */
1878 if (GET_CODE (op0) == NEG
1879 && (GET_CODE (op1) == CONST_INT
1880 || GET_CODE (op1) == CONST_DOUBLE))
1881 {
1882 tem = simplify_unary_operation (NEG, mode, op1, mode);
1883 if (tem)
1884 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1885 }
1886
1887 /* Don't let a relocatable value get a negative coeff. */
1888 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1889 return simplify_gen_binary (PLUS, mode,
1890 op0,
1891 neg_const_int (mode, op1));
1892
1893 /* (x - (x & y)) -> (x & ~y) */
1894 if (GET_CODE (op1) == AND)
1895 {
1896 if (rtx_equal_p (op0, XEXP (op1, 0)))
1897 {
1898 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1899 GET_MODE (XEXP (op1, 1)));
1900 return simplify_gen_binary (AND, mode, op0, tem);
1901 }
1902 if (rtx_equal_p (op0, XEXP (op1, 1)))
1903 {
1904 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1905 GET_MODE (XEXP (op1, 0)));
1906 return simplify_gen_binary (AND, mode, op0, tem);
1907 }
1908 }
1909
1910 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1911 by reversing the comparison code if valid. */
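      /* Illustrative example: with STORE_FLAG_VALUE == 1,
	 (minus (const_int 1) (ltu A B)) becomes (geu A B).  */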
1912 if (STORE_FLAG_VALUE == 1
1913 && trueop0 == const1_rtx
1914 && COMPARISON_P (op1)
1915 && (reversed = reversed_comparison (op1, mode)))
1916 return reversed;
1917
1918 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1919 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1920 && GET_CODE (op1) == MULT
1921 && GET_CODE (XEXP (op1, 0)) == NEG)
1922 {
1923 rtx in1, in2;
1924
1925 in1 = XEXP (XEXP (op1, 0), 0);
1926 in2 = XEXP (op1, 1);
1927 return simplify_gen_binary (PLUS, mode,
1928 simplify_gen_binary (MULT, mode,
1929 in1, in2),
1930 op0);
1931 }
1932
1933 /* Canonicalize (minus (neg A) (mult B C)) to
1934 (minus (mult (neg B) C) A). */
1935 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1936 && GET_CODE (op1) == MULT
1937 && GET_CODE (op0) == NEG)
1938 {
1939 rtx in1, in2;
1940
1941 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1942 in2 = XEXP (op1, 1);
1943 return simplify_gen_binary (MINUS, mode,
1944 simplify_gen_binary (MULT, mode,
1945 in1, in2),
1946 XEXP (op0, 0));
1947 }
1948
1949 /* If one of the operands is a PLUS or a MINUS, see if we can
1950 simplify this by the associative law. This will, for example,
1951 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1952 Don't use the associative law for floating point.
1953 The inaccuracy makes it nonassociative,
1954 and subtle programs can break if operations are associated. */
1955
1956 if (INTEGRAL_MODE_P (mode)
1957 && (plus_minus_operand_p (op0)
1958 || plus_minus_operand_p (op1))
1959 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1960 return tem;
1961 break;
1962
1963 case MULT:
1964 if (trueop1 == constm1_rtx)
1965 return simplify_gen_unary (NEG, mode, op0, mode);
1966
1967 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1968 x is NaN, since x * 0 is then also NaN. Nor is it valid
1969 when the mode has signed zeros, since multiplying a negative
1970 number by 0 will give -0, not 0. */
1971 if (!HONOR_NANS (mode)
1972 && !HONOR_SIGNED_ZEROS (mode)
1973 && trueop1 == CONST0_RTX (mode)
1974 && ! side_effects_p (op0))
1975 return op1;
1976
1977 /* In IEEE floating point, x*1 is not equivalent to x for
1978 signalling NaNs. */
1979 if (!HONOR_SNANS (mode)
1980 && trueop1 == CONST1_RTX (mode))
1981 return op0;
1982
1983 /* Convert multiply by constant power of two into shift unless
1984 we are still generating RTL. This test is a kludge. */
1985 if (GET_CODE (trueop1) == CONST_INT
1986 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1987 /* If the mode is larger than the host word size, and the
1988 uppermost bit is set, then this isn't a power of two due
1989 to implicit sign extension. */
1990 && (width <= HOST_BITS_PER_WIDE_INT
1991 || val != HOST_BITS_PER_WIDE_INT - 1))
1992 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1993
1994 /* Likewise for multipliers wider than a word. */
1995 if (GET_CODE (trueop1) == CONST_DOUBLE
1996 && (GET_MODE (trueop1) == VOIDmode
1997 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1998 && GET_MODE (op0) == mode
1999 && CONST_DOUBLE_LOW (trueop1) == 0
2000 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2001 return simplify_gen_binary (ASHIFT, mode, op0,
2002 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2003
2004 /* x*2 is x+x and x*(-1) is -x */
2005 if (GET_CODE (trueop1) == CONST_DOUBLE
2006 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2007 && GET_MODE (op0) == mode)
2008 {
2009 REAL_VALUE_TYPE d;
2010 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2011
2012 if (REAL_VALUES_EQUAL (d, dconst2))
2013 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2014
2015 if (!HONOR_SNANS (mode)
2016 && REAL_VALUES_EQUAL (d, dconstm1))
2017 return simplify_gen_unary (NEG, mode, op0, mode);
2018 }
2019
2020 /* Optimize -x * -x as x * x. */
2021 if (FLOAT_MODE_P (mode)
2022 && GET_CODE (op0) == NEG
2023 && GET_CODE (op1) == NEG
2024 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2025 && !side_effects_p (XEXP (op0, 0)))
2026 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2027
2028 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2029 if (SCALAR_FLOAT_MODE_P (mode)
2030 && GET_CODE (op0) == ABS
2031 && GET_CODE (op1) == ABS
2032 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2033 && !side_effects_p (XEXP (op0, 0)))
2034 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2035
2036 /* Reassociate multiplication, but for floating point MULTs
2037 only when the user specifies unsafe math optimizations. */
2038 if (! FLOAT_MODE_P (mode)
2039 || flag_unsafe_math_optimizations)
2040 {
2041 tem = simplify_associative_operation (code, mode, op0, op1);
2042 if (tem)
2043 return tem;
2044 }
2045 break;
2046
2047 case IOR:
2048 if (trueop1 == const0_rtx)
2049 return op0;
2050 if (GET_CODE (trueop1) == CONST_INT
2051 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2052 == GET_MODE_MASK (mode)))
2053 return op1;
2054 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2055 return op0;
2056 /* A | (~A) -> -1 */
2057 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2058 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2059 && ! side_effects_p (op0)
2060 && SCALAR_INT_MODE_P (mode))
2061 return constm1_rtx;
2062
2063 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2064 if (GET_CODE (op1) == CONST_INT
2065 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2066 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2067 return op1;
2068
2069 /* Canonicalize (X & C1) | C2. */
2070 if (GET_CODE (op0) == AND
2071 && GET_CODE (trueop1) == CONST_INT
2072 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2073 {
2074 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2075 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2076 HOST_WIDE_INT c2 = INTVAL (trueop1);
2077
2078 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2079 if ((c1 & c2) == c1
2080 && !side_effects_p (XEXP (op0, 0)))
2081 return trueop1;
2082
2083 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2084 if (((c1|c2) & mask) == mask)
2085 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2086
2087 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2088 if (((c1 & ~c2) & mask) != (c1 & mask))
2089 {
2090 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2091 gen_int_mode (c1 & ~c2, mode));
2092 return simplify_gen_binary (IOR, mode, tem, op1);
2093 }
2094 }
2095
2096 /* Convert (A & B) | A to A. */
2097 if (GET_CODE (op0) == AND
2098 && (rtx_equal_p (XEXP (op0, 0), op1)
2099 || rtx_equal_p (XEXP (op0, 1), op1))
2100 && ! side_effects_p (XEXP (op0, 0))
2101 && ! side_effects_p (XEXP (op0, 1)))
2102 return op1;
2103
2104 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2105 mode size to (rotate A CX). */
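      /* Illustrative example: in SImode,
	 (ior (ashift A (const_int 24)) (lshiftrt A (const_int 8)))
	 becomes (rotate A (const_int 24)), since 24 + 8 equals the
	 32-bit mode size.  */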
2106
2107 if (GET_CODE (op1) == ASHIFT
2108 || GET_CODE (op1) == SUBREG)
2109 {
2110 opleft = op1;
2111 opright = op0;
2112 }
2113 else
2114 {
2115 opright = op1;
2116 opleft = op0;
2117 }
2118
2119 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2120 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2121 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2122 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2123 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2124 == GET_MODE_BITSIZE (mode)))
2125 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2126
2127 /* Same, but for ashift that has been "simplified" to a wider mode
2128 by simplify_shift_const. */
2129
2130 if (GET_CODE (opleft) == SUBREG
2131 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2132 && GET_CODE (opright) == LSHIFTRT
2133 && GET_CODE (XEXP (opright, 0)) == SUBREG
2134 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2135 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2136 && (GET_MODE_SIZE (GET_MODE (opleft))
2137 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2138 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2139 SUBREG_REG (XEXP (opright, 0)))
2140 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2141 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2142 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2143 == GET_MODE_BITSIZE (mode)))
2144 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2145 XEXP (SUBREG_REG (opleft), 1));
2146
2147 /* If we have (ior (and X C1) C2), simplify this by making
2148 C1 as small as possible if C1 actually changes. */
2149 if (GET_CODE (op1) == CONST_INT
2150 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2151 || INTVAL (op1) > 0)
2152 && GET_CODE (op0) == AND
2153 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2154 && GET_CODE (op1) == CONST_INT
2155 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2156 return simplify_gen_binary (IOR, mode,
2157 simplify_gen_binary
2158 (AND, mode, XEXP (op0, 0),
2159 GEN_INT (INTVAL (XEXP (op0, 1))
2160 & ~INTVAL (op1))),
2161 op1);
2162
2163 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2164 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2165 the PLUS does not affect any of the bits in OP1: then we can do
2166 the IOR as a PLUS and we can associate. This is valid if OP1
2167 can be safely shifted left C bits. */
2168 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2169 && GET_CODE (XEXP (op0, 0)) == PLUS
2170 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2171 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2172 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2173 {
2174 int count = INTVAL (XEXP (op0, 1));
2175 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2176
2177 if (mask >> count == INTVAL (trueop1)
2178 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2179 return simplify_gen_binary (ASHIFTRT, mode,
2180 plus_constant (XEXP (op0, 0), mask),
2181 XEXP (op0, 1));
2182 }
2183
2184 tem = simplify_associative_operation (code, mode, op0, op1);
2185 if (tem)
2186 return tem;
2187 break;
2188
2189 case XOR:
2190 if (trueop1 == const0_rtx)
2191 return op0;
2192 if (GET_CODE (trueop1) == CONST_INT
2193 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2194 == GET_MODE_MASK (mode)))
2195 return simplify_gen_unary (NOT, mode, op0, mode);
2196 if (rtx_equal_p (trueop0, trueop1)
2197 && ! side_effects_p (op0)
2198 && GET_MODE_CLASS (mode) != MODE_CC)
2199 return CONST0_RTX (mode);
2200
2201 /* Canonicalize XOR of the most significant bit to PLUS. */
2202 if ((GET_CODE (op1) == CONST_INT
2203 || GET_CODE (op1) == CONST_DOUBLE)
2204 && mode_signbit_p (mode, op1))
2205 return simplify_gen_binary (PLUS, mode, op0, op1);
2206 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2207 if ((GET_CODE (op1) == CONST_INT
2208 || GET_CODE (op1) == CONST_DOUBLE)
2209 && GET_CODE (op0) == PLUS
2210 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2211 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2212 && mode_signbit_p (mode, XEXP (op0, 1)))
2213 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2214 simplify_gen_binary (XOR, mode, op1,
2215 XEXP (op0, 1)));
2216
2217 /* If we are XORing two things that have no bits in common,
2218 convert them into an IOR. This helps to detect rotation encoded
2219 using those methods and possibly other simplifications. */
2220
2221 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2222 && (nonzero_bits (op0, mode)
2223 & nonzero_bits (op1, mode)) == 0)
2224 return (simplify_gen_binary (IOR, mode, op0, op1));
2225
2226 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2227 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2228 (NOT y). */
2229 {
2230 int num_negated = 0;
2231
2232 if (GET_CODE (op0) == NOT)
2233 num_negated++, op0 = XEXP (op0, 0);
2234 if (GET_CODE (op1) == NOT)
2235 num_negated++, op1 = XEXP (op1, 0);
2236
2237 if (num_negated == 2)
2238 return simplify_gen_binary (XOR, mode, op0, op1);
2239 else if (num_negated == 1)
2240 return simplify_gen_unary (NOT, mode,
2241 simplify_gen_binary (XOR, mode, op0, op1),
2242 mode);
2243 }
2244
2245 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2246 correspond to a machine insn or result in further simplifications
2247 if B is a constant. */
2248
2249 if (GET_CODE (op0) == AND
2250 && rtx_equal_p (XEXP (op0, 1), op1)
2251 && ! side_effects_p (op1))
2252 return simplify_gen_binary (AND, mode,
2253 simplify_gen_unary (NOT, mode,
2254 XEXP (op0, 0), mode),
2255 op1);
2256
2257 else if (GET_CODE (op0) == AND
2258 && rtx_equal_p (XEXP (op0, 0), op1)
2259 && ! side_effects_p (op1))
2260 return simplify_gen_binary (AND, mode,
2261 simplify_gen_unary (NOT, mode,
2262 XEXP (op0, 1), mode),
2263 op1);
2264
2265 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2266 comparison if STORE_FLAG_VALUE is 1. */
2267 if (STORE_FLAG_VALUE == 1
2268 && trueop1 == const1_rtx
2269 && COMPARISON_P (op0)
2270 && (reversed = reversed_comparison (op0, mode)))
2271 return reversed;
2272
2273 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2274 is (lt foo (const_int 0)), so we can perform the above
2275 simplification if STORE_FLAG_VALUE is 1. */
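      /* Illustrative example: in SImode,
	 (xor (lshiftrt X (const_int 31)) (const_int 1))
	 becomes (ge X (const_int 0)).  */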
2276
2277 if (STORE_FLAG_VALUE == 1
2278 && trueop1 == const1_rtx
2279 && GET_CODE (op0) == LSHIFTRT
2280 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2281 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2282 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2283
2284 /* (xor (comparison foo bar) (const_int sign-bit))
2285 when STORE_FLAG_VALUE is the sign bit. */
2286 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2287 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2288 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2289 && trueop1 == const_true_rtx
2290 && COMPARISON_P (op0)
2291 && (reversed = reversed_comparison (op0, mode)))
2292 return reversed;
2293
2294 tem = simplify_associative_operation (code, mode, op0, op1);
2295 if (tem)
2296 return tem;
2297 break;
2298
2299 case AND:
2300 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2301 return trueop1;
2302 /* If we are turning off bits already known off in OP0, we need
2303 not do an AND. */
2304 if (GET_CODE (trueop1) == CONST_INT
2305 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2306 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2307 return op0;
2308 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2309 && GET_MODE_CLASS (mode) != MODE_CC)
2310 return op0;
2311 /* A & (~A) -> 0 */
2312 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2313 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2314 && ! side_effects_p (op0)
2315 && GET_MODE_CLASS (mode) != MODE_CC)
2316 return CONST0_RTX (mode);
2317
2318 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2319 there are no nonzero bits of C outside of X's mode. */
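      /* Illustrative example: with X in QImode,
	 (and:SI (sign_extend:SI X) (const_int 0x7f)) becomes
	 (zero_extend:SI (and:QI X (const_int 0x7f))).  */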
2320 if ((GET_CODE (op0) == SIGN_EXTEND
2321 || GET_CODE (op0) == ZERO_EXTEND)
2322 && GET_CODE (trueop1) == CONST_INT
2323 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2324 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2325 & INTVAL (trueop1)) == 0)
2326 {
2327 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2328 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2329 gen_int_mode (INTVAL (trueop1),
2330 imode));
2331 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2332 }
2333
2334 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2335 if (GET_CODE (op0) == IOR
2336 && GET_CODE (trueop1) == CONST_INT
2337 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2338 {
2339 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2340 return simplify_gen_binary (IOR, mode,
2341 simplify_gen_binary (AND, mode,
2342 XEXP (op0, 0), op1),
2343 gen_int_mode (tmp, mode));
2344 }
2345
2346 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2347 insn (and may simplify more). */
2348 if (GET_CODE (op0) == XOR
2349 && rtx_equal_p (XEXP (op0, 0), op1)
2350 && ! side_effects_p (op1))
2351 return simplify_gen_binary (AND, mode,
2352 simplify_gen_unary (NOT, mode,
2353 XEXP (op0, 1), mode),
2354 op1);
2355
2356 if (GET_CODE (op0) == XOR
2357 && rtx_equal_p (XEXP (op0, 1), op1)
2358 && ! side_effects_p (op1))
2359 return simplify_gen_binary (AND, mode,
2360 simplify_gen_unary (NOT, mode,
2361 XEXP (op0, 0), mode),
2362 op1);
2363
2364 /* Similarly for (~(A ^ B)) & A. */
2365 if (GET_CODE (op0) == NOT
2366 && GET_CODE (XEXP (op0, 0)) == XOR
2367 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2368 && ! side_effects_p (op1))
2369 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2370
2371 if (GET_CODE (op0) == NOT
2372 && GET_CODE (XEXP (op0, 0)) == XOR
2373 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2374 && ! side_effects_p (op1))
2375 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2376
2377 /* Convert (A | B) & A to A. */
2378 if (GET_CODE (op0) == IOR
2379 && (rtx_equal_p (XEXP (op0, 0), op1)
2380 || rtx_equal_p (XEXP (op0, 1), op1))
2381 && ! side_effects_p (XEXP (op0, 0))
2382 && ! side_effects_p (XEXP (op0, 1)))
2383 return op1;
2384
2385 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2386 ((A & N) + B) & M -> (A + B) & M
2387 Similarly if (N & M) == 0,
2388 ((A | N) + B) & M -> (A + B) & M
2389 and for - instead of + and/or ^ instead of |. */
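      /* Illustrative example: with M == 0xff and N == 0x1ff
	 (so (N & M) == M),
	 (and (plus (and A (const_int 0x1ff)) B) (const_int 0xff))
	 becomes (and (plus A B) (const_int 0xff)).  */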
2390 if (GET_CODE (trueop1) == CONST_INT
2391 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2392 && ~INTVAL (trueop1)
2393 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2394 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2395 {
2396 rtx pmop[2];
2397 int which;
2398
2399 pmop[0] = XEXP (op0, 0);
2400 pmop[1] = XEXP (op0, 1);
2401
2402 for (which = 0; which < 2; which++)
2403 {
2404 tem = pmop[which];
2405 switch (GET_CODE (tem))
2406 {
2407 case AND:
2408 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2409 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2410 == INTVAL (trueop1))
2411 pmop[which] = XEXP (tem, 0);
2412 break;
2413 case IOR:
2414 case XOR:
2415 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2416 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2417 pmop[which] = XEXP (tem, 0);
2418 break;
2419 default:
2420 break;
2421 }
2422 }
2423
2424 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2425 {
2426 tem = simplify_gen_binary (GET_CODE (op0), mode,
2427 pmop[0], pmop[1]);
2428 return simplify_gen_binary (code, mode, tem, op1);
2429 }
2430 }
2431
2432 /* (and X (ior (not X) Y)) -> (and X Y) */
2433 if (GET_CODE (op1) == IOR
2434 && GET_CODE (XEXP (op1, 0)) == NOT
2435 && op0 == XEXP (XEXP (op1, 0), 0))
2436 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2437
2438 /* (and (ior (not X) Y) X) -> (and X Y) */
2439 if (GET_CODE (op0) == IOR
2440 && GET_CODE (XEXP (op0, 0)) == NOT
2441 && op1 == XEXP (XEXP (op0, 0), 0))
2442 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2443
2444 tem = simplify_associative_operation (code, mode, op0, op1);
2445 if (tem)
2446 return tem;
2447 break;
2448
2449 case UDIV:
2450 /* 0/x is 0 (or x&0 if x has side-effects). */
2451 if (trueop0 == CONST0_RTX (mode))
2452 {
2453 if (side_effects_p (op1))
2454 return simplify_gen_binary (AND, mode, op1, trueop0);
2455 return trueop0;
2456 }
2457 /* x/1 is x. */
2458 if (trueop1 == CONST1_RTX (mode))
2459 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2460 /* Convert divide by power of two into shift. */
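      /* Illustrative example: (udiv X (const_int 8)) becomes
	 (lshiftrt X (const_int 3)).  */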
2461 if (GET_CODE (trueop1) == CONST_INT
2462 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2463 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2464 break;
2465
2466 case DIV:
2467 /* Handle floating point and integers separately. */
2468 if (SCALAR_FLOAT_MODE_P (mode))
2469 {
2470 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2471 safe for modes with NaNs, since 0.0 / 0.0 will then be
2472 NaN rather than 0.0. Nor is it safe for modes with signed
2473 zeros, since dividing 0 by a negative number gives -0.0 */
2474 if (trueop0 == CONST0_RTX (mode)
2475 && !HONOR_NANS (mode)
2476 && !HONOR_SIGNED_ZEROS (mode)
2477 && ! side_effects_p (op1))
2478 return op0;
2479 /* x/1.0 is x. */
2480 if (trueop1 == CONST1_RTX (mode)
2481 && !HONOR_SNANS (mode))
2482 return op0;
2483
2484 if (GET_CODE (trueop1) == CONST_DOUBLE
2485 && trueop1 != CONST0_RTX (mode))
2486 {
2487 REAL_VALUE_TYPE d;
2488 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2489
2490 /* x/-1.0 is -x. */
2491 if (REAL_VALUES_EQUAL (d, dconstm1)
2492 && !HONOR_SNANS (mode))
2493 return simplify_gen_unary (NEG, mode, op0, mode);
2494
2495 /* Change FP division by a constant into multiplication.
2496 Only do this with -freciprocal-math. */
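	     /* Illustrative example: X / 4.0 becomes X * 0.25 when
		-freciprocal-math is in effect.  */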
2497 if (flag_reciprocal_math
2498 && !REAL_VALUES_EQUAL (d, dconst0))
2499 {
2500 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2501 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2502 return simplify_gen_binary (MULT, mode, op0, tem);
2503 }
2504 }
2505 }
2506 else
2507 {
2508 /* 0/x is 0 (or x&0 if x has side-effects). */
2509 if (trueop0 == CONST0_RTX (mode))
2510 {
2511 if (side_effects_p (op1))
2512 return simplify_gen_binary (AND, mode, op1, trueop0);
2513 return trueop0;
2514 }
2515 /* x/1 is x. */
2516 if (trueop1 == CONST1_RTX (mode))
2517 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2518 /* x/-1 is -x. */
2519 if (trueop1 == constm1_rtx)
2520 {
2521 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2522 return simplify_gen_unary (NEG, mode, x, mode);
2523 }
2524 }
2525 break;
2526
2527 case UMOD:
2528 /* 0%x is 0 (or x&0 if x has side-effects). */
2529 if (trueop0 == CONST0_RTX (mode))
2530 {
2531 if (side_effects_p (op1))
2532 return simplify_gen_binary (AND, mode, op1, trueop0);
2533 return trueop0;
2534 }
2535 /* x%1 is 0 (or x&0 if x has side-effects). */
2536 if (trueop1 == CONST1_RTX (mode))
2537 {
2538 if (side_effects_p (op0))
2539 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2540 return CONST0_RTX (mode);
2541 }
2542 /* Implement modulus by power of two as AND. */
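      /* Illustrative example: (umod X (const_int 16)) becomes
	 (and X (const_int 15)).  */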
2543 if (GET_CODE (trueop1) == CONST_INT
2544 && exact_log2 (INTVAL (trueop1)) > 0)
2545 return simplify_gen_binary (AND, mode, op0,
2546 GEN_INT (INTVAL (op1) - 1));
2547 break;
2548
2549 case MOD:
2550 /* 0%x is 0 (or x&0 if x has side-effects). */
2551 if (trueop0 == CONST0_RTX (mode))
2552 {
2553 if (side_effects_p (op1))
2554 return simplify_gen_binary (AND, mode, op1, trueop0);
2555 return trueop0;
2556 }
2557 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2558 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2559 {
2560 if (side_effects_p (op0))
2561 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2562 return CONST0_RTX (mode);
2563 }
2564 break;
2565
2566 case ROTATERT:
2567 case ROTATE:
2568 case ASHIFTRT:
2569 if (trueop1 == CONST0_RTX (mode))
2570 return op0;
2571 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2572 return op0;
2573 /* Rotating ~0 always results in ~0. */
2574 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2575 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2576 && ! side_effects_p (op1))
2577 return op0;
2578 canonicalize_shift:
2579 if (SHIFT_COUNT_TRUNCATED && GET_CODE (op1) == CONST_INT)
2580 {
2581 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2582 if (val != INTVAL (op1))
2583 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2584 }
2585 break;
2586
2587 case ASHIFT:
2588 case SS_ASHIFT:
2589 case US_ASHIFT:
2590 if (trueop1 == CONST0_RTX (mode))
2591 return op0;
2592 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2593 return op0;
2594 goto canonicalize_shift;
2595
2596 case LSHIFTRT:
2597 if (trueop1 == CONST0_RTX (mode))
2598 return op0;
2599 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2600 return op0;
2601 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
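      /* Illustrative example: in SImode, when CLZ of zero is defined
	 to be 32, (lshiftrt (clz X) (const_int 5)) is 1 exactly when
	 X is zero, so it becomes (eq X (const_int 0)).  */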
2602 if (GET_CODE (op0) == CLZ
2603 && GET_CODE (trueop1) == CONST_INT
2604 && STORE_FLAG_VALUE == 1
2605 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2606 {
2607 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2608 unsigned HOST_WIDE_INT zero_val = 0;
2609
2610 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2611 && zero_val == GET_MODE_BITSIZE (imode)
2612 && INTVAL (trueop1) == exact_log2 (zero_val))
2613 return simplify_gen_relational (EQ, mode, imode,
2614 XEXP (op0, 0), const0_rtx);
2615 }
2616 goto canonicalize_shift;
2617
2618 case SMIN:
2619 if (width <= HOST_BITS_PER_WIDE_INT
2620 && GET_CODE (trueop1) == CONST_INT
2621 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2622 && ! side_effects_p (op0))
2623 return op1;
2624 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2625 return op0;
2626 tem = simplify_associative_operation (code, mode, op0, op1);
2627 if (tem)
2628 return tem;
2629 break;
2630
2631 case SMAX:
2632 if (width <= HOST_BITS_PER_WIDE_INT
2633 && GET_CODE (trueop1) == CONST_INT
2634 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2635 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2636 && ! side_effects_p (op0))
2637 return op1;
2638 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2639 return op0;
2640 tem = simplify_associative_operation (code, mode, op0, op1);
2641 if (tem)
2642 return tem;
2643 break;
2644
2645 case UMIN:
2646 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2647 return op1;
2648 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2649 return op0;
2650 tem = simplify_associative_operation (code, mode, op0, op1);
2651 if (tem)
2652 return tem;
2653 break;
2654
2655 case UMAX:
2656 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2657 return op1;
2658 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2659 return op0;
2660 tem = simplify_associative_operation (code, mode, op0, op1);
2661 if (tem)
2662 return tem;
2663 break;
2664
2665 case SS_PLUS:
2666 case US_PLUS:
2667 case SS_MINUS:
2668 case US_MINUS:
2669 case SS_MULT:
2670 case US_MULT:
2671 case SS_DIV:
2672 case US_DIV:
2673 /* ??? There are simplifications that can be done. */
2674 return 0;
2675
2676 case VEC_SELECT:
2677 if (!VECTOR_MODE_P (mode))
2678 {
2679 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2680 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2681 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2682 gcc_assert (XVECLEN (trueop1, 0) == 1);
2683 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2684
2685 if (GET_CODE (trueop0) == CONST_VECTOR)
2686 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2687 (trueop1, 0, 0)));
2688
2689 /* Extract a scalar element from a nested VEC_SELECT expression
2690 (with an optional nested VEC_CONCAT expression). Some targets
2691 (i386) extract a scalar element from a vector using a chain of
2692 nested VEC_SELECT expressions. When the input operand is a memory
2693 operand, this operation can be simplified to a simple scalar
2694 load from an offset memory address. */
2695 if (GET_CODE (trueop0) == VEC_SELECT)
2696 {
2697 rtx op0 = XEXP (trueop0, 0);
2698 rtx op1 = XEXP (trueop0, 1);
2699
2700 enum machine_mode opmode = GET_MODE (op0);
2701 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2702 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2703
2704 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2705 int elem;
2706
2707 rtvec vec;
2708 rtx tmp_op, tmp;
2709
2710 gcc_assert (GET_CODE (op1) == PARALLEL);
2711 gcc_assert (i < n_elts);
2712
2713 /* Select the element pointed to by the nested selector. */
2714 elem = INTVAL (XVECEXP (op1, 0, i));
2715
2716 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2717 if (GET_CODE (op0) == VEC_CONCAT)
2718 {
2719 rtx op00 = XEXP (op0, 0);
2720 rtx op01 = XEXP (op0, 1);
2721
2722 enum machine_mode mode00, mode01;
2723 int n_elts00, n_elts01;
2724
2725 mode00 = GET_MODE (op00);
2726 mode01 = GET_MODE (op01);
2727
2728 /* Find out number of elements of each operand. */
2729 if (VECTOR_MODE_P (mode00))
2730 {
2731 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2732 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2733 }
2734 else
2735 n_elts00 = 1;
2736
2737 if (VECTOR_MODE_P (mode01))
2738 {
2739 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2740 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2741 }
2742 else
2743 n_elts01 = 1;
2744
2745 gcc_assert (n_elts == n_elts00 + n_elts01);
2746
2747 /* Select correct operand of VEC_CONCAT
2748 and adjust selector. */
2749 if (elem < n_elts01)
2750 tmp_op = op00;
2751 else
2752 {
2753 tmp_op = op01;
2754 elem -= n_elts00;
2755 }
2756 }
2757 else
2758 tmp_op = op0;
2759
2760 vec = rtvec_alloc (1);
2761 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2762
2763 tmp = gen_rtx_fmt_ee (code, mode,
2764 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2765 return tmp;
2766 }
2767 }
2768 else
2769 {
2770 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2771 gcc_assert (GET_MODE_INNER (mode)
2772 == GET_MODE_INNER (GET_MODE (trueop0)));
2773 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2774
2775 if (GET_CODE (trueop0) == CONST_VECTOR)
2776 {
2777 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2778 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2779 rtvec v = rtvec_alloc (n_elts);
2780 unsigned int i;
2781
2782 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2783 for (i = 0; i < n_elts; i++)
2784 {
2785 rtx x = XVECEXP (trueop1, 0, i);
2786
2787 gcc_assert (GET_CODE (x) == CONST_INT);
2788 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2789 INTVAL (x));
2790 }
2791
2792 return gen_rtx_CONST_VECTOR (mode, v);
2793 }
2794 }
2795
2796 if (XVECLEN (trueop1, 0) == 1
2797 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2798 && GET_CODE (trueop0) == VEC_CONCAT)
2799 {
2800 rtx vec = trueop0;
2801 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2802
2803 /* Try to find the element in the VEC_CONCAT. */
2804 while (GET_MODE (vec) != mode
2805 && GET_CODE (vec) == VEC_CONCAT)
2806 {
2807 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2808 if (offset < vec_size)
2809 vec = XEXP (vec, 0);
2810 else
2811 {
2812 offset -= vec_size;
2813 vec = XEXP (vec, 1);
2814 }
2815 vec = avoid_constant_pool_reference (vec);
2816 }
2817
2818 if (GET_MODE (vec) == mode)
2819 return vec;
2820 }
2821
2822 return 0;
2823 case VEC_CONCAT:
2824 {
2825 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2826 ? GET_MODE (trueop0)
2827 : GET_MODE_INNER (mode));
2828 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2829 ? GET_MODE (trueop1)
2830 : GET_MODE_INNER (mode));
2831
2832 gcc_assert (VECTOR_MODE_P (mode));
2833 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2834 == GET_MODE_SIZE (mode));
2835
2836 if (VECTOR_MODE_P (op0_mode))
2837 gcc_assert (GET_MODE_INNER (mode)
2838 == GET_MODE_INNER (op0_mode));
2839 else
2840 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2841
2842 if (VECTOR_MODE_P (op1_mode))
2843 gcc_assert (GET_MODE_INNER (mode)
2844 == GET_MODE_INNER (op1_mode));
2845 else
2846 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2847
2848 if ((GET_CODE (trueop0) == CONST_VECTOR
2849 || GET_CODE (trueop0) == CONST_INT
2850 || GET_CODE (trueop0) == CONST_DOUBLE)
2851 && (GET_CODE (trueop1) == CONST_VECTOR
2852 || GET_CODE (trueop1) == CONST_INT
2853 || GET_CODE (trueop1) == CONST_DOUBLE))
2854 {
2855 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2856 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2857 rtvec v = rtvec_alloc (n_elts);
2858 unsigned int i;
2859 unsigned in_n_elts = 1;
2860
2861 if (VECTOR_MODE_P (op0_mode))
2862 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2863 for (i = 0; i < n_elts; i++)
2864 {
2865 if (i < in_n_elts)
2866 {
2867 if (!VECTOR_MODE_P (op0_mode))
2868 RTVEC_ELT (v, i) = trueop0;
2869 else
2870 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2871 }
2872 else
2873 {
2874 if (!VECTOR_MODE_P (op1_mode))
2875 RTVEC_ELT (v, i) = trueop1;
2876 else
2877 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2878 i - in_n_elts);
2879 }
2880 }
2881
2882 return gen_rtx_CONST_VECTOR (mode, v);
2883 }
2884 }
2885 return 0;
2886
2887 default:
2888 gcc_unreachable ();
2889 }
2890
2891 return 0;
2892 }
2893
2894 rtx
2895 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2896 rtx op0, rtx op1)
2897 {
2898 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2899 HOST_WIDE_INT val;
2900 unsigned int width = GET_MODE_BITSIZE (mode);
2901
2902 if (VECTOR_MODE_P (mode)
2903 && code != VEC_CONCAT
2904 && GET_CODE (op0) == CONST_VECTOR
2905 && GET_CODE (op1) == CONST_VECTOR)
2906 {
2907 unsigned n_elts = GET_MODE_NUNITS (mode);
2908 enum machine_mode op0mode = GET_MODE (op0);
2909 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2910 enum machine_mode op1mode = GET_MODE (op1);
2911 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2912 rtvec v = rtvec_alloc (n_elts);
2913 unsigned int i;
2914
2915 gcc_assert (op0_n_elts == n_elts);
2916 gcc_assert (op1_n_elts == n_elts);
2917 for (i = 0; i < n_elts; i++)
2918 {
2919 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2920 CONST_VECTOR_ELT (op0, i),
2921 CONST_VECTOR_ELT (op1, i));
2922 if (!x)
2923 return 0;
2924 RTVEC_ELT (v, i) = x;
2925 }
2926
2927 return gen_rtx_CONST_VECTOR (mode, v);
2928 }
2929
2930 if (VECTOR_MODE_P (mode)
2931 && code == VEC_CONCAT
2932 && (CONST_INT_P (op0)
2933 || GET_CODE (op0) == CONST_DOUBLE
2934 || GET_CODE (op0) == CONST_FIXED)
2935 && (CONST_INT_P (op1)
2936 || GET_CODE (op1) == CONST_DOUBLE
2937 || GET_CODE (op1) == CONST_FIXED))
2938 {
2939 unsigned n_elts = GET_MODE_NUNITS (mode);
2940 rtvec v = rtvec_alloc (n_elts);
2941
2942 gcc_assert (n_elts >= 2);
2943 if (n_elts == 2)
2944 {
2945 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2946 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2947
2948 RTVEC_ELT (v, 0) = op0;
2949 RTVEC_ELT (v, 1) = op1;
2950 }
2951 else
2952 {
2953 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2954 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2955 unsigned i;
2956
2957 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2958 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2959 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2960
2961 for (i = 0; i < op0_n_elts; ++i)
2962 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2963 for (i = 0; i < op1_n_elts; ++i)
2964 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2965 }
2966
2967 return gen_rtx_CONST_VECTOR (mode, v);
2968 }
2969
2970 if (SCALAR_FLOAT_MODE_P (mode)
2971 && GET_CODE (op0) == CONST_DOUBLE
2972 && GET_CODE (op1) == CONST_DOUBLE
2973 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2974 {
2975 if (code == AND
2976 || code == IOR
2977 || code == XOR)
2978 {
2979 long tmp0[4];
2980 long tmp1[4];
2981 REAL_VALUE_TYPE r;
2982 int i;
2983
2984 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2985 GET_MODE (op0));
2986 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2987 GET_MODE (op1));
2988 for (i = 0; i < 4; i++)
2989 {
2990 switch (code)
2991 {
2992 case AND:
2993 tmp0[i] &= tmp1[i];
2994 break;
2995 case IOR:
2996 tmp0[i] |= tmp1[i];
2997 break;
2998 case XOR:
2999 tmp0[i] ^= tmp1[i];
3000 break;
3001 default:
3002 gcc_unreachable ();
3003 }
3004 }
3005 real_from_target (&r, tmp0, mode);
3006 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3007 }
3008 else
3009 {
3010 REAL_VALUE_TYPE f0, f1, value, result;
3011 bool inexact;
3012
3013 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3014 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3015 real_convert (&f0, mode, &f0);
3016 real_convert (&f1, mode, &f1);
3017
3018 if (HONOR_SNANS (mode)
3019 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3020 return 0;
3021
3022 if (code == DIV
3023 && REAL_VALUES_EQUAL (f1, dconst0)
3024 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3025 return 0;
3026
3027 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3028 && flag_trapping_math
3029 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3030 {
3031 int s0 = REAL_VALUE_NEGATIVE (f0);
3032 int s1 = REAL_VALUE_NEGATIVE (f1);
3033
3034 switch (code)
3035 {
3036 case PLUS:
3037 /* Inf + -Inf = NaN plus exception. */
3038 if (s0 != s1)
3039 return 0;
3040 break;
3041 case MINUS:
3042 /* Inf - Inf = NaN plus exception. */
3043 if (s0 == s1)
3044 return 0;
3045 break;
3046 case DIV:
3047 /* Inf / Inf = NaN plus exception. */
3048 return 0;
3049 default:
3050 break;
3051 }
3052 }
3053
3054 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3055 && flag_trapping_math
3056 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3057 || (REAL_VALUE_ISINF (f1)
3058 && REAL_VALUES_EQUAL (f0, dconst0))))
3059 /* Inf * 0 = NaN plus exception. */
3060 return 0;
3061
3062 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3063 &f0, &f1);
3064 real_convert (&result, mode, &value);
3065
3066 /* Don't constant fold this floating point operation if
3067 the result has overflowed and flag_trapping_math. */
3068
3069 if (flag_trapping_math
3070 && MODE_HAS_INFINITIES (mode)
3071 && REAL_VALUE_ISINF (result)
3072 && !REAL_VALUE_ISINF (f0)
3073 && !REAL_VALUE_ISINF (f1))
3074 /* Overflow plus exception. */
3075 return 0;
3076
3077 /* Don't constant fold this floating point operation if the
3078 result may depend upon the run-time rounding mode and
3079 flag_rounding_math is set, or if GCC's software emulation
3080 is unable to accurately represent the result. */
3081
3082 if ((flag_rounding_math
3083 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
3084 && !flag_unsafe_math_optimizations))
3085 && (inexact || !real_identical (&result, &value)))
3086 return NULL_RTX;
3087
3088 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3089 }
3090 }
3091
3092 /* We can fold some multi-word operations. */
3093 if (GET_MODE_CLASS (mode) == MODE_INT
3094 && width == HOST_BITS_PER_WIDE_INT * 2
3095 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3096 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3097 {
3098 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3099 HOST_WIDE_INT h1, h2, hv, ht;
3100
3101 if (GET_CODE (op0) == CONST_DOUBLE)
3102 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3103 else
3104 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3105
3106 if (GET_CODE (op1) == CONST_DOUBLE)
3107 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3108 else
3109 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3110
3111 switch (code)
3112 {
3113 case MINUS:
3114 /* A - B == A + (-B). */
3115 neg_double (l2, h2, &lv, &hv);
3116 l2 = lv, h2 = hv;
3117
3118 /* Fall through.... */
3119
3120 case PLUS:
3121 add_double (l1, h1, l2, h2, &lv, &hv);
3122 break;
3123
3124 case MULT:
3125 mul_double (l1, h1, l2, h2, &lv, &hv);
3126 break;
3127
3128 case DIV:
3129 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3130 &lv, &hv, &lt, &ht))
3131 return 0;
3132 break;
3133
3134 case MOD:
3135 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3136 &lt, &ht, &lv, &hv))
3137 return 0;
3138 break;
3139
3140 case UDIV:
3141 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3142 &lv, &hv, &lt, &ht))
3143 return 0;
3144 break;
3145
3146 case UMOD:
3147 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3148 &lt, &ht, &lv, &hv))
3149 return 0;
3150 break;
3151
3152 case AND:
3153 lv = l1 & l2, hv = h1 & h2;
3154 break;
3155
3156 case IOR:
3157 lv = l1 | l2, hv = h1 | h2;
3158 break;
3159
3160 case XOR:
3161 lv = l1 ^ l2, hv = h1 ^ h2;
3162 break;
3163
3164 case SMIN:
3165 if (h1 < h2
3166 || (h1 == h2
3167 && ((unsigned HOST_WIDE_INT) l1
3168 < (unsigned HOST_WIDE_INT) l2)))
3169 lv = l1, hv = h1;
3170 else
3171 lv = l2, hv = h2;
3172 break;
3173
3174 case SMAX:
3175 if (h1 > h2
3176 || (h1 == h2
3177 && ((unsigned HOST_WIDE_INT) l1
3178 > (unsigned HOST_WIDE_INT) l2)))
3179 lv = l1, hv = h1;
3180 else
3181 lv = l2, hv = h2;
3182 break;
3183
3184 case UMIN:
3185 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3186 || (h1 == h2
3187 && ((unsigned HOST_WIDE_INT) l1
3188 < (unsigned HOST_WIDE_INT) l2)))
3189 lv = l1, hv = h1;
3190 else
3191 lv = l2, hv = h2;
3192 break;
3193
3194 case UMAX:
3195 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3196 || (h1 == h2
3197 && ((unsigned HOST_WIDE_INT) l1
3198 > (unsigned HOST_WIDE_INT) l2)))
3199 lv = l1, hv = h1;
3200 else
3201 lv = l2, hv = h2;
3202 break;
3203
3204 case LSHIFTRT: case ASHIFTRT:
3205 case ASHIFT:
3206 case ROTATE: case ROTATERT:
3207 if (SHIFT_COUNT_TRUNCATED)
3208 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3209
3210 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3211 return 0;
3212
3213 if (code == LSHIFTRT || code == ASHIFTRT)
3214 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3215 code == ASHIFTRT);
3216 else if (code == ASHIFT)
3217 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3218 else if (code == ROTATE)
3219 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3220 else /* code == ROTATERT */
3221 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3222 break;
3223
3224 default:
3225 return 0;
3226 }
3227
3228 return immed_double_const (lv, hv, mode);
3229 }
3230
3231 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3232 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3233 {
3234 /* Get the integer argument values in two forms:
3235 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3236
3237 arg0 = INTVAL (op0);
3238 arg1 = INTVAL (op1);
3239
3240 if (width < HOST_BITS_PER_WIDE_INT)
3241 {
3242 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3243 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3244
3245 arg0s = arg0;
3246 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3247 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3248
3249 arg1s = arg1;
3250 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3251 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3252 }
3253 else
3254 {
3255 arg0s = arg0;
3256 arg1s = arg1;
3257 }
3258
3259 /* Compute the value of the arithmetic. */
3260
3261 switch (code)
3262 {
3263 case PLUS:
3264 val = arg0s + arg1s;
3265 break;
3266
3267 case MINUS:
3268 val = arg0s - arg1s;
3269 break;
3270
3271 case MULT:
3272 val = arg0s * arg1s;
3273 break;
3274
3275 case DIV:
3276 if (arg1s == 0
3277 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3278 && arg1s == -1))
3279 return 0;
3280 val = arg0s / arg1s;
3281 break;
3282
3283 case MOD:
3284 if (arg1s == 0
3285 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3286 && arg1s == -1))
3287 return 0;
3288 val = arg0s % arg1s;
3289 break;
3290
3291 case UDIV:
3292 if (arg1 == 0
3293 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3294 && arg1s == -1))
3295 return 0;
3296 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3297 break;
3298
3299 case UMOD:
3300 if (arg1 == 0
3301 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3302 && arg1s == -1))
3303 return 0;
3304 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3305 break;
3306
3307 case AND:
3308 val = arg0 & arg1;
3309 break;
3310
3311 case IOR:
3312 val = arg0 | arg1;
3313 break;
3314
3315 case XOR:
3316 val = arg0 ^ arg1;
3317 break;
3318
3319 case LSHIFTRT:
3320 case ASHIFT:
3321 case ASHIFTRT:
3322 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3323 the value is in range. We can't return any old value for
3324 out-of-range arguments because either the middle-end (via
3325 shift_truncation_mask) or the back-end might be relying on
3326 target-specific knowledge. Nor can we rely on
3327 shift_truncation_mask, since the shift might not be part of an
3328 ashlM3, lshrM3 or ashrM3 instruction. */
3329 if (SHIFT_COUNT_TRUNCATED)
3330 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3331 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3332 return 0;
3333
3334 val = (code == ASHIFT
3335 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3336 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3337
3338 /* Sign-extend the result for arithmetic right shifts. */
3339 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3340 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3341 break;
3342
3343 case ROTATERT:
3344 if (arg1 < 0)
3345 return 0;
3346
3347 arg1 %= width;
3348 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3349 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3350 break;
3351
3352 case ROTATE:
3353 if (arg1 < 0)
3354 return 0;
3355
3356 arg1 %= width;
3357 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3358 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3359 break;
3360
3361 case COMPARE:
3362 /* Do nothing here. */
3363 return 0;
3364
3365 case SMIN:
3366 val = arg0s <= arg1s ? arg0s : arg1s;
3367 break;
3368
3369 case UMIN:
3370 val = ((unsigned HOST_WIDE_INT) arg0
3371 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3372 break;
3373
3374 case SMAX:
3375 val = arg0s > arg1s ? arg0s : arg1s;
3376 break;
3377
3378 case UMAX:
3379 val = ((unsigned HOST_WIDE_INT) arg0
3380 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3381 break;
3382
3383 case SS_PLUS:
3384 case US_PLUS:
3385 case SS_MINUS:
3386 case US_MINUS:
3387 case SS_MULT:
3388 case US_MULT:
3389 case SS_DIV:
3390 case US_DIV:
3391 case SS_ASHIFT:
3392 case US_ASHIFT:
3393 /* ??? There are simplifications that can be done. */
3394 return 0;
3395
3396 default:
3397 gcc_unreachable ();
3398 }
3399
3400 return gen_int_mode (val, mode);
3401 }
3402
3403 return NULL_RTX;
3404 }
3405
3406
3407 \f
3408 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3409 PLUS or MINUS.
3410
3411 Rather than test for specific cases, we do this by a brute-force method
3412 and do all possible simplifications until no more changes occur. Then
3413 we rebuild the operation. */
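
/* For example (roughly): given (minus (plus A B) (plus B C)), the operands
   are flattened into the terms A, -B, B and -C; the two B terms cancel in
   the pairwise loop and the remaining terms are rebuilt as (minus A C).  */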
3414
3415 struct simplify_plus_minus_op_data
3416 {
3417 rtx op;
3418 short neg;
3419 };
3420
3421 static bool
3422 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3423 {
3424 int result;
3425
3426 result = (commutative_operand_precedence (y)
3427 - commutative_operand_precedence (x));
3428 if (result)
3429 return result > 0;
3430
3431 /* Group together equal REGs to do more simplification. */
3432 if (REG_P (x) && REG_P (y))
3433 return REGNO (x) > REGNO (y);
3434 else
3435 return false;
3436 }
3437
3438 static rtx
3439 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3440 rtx op1)
3441 {
3442 struct simplify_plus_minus_op_data ops[8];
3443 rtx result, tem;
3444 int n_ops = 2, input_ops = 2;
3445 int changed, n_constants = 0, canonicalized = 0;
3446 int i, j;
3447
3448 memset (ops, 0, sizeof ops);
3449
3450 /* Set up the two operands and then expand them until nothing has been
3451 changed. If we run out of room in our array, give up; this should
3452 almost never happen. */
3453
3454 ops[0].op = op0;
3455 ops[0].neg = 0;
3456 ops[1].op = op1;
3457 ops[1].neg = (code == MINUS);
3458
3459 do
3460 {
3461 changed = 0;
3462
3463 for (i = 0; i < n_ops; i++)
3464 {
3465 rtx this_op = ops[i].op;
3466 int this_neg = ops[i].neg;
3467 enum rtx_code this_code = GET_CODE (this_op);
3468
3469 switch (this_code)
3470 {
3471 case PLUS:
3472 case MINUS:
3473 if (n_ops == 7)
3474 return NULL_RTX;
3475
3476 ops[n_ops].op = XEXP (this_op, 1);
3477 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3478 n_ops++;
3479
3480 ops[i].op = XEXP (this_op, 0);
3481 input_ops++;
3482 changed = 1;
3483 canonicalized |= this_neg;
3484 break;
3485
3486 case NEG:
3487 ops[i].op = XEXP (this_op, 0);
3488 ops[i].neg = ! this_neg;
3489 changed = 1;
3490 canonicalized = 1;
3491 break;
3492
3493 case CONST:
3494 if (n_ops < 7
3495 && GET_CODE (XEXP (this_op, 0)) == PLUS
3496 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3497 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3498 {
3499 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3500 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3501 ops[n_ops].neg = this_neg;
3502 n_ops++;
3503 changed = 1;
3504 canonicalized = 1;
3505 }
3506 break;
3507
3508 case NOT:
3509 /* ~a -> (-a - 1) */
3510 if (n_ops != 7)
3511 {
3512 ops[n_ops].op = constm1_rtx;
3513 ops[n_ops++].neg = this_neg;
3514 ops[i].op = XEXP (this_op, 0);
3515 ops[i].neg = !this_neg;
3516 changed = 1;
3517 canonicalized = 1;
3518 }
3519 break;
3520
3521 case CONST_INT:
3522 n_constants++;
3523 if (this_neg)
3524 {
3525 ops[i].op = neg_const_int (mode, this_op);
3526 ops[i].neg = 0;
3527 changed = 1;
3528 canonicalized = 1;
3529 }
3530 break;
3531
3532 default:
3533 break;
3534 }
3535 }
3536 }
3537 while (changed);
3538
3539 if (n_constants > 1)
3540 canonicalized = 1;
3541
3542 gcc_assert (n_ops >= 2);
3543
3544 /* If we only have two operands, we can avoid the loops. */
3545 if (n_ops == 2)
3546 {
3547 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3548 rtx lhs, rhs;
3549
3550 /* Get the two operands. Be careful with the order, especially for
3551 the cases where code == MINUS. */
3552 if (ops[0].neg && ops[1].neg)
3553 {
3554 lhs = gen_rtx_NEG (mode, ops[0].op);
3555 rhs = ops[1].op;
3556 }
3557 else if (ops[0].neg)
3558 {
3559 lhs = ops[1].op;
3560 rhs = ops[0].op;
3561 }
3562 else
3563 {
3564 lhs = ops[0].op;
3565 rhs = ops[1].op;
3566 }
3567
3568 return simplify_const_binary_operation (code, mode, lhs, rhs);
3569 }
3570
3571 /* Now simplify each pair of operands until nothing changes. */
3572 do
3573 {
3574 /* Insertion sort is good enough for an eight-element array. */
3575 for (i = 1; i < n_ops; i++)
3576 {
3577 struct simplify_plus_minus_op_data save;
3578 j = i - 1;
3579 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3580 continue;
3581
3582 canonicalized = 1;
3583 save = ops[i];
3584 do
3585 ops[j + 1] = ops[j];
3586 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3587 ops[j + 1] = save;
3588 }
3589
3590 /* This is only useful the first time through. */
3591 if (!canonicalized)
3592 return NULL_RTX;
3593
3594 changed = 0;
3595 for (i = n_ops - 1; i > 0; i--)
3596 for (j = i - 1; j >= 0; j--)
3597 {
3598 rtx lhs = ops[j].op, rhs = ops[i].op;
3599 int lneg = ops[j].neg, rneg = ops[i].neg;
3600
3601 if (lhs != 0 && rhs != 0)
3602 {
3603 enum rtx_code ncode = PLUS;
3604
3605 if (lneg != rneg)
3606 {
3607 ncode = MINUS;
3608 if (lneg)
3609 tem = lhs, lhs = rhs, rhs = tem;
3610 }
3611 else if (swap_commutative_operands_p (lhs, rhs))
3612 tem = lhs, lhs = rhs, rhs = tem;
3613
3614 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3615 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3616 {
3617 rtx tem_lhs, tem_rhs;
3618
3619 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3620 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3621 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3622
3623 if (tem && !CONSTANT_P (tem))
3624 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3625 }
3626 else
3627 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3628
3629 /* Reject "simplifications" that just wrap the two
3630 arguments in a CONST. Failure to do so can result
3631 in infinite recursion with simplify_binary_operation
3632 when it calls us to simplify CONST operations. */
3633 if (tem
3634 && ! (GET_CODE (tem) == CONST
3635 && GET_CODE (XEXP (tem, 0)) == ncode
3636 && XEXP (XEXP (tem, 0), 0) == lhs
3637 && XEXP (XEXP (tem, 0), 1) == rhs))
3638 {
3639 lneg &= rneg;
3640 if (GET_CODE (tem) == NEG)
3641 tem = XEXP (tem, 0), lneg = !lneg;
3642 if (GET_CODE (tem) == CONST_INT && lneg)
3643 tem = neg_const_int (mode, tem), lneg = 0;
3644
3645 ops[i].op = tem;
3646 ops[i].neg = lneg;
3647 ops[j].op = NULL_RTX;
3648 changed = 1;
3649 }
3650 }
3651 }
3652
3653 /* Pack all the operands to the lower-numbered entries. */
3654 for (i = 0, j = 0; j < n_ops; j++)
3655 if (ops[j].op)
3656 {
3657 ops[i] = ops[j];
3658 i++;
3659 }
3660 n_ops = i;
3661 }
3662 while (changed);
3663
3664 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3665 if (n_ops == 2
3666 && GET_CODE (ops[1].op) == CONST_INT
3667 && CONSTANT_P (ops[0].op)
3668 && ops[0].neg)
3669 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3670
3671 /* We suppressed creation of trivial CONST expressions in the
3672 combination loop to avoid recursion. Create one manually now.
3673 The combination loop should have ensured that there is exactly
3674 one CONST_INT, and the sort will have ensured that it is last
3675 in the array and that any other constant will be next-to-last. */
3676
3677 if (n_ops > 1
3678 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3679 && CONSTANT_P (ops[n_ops - 2].op))
3680 {
3681 rtx value = ops[n_ops - 1].op;
3682 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3683 value = neg_const_int (mode, value);
3684 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3685 n_ops--;
3686 }
3687
3688 /* Put a non-negated operand first, if possible. */
3689
3690 for (i = 0; i < n_ops && ops[i].neg; i++)
3691 continue;
3692 if (i == n_ops)
3693 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3694 else if (i != 0)
3695 {
3696 tem = ops[0].op;
3697 ops[0] = ops[i];
3698 ops[i].op = tem;
3699 ops[i].neg = 1;
3700 }
3701
3702 /* Now make the result by performing the requested operations. */
3703 result = ops[0].op;
3704 for (i = 1; i < n_ops; i++)
3705 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3706 mode, result, ops[i].op);
3707
3708 return result;
3709 }
3710
3711 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3712 static bool
3713 plus_minus_operand_p (const_rtx x)
3714 {
3715 return GET_CODE (x) == PLUS
3716 || GET_CODE (x) == MINUS
3717 || (GET_CODE (x) == CONST
3718 && GET_CODE (XEXP (x, 0)) == PLUS
3719 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3720 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3721 }
3722
3723 /* Like simplify_binary_operation except used for relational operators.
3724 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3725 not also be VOIDmode.
3726
3727 CMP_MODE specifies the mode in which the comparison is done, so it is
3728 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3729 the operands or, if both are VOIDmode, the operands are compared in
3730 "infinite precision". */
3731 rtx
3732 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3733 enum machine_mode cmp_mode, rtx op0, rtx op1)
3734 {
3735 rtx tem, trueop0, trueop1;
3736
3737 if (cmp_mode == VOIDmode)
3738 cmp_mode = GET_MODE (op0);
3739 if (cmp_mode == VOIDmode)
3740 cmp_mode = GET_MODE (op1);
3741
3742 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3743 if (tem)
3744 {
3745 if (SCALAR_FLOAT_MODE_P (mode))
3746 {
3747 if (tem == const0_rtx)
3748 return CONST0_RTX (mode);
3749 #ifdef FLOAT_STORE_FLAG_VALUE
3750 {
3751 REAL_VALUE_TYPE val;
3752 val = FLOAT_STORE_FLAG_VALUE (mode);
3753 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3754 }
3755 #else
3756 return NULL_RTX;
3757 #endif
3758 }
3759 if (VECTOR_MODE_P (mode))
3760 {
3761 if (tem == const0_rtx)
3762 return CONST0_RTX (mode);
3763 #ifdef VECTOR_STORE_FLAG_VALUE
3764 {
3765 int i, units;
3766 rtvec v;
3767
3768 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3769 if (val == NULL_RTX)
3770 return NULL_RTX;
3771 if (val == const1_rtx)
3772 return CONST1_RTX (mode);
3773
3774 units = GET_MODE_NUNITS (mode);
3775 v = rtvec_alloc (units);
3776 for (i = 0; i < units; i++)
3777 RTVEC_ELT (v, i) = val;
3778 return gen_rtx_raw_CONST_VECTOR (mode, v);
3779 }
3780 #else
3781 return NULL_RTX;
3782 #endif
3783 }
3784
3785 return tem;
3786 }
3787
3788 /* For the following tests, ensure const0_rtx is op1. */
3789 if (swap_commutative_operands_p (op0, op1)
3790 || (op0 == const0_rtx && op1 != const0_rtx))
3791 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3792
3793 /* If op0 is a compare, extract the comparison arguments from it. */
3794 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3795 return simplify_relational_operation (code, mode, VOIDmode,
3796 XEXP (op0, 0), XEXP (op0, 1));
3797
3798 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3799 || CC0_P (op0))
3800 return NULL_RTX;
3801
3802 trueop0 = avoid_constant_pool_reference (op0);
3803 trueop1 = avoid_constant_pool_reference (op1);
3804 return simplify_relational_operation_1 (code, mode, cmp_mode,
3805 trueop0, trueop1);
3806 }
3807
3808 /* This part of simplify_relational_operation is only used when CMP_MODE
3809 is not in class MODE_CC (i.e. it is a real comparison).
3810
3811 MODE is the mode of the result, while CMP_MODE specifies the mode in
3812 which the comparison is done, so it is the mode of the operands. */
3813
3814 static rtx
3815 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3816 enum machine_mode cmp_mode, rtx op0, rtx op1)
3817 {
3818 enum rtx_code op0code = GET_CODE (op0);
3819
3820 if (op1 == const0_rtx && COMPARISON_P (op0))
3821 {
3822 /* If op0 is a comparison, extract the comparison arguments
3823 from it. */
3824 if (code == NE)
3825 {
3826 if (GET_MODE (op0) == mode)
3827 return simplify_rtx (op0);
3828 else
3829 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3830 XEXP (op0, 0), XEXP (op0, 1));
3831 }
3832 else if (code == EQ)
3833 {
3834 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3835 if (new_code != UNKNOWN)
3836 return simplify_gen_relational (new_code, mode, VOIDmode,
3837 XEXP (op0, 0), XEXP (op0, 1));
3838 }
3839 }
3840
3841 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3842 if ((code == LTU || code == GEU)
3843 && GET_CODE (op0) == PLUS
3844 && rtx_equal_p (op1, XEXP (op0, 1))
3845 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3846 && !rtx_equal_p (op1, XEXP (op0, 0)))
3847 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
3848
3849 if (op1 == const0_rtx)
3850 {
3851 /* Canonicalize (GTU x 0) as (NE x 0). */
3852 if (code == GTU)
3853 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3854 /* Canonicalize (LEU x 0) as (EQ x 0). */
3855 if (code == LEU)
3856 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3857 }
3858 else if (op1 == const1_rtx)
3859 {
3860 switch (code)
3861 {
3862 case GE:
3863 /* Canonicalize (GE x 1) as (GT x 0). */
3864 return simplify_gen_relational (GT, mode, cmp_mode,
3865 op0, const0_rtx);
3866 case GEU:
3867 /* Canonicalize (GEU x 1) as (NE x 0). */
3868 return simplify_gen_relational (NE, mode, cmp_mode,
3869 op0, const0_rtx);
3870 case LT:
3871 /* Canonicalize (LT x 1) as (LE x 0). */
3872 return simplify_gen_relational (LE, mode, cmp_mode,
3873 op0, const0_rtx);
3874 case LTU:
3875 /* Canonicalize (LTU x 1) as (EQ x 0). */
3876 return simplify_gen_relational (EQ, mode, cmp_mode,
3877 op0, const0_rtx);
3878 default:
3879 break;
3880 }
3881 }
3882 else if (op1 == constm1_rtx)
3883 {
3884 /* Canonicalize (LE x -1) as (LT x 0). */
3885 if (code == LE)
3886 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3887 /* Canonicalize (GT x -1) as (GE x 0). */
3888 if (code == GT)
3889 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3890 }
3891
3892 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3893 if ((code == EQ || code == NE)
3894 && (op0code == PLUS || op0code == MINUS)
3895 && CONSTANT_P (op1)
3896 && CONSTANT_P (XEXP (op0, 1))
3897 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3898 {
3899 rtx x = XEXP (op0, 0);
3900 rtx c = XEXP (op0, 1);
3901
3902 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3903 cmp_mode, op1, c);
3904 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3905 }
3906
3907 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3908 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3909 if (code == NE
3910 && op1 == const0_rtx
3911 && GET_MODE_CLASS (mode) == MODE_INT
3912 && cmp_mode != VOIDmode
3913 /* ??? Work-around BImode bugs in the ia64 backend. */
3914 && mode != BImode
3915 && cmp_mode != BImode
3916 && nonzero_bits (op0, cmp_mode) == 1
3917 && STORE_FLAG_VALUE == 1)
3918 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3919 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3920 : lowpart_subreg (mode, op0, cmp_mode);
3921
3922 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3923 if ((code == EQ || code == NE)
3924 && op1 == const0_rtx
3925 && op0code == XOR)
3926 return simplify_gen_relational (code, mode, cmp_mode,
3927 XEXP (op0, 0), XEXP (op0, 1));
3928
3929 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3930 if ((code == EQ || code == NE)
3931 && op0code == XOR
3932 && rtx_equal_p (XEXP (op0, 0), op1)
3933 && !side_effects_p (XEXP (op0, 0)))
3934 return simplify_gen_relational (code, mode, cmp_mode,
3935 XEXP (op0, 1), const0_rtx);
3936
3937 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3938 if ((code == EQ || code == NE)
3939 && op0code == XOR
3940 && rtx_equal_p (XEXP (op0, 1), op1)
3941 && !side_effects_p (XEXP (op0, 1)))
3942 return simplify_gen_relational (code, mode, cmp_mode,
3943 XEXP (op0, 0), const0_rtx);
3944
3945 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3946 if ((code == EQ || code == NE)
3947 && op0code == XOR
3948 && (GET_CODE (op1) == CONST_INT
3949 || GET_CODE (op1) == CONST_DOUBLE)
3950 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3951 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3952 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3953 simplify_gen_binary (XOR, cmp_mode,
3954 XEXP (op0, 1), op1));
3955
3956 if (op0code == POPCOUNT && op1 == const0_rtx)
3957 switch (code)
3958 {
3959 case EQ:
3960 case LE:
3961 case LEU:
3962 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3963 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3964 XEXP (op0, 0), const0_rtx);
3965
3966 case NE:
3967 case GT:
3968 case GTU:
3969 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3970 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3971 XEXP (op0, 0), const0_rtx);
3972
3973 default:
3974 break;
3975 }
3976
3977 return NULL_RTX;
3978 }
3979
3980 enum
3981 {
3982 CMP_EQ = 1,
3983 CMP_LT = 2,
3984 CMP_GT = 4,
3985 CMP_LTU = 8,
3986 CMP_GTU = 16
3987 };
3988
3989
3990 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
3991 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
3992 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
3993 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
3994 For floating-point comparisons, assume that the operands were ordered. */
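/* For example, operands known to satisfy op0 < op1 both as signed and as
   unsigned values are described by CMP_LT | CMP_LTU, and
   comparison_result (LEU, CMP_LT | CMP_LTU) then yields const_true_rtx.  */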
3995
3996 static rtx
3997 comparison_result (enum rtx_code code, int known_results)
3998 {
3999 switch (code)
4000 {
4001 case EQ:
4002 case UNEQ:
4003 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4004 case NE:
4005 case LTGT:
4006 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4007
4008 case LT:
4009 case UNLT:
4010 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4011 case GE:
4012 case UNGE:
4013 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4014
4015 case GT:
4016 case UNGT:
4017 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4018 case LE:
4019 case UNLE:
4020 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4021
4022 case LTU:
4023 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4024 case GEU:
4025 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4026
4027 case GTU:
4028 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4029 case LEU:
4030 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4031
4032 case ORDERED:
4033 return const_true_rtx;
4034 case UNORDERED:
4035 return const0_rtx;
4036 default:
4037 gcc_unreachable ();
4038 }
4039 }
4040
4041 /* Check if the given comparison (done in the given MODE) is actually a
4042 tautology or a contradiction.
4043 If no simplification is possible, this function returns zero.
4044 Otherwise, it returns either const_true_rtx or const0_rtx. */
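/* For example (a sketch, assuming 32-bit SImode):

     simplify_const_relational_operation (GT, SImode, GEN_INT (5), GEN_INT (3))

   returns const_true_rtx, whereas the same comparison of two distinct
   pseudo registers returns zero because nothing is known about their
   values.  */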
4045
4046 rtx
4047 simplify_const_relational_operation (enum rtx_code code,
4048 enum machine_mode mode,
4049 rtx op0, rtx op1)
4050 {
4051 rtx tem;
4052 rtx trueop0;
4053 rtx trueop1;
4054
4055 gcc_assert (mode != VOIDmode
4056 || (GET_MODE (op0) == VOIDmode
4057 && GET_MODE (op1) == VOIDmode));
4058
4059 /* If op0 is a compare, extract the comparison arguments from it. */
4060 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4061 {
4062 op1 = XEXP (op0, 1);
4063 op0 = XEXP (op0, 0);
4064
4065 if (GET_MODE (op0) != VOIDmode)
4066 mode = GET_MODE (op0);
4067 else if (GET_MODE (op1) != VOIDmode)
4068 mode = GET_MODE (op1);
4069 else
4070 return 0;
4071 }
4072
4073 /* We can't simplify MODE_CC values since we don't know what the
4074 actual comparison is. */
4075 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4076 return 0;
4077
4078 /* Make sure the constant is second. */
4079 if (swap_commutative_operands_p (op0, op1))
4080 {
4081 tem = op0, op0 = op1, op1 = tem;
4082 code = swap_condition (code);
4083 }
4084
4085 trueop0 = avoid_constant_pool_reference (op0);
4086 trueop1 = avoid_constant_pool_reference (op1);
4087
4088 /* For integer comparisons of A and B maybe we can simplify A - B and can
4089 then simplify a comparison of that with zero. If A and B are both either
4090 a register or a CONST_INT, this can't help; testing for these cases will
4091 prevent infinite recursion here and speed things up.
4092
4093 We can only do this for EQ and NE comparisons as otherwise we may
4094 lose or introduce overflow which we cannot disregard as undefined as
4095 we do not know the signedness of the operation on either the left or
4096 the right hand side of the comparison. */
4097
4098 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4099 && (code == EQ || code == NE)
4100 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
4101 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
4102 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4103 /* We cannot do this if tem is a nonzero address. */
4104 && ! nonzero_address_p (tem))
4105 return simplify_const_relational_operation (signed_condition (code),
4106 mode, tem, const0_rtx);
4107
4108 if (! HONOR_NANS (mode) && code == ORDERED)
4109 return const_true_rtx;
4110
4111 if (! HONOR_NANS (mode) && code == UNORDERED)
4112 return const0_rtx;
4113
4114 /* For modes without NaNs, if the two operands are equal, we know the
4115 result except if they have side-effects. Even with NaNs we know
4116 the result of unordered comparisons and, if signaling NaNs are
4117 irrelevant, also the result of LT/GT/LTGT. */
4118 if ((! HONOR_NANS (GET_MODE (trueop0))
4119 || code == UNEQ || code == UNLE || code == UNGE
4120 || ((code == LT || code == GT || code == LTGT)
4121 && ! HONOR_SNANS (GET_MODE (trueop0))))
4122 && rtx_equal_p (trueop0, trueop1)
4123 && ! side_effects_p (trueop0))
4124 return comparison_result (code, CMP_EQ);
4125
4126 /* If the operands are floating-point constants, see if we can fold
4127 the result. */
4128 if (GET_CODE (trueop0) == CONST_DOUBLE
4129 && GET_CODE (trueop1) == CONST_DOUBLE
4130 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4131 {
4132 REAL_VALUE_TYPE d0, d1;
4133
4134 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4135 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4136
4137 /* Comparisons are unordered iff at least one of the values is NaN. */
4138 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4139 switch (code)
4140 {
4141 case UNEQ:
4142 case UNLT:
4143 case UNGT:
4144 case UNLE:
4145 case UNGE:
4146 case NE:
4147 case UNORDERED:
4148 return const_true_rtx;
4149 case EQ:
4150 case LT:
4151 case GT:
4152 case LE:
4153 case GE:
4154 case LTGT:
4155 case ORDERED:
4156 return const0_rtx;
4157 default:
4158 return 0;
4159 }
4160
4161 return comparison_result (code,
4162 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4163 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4164 }
4165
4166 /* Otherwise, see if the operands are both integers. */
4167 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4168 && (GET_CODE (trueop0) == CONST_DOUBLE
4169 || GET_CODE (trueop0) == CONST_INT)
4170 && (GET_CODE (trueop1) == CONST_DOUBLE
4171 || GET_CODE (trueop1) == CONST_INT))
4172 {
4173 int width = GET_MODE_BITSIZE (mode);
4174 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4175 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4176
4177 /* Get the two words comprising each integer constant. */
4178 if (GET_CODE (trueop0) == CONST_DOUBLE)
4179 {
4180 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4181 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4182 }
4183 else
4184 {
4185 l0u = l0s = INTVAL (trueop0);
4186 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4187 }
4188
4189 if (GET_CODE (trueop1) == CONST_DOUBLE)
4190 {
4191 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4192 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4193 }
4194 else
4195 {
4196 l1u = l1s = INTVAL (trueop1);
4197 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4198 }
4199
4200 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4201 we have to sign or zero-extend the values. */
4202 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4203 {
4204 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4205 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4206
4207 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4208 l0s |= ((HOST_WIDE_INT) (-1) << width);
4209
4210 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4211 l1s |= ((HOST_WIDE_INT) (-1) << width);
4212 }
4213 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4214 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4215
4216 if (h0u == h1u && l0u == l1u)
4217 return comparison_result (code, CMP_EQ);
4218 else
4219 {
4220 int cr;
4221 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4222 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4223 return comparison_result (code, cr);
4224 }
4225 }
4226
4227 /* Optimize comparisons with upper and lower bounds. */
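  /* For example, if nonzero_bits shows that only the low four bits of
     TRUEOP0 can ever be set, MMIN and MMAX become 0 and 15 below, so a
     comparison such as (ltu trueop0 (const_int 16)) folds to
     const_true_rtx.  */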
4228 if (SCALAR_INT_MODE_P (mode)
4229 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4230 && GET_CODE (trueop1) == CONST_INT)
4231 {
4232 int sign;
4233 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4234 HOST_WIDE_INT val = INTVAL (trueop1);
4235 HOST_WIDE_INT mmin, mmax;
4236
4237 if (code == GEU
4238 || code == LEU
4239 || code == GTU
4240 || code == LTU)
4241 sign = 0;
4242 else
4243 sign = 1;
4244
4245 /* Get a reduced range if the sign bit is zero. */
4246 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4247 {
4248 mmin = 0;
4249 mmax = nonzero;
4250 }
4251 else
4252 {
4253 rtx mmin_rtx, mmax_rtx;
4254 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4255
4256 mmin = INTVAL (mmin_rtx);
4257 mmax = INTVAL (mmax_rtx);
4258 if (sign)
4259 {
4260 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4261
4262 mmin >>= (sign_copies - 1);
4263 mmax >>= (sign_copies - 1);
4264 }
4265 }
4266
4267 switch (code)
4268 {
4269 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4270 case GEU:
4271 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4272 return const_true_rtx;
4273 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4274 return const0_rtx;
4275 break;
4276 case GE:
4277 if (val <= mmin)
4278 return const_true_rtx;
4279 if (val > mmax)
4280 return const0_rtx;
4281 break;
4282
4283 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4284 case LEU:
4285 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4286 return const_true_rtx;
4287 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4288 return const0_rtx;
4289 break;
4290 case LE:
4291 if (val >= mmax)
4292 return const_true_rtx;
4293 if (val < mmin)
4294 return const0_rtx;
4295 break;
4296
4297 case EQ:
4298 /* x == y is always false for y out of range. */
4299 if (val < mmin || val > mmax)
4300 return const0_rtx;
4301 break;
4302
4303 /* x > y is always false for y >= mmax, always true for y < mmin. */
4304 case GTU:
4305 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4306 return const0_rtx;
4307 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4308 return const_true_rtx;
4309 break;
4310 case GT:
4311 if (val >= mmax)
4312 return const0_rtx;
4313 if (val < mmin)
4314 return const_true_rtx;
4315 break;
4316
4317 /* x < y is always false for y <= mmin, always true for y > mmax. */
4318 case LTU:
4319 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4320 return const0_rtx;
4321 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4322 return const_true_rtx;
4323 break;
4324 case LT:
4325 if (val <= mmin)
4326 return const0_rtx;
4327 if (val > mmax)
4328 return const_true_rtx;
4329 break;
4330
4331 case NE:
4332 /* x != y is always true for y out of range. */
4333 if (val < mmin || val > mmax)
4334 return const_true_rtx;
4335 break;
4336
4337 default:
4338 break;
4339 }
4340 }
4341
4342 /* Optimize integer comparisons with zero. */
4343 if (trueop1 == const0_rtx)
4344 {
4345 /* Some addresses are known to be nonzero. We don't know
4346 their sign, but equality comparisons are known. */
4347 if (nonzero_address_p (trueop0))
4348 {
4349 if (code == EQ || code == LEU)
4350 return const0_rtx;
4351 if (code == NE || code == GTU)
4352 return const_true_rtx;
4353 }
4354
4355 /* See if the first operand is an IOR with a constant. If so, we
4356 may be able to determine the result of this comparison. */
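      /* For example, (ior x (const_int 4)) is known to be nonzero, so
	 comparing it against zero with NE or GTU folds to const_true_rtx,
	 and with EQ or LEU to const0_rtx.  */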
4357 if (GET_CODE (op0) == IOR)
4358 {
4359 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4360 if (GET_CODE (inner_const) == CONST_INT && inner_const != const0_rtx)
4361 {
4362 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4363 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4364 && (INTVAL (inner_const)
4365 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4366
4367 switch (code)
4368 {
4369 case EQ:
4370 case LEU:
4371 return const0_rtx;
4372 case NE:
4373 case GTU:
4374 return const_true_rtx;
4375 case LT:
4376 case LE:
4377 if (has_sign)
4378 return const_true_rtx;
4379 break;
4380 case GT:
4381 case GE:
4382 if (has_sign)
4383 return const0_rtx;
4384 break;
4385 default:
4386 break;
4387 }
4388 }
4389 }
4390 }
4391
4392 /* Optimize comparison of ABS with zero. */
4393 if (trueop1 == CONST0_RTX (mode)
4394 && (GET_CODE (trueop0) == ABS
4395 || (GET_CODE (trueop0) == FLOAT_EXTEND
4396 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4397 {
4398 switch (code)
4399 {
4400 case LT:
4401 /* Optimize abs(x) < 0.0. */
4402 if (!HONOR_SNANS (mode)
4403 && (!INTEGRAL_MODE_P (mode)
4404 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4405 {
4406 if (INTEGRAL_MODE_P (mode)
4407 && (issue_strict_overflow_warning
4408 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4409 warning (OPT_Wstrict_overflow,
4410 ("assuming signed overflow does not occur when "
4411 "assuming abs (x) < 0 is false"));
4412 return const0_rtx;
4413 }
4414 break;
4415
4416 case GE:
4417 /* Optimize abs(x) >= 0.0. */
4418 if (!HONOR_NANS (mode)
4419 && (!INTEGRAL_MODE_P (mode)
4420 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4421 {
4422 if (INTEGRAL_MODE_P (mode)
4423 && (issue_strict_overflow_warning
4424 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4425 warning (OPT_Wstrict_overflow,
4426 ("assuming signed overflow does not occur when "
4427 "assuming abs (x) >= 0 is true"));
4428 return const_true_rtx;
4429 }
4430 break;
4431
4432 case UNGE:
4433 /* Optimize ! (abs(x) < 0.0). */
4434 return const_true_rtx;
4435
4436 default:
4437 break;
4438 }
4439 }
4440
4441 return 0;
4442 }
4443 \f
4444 /* Simplify CODE, an operation with result mode MODE and three operands,
4445 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4446 a constant. Return 0 if no simplification is possible. */
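/* For example (a sketch), a constant condition lets the IF_THEN_ELSE case
   below select an arm directly:

     simplify_ternary_operation (IF_THEN_ELSE, SImode, VOIDmode,
				 const1_rtx, a, b)

   returns A, while passing const0_rtx as the condition would return B.  */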
4447
4448 rtx
4449 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4450 enum machine_mode op0_mode, rtx op0, rtx op1,
4451 rtx op2)
4452 {
4453 unsigned int width = GET_MODE_BITSIZE (mode);
4454
4455 /* VOIDmode means "infinite" precision. */
4456 if (width == 0)
4457 width = HOST_BITS_PER_WIDE_INT;
4458
4459 switch (code)
4460 {
4461 case SIGN_EXTRACT:
4462 case ZERO_EXTRACT:
4463 if (GET_CODE (op0) == CONST_INT
4464 && GET_CODE (op1) == CONST_INT
4465 && GET_CODE (op2) == CONST_INT
4466 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4467 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4468 {
4469 /* Extracting a bit-field from a constant */
4470 HOST_WIDE_INT val = INTVAL (op0);
4471
4472 if (BITS_BIG_ENDIAN)
4473 val >>= (GET_MODE_BITSIZE (op0_mode)
4474 - INTVAL (op2) - INTVAL (op1));
4475 else
4476 val >>= INTVAL (op2);
4477
4478 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4479 {
4480 /* First zero-extend. */
4481 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4482 /* If desired, propagate sign bit. */
4483 if (code == SIGN_EXTRACT
4484 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4485 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4486 }
4487
4488 /* Clear the bits that don't belong in our mode,
4489 unless they and our sign bit are all one.
4490 So we get either a reasonable negative value or a reasonable
4491 unsigned value for this mode. */
4492 if (width < HOST_BITS_PER_WIDE_INT
4493 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4494 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4495 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4496
4497 return gen_int_mode (val, mode);
4498 }
4499 break;
4500
4501 case IF_THEN_ELSE:
4502 if (GET_CODE (op0) == CONST_INT)
4503 return op0 != const0_rtx ? op1 : op2;
4504
4505 /* Convert c ? a : a into "a". */
4506 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4507 return op1;
4508
4509 /* Convert a != b ? a : b into "a". */
4510 if (GET_CODE (op0) == NE
4511 && ! side_effects_p (op0)
4512 && ! HONOR_NANS (mode)
4513 && ! HONOR_SIGNED_ZEROS (mode)
4514 && ((rtx_equal_p (XEXP (op0, 0), op1)
4515 && rtx_equal_p (XEXP (op0, 1), op2))
4516 || (rtx_equal_p (XEXP (op0, 0), op2)
4517 && rtx_equal_p (XEXP (op0, 1), op1))))
4518 return op1;
4519
4520 /* Convert a == b ? a : b into "b". */
4521 if (GET_CODE (op0) == EQ
4522 && ! side_effects_p (op0)
4523 && ! HONOR_NANS (mode)
4524 && ! HONOR_SIGNED_ZEROS (mode)
4525 && ((rtx_equal_p (XEXP (op0, 0), op1)
4526 && rtx_equal_p (XEXP (op0, 1), op2))
4527 || (rtx_equal_p (XEXP (op0, 0), op2)
4528 && rtx_equal_p (XEXP (op0, 1), op1))))
4529 return op2;
4530
4531 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4532 {
4533 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4534 ? GET_MODE (XEXP (op0, 1))
4535 : GET_MODE (XEXP (op0, 0)));
4536 rtx temp;
4537
4538 /* Look for happy constants in op1 and op2. */
4539 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4540 {
4541 HOST_WIDE_INT t = INTVAL (op1);
4542 HOST_WIDE_INT f = INTVAL (op2);
4543
4544 if (t == STORE_FLAG_VALUE && f == 0)
4545 code = GET_CODE (op0);
4546 else if (t == 0 && f == STORE_FLAG_VALUE)
4547 {
4548 enum rtx_code tmp;
4549 tmp = reversed_comparison_code (op0, NULL_RTX);
4550 if (tmp == UNKNOWN)
4551 break;
4552 code = tmp;
4553 }
4554 else
4555 break;
4556
4557 return simplify_gen_relational (code, mode, cmp_mode,
4558 XEXP (op0, 0), XEXP (op0, 1));
4559 }
4560
4561 if (cmp_mode == VOIDmode)
4562 cmp_mode = op0_mode;
4563 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4564 cmp_mode, XEXP (op0, 0),
4565 XEXP (op0, 1));
4566
4567 /* See if any simplifications were possible. */
4568 if (temp)
4569 {
4570 if (GET_CODE (temp) == CONST_INT)
4571 return temp == const0_rtx ? op2 : op1;
4572 else if (temp)
4573 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4574 }
4575 }
4576 break;
4577
4578 case VEC_MERGE:
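      /* With a CONST_INT selector, bit I of OP2 chooses element I of OP0
	 when set and element I of OP1 when clear, so an all-ones mask
	 yields OP0 and a zero mask yields OP1.  */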
4579 gcc_assert (GET_MODE (op0) == mode);
4580 gcc_assert (GET_MODE (op1) == mode);
4581 gcc_assert (VECTOR_MODE_P (mode));
4582 op2 = avoid_constant_pool_reference (op2);
4583 if (GET_CODE (op2) == CONST_INT)
4584 {
4585 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4586 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4587 int mask = (1 << n_elts) - 1;
4588
4589 if (!(INTVAL (op2) & mask))
4590 return op1;
4591 if ((INTVAL (op2) & mask) == mask)
4592 return op0;
4593
4594 op0 = avoid_constant_pool_reference (op0);
4595 op1 = avoid_constant_pool_reference (op1);
4596 if (GET_CODE (op0) == CONST_VECTOR
4597 && GET_CODE (op1) == CONST_VECTOR)
4598 {
4599 rtvec v = rtvec_alloc (n_elts);
4600 unsigned int i;
4601
4602 for (i = 0; i < n_elts; i++)
4603 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4604 ? CONST_VECTOR_ELT (op0, i)
4605 : CONST_VECTOR_ELT (op1, i));
4606 return gen_rtx_CONST_VECTOR (mode, v);
4607 }
4608 }
4609 break;
4610
4611 default:
4612 gcc_unreachable ();
4613 }
4614
4615 return 0;
4616 }
4617
4618 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4619 or CONST_VECTOR,
4620 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4621
4622 Works by unpacking OP into a collection of 8-bit values
4623 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4624 and then repacking them again for OUTERMODE. */
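/* For example, on a little-endian target the lowpart QImode subreg of
   (const_int 0x12345678) in SImode unpacks the value bytes as
   78 56 34 12, selects the byte at offset 0, and repacks it as
   (const_int 0x78).  */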
4625
4626 static rtx
4627 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4628 enum machine_mode innermode, unsigned int byte)
4629 {
4630 /* We support up to 512-bit values (for V8DFmode). */
4631 enum {
4632 max_bitsize = 512,
4633 value_bit = 8,
4634 value_mask = (1 << value_bit) - 1
4635 };
4636 unsigned char value[max_bitsize / value_bit];
4637 int value_start;
4638 int i;
4639 int elem;
4640
4641 int num_elem;
4642 rtx * elems;
4643 int elem_bitsize;
4644 rtx result_s;
4645 rtvec result_v = NULL;
4646 enum mode_class outer_class;
4647 enum machine_mode outer_submode;
4648
4649 /* Some ports misuse CCmode. */
4650 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4651 return op;
4652
4653 /* We have no way to represent a complex constant at the rtl level. */
4654 if (COMPLEX_MODE_P (outermode))
4655 return NULL_RTX;
4656
4657 /* Unpack the value. */
4658
4659 if (GET_CODE (op) == CONST_VECTOR)
4660 {
4661 num_elem = CONST_VECTOR_NUNITS (op);
4662 elems = &CONST_VECTOR_ELT (op, 0);
4663 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4664 }
4665 else
4666 {
4667 num_elem = 1;
4668 elems = &op;
4669 elem_bitsize = max_bitsize;
4670 }
4671 /* If this asserts, it is too complicated; reducing value_bit may help. */
4672 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4673 /* I don't know how to handle endianness of sub-units. */
4674 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4675
4676 for (elem = 0; elem < num_elem; elem++)
4677 {
4678 unsigned char * vp;
4679 rtx el = elems[elem];
4680
4681 /* Vectors are kept in target memory order. (This is probably
4682 a mistake.) */
4683 {
4684 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4685 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4686 / BITS_PER_UNIT);
4687 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4688 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4689 unsigned bytele = (subword_byte % UNITS_PER_WORD
4690 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4691 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4692 }
4693
4694 switch (GET_CODE (el))
4695 {
4696 case CONST_INT:
4697 for (i = 0;
4698 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4699 i += value_bit)
4700 *vp++ = INTVAL (el) >> i;
4701 /* CONST_INTs are always logically sign-extended. */
4702 for (; i < elem_bitsize; i += value_bit)
4703 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4704 break;
4705
4706 case CONST_DOUBLE:
4707 if (GET_MODE (el) == VOIDmode)
4708 {
4709 /* If this triggers, someone should have generated a
4710 CONST_INT instead. */
4711 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4712
4713 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4714 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4715 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4716 {
4717 *vp++
4718 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4719 i += value_bit;
4720 }
4721 /* It shouldn't matter what's done here, so fill it with
4722 zero. */
4723 for (; i < elem_bitsize; i += value_bit)
4724 *vp++ = 0;
4725 }
4726 else
4727 {
4728 long tmp[max_bitsize / 32];
4729 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4730
4731 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4732 gcc_assert (bitsize <= elem_bitsize);
4733 gcc_assert (bitsize % value_bit == 0);
4734
4735 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4736 GET_MODE (el));
4737
4738 /* real_to_target produces its result in words affected by
4739 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4740 and use WORDS_BIG_ENDIAN instead; see the documentation
4741 of SUBREG in rtl.texi. */
4742 for (i = 0; i < bitsize; i += value_bit)
4743 {
4744 int ibase;
4745 if (WORDS_BIG_ENDIAN)
4746 ibase = bitsize - 1 - i;
4747 else
4748 ibase = i;
4749 *vp++ = tmp[ibase / 32] >> i % 32;
4750 }
4751
4752 /* It shouldn't matter what's done here, so fill it with
4753 zero. */
4754 for (; i < elem_bitsize; i += value_bit)
4755 *vp++ = 0;
4756 }
4757 break;
4758
4759 case CONST_FIXED:
4760 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4761 {
4762 for (i = 0; i < elem_bitsize; i += value_bit)
4763 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4764 }
4765 else
4766 {
4767 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4768 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4769 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4770 i += value_bit)
4771 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4772 >> (i - HOST_BITS_PER_WIDE_INT);
4773 for (; i < elem_bitsize; i += value_bit)
4774 *vp++ = 0;
4775 }
4776 break;
4777
4778 default:
4779 gcc_unreachable ();
4780 }
4781 }
4782
4783 /* Now, pick the right byte to start with. */
4784 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4785 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4786 will already have offset 0. */
4787 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4788 {
4789 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4790 - byte);
4791 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4792 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4793 byte = (subword_byte % UNITS_PER_WORD
4794 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4795 }
4796
4797 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4798 so if it's become negative it will instead be very large.) */
4799 gcc_assert (byte < GET_MODE_SIZE (innermode));
4800
4801 /* Convert from bytes to chunks of size value_bit. */
4802 value_start = byte * (BITS_PER_UNIT / value_bit);
4803
4804 /* Re-pack the value. */
4805
4806 if (VECTOR_MODE_P (outermode))
4807 {
4808 num_elem = GET_MODE_NUNITS (outermode);
4809 result_v = rtvec_alloc (num_elem);
4810 elems = &RTVEC_ELT (result_v, 0);
4811 outer_submode = GET_MODE_INNER (outermode);
4812 }
4813 else
4814 {
4815 num_elem = 1;
4816 elems = &result_s;
4817 outer_submode = outermode;
4818 }
4819
4820 outer_class = GET_MODE_CLASS (outer_submode);
4821 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4822
4823 gcc_assert (elem_bitsize % value_bit == 0);
4824 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4825
4826 for (elem = 0; elem < num_elem; elem++)
4827 {
4828 unsigned char *vp;
4829
4830 /* Vectors are stored in target memory order. (This is probably
4831 a mistake.) */
4832 {
4833 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4834 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4835 / BITS_PER_UNIT);
4836 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4837 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4838 unsigned bytele = (subword_byte % UNITS_PER_WORD
4839 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4840 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4841 }
4842
4843 switch (outer_class)
4844 {
4845 case MODE_INT:
4846 case MODE_PARTIAL_INT:
4847 {
4848 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4849
4850 for (i = 0;
4851 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4852 i += value_bit)
4853 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4854 for (; i < elem_bitsize; i += value_bit)
4855 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4856 << (i - HOST_BITS_PER_WIDE_INT));
4857
4858 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4859 know why. */
4860 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4861 elems[elem] = gen_int_mode (lo, outer_submode);
4862 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4863 elems[elem] = immed_double_const (lo, hi, outer_submode);
4864 else
4865 return NULL_RTX;
4866 }
4867 break;
4868
4869 case MODE_FLOAT:
4870 case MODE_DECIMAL_FLOAT:
4871 {
4872 REAL_VALUE_TYPE r;
4873 long tmp[max_bitsize / 32];
4874
4875 /* real_from_target wants its input in words affected by
4876 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4877 and use WORDS_BIG_ENDIAN instead; see the documentation
4878 of SUBREG in rtl.texi. */
4879 for (i = 0; i < max_bitsize / 32; i++)
4880 tmp[i] = 0;
4881 for (i = 0; i < elem_bitsize; i += value_bit)
4882 {
4883 int ibase;
4884 if (WORDS_BIG_ENDIAN)
4885 ibase = elem_bitsize - 1 - i;
4886 else
4887 ibase = i;
4888 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4889 }
4890
4891 real_from_target (&r, tmp, outer_submode);
4892 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4893 }
4894 break;
4895
4896 case MODE_FRACT:
4897 case MODE_UFRACT:
4898 case MODE_ACCUM:
4899 case MODE_UACCUM:
4900 {
4901 FIXED_VALUE_TYPE f;
4902 f.data.low = 0;
4903 f.data.high = 0;
4904 f.mode = outer_submode;
4905
4906 for (i = 0;
4907 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4908 i += value_bit)
4909 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4910 for (; i < elem_bitsize; i += value_bit)
4911 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4912 << (i - HOST_BITS_PER_WIDE_INT));
4913
4914 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
4915 }
4916 break;
4917
4918 default:
4919 gcc_unreachable ();
4920 }
4921 }
4922 if (VECTOR_MODE_P (outermode))
4923 return gen_rtx_CONST_VECTOR (outermode, result_v);
4924 else
4925 return result_s;
4926 }
4927
4928 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4929 Return 0 if no simplifications are possible. */
4930 rtx
4931 simplify_subreg (enum machine_mode outermode, rtx op,
4932 enum machine_mode innermode, unsigned int byte)
4933 {
4934 /* Little bit of sanity checking. */
4935 gcc_assert (innermode != VOIDmode);
4936 gcc_assert (outermode != VOIDmode);
4937 gcc_assert (innermode != BLKmode);
4938 gcc_assert (outermode != BLKmode);
4939
4940 gcc_assert (GET_MODE (op) == innermode
4941 || GET_MODE (op) == VOIDmode);
4942
4943 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4944 gcc_assert (byte < GET_MODE_SIZE (innermode));
4945
4946 if (outermode == innermode && !byte)
4947 return op;
4948
4949 if (GET_CODE (op) == CONST_INT
4950 || GET_CODE (op) == CONST_DOUBLE
4951 || GET_CODE (op) == CONST_FIXED
4952 || GET_CODE (op) == CONST_VECTOR)
4953 return simplify_immed_subreg (outermode, op, innermode, byte);
4954
4955 /* Changing mode twice with SUBREG => just change it once,
4956 or not at all if changing back to op's starting mode. */
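  /* For example, (subreg:QI (subreg:HI (reg:SI r) 0) 0) collapses to
     (subreg:QI (reg:SI r) 0) here (offsets as on a little-endian
     target).  */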
4957 if (GET_CODE (op) == SUBREG)
4958 {
4959 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4960 int final_offset = byte + SUBREG_BYTE (op);
4961 rtx newx;
4962
4963 if (outermode == innermostmode
4964 && byte == 0 && SUBREG_BYTE (op) == 0)
4965 return SUBREG_REG (op);
4966
4967 /* The SUBREG_BYTE represents the offset, as if the value were stored
4968 in memory. An irritating exception is the paradoxical subreg, where
4969 we define SUBREG_BYTE to be 0; on big-endian machines this value
4970 would otherwise be negative. For a moment, undo this exception. */
4971 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4972 {
4973 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4974 if (WORDS_BIG_ENDIAN)
4975 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4976 if (BYTES_BIG_ENDIAN)
4977 final_offset += difference % UNITS_PER_WORD;
4978 }
4979 if (SUBREG_BYTE (op) == 0
4980 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4981 {
4982 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4983 if (WORDS_BIG_ENDIAN)
4984 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4985 if (BYTES_BIG_ENDIAN)
4986 final_offset += difference % UNITS_PER_WORD;
4987 }
4988
4989 /* See whether resulting subreg will be paradoxical. */
4990 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4991 {
4992 /* In nonparadoxical subregs we can't handle negative offsets. */
4993 if (final_offset < 0)
4994 return NULL_RTX;
4995 /* Bail out in case resulting subreg would be incorrect. */
4996 if (final_offset % GET_MODE_SIZE (outermode)
4997 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4998 return NULL_RTX;
4999 }
5000 else
5001 {
5002 int offset = 0;
5003 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5004
5005 /* In a paradoxical subreg, see if we are still looking at the lower part.
5006 If so, our SUBREG_BYTE will be 0. */
5007 if (WORDS_BIG_ENDIAN)
5008 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5009 if (BYTES_BIG_ENDIAN)
5010 offset += difference % UNITS_PER_WORD;
5011 if (offset == final_offset)
5012 final_offset = 0;
5013 else
5014 return NULL_RTX;
5015 }
5016
5017 /* Recurse for further possible simplifications. */
5018 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5019 final_offset);
5020 if (newx)
5021 return newx;
5022 if (validate_subreg (outermode, innermostmode,
5023 SUBREG_REG (op), final_offset))
5024 {
5025 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5026 if (SUBREG_PROMOTED_VAR_P (op)
5027 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5028 && GET_MODE_CLASS (outermode) == MODE_INT
5029 && IN_RANGE (GET_MODE_SIZE (outermode),
5030 GET_MODE_SIZE (innermode),
5031 GET_MODE_SIZE (innermostmode))
5032 && subreg_lowpart_p (newx))
5033 {
5034 SUBREG_PROMOTED_VAR_P (newx) = 1;
5035 SUBREG_PROMOTED_UNSIGNED_SET
5036 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5037 }
5038 return newx;
5039 }
5040 return NULL_RTX;
5041 }
5042
5043 /* Merge implicit and explicit truncations. */
5044
5045 if (GET_CODE (op) == TRUNCATE
5046 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5047 && subreg_lowpart_offset (outermode, innermode) == byte)
5048 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5049 GET_MODE (XEXP (op, 0)));
5050
5051 /* SUBREG of a hard register => just change the register number
5052 and/or mode. If the hard register is not valid in that mode,
5053 suppress this simplification. If the hard register is the stack,
5054 frame, or argument pointer, leave this as a SUBREG. */
5055
5056 if (REG_P (op)
5057 && REGNO (op) < FIRST_PSEUDO_REGISTER
5058 #ifdef CANNOT_CHANGE_MODE_CLASS
5059 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
5060 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
5061 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
5062 #endif
5063 && ((reload_completed && !frame_pointer_needed)
5064 || (REGNO (op) != FRAME_POINTER_REGNUM
5065 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
5066 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
5067 #endif
5068 ))
5069 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
5070 && REGNO (op) != ARG_POINTER_REGNUM
5071 #endif
5072 && REGNO (op) != STACK_POINTER_REGNUM
5073 && subreg_offset_representable_p (REGNO (op), innermode,
5074 byte, outermode))
5075 {
5076 unsigned int regno = REGNO (op);
5077 unsigned int final_regno
5078 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
5079
5080 /* ??? We do allow it if the current REG is not valid for
5081 its mode. This is a kludge to work around how float/complex
5082 arguments are passed on 32-bit SPARC and should be fixed. */
5083 if (HARD_REGNO_MODE_OK (final_regno, outermode)
5084 || ! HARD_REGNO_MODE_OK (regno, innermode))
5085 {
5086 rtx x;
5087 int final_offset = byte;
5088
5089 /* Adjust offset for paradoxical subregs. */
5090 if (byte == 0
5091 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5092 {
5093 int difference = (GET_MODE_SIZE (innermode)
5094 - GET_MODE_SIZE (outermode));
5095 if (WORDS_BIG_ENDIAN)
5096 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5097 if (BYTES_BIG_ENDIAN)
5098 final_offset += difference % UNITS_PER_WORD;
5099 }
5100
5101 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5102
5103 /* Propagate original regno. We don't have any way to specify
5104 the offset inside original regno, so do so only for lowpart.
5105 The information is used only by alias analysis, which cannot
5106 grok partial registers anyway. */
5107
5108 if (subreg_lowpart_offset (outermode, innermode) == byte)
5109 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5110 return x;
5111 }
5112 }
5113
5114 /* If we have a SUBREG of a register that we are replacing and we are
5115 replacing it with a MEM, make a new MEM and try replacing the
5116 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5117 or if we would be widening it. */
5118
5119 if (MEM_P (op)
5120 && ! mode_dependent_address_p (XEXP (op, 0))
5121 /* Allow splitting of volatile memory references in case we don't
5122 have an instruction to move the whole thing. */
5123 && (! MEM_VOLATILE_P (op)
5124 || ! have_insn_for (SET, innermode))
5125 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5126 return adjust_address_nv (op, outermode, byte);
5127
5128 /* Handle complex values represented as CONCAT
5129 of real and imaginary part. */
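  /* For example, with 4-byte SFmode parts, (subreg:SF (concat:SC a b) 0)
     selects the real part A and (subreg:SF (concat:SC a b) 4) the
     imaginary part B.  */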
5130 if (GET_CODE (op) == CONCAT)
5131 {
5132 unsigned int part_size, final_offset;
5133 rtx part, res;
5134
5135 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5136 if (byte < part_size)
5137 {
5138 part = XEXP (op, 0);
5139 final_offset = byte;
5140 }
5141 else
5142 {
5143 part = XEXP (op, 1);
5144 final_offset = byte - part_size;
5145 }
5146
5147 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5148 return NULL_RTX;
5149
5150 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5151 if (res)
5152 return res;
5153 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5154 return gen_rtx_SUBREG (outermode, part, final_offset);
5155 return NULL_RTX;
5156 }
5157
5158 /* Optimize SUBREG truncations of zero and sign extended values. */
5159 if ((GET_CODE (op) == ZERO_EXTEND
5160 || GET_CODE (op) == SIGN_EXTEND)
5161 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5162 {
5163 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5164
5165 /* If we're requesting the lowpart of a zero or sign extension,
5166 there are three possibilities. If the outermode is the same
5167 as the origmode, we can omit both the extension and the subreg.
5168 If the outermode is not larger than the origmode, we can apply
5169 the truncation without the extension. Finally, if the outermode
5170 is larger than the origmode, but both are integer modes, we
5171 can just extend to the appropriate mode. */
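      /* For example, the lowpart (subreg:HI (zero_extend:SI (reg:HI x)) 0)
	 on a little-endian target reduces to (reg:HI x), while a lowpart
	 SImode subreg of (zero_extend:DI (reg:HI x)) becomes
	 (zero_extend:SI (reg:HI x)).  */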
5172 if (bitpos == 0)
5173 {
5174 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5175 if (outermode == origmode)
5176 return XEXP (op, 0);
5177 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5178 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5179 subreg_lowpart_offset (outermode,
5180 origmode));
5181 if (SCALAR_INT_MODE_P (outermode))
5182 return simplify_gen_unary (GET_CODE (op), outermode,
5183 XEXP (op, 0), origmode);
5184 }
5185
5186 /* A SUBREG resulting from a zero extension may fold to zero if
5187 it extracts higher bits than the ZERO_EXTEND's source bits. */
5188 if (GET_CODE (op) == ZERO_EXTEND
5189 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5190 return CONST0_RTX (outermode);
5191 }
5192
5193 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5194 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5195 the outer subreg is effectively a truncation to the original mode. */
5196 if ((GET_CODE (op) == LSHIFTRT
5197 || GET_CODE (op) == ASHIFTRT)
5198 && SCALAR_INT_MODE_P (outermode)
5199 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5200 to avoid the possibility that an outer LSHIFTRT shifts by more
5201 than the sign extension's sign_bit_copies and introduces zeros
5202 into the high bits of the result. */
5203 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5204 && GET_CODE (XEXP (op, 1)) == CONST_INT
5205 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5206 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5207 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5208 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5209 return simplify_gen_binary (ASHIFTRT, outermode,
5210 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5211
5212 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5213 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5214 the outer subreg is effectively a truncation to the original mode. */
5215 if ((GET_CODE (op) == LSHIFTRT
5216 || GET_CODE (op) == ASHIFTRT)
5217 && SCALAR_INT_MODE_P (outermode)
5218 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5219 && GET_CODE (XEXP (op, 1)) == CONST_INT
5220 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5221 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5222 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5223 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5224 return simplify_gen_binary (LSHIFTRT, outermode,
5225 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5226
5227 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5228 (ashift:QI (x:QI) C), where C is a suitable small constant and
5229 the outer subreg is effectively a truncation to the original mode. */
5230 if (GET_CODE (op) == ASHIFT
5231 && SCALAR_INT_MODE_P (outermode)
5232 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5233 && GET_CODE (XEXP (op, 1)) == CONST_INT
5234 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5235 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5236 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5237 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5238 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5239 return simplify_gen_binary (ASHIFT, outermode,
5240 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5241
5242 return NULL_RTX;
5243 }
5244
5245 /* Make a SUBREG operation or equivalent if it folds. */
5246
5247 rtx
5248 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5249 enum machine_mode innermode, unsigned int byte)
5250 {
5251 rtx newx;
5252
5253 newx = simplify_subreg (outermode, op, innermode, byte);
5254 if (newx)
5255 return newx;
5256
5257 if (GET_CODE (op) == SUBREG
5258 || GET_CODE (op) == CONCAT
5259 || GET_MODE (op) == VOIDmode)
5260 return NULL_RTX;
5261
5262 if (validate_subreg (outermode, innermode, op, byte))
5263 return gen_rtx_SUBREG (outermode, op, byte);
5264
5265 return NULL_RTX;
5266 }
5267
5268 /* Simplify X, an rtx expression.
5269
5270 Return the simplified expression or NULL if no simplifications
5271 were possible.
5272
5273 This is the preferred entry point into the simplification routines;
5274 however, we still allow passes to call the more specific routines.
5275
5276 Right now GCC has three (yes, three) major bodies of RTL simplification
5277 code that need to be unified.
5278
5279 1. fold_rtx in cse.c. This code uses various CSE specific
5280 information to aid in RTL simplification.
5281
5282 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5283 it uses combine specific information to aid in RTL
5284 simplification.
5285
5286 3. The routines in this file.
5287
5288
5289 Long term we want to only have one body of simplification code; to
5290 get to that state I recommend the following steps:
5291
5292 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5293 which are not pass dependent state into these routines.
5294
5295 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5296 use this routine whenever possible.
5297
5298 3. Allow for pass dependent state to be provided to these
5299 routines and add simplifications based on the pass dependent
5300 state. Remove code from cse.c & combine.c that becomes
5301 redundant/dead.
5302
5303 It will take time, but ultimately the compiler will be easier to
5304 maintain and improve. It's totally silly that when we add a
5305 simplification it needs to be added to 4 places (3 for RTL
5306 simplification and 1 for tree simplification). */
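/* For example (a sketch), folding a freshly built constant addition:

     rtx sum = simplify_rtx (gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3)));

   yields (const_int 5), while an expression with no applicable
   simplification comes back as NULL.  */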
5307
5308 rtx
5309 simplify_rtx (const_rtx x)
5310 {
5311 const enum rtx_code code = GET_CODE (x);
5312 const enum machine_mode mode = GET_MODE (x);
5313
5314 switch (GET_RTX_CLASS (code))
5315 {
5316 case RTX_UNARY:
5317 return simplify_unary_operation (code, mode,
5318 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5319 case RTX_COMM_ARITH:
5320 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5321 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5322
5323 /* Fall through.... */
5324
5325 case RTX_BIN_ARITH:
5326 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5327
5328 case RTX_TERNARY:
5329 case RTX_BITFIELD_OPS:
5330 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5331 XEXP (x, 0), XEXP (x, 1),
5332 XEXP (x, 2));
5333
5334 case RTX_COMPARE:
5335 case RTX_COMM_COMPARE:
5336 return simplify_relational_operation (code, mode,
5337 ((GET_MODE (XEXP (x, 0))
5338 != VOIDmode)
5339 ? GET_MODE (XEXP (x, 0))
5340 : GET_MODE (XEXP (x, 1))),
5341 XEXP (x, 0),
5342 XEXP (x, 1));
5343
5344 case RTX_EXTRA:
5345 if (code == SUBREG)
5346 return simplify_subreg (mode, SUBREG_REG (x),
5347 GET_MODE (SUBREG_REG (x)),
5348 SUBREG_BYTE (x));
5349 break;
5350
5351 case RTX_OBJ:
5352 if (code == LO_SUM)
5353 {
5354 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5355 if (GET_CODE (XEXP (x, 0)) == HIGH
5356 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5357 return XEXP (x, 1);
5358 }
5359 break;
5360
5361 default:
5362 break;
5363 }
5364 return NULL;
5365 }