gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
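/* For example, assuming a 64-bit HOST_WIDE_INT: the value -2 is held as the
   pair (low, high) = (0xfffffffffffffffe, -1), since HWI_SIGN_EXTEND of a
   low word with its top bit set yields -1; the value 2 is held as (2, 0),
   since a nonnegative low word sign-extends to a zero high word.  */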
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
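/* For example, in QImode the most negative value is -128; its mathematical
   negation, +128, is not representable, so gen_int_mode truncates it back to
   -128.  Any other value negates exactly, e.g. (const_int 5) yields
   (const_int -5).  */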
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
76
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
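/* A worked example, assuming 32-bit SImode and HOST_BITS_PER_WIDE_INT >= 32:
   the only SImode constant accepted is the sign bit 0x80000000, so
   mode_signbit_p (SImode, gen_int_mode (0x80000000, SImode)) is true, while
   any other constant, or any non-constant rtx, gives false.  */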
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
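/* Usage sketch: simplify_gen_binary (PLUS, SImode, GEN_INT (3), GEN_INT (4))
   folds to (const_int 7); with a register operand no fold is possible, so a
   fresh (plus:SI ...) is generated, with any constant operand placed second
   per the canonical order for commutative codes.  */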
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 addr = XEXP (x, 0);
162
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
165
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
170 {
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
173 }
174
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
177
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
182 {
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
185
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
190 {
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
194 }
195 else
196 return c;
197 }
198
199 return x;
200 }
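/* Illustrative example: if X is (mem:SF (symbol_ref ...)) and the symbol
   addresses a constant-pool entry holding 1.0, the CONST_DOUBLE for 1.0 is
   returned directly; if X reads the pool slot at a nonzero offset or in a
   different mode, simplify_subreg is used to extract the matching piece, and
   X itself is returned when even that fails.  */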
201
202 /* Return true if X is a MEM referencing the constant pool. */
203
204 bool
205 constant_pool_reference_p (rtx x)
206 {
207 return avoid_constant_pool_reference (x) != x;
208 }
209 \f
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
212
213 rtx
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
216 {
217 rtx tem;
218
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
222
223 return gen_rtx_fmt_e (code, mode, op);
224 }
225
226 /* Likewise for ternary operations. */
227
228 rtx
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
231 {
232 rtx tem;
233
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
238
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
240 }
241
242 /* Likewise, for relational operations.
243 CMP_MODE specifies the mode in which the comparison is done. */
244
245 rtx
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
248 {
249 rtx tem;
250
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
254
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
256 }
257 \f
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
260
261 rtx
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
263 {
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
268
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
272
273 if (x == old_rtx)
274 return new_rtx;
275
276 switch (GET_RTX_CLASS (code))
277 {
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
285
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
293
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
304
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
317
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
321 {
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
329 }
330 break;
331
332 case RTX_OBJ:
333 if (code == MEM)
334 {
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
339 }
340 else if (code == LO_SUM)
341 {
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
344
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
348
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
352 }
353 else if (code == REG)
354 {
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
357 }
358 break;
359
360 default:
361 break;
362 }
363 return x;
364 }
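/* Usage sketch: replacing (reg R) by (const_int 0) in (plus:SI (reg R) (reg S))
   rebuilds the sum through simplify_gen_binary, which folds the addition of
   zero and returns just (reg S); subexpressions that do not contain OLD_RTX
   are returned unchanged, so their sharing is preserved.  */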
365 \f
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
369 rtx
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
372 {
373 rtx trueop, tem;
374
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
377
378 trueop = avoid_constant_pool_reference (op);
379
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
383
384 return simplify_unary_operation_1 (code, mode, op);
385 }
386
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
391 {
392 enum rtx_code reversed;
393 rtx temp;
394
395 switch (code)
396 {
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
401
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
409
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
414
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
418
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
425
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
433
434
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
439 bother with. */
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
442 {
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
445 }
446
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
450
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
457
458
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
465 {
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
467 rtx x;
468
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
471 inner_mode),
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
474 }
475
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
479 coded. */
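/* For example, (not (ior X Y)) becomes (and (not X) (not Y)), and
   (not (and X (not Y))) becomes (ior (not X) Y), with the surviving
   NOT placed first.  */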
480
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
482 {
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
485
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
488
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
491 op_mode = mode;
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
493
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
495 {
496 rtx tem = in2;
497 in2 = in1; in1 = tem;
498 }
499
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
501 mode, in1, in2);
502 }
503 break;
504
505 case NEG:
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
508 return XEXP (op, 0);
509
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
514
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
518
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
521 both +0, (minus Y X) is the same as (minus X Y). If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
528
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
532 {
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
536 {
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
538 if (temp)
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
540 }
541
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
545 }
546
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
551 {
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
554 }
555
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
558 is a constant). */
559 if (GET_CODE (op) == ASHIFT)
560 {
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
562 if (temp)
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
564 }
565
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
573
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
581
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
587
588 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
589 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
590 if (GET_CODE (op) == LT
591 && XEXP (op, 1) == const0_rtx)
592 {
593 enum machine_mode inner = GET_MODE (XEXP (op, 0));
594 int isize = GET_MODE_BITSIZE (inner);
595 if (STORE_FLAG_VALUE == 1)
596 {
597 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
598 GEN_INT (isize - 1));
599 if (mode == inner)
600 return temp;
601 if (GET_MODE_BITSIZE (mode) > isize)
602 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
603 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
604 }
605 else if (STORE_FLAG_VALUE == -1)
606 {
607 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
608 GEN_INT (isize - 1));
609 if (mode == inner)
610 return temp;
611 if (GET_MODE_BITSIZE (mode) > isize)
612 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
613 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
614 }
615 }
616 break;
617
618 case TRUNCATE:
619 /* We can't handle truncation to a partial integer mode here
620 because we don't know the real bitsize of the partial
621 integer mode. */
622 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
623 break;
624
625 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
626 if ((GET_CODE (op) == SIGN_EXTEND
627 || GET_CODE (op) == ZERO_EXTEND)
628 && GET_MODE (XEXP (op, 0)) == mode)
629 return XEXP (op, 0);
630
631 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
632 (OP:SI foo:SI) if OP is NEG or ABS. */
633 if ((GET_CODE (op) == ABS
634 || GET_CODE (op) == NEG)
635 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
636 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
637 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (XEXP (op, 0), 0), mode);
640
641 /* (truncate:A (subreg:B (truncate:C X) 0)) is
642 (truncate:A X). */
643 if (GET_CODE (op) == SUBREG
644 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
645 && subreg_lowpart_p (op))
646 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
647 GET_MODE (XEXP (SUBREG_REG (op), 0)));
648
649 /* If we know that the value is already truncated, we can
650 replace the TRUNCATE with a SUBREG. Note that this is also
651 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
652 modes; we just have to apply a different definition for
653 truncation. But don't do this for an (LSHIFTRT (MULT ...))
654 since this will cause problems with the umulXi3_highpart
655 patterns. */
656 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
657 GET_MODE_BITSIZE (GET_MODE (op)))
658 ? (num_sign_bit_copies (op, GET_MODE (op))
659 >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1))
660 : truncated_to_mode (mode, op))
661 && ! (GET_CODE (op) == LSHIFTRT
662 && GET_CODE (XEXP (op, 0)) == MULT))
663 return rtl_hooks.gen_lowpart_no_emit (mode, op);
664
665 /* A truncate of a comparison can be replaced with a subreg if
666 STORE_FLAG_VALUE permits. This is like the previous test,
667 but it works even if the comparison is done in a mode larger
668 than HOST_BITS_PER_WIDE_INT. */
669 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
670 && COMPARISON_P (op)
671 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
672 return rtl_hooks.gen_lowpart_no_emit (mode, op);
673 break;
674
675 case FLOAT_TRUNCATE:
676 if (DECIMAL_FLOAT_MODE_P (mode))
677 break;
678
679 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
680 if (GET_CODE (op) == FLOAT_EXTEND
681 && GET_MODE (XEXP (op, 0)) == mode)
682 return XEXP (op, 0);
683
684 /* (float_truncate:SF (float_truncate:DF foo:XF))
685 = (float_truncate:SF foo:XF).
686 This may eliminate double rounding, so it is unsafe.
687
688 (float_truncate:SF (float_extend:XF foo:DF))
689 = (float_truncate:SF foo:DF).
690
691 (float_truncate:DF (float_extend:XF foo:SF))
692 = (float_extend:DF foo:SF). */
693 if ((GET_CODE (op) == FLOAT_TRUNCATE
694 && flag_unsafe_math_optimizations)
695 || GET_CODE (op) == FLOAT_EXTEND)
696 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
697 0)))
698 > GET_MODE_SIZE (mode)
699 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
700 mode,
701 XEXP (op, 0), mode);
702
703 /* (float_truncate (float x)) is (float x) */
704 if (GET_CODE (op) == FLOAT
705 && (flag_unsafe_math_optimizations
706 || ((unsigned)significand_size (GET_MODE (op))
707 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
708 - num_sign_bit_copies (XEXP (op, 0),
709 GET_MODE (XEXP (op, 0)))))))
710 return simplify_gen_unary (FLOAT, mode,
711 XEXP (op, 0),
712 GET_MODE (XEXP (op, 0)));
713
714 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
715 (OP:SF foo:SF) if OP is NEG or ABS. */
716 if ((GET_CODE (op) == ABS
717 || GET_CODE (op) == NEG)
718 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
719 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
720 return simplify_gen_unary (GET_CODE (op), mode,
721 XEXP (XEXP (op, 0), 0), mode);
722
723 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
724 is (float_truncate:SF x). */
725 if (GET_CODE (op) == SUBREG
726 && subreg_lowpart_p (op)
727 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
728 return SUBREG_REG (op);
729 break;
730
731 case FLOAT_EXTEND:
732 if (DECIMAL_FLOAT_MODE_P (mode))
733 break;
734
735 /* (float_extend (float_extend x)) is (float_extend x)
736
737 (float_extend (float x)) is (float x) assuming that double
738 rounding can't happen.
739 */
740 if (GET_CODE (op) == FLOAT_EXTEND
741 || (GET_CODE (op) == FLOAT
742 && ((unsigned)significand_size (GET_MODE (op))
743 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
744 - num_sign_bit_copies (XEXP (op, 0),
745 GET_MODE (XEXP (op, 0)))))))
746 return simplify_gen_unary (GET_CODE (op), mode,
747 XEXP (op, 0),
748 GET_MODE (XEXP (op, 0)));
749
750 break;
751
752 case ABS:
753 /* (abs (neg <foo>)) -> (abs <foo>) */
754 if (GET_CODE (op) == NEG)
755 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
756 GET_MODE (XEXP (op, 0)));
757
758 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
759 do nothing. */
760 if (GET_MODE (op) == VOIDmode)
761 break;
762
763 /* If operand is something known to be positive, ignore the ABS. */
764 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
765 || ((GET_MODE_BITSIZE (GET_MODE (op))
766 <= HOST_BITS_PER_WIDE_INT)
767 && ((nonzero_bits (op, GET_MODE (op))
768 & ((HOST_WIDE_INT) 1
769 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
770 == 0)))
771 return op;
772
773 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
774 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
775 return gen_rtx_NEG (mode, op);
776
777 break;
778
779 case FFS:
780 /* (ffs (*_extend <X>)) = (ffs <X>) */
781 if (GET_CODE (op) == SIGN_EXTEND
782 || GET_CODE (op) == ZERO_EXTEND)
783 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
784 GET_MODE (XEXP (op, 0)));
785 break;
786
787 case POPCOUNT:
788 case PARITY:
789 /* (pop* (zero_extend <X>)) = (pop* <X>) */
790 if (GET_CODE (op) == ZERO_EXTEND)
791 return simplify_gen_unary (code, mode, XEXP (op, 0),
792 GET_MODE (XEXP (op, 0)));
793 break;
794
795 case FLOAT:
796 /* (float (sign_extend <X>)) = (float <X>). */
797 if (GET_CODE (op) == SIGN_EXTEND)
798 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
799 GET_MODE (XEXP (op, 0)));
800 break;
801
802 case SIGN_EXTEND:
803 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
804 becomes just the MINUS if its mode is MODE. This allows
805 folding switch statements on machines using casesi (such as
806 the VAX). */
807 if (GET_CODE (op) == TRUNCATE
808 && GET_MODE (XEXP (op, 0)) == mode
809 && GET_CODE (XEXP (op, 0)) == MINUS
810 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
811 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
812 return XEXP (op, 0);
813
814 /* Check for a sign extension of a subreg of a promoted
815 variable, where the promotion is sign-extended, and the
816 target mode is the same as the variable's promotion. */
817 if (GET_CODE (op) == SUBREG
818 && SUBREG_PROMOTED_VAR_P (op)
819 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
820 && GET_MODE (XEXP (op, 0)) == mode)
821 return XEXP (op, 0);
822
823 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
824 if (! POINTERS_EXTEND_UNSIGNED
825 && mode == Pmode && GET_MODE (op) == ptr_mode
826 && (CONSTANT_P (op)
827 || (GET_CODE (op) == SUBREG
828 && REG_P (SUBREG_REG (op))
829 && REG_POINTER (SUBREG_REG (op))
830 && GET_MODE (SUBREG_REG (op)) == Pmode)))
831 return convert_memory_address (Pmode, op);
832 #endif
833 break;
834
835 case ZERO_EXTEND:
836 /* Check for a zero extension of a subreg of a promoted
837 variable, where the promotion is zero-extended, and the
838 target mode is the same as the variable's promotion. */
839 if (GET_CODE (op) == SUBREG
840 && SUBREG_PROMOTED_VAR_P (op)
841 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
842 && GET_MODE (XEXP (op, 0)) == mode)
843 return XEXP (op, 0);
844
845 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
846 if (POINTERS_EXTEND_UNSIGNED > 0
847 && mode == Pmode && GET_MODE (op) == ptr_mode
848 && (CONSTANT_P (op)
849 || (GET_CODE (op) == SUBREG
850 && REG_P (SUBREG_REG (op))
851 && REG_POINTER (SUBREG_REG (op))
852 && GET_MODE (SUBREG_REG (op)) == Pmode)))
853 return convert_memory_address (Pmode, op);
854 #endif
855 break;
856
857 default:
858 break;
859 }
860
861 return 0;
862 }
863
864 /* Try to compute the value of a unary operation CODE whose output mode is to
865 be MODE with input operand OP whose mode was originally OP_MODE.
866 Return zero if the value cannot be computed. */
867 rtx
868 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
869 rtx op, enum machine_mode op_mode)
870 {
871 unsigned int width = GET_MODE_BITSIZE (mode);
872
873 if (code == VEC_DUPLICATE)
874 {
875 gcc_assert (VECTOR_MODE_P (mode));
876 if (GET_MODE (op) != VOIDmode)
877 {
878 if (!VECTOR_MODE_P (GET_MODE (op)))
879 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
880 else
881 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
882 (GET_MODE (op)));
883 }
884 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
885 || GET_CODE (op) == CONST_VECTOR)
886 {
887 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
888 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
889 rtvec v = rtvec_alloc (n_elts);
890 unsigned int i;
891
892 if (GET_CODE (op) != CONST_VECTOR)
893 for (i = 0; i < n_elts; i++)
894 RTVEC_ELT (v, i) = op;
895 else
896 {
897 enum machine_mode inmode = GET_MODE (op);
898 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
899 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
900
901 gcc_assert (in_n_elts < n_elts);
902 gcc_assert ((n_elts % in_n_elts) == 0);
903 for (i = 0; i < n_elts; i++)
904 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
905 }
906 return gen_rtx_CONST_VECTOR (mode, v);
907 }
908 }
909
910 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
911 {
912 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
913 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
914 enum machine_mode opmode = GET_MODE (op);
915 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
916 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
917 rtvec v = rtvec_alloc (n_elts);
918 unsigned int i;
919
920 gcc_assert (op_n_elts == n_elts);
921 for (i = 0; i < n_elts; i++)
922 {
923 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
924 CONST_VECTOR_ELT (op, i),
925 GET_MODE_INNER (opmode));
926 if (!x)
927 return 0;
928 RTVEC_ELT (v, i) = x;
929 }
930 return gen_rtx_CONST_VECTOR (mode, v);
931 }
932
933 /* The order of these tests is critical so that, for example, we don't
934 check the wrong mode (input vs. output) for a conversion operation,
935 such as FIX. At some point, this should be simplified. */
936
937 if (code == FLOAT && GET_MODE (op) == VOIDmode
938 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
939 {
940 HOST_WIDE_INT hv, lv;
941 REAL_VALUE_TYPE d;
942
943 if (GET_CODE (op) == CONST_INT)
944 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
945 else
946 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
947
948 REAL_VALUE_FROM_INT (d, lv, hv, mode);
949 d = real_value_truncate (mode, d);
950 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
951 }
952 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
953 && (GET_CODE (op) == CONST_DOUBLE
954 || GET_CODE (op) == CONST_INT))
955 {
956 HOST_WIDE_INT hv, lv;
957 REAL_VALUE_TYPE d;
958
959 if (GET_CODE (op) == CONST_INT)
960 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
961 else
962 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
963
964 if (op_mode == VOIDmode)
965 {
966 /* We don't know how to interpret negative-looking numbers in
967 this case, so don't try to fold those. */
968 if (hv < 0)
969 return 0;
970 }
971 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
972 ;
973 else
974 hv = 0, lv &= GET_MODE_MASK (op_mode);
975
976 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
977 d = real_value_truncate (mode, d);
978 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
979 }
980
981 if (GET_CODE (op) == CONST_INT
982 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
983 {
984 HOST_WIDE_INT arg0 = INTVAL (op);
985 HOST_WIDE_INT val;
986
987 switch (code)
988 {
989 case NOT:
990 val = ~ arg0;
991 break;
992
993 case NEG:
994 val = - arg0;
995 break;
996
997 case ABS:
998 val = (arg0 >= 0 ? arg0 : - arg0);
999 break;
1000
1001 case FFS:
1002 /* Don't use ffs here. Instead, get low order bit and then its
1003 number. If arg0 is zero, this will return 0, as desired. */
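/* Worked example: for arg0 = 20 (binary 10100), arg0 & -arg0 isolates the
   lowest set bit, 4 (binary 100); exact_log2 (4) is 2, and adding 1 gives 3,
   the 1-based bit position FFS returns.  For arg0 = 0, exact_log2 returns -1,
   so the result is 0.  */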
1004 arg0 &= GET_MODE_MASK (mode);
1005 val = exact_log2 (arg0 & (- arg0)) + 1;
1006 break;
1007
1008 case CLZ:
1009 arg0 &= GET_MODE_MASK (mode);
1010 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1011 ;
1012 else
1013 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1014 break;
1015
1016 case CTZ:
1017 arg0 &= GET_MODE_MASK (mode);
1018 if (arg0 == 0)
1019 {
1020 /* Even if the value at zero is undefined, we have to come
1021 up with some replacement. Seems good enough. */
1022 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1023 val = GET_MODE_BITSIZE (mode);
1024 }
1025 else
1026 val = exact_log2 (arg0 & -arg0);
1027 break;
1028
1029 case POPCOUNT:
1030 arg0 &= GET_MODE_MASK (mode);
1031 val = 0;
1032 while (arg0)
1033 val++, arg0 &= arg0 - 1;
1034 break;
1035
1036 case PARITY:
1037 arg0 &= GET_MODE_MASK (mode);
1038 val = 0;
1039 while (arg0)
1040 val++, arg0 &= arg0 - 1;
1041 val &= 1;
1042 break;
1043
1044 case TRUNCATE:
1045 val = arg0;
1046 break;
1047
1048 case ZERO_EXTEND:
1049 /* When zero-extending a CONST_INT, we need to know its
1050 original mode. */
1051 gcc_assert (op_mode != VOIDmode);
1052 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1053 {
1054 /* If we were really extending the mode,
1055 we would have to distinguish between zero-extension
1056 and sign-extension. */
1057 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1058 val = arg0;
1059 }
1060 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1061 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1062 else
1063 return 0;
1064 break;
1065
1066 case SIGN_EXTEND:
1067 if (op_mode == VOIDmode)
1068 op_mode = mode;
1069 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1070 {
1071 /* If we were really extending the mode,
1072 we would have to distinguish between zero-extension
1073 and sign-extension. */
1074 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1075 val = arg0;
1076 }
1077 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1078 {
1079 val
1080 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1081 if (val
1082 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1083 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1084 }
1085 else
1086 return 0;
1087 break;
1088
1089 case SQRT:
1090 case FLOAT_EXTEND:
1091 case FLOAT_TRUNCATE:
1092 case SS_TRUNCATE:
1093 case US_TRUNCATE:
1094 return 0;
1095
1096 default:
1097 gcc_unreachable ();
1098 }
1099
1100 return gen_int_mode (val, mode);
1101 }
1102
1103 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1104 for a DImode operation on a CONST_INT. */
1105 else if (GET_MODE (op) == VOIDmode
1106 && width <= HOST_BITS_PER_WIDE_INT * 2
1107 && (GET_CODE (op) == CONST_DOUBLE
1108 || GET_CODE (op) == CONST_INT))
1109 {
1110 unsigned HOST_WIDE_INT l1, lv;
1111 HOST_WIDE_INT h1, hv;
1112
1113 if (GET_CODE (op) == CONST_DOUBLE)
1114 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1115 else
1116 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1117
1118 switch (code)
1119 {
1120 case NOT:
1121 lv = ~ l1;
1122 hv = ~ h1;
1123 break;
1124
1125 case NEG:
1126 neg_double (l1, h1, &lv, &hv);
1127 break;
1128
1129 case ABS:
1130 if (h1 < 0)
1131 neg_double (l1, h1, &lv, &hv);
1132 else
1133 lv = l1, hv = h1;
1134 break;
1135
1136 case FFS:
1137 hv = 0;
1138 if (l1 == 0)
1139 {
1140 if (h1 == 0)
1141 lv = 0;
1142 else
1143 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1144 }
1145 else
1146 lv = exact_log2 (l1 & -l1) + 1;
1147 break;
1148
1149 case CLZ:
1150 hv = 0;
1151 if (h1 != 0)
1152 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1153 - HOST_BITS_PER_WIDE_INT;
1154 else if (l1 != 0)
1155 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1156 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1157 lv = GET_MODE_BITSIZE (mode);
1158 break;
1159
1160 case CTZ:
1161 hv = 0;
1162 if (l1 != 0)
1163 lv = exact_log2 (l1 & -l1);
1164 else if (h1 != 0)
1165 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1166 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1167 lv = GET_MODE_BITSIZE (mode);
1168 break;
1169
1170 case POPCOUNT:
1171 hv = 0;
1172 lv = 0;
1173 while (l1)
1174 lv++, l1 &= l1 - 1;
1175 while (h1)
1176 lv++, h1 &= h1 - 1;
1177 break;
1178
1179 case PARITY:
1180 hv = 0;
1181 lv = 0;
1182 while (l1)
1183 lv++, l1 &= l1 - 1;
1184 while (h1)
1185 lv++, h1 &= h1 - 1;
1186 lv &= 1;
1187 break;
1188
1189 case TRUNCATE:
1190 /* This is just a change-of-mode, so do nothing. */
1191 lv = l1, hv = h1;
1192 break;
1193
1194 case ZERO_EXTEND:
1195 gcc_assert (op_mode != VOIDmode);
1196
1197 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1198 return 0;
1199
1200 hv = 0;
1201 lv = l1 & GET_MODE_MASK (op_mode);
1202 break;
1203
1204 case SIGN_EXTEND:
1205 if (op_mode == VOIDmode
1206 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1207 return 0;
1208 else
1209 {
1210 lv = l1 & GET_MODE_MASK (op_mode);
1211 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1212 && (lv & ((HOST_WIDE_INT) 1
1213 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1214 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1215
1216 hv = HWI_SIGN_EXTEND (lv);
1217 }
1218 break;
1219
1220 case SQRT:
1221 return 0;
1222
1223 default:
1224 return 0;
1225 }
1226
1227 return immed_double_const (lv, hv, mode);
1228 }
1229
1230 else if (GET_CODE (op) == CONST_DOUBLE
1231 && SCALAR_FLOAT_MODE_P (mode))
1232 {
1233 REAL_VALUE_TYPE d, t;
1234 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1235
1236 switch (code)
1237 {
1238 case SQRT:
1239 if (HONOR_SNANS (mode) && real_isnan (&d))
1240 return 0;
1241 real_sqrt (&t, mode, &d);
1242 d = t;
1243 break;
1244 case ABS:
1245 d = REAL_VALUE_ABS (d);
1246 break;
1247 case NEG:
1248 d = REAL_VALUE_NEGATE (d);
1249 break;
1250 case FLOAT_TRUNCATE:
1251 d = real_value_truncate (mode, d);
1252 break;
1253 case FLOAT_EXTEND:
1254 /* All this does is change the mode. */
1255 break;
1256 case FIX:
1257 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1258 break;
1259 case NOT:
1260 {
1261 long tmp[4];
1262 int i;
1263
1264 real_to_target (tmp, &d, GET_MODE (op));
1265 for (i = 0; i < 4; i++)
1266 tmp[i] = ~tmp[i];
1267 real_from_target (&d, tmp, mode);
1268 break;
1269 }
1270 default:
1271 gcc_unreachable ();
1272 }
1273 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1274 }
1275
1276 else if (GET_CODE (op) == CONST_DOUBLE
1277 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1278 && GET_MODE_CLASS (mode) == MODE_INT
1279 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1280 {
1281 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1282 operators are intentionally left unspecified (to ease implementation
1283 by target backends), for consistency, this routine implements the
1284 same semantics for constant folding as used by the middle-end. */
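/* Concretely, assuming 32-bit SImode: folding (fix:SI (const_double 1e10))
   saturates to the signed upper bound 2147483647, a NaN operand folds to
   (const_int 0), and (unsigned_fix:SI (const_double -1.5)) also folds to
   (const_int 0) because negative values are clamped to zero.  */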
1285
1286 /* This was formerly used only for non-IEEE float.
1287 eggert@twinsun.com says it is safe for IEEE also. */
1288 HOST_WIDE_INT xh, xl, th, tl;
1289 REAL_VALUE_TYPE x, t;
1290 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1291 switch (code)
1292 {
1293 case FIX:
1294 if (REAL_VALUE_ISNAN (x))
1295 return const0_rtx;
1296
1297 /* Test against the signed upper bound. */
1298 if (width > HOST_BITS_PER_WIDE_INT)
1299 {
1300 th = ((unsigned HOST_WIDE_INT) 1
1301 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1302 tl = -1;
1303 }
1304 else
1305 {
1306 th = 0;
1307 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1308 }
1309 real_from_integer (&t, VOIDmode, tl, th, 0);
1310 if (REAL_VALUES_LESS (t, x))
1311 {
1312 xh = th;
1313 xl = tl;
1314 break;
1315 }
1316
1317 /* Test against the signed lower bound. */
1318 if (width > HOST_BITS_PER_WIDE_INT)
1319 {
1320 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1321 tl = 0;
1322 }
1323 else
1324 {
1325 th = -1;
1326 tl = (HOST_WIDE_INT) -1 << (width - 1);
1327 }
1328 real_from_integer (&t, VOIDmode, tl, th, 0);
1329 if (REAL_VALUES_LESS (x, t))
1330 {
1331 xh = th;
1332 xl = tl;
1333 break;
1334 }
1335 REAL_VALUE_TO_INT (&xl, &xh, x);
1336 break;
1337
1338 case UNSIGNED_FIX:
1339 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1340 return const0_rtx;
1341
1342 /* Test against the unsigned upper bound. */
1343 if (width == 2*HOST_BITS_PER_WIDE_INT)
1344 {
1345 th = -1;
1346 tl = -1;
1347 }
1348 else if (width >= HOST_BITS_PER_WIDE_INT)
1349 {
1350 th = ((unsigned HOST_WIDE_INT) 1
1351 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1352 tl = -1;
1353 }
1354 else
1355 {
1356 th = 0;
1357 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1358 }
1359 real_from_integer (&t, VOIDmode, tl, th, 1);
1360 if (REAL_VALUES_LESS (t, x))
1361 {
1362 xh = th;
1363 xl = tl;
1364 break;
1365 }
1366
1367 REAL_VALUE_TO_INT (&xl, &xh, x);
1368 break;
1369
1370 default:
1371 gcc_unreachable ();
1372 }
1373 return immed_double_const (xl, xh, mode);
1374 }
1375
1376 return NULL_RTX;
1377 }
1378 \f
1379 /* Subroutine of simplify_binary_operation to simplify a commutative,
1380 associative binary operation CODE with result mode MODE, operating
1381 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1382 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1383 canonicalization is possible. */
1384
1385 static rtx
1386 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1387 rtx op0, rtx op1)
1388 {
1389 rtx tem;
1390
1391 /* Linearize the operator to the left. */
1392 if (GET_CODE (op1) == code)
1393 {
1394 /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)". */
1395 if (GET_CODE (op0) == code)
1396 {
1397 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1398 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1399 }
1400
1401 /* "a op (b op c)" becomes "(b op c) op a". */
1402 if (! swap_commutative_operands_p (op1, op0))
1403 return simplify_gen_binary (code, mode, op1, op0);
1404
1405 tem = op0;
1406 op0 = op1;
1407 op1 = tem;
1408 }
1409
1410 if (GET_CODE (op0) == code)
1411 {
1412 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1413 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1414 {
1415 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1416 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1417 }
1418
1419 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1420 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1421 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1422 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1423 if (tem != 0)
1424 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1425
1426 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1427 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1428 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1429 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1430 if (tem != 0)
1431 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1432 }
1433
1434 return 0;
1435 }
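/* Illustrative behavior for a commutative and associative CODE such as IOR:
   (ior (ior a b) (ior c d)) is relinearized to (ior (ior (ior a b) c) d);
   (ior (ior x (const_int 240)) y) is canonicalized to
   (ior (ior x y) (const_int 240)); and (ior (ior a (const_int 240))
   (const_int 15)) folds the two constants into (ior a (const_int 255)).  */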
1436
1437
1438 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1439 and OP1. Return 0 if no simplification is possible.
1440
1441 Don't use this for relational operations such as EQ or LT.
1442 Use simplify_relational_operation instead. */
1443 rtx
1444 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1445 rtx op0, rtx op1)
1446 {
1447 rtx trueop0, trueop1;
1448 rtx tem;
1449
1450 /* Relational operations don't work here. We must know the mode
1451 of the operands in order to do the comparison correctly.
1452 Assuming a full word can give incorrect results.
1453 Consider comparing 128 with -128 in QImode. */
1454 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1455 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1456
1457 /* Make sure the constant is second. */
1458 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1459 && swap_commutative_operands_p (op0, op1))
1460 {
1461 tem = op0, op0 = op1, op1 = tem;
1462 }
1463
1464 trueop0 = avoid_constant_pool_reference (op0);
1465 trueop1 = avoid_constant_pool_reference (op1);
1466
1467 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1468 if (tem)
1469 return tem;
1470 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1471 }
1472
1473 static rtx
1474 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1475 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1476 {
1477 rtx tem, reversed, opleft, opright;
1478 HOST_WIDE_INT val;
1479 unsigned int width = GET_MODE_BITSIZE (mode);
1480
1481 /* Even if we can't compute a constant result,
1482 there are some cases worth simplifying. */
1483
1484 switch (code)
1485 {
1486 case PLUS:
1487 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1488 when x is NaN, infinite, or finite and nonzero. They aren't
1489 when x is -0 and the rounding mode is not towards -infinity,
1490 since (-0) + 0 is then 0. */
1491 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1492 return op0;
1493
1494 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1495 transformations are safe even for IEEE. */
1496 if (GET_CODE (op0) == NEG)
1497 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1498 else if (GET_CODE (op1) == NEG)
1499 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1500
1501 /* (~a) + 1 -> -a */
1502 if (INTEGRAL_MODE_P (mode)
1503 && GET_CODE (op0) == NOT
1504 && trueop1 == const1_rtx)
1505 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1506
1507 /* Handle both-operands-constant cases. We can only add
1508 CONST_INTs to constants since the sum of relocatable symbols
1509 can't be handled by most assemblers. Don't add CONST_INT
1510 to CONST_INT since overflow won't be computed properly if wider
1511 than HOST_BITS_PER_WIDE_INT. */
1512
1513 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1514 && GET_CODE (op1) == CONST_INT)
1515 return plus_constant (op0, INTVAL (op1));
1516 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1517 && GET_CODE (op0) == CONST_INT)
1518 return plus_constant (op1, INTVAL (op0));
1519
1520 /* See if this is something like X * C - X or vice versa or
1521 if the multiplication is written as a shift. If so, we can
1522 distribute and make a new multiply, shift, or maybe just
1523 have X (if C is 2 in the example above). But don't make
1524 something more expensive than we had before. */
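/* For example, (plus (mult X 3) X) has coefficients 3 and 1 and becomes
   (mult X 4); (plus (ashift X 2) (neg X)) is treated as 4*X + -1*X and
   becomes (mult X 3).  The rtx_cost comparison below drops the rewrite
   whenever the new form would be more expensive than the original.  */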
1525
1526 if (SCALAR_INT_MODE_P (mode))
1527 {
1528 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1529 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1530 rtx lhs = op0, rhs = op1;
1531
1532 if (GET_CODE (lhs) == NEG)
1533 {
1534 coeff0l = -1;
1535 coeff0h = -1;
1536 lhs = XEXP (lhs, 0);
1537 }
1538 else if (GET_CODE (lhs) == MULT
1539 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1540 {
1541 coeff0l = INTVAL (XEXP (lhs, 1));
1542 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1543 lhs = XEXP (lhs, 0);
1544 }
1545 else if (GET_CODE (lhs) == ASHIFT
1546 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1547 && INTVAL (XEXP (lhs, 1)) >= 0
1548 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1549 {
1550 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1551 coeff0h = 0;
1552 lhs = XEXP (lhs, 0);
1553 }
1554
1555 if (GET_CODE (rhs) == NEG)
1556 {
1557 coeff1l = -1;
1558 coeff1h = -1;
1559 rhs = XEXP (rhs, 0);
1560 }
1561 else if (GET_CODE (rhs) == MULT
1562 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1563 {
1564 coeff1l = INTVAL (XEXP (rhs, 1));
1565 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1566 rhs = XEXP (rhs, 0);
1567 }
1568 else if (GET_CODE (rhs) == ASHIFT
1569 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1570 && INTVAL (XEXP (rhs, 1)) >= 0
1571 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1572 {
1573 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1574 coeff1h = 0;
1575 rhs = XEXP (rhs, 0);
1576 }
1577
1578 if (rtx_equal_p (lhs, rhs))
1579 {
1580 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1581 rtx coeff;
1582 unsigned HOST_WIDE_INT l;
1583 HOST_WIDE_INT h;
1584
1585 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1586 coeff = immed_double_const (l, h, mode);
1587
1588 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1589 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1590 ? tem : 0;
1591 }
1592 }
1593
1594 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1595 if ((GET_CODE (op1) == CONST_INT
1596 || GET_CODE (op1) == CONST_DOUBLE)
1597 && GET_CODE (op0) == XOR
1598 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1599 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1600 && mode_signbit_p (mode, op1))
1601 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1602 simplify_gen_binary (XOR, mode, op1,
1603 XEXP (op0, 1)));
1604
1605 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1606 if (GET_CODE (op0) == MULT
1607 && GET_CODE (XEXP (op0, 0)) == NEG)
1608 {
1609 rtx in1, in2;
1610
1611 in1 = XEXP (XEXP (op0, 0), 0);
1612 in2 = XEXP (op0, 1);
1613 return simplify_gen_binary (MINUS, mode, op1,
1614 simplify_gen_binary (MULT, mode,
1615 in1, in2));
1616 }
1617
1618 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1619 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1620 is 1. */
1621 if (COMPARISON_P (op0)
1622 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1623 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1624 && (reversed = reversed_comparison (op0, mode)))
1625 return
1626 simplify_gen_unary (NEG, mode, reversed, mode);
1627
1628 /* If one of the operands is a PLUS or a MINUS, see if we can
1629 simplify this by the associative law.
1630 Don't use the associative law for floating point.
1631 The inaccuracy makes it nonassociative,
1632 and subtle programs can break if operations are associated. */
1633
1634 if (INTEGRAL_MODE_P (mode)
1635 && (plus_minus_operand_p (op0)
1636 || plus_minus_operand_p (op1))
1637 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1638 return tem;
1639
1640 /* Reassociate floating point addition only when the user
1641 specifies unsafe math optimizations. */
1642 if (FLOAT_MODE_P (mode)
1643 && flag_unsafe_math_optimizations)
1644 {
1645 tem = simplify_associative_operation (code, mode, op0, op1);
1646 if (tem)
1647 return tem;
1648 }
1649 break;
1650
1651 case COMPARE:
1652 #ifdef HAVE_cc0
1653 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1654 using cc0, in which case we want to leave it as a COMPARE
1655 so we can distinguish it from a register-register-copy.
1656
1657 In IEEE floating point, x-0 is not the same as x. */
1658
1659 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1660 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1661 && trueop1 == CONST0_RTX (mode))
1662 return op0;
1663 #endif
1664
1665 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1666 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1667 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1668 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1669 {
1670 rtx xop00 = XEXP (op0, 0);
1671 rtx xop10 = XEXP (op1, 0);
1672
1673 #ifdef HAVE_cc0
1674 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1675 #else
1676 if (REG_P (xop00) && REG_P (xop10)
1677 && GET_MODE (xop00) == GET_MODE (xop10)
1678 && REGNO (xop00) == REGNO (xop10)
1679 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1680 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1681 #endif
1682 return xop00;
1683 }
1684 break;
1685
1686 case MINUS:
1687 /* We can't assume x-x is 0 even with non-IEEE floating point,
1688 but since it is zero except in very strange circumstances, we
1689 will treat it as zero with -funsafe-math-optimizations. */
1690 if (rtx_equal_p (trueop0, trueop1)
1691 && ! side_effects_p (op0)
1692 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1693 return CONST0_RTX (mode);
1694
1695 /* Change subtraction from zero into negation. (0 - x) is the
1696 same as -x when x is NaN, infinite, or finite and nonzero.
1697 But if the mode has signed zeros, and does not round towards
1698 -infinity, then 0 - 0 is 0, not -0. */
1699 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1700 return simplify_gen_unary (NEG, mode, op1, mode);
1701
1702 /* (-1 - a) is ~a. */
1703 if (trueop0 == constm1_rtx)
1704 return simplify_gen_unary (NOT, mode, op1, mode);
1705
1706 /* Subtracting 0 has no effect unless the mode has signed zeros
1707 and supports rounding towards -infinity. In such a case,
1708 0 - 0 is -0. */
1709 if (!(HONOR_SIGNED_ZEROS (mode)
1710 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1711 && trueop1 == CONST0_RTX (mode))
1712 return op0;
1713
1714 /* See if this is something like X * C - X or vice versa or
1715 if the multiplication is written as a shift. If so, we can
1716 distribute and make a new multiply, shift, or maybe just
1717 have X (if C is 2 in the example above). But don't make
1718 something more expensive than we had before. */
1719
1720 if (SCALAR_INT_MODE_P (mode))
1721 {
1722 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1723 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1724 rtx lhs = op0, rhs = op1;
1725
1726 if (GET_CODE (lhs) == NEG)
1727 {
1728 coeff0l = -1;
1729 coeff0h = -1;
1730 lhs = XEXP (lhs, 0);
1731 }
1732 else if (GET_CODE (lhs) == MULT
1733 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1734 {
1735 coeff0l = INTVAL (XEXP (lhs, 1));
1736 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1737 lhs = XEXP (lhs, 0);
1738 }
1739 else if (GET_CODE (lhs) == ASHIFT
1740 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1741 && INTVAL (XEXP (lhs, 1)) >= 0
1742 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1743 {
1744 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1745 coeff0h = 0;
1746 lhs = XEXP (lhs, 0);
1747 }
1748
1749 if (GET_CODE (rhs) == NEG)
1750 {
1751 negcoeff1l = 1;
1752 negcoeff1h = 0;
1753 rhs = XEXP (rhs, 0);
1754 }
1755 else if (GET_CODE (rhs) == MULT
1756 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1757 {
1758 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1759 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1760 rhs = XEXP (rhs, 0);
1761 }
1762 else if (GET_CODE (rhs) == ASHIFT
1763 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1764 && INTVAL (XEXP (rhs, 1)) >= 0
1765 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1766 {
1767 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1768 negcoeff1h = -1;
1769 rhs = XEXP (rhs, 0);
1770 }
1771
1772 if (rtx_equal_p (lhs, rhs))
1773 {
1774 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1775 rtx coeff;
1776 unsigned HOST_WIDE_INT l;
1777 HOST_WIDE_INT h;
1778
1779 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1780 coeff = immed_double_const (l, h, mode);
1781
1782 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1783 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1784 ? tem : 0;
1785 }
1786 }
1787
1788 /* (a - (-b)) -> (a + b). True even for IEEE. */
1789 if (GET_CODE (op1) == NEG)
1790 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1791
1792 /* (-x - c) may be simplified as (-c - x). */
1793 if (GET_CODE (op0) == NEG
1794 && (GET_CODE (op1) == CONST_INT
1795 || GET_CODE (op1) == CONST_DOUBLE))
1796 {
1797 tem = simplify_unary_operation (NEG, mode, op1, mode);
1798 if (tem)
1799 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1800 }
1801
1802 /* Don't let a relocatable value get a negative coeff. */
1803 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1804 return simplify_gen_binary (PLUS, mode,
1805 op0,
1806 neg_const_int (mode, op1));
1807
1808 /* (x - (x & y)) -> (x & ~y) */
1809 if (GET_CODE (op1) == AND)
1810 {
1811 if (rtx_equal_p (op0, XEXP (op1, 0)))
1812 {
1813 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1814 GET_MODE (XEXP (op1, 1)));
1815 return simplify_gen_binary (AND, mode, op0, tem);
1816 }
1817 if (rtx_equal_p (op0, XEXP (op1, 1)))
1818 {
1819 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1820 GET_MODE (XEXP (op1, 0)));
1821 return simplify_gen_binary (AND, mode, op0, tem);
1822 }
1823 }
1824
1825 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1826 by reversing the comparison code if valid. */
1827 if (STORE_FLAG_VALUE == 1
1828 && trueop0 == const1_rtx
1829 && COMPARISON_P (op1)
1830 && (reversed = reversed_comparison (op1, mode)))
1831 return reversed;
1832
1833 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1834 if (GET_CODE (op1) == MULT
1835 && GET_CODE (XEXP (op1, 0)) == NEG)
1836 {
1837 rtx in1, in2;
1838
1839 in1 = XEXP (XEXP (op1, 0), 0);
1840 in2 = XEXP (op1, 1);
1841 return simplify_gen_binary (PLUS, mode,
1842 simplify_gen_binary (MULT, mode,
1843 in1, in2),
1844 op0);
1845 }
1846
1847 /* Canonicalize (minus (neg A) (mult B C)) to
1848 (minus (mult (neg B) C) A). */
1849 if (GET_CODE (op1) == MULT
1850 && GET_CODE (op0) == NEG)
1851 {
1852 rtx in1, in2;
1853
1854 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1855 in2 = XEXP (op1, 1);
1856 return simplify_gen_binary (MINUS, mode,
1857 simplify_gen_binary (MULT, mode,
1858 in1, in2),
1859 XEXP (op0, 0));
1860 }
1861
1862 /* If one of the operands is a PLUS or a MINUS, see if we can
1863 simplify this by the associative law. This will, for example,
1864 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1865 Don't use the associative law for floating point.
1866 The inaccuracy makes it nonassociative,
1867 and subtle programs can break if operations are associated. */
1868
1869 if (INTEGRAL_MODE_P (mode)
1870 && (plus_minus_operand_p (op0)
1871 || plus_minus_operand_p (op1))
1872 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1873 return tem;
1874 break;
1875
1876 case MULT:
1877 if (trueop1 == constm1_rtx)
1878 return simplify_gen_unary (NEG, mode, op0, mode);
1879
1880 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1881 x is NaN, since x * 0 is then also NaN. Nor is it valid
1882 when the mode has signed zeros, since multiplying a negative
1883 number by 0 will give -0, not 0. */
1884 if (!HONOR_NANS (mode)
1885 && !HONOR_SIGNED_ZEROS (mode)
1886 && trueop1 == CONST0_RTX (mode)
1887 && ! side_effects_p (op0))
1888 return op1;
1889
1890 /* In IEEE floating point, x*1 is not equivalent to x for
1891 signalling NaNs. */
1892 if (!HONOR_SNANS (mode)
1893 && trueop1 == CONST1_RTX (mode))
1894 return op0;
1895
1896 	  /* Convert multiply by constant power of two into shift.  */
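	  /* For example, (mult:SI X (const_int 8)) becomes
	     (ashift:SI X (const_int 3)), since exact_log2 (8) == 3.  */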
1898 if (GET_CODE (trueop1) == CONST_INT
1899 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1900 /* If the mode is larger than the host word size, and the
1901 uppermost bit is set, then this isn't a power of two due
1902 to implicit sign extension. */
1903 && (width <= HOST_BITS_PER_WIDE_INT
1904 || val != HOST_BITS_PER_WIDE_INT - 1))
1905 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1906
1907 /* Likewise for multipliers wider than a word. */
1908 else if (GET_CODE (trueop1) == CONST_DOUBLE
1909 && (GET_MODE (trueop1) == VOIDmode
1910 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1911 && GET_MODE (op0) == mode
1912 && CONST_DOUBLE_LOW (trueop1) == 0
1913 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1914 return simplify_gen_binary (ASHIFT, mode, op0,
1915 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1916
1917 /* x*2 is x+x and x*(-1) is -x */
1918 if (GET_CODE (trueop1) == CONST_DOUBLE
1919 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1920 && GET_MODE (op0) == mode)
1921 {
1922 REAL_VALUE_TYPE d;
1923 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1924
1925 if (REAL_VALUES_EQUAL (d, dconst2))
1926 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1927
1928 if (REAL_VALUES_EQUAL (d, dconstm1))
1929 return simplify_gen_unary (NEG, mode, op0, mode);
1930 }
1931
1932 /* Reassociate multiplication, but for floating point MULTs
1933 only when the user specifies unsafe math optimizations. */
1934 if (! FLOAT_MODE_P (mode)
1935 || flag_unsafe_math_optimizations)
1936 {
1937 tem = simplify_associative_operation (code, mode, op0, op1);
1938 if (tem)
1939 return tem;
1940 }
1941 break;
1942
1943 case IOR:
1944 if (trueop1 == const0_rtx)
1945 return op0;
1946 if (GET_CODE (trueop1) == CONST_INT
1947 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1948 == GET_MODE_MASK (mode)))
1949 return op1;
1950 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1951 return op0;
1952 /* A | (~A) -> -1 */
1953 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1954 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1955 && ! side_effects_p (op0)
1956 && SCALAR_INT_MODE_P (mode))
1957 return constm1_rtx;
1958
1959 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1960 if (GET_CODE (op1) == CONST_INT
1961 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1962 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1963 return op1;
1964
1965 /* Convert (A & B) | A to A. */
1966 if (GET_CODE (op0) == AND
1967 && (rtx_equal_p (XEXP (op0, 0), op1)
1968 || rtx_equal_p (XEXP (op0, 1), op1))
1969 && ! side_effects_p (XEXP (op0, 0))
1970 && ! side_effects_p (XEXP (op0, 1)))
1971 return op1;
1972
1973 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1974 mode size to (rotate A CX). */
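	  /* For example, in a 32-bit mode, (ior (ashift X (const_int 3))
	     (lshiftrt X (const_int 29))) becomes (rotate X (const_int 3)),
	     because 3 + 29 == 32.  */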
1975
1976 if (GET_CODE (op1) == ASHIFT
1977 || GET_CODE (op1) == SUBREG)
1978 {
1979 opleft = op1;
1980 opright = op0;
1981 }
1982 else
1983 {
1984 opright = op1;
1985 opleft = op0;
1986 }
1987
1988 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
1989 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
1990 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
1991 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1992 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
1993 == GET_MODE_BITSIZE (mode)))
1994 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
1995
1996 /* Same, but for ashift that has been "simplified" to a wider mode
1997 by simplify_shift_const. */
1998
1999 if (GET_CODE (opleft) == SUBREG
2000 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2001 && GET_CODE (opright) == LSHIFTRT
2002 && GET_CODE (XEXP (opright, 0)) == SUBREG
2003 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2004 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2005 && (GET_MODE_SIZE (GET_MODE (opleft))
2006 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2007 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2008 SUBREG_REG (XEXP (opright, 0)))
2009 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2010 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2011 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2012 == GET_MODE_BITSIZE (mode)))
2013 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2014 XEXP (SUBREG_REG (opleft), 1));
2015
2016 	  /* If we have (ior (and X C1) C2), simplify this by making
2017 C1 as small as possible if C1 actually changes. */
2018 if (GET_CODE (op1) == CONST_INT
2019 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2020 || INTVAL (op1) > 0)
2021 && GET_CODE (op0) == AND
2022 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2023 && GET_CODE (op1) == CONST_INT
2024 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2025 return simplify_gen_binary (IOR, mode,
2026 simplify_gen_binary
2027 (AND, mode, XEXP (op0, 0),
2028 GEN_INT (INTVAL (XEXP (op0, 1))
2029 & ~INTVAL (op1))),
2030 op1);
2031
2032 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2033 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2034 the PLUS does not affect any of the bits in OP1: then we can do
2035 the IOR as a PLUS and we can associate. This is valid if OP1
2036 can be safely shifted left C bits. */
2037 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2038 && GET_CODE (XEXP (op0, 0)) == PLUS
2039 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2040 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2041 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2042 {
2043 int count = INTVAL (XEXP (op0, 1));
2044 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2045
2046 if (mask >> count == INTVAL (trueop1)
2047 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2048 return simplify_gen_binary (ASHIFTRT, mode,
2049 plus_constant (XEXP (op0, 0), mask),
2050 XEXP (op0, 1));
2051 }
2052
2053 tem = simplify_associative_operation (code, mode, op0, op1);
2054 if (tem)
2055 return tem;
2056 break;
2057
2058 case XOR:
2059 if (trueop1 == const0_rtx)
2060 return op0;
2061 if (GET_CODE (trueop1) == CONST_INT
2062 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2063 == GET_MODE_MASK (mode)))
2064 return simplify_gen_unary (NOT, mode, op0, mode);
2065 if (rtx_equal_p (trueop0, trueop1)
2066 && ! side_effects_p (op0)
2067 && GET_MODE_CLASS (mode) != MODE_CC)
2068 return CONST0_RTX (mode);
2069
2070 /* Canonicalize XOR of the most significant bit to PLUS. */
2071 if ((GET_CODE (op1) == CONST_INT
2072 || GET_CODE (op1) == CONST_DOUBLE)
2073 && mode_signbit_p (mode, op1))
2074 return simplify_gen_binary (PLUS, mode, op0, op1);
2075 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
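	  /* For example, in QImode, where the sign bit is 0x80,
	     (xor (plus X (const_int -128)) (const_int 3)) becomes
	     (xor X (const_int -125)), since 0x80 ^ 0x03 == 0x83.  */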
2076 if ((GET_CODE (op1) == CONST_INT
2077 || GET_CODE (op1) == CONST_DOUBLE)
2078 && GET_CODE (op0) == PLUS
2079 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2080 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2081 && mode_signbit_p (mode, XEXP (op0, 1)))
2082 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2083 simplify_gen_binary (XOR, mode, op1,
2084 XEXP (op0, 1)));
2085
2086 /* If we are XORing two things that have no bits in common,
2087 convert them into an IOR. This helps to detect rotation encoded
2088 using those methods and possibly other simplifications. */
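	  /* For example, (xor (and X (const_int 15)) (and Y (const_int 240)))
	     becomes the corresponding IOR, because the two operands have no
	     nonzero bits in common.  */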
2089
2090 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2091 && (nonzero_bits (op0, mode)
2092 & nonzero_bits (op1, mode)) == 0)
2093 return (simplify_gen_binary (IOR, mode, op0, op1));
2094
2095 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2096 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2097 (NOT y). */
2098 {
2099 int num_negated = 0;
2100
2101 if (GET_CODE (op0) == NOT)
2102 num_negated++, op0 = XEXP (op0, 0);
2103 if (GET_CODE (op1) == NOT)
2104 num_negated++, op1 = XEXP (op1, 0);
2105
2106 if (num_negated == 2)
2107 return simplify_gen_binary (XOR, mode, op0, op1);
2108 else if (num_negated == 1)
2109 return simplify_gen_unary (NOT, mode,
2110 simplify_gen_binary (XOR, mode, op0, op1),
2111 mode);
2112 }
2113
2114 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2115 correspond to a machine insn or result in further simplifications
2116 if B is a constant. */
2117
2118 if (GET_CODE (op0) == AND
2119 && rtx_equal_p (XEXP (op0, 1), op1)
2120 && ! side_effects_p (op1))
2121 return simplify_gen_binary (AND, mode,
2122 simplify_gen_unary (NOT, mode,
2123 XEXP (op0, 0), mode),
2124 op1);
2125
2126 else if (GET_CODE (op0) == AND
2127 && rtx_equal_p (XEXP (op0, 0), op1)
2128 && ! side_effects_p (op1))
2129 return simplify_gen_binary (AND, mode,
2130 simplify_gen_unary (NOT, mode,
2131 XEXP (op0, 1), mode),
2132 op1);
2133
2134 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2135 comparison if STORE_FLAG_VALUE is 1. */
2136 if (STORE_FLAG_VALUE == 1
2137 && trueop1 == const1_rtx
2138 && COMPARISON_P (op0)
2139 && (reversed = reversed_comparison (op0, mode)))
2140 return reversed;
2141
2142 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2143 is (lt foo (const_int 0)), so we can perform the above
2144 simplification if STORE_FLAG_VALUE is 1. */
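	  /* For example, assuming a 32-bit SImode and STORE_FLAG_VALUE == 1,
	     (xor (lshiftrt X (const_int 31)) (const_int 1)) becomes
	     (ge X (const_int 0)).  */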
2145
2146 if (STORE_FLAG_VALUE == 1
2147 && trueop1 == const1_rtx
2148 && GET_CODE (op0) == LSHIFTRT
2149 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2150 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2151 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2152
2153 /* (xor (comparison foo bar) (const_int sign-bit))
2154 when STORE_FLAG_VALUE is the sign bit. */
2155 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2156 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2157 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2158 && trueop1 == const_true_rtx
2159 && COMPARISON_P (op0)
2160 && (reversed = reversed_comparison (op0, mode)))
2161 return reversed;
2162
2163 	  tem = simplify_associative_operation (code, mode, op0, op1);
2164 	  if (tem)
2165 	    return tem;
2166 	  break;
2169
2170 case AND:
2171 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2172 return trueop1;
2173 /* If we are turning off bits already known off in OP0, we need
2174 not do an AND. */
2175 if (GET_CODE (trueop1) == CONST_INT
2176 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2177 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2178 return op0;
2179 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2180 && GET_MODE_CLASS (mode) != MODE_CC)
2181 return op0;
2182 /* A & (~A) -> 0 */
2183 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2184 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2185 && ! side_effects_p (op0)
2186 && GET_MODE_CLASS (mode) != MODE_CC)
2187 return CONST0_RTX (mode);
2188
2189 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2190 there are no nonzero bits of C outside of X's mode. */
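	  /* For example, (and:SI (sign_extend:SI (reg:QI X)) (const_int 0x7f))
	     becomes (zero_extend:SI (and:QI (reg:QI X) (const_int 0x7f))).  */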
2191 if ((GET_CODE (op0) == SIGN_EXTEND
2192 || GET_CODE (op0) == ZERO_EXTEND)
2193 && GET_CODE (trueop1) == CONST_INT
2194 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2195 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2196 & INTVAL (trueop1)) == 0)
2197 {
2198 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2199 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2200 gen_int_mode (INTVAL (trueop1),
2201 imode));
2202 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2203 }
2204
2205 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2206 insn (and may simplify more). */
2207 if (GET_CODE (op0) == XOR
2208 && rtx_equal_p (XEXP (op0, 0), op1)
2209 && ! side_effects_p (op1))
2210 return simplify_gen_binary (AND, mode,
2211 simplify_gen_unary (NOT, mode,
2212 XEXP (op0, 1), mode),
2213 op1);
2214
2215 if (GET_CODE (op0) == XOR
2216 && rtx_equal_p (XEXP (op0, 1), op1)
2217 && ! side_effects_p (op1))
2218 return simplify_gen_binary (AND, mode,
2219 simplify_gen_unary (NOT, mode,
2220 XEXP (op0, 0), mode),
2221 op1);
2222
2223 /* Similarly for (~(A ^ B)) & A. */
2224 if (GET_CODE (op0) == NOT
2225 && GET_CODE (XEXP (op0, 0)) == XOR
2226 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2227 && ! side_effects_p (op1))
2228 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2229
2230 if (GET_CODE (op0) == NOT
2231 && GET_CODE (XEXP (op0, 0)) == XOR
2232 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2233 && ! side_effects_p (op1))
2234 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2235
2236 /* Convert (A | B) & A to A. */
2237 if (GET_CODE (op0) == IOR
2238 && (rtx_equal_p (XEXP (op0, 0), op1)
2239 || rtx_equal_p (XEXP (op0, 1), op1))
2240 && ! side_effects_p (XEXP (op0, 0))
2241 && ! side_effects_p (XEXP (op0, 1)))
2242 return op1;
2243
2244 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2245 ((A & N) + B) & M -> (A + B) & M
2246 Similarly if (N & M) == 0,
2247 ((A | N) + B) & M -> (A + B) & M
2248 and for - instead of + and/or ^ instead of |. */
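	  /* For example, with M = 0xff and N = 0x1ff (so N & M == M),
	     (and (plus (and A (const_int 0x1ff)) B) (const_int 0xff))
	     becomes (and (plus A B) (const_int 0xff)).  */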
2249 if (GET_CODE (trueop1) == CONST_INT
2250 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2251 && ~INTVAL (trueop1)
2252 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2253 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2254 {
2255 rtx pmop[2];
2256 int which;
2257
2258 pmop[0] = XEXP (op0, 0);
2259 pmop[1] = XEXP (op0, 1);
2260
2261 for (which = 0; which < 2; which++)
2262 {
2263 tem = pmop[which];
2264 switch (GET_CODE (tem))
2265 {
2266 case AND:
2267 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2268 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2269 == INTVAL (trueop1))
2270 pmop[which] = XEXP (tem, 0);
2271 break;
2272 case IOR:
2273 case XOR:
2274 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2275 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2276 pmop[which] = XEXP (tem, 0);
2277 break;
2278 default:
2279 break;
2280 }
2281 }
2282
2283 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2284 {
2285 tem = simplify_gen_binary (GET_CODE (op0), mode,
2286 pmop[0], pmop[1]);
2287 return simplify_gen_binary (code, mode, tem, op1);
2288 }
2289 }
2290 tem = simplify_associative_operation (code, mode, op0, op1);
2291 if (tem)
2292 return tem;
2293 break;
2294
2295 case UDIV:
2296 /* 0/x is 0 (or x&0 if x has side-effects). */
2297 if (trueop0 == CONST0_RTX (mode))
2298 {
2299 if (side_effects_p (op1))
2300 return simplify_gen_binary (AND, mode, op1, trueop0);
2301 return trueop0;
2302 }
2303 /* x/1 is x. */
2304 if (trueop1 == CONST1_RTX (mode))
2305 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2306 /* Convert divide by power of two into shift. */
2307 if (GET_CODE (trueop1) == CONST_INT
2308 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2309 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2310 break;
2311
2312 case DIV:
2313 /* Handle floating point and integers separately. */
2314 if (SCALAR_FLOAT_MODE_P (mode))
2315 {
2316 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2317 safe for modes with NaNs, since 0.0 / 0.0 will then be
2318 NaN rather than 0.0. Nor is it safe for modes with signed
2319 	     zeros, since dividing 0 by a negative number gives -0.0.  */
2320 if (trueop0 == CONST0_RTX (mode)
2321 && !HONOR_NANS (mode)
2322 && !HONOR_SIGNED_ZEROS (mode)
2323 && ! side_effects_p (op1))
2324 return op0;
2325 /* x/1.0 is x. */
2326 if (trueop1 == CONST1_RTX (mode)
2327 && !HONOR_SNANS (mode))
2328 return op0;
2329
2330 if (GET_CODE (trueop1) == CONST_DOUBLE
2331 && trueop1 != CONST0_RTX (mode))
2332 {
2333 REAL_VALUE_TYPE d;
2334 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2335
2336 /* x/-1.0 is -x. */
2337 if (REAL_VALUES_EQUAL (d, dconstm1)
2338 && !HONOR_SNANS (mode))
2339 return simplify_gen_unary (NEG, mode, op0, mode);
2340
2341 /* Change FP division by a constant into multiplication.
2342 Only do this with -funsafe-math-optimizations. */
2343 if (flag_unsafe_math_optimizations
2344 && !REAL_VALUES_EQUAL (d, dconst0))
2345 {
2346 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2347 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2348 return simplify_gen_binary (MULT, mode, op0, tem);
2349 }
2350 }
2351 }
2352 else
2353 {
2354 /* 0/x is 0 (or x&0 if x has side-effects). */
2355 if (trueop0 == CONST0_RTX (mode))
2356 {
2357 if (side_effects_p (op1))
2358 return simplify_gen_binary (AND, mode, op1, trueop0);
2359 return trueop0;
2360 }
2361 /* x/1 is x. */
2362 if (trueop1 == CONST1_RTX (mode))
2363 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2364 /* x/-1 is -x. */
2365 if (trueop1 == constm1_rtx)
2366 {
2367 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2368 return simplify_gen_unary (NEG, mode, x, mode);
2369 }
2370 }
2371 break;
2372
2373 case UMOD:
2374 /* 0%x is 0 (or x&0 if x has side-effects). */
2375 if (trueop0 == CONST0_RTX (mode))
2376 {
2377 if (side_effects_p (op1))
2378 return simplify_gen_binary (AND, mode, op1, trueop0);
2379 return trueop0;
2380 }
2381 	  /* x%1 is 0 (or x&0 if x has side-effects).  */
2382 if (trueop1 == CONST1_RTX (mode))
2383 {
2384 if (side_effects_p (op0))
2385 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2386 return CONST0_RTX (mode);
2387 }
2388 /* Implement modulus by power of two as AND. */
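	  /* For example, (umod X (const_int 8)) becomes (and X (const_int 7)).  */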
2389 if (GET_CODE (trueop1) == CONST_INT
2390 && exact_log2 (INTVAL (trueop1)) > 0)
2391 return simplify_gen_binary (AND, mode, op0,
2392 GEN_INT (INTVAL (op1) - 1));
2393 break;
2394
2395 case MOD:
2396 /* 0%x is 0 (or x&0 if x has side-effects). */
2397 if (trueop0 == CONST0_RTX (mode))
2398 {
2399 if (side_effects_p (op1))
2400 return simplify_gen_binary (AND, mode, op1, trueop0);
2401 return trueop0;
2402 }
2403 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2404 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2405 {
2406 if (side_effects_p (op0))
2407 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2408 return CONST0_RTX (mode);
2409 }
2410 break;
2411
2412 case ROTATERT:
2413 case ROTATE:
2414 case ASHIFTRT:
2415 	  /* Rotating or arithmetically right-shifting ~0 always results in ~0.  */
2416 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2417 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2418 && ! side_effects_p (op1))
2419 return op0;
2420
2421 /* Fall through.... */
2422
2423 case ASHIFT:
2424 case LSHIFTRT:
2425 if (trueop1 == CONST0_RTX (mode))
2426 return op0;
2427 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2428 return op0;
2429 break;
2430
2431 case SMIN:
2432 if (width <= HOST_BITS_PER_WIDE_INT
2433 && GET_CODE (trueop1) == CONST_INT
2434 	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2435 && ! side_effects_p (op0))
2436 return op1;
2437 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2438 return op0;
2439 tem = simplify_associative_operation (code, mode, op0, op1);
2440 if (tem)
2441 return tem;
2442 break;
2443
2444 case SMAX:
2445 if (width <= HOST_BITS_PER_WIDE_INT
2446 && GET_CODE (trueop1) == CONST_INT
2447 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2448 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2449 && ! side_effects_p (op0))
2450 return op1;
2451 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2452 return op0;
2453 tem = simplify_associative_operation (code, mode, op0, op1);
2454 if (tem)
2455 return tem;
2456 break;
2457
2458 case UMIN:
2459 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2460 return op1;
2461 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2462 return op0;
2463 tem = simplify_associative_operation (code, mode, op0, op1);
2464 if (tem)
2465 return tem;
2466 break;
2467
2468 case UMAX:
2469 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2470 return op1;
2471 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2472 return op0;
2473 tem = simplify_associative_operation (code, mode, op0, op1);
2474 if (tem)
2475 return tem;
2476 break;
2477
2478 case SS_PLUS:
2479 case US_PLUS:
2480 case SS_MINUS:
2481 case US_MINUS:
2482 /* ??? There are simplifications that can be done. */
2483 return 0;
2484
2485 case VEC_SELECT:
2486 if (!VECTOR_MODE_P (mode))
2487 {
2488 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2489 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2490 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2491 gcc_assert (XVECLEN (trueop1, 0) == 1);
2492 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2493
2494 if (GET_CODE (trueop0) == CONST_VECTOR)
2495 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2496 (trueop1, 0, 0)));
2497 }
2498 else
2499 {
2500 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2501 gcc_assert (GET_MODE_INNER (mode)
2502 == GET_MODE_INNER (GET_MODE (trueop0)));
2503 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2504
2505 if (GET_CODE (trueop0) == CONST_VECTOR)
2506 {
2507 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2508 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2509 rtvec v = rtvec_alloc (n_elts);
2510 unsigned int i;
2511
2512 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2513 for (i = 0; i < n_elts; i++)
2514 {
2515 rtx x = XVECEXP (trueop1, 0, i);
2516
2517 gcc_assert (GET_CODE (x) == CONST_INT);
2518 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2519 INTVAL (x));
2520 }
2521
2522 return gen_rtx_CONST_VECTOR (mode, v);
2523 }
2524 }
2525
2526 if (XVECLEN (trueop1, 0) == 1
2527 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2528 && GET_CODE (trueop0) == VEC_CONCAT)
2529 {
2530 rtx vec = trueop0;
2531 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2532
2533 /* Try to find the element in the VEC_CONCAT. */
2534 while (GET_MODE (vec) != mode
2535 && GET_CODE (vec) == VEC_CONCAT)
2536 {
2537 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2538 if (offset < vec_size)
2539 vec = XEXP (vec, 0);
2540 else
2541 {
2542 offset -= vec_size;
2543 vec = XEXP (vec, 1);
2544 }
2545 vec = avoid_constant_pool_reference (vec);
2546 }
2547
2548 if (GET_MODE (vec) == mode)
2549 return vec;
2550 }
2551
2552 return 0;
2553 case VEC_CONCAT:
2554 {
2555 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2556 ? GET_MODE (trueop0)
2557 : GET_MODE_INNER (mode));
2558 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2559 ? GET_MODE (trueop1)
2560 : GET_MODE_INNER (mode));
2561
2562 gcc_assert (VECTOR_MODE_P (mode));
2563 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2564 == GET_MODE_SIZE (mode));
2565
2566 if (VECTOR_MODE_P (op0_mode))
2567 gcc_assert (GET_MODE_INNER (mode)
2568 == GET_MODE_INNER (op0_mode));
2569 else
2570 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2571
2572 if (VECTOR_MODE_P (op1_mode))
2573 gcc_assert (GET_MODE_INNER (mode)
2574 == GET_MODE_INNER (op1_mode));
2575 else
2576 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2577
2578 if ((GET_CODE (trueop0) == CONST_VECTOR
2579 || GET_CODE (trueop0) == CONST_INT
2580 || GET_CODE (trueop0) == CONST_DOUBLE)
2581 && (GET_CODE (trueop1) == CONST_VECTOR
2582 || GET_CODE (trueop1) == CONST_INT
2583 || GET_CODE (trueop1) == CONST_DOUBLE))
2584 {
2585 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2586 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2587 rtvec v = rtvec_alloc (n_elts);
2588 unsigned int i;
2589 unsigned in_n_elts = 1;
2590
2591 if (VECTOR_MODE_P (op0_mode))
2592 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2593 for (i = 0; i < n_elts; i++)
2594 {
2595 if (i < in_n_elts)
2596 {
2597 if (!VECTOR_MODE_P (op0_mode))
2598 RTVEC_ELT (v, i) = trueop0;
2599 else
2600 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2601 }
2602 else
2603 {
2604 if (!VECTOR_MODE_P (op1_mode))
2605 RTVEC_ELT (v, i) = trueop1;
2606 else
2607 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2608 i - in_n_elts);
2609 }
2610 }
2611
2612 return gen_rtx_CONST_VECTOR (mode, v);
2613 }
2614 }
2615 return 0;
2616
2617 default:
2618 gcc_unreachable ();
2619 }
2620
2621 return 0;
2622 }
2623
2624 rtx
2625 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2626 rtx op0, rtx op1)
2627 {
2628 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2629 HOST_WIDE_INT val;
2630 unsigned int width = GET_MODE_BITSIZE (mode);
2631
2632 if (VECTOR_MODE_P (mode)
2633 && code != VEC_CONCAT
2634 && GET_CODE (op0) == CONST_VECTOR
2635 && GET_CODE (op1) == CONST_VECTOR)
2636 {
2637 unsigned n_elts = GET_MODE_NUNITS (mode);
2638 enum machine_mode op0mode = GET_MODE (op0);
2639 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2640 enum machine_mode op1mode = GET_MODE (op1);
2641 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2642 rtvec v = rtvec_alloc (n_elts);
2643 unsigned int i;
2644
2645 gcc_assert (op0_n_elts == n_elts);
2646 gcc_assert (op1_n_elts == n_elts);
2647 for (i = 0; i < n_elts; i++)
2648 {
2649 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2650 CONST_VECTOR_ELT (op0, i),
2651 CONST_VECTOR_ELT (op1, i));
2652 if (!x)
2653 return 0;
2654 RTVEC_ELT (v, i) = x;
2655 }
2656
2657 return gen_rtx_CONST_VECTOR (mode, v);
2658 }
2659
2660 if (VECTOR_MODE_P (mode)
2661 && code == VEC_CONCAT
2662 && CONSTANT_P (op0) && CONSTANT_P (op1))
2663 {
2664 unsigned n_elts = GET_MODE_NUNITS (mode);
2665 rtvec v = rtvec_alloc (n_elts);
2666
2667 gcc_assert (n_elts >= 2);
2668 if (n_elts == 2)
2669 {
2670 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2671 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2672
2673 RTVEC_ELT (v, 0) = op0;
2674 RTVEC_ELT (v, 1) = op1;
2675 }
2676 else
2677 {
2678 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2679 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2680 unsigned i;
2681
2682 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2683 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2684 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2685
2686 for (i = 0; i < op0_n_elts; ++i)
2687 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2688 for (i = 0; i < op1_n_elts; ++i)
2689 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2690 }
2691
2692 return gen_rtx_CONST_VECTOR (mode, v);
2693 }
2694
2695 if (SCALAR_FLOAT_MODE_P (mode)
2696 && GET_CODE (op0) == CONST_DOUBLE
2697 && GET_CODE (op1) == CONST_DOUBLE
2698 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2699 {
2700 if (code == AND
2701 || code == IOR
2702 || code == XOR)
2703 {
2704 long tmp0[4];
2705 long tmp1[4];
2706 REAL_VALUE_TYPE r;
2707 int i;
2708
2709 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2710 GET_MODE (op0));
2711 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2712 GET_MODE (op1));
2713 for (i = 0; i < 4; i++)
2714 {
2715 switch (code)
2716 {
2717 case AND:
2718 tmp0[i] &= tmp1[i];
2719 break;
2720 case IOR:
2721 tmp0[i] |= tmp1[i];
2722 break;
2723 case XOR:
2724 tmp0[i] ^= tmp1[i];
2725 break;
2726 default:
2727 gcc_unreachable ();
2728 }
2729 }
2730 real_from_target (&r, tmp0, mode);
2731 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2732 }
2733 else
2734 {
2735 REAL_VALUE_TYPE f0, f1, value, result;
2736 bool inexact;
2737
2738 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2739 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2740 real_convert (&f0, mode, &f0);
2741 real_convert (&f1, mode, &f1);
2742
2743 if (HONOR_SNANS (mode)
2744 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2745 return 0;
2746
2747 if (code == DIV
2748 && REAL_VALUES_EQUAL (f1, dconst0)
2749 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2750 return 0;
2751
2752 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2753 && flag_trapping_math
2754 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2755 {
2756 int s0 = REAL_VALUE_NEGATIVE (f0);
2757 int s1 = REAL_VALUE_NEGATIVE (f1);
2758
2759 switch (code)
2760 {
2761 case PLUS:
2762 /* Inf + -Inf = NaN plus exception. */
2763 if (s0 != s1)
2764 return 0;
2765 break;
2766 case MINUS:
2767 /* Inf - Inf = NaN plus exception. */
2768 if (s0 == s1)
2769 return 0;
2770 break;
2771 case DIV:
2772 /* Inf / Inf = NaN plus exception. */
2773 return 0;
2774 default:
2775 break;
2776 }
2777 }
2778
2779 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2780 && flag_trapping_math
2781 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2782 || (REAL_VALUE_ISINF (f1)
2783 && REAL_VALUES_EQUAL (f0, dconst0))))
2784 /* Inf * 0 = NaN plus exception. */
2785 return 0;
2786
2787 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2788 &f0, &f1);
2789 real_convert (&result, mode, &value);
2790
2791 /* Don't constant fold this floating point operation if
2792 the result has overflowed and flag_trapping_math. */
2793
2794 if (flag_trapping_math
2795 && MODE_HAS_INFINITIES (mode)
2796 && REAL_VALUE_ISINF (result)
2797 && !REAL_VALUE_ISINF (f0)
2798 && !REAL_VALUE_ISINF (f1))
2799 /* Overflow plus exception. */
2800 return 0;
2801
2802 /* Don't constant fold this floating point operation if the
2803 	     result may depend upon the run-time rounding mode and
2804 flag_rounding_math is set, or if GCC's software emulation
2805 is unable to accurately represent the result. */
2806
2807 if ((flag_rounding_math
2808 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2809 && !flag_unsafe_math_optimizations))
2810 && (inexact || !real_identical (&result, &value)))
2811 return NULL_RTX;
2812
2813 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2814 }
2815 }
2816
2817 /* We can fold some multi-word operations. */
2818 if (GET_MODE_CLASS (mode) == MODE_INT
2819 && width == HOST_BITS_PER_WIDE_INT * 2
2820 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2821 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2822 {
2823 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2824 HOST_WIDE_INT h1, h2, hv, ht;
2825
2826 if (GET_CODE (op0) == CONST_DOUBLE)
2827 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2828 else
2829 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2830
2831 if (GET_CODE (op1) == CONST_DOUBLE)
2832 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2833 else
2834 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2835
2836 switch (code)
2837 {
2838 case MINUS:
2839 /* A - B == A + (-B). */
2840 neg_double (l2, h2, &lv, &hv);
2841 l2 = lv, h2 = hv;
2842
2843 /* Fall through.... */
2844
2845 case PLUS:
2846 add_double (l1, h1, l2, h2, &lv, &hv);
2847 break;
2848
2849 case MULT:
2850 mul_double (l1, h1, l2, h2, &lv, &hv);
2851 break;
2852
2853 case DIV:
2854 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2855 &lv, &hv, &lt, &ht))
2856 return 0;
2857 break;
2858
2859 case MOD:
2860 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2861 &lt, &ht, &lv, &hv))
2862 return 0;
2863 break;
2864
2865 case UDIV:
2866 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2867 &lv, &hv, &lt, &ht))
2868 return 0;
2869 break;
2870
2871 case UMOD:
2872 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2873 &lt, &ht, &lv, &hv))
2874 return 0;
2875 break;
2876
2877 case AND:
2878 lv = l1 & l2, hv = h1 & h2;
2879 break;
2880
2881 case IOR:
2882 lv = l1 | l2, hv = h1 | h2;
2883 break;
2884
2885 case XOR:
2886 lv = l1 ^ l2, hv = h1 ^ h2;
2887 break;
2888
2889 case SMIN:
2890 if (h1 < h2
2891 || (h1 == h2
2892 && ((unsigned HOST_WIDE_INT) l1
2893 < (unsigned HOST_WIDE_INT) l2)))
2894 lv = l1, hv = h1;
2895 else
2896 lv = l2, hv = h2;
2897 break;
2898
2899 case SMAX:
2900 if (h1 > h2
2901 || (h1 == h2
2902 && ((unsigned HOST_WIDE_INT) l1
2903 > (unsigned HOST_WIDE_INT) l2)))
2904 lv = l1, hv = h1;
2905 else
2906 lv = l2, hv = h2;
2907 break;
2908
2909 case UMIN:
2910 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2911 || (h1 == h2
2912 && ((unsigned HOST_WIDE_INT) l1
2913 < (unsigned HOST_WIDE_INT) l2)))
2914 lv = l1, hv = h1;
2915 else
2916 lv = l2, hv = h2;
2917 break;
2918
2919 case UMAX:
2920 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2921 || (h1 == h2
2922 && ((unsigned HOST_WIDE_INT) l1
2923 > (unsigned HOST_WIDE_INT) l2)))
2924 lv = l1, hv = h1;
2925 else
2926 lv = l2, hv = h2;
2927 break;
2928
2929 case LSHIFTRT: case ASHIFTRT:
2930 case ASHIFT:
2931 case ROTATE: case ROTATERT:
2932 if (SHIFT_COUNT_TRUNCATED)
2933 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2934
2935 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2936 return 0;
2937
2938 if (code == LSHIFTRT || code == ASHIFTRT)
2939 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2940 code == ASHIFTRT);
2941 else if (code == ASHIFT)
2942 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2943 else if (code == ROTATE)
2944 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2945 else /* code == ROTATERT */
2946 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2947 break;
2948
2949 default:
2950 return 0;
2951 }
2952
2953 return immed_double_const (lv, hv, mode);
2954 }
2955
2956 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2957 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2958 {
2959 /* Get the integer argument values in two forms:
2960 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2961
2962 arg0 = INTVAL (op0);
2963 arg1 = INTVAL (op1);
2964
2965 if (width < HOST_BITS_PER_WIDE_INT)
2966 {
2967 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2968 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2969
2970 arg0s = arg0;
2971 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2972 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2973
2974 arg1s = arg1;
2975 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2976 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2977 }
2978 else
2979 {
2980 arg0s = arg0;
2981 arg1s = arg1;
2982 }
2983
2984 /* Compute the value of the arithmetic. */
2985
2986 switch (code)
2987 {
2988 case PLUS:
2989 val = arg0s + arg1s;
2990 break;
2991
2992 case MINUS:
2993 val = arg0s - arg1s;
2994 break;
2995
2996 case MULT:
2997 val = arg0s * arg1s;
2998 break;
2999
3000 case DIV:
3001 if (arg1s == 0
3002 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3003 && arg1s == -1))
3004 return 0;
3005 val = arg0s / arg1s;
3006 break;
3007
3008 case MOD:
3009 if (arg1s == 0
3010 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3011 && arg1s == -1))
3012 return 0;
3013 val = arg0s % arg1s;
3014 break;
3015
3016 case UDIV:
3017 if (arg1 == 0
3018 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3019 && arg1s == -1))
3020 return 0;
3021 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3022 break;
3023
3024 case UMOD:
3025 if (arg1 == 0
3026 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3027 && arg1s == -1))
3028 return 0;
3029 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3030 break;
3031
3032 case AND:
3033 val = arg0 & arg1;
3034 break;
3035
3036 case IOR:
3037 val = arg0 | arg1;
3038 break;
3039
3040 case XOR:
3041 val = arg0 ^ arg1;
3042 break;
3043
3044 case LSHIFTRT:
3045 case ASHIFT:
3046 case ASHIFTRT:
3047 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3048 the value is in range. We can't return any old value for
3049 out-of-range arguments because either the middle-end (via
3050 shift_truncation_mask) or the back-end might be relying on
3051 target-specific knowledge. Nor can we rely on
3052 shift_truncation_mask, since the shift might not be part of an
3053 ashlM3, lshrM3 or ashrM3 instruction. */
3054 if (SHIFT_COUNT_TRUNCATED)
3055 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3056 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3057 return 0;
3058
3059 val = (code == ASHIFT
3060 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3061 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3062
3063 /* Sign-extend the result for arithmetic right shifts. */
3064 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3065 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3066 break;
3067
3068 case ROTATERT:
3069 if (arg1 < 0)
3070 return 0;
3071
3072 arg1 %= width;
3073 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3074 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3075 break;
3076
3077 case ROTATE:
3078 if (arg1 < 0)
3079 return 0;
3080
3081 arg1 %= width;
3082 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3083 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3084 break;
3085
3086 case COMPARE:
3087 /* Do nothing here. */
3088 return 0;
3089
3090 case SMIN:
3091 val = arg0s <= arg1s ? arg0s : arg1s;
3092 break;
3093
3094 case UMIN:
3095 val = ((unsigned HOST_WIDE_INT) arg0
3096 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3097 break;
3098
3099 case SMAX:
3100 val = arg0s > arg1s ? arg0s : arg1s;
3101 break;
3102
3103 case UMAX:
3104 val = ((unsigned HOST_WIDE_INT) arg0
3105 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3106 break;
3107
3108 case SS_PLUS:
3109 case US_PLUS:
3110 case SS_MINUS:
3111 case US_MINUS:
3112 /* ??? There are simplifications that can be done. */
3113 return 0;
3114
3115 default:
3116 gcc_unreachable ();
3117 }
3118
3119 return gen_int_mode (val, mode);
3120 }
3121
3122 return NULL_RTX;
3123 }
3124
3125
3126 \f
3127 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3128 PLUS or MINUS.
3129
3130 	   Rather than test for specific cases, we do this by a brute-force method
3131 and do all possible simplifications until no more changes occur. Then
3132 we rebuild the operation. */
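/* For example, (plus (plus A (const_int 3)) (minus (const_int 5) A))
   expands into the terms {A, 3, 5, -A}; A cancels against -A and the
   constants fold, leaving (const_int 8).  */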
3133
3134 struct simplify_plus_minus_op_data
3135 {
3136 rtx op;
3137 short neg;
3138 short ix;
3139 };
3140
3141 static int
3142 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3143 {
3144 const struct simplify_plus_minus_op_data *d1 = p1;
3145 const struct simplify_plus_minus_op_data *d2 = p2;
3146 int result;
3147
3148 result = (commutative_operand_precedence (d2->op)
3149 - commutative_operand_precedence (d1->op));
3150 if (result)
3151 return result;
3152 return d1->ix - d2->ix;
3153 }
3154
3155 static rtx
3156 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3157 rtx op1)
3158 {
3159 struct simplify_plus_minus_op_data ops[8];
3160 rtx result, tem;
3161 int n_ops = 2, input_ops = 2;
3162 int first, changed, canonicalized = 0;
3163 int i, j;
3164
3165 memset (ops, 0, sizeof ops);
3166
3167 /* Set up the two operands and then expand them until nothing has been
3168 changed. If we run out of room in our array, give up; this should
3169 almost never happen. */
3170
3171 ops[0].op = op0;
3172 ops[0].neg = 0;
3173 ops[1].op = op1;
3174 ops[1].neg = (code == MINUS);
3175
3176 do
3177 {
3178 changed = 0;
3179
3180 for (i = 0; i < n_ops; i++)
3181 {
3182 rtx this_op = ops[i].op;
3183 int this_neg = ops[i].neg;
3184 enum rtx_code this_code = GET_CODE (this_op);
3185
3186 switch (this_code)
3187 {
3188 case PLUS:
3189 case MINUS:
3190 if (n_ops == 7)
3191 return NULL_RTX;
3192
3193 ops[n_ops].op = XEXP (this_op, 1);
3194 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3195 n_ops++;
3196
3197 ops[i].op = XEXP (this_op, 0);
3198 input_ops++;
3199 changed = 1;
3200 canonicalized |= this_neg;
3201 break;
3202
3203 case NEG:
3204 ops[i].op = XEXP (this_op, 0);
3205 ops[i].neg = ! this_neg;
3206 changed = 1;
3207 canonicalized = 1;
3208 break;
3209
3210 case CONST:
3211 if (n_ops < 7
3212 && GET_CODE (XEXP (this_op, 0)) == PLUS
3213 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3214 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3215 {
3216 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3217 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3218 ops[n_ops].neg = this_neg;
3219 n_ops++;
3220 changed = 1;
3221 canonicalized = 1;
3222 }
3223 break;
3224
3225 case NOT:
3226 /* ~a -> (-a - 1) */
3227 if (n_ops != 7)
3228 {
3229 ops[n_ops].op = constm1_rtx;
3230 ops[n_ops++].neg = this_neg;
3231 ops[i].op = XEXP (this_op, 0);
3232 ops[i].neg = !this_neg;
3233 changed = 1;
3234 canonicalized = 1;
3235 }
3236 break;
3237
3238 case CONST_INT:
3239 if (this_neg)
3240 {
3241 ops[i].op = neg_const_int (mode, this_op);
3242 ops[i].neg = 0;
3243 changed = 1;
3244 canonicalized = 1;
3245 }
3246 break;
3247
3248 default:
3249 break;
3250 }
3251 }
3252 }
3253 while (changed);
3254
3255 gcc_assert (n_ops >= 2);
3256 if (!canonicalized)
3257 {
3258 int n_constants = 0;
3259
3260 for (i = 0; i < n_ops; i++)
3261 if (GET_CODE (ops[i].op) == CONST_INT)
3262 n_constants++;
3263
3264 if (n_constants <= 1)
3265 return NULL_RTX;
3266 }
3267
3268 /* If we only have two operands, we can avoid the loops. */
3269 if (n_ops == 2)
3270 {
3271 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3272 rtx lhs, rhs;
3273
3274 /* Get the two operands. Be careful with the order, especially for
3275 the cases where code == MINUS. */
3276 if (ops[0].neg && ops[1].neg)
3277 {
3278 lhs = gen_rtx_NEG (mode, ops[0].op);
3279 rhs = ops[1].op;
3280 }
3281 else if (ops[0].neg)
3282 {
3283 lhs = ops[1].op;
3284 rhs = ops[0].op;
3285 }
3286 else
3287 {
3288 lhs = ops[0].op;
3289 rhs = ops[1].op;
3290 }
3291
3292 return simplify_const_binary_operation (code, mode, lhs, rhs);
3293 }
3294
3295 /* Now simplify each pair of operands until nothing changes. The first
3296 time through just simplify constants against each other. */
3297
3298 first = 1;
3299 do
3300 {
3301 changed = first;
3302
3303 for (i = 0; i < n_ops - 1; i++)
3304 for (j = i + 1; j < n_ops; j++)
3305 {
3306 rtx lhs = ops[i].op, rhs = ops[j].op;
3307 int lneg = ops[i].neg, rneg = ops[j].neg;
3308
3309 if (lhs != 0 && rhs != 0
3310 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3311 {
3312 enum rtx_code ncode = PLUS;
3313
3314 if (lneg != rneg)
3315 {
3316 ncode = MINUS;
3317 if (lneg)
3318 tem = lhs, lhs = rhs, rhs = tem;
3319 }
3320 else if (swap_commutative_operands_p (lhs, rhs))
3321 tem = lhs, lhs = rhs, rhs = tem;
3322
3323 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3324 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3325 {
3326 rtx tem_lhs, tem_rhs;
3327
3328 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3329 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3330 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3331
3332 if (tem && !CONSTANT_P (tem))
3333 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3334 }
3335 else
3336 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3337
3338 /* Reject "simplifications" that just wrap the two
3339 arguments in a CONST. Failure to do so can result
3340 in infinite recursion with simplify_binary_operation
3341 when it calls us to simplify CONST operations. */
3342 if (tem
3343 && ! (GET_CODE (tem) == CONST
3344 && GET_CODE (XEXP (tem, 0)) == ncode
3345 && XEXP (XEXP (tem, 0), 0) == lhs
3346 && XEXP (XEXP (tem, 0), 1) == rhs)
3347 /* Don't allow -x + -1 -> ~x simplifications in the
3348 first pass. This allows us the chance to combine
3349 the -1 with other constants. */
3350 && ! (first
3351 && GET_CODE (tem) == NOT
3352 && XEXP (tem, 0) == rhs))
3353 {
3354 lneg &= rneg;
3355 if (GET_CODE (tem) == NEG)
3356 tem = XEXP (tem, 0), lneg = !lneg;
3357 if (GET_CODE (tem) == CONST_INT && lneg)
3358 tem = neg_const_int (mode, tem), lneg = 0;
3359
3360 ops[i].op = tem;
3361 ops[i].neg = lneg;
3362 ops[j].op = NULL_RTX;
3363 changed = 1;
3364 }
3365 }
3366 }
3367
3368 first = 0;
3369 }
3370 while (changed);
3371
3372 /* Pack all the operands to the lower-numbered entries. */
3373 for (i = 0, j = 0; j < n_ops; j++)
3374 if (ops[j].op)
3375 {
3376 ops[i] = ops[j];
3377 /* Stabilize sort. */
3378 ops[i].ix = i;
3379 i++;
3380 }
3381 n_ops = i;
3382
3383 /* Sort the operations based on swap_commutative_operands_p. */
3384 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3385
3386 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3387 if (n_ops == 2
3388 && GET_CODE (ops[1].op) == CONST_INT
3389 && CONSTANT_P (ops[0].op)
3390 && ops[0].neg)
3391 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3392
3393 /* We suppressed creation of trivial CONST expressions in the
3394 combination loop to avoid recursion. Create one manually now.
3395 The combination loop should have ensured that there is exactly
3396 one CONST_INT, and the sort will have ensured that it is last
3397 in the array and that any other constant will be next-to-last. */
3398
3399 if (n_ops > 1
3400 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3401 && CONSTANT_P (ops[n_ops - 2].op))
3402 {
3403 rtx value = ops[n_ops - 1].op;
3404 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3405 value = neg_const_int (mode, value);
3406 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3407 n_ops--;
3408 }
3409
3410 /* Put a non-negated operand first, if possible. */
3411
3412 for (i = 0; i < n_ops && ops[i].neg; i++)
3413 continue;
3414 if (i == n_ops)
3415 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3416 else if (i != 0)
3417 {
3418 tem = ops[0].op;
3419 ops[0] = ops[i];
3420 ops[i].op = tem;
3421 ops[i].neg = 1;
3422 }
3423
3424 /* Now make the result by performing the requested operations. */
3425 result = ops[0].op;
3426 for (i = 1; i < n_ops; i++)
3427 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3428 mode, result, ops[i].op);
3429
3430 return result;
3431 }
3432
3433 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3434 static bool
3435 plus_minus_operand_p (rtx x)
3436 {
3437 return GET_CODE (x) == PLUS
3438 || GET_CODE (x) == MINUS
3439 || (GET_CODE (x) == CONST
3440 && GET_CODE (XEXP (x, 0)) == PLUS
3441 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3442 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3443 }
3444
3445 /* Like simplify_binary_operation except used for relational operators.
3446 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3447 not also be VOIDmode.
3448
3449 	   CMP_MODE specifies the mode in which the comparison is done, so it is
3450 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3451 the operands or, if both are VOIDmode, the operands are compared in
3452 "infinite precision". */
3453 rtx
3454 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3455 enum machine_mode cmp_mode, rtx op0, rtx op1)
3456 {
3457 rtx tem, trueop0, trueop1;
3458
3459 if (cmp_mode == VOIDmode)
3460 cmp_mode = GET_MODE (op0);
3461 if (cmp_mode == VOIDmode)
3462 cmp_mode = GET_MODE (op1);
3463
3464 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3465 if (tem)
3466 {
3467 if (SCALAR_FLOAT_MODE_P (mode))
3468 {
3469 if (tem == const0_rtx)
3470 return CONST0_RTX (mode);
3471 #ifdef FLOAT_STORE_FLAG_VALUE
3472 {
3473 REAL_VALUE_TYPE val;
3474 val = FLOAT_STORE_FLAG_VALUE (mode);
3475 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3476 }
3477 #else
3478 return NULL_RTX;
3479 #endif
3480 }
3481 if (VECTOR_MODE_P (mode))
3482 {
3483 if (tem == const0_rtx)
3484 return CONST0_RTX (mode);
3485 #ifdef VECTOR_STORE_FLAG_VALUE
3486 {
3487 int i, units;
3488 rtvec v;
3489
3490 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3491 if (val == NULL_RTX)
3492 return NULL_RTX;
3493 if (val == const1_rtx)
3494 return CONST1_RTX (mode);
3495
3496 units = GET_MODE_NUNITS (mode);
3497 v = rtvec_alloc (units);
3498 for (i = 0; i < units; i++)
3499 RTVEC_ELT (v, i) = val;
3500 return gen_rtx_raw_CONST_VECTOR (mode, v);
3501 }
3502 #else
3503 return NULL_RTX;
3504 #endif
3505 }
3506
3507 return tem;
3508 }
3509
3510 /* For the following tests, ensure const0_rtx is op1. */
3511 if (swap_commutative_operands_p (op0, op1)
3512 || (op0 == const0_rtx && op1 != const0_rtx))
3513 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3514
3515 /* If op0 is a compare, extract the comparison arguments from it. */
3516 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3517 return simplify_relational_operation (code, mode, VOIDmode,
3518 XEXP (op0, 0), XEXP (op0, 1));
3519
3520 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3521 || CC0_P (op0))
3522 return NULL_RTX;
3523
3524 trueop0 = avoid_constant_pool_reference (op0);
3525 trueop1 = avoid_constant_pool_reference (op1);
3526 return simplify_relational_operation_1 (code, mode, cmp_mode,
3527 trueop0, trueop1);
3528 }
3529
3530 /* This part of simplify_relational_operation is only used when CMP_MODE
3531 is not in class MODE_CC (i.e. it is a real comparison).
3532
3533 	   MODE is the mode of the result, while CMP_MODE specifies the mode
3534 	   in which the comparison is done, so it is the mode of the operands.  */
3535
3536 static rtx
3537 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3538 enum machine_mode cmp_mode, rtx op0, rtx op1)
3539 {
3540 enum rtx_code op0code = GET_CODE (op0);
3541
3542 if (GET_CODE (op1) == CONST_INT)
3543 {
3544 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3545 {
3546 /* If op0 is a comparison, extract the comparison arguments
3547 from it. */
3548 if (code == NE)
3549 {
3550 if (GET_MODE (op0) == mode)
3551 return simplify_rtx (op0);
3552 else
3553 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3554 XEXP (op0, 0), XEXP (op0, 1));
3555 }
3556 else if (code == EQ)
3557 {
3558 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3559 if (new_code != UNKNOWN)
3560 return simplify_gen_relational (new_code, mode, VOIDmode,
3561 XEXP (op0, 0), XEXP (op0, 1));
3562 }
3563 }
3564 }
3565
3566 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
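  /* For example, (eq (plus X (const_int 3)) (const_int 10)) becomes
     (eq X (const_int 7)).  */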
3567 if ((code == EQ || code == NE)
3568 && (op0code == PLUS || op0code == MINUS)
3569 && CONSTANT_P (op1)
3570 && CONSTANT_P (XEXP (op0, 1))
3571 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3572 {
3573 rtx x = XEXP (op0, 0);
3574 rtx c = XEXP (op0, 1);
3575
3576 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3577 cmp_mode, op1, c);
3578 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3579 }
3580
3581 	  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3582 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3583 if (code == NE
3584 && op1 == const0_rtx
3585 && GET_MODE_CLASS (mode) == MODE_INT
3586 && cmp_mode != VOIDmode
3587 /* ??? Work-around BImode bugs in the ia64 backend. */
3588 && mode != BImode
3589 && cmp_mode != BImode
3590 && nonzero_bits (op0, cmp_mode) == 1
3591 && STORE_FLAG_VALUE == 1)
3592 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3593 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3594 : lowpart_subreg (mode, op0, cmp_mode);
3595
3596 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3597 if ((code == EQ || code == NE)
3598 && op1 == const0_rtx
3599 && op0code == XOR)
3600 return simplify_gen_relational (code, mode, cmp_mode,
3601 XEXP (op0, 0), XEXP (op0, 1));
3602
3603 /* (eq/ne (xor x y) x) simplifies to (eq/ne x 0). */
3604 if ((code == EQ || code == NE)
3605 && op0code == XOR
3606 && rtx_equal_p (XEXP (op0, 0), op1)
3607 && !side_effects_p (XEXP (op0, 1)))
3608 return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
3609 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne y 0). */
3610 if ((code == EQ || code == NE)
3611 && op0code == XOR
3612 && rtx_equal_p (XEXP (op0, 1), op1)
3613 && !side_effects_p (XEXP (op0, 0)))
3614 return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
3615
3616 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3617 if ((code == EQ || code == NE)
3618 && op0code == XOR
3619 && (GET_CODE (op1) == CONST_INT
3620 || GET_CODE (op1) == CONST_DOUBLE)
3621 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3622 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3623 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3624 simplify_gen_binary (XOR, cmp_mode,
3625 XEXP (op0, 1), op1));
3626
3627 return NULL_RTX;
3628 }
3629
3630 /* Check if the given comparison (done in the given MODE) is actually a
3631 tautology or a contradiction.
3632 If no simplification is possible, this function returns zero.
3633 Otherwise, it returns either const_true_rtx or const0_rtx. */
3634
3635 rtx
3636 simplify_const_relational_operation (enum rtx_code code,
3637 enum machine_mode mode,
3638 rtx op0, rtx op1)
3639 {
3640 int equal, op0lt, op0ltu, op1lt, op1ltu;
3641 rtx tem;
3642 rtx trueop0;
3643 rtx trueop1;
3644
3645 gcc_assert (mode != VOIDmode
3646 || (GET_MODE (op0) == VOIDmode
3647 && GET_MODE (op1) == VOIDmode));
3648
3649 /* If op0 is a compare, extract the comparison arguments from it. */
3650 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3651 {
3652 op1 = XEXP (op0, 1);
3653 op0 = XEXP (op0, 0);
3654
3655 if (GET_MODE (op0) != VOIDmode)
3656 mode = GET_MODE (op0);
3657 else if (GET_MODE (op1) != VOIDmode)
3658 mode = GET_MODE (op1);
3659 else
3660 return 0;
3661 }
3662
3663 /* We can't simplify MODE_CC values since we don't know what the
3664 actual comparison is. */
3665 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3666 return 0;
3667
3668 /* Make sure the constant is second. */
3669 if (swap_commutative_operands_p (op0, op1))
3670 {
3671 tem = op0, op0 = op1, op1 = tem;
3672 code = swap_condition (code);
3673 }
3674
3675 trueop0 = avoid_constant_pool_reference (op0);
3676 trueop1 = avoid_constant_pool_reference (op1);
3677
3678 /* For integer comparisons of A and B maybe we can simplify A - B and can
3679 then simplify a comparison of that with zero. If A and B are both either
3680 a register or a CONST_INT, this can't help; testing for these cases will
3681 prevent infinite recursion here and speed things up.
3682
3683 If CODE is an unsigned comparison, then we can never do this optimization,
3684 because it gives an incorrect result if the subtraction wraps around zero.
3685 ANSI C defines unsigned operations such that they never overflow, and
3686 thus such cases can not be ignored; but we cannot do it even for
3687 signed comparisons for languages such as Java, so test flag_wrapv. */
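  /* For example, assuming signed overflow is undefined (!flag_wrapv),
     (gt (plus X (const_int 2)) (plus X (const_int 1))) reduces to
     comparing (const_int 1) against zero and folds to const_true_rtx.  */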
3688
3689 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3690 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3691 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3692 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3693 /* We cannot do this for == or != if tem is a nonzero address. */
3694 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3695 && code != GTU && code != GEU && code != LTU && code != LEU)
3696 return simplify_const_relational_operation (signed_condition (code),
3697 mode, tem, const0_rtx);
3698
3699 if (flag_unsafe_math_optimizations && code == ORDERED)
3700 return const_true_rtx;
3701
3702 if (flag_unsafe_math_optimizations && code == UNORDERED)
3703 return const0_rtx;
3704
3705 /* For modes without NaNs, if the two operands are equal, we know the
3706 result except if they have side-effects. */
3707 if (! HONOR_NANS (GET_MODE (trueop0))
3708 && rtx_equal_p (trueop0, trueop1)
3709 && ! side_effects_p (trueop0))
3710 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3711
3712 /* If the operands are floating-point constants, see if we can fold
3713 the result. */
3714 else if (GET_CODE (trueop0) == CONST_DOUBLE
3715 && GET_CODE (trueop1) == CONST_DOUBLE
3716 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3717 {
3718 REAL_VALUE_TYPE d0, d1;
3719
3720 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3721 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3722
3723 /* Comparisons are unordered iff at least one of the values is NaN. */
3724 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3725 switch (code)
3726 {
3727 case UNEQ:
3728 case UNLT:
3729 case UNGT:
3730 case UNLE:
3731 case UNGE:
3732 case NE:
3733 case UNORDERED:
3734 return const_true_rtx;
3735 case EQ:
3736 case LT:
3737 case GT:
3738 case LE:
3739 case GE:
3740 case LTGT:
3741 case ORDERED:
3742 return const0_rtx;
3743 default:
3744 return 0;
3745 }
3746
3747 equal = REAL_VALUES_EQUAL (d0, d1);
3748 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3749 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3750 }
3751
3752 /* Otherwise, see if the operands are both integers. */
3753 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3754 && (GET_CODE (trueop0) == CONST_DOUBLE
3755 || GET_CODE (trueop0) == CONST_INT)
3756 && (GET_CODE (trueop1) == CONST_DOUBLE
3757 || GET_CODE (trueop1) == CONST_INT))
3758 {
3759 int width = GET_MODE_BITSIZE (mode);
3760 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3761 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3762
3763 /* Get the two words comprising each integer constant. */
3764 if (GET_CODE (trueop0) == CONST_DOUBLE)
3765 {
3766 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3767 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3768 }
3769 else
3770 {
3771 l0u = l0s = INTVAL (trueop0);
3772 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3773 }
3774
3775 if (GET_CODE (trueop1) == CONST_DOUBLE)
3776 {
3777 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3778 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3779 }
3780 else
3781 {
3782 l1u = l1s = INTVAL (trueop1);
3783 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3784 }
3785
3786 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3787 we have to sign or zero-extend the values. */
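/* For example, with QImode (width 8) and a CONST_INT of -1, the unsigned
   copy l0u is masked down to 0xff below, while the signed copy l0s is
   sign-extended and stays -1.  */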
3788 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3789 {
3790 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3791 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3792
3793 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3794 l0s |= ((HOST_WIDE_INT) (-1) << width);
3795
3796 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3797 l1s |= ((HOST_WIDE_INT) (-1) << width);
3798 }
3799 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3800 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3801
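/* The tests below compare the high words first (signed for OP0LT/OP1LT,
   unsigned for OP0LTU/OP1LTU) and break ties with the unsigned low words.
   E.g. with a 32-bit HOST_WIDE_INT, DImode -1 versus 0 gives op0lt
   (h0s == -1 < h1s == 0) but not op0ltu (h0u == 0xffffffff > h1u == 0).  */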
3802 equal = (h0u == h1u && l0u == l1u);
3803 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3804 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3805 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3806 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3807 }
3808
3809 /* Otherwise, there are some code-specific tests we can make. */
3810 else
3811 {
3812 /* Optimize comparisons with upper and lower bounds. */
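/* For example, with QImode and an unsigned code, get_mode_bounds yields
   mmin == 0 and mmax == the all-ones value, so (geu x (const_int 0))
   folds to const_true_rtx below and an unsigned comparison of x with
   mmax folds x > mmax to const0_rtx.  */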
3813 if (SCALAR_INT_MODE_P (mode)
3814 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3815 {
3816 rtx mmin, mmax;
3817 int sign;
3818
3819 if (code == GEU
3820 || code == LEU
3821 || code == GTU
3822 || code == LTU)
3823 sign = 0;
3824 else
3825 sign = 1;
3826
3827 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3828
3829 tem = NULL_RTX;
3830 switch (code)
3831 {
3832 case GEU:
3833 case GE:
3834 /* x >= min is always true. */
3835 if (rtx_equal_p (trueop1, mmin))
3836 tem = const_true_rtx;
3837 else
3838 break;
3839
3840 case LEU:
3841 case LE:
3842 /* x <= max is always true. */
3843 if (rtx_equal_p (trueop1, mmax))
3844 tem = const_true_rtx;
3845 break;
3846
3847 case GTU:
3848 case GT:
3849 /* x > max is always false. */
3850 if (rtx_equal_p (trueop1, mmax))
3851 tem = const0_rtx;
3852 break;
3853
3854 case LTU:
3855 case LT:
3856 /* x < min is always false. */
3857 if (rtx_equal_p (trueop1, mmin))
3858 tem = const0_rtx;
3859 break;
3860
3861 default:
3862 break;
3863 }
3864 if (tem == const0_rtx
3865 || tem == const_true_rtx)
3866 return tem;
3867 }
3868
3869 switch (code)
3870 {
3871 case EQ:
3872 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3873 return const0_rtx;
3874 break;
3875
3876 case NE:
3877 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3878 return const_true_rtx;
3879 break;
3880
3881 case LT:
3882 /* Optimize abs(x) < 0.0. */
3883 if (trueop1 == CONST0_RTX (mode)
3884 && !HONOR_SNANS (mode)
3885 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3886 {
3887 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3888 : trueop0;
3889 if (GET_CODE (tem) == ABS)
3890 return const0_rtx;
3891 }
3892 break;
3893
3894 case GE:
3895 /* Optimize abs(x) >= 0.0. */
3896 if (trueop1 == CONST0_RTX (mode)
3897 && !HONOR_NANS (mode)
3898 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3899 {
3900 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3901 : trueop0;
3902 if (GET_CODE (tem) == ABS)
3903 return const_true_rtx;
3904 }
3905 break;
3906
3907 case UNGE:
3908 /* Optimize ! (abs(x) < 0.0). */
3909 if (trueop1 == CONST0_RTX (mode))
3910 {
3911 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3912 : trueop0;
3913 if (GET_CODE (tem) == ABS)
3914 return const_true_rtx;
3915 }
3916 break;
3917
3918 default:
3919 break;
3920 }
3921
3922 return 0;
3923 }
3924
3925 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3926 as appropriate. */
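/* Note that the GT/GTU and GE/GEU cases below read OP1LT/OP1LTU, since
   A > B is exactly B < A, and A >= B is "B < A or A == B".  */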
3927 switch (code)
3928 {
3929 case EQ:
3930 case UNEQ:
3931 return equal ? const_true_rtx : const0_rtx;
3932 case NE:
3933 case LTGT:
3934 return ! equal ? const_true_rtx : const0_rtx;
3935 case LT:
3936 case UNLT:
3937 return op0lt ? const_true_rtx : const0_rtx;
3938 case GT:
3939 case UNGT:
3940 return op1lt ? const_true_rtx : const0_rtx;
3941 case LTU:
3942 return op0ltu ? const_true_rtx : const0_rtx;
3943 case GTU:
3944 return op1ltu ? const_true_rtx : const0_rtx;
3945 case LE:
3946 case UNLE:
3947 return equal || op0lt ? const_true_rtx : const0_rtx;
3948 case GE:
3949 case UNGE:
3950 return equal || op1lt ? const_true_rtx : const0_rtx;
3951 case LEU:
3952 return equal || op0ltu ? const_true_rtx : const0_rtx;
3953 case GEU:
3954 return equal || op1ltu ? const_true_rtx : const0_rtx;
3955 case ORDERED:
3956 return const_true_rtx;
3957 case UNORDERED:
3958 return const0_rtx;
3959 default:
3960 gcc_unreachable ();
3961 }
3962 }
3963 \f
3964 /* Simplify CODE, an operation with result mode MODE and three operands,
3965 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3966 a constant. Return 0 if no simplification is possible. */
3967
3968 rtx
3969 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3970 enum machine_mode op0_mode, rtx op0, rtx op1,
3971 rtx op2)
3972 {
3973 unsigned int width = GET_MODE_BITSIZE (mode);
3974
3975 /* VOIDmode means "infinite" precision. */
3976 if (width == 0)
3977 width = HOST_BITS_PER_WIDE_INT;
3978
3979 switch (code)
3980 {
3981 case SIGN_EXTRACT:
3982 case ZERO_EXTRACT:
3983 if (GET_CODE (op0) == CONST_INT
3984 && GET_CODE (op1) == CONST_INT
3985 && GET_CODE (op2) == CONST_INT
3986 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3987 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3988 {
3989 /* Extracting a bit-field from a constant */
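/* For instance, with BITS_BIG_ENDIAN clear,
   (zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 4))
   extracts bits 4..7 of 0x1234 and yields (const_int 3).  */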
3990 HOST_WIDE_INT val = INTVAL (op0);
3991
3992 if (BITS_BIG_ENDIAN)
3993 val >>= (GET_MODE_BITSIZE (op0_mode)
3994 - INTVAL (op2) - INTVAL (op1));
3995 else
3996 val >>= INTVAL (op2);
3997
3998 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3999 {
4000 /* First zero-extend. */
4001 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4002 /* If desired, propagate sign bit. */
4003 if (code == SIGN_EXTRACT
4004 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4005 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4006 }
4007
4008 /* Clear the bits that don't belong in our mode,
4009 unless they and our sign bit are all one.
4010 So we get either a reasonable negative value or a reasonable
4011 unsigned value for this mode. */
4012 if (width < HOST_BITS_PER_WIDE_INT
4013 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4014 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4015 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4016
4017 return gen_int_mode (val, mode);
4018 }
4019 break;
4020
4021 case IF_THEN_ELSE:
4022 if (GET_CODE (op0) == CONST_INT)
4023 return op0 != const0_rtx ? op1 : op2;
4024
4025 /* Convert c ? a : a into "a". */
4026 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4027 return op1;
4028
4029 /* Convert a != b ? a : b into "a". */
4030 if (GET_CODE (op0) == NE
4031 && ! side_effects_p (op0)
4032 && ! HONOR_NANS (mode)
4033 && ! HONOR_SIGNED_ZEROS (mode)
4034 && ((rtx_equal_p (XEXP (op0, 0), op1)
4035 && rtx_equal_p (XEXP (op0, 1), op2))
4036 || (rtx_equal_p (XEXP (op0, 0), op2)
4037 && rtx_equal_p (XEXP (op0, 1), op1))))
4038 return op1;
4039
4040 /* Convert a == b ? a : b into "b". */
4041 if (GET_CODE (op0) == EQ
4042 && ! side_effects_p (op0)
4043 && ! HONOR_NANS (mode)
4044 && ! HONOR_SIGNED_ZEROS (mode)
4045 && ((rtx_equal_p (XEXP (op0, 0), op1)
4046 && rtx_equal_p (XEXP (op0, 1), op2))
4047 || (rtx_equal_p (XEXP (op0, 0), op2)
4048 && rtx_equal_p (XEXP (op0, 1), op1))))
4049 return op2;
4050
4051 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4052 {
4053 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4054 ? GET_MODE (XEXP (op0, 1))
4055 : GET_MODE (XEXP (op0, 0)));
4056 rtx temp;
4057
4058 /* Look for happy constants in op1 and op2. */
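/* For example, on a target where STORE_FLAG_VALUE is 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) becomes (lt x y),
   and swapping the two constants gives the reversed comparison (ge x y),
   provided reversed_comparison_code can reverse it safely.  */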
4059 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4060 {
4061 HOST_WIDE_INT t = INTVAL (op1);
4062 HOST_WIDE_INT f = INTVAL (op2);
4063
4064 if (t == STORE_FLAG_VALUE && f == 0)
4065 code = GET_CODE (op0);
4066 else if (t == 0 && f == STORE_FLAG_VALUE)
4067 {
4068 enum rtx_code tmp;
4069 tmp = reversed_comparison_code (op0, NULL_RTX);
4070 if (tmp == UNKNOWN)
4071 break;
4072 code = tmp;
4073 }
4074 else
4075 break;
4076
4077 return simplify_gen_relational (code, mode, cmp_mode,
4078 XEXP (op0, 0), XEXP (op0, 1));
4079 }
4080
4081 if (cmp_mode == VOIDmode)
4082 cmp_mode = op0_mode;
4083 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4084 cmp_mode, XEXP (op0, 0),
4085 XEXP (op0, 1));
4086
4087 /* See if any simplifications were possible. */
4088 if (temp)
4089 {
4090 if (GET_CODE (temp) == CONST_INT)
4091 return temp == const0_rtx ? op2 : op1;
4092 else if (temp)
4093 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4094 }
4095 }
4096 break;
4097
4098 case VEC_MERGE:
4099 gcc_assert (GET_MODE (op0) == mode);
4100 gcc_assert (GET_MODE (op1) == mode);
4101 gcc_assert (VECTOR_MODE_P (mode));
4102 op2 = avoid_constant_pool_reference (op2);
4103 if (GET_CODE (op2) == CONST_INT)
4104 {
4105 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4106 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4107 int mask = (1 << n_elts) - 1;
4108
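/* E.g. in V4SImode a selector of (const_int 5) (binary 0101) takes
   elements 0 and 2 from OP0 and elements 1 and 3 from OP1, while
   selectors of 0 and 15 reduce to OP1 and OP0 respectively.  */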
4109 if (!(INTVAL (op2) & mask))
4110 return op1;
4111 if ((INTVAL (op2) & mask) == mask)
4112 return op0;
4113
4114 op0 = avoid_constant_pool_reference (op0);
4115 op1 = avoid_constant_pool_reference (op1);
4116 if (GET_CODE (op0) == CONST_VECTOR
4117 && GET_CODE (op1) == CONST_VECTOR)
4118 {
4119 rtvec v = rtvec_alloc (n_elts);
4120 unsigned int i;
4121
4122 for (i = 0; i < n_elts; i++)
4123 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4124 ? CONST_VECTOR_ELT (op0, i)
4125 : CONST_VECTOR_ELT (op1, i));
4126 return gen_rtx_CONST_VECTOR (mode, v);
4127 }
4128 }
4129 break;
4130
4131 default:
4132 gcc_unreachable ();
4133 }
4134
4135 return 0;
4136 }
4137
4138 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4139 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4140
4141 Works by unpacking OP into a collection of 8-bit values
4142 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4143 and then repacking them again for OUTERMODE. */
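/* As an illustration, on a little-endian target a HImode subreg of
   (const_int 0x12345678) in SImode at byte 0 repacks the two low-order
   bytes and yields (const_int 0x5678); a big-endian target selects the
   high-order half instead.  */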
4144
4145 static rtx
4146 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4147 enum machine_mode innermode, unsigned int byte)
4148 {
4149 /* We support up to 512-bit values (for V8DFmode). */
4150 enum {
4151 max_bitsize = 512,
4152 value_bit = 8,
4153 value_mask = (1 << value_bit) - 1
4154 };
4155 unsigned char value[max_bitsize / value_bit];
4156 int value_start;
4157 int i;
4158 int elem;
4159
4160 int num_elem;
4161 rtx * elems;
4162 int elem_bitsize;
4163 rtx result_s;
4164 rtvec result_v = NULL;
4165 enum mode_class outer_class;
4166 enum machine_mode outer_submode;
4167
4168 /* Some ports misuse CCmode. */
4169 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4170 return op;
4171
4172 /* We have no way to represent a complex constant at the rtl level. */
4173 if (COMPLEX_MODE_P (outermode))
4174 return NULL_RTX;
4175
4176 /* Unpack the value. */
4177
4178 if (GET_CODE (op) == CONST_VECTOR)
4179 {
4180 num_elem = CONST_VECTOR_NUNITS (op);
4181 elems = &CONST_VECTOR_ELT (op, 0);
4182 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4183 }
4184 else
4185 {
4186 num_elem = 1;
4187 elems = &op;
4188 elem_bitsize = max_bitsize;
4189 }
4190 /* If this asserts, it is too complicated; reducing value_bit may help. */
4191 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4192 /* I don't know how to handle endianness of sub-units. */
4193 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4194
4195 for (elem = 0; elem < num_elem; elem++)
4196 {
4197 unsigned char * vp;
4198 rtx el = elems[elem];
4199
4200 /* Vectors are kept in target memory order. (This is probably
4201 a mistake.) */
4202 {
4203 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4204 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4205 / BITS_PER_UNIT);
4206 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4207 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4208 unsigned bytele = (subword_byte % UNITS_PER_WORD
4209 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4210 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4211 }
4212
4213 switch (GET_CODE (el))
4214 {
4215 case CONST_INT:
4216 for (i = 0;
4217 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4218 i += value_bit)
4219 *vp++ = INTVAL (el) >> i;
4220 /* CONST_INTs are always logically sign-extended. */
4221 for (; i < elem_bitsize; i += value_bit)
4222 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4223 break;
4224
4225 case CONST_DOUBLE:
4226 if (GET_MODE (el) == VOIDmode)
4227 {
4228 /* If this triggers, someone should have generated a
4229 CONST_INT instead. */
4230 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4231
4232 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4233 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4234 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4235 {
4236 *vp++
4237 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4238 i += value_bit;
4239 }
4240 /* It shouldn't matter what's done here, so fill it with
4241 zero. */
4242 for (; i < elem_bitsize; i += value_bit)
4243 *vp++ = 0;
4244 }
4245 else
4246 {
4247 long tmp[max_bitsize / 32];
4248 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4249
4250 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4251 gcc_assert (bitsize <= elem_bitsize);
4252 gcc_assert (bitsize % value_bit == 0);
4253
4254 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4255 GET_MODE (el));
4256
4257 /* real_to_target produces its result in words affected by
4258 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4259 and use WORDS_BIG_ENDIAN instead; see the documentation
4260 of SUBREG in rtl.texi. */
4261 for (i = 0; i < bitsize; i += value_bit)
4262 {
4263 int ibase;
4264 if (WORDS_BIG_ENDIAN)
4265 ibase = bitsize - 1 - i;
4266 else
4267 ibase = i;
4268 *vp++ = tmp[ibase / 32] >> i % 32;
4269 }
4270
4271 /* It shouldn't matter what's done here, so fill it with
4272 zero. */
4273 for (; i < elem_bitsize; i += value_bit)
4274 *vp++ = 0;
4275 }
4276 break;
4277
4278 default:
4279 gcc_unreachable ();
4280 }
4281 }
4282
4283 /* Now, pick the right byte to start with. */
4284 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4285 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4286 will already have offset 0. */
4287 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4288 {
4289 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4290 - byte);
4291 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4292 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4293 byte = (subword_byte % UNITS_PER_WORD
4294 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4295 }
4296
4297 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4298 so if it's become negative it will instead be very large.) */
4299 gcc_assert (byte < GET_MODE_SIZE (innermode));
4300
4301 /* Convert from bytes to chunks of size value_bit. */
4302 value_start = byte * (BITS_PER_UNIT / value_bit);
4303
4304 /* Re-pack the value. */
4305
4306 if (VECTOR_MODE_P (outermode))
4307 {
4308 num_elem = GET_MODE_NUNITS (outermode);
4309 result_v = rtvec_alloc (num_elem);
4310 elems = &RTVEC_ELT (result_v, 0);
4311 outer_submode = GET_MODE_INNER (outermode);
4312 }
4313 else
4314 {
4315 num_elem = 1;
4316 elems = &result_s;
4317 outer_submode = outermode;
4318 }
4319
4320 outer_class = GET_MODE_CLASS (outer_submode);
4321 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4322
4323 gcc_assert (elem_bitsize % value_bit == 0);
4324 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4325
4326 for (elem = 0; elem < num_elem; elem++)
4327 {
4328 unsigned char *vp;
4329
4330 /* Vectors are stored in target memory order. (This is probably
4331 a mistake.) */
4332 {
4333 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4334 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4335 / BITS_PER_UNIT);
4336 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4337 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4338 unsigned bytele = (subword_byte % UNITS_PER_WORD
4339 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4340 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4341 }
4342
4343 switch (outer_class)
4344 {
4345 case MODE_INT:
4346 case MODE_PARTIAL_INT:
4347 {
4348 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4349
4350 for (i = 0;
4351 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4352 i += value_bit)
4353 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4354 for (; i < elem_bitsize; i += value_bit)
4355 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4356 << (i - HOST_BITS_PER_WIDE_INT));
4357
4358 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4359 know why. */
4360 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4361 elems[elem] = gen_int_mode (lo, outer_submode);
4362 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4363 elems[elem] = immed_double_const (lo, hi, outer_submode);
4364 else
4365 return NULL_RTX;
4366 }
4367 break;
4368
4369 case MODE_FLOAT:
4370 case MODE_DECIMAL_FLOAT:
4371 {
4372 REAL_VALUE_TYPE r;
4373 long tmp[max_bitsize / 32];
4374
4375 /* real_from_target wants its input in words affected by
4376 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4377 and use WORDS_BIG_ENDIAN instead; see the documentation
4378 of SUBREG in rtl.texi. */
4379 for (i = 0; i < max_bitsize / 32; i++)
4380 tmp[i] = 0;
4381 for (i = 0; i < elem_bitsize; i += value_bit)
4382 {
4383 int ibase;
4384 if (WORDS_BIG_ENDIAN)
4385 ibase = elem_bitsize - 1 - i;
4386 else
4387 ibase = i;
4388 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4389 }
4390
4391 real_from_target (&r, tmp, outer_submode);
4392 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4393 }
4394 break;
4395
4396 default:
4397 gcc_unreachable ();
4398 }
4399 }
4400 if (VECTOR_MODE_P (outermode))
4401 return gen_rtx_CONST_VECTOR (outermode, result_v);
4402 else
4403 return result_s;
4404 }
4405
4406 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4407 Return 0 if no simplifications are possible. */
4408 rtx
4409 simplify_subreg (enum machine_mode outermode, rtx op,
4410 enum machine_mode innermode, unsigned int byte)
4411 {
4412 /* Little bit of sanity checking. */
4413 gcc_assert (innermode != VOIDmode);
4414 gcc_assert (outermode != VOIDmode);
4415 gcc_assert (innermode != BLKmode);
4416 gcc_assert (outermode != BLKmode);
4417
4418 gcc_assert (GET_MODE (op) == innermode
4419 || GET_MODE (op) == VOIDmode);
4420
4421 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4422 gcc_assert (byte < GET_MODE_SIZE (innermode));
4423
4424 if (outermode == innermode && !byte)
4425 return op;
4426
4427 if (GET_CODE (op) == CONST_INT
4428 || GET_CODE (op) == CONST_DOUBLE
4429 || GET_CODE (op) == CONST_VECTOR)
4430 return simplify_immed_subreg (outermode, op, innermode, byte);
4431
4432 /* Changing mode twice with SUBREG => just change it once,
4433 or not at all if changing back to OP's starting mode. */
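/* For example, (subreg:QI (subreg:HI (reg:SI r) 0) 0) can become
   (subreg:QI (reg:SI r) 0) on a little-endian target, and
   (subreg:SI (subreg:HI (reg:SI r) 0) 0) collapses back to (reg:SI r).  */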
4434 if (GET_CODE (op) == SUBREG)
4435 {
4436 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4437 int final_offset = byte + SUBREG_BYTE (op);
4438 rtx newx;
4439
4440 if (outermode == innermostmode
4441 && byte == 0 && SUBREG_BYTE (op) == 0)
4442 return SUBREG_REG (op);
4443
4444 /* The SUBREG_BYTE represents the offset, as if the value were stored
4445 in memory. An irritating exception is a paradoxical subreg, where
4446 we define SUBREG_BYTE to be 0; on big endian machines this value
4447 would otherwise be negative. For a moment, undo this exception. */
4448 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4449 {
4450 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4451 if (WORDS_BIG_ENDIAN)
4452 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4453 if (BYTES_BIG_ENDIAN)
4454 final_offset += difference % UNITS_PER_WORD;
4455 }
4456 if (SUBREG_BYTE (op) == 0
4457 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4458 {
4459 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4460 if (WORDS_BIG_ENDIAN)
4461 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4462 if (BYTES_BIG_ENDIAN)
4463 final_offset += difference % UNITS_PER_WORD;
4464 }
4465
4466 /* See whether resulting subreg will be paradoxical. */
4467 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4468 {
4469 /* In nonparadoxical subregs we can't handle negative offsets. */
4470 if (final_offset < 0)
4471 return NULL_RTX;
4472 /* Bail out in case resulting subreg would be incorrect. */
4473 if (final_offset % GET_MODE_SIZE (outermode)
4474 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4475 return NULL_RTX;
4476 }
4477 else
4478 {
4479 int offset = 0;
4480 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4481
4482 /* In a paradoxical subreg, see if we are still looking at the lower part.
4483 If so, our SUBREG_BYTE will be 0. */
4484 if (WORDS_BIG_ENDIAN)
4485 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4486 if (BYTES_BIG_ENDIAN)
4487 offset += difference % UNITS_PER_WORD;
4488 if (offset == final_offset)
4489 final_offset = 0;
4490 else
4491 return NULL_RTX;
4492 }
4493
4494 /* Recurse for further possible simplifications. */
4495 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4496 final_offset);
4497 if (newx)
4498 return newx;
4499 if (validate_subreg (outermode, innermostmode,
4500 SUBREG_REG (op), final_offset))
4501 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4502 return NULL_RTX;
4503 }
4504
4505 /* Merge implicit and explicit truncations. */
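/* For instance, (subreg:QI (truncate:HI (reg:SI r)) 0), where byte 0 is
   the lowpart offset (as on little-endian targets), becomes
   (truncate:QI (reg:SI r)).  */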
4506
4507 if (GET_CODE (op) == TRUNCATE
4508 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4509 && subreg_lowpart_offset (outermode, innermode) == byte)
4510 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4511 GET_MODE (XEXP (op, 0)));
4512
4513 /* SUBREG of a hard register => just change the register number
4514 and/or mode. If the hard register is not valid in that mode,
4515 suppress this simplification. If the hard register is the stack,
4516 frame, or argument pointer, leave this as a SUBREG. */
4517
4518 if (REG_P (op)
4519 && REGNO (op) < FIRST_PSEUDO_REGISTER
4520 #ifdef CANNOT_CHANGE_MODE_CLASS
4521 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4522 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4523 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4524 #endif
4525 && ((reload_completed && !frame_pointer_needed)
4526 || (REGNO (op) != FRAME_POINTER_REGNUM
4527 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4528 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4529 #endif
4530 ))
4531 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4532 && REGNO (op) != ARG_POINTER_REGNUM
4533 #endif
4534 && REGNO (op) != STACK_POINTER_REGNUM
4535 && subreg_offset_representable_p (REGNO (op), innermode,
4536 byte, outermode))
4537 {
4538 unsigned int regno = REGNO (op);
4539 unsigned int final_regno
4540 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4541
4542 /* ??? We do allow it if the current REG is not valid for
4543 its mode. This is a kludge to work around how float/complex
4544 arguments are passed on 32-bit SPARC and should be fixed. */
4545 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4546 || ! HARD_REGNO_MODE_OK (regno, innermode))
4547 {
4548 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
4549
4550 /* Propagate original regno. We don't have any way to specify
4551 the offset inside original regno, so do so only for lowpart.
4552 The information is used only by alias analysis, which cannot
4553 grok partial registers anyway. */
4554
4555 if (subreg_lowpart_offset (outermode, innermode) == byte)
4556 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4557 return x;
4558 }
4559 }
4560
4561 /* If we have a SUBREG of a register that we are replacing and we are
4562 replacing it with a MEM, make a new MEM and try replacing the
4563 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4564 or if we would be widening it. */
4565
4566 if (MEM_P (op)
4567 && ! mode_dependent_address_p (XEXP (op, 0))
4568 /* Allow splitting of volatile memory references in case we don't
4569 have an instruction to move the whole thing. */
4570 && (! MEM_VOLATILE_P (op)
4571 || ! have_insn_for (SET, innermode))
4572 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4573 return adjust_address_nv (op, outermode, byte);
4574
4575 /* Handle complex values represented as CONCAT
4576 of real and imaginary part. */
4577 if (GET_CODE (op) == CONCAT)
4578 {
4579 unsigned int inner_size, final_offset;
4580 rtx part, res;
4581
4582 inner_size = GET_MODE_UNIT_SIZE (innermode);
4583 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4584 final_offset = byte % inner_size;
4585 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4586 return NULL_RTX;
4587
4588 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4589 if (res)
4590 return res;
4591 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4592 return gen_rtx_SUBREG (outermode, part, final_offset);
4593 return NULL_RTX;
4594 }
4595
4596 /* Optimize SUBREG truncations of zero and sign extended values. */
4597 if ((GET_CODE (op) == ZERO_EXTEND
4598 || GET_CODE (op) == SIGN_EXTEND)
4599 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4600 {
4601 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4602
4603 /* If we're requesting the lowpart of a zero or sign extension,
4604 there are three possibilities. If the outermode is the same
4605 as the origmode, we can omit both the extension and the subreg.
4606 If the outermode is not larger than the origmode, we can apply
4607 the truncation without the extension. Finally, if the outermode
4608 is larger than the origmode, but both are integer modes, we
4609 can just extend to the appropriate mode. */
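/* E.g., assuming byte 0 is the lowpart offset:
   (subreg:HI (zero_extend:SI (reg:HI r)) 0) is just (reg:HI r);
   (subreg:QI (zero_extend:SI (reg:HI r)) 0) becomes (subreg:QI (reg:HI r) 0);
   (subreg:HI (zero_extend:DI (reg:QI r)) 0) becomes (zero_extend:HI (reg:QI r)).  */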
4610 if (bitpos == 0)
4611 {
4612 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4613 if (outermode == origmode)
4614 return XEXP (op, 0);
4615 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4616 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4617 subreg_lowpart_offset (outermode,
4618 origmode));
4619 if (SCALAR_INT_MODE_P (outermode))
4620 return simplify_gen_unary (GET_CODE (op), outermode,
4621 XEXP (op, 0), origmode);
4622 }
4623
4624 /* A SUBREG resulting from a zero extension may fold to zero if
4625 it extracts higher bits than the ZERO_EXTEND's source bits. */
4626 if (GET_CODE (op) == ZERO_EXTEND
4627 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4628 return CONST0_RTX (outermode);
4629 }
4630
4631 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4632 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4633 the outer subreg is effectively a truncation to the original mode. */
4634 if ((GET_CODE (op) == LSHIFTRT
4635 || GET_CODE (op) == ASHIFTRT)
4636 && SCALAR_INT_MODE_P (outermode)
4637 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4638 to avoid the possibility that an outer LSHIFTRT shifts by more
4639 than the sign extension's sign_bit_copies and introduces zeros
4640 into the high bits of the result. */
4641 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4642 && GET_CODE (XEXP (op, 1)) == CONST_INT
4643 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4644 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4645 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4646 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4647 return simplify_gen_binary (ASHIFTRT, outermode,
4648 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4649
4650 /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4651 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4652 the outer subreg is effectively a truncation to the original mode. */
4653 if ((GET_CODE (op) == LSHIFTRT
4654 || GET_CODE (op) == ASHIFTRT)
4655 && SCALAR_INT_MODE_P (outermode)
4656 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4657 && GET_CODE (XEXP (op, 1)) == CONST_INT
4658 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4659 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4660 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4661 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4662 return simplify_gen_binary (LSHIFTRT, outermode,
4663 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4664
4665 /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4666 into (ashift:QI (x:QI) C), where C is a suitable small constant and
4667 the outer subreg is effectively a truncation to the original mode. */
4668 if (GET_CODE (op) == ASHIFT
4669 && SCALAR_INT_MODE_P (outermode)
4670 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4671 && GET_CODE (XEXP (op, 1)) == CONST_INT
4672 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4673 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4674 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4675 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4676 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4677 return simplify_gen_binary (ASHIFT, outermode,
4678 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4679
4680 return NULL_RTX;
4681 }
4682
4683 /* Make a SUBREG operation or equivalent if it folds. */
4684
4685 rtx
4686 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4687 enum machine_mode innermode, unsigned int byte)
4688 {
4689 rtx newx;
4690
4691 newx = simplify_subreg (outermode, op, innermode, byte);
4692 if (newx)
4693 return newx;
4694
4695 if (GET_CODE (op) == SUBREG
4696 || GET_CODE (op) == CONCAT
4697 || GET_MODE (op) == VOIDmode)
4698 return NULL_RTX;
4699
4700 if (validate_subreg (outermode, innermode, op, byte))
4701 return gen_rtx_SUBREG (outermode, op, byte);
4702
4703 return NULL_RTX;
4704 }
4705
4706 /* Simplify X, an rtx expression.
4707
4708 Return the simplified expression or NULL if no simplifications
4709 were possible.
4710
4711 This is the preferred entry point into the simplification routines;
4712 however, we still allow passes to call the more specific routines.
4713
4714 Right now GCC has three (yes, three) major bodies of RTL simplification
4715 code that need to be unified.
4716
4717 1. fold_rtx in cse.c. This code uses various CSE specific
4718 information to aid in RTL simplification.
4719
4720 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4721 it uses combine specific information to aid in RTL
4722 simplification.
4723
4724 3. The routines in this file.
4725
4726
4727 Long term we want to only have one body of simplification code; to
4728 get to that state I recommend the following steps:
4729
4730 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4731 which do not depend on pass-specific state into these routines.
4732
4733 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4734 use this routine whenever possible.
4735
4736 3. Allow for pass dependent state to be provided to these
4737 routines and add simplifications based on the pass dependent
4738 state. Remove code from cse.c & combine.c that becomes
4739 redundant/dead.
4740
4741 It will take time, but ultimately the compiler will be easier to
4742 maintain and improve. It's totally silly that when we add a
4743 simplification it needs to be added in 4 places (3 for RTL
4744 simplification and 1 for tree simplification). */
4745
4746 rtx
4747 simplify_rtx (rtx x)
4748 {
4749 enum rtx_code code = GET_CODE (x);
4750 enum machine_mode mode = GET_MODE (x);
4751
4752 switch (GET_RTX_CLASS (code))
4753 {
4754 case RTX_UNARY:
4755 return simplify_unary_operation (code, mode,
4756 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4757 case RTX_COMM_ARITH:
4758 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4759 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4760
4761 /* Fall through.... */
4762
4763 case RTX_BIN_ARITH:
4764 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4765
4766 case RTX_TERNARY:
4767 case RTX_BITFIELD_OPS:
4768 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4769 XEXP (x, 0), XEXP (x, 1),
4770 XEXP (x, 2));
4771
4772 case RTX_COMPARE:
4773 case RTX_COMM_COMPARE:
4774 return simplify_relational_operation (code, mode,
4775 ((GET_MODE (XEXP (x, 0))
4776 != VOIDmode)
4777 ? GET_MODE (XEXP (x, 0))
4778 : GET_MODE (XEXP (x, 1))),
4779 XEXP (x, 0),
4780 XEXP (x, 1));
4781
4782 case RTX_EXTRA:
4783 if (code == SUBREG)
4784 return simplify_gen_subreg (mode, SUBREG_REG (x),
4785 GET_MODE (SUBREG_REG (x)),
4786 SUBREG_BYTE (x));
4787 break;
4788
4789 case RTX_OBJ:
4790 if (code == LO_SUM)
4791 {
4792 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4793 if (GET_CODE (XEXP (x, 0)) == HIGH
4794 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4795 return XEXP (x, 1);
4796 }
4797 break;
4798
4799 default:
4800 break;
4801 }
4802 return NULL;
4803 }
4804