1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
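/* For example, a CONST_INT is widened into such a pair with

     lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);

   so that a negative low word yields an all-ones high word.  */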
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
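/* For example, negating (const_int -128) in QImode yields (const_int -128)
   again: the mathematical result 128 does not fit in QImode and is
   truncated back by gen_int_mode.  */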
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
76
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
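/* For example, mode_signbit_p (SImode, x) holds only for the value with
   just the sign bit set, i.e. (const_int -2147483648) when written as a
   sign-extended CONST_INT.  */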
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
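/* For example, simplify_gen_binary (PLUS, SImode, X, const0_rtx) folds to
   X itself, while a non-foldable case such as adding (const_int 4) to a
   register merely gets its operands ordered, producing
   (plus:SI (reg) (const_int 4)).  */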
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 addr = XEXP (x, 0);
162
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
165
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
170 {
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
173 }
174
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
177
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
182 {
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
185
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
190 {
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
194 }
195 else
196 return c;
197 }
198
199 return x;
200 }
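/* For example, a MEM whose address is a SYMBOL_REF into the constant pool
   is replaced by the pooled constant itself (adjusted by simplify_subreg
   when the access uses a different mode or offset), and a FLOAT_EXTEND of
   such a MEM folds to the extended CONST_DOUBLE.  */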
201
202 /* Return true if X is a MEM referencing the constant pool. */
203
204 bool
205 constant_pool_reference_p (rtx x)
206 {
207 return avoid_constant_pool_reference (x) != x;
208 }
209 \f
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
212
213 rtx
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
216 {
217 rtx tem;
218
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
222
223 return gen_rtx_fmt_e (code, mode, op);
224 }
225
226 /* Likewise for ternary operations. */
227
228 rtx
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
231 {
232 rtx tem;
233
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
238
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
240 }
241
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
244
245 rtx
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
248 {
249 rtx tem;
250
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
254
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
256 }
257 \f
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
260
261 rtx
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
263 {
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
268
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
272
273 if (x == old_rtx)
274 return new_rtx;
275
276 switch (GET_RTX_CLASS (code))
277 {
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
285
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
293
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
304
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
317
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
321 {
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
329 }
330 break;
331
332 case RTX_OBJ:
333 if (code == MEM)
334 {
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
339 }
340 else if (code == LO_SUM)
341 {
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
344
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
348
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
352 }
353 else if (code == REG)
354 {
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
357 }
358 break;
359
360 default:
361 break;
362 }
363 return x;
364 }
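/* For example, replacing (reg R) with (const_int 8) in
   (plus:SI (reg R) (const_int 4)) does not just substitute; the rebuilt
   expression is re-simplified, so the result is (const_int 12).  */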
365 \f
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
369 rtx
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
372 {
373 rtx trueop, tem;
374
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
377
378 trueop = avoid_constant_pool_reference (op);
379
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
383
384 return simplify_unary_operation_1 (code, mode, op);
385 }
386
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
391 {
392 enum rtx_code reversed;
393 rtx temp;
394
395 switch (code)
396 {
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
401
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
409
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
414
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
418
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
425
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
433
434
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
439 bother with. */
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
442 {
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
445 }
446
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
450
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
457
458
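      /* Likewise, (not:M (subreg:M (ashift:N (const_int 1) X) 0)) with M
         narrower than N is rewritten as the lowpart of (rotate:N ~1 X).  */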
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
465 {
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
467 rtx x;
468
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
471 inner_mode),
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
474 }
475
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
479 coded. */
480
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
482 {
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
485
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
488
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
491 op_mode = mode;
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
493
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
495 {
496 rtx tem = in2;
497 in2 = in1; in1 = tem;
498 }
499
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
501 mode, in1, in2);
502 }
503 break;
504
505 case NEG:
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
508 return XEXP (op, 0);
509
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
514
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
518
 519       /* (neg (minus X Y)) can become (minus Y X).  This transformation
 520          isn't safe for modes with signed zeros: if X and Y are both +0,
 521          (minus Y X) is +0, whereas (neg (minus X Y)) is -0.  Nor is it
 522          safe when the rounding mode is towards +infinity (or -infinity),
 523          since the two expressions may then be rounded differently.  */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
528
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
532 {
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
536 {
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
538 if (temp)
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
540 }
541
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
545 }
546
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
551 {
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
554 }
555
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
558 is a constant). */
559 if (GET_CODE (op) == ASHIFT)
560 {
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
562 if (temp)
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
564 }
565
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
573
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
581
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
587
588 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
589 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
590 if (GET_CODE (op) == LT
591 && XEXP (op, 1) == const0_rtx)
592 {
593 if (STORE_FLAG_VALUE == 1)
594 return simplify_gen_binary (ASHIFTRT, mode, XEXP (op, 0),
595 GEN_INT (GET_MODE_BITSIZE (mode) - 1));
596 else if (STORE_FLAG_VALUE == -1)
597 return simplify_gen_binary (LSHIFTRT, mode, XEXP (op, 0),
598 GEN_INT (GET_MODE_BITSIZE (mode) - 1));
599 }
600 break;
601
602 case TRUNCATE:
603 /* We can't handle truncation to a partial integer mode here
604 because we don't know the real bitsize of the partial
605 integer mode. */
606 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
607 break;
608
609 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
610 if ((GET_CODE (op) == SIGN_EXTEND
611 || GET_CODE (op) == ZERO_EXTEND)
612 && GET_MODE (XEXP (op, 0)) == mode)
613 return XEXP (op, 0);
614
615 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
616 (OP:SI foo:SI) if OP is NEG or ABS. */
617 if ((GET_CODE (op) == ABS
618 || GET_CODE (op) == NEG)
619 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
620 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
621 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
622 return simplify_gen_unary (GET_CODE (op), mode,
623 XEXP (XEXP (op, 0), 0), mode);
624
625 /* (truncate:A (subreg:B (truncate:C X) 0)) is
626 (truncate:A X). */
627 if (GET_CODE (op) == SUBREG
628 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
629 && subreg_lowpart_p (op))
630 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
631 GET_MODE (XEXP (SUBREG_REG (op), 0)));
632
633 /* If we know that the value is already truncated, we can
634 replace the TRUNCATE with a SUBREG. Note that this is also
635 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
636 modes we just have to apply a different definition for
637 truncation. But don't do this for an (LSHIFTRT (MULT ...))
638 since this will cause problems with the umulXi3_highpart
639 patterns. */
640 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
641 GET_MODE_BITSIZE (GET_MODE (op)))
642 ? (num_sign_bit_copies (op, GET_MODE (op))
643 >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1))
644 : truncated_to_mode (mode, op))
645 && ! (GET_CODE (op) == LSHIFTRT
646 && GET_CODE (XEXP (op, 0)) == MULT))
647 return rtl_hooks.gen_lowpart_no_emit (mode, op);
648
649 /* A truncate of a comparison can be replaced with a subreg if
650 STORE_FLAG_VALUE permits. This is like the previous test,
651 but it works even if the comparison is done in a mode larger
652 than HOST_BITS_PER_WIDE_INT. */
653 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
654 && COMPARISON_P (op)
655 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
656 return rtl_hooks.gen_lowpart_no_emit (mode, op);
657 break;
658
659 case FLOAT_TRUNCATE:
660 if (DECIMAL_FLOAT_MODE_P (mode))
661 break;
662
663 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
664 if (GET_CODE (op) == FLOAT_EXTEND
665 && GET_MODE (XEXP (op, 0)) == mode)
666 return XEXP (op, 0);
667
668 /* (float_truncate:SF (float_truncate:DF foo:XF))
669 = (float_truncate:SF foo:XF).
670 This may eliminate double rounding, so it is unsafe.
671
672 (float_truncate:SF (float_extend:XF foo:DF))
673 = (float_truncate:SF foo:DF).
674
675 (float_truncate:DF (float_extend:XF foo:SF))
 676          = (float_extend:DF foo:SF).  */
677 if ((GET_CODE (op) == FLOAT_TRUNCATE
678 && flag_unsafe_math_optimizations)
679 || GET_CODE (op) == FLOAT_EXTEND)
 680         return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
 681                                    > GET_MODE_SIZE (mode)
 682                                    ? FLOAT_TRUNCATE : FLOAT_EXTEND,
 683                                    mode,
 684                                    XEXP (op, 0),
 685                                    mode);
686
687 /* (float_truncate (float x)) is (float x) */
688 if (GET_CODE (op) == FLOAT
689 && (flag_unsafe_math_optimizations
690 || ((unsigned)significand_size (GET_MODE (op))
691 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
692 - num_sign_bit_copies (XEXP (op, 0),
693 GET_MODE (XEXP (op, 0)))))))
694 return simplify_gen_unary (FLOAT, mode,
695 XEXP (op, 0),
696 GET_MODE (XEXP (op, 0)));
697
698 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
699 (OP:SF foo:SF) if OP is NEG or ABS. */
700 if ((GET_CODE (op) == ABS
701 || GET_CODE (op) == NEG)
702 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
703 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
704 return simplify_gen_unary (GET_CODE (op), mode,
705 XEXP (XEXP (op, 0), 0), mode);
706
707 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
708 is (float_truncate:SF x). */
709 if (GET_CODE (op) == SUBREG
710 && subreg_lowpart_p (op)
711 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
712 return SUBREG_REG (op);
713 break;
714
715 case FLOAT_EXTEND:
716 if (DECIMAL_FLOAT_MODE_P (mode))
717 break;
718
719 /* (float_extend (float_extend x)) is (float_extend x)
720
721 (float_extend (float x)) is (float x) assuming that double
722 rounding can't happen.
723 */
724 if (GET_CODE (op) == FLOAT_EXTEND
725 || (GET_CODE (op) == FLOAT
726 && ((unsigned)significand_size (GET_MODE (op))
727 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
728 - num_sign_bit_copies (XEXP (op, 0),
729 GET_MODE (XEXP (op, 0)))))))
730 return simplify_gen_unary (GET_CODE (op), mode,
731 XEXP (op, 0),
732 GET_MODE (XEXP (op, 0)));
733
734 break;
735
736 case ABS:
737 /* (abs (neg <foo>)) -> (abs <foo>) */
738 if (GET_CODE (op) == NEG)
739 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
740 GET_MODE (XEXP (op, 0)));
741
742 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
743 do nothing. */
744 if (GET_MODE (op) == VOIDmode)
745 break;
746
747 /* If operand is something known to be positive, ignore the ABS. */
748 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
749 || ((GET_MODE_BITSIZE (GET_MODE (op))
750 <= HOST_BITS_PER_WIDE_INT)
751 && ((nonzero_bits (op, GET_MODE (op))
752 & ((HOST_WIDE_INT) 1
753 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
754 == 0)))
755 return op;
756
757 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
758 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
759 return gen_rtx_NEG (mode, op);
760
761 break;
762
763 case FFS:
764 /* (ffs (*_extend <X>)) = (ffs <X>) */
765 if (GET_CODE (op) == SIGN_EXTEND
766 || GET_CODE (op) == ZERO_EXTEND)
767 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
768 GET_MODE (XEXP (op, 0)));
769 break;
770
771 case POPCOUNT:
772 case PARITY:
773 /* (pop* (zero_extend <X>)) = (pop* <X>) */
774 if (GET_CODE (op) == ZERO_EXTEND)
775 return simplify_gen_unary (code, mode, XEXP (op, 0),
776 GET_MODE (XEXP (op, 0)));
777 break;
778
779 case FLOAT:
780 /* (float (sign_extend <X>)) = (float <X>). */
781 if (GET_CODE (op) == SIGN_EXTEND)
782 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
783 GET_MODE (XEXP (op, 0)));
784 break;
785
786 case SIGN_EXTEND:
787 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
788 becomes just the MINUS if its mode is MODE. This allows
789 folding switch statements on machines using casesi (such as
790 the VAX). */
791 if (GET_CODE (op) == TRUNCATE
792 && GET_MODE (XEXP (op, 0)) == mode
793 && GET_CODE (XEXP (op, 0)) == MINUS
794 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
795 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
796 return XEXP (op, 0);
797
798 /* Check for a sign extension of a subreg of a promoted
799 variable, where the promotion is sign-extended, and the
800 target mode is the same as the variable's promotion. */
801 if (GET_CODE (op) == SUBREG
802 && SUBREG_PROMOTED_VAR_P (op)
803 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
804 && GET_MODE (XEXP (op, 0)) == mode)
805 return XEXP (op, 0);
806
807 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
808 if (! POINTERS_EXTEND_UNSIGNED
809 && mode == Pmode && GET_MODE (op) == ptr_mode
810 && (CONSTANT_P (op)
811 || (GET_CODE (op) == SUBREG
812 && REG_P (SUBREG_REG (op))
813 && REG_POINTER (SUBREG_REG (op))
814 && GET_MODE (SUBREG_REG (op)) == Pmode)))
815 return convert_memory_address (Pmode, op);
816 #endif
817 break;
818
819 case ZERO_EXTEND:
820 /* Check for a zero extension of a subreg of a promoted
821 variable, where the promotion is zero-extended, and the
822 target mode is the same as the variable's promotion. */
823 if (GET_CODE (op) == SUBREG
824 && SUBREG_PROMOTED_VAR_P (op)
825 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
826 && GET_MODE (XEXP (op, 0)) == mode)
827 return XEXP (op, 0);
828
829 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
830 if (POINTERS_EXTEND_UNSIGNED > 0
831 && mode == Pmode && GET_MODE (op) == ptr_mode
832 && (CONSTANT_P (op)
833 || (GET_CODE (op) == SUBREG
834 && REG_P (SUBREG_REG (op))
835 && REG_POINTER (SUBREG_REG (op))
836 && GET_MODE (SUBREG_REG (op)) == Pmode)))
837 return convert_memory_address (Pmode, op);
838 #endif
839 break;
840
841 default:
842 break;
843 }
844
845 return 0;
846 }
847
848 /* Try to compute the value of a unary operation CODE whose output mode is to
849 be MODE with input operand OP whose mode was originally OP_MODE.
850 Return zero if the value cannot be computed. */
851 rtx
852 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
853 rtx op, enum machine_mode op_mode)
854 {
855 unsigned int width = GET_MODE_BITSIZE (mode);
856
857 if (code == VEC_DUPLICATE)
858 {
859 gcc_assert (VECTOR_MODE_P (mode));
860 if (GET_MODE (op) != VOIDmode)
861 {
862 if (!VECTOR_MODE_P (GET_MODE (op)))
863 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
864 else
865 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
866 (GET_MODE (op)));
867 }
868 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
869 || GET_CODE (op) == CONST_VECTOR)
870 {
871 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
872 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
873 rtvec v = rtvec_alloc (n_elts);
874 unsigned int i;
875
876 if (GET_CODE (op) != CONST_VECTOR)
877 for (i = 0; i < n_elts; i++)
878 RTVEC_ELT (v, i) = op;
879 else
880 {
881 enum machine_mode inmode = GET_MODE (op);
882 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
883 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
884
885 gcc_assert (in_n_elts < n_elts);
886 gcc_assert ((n_elts % in_n_elts) == 0);
887 for (i = 0; i < n_elts; i++)
888 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
889 }
890 return gen_rtx_CONST_VECTOR (mode, v);
891 }
892 }
893
894 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
895 {
896 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
897 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
898 enum machine_mode opmode = GET_MODE (op);
899 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
900 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
901 rtvec v = rtvec_alloc (n_elts);
902 unsigned int i;
903
904 gcc_assert (op_n_elts == n_elts);
905 for (i = 0; i < n_elts; i++)
906 {
907 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
908 CONST_VECTOR_ELT (op, i),
909 GET_MODE_INNER (opmode));
910 if (!x)
911 return 0;
912 RTVEC_ELT (v, i) = x;
913 }
914 return gen_rtx_CONST_VECTOR (mode, v);
915 }
916
917 /* The order of these tests is critical so that, for example, we don't
918 check the wrong mode (input vs. output) for a conversion operation,
919 such as FIX. At some point, this should be simplified. */
920
921 if (code == FLOAT && GET_MODE (op) == VOIDmode
922 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
923 {
924 HOST_WIDE_INT hv, lv;
925 REAL_VALUE_TYPE d;
926
927 if (GET_CODE (op) == CONST_INT)
928 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
929 else
930 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
931
932 REAL_VALUE_FROM_INT (d, lv, hv, mode);
933 d = real_value_truncate (mode, d);
934 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
935 }
936 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
937 && (GET_CODE (op) == CONST_DOUBLE
938 || GET_CODE (op) == CONST_INT))
939 {
940 HOST_WIDE_INT hv, lv;
941 REAL_VALUE_TYPE d;
942
943 if (GET_CODE (op) == CONST_INT)
944 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
945 else
946 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
947
948 if (op_mode == VOIDmode)
949 {
950 /* We don't know how to interpret negative-looking numbers in
951 this case, so don't try to fold those. */
952 if (hv < 0)
953 return 0;
954 }
955 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
956 ;
957 else
958 hv = 0, lv &= GET_MODE_MASK (op_mode);
959
960 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
961 d = real_value_truncate (mode, d);
962 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
963 }
964
965 if (GET_CODE (op) == CONST_INT
966 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
967 {
968 HOST_WIDE_INT arg0 = INTVAL (op);
969 HOST_WIDE_INT val;
970
971 switch (code)
972 {
973 case NOT:
974 val = ~ arg0;
975 break;
976
977 case NEG:
978 val = - arg0;
979 break;
980
981 case ABS:
982 val = (arg0 >= 0 ? arg0 : - arg0);
983 break;
984
985 case FFS:
986 /* Don't use ffs here. Instead, get low order bit and then its
987 number. If arg0 is zero, this will return 0, as desired. */
988 arg0 &= GET_MODE_MASK (mode);
989 val = exact_log2 (arg0 & (- arg0)) + 1;
990 break;
991
992 case CLZ:
993 arg0 &= GET_MODE_MASK (mode);
994 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
995 ;
996 else
997 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
998 break;
999
1000 case CTZ:
1001 arg0 &= GET_MODE_MASK (mode);
1002 if (arg0 == 0)
1003 {
1004 /* Even if the value at zero is undefined, we have to come
1005 up with some replacement. Seems good enough. */
1006 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1007 val = GET_MODE_BITSIZE (mode);
1008 }
1009 else
1010 val = exact_log2 (arg0 & -arg0);
1011 break;
1012
1013 case POPCOUNT:
1014 arg0 &= GET_MODE_MASK (mode);
1015 val = 0;
1016 while (arg0)
1017 val++, arg0 &= arg0 - 1;
1018 break;
1019
1020 case PARITY:
1021 arg0 &= GET_MODE_MASK (mode);
1022 val = 0;
1023 while (arg0)
1024 val++, arg0 &= arg0 - 1;
1025 val &= 1;
1026 break;
1027
1028 case TRUNCATE:
1029 val = arg0;
1030 break;
1031
1032 case ZERO_EXTEND:
1033 /* When zero-extending a CONST_INT, we need to know its
1034 original mode. */
1035 gcc_assert (op_mode != VOIDmode);
1036 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1037 {
1038 /* If we were really extending the mode,
1039 we would have to distinguish between zero-extension
1040 and sign-extension. */
1041 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1042 val = arg0;
1043 }
1044 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1045 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1046 else
1047 return 0;
1048 break;
1049
1050 case SIGN_EXTEND:
1051 if (op_mode == VOIDmode)
1052 op_mode = mode;
1053 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1054 {
1055 /* If we were really extending the mode,
1056 we would have to distinguish between zero-extension
1057 and sign-extension. */
1058 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1059 val = arg0;
1060 }
1061 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1062 {
1063 val
1064 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1065 if (val
1066 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1067 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1068 }
1069 else
1070 return 0;
1071 break;
1072
1073 case SQRT:
1074 case FLOAT_EXTEND:
1075 case FLOAT_TRUNCATE:
1076 case SS_TRUNCATE:
1077 case US_TRUNCATE:
1078 return 0;
1079
1080 default:
1081 gcc_unreachable ();
1082 }
1083
1084 return gen_int_mode (val, mode);
1085 }
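  /* For example, the block above folds (popcount:SI (const_int 255)) to
     (const_int 8) and (clz:SI (const_int 1)) to (const_int 31).  */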
1086
1087 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1088 for a DImode operation on a CONST_INT. */
1089 else if (GET_MODE (op) == VOIDmode
1090 && width <= HOST_BITS_PER_WIDE_INT * 2
1091 && (GET_CODE (op) == CONST_DOUBLE
1092 || GET_CODE (op) == CONST_INT))
1093 {
1094 unsigned HOST_WIDE_INT l1, lv;
1095 HOST_WIDE_INT h1, hv;
1096
1097 if (GET_CODE (op) == CONST_DOUBLE)
1098 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1099 else
1100 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1101
1102 switch (code)
1103 {
1104 case NOT:
1105 lv = ~ l1;
1106 hv = ~ h1;
1107 break;
1108
1109 case NEG:
1110 neg_double (l1, h1, &lv, &hv);
1111 break;
1112
1113 case ABS:
1114 if (h1 < 0)
1115 neg_double (l1, h1, &lv, &hv);
1116 else
1117 lv = l1, hv = h1;
1118 break;
1119
1120 case FFS:
1121 hv = 0;
1122 if (l1 == 0)
1123 {
1124 if (h1 == 0)
1125 lv = 0;
1126 else
1127 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1128 }
1129 else
1130 lv = exact_log2 (l1 & -l1) + 1;
1131 break;
1132
1133 case CLZ:
1134 hv = 0;
1135 if (h1 != 0)
1136 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1137 - HOST_BITS_PER_WIDE_INT;
1138 else if (l1 != 0)
1139 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1140 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1141 lv = GET_MODE_BITSIZE (mode);
1142 break;
1143
1144 case CTZ:
1145 hv = 0;
1146 if (l1 != 0)
1147 lv = exact_log2 (l1 & -l1);
1148 else if (h1 != 0)
1149 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1150 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1151 lv = GET_MODE_BITSIZE (mode);
1152 break;
1153
1154 case POPCOUNT:
1155 hv = 0;
1156 lv = 0;
1157 while (l1)
1158 lv++, l1 &= l1 - 1;
1159 while (h1)
1160 lv++, h1 &= h1 - 1;
1161 break;
1162
1163 case PARITY:
1164 hv = 0;
1165 lv = 0;
1166 while (l1)
1167 lv++, l1 &= l1 - 1;
1168 while (h1)
1169 lv++, h1 &= h1 - 1;
1170 lv &= 1;
1171 break;
1172
1173 case TRUNCATE:
1174 /* This is just a change-of-mode, so do nothing. */
1175 lv = l1, hv = h1;
1176 break;
1177
1178 case ZERO_EXTEND:
1179 gcc_assert (op_mode != VOIDmode);
1180
1181 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1182 return 0;
1183
1184 hv = 0;
1185 lv = l1 & GET_MODE_MASK (op_mode);
1186 break;
1187
1188 case SIGN_EXTEND:
1189 if (op_mode == VOIDmode
1190 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1191 return 0;
1192 else
1193 {
1194 lv = l1 & GET_MODE_MASK (op_mode);
1195 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1196 && (lv & ((HOST_WIDE_INT) 1
1197 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1198 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1199
1200 hv = HWI_SIGN_EXTEND (lv);
1201 }
1202 break;
1203
1204 case SQRT:
1205 return 0;
1206
1207 default:
1208 return 0;
1209 }
1210
1211 return immed_double_const (lv, hv, mode);
1212 }
1213
1214 else if (GET_CODE (op) == CONST_DOUBLE
1215 && SCALAR_FLOAT_MODE_P (mode))
1216 {
1217 REAL_VALUE_TYPE d, t;
1218 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1219
1220 switch (code)
1221 {
1222 case SQRT:
1223 if (HONOR_SNANS (mode) && real_isnan (&d))
1224 return 0;
1225 real_sqrt (&t, mode, &d);
1226 d = t;
1227 break;
1228 case ABS:
1229 d = REAL_VALUE_ABS (d);
1230 break;
1231 case NEG:
1232 d = REAL_VALUE_NEGATE (d);
1233 break;
1234 case FLOAT_TRUNCATE:
1235 d = real_value_truncate (mode, d);
1236 break;
1237 case FLOAT_EXTEND:
1238 /* All this does is change the mode. */
1239 break;
1240 case FIX:
1241 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1242 break;
1243 case NOT:
1244 {
1245 long tmp[4];
1246 int i;
1247
1248 real_to_target (tmp, &d, GET_MODE (op));
1249 for (i = 0; i < 4; i++)
1250 tmp[i] = ~tmp[i];
1251 real_from_target (&d, tmp, mode);
1252 break;
1253 }
1254 default:
1255 gcc_unreachable ();
1256 }
1257 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1258 }
1259
1260 else if (GET_CODE (op) == CONST_DOUBLE
1261 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1262 && GET_MODE_CLASS (mode) == MODE_INT
1263 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1264 {
1265 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1266 operators are intentionally left unspecified (to ease implementation
1267 by target backends), for consistency, this routine implements the
1268 same semantics for constant folding as used by the middle-end. */
1269
1270 /* This was formerly used only for non-IEEE float.
1271 eggert@twinsun.com says it is safe for IEEE also. */
1272 HOST_WIDE_INT xh, xl, th, tl;
1273 REAL_VALUE_TYPE x, t;
1274 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1275 switch (code)
1276 {
1277 case FIX:
1278 if (REAL_VALUE_ISNAN (x))
1279 return const0_rtx;
1280
1281 /* Test against the signed upper bound. */
1282 if (width > HOST_BITS_PER_WIDE_INT)
1283 {
1284 th = ((unsigned HOST_WIDE_INT) 1
1285 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1286 tl = -1;
1287 }
1288 else
1289 {
1290 th = 0;
1291 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1292 }
1293 real_from_integer (&t, VOIDmode, tl, th, 0);
1294 if (REAL_VALUES_LESS (t, x))
1295 {
1296 xh = th;
1297 xl = tl;
1298 break;
1299 }
1300
1301 /* Test against the signed lower bound. */
1302 if (width > HOST_BITS_PER_WIDE_INT)
1303 {
1304 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1305 tl = 0;
1306 }
1307 else
1308 {
1309 th = -1;
1310 tl = (HOST_WIDE_INT) -1 << (width - 1);
1311 }
1312 real_from_integer (&t, VOIDmode, tl, th, 0);
1313 if (REAL_VALUES_LESS (x, t))
1314 {
1315 xh = th;
1316 xl = tl;
1317 break;
1318 }
1319 REAL_VALUE_TO_INT (&xl, &xh, x);
1320 break;
1321
1322 case UNSIGNED_FIX:
1323 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1324 return const0_rtx;
1325
1326 /* Test against the unsigned upper bound. */
1327 if (width == 2*HOST_BITS_PER_WIDE_INT)
1328 {
1329 th = -1;
1330 tl = -1;
1331 }
1332 else if (width >= HOST_BITS_PER_WIDE_INT)
1333 {
1334 th = ((unsigned HOST_WIDE_INT) 1
1335 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1336 tl = -1;
1337 }
1338 else
1339 {
1340 th = 0;
1341 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1342 }
1343 real_from_integer (&t, VOIDmode, tl, th, 1);
1344 if (REAL_VALUES_LESS (t, x))
1345 {
1346 xh = th;
1347 xl = tl;
1348 break;
1349 }
1350
1351 REAL_VALUE_TO_INT (&xl, &xh, x);
1352 break;
1353
1354 default:
1355 gcc_unreachable ();
1356 }
1357 return immed_double_const (xl, xh, mode);
1358 }
1359
1360 return NULL_RTX;
1361 }
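/* For example, (zero_extend:SI (const_int -1)) with QImode as OP_MODE
   folds to (const_int 255), and (fix:SI X) for a CONST_DOUBLE X larger
   than 2**31 - 1 saturates to (const_int 2147483647), matching the
   middle-end's constant-folding semantics mentioned above.  */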
1362 \f
1363 /* Subroutine of simplify_binary_operation to simplify a commutative,
1364 associative binary operation CODE with result mode MODE, operating
1365 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1366 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1367 canonicalization is possible. */
1368
1369 static rtx
1370 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1371 rtx op0, rtx op1)
1372 {
1373 rtx tem;
1374
1375 /* Linearize the operator to the left. */
1376 if (GET_CODE (op1) == code)
1377 {
1378 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1379 if (GET_CODE (op0) == code)
1380 {
1381 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1382 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1383 }
1384
1385 /* "a op (b op c)" becomes "(b op c) op a". */
1386 if (! swap_commutative_operands_p (op1, op0))
1387 return simplify_gen_binary (code, mode, op1, op0);
1388
1389 tem = op0;
1390 op0 = op1;
1391 op1 = tem;
1392 }
1393
1394 if (GET_CODE (op0) == code)
1395 {
1396 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1397 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1398 {
1399 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1400 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1401 }
1402
1403 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1404 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1405 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1406 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1407 if (tem != 0)
1408 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1409
1410 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1411 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1412 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1413 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1414 if (tem != 0)
1415 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1416 }
1417
1418 return 0;
1419 }
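/* For example, given OP0 = (mult X (const_int 4)) and OP1 = (const_int 2),
   the two constants combine and the result is
   simplify_gen_binary (MULT, mode, X, (const_int 8)), which the MULT case
   below may further turn into (ashift X (const_int 3)).  */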
1420
1421
1422 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1423 and OP1. Return 0 if no simplification is possible.
1424
1425 Don't use this for relational operations such as EQ or LT.
1426 Use simplify_relational_operation instead. */
1427 rtx
1428 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1429 rtx op0, rtx op1)
1430 {
1431 rtx trueop0, trueop1;
1432 rtx tem;
1433
1434 /* Relational operations don't work here. We must know the mode
1435 of the operands in order to do the comparison correctly.
1436 Assuming a full word can give incorrect results.
1437 Consider comparing 128 with -128 in QImode. */
1438 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1439 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1440
1441 /* Make sure the constant is second. */
1442 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1443 && swap_commutative_operands_p (op0, op1))
1444 {
1445 tem = op0, op0 = op1, op1 = tem;
1446 }
1447
1448 trueop0 = avoid_constant_pool_reference (op0);
1449 trueop1 = avoid_constant_pool_reference (op1);
1450
1451 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1452 if (tem)
1453 return tem;
1454 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1455 }
1456
1457 static rtx
1458 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1459 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1460 {
1461 rtx tem, reversed, opleft, opright;
1462 HOST_WIDE_INT val;
1463 unsigned int width = GET_MODE_BITSIZE (mode);
1464
1465 /* Even if we can't compute a constant result,
1466 there are some cases worth simplifying. */
1467
1468 switch (code)
1469 {
1470 case PLUS:
1471 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1472 when x is NaN, infinite, or finite and nonzero. They aren't
1473 when x is -0 and the rounding mode is not towards -infinity,
1474 since (-0) + 0 is then 0. */
1475 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1476 return op0;
1477
1478 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1479 transformations are safe even for IEEE. */
1480 if (GET_CODE (op0) == NEG)
1481 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1482 else if (GET_CODE (op1) == NEG)
1483 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1484
1485 /* (~a) + 1 -> -a */
1486 if (INTEGRAL_MODE_P (mode)
1487 && GET_CODE (op0) == NOT
1488 && trueop1 == const1_rtx)
1489 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1490
1491 /* Handle both-operands-constant cases. We can only add
1492 CONST_INTs to constants since the sum of relocatable symbols
1493 can't be handled by most assemblers. Don't add CONST_INT
1494 to CONST_INT since overflow won't be computed properly if wider
1495 than HOST_BITS_PER_WIDE_INT. */
1496
1497 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1498 && GET_CODE (op1) == CONST_INT)
1499 return plus_constant (op0, INTVAL (op1));
1500 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1501 && GET_CODE (op0) == CONST_INT)
1502 return plus_constant (op1, INTVAL (op0));
1503
1504 /* See if this is something like X * C - X or vice versa or
1505 if the multiplication is written as a shift. If so, we can
1506 distribute and make a new multiply, shift, or maybe just
1507 have X (if C is 2 in the example above). But don't make
1508 something more expensive than we had before. */
1509
1510 if (SCALAR_INT_MODE_P (mode))
1511 {
1512 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1513 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1514 rtx lhs = op0, rhs = op1;
1515
1516 if (GET_CODE (lhs) == NEG)
1517 {
1518 coeff0l = -1;
1519 coeff0h = -1;
1520 lhs = XEXP (lhs, 0);
1521 }
1522 else if (GET_CODE (lhs) == MULT
1523 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1524 {
1525 coeff0l = INTVAL (XEXP (lhs, 1));
1526 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1527 lhs = XEXP (lhs, 0);
1528 }
1529 else if (GET_CODE (lhs) == ASHIFT
1530 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1531 && INTVAL (XEXP (lhs, 1)) >= 0
1532 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1533 {
1534 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1535 coeff0h = 0;
1536 lhs = XEXP (lhs, 0);
1537 }
1538
1539 if (GET_CODE (rhs) == NEG)
1540 {
1541 coeff1l = -1;
1542 coeff1h = -1;
1543 rhs = XEXP (rhs, 0);
1544 }
1545 else if (GET_CODE (rhs) == MULT
1546 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1547 {
1548 coeff1l = INTVAL (XEXP (rhs, 1));
1549 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1550 rhs = XEXP (rhs, 0);
1551 }
1552 else if (GET_CODE (rhs) == ASHIFT
1553 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1554 && INTVAL (XEXP (rhs, 1)) >= 0
1555 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1556 {
1557 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1558 coeff1h = 0;
1559 rhs = XEXP (rhs, 0);
1560 }
1561
1562 if (rtx_equal_p (lhs, rhs))
1563 {
1564 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1565 rtx coeff;
1566 unsigned HOST_WIDE_INT l;
1567 HOST_WIDE_INT h;
1568
1569 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1570 coeff = immed_double_const (l, h, mode);
1571
1572 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1573 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1574 ? tem : 0;
1575 }
1576 }
1577
1578 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1579 if ((GET_CODE (op1) == CONST_INT
1580 || GET_CODE (op1) == CONST_DOUBLE)
1581 && GET_CODE (op0) == XOR
1582 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1583 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1584 && mode_signbit_p (mode, op1))
1585 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1586 simplify_gen_binary (XOR, mode, op1,
1587 XEXP (op0, 1)));
1588
1589 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1590 if (GET_CODE (op0) == MULT
1591 && GET_CODE (XEXP (op0, 0)) == NEG)
1592 {
1593 rtx in1, in2;
1594
1595 in1 = XEXP (XEXP (op0, 0), 0);
1596 in2 = XEXP (op0, 1);
1597 return simplify_gen_binary (MINUS, mode, op1,
1598 simplify_gen_binary (MULT, mode,
1599 in1, in2));
1600 }
1601
1602 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1603 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1604 is 1. */
1605 if (COMPARISON_P (op0)
1606 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1607 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1608 && (reversed = reversed_comparison (op0, mode)))
1609 return
1610 simplify_gen_unary (NEG, mode, reversed, mode);
1611
1612 /* If one of the operands is a PLUS or a MINUS, see if we can
1613 simplify this by the associative law.
1614 Don't use the associative law for floating point.
1615 The inaccuracy makes it nonassociative,
1616 and subtle programs can break if operations are associated. */
1617
1618 if (INTEGRAL_MODE_P (mode)
1619 && (plus_minus_operand_p (op0)
1620 || plus_minus_operand_p (op1))
1621 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1622 return tem;
1623
1624 /* Reassociate floating point addition only when the user
1625 specifies unsafe math optimizations. */
1626 if (FLOAT_MODE_P (mode)
1627 && flag_unsafe_math_optimizations)
1628 {
1629 tem = simplify_associative_operation (code, mode, op0, op1);
1630 if (tem)
1631 return tem;
1632 }
1633 break;
1634
1635 case COMPARE:
1636 #ifdef HAVE_cc0
1637 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1638 using cc0, in which case we want to leave it as a COMPARE
1639 so we can distinguish it from a register-register-copy.
1640
1641 In IEEE floating point, x-0 is not the same as x. */
1642
1643 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1644 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1645 && trueop1 == CONST0_RTX (mode))
1646 return op0;
1647 #endif
1648
1649 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1650 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1651 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1652 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1653 {
1654 rtx xop00 = XEXP (op0, 0);
1655 rtx xop10 = XEXP (op1, 0);
1656
1657 #ifdef HAVE_cc0
1658 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1659 #else
1660 if (REG_P (xop00) && REG_P (xop10)
1661 && GET_MODE (xop00) == GET_MODE (xop10)
1662 && REGNO (xop00) == REGNO (xop10)
1663 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1664 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1665 #endif
1666 return xop00;
1667 }
1668 break;
1669
1670 case MINUS:
1671 /* We can't assume x-x is 0 even with non-IEEE floating point,
1672 but since it is zero except in very strange circumstances, we
1673 will treat it as zero with -funsafe-math-optimizations. */
1674 if (rtx_equal_p (trueop0, trueop1)
1675 && ! side_effects_p (op0)
1676 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1677 return CONST0_RTX (mode);
1678
1679 /* Change subtraction from zero into negation. (0 - x) is the
1680 same as -x when x is NaN, infinite, or finite and nonzero.
1681 But if the mode has signed zeros, and does not round towards
1682 -infinity, then 0 - 0 is 0, not -0. */
1683 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1684 return simplify_gen_unary (NEG, mode, op1, mode);
1685
1686 /* (-1 - a) is ~a. */
1687 if (trueop0 == constm1_rtx)
1688 return simplify_gen_unary (NOT, mode, op1, mode);
1689
1690 /* Subtracting 0 has no effect unless the mode has signed zeros
1691 and supports rounding towards -infinity. In such a case,
1692 0 - 0 is -0. */
1693 if (!(HONOR_SIGNED_ZEROS (mode)
1694 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1695 && trueop1 == CONST0_RTX (mode))
1696 return op0;
1697
1698 /* See if this is something like X * C - X or vice versa or
1699 if the multiplication is written as a shift. If so, we can
1700 distribute and make a new multiply, shift, or maybe just
1701 have X (if C is 2 in the example above). But don't make
1702 something more expensive than we had before. */
1703
1704 if (SCALAR_INT_MODE_P (mode))
1705 {
1706 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1707 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1708 rtx lhs = op0, rhs = op1;
1709
1710 if (GET_CODE (lhs) == NEG)
1711 {
1712 coeff0l = -1;
1713 coeff0h = -1;
1714 lhs = XEXP (lhs, 0);
1715 }
1716 else if (GET_CODE (lhs) == MULT
1717 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1718 {
1719 coeff0l = INTVAL (XEXP (lhs, 1));
1720 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1721 lhs = XEXP (lhs, 0);
1722 }
1723 else if (GET_CODE (lhs) == ASHIFT
1724 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1725 && INTVAL (XEXP (lhs, 1)) >= 0
1726 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1727 {
1728 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1729 coeff0h = 0;
1730 lhs = XEXP (lhs, 0);
1731 }
1732
1733 if (GET_CODE (rhs) == NEG)
1734 {
1735 negcoeff1l = 1;
1736 negcoeff1h = 0;
1737 rhs = XEXP (rhs, 0);
1738 }
1739 else if (GET_CODE (rhs) == MULT
1740 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1741 {
1742 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1743 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1744 rhs = XEXP (rhs, 0);
1745 }
1746 else if (GET_CODE (rhs) == ASHIFT
1747 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1748 && INTVAL (XEXP (rhs, 1)) >= 0
1749 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1750 {
1751 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1752 negcoeff1h = -1;
1753 rhs = XEXP (rhs, 0);
1754 }
1755
1756 if (rtx_equal_p (lhs, rhs))
1757 {
1758 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1759 rtx coeff;
1760 unsigned HOST_WIDE_INT l;
1761 HOST_WIDE_INT h;
1762
1763 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1764 coeff = immed_double_const (l, h, mode);
1765
1766 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1767 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1768 ? tem : 0;
1769 }
1770 }
1771
1772 /* (a - (-b)) -> (a + b). True even for IEEE. */
1773 if (GET_CODE (op1) == NEG)
1774 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1775
1776 /* (-x - c) may be simplified as (-c - x). */
1777 if (GET_CODE (op0) == NEG
1778 && (GET_CODE (op1) == CONST_INT
1779 || GET_CODE (op1) == CONST_DOUBLE))
1780 {
1781 tem = simplify_unary_operation (NEG, mode, op1, mode);
1782 if (tem)
1783 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1784 }
1785
1786 /* Don't let a relocatable value get a negative coeff. */
1787 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1788 return simplify_gen_binary (PLUS, mode,
1789 op0,
1790 neg_const_int (mode, op1));
1791
1792 /* (x - (x & y)) -> (x & ~y) */
1793 if (GET_CODE (op1) == AND)
1794 {
1795 if (rtx_equal_p (op0, XEXP (op1, 0)))
1796 {
1797 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1798 GET_MODE (XEXP (op1, 1)));
1799 return simplify_gen_binary (AND, mode, op0, tem);
1800 }
1801 if (rtx_equal_p (op0, XEXP (op1, 1)))
1802 {
1803 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1804 GET_MODE (XEXP (op1, 0)));
1805 return simplify_gen_binary (AND, mode, op0, tem);
1806 }
1807 }
1808
1809 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1810 by reversing the comparison code if valid. */
1811 if (STORE_FLAG_VALUE == 1
1812 && trueop0 == const1_rtx
1813 && COMPARISON_P (op1)
1814 && (reversed = reversed_comparison (op1, mode)))
1815 return reversed;
1816
1817 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1818 if (GET_CODE (op1) == MULT
1819 && GET_CODE (XEXP (op1, 0)) == NEG)
1820 {
1821 rtx in1, in2;
1822
1823 in1 = XEXP (XEXP (op1, 0), 0);
1824 in2 = XEXP (op1, 1);
1825 return simplify_gen_binary (PLUS, mode,
1826 simplify_gen_binary (MULT, mode,
1827 in1, in2),
1828 op0);
1829 }
1830
1831 /* Canonicalize (minus (neg A) (mult B C)) to
1832 (minus (mult (neg B) C) A). */
1833 if (GET_CODE (op1) == MULT
1834 && GET_CODE (op0) == NEG)
1835 {
1836 rtx in1, in2;
1837
1838 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1839 in2 = XEXP (op1, 1);
1840 return simplify_gen_binary (MINUS, mode,
1841 simplify_gen_binary (MULT, mode,
1842 in1, in2),
1843 XEXP (op0, 0));
1844 }
1845
1846 /* If one of the operands is a PLUS or a MINUS, see if we can
1847 simplify this by the associative law. This will, for example,
1848 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1849 Don't use the associative law for floating point.
1850 The inaccuracy makes it nonassociative,
1851 and subtle programs can break if operations are associated. */
1852
1853 if (INTEGRAL_MODE_P (mode)
1854 && (plus_minus_operand_p (op0)
1855 || plus_minus_operand_p (op1))
1856 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1857 return tem;
1858 break;
1859
1860 case MULT:
1861 if (trueop1 == constm1_rtx)
1862 return simplify_gen_unary (NEG, mode, op0, mode);
1863
1864 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1865 x is NaN, since x * 0 is then also NaN. Nor is it valid
1866 when the mode has signed zeros, since multiplying a negative
1867 number by 0 will give -0, not 0. */
1868 if (!HONOR_NANS (mode)
1869 && !HONOR_SIGNED_ZEROS (mode)
1870 && trueop1 == CONST0_RTX (mode)
1871 && ! side_effects_p (op0))
1872 return op1;
1873
1874 /* In IEEE floating point, x*1 is not equivalent to x for
1875 signalling NaNs. */
1876 if (!HONOR_SNANS (mode)
1877 && trueop1 == CONST1_RTX (mode))
1878 return op0;
1879
1880 /* Convert a multiplication by a constant power of two into a
1881 shift by the corresponding bit count. */
1882 if (GET_CODE (trueop1) == CONST_INT
1883 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1884 /* If the mode is larger than the host word size, and the
1885 uppermost bit is set, then this isn't a power of two due
1886 to implicit sign extension. */
1887 && (width <= HOST_BITS_PER_WIDE_INT
1888 || val != HOST_BITS_PER_WIDE_INT - 1))
1889 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1890
1891 /* Likewise for multipliers wider than a word. */
1892 else if (GET_CODE (trueop1) == CONST_DOUBLE
1893 && (GET_MODE (trueop1) == VOIDmode
1894 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1895 && GET_MODE (op0) == mode
1896 && CONST_DOUBLE_LOW (trueop1) == 0
1897 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1898 return simplify_gen_binary (ASHIFT, mode, op0,
1899 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
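      /* E.g. (mult X (const_int 8)) becomes (ashift X (const_int 3)); with a
         32-bit HOST_WIDE_INT, a CONST_DOUBLE multiplier of 2**40 becomes
         (ashift X (const_int 40)).  */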
1900
1901 /* x*2 is x+x and x*(-1) is -x */
1902 if (GET_CODE (trueop1) == CONST_DOUBLE
1903 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1904 && GET_MODE (op0) == mode)
1905 {
1906 REAL_VALUE_TYPE d;
1907 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1908
1909 if (REAL_VALUES_EQUAL (d, dconst2))
1910 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1911
1912 if (REAL_VALUES_EQUAL (d, dconstm1))
1913 return simplify_gen_unary (NEG, mode, op0, mode);
1914 }
1915
1916 /* Reassociate multiplication, but for floating point MULTs
1917 only when the user specifies unsafe math optimizations. */
1918 if (! FLOAT_MODE_P (mode)
1919 || flag_unsafe_math_optimizations)
1920 {
1921 tem = simplify_associative_operation (code, mode, op0, op1);
1922 if (tem)
1923 return tem;
1924 }
1925 break;
1926
1927 case IOR:
1928 if (trueop1 == const0_rtx)
1929 return op0;
1930 if (GET_CODE (trueop1) == CONST_INT
1931 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1932 == GET_MODE_MASK (mode)))
1933 return op1;
1934 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1935 return op0;
1936 /* A | (~A) -> -1 */
1937 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1938 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1939 && ! side_effects_p (op0)
1940 && SCALAR_INT_MODE_P (mode))
1941 return constm1_rtx;
1942
1943 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1944 if (GET_CODE (op1) == CONST_INT
1945 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1946 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1947 return op1;
1948
1949 /* Convert (A & B) | A to A. */
1950 if (GET_CODE (op0) == AND
1951 && (rtx_equal_p (XEXP (op0, 0), op1)
1952 || rtx_equal_p (XEXP (op0, 1), op1))
1953 && ! side_effects_p (XEXP (op0, 0))
1954 && ! side_effects_p (XEXP (op0, 1)))
1955 return op1;
1956
1957 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1958 mode size to (rotate A CX). */
1959
1960 if (GET_CODE (op1) == ASHIFT
1961 || GET_CODE (op1) == SUBREG)
1962 {
1963 opleft = op1;
1964 opright = op0;
1965 }
1966 else
1967 {
1968 opright = op1;
1969 opleft = op0;
1970 }
1971
1972 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
1973 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
1974 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
1975 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1976 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
1977 == GET_MODE_BITSIZE (mode)))
1978 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
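      /* E.g. in SImode, (ior (ashift X (const_int 24)) (lshiftrt X (const_int 8)))
         becomes (rotate X (const_int 24)), since 24 + 8 is the mode size.  */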
1979
1980 /* Same, but for ashift that has been "simplified" to a wider mode
1981 by simplify_shift_const. */
1982
1983 if (GET_CODE (opleft) == SUBREG
1984 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
1985 && GET_CODE (opright) == LSHIFTRT
1986 && GET_CODE (XEXP (opright, 0)) == SUBREG
1987 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
1988 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
1989 && (GET_MODE_SIZE (GET_MODE (opleft))
1990 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
1991 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
1992 SUBREG_REG (XEXP (opright, 0)))
1993 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
1994 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1995 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
1996 == GET_MODE_BITSIZE (mode)))
1997 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
1998 XEXP (SUBREG_REG (opleft), 1));
1999
2000 /* If we have (ior (and X C1) C2), simplify this by making
2001 C1 as small as possible if C1 actually changes. */
2002 if (GET_CODE (op1) == CONST_INT
2003 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2004 || INTVAL (op1) > 0)
2005 && GET_CODE (op0) == AND
2006 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2007 && GET_CODE (op1) == CONST_INT
2008 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2009 return simplify_gen_binary (IOR, mode,
2010 simplify_gen_binary
2011 (AND, mode, XEXP (op0, 0),
2012 GEN_INT (INTVAL (XEXP (op0, 1))
2013 & ~INTVAL (op1))),
2014 op1);
2015
2016 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2017 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2018 the PLUS does not affect any of the bits in OP1: then we can do
2019 the IOR as a PLUS and we can associate. This is valid if OP1
2020 can be safely shifted left C bits. */
2021 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2022 && GET_CODE (XEXP (op0, 0)) == PLUS
2023 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2024 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2025 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2026 {
2027 int count = INTVAL (XEXP (op0, 1));
2028 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2029
2030 if (mask >> count == INTVAL (trueop1)
2031 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2032 return simplify_gen_binary (ASHIFTRT, mode,
2033 plus_constant (XEXP (op0, 0), mask),
2034 XEXP (op0, 1));
2035 }
2036
2037 tem = simplify_associative_operation (code, mode, op0, op1);
2038 if (tem)
2039 return tem;
2040 break;
2041
2042 case XOR:
2043 if (trueop1 == const0_rtx)
2044 return op0;
2045 if (GET_CODE (trueop1) == CONST_INT
2046 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2047 == GET_MODE_MASK (mode)))
2048 return simplify_gen_unary (NOT, mode, op0, mode);
2049 if (rtx_equal_p (trueop0, trueop1)
2050 && ! side_effects_p (op0)
2051 && GET_MODE_CLASS (mode) != MODE_CC)
2052 return CONST0_RTX (mode);
2053
2054 /* Canonicalize XOR of the most significant bit to PLUS. */
2055 if ((GET_CODE (op1) == CONST_INT
2056 || GET_CODE (op1) == CONST_DOUBLE)
2057 && mode_signbit_p (mode, op1))
2058 return simplify_gen_binary (PLUS, mode, op0, op1);
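      /* E.g. in SImode, (xor X (const_int 0x80000000)) is rewritten as a PLUS:
         flipping the sign bit is the same as adding it, because the carry out
         of the most significant bit is discarded.  */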
2059 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2060 if ((GET_CODE (op1) == CONST_INT
2061 || GET_CODE (op1) == CONST_DOUBLE)
2062 && GET_CODE (op0) == PLUS
2063 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2064 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2065 && mode_signbit_p (mode, XEXP (op0, 1)))
2066 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2067 simplify_gen_binary (XOR, mode, op1,
2068 XEXP (op0, 1)));
2069
2070 /* If we are XORing two things that have no bits in common,
2071 convert them into an IOR. This helps to detect rotation encoded
2072 using those methods and possibly other simplifications. */
2073
2074 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2075 && (nonzero_bits (op0, mode)
2076 & nonzero_bits (op1, mode)) == 0)
2077 return (simplify_gen_binary (IOR, mode, op0, op1));
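      /* E.g. if nonzero_bits shows op0 can only set 0x0f and op1 only 0xf0,
         XOR and IOR of the two give identical results.  */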
2078
2079 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2080 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2081 (NOT y). */
2082 {
2083 int num_negated = 0;
2084
2085 if (GET_CODE (op0) == NOT)
2086 num_negated++, op0 = XEXP (op0, 0);
2087 if (GET_CODE (op1) == NOT)
2088 num_negated++, op1 = XEXP (op1, 0);
2089
2090 if (num_negated == 2)
2091 return simplify_gen_binary (XOR, mode, op0, op1);
2092 else if (num_negated == 1)
2093 return simplify_gen_unary (NOT, mode,
2094 simplify_gen_binary (XOR, mode, op0, op1),
2095 mode);
2096 }
2097
2098 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2099 correspond to a machine insn or result in further simplifications
2100 if B is a constant. */
2101
2102 if (GET_CODE (op0) == AND
2103 && rtx_equal_p (XEXP (op0, 1), op1)
2104 && ! side_effects_p (op1))
2105 return simplify_gen_binary (AND, mode,
2106 simplify_gen_unary (NOT, mode,
2107 XEXP (op0, 0), mode),
2108 op1);
2109
2110 else if (GET_CODE (op0) == AND
2111 && rtx_equal_p (XEXP (op0, 0), op1)
2112 && ! side_effects_p (op1))
2113 return simplify_gen_binary (AND, mode,
2114 simplify_gen_unary (NOT, mode,
2115 XEXP (op0, 1), mode),
2116 op1);
2117
2118 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2119 comparison if STORE_FLAG_VALUE is 1. */
2120 if (STORE_FLAG_VALUE == 1
2121 && trueop1 == const1_rtx
2122 && COMPARISON_P (op0)
2123 && (reversed = reversed_comparison (op0, mode)))
2124 return reversed;
2125
2126 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2127 is (lt foo (const_int 0)), so we can perform the above
2128 simplification if STORE_FLAG_VALUE is 1. */
2129
2130 if (STORE_FLAG_VALUE == 1
2131 && trueop1 == const1_rtx
2132 && GET_CODE (op0) == LSHIFTRT
2133 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2134 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2135 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2136
2137 /* (xor (comparison foo bar) (const_int sign-bit))
2138 when STORE_FLAG_VALUE is the sign bit. */
2139 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2140 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2141 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2142 && trueop1 == const_true_rtx
2143 && COMPARISON_P (op0)
2144 && (reversed = reversed_comparison (op0, mode)))
2145 return reversed;
2146
2147
2148
2149 tem = simplify_associative_operation (code, mode, op0, op1);
2150 if (tem)
2151 return tem;
2152 break;
2153
2154 case AND:
2155 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2156 return trueop1;
2157 /* If we are turning off bits already known off in OP0, we need
2158 not do an AND. */
2159 if (GET_CODE (trueop1) == CONST_INT
2160 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2161 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2162 return op0;
2163 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2164 && GET_MODE_CLASS (mode) != MODE_CC)
2165 return op0;
2166 /* A & (~A) -> 0 */
2167 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2168 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2169 && ! side_effects_p (op0)
2170 && GET_MODE_CLASS (mode) != MODE_CC)
2171 return CONST0_RTX (mode);
2172
2173 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2174 there are no nonzero bits of C outside of X's mode. */
2175 if ((GET_CODE (op0) == SIGN_EXTEND
2176 || GET_CODE (op0) == ZERO_EXTEND)
2177 && GET_CODE (trueop1) == CONST_INT
2178 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2179 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2180 & INTVAL (trueop1)) == 0)
2181 {
2182 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2183 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2184 gen_int_mode (INTVAL (trueop1),
2185 imode));
2186 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2187 }
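      /* E.g. (and (sign_extend:SI (reg:QI R)) (const_int 0x7f)) becomes
         (zero_extend:SI (and:QI (reg:QI R) (const_int 0x7f))), since 0x7f
         has no bits outside QImode.  */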
2188
2189 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2190 insn (and may simplify more). */
2191 if (GET_CODE (op0) == XOR
2192 && rtx_equal_p (XEXP (op0, 0), op1)
2193 && ! side_effects_p (op1))
2194 return simplify_gen_binary (AND, mode,
2195 simplify_gen_unary (NOT, mode,
2196 XEXP (op0, 1), mode),
2197 op1);
2198
2199 if (GET_CODE (op0) == XOR
2200 && rtx_equal_p (XEXP (op0, 1), op1)
2201 && ! side_effects_p (op1))
2202 return simplify_gen_binary (AND, mode,
2203 simplify_gen_unary (NOT, mode,
2204 XEXP (op0, 0), mode),
2205 op1);
2206
2207 /* Similarly for (~(A ^ B)) & A. */
2208 if (GET_CODE (op0) == NOT
2209 && GET_CODE (XEXP (op0, 0)) == XOR
2210 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2211 && ! side_effects_p (op1))
2212 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2213
2214 if (GET_CODE (op0) == NOT
2215 && GET_CODE (XEXP (op0, 0)) == XOR
2216 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2217 && ! side_effects_p (op1))
2218 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2219
2220 /* Convert (A | B) & A to A. */
2221 if (GET_CODE (op0) == IOR
2222 && (rtx_equal_p (XEXP (op0, 0), op1)
2223 || rtx_equal_p (XEXP (op0, 1), op1))
2224 && ! side_effects_p (XEXP (op0, 0))
2225 && ! side_effects_p (XEXP (op0, 1)))
2226 return op1;
2227
2228 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2229 ((A & N) + B) & M -> (A + B) & M
2230 Similarly if (N & M) == 0,
2231 ((A | N) + B) & M -> (A + B) & M
2232 and for - instead of + and/or ^ instead of |. */
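      /* For instance, with M == 0xff: ((A & 0x1ff) + B) & 0xff becomes
         (A + B) & 0xff because 0x1ff & 0xff == 0xff, and ((A | 0x100) + B) & 0xff
         becomes (A + B) & 0xff because 0x100 & 0xff == 0.  */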
2233 if (GET_CODE (trueop1) == CONST_INT
2234 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2235 && ~INTVAL (trueop1)
2236 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2237 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2238 {
2239 rtx pmop[2];
2240 int which;
2241
2242 pmop[0] = XEXP (op0, 0);
2243 pmop[1] = XEXP (op0, 1);
2244
2245 for (which = 0; which < 2; which++)
2246 {
2247 tem = pmop[which];
2248 switch (GET_CODE (tem))
2249 {
2250 case AND:
2251 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2252 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2253 == INTVAL (trueop1))
2254 pmop[which] = XEXP (tem, 0);
2255 break;
2256 case IOR:
2257 case XOR:
2258 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2259 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2260 pmop[which] = XEXP (tem, 0);
2261 break;
2262 default:
2263 break;
2264 }
2265 }
2266
2267 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2268 {
2269 tem = simplify_gen_binary (GET_CODE (op0), mode,
2270 pmop[0], pmop[1]);
2271 return simplify_gen_binary (code, mode, tem, op1);
2272 }
2273 }
2274 tem = simplify_associative_operation (code, mode, op0, op1);
2275 if (tem)
2276 return tem;
2277 break;
2278
2279 case UDIV:
2280 /* 0/x is 0 (or x&0 if x has side-effects). */
2281 if (trueop0 == CONST0_RTX (mode))
2282 {
2283 if (side_effects_p (op1))
2284 return simplify_gen_binary (AND, mode, op1, trueop0);
2285 return trueop0;
2286 }
2287 /* x/1 is x. */
2288 if (trueop1 == CONST1_RTX (mode))
2289 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2290 /* Convert divide by power of two into shift. */
2291 if (GET_CODE (trueop1) == CONST_INT
2292 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2293 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
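      /* E.g. (udiv X (const_int 8)) becomes (lshiftrt X (const_int 3)).  */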
2294 break;
2295
2296 case DIV:
2297 /* Handle floating point and integers separately. */
2298 if (SCALAR_FLOAT_MODE_P (mode))
2299 {
2300 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2301 safe for modes with NaNs, since 0.0 / 0.0 will then be
2302 NaN rather than 0.0. Nor is it safe for modes with signed
2303 zeros, since dividing 0 by a negative number gives -0.0 */
2304 if (trueop0 == CONST0_RTX (mode)
2305 && !HONOR_NANS (mode)
2306 && !HONOR_SIGNED_ZEROS (mode)
2307 && ! side_effects_p (op1))
2308 return op0;
2309 /* x/1.0 is x. */
2310 if (trueop1 == CONST1_RTX (mode)
2311 && !HONOR_SNANS (mode))
2312 return op0;
2313
2314 if (GET_CODE (trueop1) == CONST_DOUBLE
2315 && trueop1 != CONST0_RTX (mode))
2316 {
2317 REAL_VALUE_TYPE d;
2318 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2319
2320 /* x/-1.0 is -x. */
2321 if (REAL_VALUES_EQUAL (d, dconstm1)
2322 && !HONOR_SNANS (mode))
2323 return simplify_gen_unary (NEG, mode, op0, mode);
2324
2325 /* Change FP division by a constant into multiplication.
2326 Only do this with -funsafe-math-optimizations. */
2327 if (flag_unsafe_math_optimizations
2328 && !REAL_VALUES_EQUAL (d, dconst0))
2329 {
2330 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2331 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2332 return simplify_gen_binary (MULT, mode, op0, tem);
2333 }
2334 }
2335 }
2336 else
2337 {
2338 /* 0/x is 0 (or x&0 if x has side-effects). */
2339 if (trueop0 == CONST0_RTX (mode))
2340 {
2341 if (side_effects_p (op1))
2342 return simplify_gen_binary (AND, mode, op1, trueop0);
2343 return trueop0;
2344 }
2345 /* x/1 is x. */
2346 if (trueop1 == CONST1_RTX (mode))
2347 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2348 /* x/-1 is -x. */
2349 if (trueop1 == constm1_rtx)
2350 {
2351 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2352 return simplify_gen_unary (NEG, mode, x, mode);
2353 }
2354 }
2355 break;
2356
2357 case UMOD:
2358 /* 0%x is 0 (or x&0 if x has side-effects). */
2359 if (trueop0 == CONST0_RTX (mode))
2360 {
2361 if (side_effects_p (op1))
2362 return simplify_gen_binary (AND, mode, op1, trueop0);
2363 return trueop0;
2364 }
2365 /* x%1 is 0 (or x&0 if x has side-effects). */
2366 if (trueop1 == CONST1_RTX (mode))
2367 {
2368 if (side_effects_p (op0))
2369 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2370 return CONST0_RTX (mode);
2371 }
2372 /* Implement modulus by power of two as AND. */
2373 if (GET_CODE (trueop1) == CONST_INT
2374 && exact_log2 (INTVAL (trueop1)) > 0)
2375 return simplify_gen_binary (AND, mode, op0,
2376 GEN_INT (INTVAL (op1) - 1));
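      /* E.g. (umod X (const_int 8)) becomes (and X (const_int 7)).  */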
2377 break;
2378
2379 case MOD:
2380 /* 0%x is 0 (or x&0 if x has side-effects). */
2381 if (trueop0 == CONST0_RTX (mode))
2382 {
2383 if (side_effects_p (op1))
2384 return simplify_gen_binary (AND, mode, op1, trueop0);
2385 return trueop0;
2386 }
2387 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2388 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2389 {
2390 if (side_effects_p (op0))
2391 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2392 return CONST0_RTX (mode);
2393 }
2394 break;
2395
2396 case ROTATERT:
2397 case ROTATE:
2398 case ASHIFTRT:
2399 /* Rotating ~0 always results in ~0. */
2400 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2401 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2402 && ! side_effects_p (op1))
2403 return op0;
2404
2405 /* Fall through.... */
2406
2407 case ASHIFT:
2408 case LSHIFTRT:
2409 if (trueop1 == CONST0_RTX (mode))
2410 return op0;
2411 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2412 return op0;
2413 break;
2414
2415 case SMIN:
2416 if (width <= HOST_BITS_PER_WIDE_INT
2417 && GET_CODE (trueop1) == CONST_INT
2418 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2419 && ! side_effects_p (op0))
2420 return op1;
2421 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2422 return op0;
2423 tem = simplify_associative_operation (code, mode, op0, op1);
2424 if (tem)
2425 return tem;
2426 break;
2427
2428 case SMAX:
2429 if (width <= HOST_BITS_PER_WIDE_INT
2430 && GET_CODE (trueop1) == CONST_INT
2431 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2432 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2433 && ! side_effects_p (op0))
2434 return op1;
2435 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2436 return op0;
2437 tem = simplify_associative_operation (code, mode, op0, op1);
2438 if (tem)
2439 return tem;
2440 break;
2441
2442 case UMIN:
2443 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2444 return op1;
2445 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2446 return op0;
2447 tem = simplify_associative_operation (code, mode, op0, op1);
2448 if (tem)
2449 return tem;
2450 break;
2451
2452 case UMAX:
2453 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2454 return op1;
2455 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2456 return op0;
2457 tem = simplify_associative_operation (code, mode, op0, op1);
2458 if (tem)
2459 return tem;
2460 break;
2461
2462 case SS_PLUS:
2463 case US_PLUS:
2464 case SS_MINUS:
2465 case US_MINUS:
2466 /* ??? There are simplifications that can be done. */
2467 return 0;
2468
2469 case VEC_SELECT:
2470 if (!VECTOR_MODE_P (mode))
2471 {
2472 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2473 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2474 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2475 gcc_assert (XVECLEN (trueop1, 0) == 1);
2476 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2477
2478 if (GET_CODE (trueop0) == CONST_VECTOR)
2479 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2480 (trueop1, 0, 0)));
2481 }
2482 else
2483 {
2484 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2485 gcc_assert (GET_MODE_INNER (mode)
2486 == GET_MODE_INNER (GET_MODE (trueop0)));
2487 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2488
2489 if (GET_CODE (trueop0) == CONST_VECTOR)
2490 {
2491 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2492 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2493 rtvec v = rtvec_alloc (n_elts);
2494 unsigned int i;
2495
2496 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2497 for (i = 0; i < n_elts; i++)
2498 {
2499 rtx x = XVECEXP (trueop1, 0, i);
2500
2501 gcc_assert (GET_CODE (x) == CONST_INT);
2502 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2503 INTVAL (x));
2504 }
2505
2506 return gen_rtx_CONST_VECTOR (mode, v);
2507 }
2508 }
2509
2510 if (XVECLEN (trueop1, 0) == 1
2511 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2512 && GET_CODE (trueop0) == VEC_CONCAT)
2513 {
2514 rtx vec = trueop0;
2515 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2516
2517 /* Try to find the element in the VEC_CONCAT. */
2518 while (GET_MODE (vec) != mode
2519 && GET_CODE (vec) == VEC_CONCAT)
2520 {
2521 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2522 if (offset < vec_size)
2523 vec = XEXP (vec, 0);
2524 else
2525 {
2526 offset -= vec_size;
2527 vec = XEXP (vec, 1);
2528 }
2529 vec = avoid_constant_pool_reference (vec);
2530 }
2531
2532 if (GET_MODE (vec) == mode)
2533 return vec;
2534 }
2535
2536 return 0;
2537 case VEC_CONCAT:
2538 {
2539 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2540 ? GET_MODE (trueop0)
2541 : GET_MODE_INNER (mode));
2542 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2543 ? GET_MODE (trueop1)
2544 : GET_MODE_INNER (mode));
2545
2546 gcc_assert (VECTOR_MODE_P (mode));
2547 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2548 == GET_MODE_SIZE (mode));
2549
2550 if (VECTOR_MODE_P (op0_mode))
2551 gcc_assert (GET_MODE_INNER (mode)
2552 == GET_MODE_INNER (op0_mode));
2553 else
2554 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2555
2556 if (VECTOR_MODE_P (op1_mode))
2557 gcc_assert (GET_MODE_INNER (mode)
2558 == GET_MODE_INNER (op1_mode));
2559 else
2560 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2561
2562 if ((GET_CODE (trueop0) == CONST_VECTOR
2563 || GET_CODE (trueop0) == CONST_INT
2564 || GET_CODE (trueop0) == CONST_DOUBLE)
2565 && (GET_CODE (trueop1) == CONST_VECTOR
2566 || GET_CODE (trueop1) == CONST_INT
2567 || GET_CODE (trueop1) == CONST_DOUBLE))
2568 {
2569 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2570 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2571 rtvec v = rtvec_alloc (n_elts);
2572 unsigned int i;
2573 unsigned in_n_elts = 1;
2574
2575 if (VECTOR_MODE_P (op0_mode))
2576 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2577 for (i = 0; i < n_elts; i++)
2578 {
2579 if (i < in_n_elts)
2580 {
2581 if (!VECTOR_MODE_P (op0_mode))
2582 RTVEC_ELT (v, i) = trueop0;
2583 else
2584 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2585 }
2586 else
2587 {
2588 if (!VECTOR_MODE_P (op1_mode))
2589 RTVEC_ELT (v, i) = trueop1;
2590 else
2591 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2592 i - in_n_elts);
2593 }
2594 }
2595
2596 return gen_rtx_CONST_VECTOR (mode, v);
2597 }
2598 }
2599 return 0;
2600
2601 default:
2602 gcc_unreachable ();
2603 }
2604
2605 return 0;
2606 }
2607
2608 rtx
2609 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2610 rtx op0, rtx op1)
2611 {
2612 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2613 HOST_WIDE_INT val;
2614 unsigned int width = GET_MODE_BITSIZE (mode);
2615
2616 if (VECTOR_MODE_P (mode)
2617 && code != VEC_CONCAT
2618 && GET_CODE (op0) == CONST_VECTOR
2619 && GET_CODE (op1) == CONST_VECTOR)
2620 {
2621 unsigned n_elts = GET_MODE_NUNITS (mode);
2622 enum machine_mode op0mode = GET_MODE (op0);
2623 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2624 enum machine_mode op1mode = GET_MODE (op1);
2625 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2626 rtvec v = rtvec_alloc (n_elts);
2627 unsigned int i;
2628
2629 gcc_assert (op0_n_elts == n_elts);
2630 gcc_assert (op1_n_elts == n_elts);
2631 for (i = 0; i < n_elts; i++)
2632 {
2633 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2634 CONST_VECTOR_ELT (op0, i),
2635 CONST_VECTOR_ELT (op1, i));
2636 if (!x)
2637 return 0;
2638 RTVEC_ELT (v, i) = x;
2639 }
2640
2641 return gen_rtx_CONST_VECTOR (mode, v);
2642 }
2643
2644 if (VECTOR_MODE_P (mode)
2645 && code == VEC_CONCAT
2646 && CONSTANT_P (op0) && CONSTANT_P (op1))
2647 {
2648 unsigned n_elts = GET_MODE_NUNITS (mode);
2649 rtvec v = rtvec_alloc (n_elts);
2650
2651 gcc_assert (n_elts >= 2);
2652 if (n_elts == 2)
2653 {
2654 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2655 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2656
2657 RTVEC_ELT (v, 0) = op0;
2658 RTVEC_ELT (v, 1) = op1;
2659 }
2660 else
2661 {
2662 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2663 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2664 unsigned i;
2665
2666 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2667 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2668 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2669
2670 for (i = 0; i < op0_n_elts; ++i)
2671 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2672 for (i = 0; i < op1_n_elts; ++i)
2673 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2674 }
2675
2676 return gen_rtx_CONST_VECTOR (mode, v);
2677 }
2678
2679 if (SCALAR_FLOAT_MODE_P (mode)
2680 && GET_CODE (op0) == CONST_DOUBLE
2681 && GET_CODE (op1) == CONST_DOUBLE
2682 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2683 {
2684 if (code == AND
2685 || code == IOR
2686 || code == XOR)
2687 {
2688 long tmp0[4];
2689 long tmp1[4];
2690 REAL_VALUE_TYPE r;
2691 int i;
2692
2693 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2694 GET_MODE (op0));
2695 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2696 GET_MODE (op1));
2697 for (i = 0; i < 4; i++)
2698 {
2699 switch (code)
2700 {
2701 case AND:
2702 tmp0[i] &= tmp1[i];
2703 break;
2704 case IOR:
2705 tmp0[i] |= tmp1[i];
2706 break;
2707 case XOR:
2708 tmp0[i] ^= tmp1[i];
2709 break;
2710 default:
2711 gcc_unreachable ();
2712 }
2713 }
2714 real_from_target (&r, tmp0, mode);
2715 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2716 }
2717 else
2718 {
2719 REAL_VALUE_TYPE f0, f1, value, result;
2720 bool inexact;
2721
2722 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2723 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2724 real_convert (&f0, mode, &f0);
2725 real_convert (&f1, mode, &f1);
2726
2727 if (HONOR_SNANS (mode)
2728 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2729 return 0;
2730
2731 if (code == DIV
2732 && REAL_VALUES_EQUAL (f1, dconst0)
2733 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2734 return 0;
2735
2736 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2737 && flag_trapping_math
2738 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2739 {
2740 int s0 = REAL_VALUE_NEGATIVE (f0);
2741 int s1 = REAL_VALUE_NEGATIVE (f1);
2742
2743 switch (code)
2744 {
2745 case PLUS:
2746 /* Inf + -Inf = NaN plus exception. */
2747 if (s0 != s1)
2748 return 0;
2749 break;
2750 case MINUS:
2751 /* Inf - Inf = NaN plus exception. */
2752 if (s0 == s1)
2753 return 0;
2754 break;
2755 case DIV:
2756 /* Inf / Inf = NaN plus exception. */
2757 return 0;
2758 default:
2759 break;
2760 }
2761 }
2762
2763 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2764 && flag_trapping_math
2765 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2766 || (REAL_VALUE_ISINF (f1)
2767 && REAL_VALUES_EQUAL (f0, dconst0))))
2768 /* Inf * 0 = NaN plus exception. */
2769 return 0;
2770
2771 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2772 &f0, &f1);
2773 real_convert (&result, mode, &value);
2774
2775 /* Don't constant fold this floating point operation if
2776 the result has overflowed and flag_trapping_math. */
2777
2778 if (flag_trapping_math
2779 && MODE_HAS_INFINITIES (mode)
2780 && REAL_VALUE_ISINF (result)
2781 && !REAL_VALUE_ISINF (f0)
2782 && !REAL_VALUE_ISINF (f1))
2783 /* Overflow plus exception. */
2784 return 0;
2785
2786 /* Don't constant fold this floating point operation if the
2787 result may depend upon the run-time rounding mode and
2788 flag_rounding_math is set, or if GCC's software emulation
2789 is unable to accurately represent the result. */
2790
2791 if ((flag_rounding_math
2792 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2793 && !flag_unsafe_math_optimizations))
2794 && (inexact || !real_identical (&result, &value)))
2795 return NULL_RTX;
2796
2797 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2798 }
2799 }
2800
2801 /* We can fold some multi-word operations. */
2802 if (GET_MODE_CLASS (mode) == MODE_INT
2803 && width == HOST_BITS_PER_WIDE_INT * 2
2804 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2805 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2806 {
2807 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2808 HOST_WIDE_INT h1, h2, hv, ht;
2809
2810 if (GET_CODE (op0) == CONST_DOUBLE)
2811 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2812 else
2813 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2814
2815 if (GET_CODE (op1) == CONST_DOUBLE)
2816 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2817 else
2818 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2819
2820 switch (code)
2821 {
2822 case MINUS:
2823 /* A - B == A + (-B). */
2824 neg_double (l2, h2, &lv, &hv);
2825 l2 = lv, h2 = hv;
2826
2827 /* Fall through.... */
2828
2829 case PLUS:
2830 add_double (l1, h1, l2, h2, &lv, &hv);
2831 break;
2832
2833 case MULT:
2834 mul_double (l1, h1, l2, h2, &lv, &hv);
2835 break;
2836
2837 case DIV:
2838 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2839 &lv, &hv, &lt, &ht))
2840 return 0;
2841 break;
2842
2843 case MOD:
2844 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2845 &lt, &ht, &lv, &hv))
2846 return 0;
2847 break;
2848
2849 case UDIV:
2850 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2851 &lv, &hv, &lt, &ht))
2852 return 0;
2853 break;
2854
2855 case UMOD:
2856 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2857 &lt, &ht, &lv, &hv))
2858 return 0;
2859 break;
2860
2861 case AND:
2862 lv = l1 & l2, hv = h1 & h2;
2863 break;
2864
2865 case IOR:
2866 lv = l1 | l2, hv = h1 | h2;
2867 break;
2868
2869 case XOR:
2870 lv = l1 ^ l2, hv = h1 ^ h2;
2871 break;
2872
2873 case SMIN:
2874 if (h1 < h2
2875 || (h1 == h2
2876 && ((unsigned HOST_WIDE_INT) l1
2877 < (unsigned HOST_WIDE_INT) l2)))
2878 lv = l1, hv = h1;
2879 else
2880 lv = l2, hv = h2;
2881 break;
2882
2883 case SMAX:
2884 if (h1 > h2
2885 || (h1 == h2
2886 && ((unsigned HOST_WIDE_INT) l1
2887 > (unsigned HOST_WIDE_INT) l2)))
2888 lv = l1, hv = h1;
2889 else
2890 lv = l2, hv = h2;
2891 break;
2892
2893 case UMIN:
2894 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2895 || (h1 == h2
2896 && ((unsigned HOST_WIDE_INT) l1
2897 < (unsigned HOST_WIDE_INT) l2)))
2898 lv = l1, hv = h1;
2899 else
2900 lv = l2, hv = h2;
2901 break;
2902
2903 case UMAX:
2904 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2905 || (h1 == h2
2906 && ((unsigned HOST_WIDE_INT) l1
2907 > (unsigned HOST_WIDE_INT) l2)))
2908 lv = l1, hv = h1;
2909 else
2910 lv = l2, hv = h2;
2911 break;
2912
2913 case LSHIFTRT: case ASHIFTRT:
2914 case ASHIFT:
2915 case ROTATE: case ROTATERT:
2916 if (SHIFT_COUNT_TRUNCATED)
2917 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2918
2919 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2920 return 0;
2921
2922 if (code == LSHIFTRT || code == ASHIFTRT)
2923 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2924 code == ASHIFTRT);
2925 else if (code == ASHIFT)
2926 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2927 else if (code == ROTATE)
2928 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2929 else /* code == ROTATERT */
2930 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2931 break;
2932
2933 default:
2934 return 0;
2935 }
2936
2937 return immed_double_const (lv, hv, mode);
2938 }
2939
2940 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2941 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2942 {
2943 /* Get the integer argument values in two forms:
2944 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2945
2946 arg0 = INTVAL (op0);
2947 arg1 = INTVAL (op1);
2948
2949 if (width < HOST_BITS_PER_WIDE_INT)
2950 {
2951 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2952 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2953
2954 arg0s = arg0;
2955 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2956 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2957
2958 arg1s = arg1;
2959 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2960 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2961 }
2962 else
2963 {
2964 arg0s = arg0;
2965 arg1s = arg1;
2966 }
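      /* E.g. with width == 8 and op0 == (const_int 0xff), ARG0 is 255
         (zero-extended) while ARG0S is -1 (sign-extended).  */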
2967
2968 /* Compute the value of the arithmetic. */
2969
2970 switch (code)
2971 {
2972 case PLUS:
2973 val = arg0s + arg1s;
2974 break;
2975
2976 case MINUS:
2977 val = arg0s - arg1s;
2978 break;
2979
2980 case MULT:
2981 val = arg0s * arg1s;
2982 break;
2983
2984 case DIV:
2985 if (arg1s == 0
2986 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2987 && arg1s == -1))
2988 return 0;
2989 val = arg0s / arg1s;
2990 break;
2991
2992 case MOD:
2993 if (arg1s == 0
2994 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2995 && arg1s == -1))
2996 return 0;
2997 val = arg0s % arg1s;
2998 break;
2999
3000 case UDIV:
3001 if (arg1 == 0
3002 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3003 && arg1s == -1))
3004 return 0;
3005 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3006 break;
3007
3008 case UMOD:
3009 if (arg1 == 0
3010 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3011 && arg1s == -1))
3012 return 0;
3013 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3014 break;
3015
3016 case AND:
3017 val = arg0 & arg1;
3018 break;
3019
3020 case IOR:
3021 val = arg0 | arg1;
3022 break;
3023
3024 case XOR:
3025 val = arg0 ^ arg1;
3026 break;
3027
3028 case LSHIFTRT:
3029 case ASHIFT:
3030 case ASHIFTRT:
3031 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3032 the value is in range. We can't return any old value for
3033 out-of-range arguments because either the middle-end (via
3034 shift_truncation_mask) or the back-end might be relying on
3035 target-specific knowledge. Nor can we rely on
3036 shift_truncation_mask, since the shift might not be part of an
3037 ashlM3, lshrM3 or ashrM3 instruction. */
3038 if (SHIFT_COUNT_TRUNCATED)
3039 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3040 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3041 return 0;
3042
3043 val = (code == ASHIFT
3044 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3045 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3046
3047 /* Sign-extend the result for arithmetic right shifts. */
3048 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3049 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3050 break;
3051
3052 case ROTATERT:
3053 if (arg1 < 0)
3054 return 0;
3055
3056 arg1 %= width;
3057 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3058 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3059 break;
3060
3061 case ROTATE:
3062 if (arg1 < 0)
3063 return 0;
3064
3065 arg1 %= width;
3066 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3067 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3068 break;
3069
3070 case COMPARE:
3071 /* Do nothing here. */
3072 return 0;
3073
3074 case SMIN:
3075 val = arg0s <= arg1s ? arg0s : arg1s;
3076 break;
3077
3078 case UMIN:
3079 val = ((unsigned HOST_WIDE_INT) arg0
3080 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3081 break;
3082
3083 case SMAX:
3084 val = arg0s > arg1s ? arg0s : arg1s;
3085 break;
3086
3087 case UMAX:
3088 val = ((unsigned HOST_WIDE_INT) arg0
3089 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3090 break;
3091
3092 case SS_PLUS:
3093 case US_PLUS:
3094 case SS_MINUS:
3095 case US_MINUS:
3096 /* ??? There are simplifications that can be done. */
3097 return 0;
3098
3099 default:
3100 gcc_unreachable ();
3101 }
3102
3103 return gen_int_mode (val, mode);
3104 }
3105
3106 return NULL_RTX;
3107 }
3108
3109
3110 \f
3111 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3112 PLUS or MINUS.
3113
3114 Rather than test for specific cases, we do this by a brute-force method
3115 and do all possible simplifications until no more changes occur. Then
3116 we rebuild the operation. */
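/* For example, (a + b) - (a + c) is flattened into the signed operand list
   {+a, +b, -a, -c}; the +a and -a terms cancel during the pairwise
   simplification loop and the result is rebuilt as (minus b c).  */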
3117
3118 struct simplify_plus_minus_op_data
3119 {
3120 rtx op;
3121 short neg;
3122 short ix;
3123 };
3124
3125 static int
3126 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3127 {
3128 const struct simplify_plus_minus_op_data *d1 = p1;
3129 const struct simplify_plus_minus_op_data *d2 = p2;
3130 int result;
3131
3132 result = (commutative_operand_precedence (d2->op)
3133 - commutative_operand_precedence (d1->op));
3134 if (result)
3135 return result;
3136 return d1->ix - d2->ix;
3137 }
3138
3139 static rtx
3140 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3141 rtx op1)
3142 {
3143 struct simplify_plus_minus_op_data ops[8];
3144 rtx result, tem;
3145 int n_ops = 2, input_ops = 2;
3146 int first, changed, canonicalized = 0;
3147 int i, j;
3148
3149 memset (ops, 0, sizeof ops);
3150
3151 /* Set up the two operands and then expand them until nothing has been
3152 changed. If we run out of room in our array, give up; this should
3153 almost never happen. */
3154
3155 ops[0].op = op0;
3156 ops[0].neg = 0;
3157 ops[1].op = op1;
3158 ops[1].neg = (code == MINUS);
3159
3160 do
3161 {
3162 changed = 0;
3163
3164 for (i = 0; i < n_ops; i++)
3165 {
3166 rtx this_op = ops[i].op;
3167 int this_neg = ops[i].neg;
3168 enum rtx_code this_code = GET_CODE (this_op);
3169
3170 switch (this_code)
3171 {
3172 case PLUS:
3173 case MINUS:
3174 if (n_ops == 7)
3175 return NULL_RTX;
3176
3177 ops[n_ops].op = XEXP (this_op, 1);
3178 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3179 n_ops++;
3180
3181 ops[i].op = XEXP (this_op, 0);
3182 input_ops++;
3183 changed = 1;
3184 canonicalized |= this_neg;
3185 break;
3186
3187 case NEG:
3188 ops[i].op = XEXP (this_op, 0);
3189 ops[i].neg = ! this_neg;
3190 changed = 1;
3191 canonicalized = 1;
3192 break;
3193
3194 case CONST:
3195 if (n_ops < 7
3196 && GET_CODE (XEXP (this_op, 0)) == PLUS
3197 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3198 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3199 {
3200 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3201 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3202 ops[n_ops].neg = this_neg;
3203 n_ops++;
3204 changed = 1;
3205 canonicalized = 1;
3206 }
3207 break;
3208
3209 case NOT:
3210 /* ~a -> (-a - 1) */
3211 if (n_ops != 7)
3212 {
3213 ops[n_ops].op = constm1_rtx;
3214 ops[n_ops++].neg = this_neg;
3215 ops[i].op = XEXP (this_op, 0);
3216 ops[i].neg = !this_neg;
3217 changed = 1;
3218 canonicalized = 1;
3219 }
3220 break;
3221
3222 case CONST_INT:
3223 if (this_neg)
3224 {
3225 ops[i].op = neg_const_int (mode, this_op);
3226 ops[i].neg = 0;
3227 changed = 1;
3228 canonicalized = 1;
3229 }
3230 break;
3231
3232 default:
3233 break;
3234 }
3235 }
3236 }
3237 while (changed);
3238
3239 gcc_assert (n_ops >= 2);
3240 if (!canonicalized)
3241 {
3242 int n_constants = 0;
3243
3244 for (i = 0; i < n_ops; i++)
3245 if (GET_CODE (ops[i].op) == CONST_INT)
3246 n_constants++;
3247
3248 if (n_constants <= 1)
3249 return NULL_RTX;
3250 }
3251
3252 /* If we only have two operands, we can avoid the loops. */
3253 if (n_ops == 2)
3254 {
3255 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3256 rtx lhs, rhs;
3257
3258 /* Get the two operands. Be careful with the order, especially for
3259 the cases where code == MINUS. */
3260 if (ops[0].neg && ops[1].neg)
3261 {
3262 lhs = gen_rtx_NEG (mode, ops[0].op);
3263 rhs = ops[1].op;
3264 }
3265 else if (ops[0].neg)
3266 {
3267 lhs = ops[1].op;
3268 rhs = ops[0].op;
3269 }
3270 else
3271 {
3272 lhs = ops[0].op;
3273 rhs = ops[1].op;
3274 }
3275
3276 return simplify_const_binary_operation (code, mode, lhs, rhs);
3277 }
3278
3279 /* Now simplify each pair of operands until nothing changes. The first
3280 time through just simplify constants against each other. */
3281
3282 first = 1;
3283 do
3284 {
3285 changed = first;
3286
3287 for (i = 0; i < n_ops - 1; i++)
3288 for (j = i + 1; j < n_ops; j++)
3289 {
3290 rtx lhs = ops[i].op, rhs = ops[j].op;
3291 int lneg = ops[i].neg, rneg = ops[j].neg;
3292
3293 if (lhs != 0 && rhs != 0
3294 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3295 {
3296 enum rtx_code ncode = PLUS;
3297
3298 if (lneg != rneg)
3299 {
3300 ncode = MINUS;
3301 if (lneg)
3302 tem = lhs, lhs = rhs, rhs = tem;
3303 }
3304 else if (swap_commutative_operands_p (lhs, rhs))
3305 tem = lhs, lhs = rhs, rhs = tem;
3306
3307 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3308
3309 /* Reject "simplifications" that just wrap the two
3310 arguments in a CONST. Failure to do so can result
3311 in infinite recursion with simplify_binary_operation
3312 when it calls us to simplify CONST operations. */
3313 if (tem
3314 && ! (GET_CODE (tem) == CONST
3315 && GET_CODE (XEXP (tem, 0)) == ncode
3316 && XEXP (XEXP (tem, 0), 0) == lhs
3317 && XEXP (XEXP (tem, 0), 1) == rhs)
3318 /* Don't allow -x + -1 -> ~x simplifications in the
3319 first pass. This allows us the chance to combine
3320 the -1 with other constants. */
3321 && ! (first
3322 && GET_CODE (tem) == NOT
3323 && XEXP (tem, 0) == rhs))
3324 {
3325 lneg &= rneg;
3326 if (GET_CODE (tem) == NEG)
3327 tem = XEXP (tem, 0), lneg = !lneg;
3328 if (GET_CODE (tem) == CONST_INT && lneg)
3329 tem = neg_const_int (mode, tem), lneg = 0;
3330
3331 ops[i].op = tem;
3332 ops[i].neg = lneg;
3333 ops[j].op = NULL_RTX;
3334 changed = 1;
3335 }
3336 }
3337 }
3338
3339 first = 0;
3340 }
3341 while (changed);
3342
3343 /* Pack all the operands to the lower-numbered entries. */
3344 for (i = 0, j = 0; j < n_ops; j++)
3345 if (ops[j].op)
3346 {
3347 ops[i] = ops[j];
3348 /* Stabilize sort. */
3349 ops[i].ix = i;
3350 i++;
3351 }
3352 n_ops = i;
3353
3354 /* Sort the operations based on swap_commutative_operands_p. */
3355 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3356
3357 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3358 if (n_ops == 2
3359 && GET_CODE (ops[1].op) == CONST_INT
3360 && CONSTANT_P (ops[0].op)
3361 && ops[0].neg)
3362 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3363
3364 /* We suppressed creation of trivial CONST expressions in the
3365 combination loop to avoid recursion. Create one manually now.
3366 The combination loop should have ensured that there is exactly
3367 one CONST_INT, and the sort will have ensured that it is last
3368 in the array and that any other constant will be next-to-last. */
3369
3370 if (n_ops > 1
3371 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3372 && CONSTANT_P (ops[n_ops - 2].op))
3373 {
3374 rtx value = ops[n_ops - 1].op;
3375 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3376 value = neg_const_int (mode, value);
3377 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3378 n_ops--;
3379 }
3380
3381 /* Put a non-negated operand first, if possible. */
3382
3383 for (i = 0; i < n_ops && ops[i].neg; i++)
3384 continue;
3385 if (i == n_ops)
3386 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3387 else if (i != 0)
3388 {
3389 tem = ops[0].op;
3390 ops[0] = ops[i];
3391 ops[i].op = tem;
3392 ops[i].neg = 1;
3393 }
3394
3395 /* Now make the result by performing the requested operations. */
3396 result = ops[0].op;
3397 for (i = 1; i < n_ops; i++)
3398 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3399 mode, result, ops[i].op);
3400
3401 return result;
3402 }
3403
3404 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3405 static bool
3406 plus_minus_operand_p (rtx x)
3407 {
3408 return GET_CODE (x) == PLUS
3409 || GET_CODE (x) == MINUS
3410 || (GET_CODE (x) == CONST
3411 && GET_CODE (XEXP (x, 0)) == PLUS
3412 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3413 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3414 }
3415
3416 /* Like simplify_binary_operation except used for relational operators.
3417 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3418 not also be VOIDmode.
3419
3420 CMP_MODE specifies the mode in which the comparison is done, so it is
3421 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3422 the operands or, if both are VOIDmode, the operands are compared in
3423 "infinite precision". */
3424 rtx
3425 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3426 enum machine_mode cmp_mode, rtx op0, rtx op1)
3427 {
3428 rtx tem, trueop0, trueop1;
3429
3430 if (cmp_mode == VOIDmode)
3431 cmp_mode = GET_MODE (op0);
3432 if (cmp_mode == VOIDmode)
3433 cmp_mode = GET_MODE (op1);
3434
3435 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3436 if (tem)
3437 {
3438 if (SCALAR_FLOAT_MODE_P (mode))
3439 {
3440 if (tem == const0_rtx)
3441 return CONST0_RTX (mode);
3442 #ifdef FLOAT_STORE_FLAG_VALUE
3443 {
3444 REAL_VALUE_TYPE val;
3445 val = FLOAT_STORE_FLAG_VALUE (mode);
3446 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3447 }
3448 #else
3449 return NULL_RTX;
3450 #endif
3451 }
3452 if (VECTOR_MODE_P (mode))
3453 {
3454 if (tem == const0_rtx)
3455 return CONST0_RTX (mode);
3456 #ifdef VECTOR_STORE_FLAG_VALUE
3457 {
3458 int i, units;
3459 rtvec v;
3460
3461 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3462 if (val == NULL_RTX)
3463 return NULL_RTX;
3464 if (val == const1_rtx)
3465 return CONST1_RTX (mode);
3466
3467 units = GET_MODE_NUNITS (mode);
3468 v = rtvec_alloc (units);
3469 for (i = 0; i < units; i++)
3470 RTVEC_ELT (v, i) = val;
3471 return gen_rtx_raw_CONST_VECTOR (mode, v);
3472 }
3473 #else
3474 return NULL_RTX;
3475 #endif
3476 }
3477
3478 return tem;
3479 }
3480
3481 /* For the following tests, ensure const0_rtx is op1. */
3482 if (swap_commutative_operands_p (op0, op1)
3483 || (op0 == const0_rtx && op1 != const0_rtx))
3484 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3485
3486 /* If op0 is a compare, extract the comparison arguments from it. */
3487 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3488 return simplify_relational_operation (code, mode, VOIDmode,
3489 XEXP (op0, 0), XEXP (op0, 1));
3490
3491 if (mode == VOIDmode
3492 || GET_MODE_CLASS (cmp_mode) == MODE_CC
3493 || CC0_P (op0))
3494 return NULL_RTX;
3495
3496 trueop0 = avoid_constant_pool_reference (op0);
3497 trueop1 = avoid_constant_pool_reference (op1);
3498 return simplify_relational_operation_1 (code, mode, cmp_mode,
3499 trueop0, trueop1);
3500 }
3501
3502 /* This part of simplify_relational_operation is only used when CMP_MODE
3503 is not in class MODE_CC (i.e. it is a real comparison).
3504
3505 MODE is the mode of the result, while CMP_MODE specifies the mode
3506 in which the comparison is done, so it is the mode of the operands. */
3507
3508 static rtx
3509 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3510 enum machine_mode cmp_mode, rtx op0, rtx op1)
3511 {
3512 enum rtx_code op0code = GET_CODE (op0);
3513
3514 if (GET_CODE (op1) == CONST_INT)
3515 {
3516 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3517 {
3518 /* If op0 is a comparison, extract the comparison arguments
3519 from it. */
3520 if (code == NE)
3521 {
3522 if (GET_MODE (op0) == mode)
3523 return simplify_rtx (op0);
3524 else
3525 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3526 XEXP (op0, 0), XEXP (op0, 1));
3527 }
3528 else if (code == EQ)
3529 {
3530 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3531 if (new_code != UNKNOWN)
3532 return simplify_gen_relational (new_code, mode, VOIDmode,
3533 XEXP (op0, 0), XEXP (op0, 1));
3534 }
3535 }
3536 }
3537
3538 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3539 if ((code == EQ || code == NE)
3540 && (op0code == PLUS || op0code == MINUS)
3541 && CONSTANT_P (op1)
3542 && CONSTANT_P (XEXP (op0, 1))
3543 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3544 {
3545 rtx x = XEXP (op0, 0);
3546 rtx c = XEXP (op0, 1);
3547
3548 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3549 cmp_mode, op1, c);
3550 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3551 }
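  /* E.g. (eq (plus X (const_int 3)) (const_int 7)) becomes
     (eq X (const_int 4)).  */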
3552
3553 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3554 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3555 if (code == NE
3556 && op1 == const0_rtx
3557 && GET_MODE_CLASS (mode) == MODE_INT
3558 && cmp_mode != VOIDmode
3559 /* ??? Work-around BImode bugs in the ia64 backend. */
3560 && mode != BImode
3561 && cmp_mode != BImode
3562 && nonzero_bits (op0, cmp_mode) == 1
3563 && STORE_FLAG_VALUE == 1)
3564 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3565 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3566 : lowpart_subreg (mode, op0, cmp_mode);
3567
3568 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3569 if ((code == EQ || code == NE)
3570 && op1 == const0_rtx
3571 && op0code == XOR)
3572 return simplify_gen_relational (code, mode, cmp_mode,
3573 XEXP (op0, 0), XEXP (op0, 1));
3574
3575 /* (eq/ne (xor x y) x) simplifies to (eq/ne x 0). */
3576 if ((code == EQ || code == NE)
3577 && op0code == XOR
3578 && rtx_equal_p (XEXP (op0, 0), op1)
3579 && !side_effects_p (XEXP (op0, 1)))
3580 return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
3581 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne y 0). */
3582 if ((code == EQ || code == NE)
3583 && op0code == XOR
3584 && rtx_equal_p (XEXP (op0, 1), op1)
3585 && !side_effects_p (XEXP (op0, 0)))
3586 return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
3587
3588 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3589 if ((code == EQ || code == NE)
3590 && op0code == XOR
3591 && (GET_CODE (op1) == CONST_INT
3592 || GET_CODE (op1) == CONST_DOUBLE)
3593 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3594 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3595 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3596 simplify_gen_binary (XOR, cmp_mode,
3597 XEXP (op0, 1), op1));
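  /* E.g. (ne (xor X (const_int 5)) (const_int 3)) becomes
     (ne X (const_int 6)).  */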
3598
3599 return NULL_RTX;
3600 }
3601
3602 /* Check if the given comparison (done in the given MODE) is actually a
3603 tautology or a contradiction.
3604 If no simplification is possible, this function returns zero.
3605 Otherwise, it returns either const_true_rtx or const0_rtx. */
3606
3607 rtx
3608 simplify_const_relational_operation (enum rtx_code code,
3609 enum machine_mode mode,
3610 rtx op0, rtx op1)
3611 {
3612 int equal, op0lt, op0ltu, op1lt, op1ltu;
3613 rtx tem;
3614 rtx trueop0;
3615 rtx trueop1;
3616
3617 gcc_assert (mode != VOIDmode
3618 || (GET_MODE (op0) == VOIDmode
3619 && GET_MODE (op1) == VOIDmode));
3620
3621 /* If op0 is a compare, extract the comparison arguments from it. */
3622 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3623 {
3624 op1 = XEXP (op0, 1);
3625 op0 = XEXP (op0, 0);
3626
3627 if (GET_MODE (op0) != VOIDmode)
3628 mode = GET_MODE (op0);
3629 else if (GET_MODE (op1) != VOIDmode)
3630 mode = GET_MODE (op1);
3631 else
3632 return 0;
3633 }
3634
3635 /* We can't simplify MODE_CC values since we don't know what the
3636 actual comparison is. */
3637 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3638 return 0;
3639
3640 /* Make sure the constant is second. */
3641 if (swap_commutative_operands_p (op0, op1))
3642 {
3643 tem = op0, op0 = op1, op1 = tem;
3644 code = swap_condition (code);
3645 }
3646
3647 trueop0 = avoid_constant_pool_reference (op0);
3648 trueop1 = avoid_constant_pool_reference (op1);
3649
3650 /* For integer comparisons of A and B maybe we can simplify A - B and can
3651 then simplify a comparison of that with zero. If A and B are both either
3652 a register or a CONST_INT, this can't help; testing for these cases will
3653 prevent infinite recursion here and speed things up.
3654
3655 If CODE is an unsigned comparison, then we can never do this optimization,
3656 because it gives an incorrect result if the subtraction wraps around zero.
3657 ANSI C defines unsigned operations such that they never overflow, and
3658 thus such cases can not be ignored; but we cannot do it even for
3659 signed comparisons for languages such as Java, so test flag_wrapv. */
3660
3661 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3662 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3663 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3664 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3665 /* We cannot do this for == or != if tem is a nonzero address. */
3666 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3667 && code != GTU && code != GEU && code != LTU && code != LEU)
3668 return simplify_const_relational_operation (signed_condition (code),
3669 mode, tem, const0_rtx);
3670
3671 if (flag_unsafe_math_optimizations && code == ORDERED)
3672 return const_true_rtx;
3673
3674 if (flag_unsafe_math_optimizations && code == UNORDERED)
3675 return const0_rtx;
3676
3677 /* For modes without NaNs, if the two operands are equal, we know the
3678 result except if they have side-effects. */
3679 if (! HONOR_NANS (GET_MODE (trueop0))
3680 && rtx_equal_p (trueop0, trueop1)
3681 && ! side_effects_p (trueop0))
3682 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3683
3684 /* If the operands are floating-point constants, see if we can fold
3685 the result. */
3686 else if (GET_CODE (trueop0) == CONST_DOUBLE
3687 && GET_CODE (trueop1) == CONST_DOUBLE
3688 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3689 {
3690 REAL_VALUE_TYPE d0, d1;
3691
3692 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3693 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3694
3695 /* Comparisons are unordered iff at least one of the values is NaN. */
3696 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3697 switch (code)
3698 {
3699 case UNEQ:
3700 case UNLT:
3701 case UNGT:
3702 case UNLE:
3703 case UNGE:
3704 case NE:
3705 case UNORDERED:
3706 return const_true_rtx;
3707 case EQ:
3708 case LT:
3709 case GT:
3710 case LE:
3711 case GE:
3712 case LTGT:
3713 case ORDERED:
3714 return const0_rtx;
3715 default:
3716 return 0;
3717 }
3718
3719 equal = REAL_VALUES_EQUAL (d0, d1);
3720 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3721 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3722 }
3723
3724 /* Otherwise, see if the operands are both integers. */
3725 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3726 && (GET_CODE (trueop0) == CONST_DOUBLE
3727 || GET_CODE (trueop0) == CONST_INT)
3728 && (GET_CODE (trueop1) == CONST_DOUBLE
3729 || GET_CODE (trueop1) == CONST_INT))
3730 {
3731 int width = GET_MODE_BITSIZE (mode);
3732 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3733 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3734
3735 /* Get the two words comprising each integer constant. */
3736 if (GET_CODE (trueop0) == CONST_DOUBLE)
3737 {
3738 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3739 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3740 }
3741 else
3742 {
3743 l0u = l0s = INTVAL (trueop0);
3744 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3745 }
3746
3747 if (GET_CODE (trueop1) == CONST_DOUBLE)
3748 {
3749 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3750 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3751 }
3752 else
3753 {
3754 l1u = l1s = INTVAL (trueop1);
3755 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3756 }
3757
3758 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3759 we have to sign or zero-extend the values. */
3760 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3761 {
3762 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3763 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3764
3765 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3766 l0s |= ((HOST_WIDE_INT) (-1) << width);
3767
3768 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3769 l1s |= ((HOST_WIDE_INT) (-1) << width);
3770 }
3771 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3772 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3773
3774 equal = (h0u == h1u && l0u == l1u);
3775 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3776 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3777 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3778 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3779 }
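/* Standalone sketch (not GCC internals) of the double-word ordering computed
   just above: the high halves decide with their native signedness, and on a
   tie the low halves are always compared unsigned, since they only carry
   low-order bits.  Assuming 64-bit long long halves:

     int dw_lt_signed (long long h0, unsigned long long l0,
                       long long h1, unsigned long long l1)
     {
       return h0 < h1 || (h0 == h1 && l0 < l1);
     }

     int dw_lt_unsigned (unsigned long long h0, unsigned long long l0,
                         unsigned long long h1, unsigned long long l1)
     {
       return h0 < h1 || (h0 == h1 && l0 < l1);
     }
*/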
3780
3781 /* Otherwise, there are some code-specific tests we can make. */
3782 else
3783 {
3784 /* Optimize comparisons with upper and lower bounds. */
3785 if (SCALAR_INT_MODE_P (mode)
3786 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3787 {
3788 rtx mmin, mmax;
3789 int sign;
3790
3791 if (code == GEU
3792 || code == LEU
3793 || code == GTU
3794 || code == LTU)
3795 sign = 0;
3796 else
3797 sign = 1;
3798
3799 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3800
3801 tem = NULL_RTX;
3802 switch (code)
3803 {
3804 case GEU:
3805 case GE:
3806 /* x >= min is always true. */
3807 if (rtx_equal_p (trueop1, mmin))
3808 tem = const_true_rtx;
3809 else
3810 break;
3811
3812 case LEU:
3813 case LE:
3814 /* x <= max is always true. */
3815 if (rtx_equal_p (trueop1, mmax))
3816 tem = const_true_rtx;
3817 break;
3818
3819 case GTU:
3820 case GT:
3821 /* x > max is always false. */
3822 if (rtx_equal_p (trueop1, mmax))
3823 tem = const0_rtx;
3824 break;
3825
3826 case LTU:
3827 case LT:
3828 /* x < min is always false. */
3829 if (rtx_equal_p (trueop1, mmin))
3830 tem = const0_rtx;
3831 break;
3832
3833 default:
3834 break;
3835 }
3836 if (tem == const0_rtx
3837 || tem == const_true_rtx)
3838 return tem;
3839 }
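/* Worked example of the bounds test above (values illustrative).  For a
   signed QImode comparison get_mode_bounds yields mmin == -128 and
   mmax == 127; for an unsigned one, 0 and 255.  So

     (le (x:QI) (const_int 127))    folds to const_true_rtx,
     (gtu (x:QI) (const_int 255))   folds to const0_rtx,
     (ge (x:QI) (const_int -128))   folds to const_true_rtx,

   while comparisons against interior values fall through to the
   code-specific tests below.  */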
3840
3841 switch (code)
3842 {
3843 case EQ:
3844 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3845 return const0_rtx;
3846 break;
3847
3848 case NE:
3849 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3850 return const_true_rtx;
3851 break;
3852
3853 case LT:
3854 /* Optimize abs(x) < 0.0. */
3855 if (trueop1 == CONST0_RTX (mode)
3856 && !HONOR_SNANS (mode)
3857 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3858 {
3859 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3860 : trueop0;
3861 if (GET_CODE (tem) == ABS)
3862 return const0_rtx;
3863 }
3864 break;
3865
3866 case GE:
3867 /* Optimize abs(x) >= 0.0. */
3868 if (trueop1 == CONST0_RTX (mode)
3869 && !HONOR_NANS (mode)
3870 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3871 {
3872 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3873 : trueop0;
3874 if (GET_CODE (tem) == ABS)
3875 return const_true_rtx;
3876 }
3877 break;
3878
3879 case UNGE:
3880 /* Optimize ! (abs(x) < 0.0). */
3881 if (trueop1 == CONST0_RTX (mode))
3882 {
3883 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3884 : trueop0;
3885 if (GET_CODE (tem) == ABS)
3886 return const_true_rtx;
3887 }
3888 break;
3889
3890 default:
3891 break;
3892 }
3893
3894 return 0;
3895 }
3896
3897 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3898 as appropriate. */
3899 switch (code)
3900 {
3901 case EQ:
3902 case UNEQ:
3903 return equal ? const_true_rtx : const0_rtx;
3904 case NE:
3905 case LTGT:
3906 return ! equal ? const_true_rtx : const0_rtx;
3907 case LT:
3908 case UNLT:
3909 return op0lt ? const_true_rtx : const0_rtx;
3910 case GT:
3911 case UNGT:
3912 return op1lt ? const_true_rtx : const0_rtx;
3913 case LTU:
3914 return op0ltu ? const_true_rtx : const0_rtx;
3915 case GTU:
3916 return op1ltu ? const_true_rtx : const0_rtx;
3917 case LE:
3918 case UNLE:
3919 return equal || op0lt ? const_true_rtx : const0_rtx;
3920 case GE:
3921 case UNGE:
3922 return equal || op1lt ? const_true_rtx : const0_rtx;
3923 case LEU:
3924 return equal || op0ltu ? const_true_rtx : const0_rtx;
3925 case GEU:
3926 return equal || op1ltu ? const_true_rtx : const0_rtx;
3927 case ORDERED:
3928 return const_true_rtx;
3929 case UNORDERED:
3930 return const0_rtx;
3931 default:
3932 gcc_unreachable ();
3933 }
3934 }
3935 \f
3936 /* Simplify CODE, an operation with result mode MODE and three operands,
3937 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3938 a constant. Return 0 if no simplification is possible. */
3939
3940 rtx
3941 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3942 enum machine_mode op0_mode, rtx op0, rtx op1,
3943 rtx op2)
3944 {
3945 unsigned int width = GET_MODE_BITSIZE (mode);
3946
3947 /* VOIDmode means "infinite" precision. */
3948 if (width == 0)
3949 width = HOST_BITS_PER_WIDE_INT;
3950
3951 switch (code)
3952 {
3953 case SIGN_EXTRACT:
3954 case ZERO_EXTRACT:
3955 if (GET_CODE (op0) == CONST_INT
3956 && GET_CODE (op1) == CONST_INT
3957 && GET_CODE (op2) == CONST_INT
3958 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3959 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3960 {
3961 /* Extracting a bit-field from a constant */
3962 HOST_WIDE_INT val = INTVAL (op0);
3963
3964 if (BITS_BIG_ENDIAN)
3965 val >>= (GET_MODE_BITSIZE (op0_mode)
3966 - INTVAL (op2) - INTVAL (op1));
3967 else
3968 val >>= INTVAL (op2);
3969
3970 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3971 {
3972 /* First zero-extend. */
3973 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3974 /* If desired, propagate sign bit. */
3975 if (code == SIGN_EXTRACT
3976 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3977 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3978 }
3979
3980 /* Clear the bits that don't belong in our mode,
3981 unless they and our sign bit are all one.
3982 So we get either a reasonable negative value or a reasonable
3983 unsigned value for this mode. */
3984 if (width < HOST_BITS_PER_WIDE_INT
3985 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3986 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3987 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3988
3989 return gen_int_mode (val, mode);
3990 }
3991 break;
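/* Standalone sketch (not GCC internals) of the extraction arithmetic above,
   for the !BITS_BIG_ENDIAN case: shift the field down, mask it to LEN bits,
   then propagate the sign bit for SIGN_EXTRACT.  Assumes LEN is smaller
   than the width of long and an arithmetic right shift:

     long extract_field (long x, int len, int pos, int sign_extract_p)
     {
       long val = x >> pos;
       val &= ((long) 1 << len) - 1;               // zero-extend to LEN bits
       if (sign_extract_p && (val & ((long) 1 << (len - 1))))
         val |= ~(((long) 1 << len) - 1);          // propagate the sign bit
       return val;
     }

   e.g. extract_field (0xF0, 4, 4, 1) == -1 while
        extract_field (0xF0, 4, 4, 0) == 15.  */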
3992
3993 case IF_THEN_ELSE:
3994 if (GET_CODE (op0) == CONST_INT)
3995 return op0 != const0_rtx ? op1 : op2;
3996
3997 /* Convert c ? a : a into "a". */
3998 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3999 return op1;
4000
4001 /* Convert a != b ? a : b into "a". */
4002 if (GET_CODE (op0) == NE
4003 && ! side_effects_p (op0)
4004 && ! HONOR_NANS (mode)
4005 && ! HONOR_SIGNED_ZEROS (mode)
4006 && ((rtx_equal_p (XEXP (op0, 0), op1)
4007 && rtx_equal_p (XEXP (op0, 1), op2))
4008 || (rtx_equal_p (XEXP (op0, 0), op2)
4009 && rtx_equal_p (XEXP (op0, 1), op1))))
4010 return op1;
4011
4012 /* Convert a == b ? a : b into "b". */
4013 if (GET_CODE (op0) == EQ
4014 && ! side_effects_p (op0)
4015 && ! HONOR_NANS (mode)
4016 && ! HONOR_SIGNED_ZEROS (mode)
4017 && ((rtx_equal_p (XEXP (op0, 0), op1)
4018 && rtx_equal_p (XEXP (op0, 1), op2))
4019 || (rtx_equal_p (XEXP (op0, 0), op2)
4020 && rtx_equal_p (XEXP (op0, 1), op1))))
4021 return op2;
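/* Why HONOR_SIGNED_ZEROS and HONOR_NANS block the two folds above
   (illustrative): with IEEE signed zeros, a == b does not make a and b
   interchangeable.

     double pick (double a, double b) { return a != b ? a : b; }
     // pick (+0.0, -0.0) yields -0.0, but folding to "a" would yield +0.0.

   Likewise a NaN operand makes != and == behave unlike identity tests,
   so the folds are restricted to modes where neither is honored.  */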
4022
4023 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4024 {
4025 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4026 ? GET_MODE (XEXP (op0, 1))
4027 : GET_MODE (XEXP (op0, 0)));
4028 rtx temp;
4029
4030 /* Look for constant values of op1 and op2 that let us fold the
4031-0 IF_THEN_ELSE into a bare comparison. */
4031 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4032 {
4033 HOST_WIDE_INT t = INTVAL (op1);
4034 HOST_WIDE_INT f = INTVAL (op2);
4035
4036 if (t == STORE_FLAG_VALUE && f == 0)
4037 code = GET_CODE (op0);
4038 else if (t == 0 && f == STORE_FLAG_VALUE)
4039 {
4040 enum rtx_code tmp;
4041 tmp = reversed_comparison_code (op0, NULL_RTX);
4042 if (tmp == UNKNOWN)
4043 break;
4044 code = tmp;
4045 }
4046 else
4047 break;
4048
4049 return simplify_gen_relational (code, mode, cmp_mode,
4050 XEXP (op0, 0), XEXP (op0, 1));
4051 }
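/* Worked example of the fold above, assuming STORE_FLAG_VALUE == 1 (the
   common case), with op0 == (lt (x:SI) (y:SI)):

     (if_then_else (lt x y) (const_int 1) (const_int 0))
       becomes  (lt x y)
     (if_then_else (lt x y) (const_int 0) (const_int 1))
       becomes  (ge x y)   via reversed_comparison_code

   i.e. the IF_THEN_ELSE collapses into the comparison itself, possibly
   reversed; any other pair of constants falls through.  */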
4052
4053 if (cmp_mode == VOIDmode)
4054 cmp_mode = op0_mode;
4055 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4056 cmp_mode, XEXP (op0, 0),
4057 XEXP (op0, 1));
4058
4059 /* See if any simplifications were possible. */
4060 if (temp)
4061 {
4062 if (GET_CODE (temp) == CONST_INT)
4063 return temp == const0_rtx ? op2 : op1;
4064 else if (temp)
4065 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4066 }
4067 }
4068 break;
4069
4070 case VEC_MERGE:
4071 gcc_assert (GET_MODE (op0) == mode);
4072 gcc_assert (GET_MODE (op1) == mode);
4073 gcc_assert (VECTOR_MODE_P (mode));
4074 op2 = avoid_constant_pool_reference (op2);
4075 if (GET_CODE (op2) == CONST_INT)
4076 {
4077 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4078 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4079 int mask = (1 << n_elts) - 1;
4080
4081 if (!(INTVAL (op2) & mask))
4082 return op1;
4083 if ((INTVAL (op2) & mask) == mask)
4084 return op0;
4085
4086 op0 = avoid_constant_pool_reference (op0);
4087 op1 = avoid_constant_pool_reference (op1);
4088 if (GET_CODE (op0) == CONST_VECTOR
4089 && GET_CODE (op1) == CONST_VECTOR)
4090 {
4091 rtvec v = rtvec_alloc (n_elts);
4092 unsigned int i;
4093
4094 for (i = 0; i < n_elts; i++)
4095 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4096 ? CONST_VECTOR_ELT (op0, i)
4097 : CONST_VECTOR_ELT (op1, i));
4098 return gen_rtx_CONST_VECTOR (mode, v);
4099 }
4100 }
4101 break;
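/* Worked example of the VEC_MERGE fold above for a 4-element vector
   (illustrative): bit I of op2 selects element I from op0 when set and
   from op1 when clear, so with a mask of 5 (0101 in binary)

     (vec_merge:V4SI (const_vector [a0 a1 a2 a3])
                     (const_vector [b0 b1 b2 b3])
                     (const_int 5))

   folds to (const_vector [a0 b1 a2 b3]); a mask of 0 yields op1 and a
   mask covering all elements yields op0.  */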
4102
4103 default:
4104 gcc_unreachable ();
4105 }
4106
4107 return 0;
4108 }
4109
4110 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4111 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4112
4113 Works by unpacking OP into a collection of 8-bit values
4114 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4115 and then repacking them again for OUTERMODE. */
4116
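/* Worked example of the unpack/repack scheme described above (illustrative;
   little-endian target, 4-byte SImode and 8-byte DImode assumed):

     (subreg:SI (const_double/const_int 0x1122334455667788) 4)

   unpacks the DImode value into the little-endian byte array
   { 88 77 66 55 44 33 22 11 }, picks the four bytes starting at BYTE == 4,
   i.e. { 44 33 22 11 }, and repacks them as the SImode constant 0x11223344,
   the high word of the original value.  */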
4117 static rtx
4118 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4119 enum machine_mode innermode, unsigned int byte)
4120 {
4121 /* We support up to 512-bit values (for V8DFmode). */
4122 enum {
4123 max_bitsize = 512,
4124 value_bit = 8,
4125 value_mask = (1 << value_bit) - 1
4126 };
4127 unsigned char value[max_bitsize / value_bit];
4128 int value_start;
4129 int i;
4130 int elem;
4131
4132 int num_elem;
4133 rtx * elems;
4134 int elem_bitsize;
4135 rtx result_s;
4136 rtvec result_v = NULL;
4137 enum mode_class outer_class;
4138 enum machine_mode outer_submode;
4139
4140 /* Some ports misuse CCmode. */
4141 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4142 return op;
4143
4144 /* We have no way to represent a complex constant at the rtl level. */
4145 if (COMPLEX_MODE_P (outermode))
4146 return NULL_RTX;
4147
4148 /* Unpack the value. */
4149
4150 if (GET_CODE (op) == CONST_VECTOR)
4151 {
4152 num_elem = CONST_VECTOR_NUNITS (op);
4153 elems = &CONST_VECTOR_ELT (op, 0);
4154 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4155 }
4156 else
4157 {
4158 num_elem = 1;
4159 elems = &op;
4160 elem_bitsize = max_bitsize;
4161 }
4162 /* If this asserts, it is too complicated; reducing value_bit may help. */
4163 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4164 /* I don't know how to handle endianness of sub-units. */
4165 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4166
4167 for (elem = 0; elem < num_elem; elem++)
4168 {
4169 unsigned char * vp;
4170 rtx el = elems[elem];
4171
4172 /* Vectors are kept in target memory order. (This is probably
4173 a mistake.) */
4174 {
4175 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4176 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4177 / BITS_PER_UNIT);
4178 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4179 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4180 unsigned bytele = (subword_byte % UNITS_PER_WORD
4181 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4182 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4183 }
4184
4185 switch (GET_CODE (el))
4186 {
4187 case CONST_INT:
4188 for (i = 0;
4189 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4190 i += value_bit)
4191 *vp++ = INTVAL (el) >> i;
4192 /* CONST_INTs are always logically sign-extended. */
4193 for (; i < elem_bitsize; i += value_bit)
4194 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4195 break;
4196
4197 case CONST_DOUBLE:
4198 if (GET_MODE (el) == VOIDmode)
4199 {
4200 /* If this triggers, someone should have generated a
4201 CONST_INT instead. */
4202 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4203
4204 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4205 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4206 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4207 {
4208 *vp++
4209 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4210 i += value_bit;
4211 }
4212 /* It shouldn't matter what's done here, so fill it with
4213 zero. */
4214 for (; i < elem_bitsize; i += value_bit)
4215 *vp++ = 0;
4216 }
4217 else
4218 {
4219 long tmp[max_bitsize / 32];
4220 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4221
4222 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4223 gcc_assert (bitsize <= elem_bitsize);
4224 gcc_assert (bitsize % value_bit == 0);
4225
4226 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4227 GET_MODE (el));
4228
4229 /* real_to_target produces its result in words affected by
4230 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4231 and use WORDS_BIG_ENDIAN instead; see the documentation
4232 of SUBREG in rtl.texi. */
4233 for (i = 0; i < bitsize; i += value_bit)
4234 {
4235 int ibase;
4236 if (WORDS_BIG_ENDIAN)
4237 ibase = bitsize - 1 - i;
4238 else
4239 ibase = i;
4240 *vp++ = tmp[ibase / 32] >> i % 32;
4241 }
4242
4243 /* It shouldn't matter what's done here, so fill it with
4244 zero. */
4245 for (; i < elem_bitsize; i += value_bit)
4246 *vp++ = 0;
4247 }
4248 break;
4249
4250 default:
4251 gcc_unreachable ();
4252 }
4253 }
4254
4255 /* Now, pick the right byte to start with. */
4256 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4257 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4258 will already have offset 0. */
4259 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4260 {
4261 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4262 - byte);
4263 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4264 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4265 byte = (subword_byte % UNITS_PER_WORD
4266 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4267 }
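/* Worked example of the renumbering above (illustrative): for
   (subreg:SI (x:DI) 0) on a big-endian target, memory offset 0 holds the
   most significant SImode word, so ibyte == 8 - 4 - 0 == 4 and BYTE becomes
   4, selecting the high half of the little-endian value array built above.
   On a little-endian target BYTE is left unchanged.  */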
4268
4269 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4270 so if it's become negative it will instead be very large.) */
4271 gcc_assert (byte < GET_MODE_SIZE (innermode));
4272
4273 /* Convert from bytes to chunks of size value_bit. */
4274 value_start = byte * (BITS_PER_UNIT / value_bit);
4275
4276 /* Re-pack the value. */
4277
4278 if (VECTOR_MODE_P (outermode))
4279 {
4280 num_elem = GET_MODE_NUNITS (outermode);
4281 result_v = rtvec_alloc (num_elem);
4282 elems = &RTVEC_ELT (result_v, 0);
4283 outer_submode = GET_MODE_INNER (outermode);
4284 }
4285 else
4286 {
4287 num_elem = 1;
4288 elems = &result_s;
4289 outer_submode = outermode;
4290 }
4291
4292 outer_class = GET_MODE_CLASS (outer_submode);
4293 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4294
4295 gcc_assert (elem_bitsize % value_bit == 0);
4296 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4297
4298 for (elem = 0; elem < num_elem; elem++)
4299 {
4300 unsigned char *vp;
4301
4302 /* Vectors are stored in target memory order. (This is probably
4303 a mistake.) */
4304 {
4305 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4306 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4307 / BITS_PER_UNIT);
4308 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4309 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4310 unsigned bytele = (subword_byte % UNITS_PER_WORD
4311 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4312 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4313 }
4314
4315 switch (outer_class)
4316 {
4317 case MODE_INT:
4318 case MODE_PARTIAL_INT:
4319 {
4320 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4321
4322 for (i = 0;
4323 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4324 i += value_bit)
4325 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4326 for (; i < elem_bitsize; i += value_bit)
4327 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4328 << (i - HOST_BITS_PER_WIDE_INT));
4329
4330 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4331 know why. */
4332 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4333 elems[elem] = gen_int_mode (lo, outer_submode);
4334 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4335 elems[elem] = immed_double_const (lo, hi, outer_submode);
4336 else
4337 return NULL_RTX;
4338 }
4339 break;
4340
4341 case MODE_FLOAT:
4342 case MODE_DECIMAL_FLOAT:
4343 {
4344 REAL_VALUE_TYPE r;
4345 long tmp[max_bitsize / 32];
4346
4347 /* real_from_target wants its input in words affected by
4348 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4349 and use WORDS_BIG_ENDIAN instead; see the documentation
4350 of SUBREG in rtl.texi. */
4351 for (i = 0; i < max_bitsize / 32; i++)
4352 tmp[i] = 0;
4353 for (i = 0; i < elem_bitsize; i += value_bit)
4354 {
4355 int ibase;
4356 if (WORDS_BIG_ENDIAN)
4357 ibase = elem_bitsize - 1 - i;
4358 else
4359 ibase = i;
4360 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4361 }
4362
4363 real_from_target (&r, tmp, outer_submode);
4364 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4365 }
4366 break;
4367
4368 default:
4369 gcc_unreachable ();
4370 }
4371 }
4372 if (VECTOR_MODE_P (outermode))
4373 return gen_rtx_CONST_VECTOR (outermode, result_v);
4374 else
4375 return result_s;
4376 }
4377
4378 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4379 Return 0 if no simplifications are possible. */
4380 rtx
4381 simplify_subreg (enum machine_mode outermode, rtx op,
4382 enum machine_mode innermode, unsigned int byte)
4383 {
4384 /* Little bit of sanity checking. */
4385 gcc_assert (innermode != VOIDmode);
4386 gcc_assert (outermode != VOIDmode);
4387 gcc_assert (innermode != BLKmode);
4388 gcc_assert (outermode != BLKmode);
4389
4390 gcc_assert (GET_MODE (op) == innermode
4391 || GET_MODE (op) == VOIDmode);
4392
4393 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4394 gcc_assert (byte < GET_MODE_SIZE (innermode));
4395
4396 if (outermode == innermode && !byte)
4397 return op;
4398
4399 if (GET_CODE (op) == CONST_INT
4400 || GET_CODE (op) == CONST_DOUBLE
4401 || GET_CODE (op) == CONST_VECTOR)
4402 return simplify_immed_subreg (outermode, op, innermode, byte);
4403
4404 /* Changing mode twice with SUBREG => just change it once,
4405 or not at all if changing back to OP's starting mode. */
4406 if (GET_CODE (op) == SUBREG)
4407 {
4408 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4409 int final_offset = byte + SUBREG_BYTE (op);
4410 rtx newx;
4411
4412 if (outermode == innermostmode
4413 && byte == 0 && SUBREG_BYTE (op) == 0)
4414 return SUBREG_REG (op);
4415
4416 /* The SUBREG_BYTE represents the offset, as if the value were stored
4417 in memory. The irritating exception is a paradoxical subreg, where
4418 we define SUBREG_BYTE to be 0; on big-endian machines this value
4419 would otherwise be negative. For a moment, undo this exception. */
4420 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4421 {
4422 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4423 if (WORDS_BIG_ENDIAN)
4424 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4425 if (BYTES_BIG_ENDIAN)
4426 final_offset += difference % UNITS_PER_WORD;
4427 }
4428 if (SUBREG_BYTE (op) == 0
4429 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4430 {
4431 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4432 if (WORDS_BIG_ENDIAN)
4433 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4434 if (BYTES_BIG_ENDIAN)
4435 final_offset += difference % UNITS_PER_WORD;
4436 }
4437
4438 /* See whether resulting subreg will be paradoxical. */
4439 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4440 {
4441 /* In nonparadoxical subregs we can't handle negative offsets. */
4442 if (final_offset < 0)
4443 return NULL_RTX;
4444 /* Bail out in case resulting subreg would be incorrect. */
4445 if (final_offset % GET_MODE_SIZE (outermode)
4446 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4447 return NULL_RTX;
4448 }
4449 else
4450 {
4451 int offset = 0;
4452 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4453
4454 /* In a paradoxical subreg, see if we are still looking at the lower part.
4455 If so, our SUBREG_BYTE will be 0. */
4456 if (WORDS_BIG_ENDIAN)
4457 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4458 if (BYTES_BIG_ENDIAN)
4459 offset += difference % UNITS_PER_WORD;
4460 if (offset == final_offset)
4461 final_offset = 0;
4462 else
4463 return NULL_RTX;
4464 }
4465
4466 /* Recurse for further possible simplifications. */
4467 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4468 final_offset);
4469 if (newx)
4470 return newx;
4471 if (validate_subreg (outermode, innermostmode,
4472 SUBREG_REG (op), final_offset))
4473 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4474 return NULL_RTX;
4475 }
4476
4477 /* Merge implicit and explicit truncations. */
4478
4479 if (GET_CODE (op) == TRUNCATE
4480 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4481 && subreg_lowpart_offset (outermode, innermode) == byte)
4482 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4483 GET_MODE (XEXP (op, 0)));
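/* Worked example of the truncation merge above (illustrative): a lowpart
   SUBREG of a TRUNCATE is itself just a narrower truncation, so

     (subreg:QI (truncate:HI (x:SI)) 0)   [byte 0 being the lowpart offset
                                           on a little-endian target]
   becomes
     (truncate:QI (x:SI))

   and the intermediate HImode truncation disappears.  */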
4484
4485 /* SUBREG of a hard register => just change the register number
4486 and/or mode. If the hard register is not valid in that mode,
4487 suppress this simplification. If the hard register is the stack,
4488 frame, or argument pointer, leave this as a SUBREG. */
4489
4490 if (REG_P (op)
4491 && REGNO (op) < FIRST_PSEUDO_REGISTER
4492 #ifdef CANNOT_CHANGE_MODE_CLASS
4493 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4494 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4495 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4496 #endif
4497 && ((reload_completed && !frame_pointer_needed)
4498 || (REGNO (op) != FRAME_POINTER_REGNUM
4499 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4500 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4501 #endif
4502 ))
4503 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4504 && REGNO (op) != ARG_POINTER_REGNUM
4505 #endif
4506 && REGNO (op) != STACK_POINTER_REGNUM
4507 && subreg_offset_representable_p (REGNO (op), innermode,
4508 byte, outermode))
4509 {
4510 unsigned int regno = REGNO (op);
4511 unsigned int final_regno
4512 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4513
4514 /* ??? We do allow it if the current REG is not valid for
4515 its mode. This is a kludge to work around how float/complex
4516 arguments are passed on 32-bit SPARC and should be fixed. */
4517 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4518 || ! HARD_REGNO_MODE_OK (regno, innermode))
4519 {
4520 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
4521
4522 /* Propagate the original regno. We don't have any way to specify
4523 the offset inside the original regno, so do so only for the lowpart.
4524 The information is used only by alias analysis, which cannot
4525 grok partial registers anyway. */
4526
4527 if (subreg_lowpart_offset (outermode, innermode) == byte)
4528 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4529 return x;
4530 }
4531 }
4532
4533 /* If we have a SUBREG of a register that we are replacing and we are
4534 replacing it with a MEM, make a new MEM and try replacing the
4535 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4536 or if we would be widening it. */
4537
4538 if (MEM_P (op)
4539 && ! mode_dependent_address_p (XEXP (op, 0))
4540 /* Allow splitting of volatile memory references in case we don't
4541 have an instruction to move the whole thing. */
4542 && (! MEM_VOLATILE_P (op)
4543 || ! have_insn_for (SET, innermode))
4544 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4545 return adjust_address_nv (op, outermode, byte);
4546
4547 /* Handle complex values represented as CONCAT
4548 of real and imaginary part. */
4549 if (GET_CODE (op) == CONCAT)
4550 {
4551 unsigned int inner_size, final_offset;
4552 rtx part, res;
4553
4554 inner_size = GET_MODE_UNIT_SIZE (innermode);
4555 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4556 final_offset = byte % inner_size;
4557 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4558 return NULL_RTX;
4559
4560 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4561 if (res)
4562 return res;
4563 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4564 return gen_rtx_SUBREG (outermode, part, final_offset);
4565 return NULL_RTX;
4566 }
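/* Worked example of the CONCAT handling above (illustrative): for a complex
   double represented as (concat:DC re:DF im:DF) with 8-byte DFmode parts,

     (subreg:DF (concat:DC re im) 0)   simplifies to  re
     (subreg:DF (concat:DC re im) 8)   simplifies to  im

   while a subreg wider than one part, which would straddle both halves,
   returns NULL_RTX.  */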
4567
4568 /* Optimize SUBREG truncations of zero and sign extended values. */
4569 if ((GET_CODE (op) == ZERO_EXTEND
4570 || GET_CODE (op) == SIGN_EXTEND)
4571 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4572 {
4573 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4574
4575 /* If we're requesting the lowpart of a zero or sign extension,
4576 there are three possibilities. If the outermode is the same
4577 as the origmode, we can omit both the extension and the subreg.
4578 If the outermode is not larger than the origmode, we can apply
4579 the truncation without the extension. Finally, if the outermode
4580 is larger than the origmode, but both are integer modes, we
4581 can just extend to the appropriate mode. */
4582 if (bitpos == 0)
4583 {
4584 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4585 if (outermode == origmode)
4586 return XEXP (op, 0);
4587 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4588 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4589 subreg_lowpart_offset (outermode,
4590 origmode));
4591 if (SCALAR_INT_MODE_P (outermode))
4592 return simplify_gen_unary (GET_CODE (op), outermode,
4593 XEXP (op, 0), origmode);
4594 }
4595
4596 /* A SUBREG resulting from a zero extension may fold to zero if
4597 it extracts bits higher than the ZERO_EXTEND's source provides. */
4598 if (GET_CODE (op) == ZERO_EXTEND
4599 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4600 return CONST0_RTX (outermode);
4601 }
4602
4603 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4604 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4605 the outer subreg is effectively a truncation to the original mode. */
4606 if ((GET_CODE (op) == LSHIFTRT
4607 || GET_CODE (op) == ASHIFTRT)
4608 && SCALAR_INT_MODE_P (outermode)
4609 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4610 to avoid the possibility that an outer LSHIFTRT shifts by more
4611 than the sign extension's sign_bit_copies and introduces zeros
4612 into the high bits of the result. */
4613 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4614 && GET_CODE (XEXP (op, 1)) == CONST_INT
4615 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4616 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4617 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4618 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4619 return simplify_gen_binary (ASHIFTRT, outermode,
4620 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4621
4622 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4623 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4624 the outer subreg is effectively a truncation to the original mode. */
4625 if ((GET_CODE (op) == LSHIFTRT
4626 || GET_CODE (op) == ASHIFTRT)
4627 && SCALAR_INT_MODE_P (outermode)
4628 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4629 && GET_CODE (XEXP (op, 1)) == CONST_INT
4630 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4631 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4632 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4633 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4634 return simplify_gen_binary (LSHIFTRT, outermode,
4635 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4636
4637 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4638 into (ashift:QI (x:QI) C), where C is a suitable small constant and
4639 the outer subreg is effectively a truncation to the original mode. */
4640 if (GET_CODE (op) == ASHIFT
4641 && SCALAR_INT_MODE_P (outermode)
4642 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4643 && GET_CODE (XEXP (op, 1)) == CONST_INT
4644 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4645 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4646 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4647 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4648 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4649 return simplify_gen_binary (ASHIFT, outermode,
4650 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
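/* Worked examples of the three shift folds above (illustrative, shown with
   the little-endian lowpart byte 0 and shift count 2):

     (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) (const_int 2)) 0)
       becomes  (ashiftrt:QI (x:QI) (const_int 2))
     (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) (const_int 2)) 0)
       becomes  (lshiftrt:QI (x:QI) (const_int 2))
     (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) (const_int 2)) 0)
       becomes  (ashift:QI (x:QI) (const_int 2))

   In each case the low QImode bits of the wide shift depend only on the
   original QImode value shifted the same way, because the count is smaller
   than the narrow width and the subreg takes the lowpart.  */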
4651
4652 return NULL_RTX;
4653 }
4654
4655 /* Make a SUBREG operation or equivalent if it folds. */
4656
4657 rtx
4658 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4659 enum machine_mode innermode, unsigned int byte)
4660 {
4661 rtx newx;
4662
4663 newx = simplify_subreg (outermode, op, innermode, byte);
4664 if (newx)
4665 return newx;
4666
4667 if (GET_CODE (op) == SUBREG
4668 || GET_CODE (op) == CONCAT
4669 || GET_MODE (op) == VOIDmode)
4670 return NULL_RTX;
4671
4672 if (validate_subreg (outermode, innermode, op, byte))
4673 return gen_rtx_SUBREG (outermode, op, byte);
4674
4675 return NULL_RTX;
4676 }
4677
4678 /* Simplify X, an rtx expression.
4679
4680 Return the simplified expression or NULL if no simplifications
4681 were possible.
4682
4683 This is the preferred entry point into the simplification routines;
4684 however, we still allow passes to call the more specific routines.
4685
4686 Right now GCC has three (yes, three) major bodies of RTL simplification
4687 code that need to be unified.
4688
4689 1. fold_rtx in cse.c. This code uses various CSE specific
4690 information to aid in RTL simplification.
4691
4692 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4693 it uses combine specific information to aid in RTL
4694 simplification.
4695
4696 3. The routines in this file.
4697
4698
4699 Long term we want to only have one body of simplification code; to
4700 get to that state I recommend the following steps:
4701
4702 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4703 which do not depend on pass-specific state into these routines.
4704
4705 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4706 use this routine whenever possible.
4707
4708 3. Allow for pass dependent state to be provided to these
4709 routines and add simplifications based on the pass dependent
4710 state. Remove code from cse.c & combine.c that becomes
4711 redundant/dead.
4712
4713 It will take time, but ultimately the compiler will be easier to
4714 maintain and improve. It's totally silly that when we add a
4715 simplification it needs to be added in 4 places (3 for RTL
4716 simplification and 1 for tree simplification). */
4717
4718 rtx
4719 simplify_rtx (rtx x)
4720 {
4721 enum rtx_code code = GET_CODE (x);
4722 enum machine_mode mode = GET_MODE (x);
4723
4724 switch (GET_RTX_CLASS (code))
4725 {
4726 case RTX_UNARY:
4727 return simplify_unary_operation (code, mode,
4728 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4729 case RTX_COMM_ARITH:
4730 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4731 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4732
4733 /* Fall through.... */
4734
4735 case RTX_BIN_ARITH:
4736 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4737
4738 case RTX_TERNARY:
4739 case RTX_BITFIELD_OPS:
4740 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4741 XEXP (x, 0), XEXP (x, 1),
4742 XEXP (x, 2));
4743
4744 case RTX_COMPARE:
4745 case RTX_COMM_COMPARE:
4746 return simplify_relational_operation (code, mode,
4747 ((GET_MODE (XEXP (x, 0))
4748 != VOIDmode)
4749 ? GET_MODE (XEXP (x, 0))
4750 : GET_MODE (XEXP (x, 1))),
4751 XEXP (x, 0),
4752 XEXP (x, 1));
4753
4754 case RTX_EXTRA:
4755 if (code == SUBREG)
4756 return simplify_gen_subreg (mode, SUBREG_REG (x),
4757 GET_MODE (SUBREG_REG (x)),
4758 SUBREG_BYTE (x));
4759 break;
4760
4761 case RTX_OBJ:
4762 if (code == LO_SUM)
4763 {
4764 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4765 if (GET_CODE (XEXP (x, 0)) == HIGH
4766 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4767 return XEXP (x, 1);
4768 }
4769 break;
4770
4771 default:
4772 break;
4773 }
4774 return NULL;
4775 }
4776