gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
51
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
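/* For example, negating (const_int 5) in QImode yields (const_int -5),
   while the maximally negative QImode value wraps around: negating
   (const_int -128) gives (const_int -128) again.  */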
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
76
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
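/* For example, in SImode (32 bits wide) the only value for which this
   returns true is the constant with just bit 31 set, 0x80000000.  */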
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
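/* For example, simplify_gen_binary (PLUS, SImode, (const_int 2), (const_int 3))
   folds to (const_int 5), whereas (PLUS, SImode, (const_int 2), (reg R))
   cannot fold and is merely reordered into (plus (reg R) (const_int 2)).  */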
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 if (GET_MODE (x) == BLKmode)
162 return x;
163
164 addr = XEXP (x, 0);
165
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
168
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 {
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
176 }
177
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
180
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
185 {
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
188
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
193 {
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
197 }
198 else
199 return c;
200 }
201
202 return x;
203 }
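/* For example, a (mem:DF (symbol_ref ...)) that addresses a constant pool
   entry holding 1.0 is replaced by the (const_double:DF 1.0) itself, so
   callers can fold with the value directly.  */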
204 \f
205 /* Make a unary operation by first seeing if it folds and otherwise making
206 the specified operation. */
207
208 rtx
209 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
210 enum machine_mode op_mode)
211 {
212 rtx tem;
213
214 /* If this simplifies, use it. */
215 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
216 return tem;
217
218 return gen_rtx_fmt_e (code, mode, op);
219 }
220
221 /* Likewise for ternary operations. */
222
223 rtx
224 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
225 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
226 {
227 rtx tem;
228
229 /* If this simplifies, use it. */
230 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
231 op0, op1, op2)))
232 return tem;
233
234 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
235 }
236
237 /* Likewise, for relational operations.
238 CMP_MODE specifies mode comparison is done in. */
239
240 rtx
241 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
242 enum machine_mode cmp_mode, rtx op0, rtx op1)
243 {
244 rtx tem;
245
246 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
247 op0, op1)))
248 return tem;
249
250 return gen_rtx_fmt_ee (code, mode, op0, op1);
251 }
252 \f
253 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
254 resulting RTX. Return a new RTX which is as simplified as possible. */
255
256 rtx
257 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
258 {
259 enum rtx_code code = GET_CODE (x);
260 enum machine_mode mode = GET_MODE (x);
261 enum machine_mode op_mode;
262 rtx op0, op1, op2;
263
264 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
265 to build a new expression substituting recursively. If we can't do
266 anything, return our input. */
267
268 if (x == old_rtx)
269 return new_rtx;
270
271 switch (GET_RTX_CLASS (code))
272 {
273 case RTX_UNARY:
274 op0 = XEXP (x, 0);
275 op_mode = GET_MODE (op0);
276 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
277 if (op0 == XEXP (x, 0))
278 return x;
279 return simplify_gen_unary (code, mode, op0, op_mode);
280
281 case RTX_BIN_ARITH:
282 case RTX_COMM_ARITH:
283 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
284 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
286 return x;
287 return simplify_gen_binary (code, mode, op0, op1);
288
289 case RTX_COMPARE:
290 case RTX_COMM_COMPARE:
291 op0 = XEXP (x, 0);
292 op1 = XEXP (x, 1);
293 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
296 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
297 return x;
298 return simplify_gen_relational (code, mode, op_mode, op0, op1);
299
300 case RTX_TERNARY:
301 case RTX_BITFIELD_OPS:
302 op0 = XEXP (x, 0);
303 op_mode = GET_MODE (op0);
304 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
305 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
306 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
307 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
308 return x;
309 if (op_mode == VOIDmode)
310 op_mode = GET_MODE (op0);
311 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
312
313 case RTX_EXTRA:
314 /* The only case we try to handle is a SUBREG. */
315 if (code == SUBREG)
316 {
317 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
318 if (op0 == SUBREG_REG (x))
319 return x;
320 op0 = simplify_gen_subreg (GET_MODE (x), op0,
321 GET_MODE (SUBREG_REG (x)),
322 SUBREG_BYTE (x));
323 return op0 ? op0 : x;
324 }
325 break;
326
327 case RTX_OBJ:
328 if (code == MEM)
329 {
330 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
331 if (op0 == XEXP (x, 0))
332 return x;
333 return replace_equiv_address_nv (x, op0);
334 }
335 else if (code == LO_SUM)
336 {
337 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
338 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
339
340 /* (lo_sum (high x) x) -> x */
341 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
342 return op1;
343
344 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
345 return x;
346 return gen_rtx_LO_SUM (mode, op0, op1);
347 }
348 else if (code == REG)
349 {
350 if (rtx_equal_p (x, old_rtx))
351 return new_rtx;
352 }
353 break;
354
355 default:
356 break;
357 }
358 return x;
359 }
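/* For example, replacing (reg R) by (const_int 4) in
   (plus:SI (reg R) (const_int 1)) rebuilds the PLUS via
   simplify_gen_binary, which folds the result to (const_int 5).  */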
360 \f
361 /* Try to simplify a unary operation CODE whose output mode is to be
362 MODE with input operand OP whose mode was originally OP_MODE.
363 Return zero if no simplification can be made. */
364 rtx
365 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
366 rtx op, enum machine_mode op_mode)
367 {
368 rtx trueop, tem;
369
370 if (GET_CODE (op) == CONST)
371 op = XEXP (op, 0);
372
373 trueop = avoid_constant_pool_reference (op);
374
375 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
376 if (tem)
377 return tem;
378
379 return simplify_unary_operation_1 (code, mode, op);
380 }
381
382 /* Perform some simplifications we can do even if the operands
383 aren't constant. */
384 static rtx
385 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
386 {
387 enum rtx_code reversed;
388 rtx temp;
389
390 switch (code)
391 {
392 case NOT:
393 /* (not (not X)) == X. */
394 if (GET_CODE (op) == NOT)
395 return XEXP (op, 0);
396
397 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
398 comparison is all ones. */
399 if (COMPARISON_P (op)
400 && (mode == BImode || STORE_FLAG_VALUE == -1)
401 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
402 return simplify_gen_relational (reversed, mode, VOIDmode,
403 XEXP (op, 0), XEXP (op, 1));
404
405 /* (not (plus X -1)) can become (neg X). */
406 if (GET_CODE (op) == PLUS
407 && XEXP (op, 1) == constm1_rtx)
408 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
409
410 /* Similarly, (not (neg X)) is (plus X -1). */
411 if (GET_CODE (op) == NEG)
412 return plus_constant (XEXP (op, 0), -1);
413
414 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
415 if (GET_CODE (op) == XOR
416 && CONST_INT_P (XEXP (op, 1))
417 && (temp = simplify_unary_operation (NOT, mode,
418 XEXP (op, 1), mode)) != 0)
419 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
420
421 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
422 if (GET_CODE (op) == PLUS
423 && CONST_INT_P (XEXP (op, 1))
424 && mode_signbit_p (mode, XEXP (op, 1))
425 && (temp = simplify_unary_operation (NOT, mode,
426 XEXP (op, 1), mode)) != 0)
427 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
428
429
430 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
431 operands other than 1, but that is not valid. We could do a
432 similar simplification for (not (lshiftrt C X)) where C is
433 just the sign bit, but this doesn't seem common enough to
434 bother with. */
435 if (GET_CODE (op) == ASHIFT
436 && XEXP (op, 0) == const1_rtx)
437 {
438 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
439 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
440 }
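      /* For example, in SImode (not (ashift (const_int 1) (reg N)))
         becomes (rotate (const_int -2) (reg N)): ~1 is the all-ones
         pattern with bit 0 clear, and rotating it left by N clears
         bit N, which is exactly ~(1 << N).  */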
441
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
445
446 if (STORE_FLAG_VALUE == -1
447 && GET_CODE (op) == ASHIFTRT
 448           && CONST_INT_P (XEXP (op, 1))
449 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
450 return simplify_gen_relational (GE, mode, VOIDmode,
451 XEXP (op, 0), const0_rtx);
452
453
454 if (GET_CODE (op) == SUBREG
455 && subreg_lowpart_p (op)
456 && (GET_MODE_SIZE (GET_MODE (op))
457 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
458 && GET_CODE (SUBREG_REG (op)) == ASHIFT
459 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
460 {
461 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
462 rtx x;
463
464 x = gen_rtx_ROTATE (inner_mode,
465 simplify_gen_unary (NOT, inner_mode, const1_rtx,
466 inner_mode),
467 XEXP (SUBREG_REG (op), 1));
468 return rtl_hooks.gen_lowpart_no_emit (mode, x);
469 }
470
471 /* Apply De Morgan's laws to reduce number of patterns for machines
472 with negating logical insns (and-not, nand, etc.). If result has
473 only one NOT, put it first, since that is how the patterns are
474 coded. */
475
476 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
477 {
478 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
479 enum machine_mode op_mode;
480
481 op_mode = GET_MODE (in1);
482 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
483
484 op_mode = GET_MODE (in2);
485 if (op_mode == VOIDmode)
486 op_mode = mode;
487 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
488
489 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
490 {
491 rtx tem = in2;
492 in2 = in1; in1 = tem;
493 }
494
495 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
496 mode, in1, in2);
497 }
498 break;
499
500 case NEG:
501 /* (neg (neg X)) == X. */
502 if (GET_CODE (op) == NEG)
503 return XEXP (op, 0);
504
505 /* (neg (plus X 1)) can become (not X). */
506 if (GET_CODE (op) == PLUS
507 && XEXP (op, 1) == const1_rtx)
508 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
509
510 /* Similarly, (neg (not X)) is (plus X 1). */
511 if (GET_CODE (op) == NOT)
512 return plus_constant (XEXP (op, 0), 1);
513
514 /* (neg (minus X Y)) can become (minus Y X). This transformation
515 isn't safe for modes with signed zeros, since if X and Y are
516 both +0, (minus Y X) is the same as (minus X Y). If the
517 rounding mode is towards +infinity (or -infinity) then the two
518 expressions will be rounded differently. */
519 if (GET_CODE (op) == MINUS
520 && !HONOR_SIGNED_ZEROS (mode)
521 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
522 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
523
524 if (GET_CODE (op) == PLUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 {
528 /* (neg (plus A C)) is simplified to (minus -C A). */
529 if (CONST_INT_P (XEXP (op, 1))
530 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
531 {
532 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
533 if (temp)
534 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
535 }
536
537 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
538 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
540 }
541
542 /* (neg (mult A B)) becomes (mult (neg A) B).
543 This works even for floating-point values. */
544 if (GET_CODE (op) == MULT
545 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
546 {
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
549 }
550
551 /* NEG commutes with ASHIFT since it is multiplication. Only do
552 this if we can then eliminate the NEG (e.g., if the operand
553 is a constant). */
554 if (GET_CODE (op) == ASHIFT)
555 {
556 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
557 if (temp)
558 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
559 }
560
561 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
562 C is equal to the width of MODE minus 1. */
563 if (GET_CODE (op) == ASHIFTRT
564 && CONST_INT_P (XEXP (op, 1))
565 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
566 return simplify_gen_binary (LSHIFTRT, mode,
567 XEXP (op, 0), XEXP (op, 1));
568
569 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
570 C is equal to the width of MODE minus 1. */
571 if (GET_CODE (op) == LSHIFTRT
572 && CONST_INT_P (XEXP (op, 1))
573 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
574 return simplify_gen_binary (ASHIFTRT, mode,
575 XEXP (op, 0), XEXP (op, 1));
576
577 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
578 if (GET_CODE (op) == XOR
579 && XEXP (op, 1) == const1_rtx
580 && nonzero_bits (XEXP (op, 0), mode) == 1)
581 return plus_constant (XEXP (op, 0), -1);
582
583 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
584 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
585 if (GET_CODE (op) == LT
586 && XEXP (op, 1) == const0_rtx
587 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
588 {
589 enum machine_mode inner = GET_MODE (XEXP (op, 0));
590 int isize = GET_MODE_BITSIZE (inner);
591 if (STORE_FLAG_VALUE == 1)
592 {
593 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
594 GEN_INT (isize - 1));
595 if (mode == inner)
596 return temp;
597 if (GET_MODE_BITSIZE (mode) > isize)
598 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
599 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
600 }
601 else if (STORE_FLAG_VALUE == -1)
602 {
603 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
604 GEN_INT (isize - 1));
605 if (mode == inner)
606 return temp;
607 if (GET_MODE_BITSIZE (mode) > isize)
608 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
609 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
610 }
611 }
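          /* For example, with STORE_FLAG_VALUE == 1,
             (neg (lt (reg:SI X) (const_int 0))) becomes
             (ashiftrt:SI (reg X) (const_int 31)), which is -1 when X is
             negative and 0 otherwise -- the same as -(X < 0).  */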
612 break;
613
614 case TRUNCATE:
615 /* We can't handle truncation to a partial integer mode here
616 because we don't know the real bitsize of the partial
617 integer mode. */
618 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
619 break;
620
621 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
622 if ((GET_CODE (op) == SIGN_EXTEND
623 || GET_CODE (op) == ZERO_EXTEND)
624 && GET_MODE (XEXP (op, 0)) == mode)
625 return XEXP (op, 0);
626
627 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
628 (OP:SI foo:SI) if OP is NEG or ABS. */
629 if ((GET_CODE (op) == ABS
630 || GET_CODE (op) == NEG)
631 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
632 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
633 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
634 return simplify_gen_unary (GET_CODE (op), mode,
635 XEXP (XEXP (op, 0), 0), mode);
636
637 /* (truncate:A (subreg:B (truncate:C X) 0)) is
638 (truncate:A X). */
639 if (GET_CODE (op) == SUBREG
640 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
641 && subreg_lowpart_p (op))
642 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
643 GET_MODE (XEXP (SUBREG_REG (op), 0)));
644
645 /* If we know that the value is already truncated, we can
646 replace the TRUNCATE with a SUBREG. Note that this is also
647 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
648 modes we just have to apply a different definition for
649 truncation. But don't do this for an (LSHIFTRT (MULT ...))
650 since this will cause problems with the umulXi3_highpart
651 patterns. */
652 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
653 GET_MODE_BITSIZE (GET_MODE (op)))
654 ? (num_sign_bit_copies (op, GET_MODE (op))
655 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
656 - GET_MODE_BITSIZE (mode)))
657 : truncated_to_mode (mode, op))
658 && ! (GET_CODE (op) == LSHIFTRT
659 && GET_CODE (XEXP (op, 0)) == MULT))
660 return rtl_hooks.gen_lowpart_no_emit (mode, op);
661
662 /* A truncate of a comparison can be replaced with a subreg if
663 STORE_FLAG_VALUE permits. This is like the previous test,
664 but it works even if the comparison is done in a mode larger
665 than HOST_BITS_PER_WIDE_INT. */
666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
667 && COMPARISON_P (op)
668 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
669 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 break;
671
672 case FLOAT_TRUNCATE:
673 if (DECIMAL_FLOAT_MODE_P (mode))
674 break;
675
676 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
677 if (GET_CODE (op) == FLOAT_EXTEND
678 && GET_MODE (XEXP (op, 0)) == mode)
679 return XEXP (op, 0);
680
681 /* (float_truncate:SF (float_truncate:DF foo:XF))
682 = (float_truncate:SF foo:XF).
683 This may eliminate double rounding, so it is unsafe.
684
685 (float_truncate:SF (float_extend:XF foo:DF))
686 = (float_truncate:SF foo:DF).
687
688 (float_truncate:DF (float_extend:XF foo:SF))
 689          = (float_extend:DF foo:SF). */
690 if ((GET_CODE (op) == FLOAT_TRUNCATE
691 && flag_unsafe_math_optimizations)
692 || GET_CODE (op) == FLOAT_EXTEND)
693 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
694 0)))
695 > GET_MODE_SIZE (mode)
696 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
697 mode,
698 XEXP (op, 0), mode);
699
700 /* (float_truncate (float x)) is (float x) */
701 if (GET_CODE (op) == FLOAT
702 && (flag_unsafe_math_optimizations
703 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
704 && ((unsigned)significand_size (GET_MODE (op))
705 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
706 - num_sign_bit_copies (XEXP (op, 0),
707 GET_MODE (XEXP (op, 0))))))))
708 return simplify_gen_unary (FLOAT, mode,
709 XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)));
711
712 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
713 (OP:SF foo:SF) if OP is NEG or ABS. */
714 if ((GET_CODE (op) == ABS
715 || GET_CODE (op) == NEG)
716 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
718 return simplify_gen_unary (GET_CODE (op), mode,
719 XEXP (XEXP (op, 0), 0), mode);
720
721 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
722 is (float_truncate:SF x). */
723 if (GET_CODE (op) == SUBREG
724 && subreg_lowpart_p (op)
725 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
726 return SUBREG_REG (op);
727 break;
728
729 case FLOAT_EXTEND:
730 if (DECIMAL_FLOAT_MODE_P (mode))
731 break;
732
733 /* (float_extend (float_extend x)) is (float_extend x)
734
735 (float_extend (float x)) is (float x) assuming that double
736 rounding can't happen.
737 */
738 if (GET_CODE (op) == FLOAT_EXTEND
739 || (GET_CODE (op) == FLOAT
740 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
741 && ((unsigned)significand_size (GET_MODE (op))
742 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
743 - num_sign_bit_copies (XEXP (op, 0),
744 GET_MODE (XEXP (op, 0)))))))
745 return simplify_gen_unary (GET_CODE (op), mode,
746 XEXP (op, 0),
747 GET_MODE (XEXP (op, 0)));
748
749 break;
750
751 case ABS:
752 /* (abs (neg <foo>)) -> (abs <foo>) */
753 if (GET_CODE (op) == NEG)
754 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
755 GET_MODE (XEXP (op, 0)));
756
757 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
758 do nothing. */
759 if (GET_MODE (op) == VOIDmode)
760 break;
761
762 /* If operand is something known to be positive, ignore the ABS. */
763 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
764 || ((GET_MODE_BITSIZE (GET_MODE (op))
765 <= HOST_BITS_PER_WIDE_INT)
766 && ((nonzero_bits (op, GET_MODE (op))
767 & ((HOST_WIDE_INT) 1
768 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
769 == 0)))
770 return op;
771
772 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
773 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
774 return gen_rtx_NEG (mode, op);
775
776 break;
777
778 case FFS:
779 /* (ffs (*_extend <X>)) = (ffs <X>) */
780 if (GET_CODE (op) == SIGN_EXTEND
781 || GET_CODE (op) == ZERO_EXTEND)
782 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
783 GET_MODE (XEXP (op, 0)));
784 break;
785
786 case POPCOUNT:
787 switch (GET_CODE (op))
788 {
789 case BSWAP:
790 case ZERO_EXTEND:
791 /* (popcount (zero_extend <X>)) = (popcount <X>) */
792 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
794
795 case ROTATE:
796 case ROTATERT:
797 /* Rotations don't affect popcount. */
798 if (!side_effects_p (XEXP (op, 1)))
799 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
802
803 default:
804 break;
805 }
806 break;
807
808 case PARITY:
809 switch (GET_CODE (op))
810 {
811 case NOT:
812 case BSWAP:
813 case ZERO_EXTEND:
814 case SIGN_EXTEND:
815 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
816 GET_MODE (XEXP (op, 0)));
817
818 case ROTATE:
819 case ROTATERT:
820 /* Rotations don't affect parity. */
821 if (!side_effects_p (XEXP (op, 1)))
822 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
823 GET_MODE (XEXP (op, 0)));
824 break;
825
826 default:
827 break;
828 }
829 break;
830
831 case BSWAP:
832 /* (bswap (bswap x)) -> x. */
833 if (GET_CODE (op) == BSWAP)
834 return XEXP (op, 0);
835 break;
836
837 case FLOAT:
838 /* (float (sign_extend <X>)) = (float <X>). */
839 if (GET_CODE (op) == SIGN_EXTEND)
840 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
841 GET_MODE (XEXP (op, 0)));
842 break;
843
844 case SIGN_EXTEND:
845 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
846 becomes just the MINUS if its mode is MODE. This allows
847 folding switch statements on machines using casesi (such as
848 the VAX). */
849 if (GET_CODE (op) == TRUNCATE
850 && GET_MODE (XEXP (op, 0)) == mode
851 && GET_CODE (XEXP (op, 0)) == MINUS
852 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
853 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
854 return XEXP (op, 0);
855
856 /* Check for a sign extension of a subreg of a promoted
857 variable, where the promotion is sign-extended, and the
858 target mode is the same as the variable's promotion. */
859 if (GET_CODE (op) == SUBREG
860 && SUBREG_PROMOTED_VAR_P (op)
861 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
862 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
863 return rtl_hooks.gen_lowpart_no_emit (mode, op);
864
865 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
866 if (! POINTERS_EXTEND_UNSIGNED
867 && mode == Pmode && GET_MODE (op) == ptr_mode
868 && (CONSTANT_P (op)
869 || (GET_CODE (op) == SUBREG
870 && REG_P (SUBREG_REG (op))
871 && REG_POINTER (SUBREG_REG (op))
872 && GET_MODE (SUBREG_REG (op)) == Pmode)))
873 return convert_memory_address (Pmode, op);
874 #endif
875 break;
876
877 case ZERO_EXTEND:
878 /* Check for a zero extension of a subreg of a promoted
879 variable, where the promotion is zero-extended, and the
880 target mode is the same as the variable's promotion. */
881 if (GET_CODE (op) == SUBREG
882 && SUBREG_PROMOTED_VAR_P (op)
883 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
884 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
885 return rtl_hooks.gen_lowpart_no_emit (mode, op);
886
887 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
888 if (POINTERS_EXTEND_UNSIGNED > 0
889 && mode == Pmode && GET_MODE (op) == ptr_mode
890 && (CONSTANT_P (op)
891 || (GET_CODE (op) == SUBREG
892 && REG_P (SUBREG_REG (op))
893 && REG_POINTER (SUBREG_REG (op))
894 && GET_MODE (SUBREG_REG (op)) == Pmode)))
895 return convert_memory_address (Pmode, op);
896 #endif
897 break;
898
899 default:
900 break;
901 }
902
903 return 0;
904 }
905
906 /* Try to compute the value of a unary operation CODE whose output mode is to
907 be MODE with input operand OP whose mode was originally OP_MODE.
908 Return zero if the value cannot be computed. */
909 rtx
910 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
911 rtx op, enum machine_mode op_mode)
912 {
913 unsigned int width = GET_MODE_BITSIZE (mode);
914
915 if (code == VEC_DUPLICATE)
916 {
917 gcc_assert (VECTOR_MODE_P (mode));
918 if (GET_MODE (op) != VOIDmode)
919 {
920 if (!VECTOR_MODE_P (GET_MODE (op)))
921 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
922 else
923 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
924 (GET_MODE (op)));
925 }
926 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
927 || GET_CODE (op) == CONST_VECTOR)
928 {
929 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
930 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
931 rtvec v = rtvec_alloc (n_elts);
932 unsigned int i;
933
934 if (GET_CODE (op) != CONST_VECTOR)
935 for (i = 0; i < n_elts; i++)
936 RTVEC_ELT (v, i) = op;
937 else
938 {
939 enum machine_mode inmode = GET_MODE (op);
940 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
941 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
942
943 gcc_assert (in_n_elts < n_elts);
944 gcc_assert ((n_elts % in_n_elts) == 0);
945 for (i = 0; i < n_elts; i++)
946 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
947 }
948 return gen_rtx_CONST_VECTOR (mode, v);
949 }
950 }
951
952 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
953 {
954 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
955 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
956 enum machine_mode opmode = GET_MODE (op);
957 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
958 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
959 rtvec v = rtvec_alloc (n_elts);
960 unsigned int i;
961
962 gcc_assert (op_n_elts == n_elts);
963 for (i = 0; i < n_elts; i++)
964 {
965 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
966 CONST_VECTOR_ELT (op, i),
967 GET_MODE_INNER (opmode));
968 if (!x)
969 return 0;
970 RTVEC_ELT (v, i) = x;
971 }
972 return gen_rtx_CONST_VECTOR (mode, v);
973 }
974
975 /* The order of these tests is critical so that, for example, we don't
976 check the wrong mode (input vs. output) for a conversion operation,
977 such as FIX. At some point, this should be simplified. */
978
979 if (code == FLOAT && GET_MODE (op) == VOIDmode
980 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
981 {
982 HOST_WIDE_INT hv, lv;
983 REAL_VALUE_TYPE d;
984
985 if (CONST_INT_P (op))
986 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
987 else
988 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
989
990 REAL_VALUE_FROM_INT (d, lv, hv, mode);
991 d = real_value_truncate (mode, d);
992 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
993 }
994 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
995 && (GET_CODE (op) == CONST_DOUBLE
996 || CONST_INT_P (op)))
997 {
998 HOST_WIDE_INT hv, lv;
999 REAL_VALUE_TYPE d;
1000
1001 if (CONST_INT_P (op))
1002 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1003 else
1004 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1005
1006 if (op_mode == VOIDmode)
1007 {
1008 /* We don't know how to interpret negative-looking numbers in
1009 this case, so don't try to fold those. */
1010 if (hv < 0)
1011 return 0;
1012 }
1013 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1014 ;
1015 else
1016 hv = 0, lv &= GET_MODE_MASK (op_mode);
1017
1018 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1019 d = real_value_truncate (mode, d);
1020 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1021 }
1022
1023 if (CONST_INT_P (op)
1024 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1025 {
1026 HOST_WIDE_INT arg0 = INTVAL (op);
1027 HOST_WIDE_INT val;
1028
1029 switch (code)
1030 {
1031 case NOT:
1032 val = ~ arg0;
1033 break;
1034
1035 case NEG:
1036 val = - arg0;
1037 break;
1038
1039 case ABS:
1040 val = (arg0 >= 0 ? arg0 : - arg0);
1041 break;
1042
1043 case FFS:
1044 /* Don't use ffs here. Instead, get low order bit and then its
1045 number. If arg0 is zero, this will return 0, as desired. */
1046 arg0 &= GET_MODE_MASK (mode);
1047 val = exact_log2 (arg0 & (- arg0)) + 1;
1048 break;
1049
1050 case CLZ:
1051 arg0 &= GET_MODE_MASK (mode);
1052 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1053 ;
1054 else
1055 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1056 break;
1057
1058 case CTZ:
1059 arg0 &= GET_MODE_MASK (mode);
1060 if (arg0 == 0)
1061 {
1062 /* Even if the value at zero is undefined, we have to come
1063 up with some replacement. Seems good enough. */
1064 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1065 val = GET_MODE_BITSIZE (mode);
1066 }
1067 else
1068 val = exact_log2 (arg0 & -arg0);
1069 break;
1070
1071 case POPCOUNT:
1072 arg0 &= GET_MODE_MASK (mode);
1073 val = 0;
1074 while (arg0)
1075 val++, arg0 &= arg0 - 1;
1076 break;
1077
1078 case PARITY:
1079 arg0 &= GET_MODE_MASK (mode);
1080 val = 0;
1081 while (arg0)
1082 val++, arg0 &= arg0 - 1;
1083 val &= 1;
1084 break;
1085
1086 case BSWAP:
1087 {
1088 unsigned int s;
1089
1090 val = 0;
1091 for (s = 0; s < width; s += 8)
1092 {
1093 unsigned int d = width - s - 8;
1094 unsigned HOST_WIDE_INT byte;
1095 byte = (arg0 >> s) & 0xff;
1096 val |= byte << d;
1097 }
1098 }
1099 break;
1100
1101 case TRUNCATE:
1102 val = arg0;
1103 break;
1104
1105 case ZERO_EXTEND:
1106 /* When zero-extending a CONST_INT, we need to know its
1107 original mode. */
1108 gcc_assert (op_mode != VOIDmode);
1109 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1110 {
1111 /* If we were really extending the mode,
1112 we would have to distinguish between zero-extension
1113 and sign-extension. */
1114 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1115 val = arg0;
1116 }
1117 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1118 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1119 else
1120 return 0;
1121 break;
1122
1123 case SIGN_EXTEND:
1124 if (op_mode == VOIDmode)
1125 op_mode = mode;
1126 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1127 {
1128 /* If we were really extending the mode,
1129 we would have to distinguish between zero-extension
1130 and sign-extension. */
1131 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1132 val = arg0;
1133 }
1134 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1135 {
1136 val
1137 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1138 if (val
1139 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1140 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1141 }
1142 else
1143 return 0;
1144 break;
1145
1146 case SQRT:
1147 case FLOAT_EXTEND:
1148 case FLOAT_TRUNCATE:
1149 case SS_TRUNCATE:
1150 case US_TRUNCATE:
1151 case SS_NEG:
1152 case US_NEG:
1153 return 0;
1154
1155 default:
1156 gcc_unreachable ();
1157 }
1158
1159 return gen_int_mode (val, mode);
1160 }
1161
1162 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1163 for a DImode operation on a CONST_INT. */
1164 else if (GET_MODE (op) == VOIDmode
1165 && width <= HOST_BITS_PER_WIDE_INT * 2
1166 && (GET_CODE (op) == CONST_DOUBLE
1167 || CONST_INT_P (op)))
1168 {
1169 unsigned HOST_WIDE_INT l1, lv;
1170 HOST_WIDE_INT h1, hv;
1171
1172 if (GET_CODE (op) == CONST_DOUBLE)
1173 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1174 else
1175 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1176
1177 switch (code)
1178 {
1179 case NOT:
1180 lv = ~ l1;
1181 hv = ~ h1;
1182 break;
1183
1184 case NEG:
1185 neg_double (l1, h1, &lv, &hv);
1186 break;
1187
1188 case ABS:
1189 if (h1 < 0)
1190 neg_double (l1, h1, &lv, &hv);
1191 else
1192 lv = l1, hv = h1;
1193 break;
1194
1195 case FFS:
1196 hv = 0;
1197 if (l1 == 0)
1198 {
1199 if (h1 == 0)
1200 lv = 0;
1201 else
1202 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1203 }
1204 else
1205 lv = exact_log2 (l1 & -l1) + 1;
1206 break;
1207
1208 case CLZ:
1209 hv = 0;
1210 if (h1 != 0)
1211 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1212 - HOST_BITS_PER_WIDE_INT;
1213 else if (l1 != 0)
1214 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1215 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1216 lv = GET_MODE_BITSIZE (mode);
1217 break;
1218
1219 case CTZ:
1220 hv = 0;
1221 if (l1 != 0)
1222 lv = exact_log2 (l1 & -l1);
1223 else if (h1 != 0)
1224 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1225 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1226 lv = GET_MODE_BITSIZE (mode);
1227 break;
1228
1229 case POPCOUNT:
1230 hv = 0;
1231 lv = 0;
1232 while (l1)
1233 lv++, l1 &= l1 - 1;
1234 while (h1)
1235 lv++, h1 &= h1 - 1;
1236 break;
1237
1238 case PARITY:
1239 hv = 0;
1240 lv = 0;
1241 while (l1)
1242 lv++, l1 &= l1 - 1;
1243 while (h1)
1244 lv++, h1 &= h1 - 1;
1245 lv &= 1;
1246 break;
1247
1248 case BSWAP:
1249 {
1250 unsigned int s;
1251
1252 hv = 0;
1253 lv = 0;
1254 for (s = 0; s < width; s += 8)
1255 {
1256 unsigned int d = width - s - 8;
1257 unsigned HOST_WIDE_INT byte;
1258
1259 if (s < HOST_BITS_PER_WIDE_INT)
1260 byte = (l1 >> s) & 0xff;
1261 else
1262 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1263
1264 if (d < HOST_BITS_PER_WIDE_INT)
1265 lv |= byte << d;
1266 else
1267 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1268 }
1269 }
1270 break;
1271
1272 case TRUNCATE:
1273 /* This is just a change-of-mode, so do nothing. */
1274 lv = l1, hv = h1;
1275 break;
1276
1277 case ZERO_EXTEND:
1278 gcc_assert (op_mode != VOIDmode);
1279
1280 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1281 return 0;
1282
1283 hv = 0;
1284 lv = l1 & GET_MODE_MASK (op_mode);
1285 break;
1286
1287 case SIGN_EXTEND:
1288 if (op_mode == VOIDmode
1289 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1290 return 0;
1291 else
1292 {
1293 lv = l1 & GET_MODE_MASK (op_mode);
1294 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1295 && (lv & ((HOST_WIDE_INT) 1
1296 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1297 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1298
1299 hv = HWI_SIGN_EXTEND (lv);
1300 }
1301 break;
1302
1303 case SQRT:
1304 return 0;
1305
1306 default:
1307 return 0;
1308 }
1309
1310 return immed_double_const (lv, hv, mode);
1311 }
1312
1313 else if (GET_CODE (op) == CONST_DOUBLE
1314 && SCALAR_FLOAT_MODE_P (mode))
1315 {
1316 REAL_VALUE_TYPE d, t;
1317 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1318
1319 switch (code)
1320 {
1321 case SQRT:
1322 if (HONOR_SNANS (mode) && real_isnan (&d))
1323 return 0;
1324 real_sqrt (&t, mode, &d);
1325 d = t;
1326 break;
1327 case ABS:
1328 d = REAL_VALUE_ABS (d);
1329 break;
1330 case NEG:
1331 d = REAL_VALUE_NEGATE (d);
1332 break;
1333 case FLOAT_TRUNCATE:
1334 d = real_value_truncate (mode, d);
1335 break;
1336 case FLOAT_EXTEND:
1337 /* All this does is change the mode. */
1338 break;
1339 case FIX:
1340 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1341 break;
1342 case NOT:
1343 {
1344 long tmp[4];
1345 int i;
1346
1347 real_to_target (tmp, &d, GET_MODE (op));
1348 for (i = 0; i < 4; i++)
1349 tmp[i] = ~tmp[i];
1350 real_from_target (&d, tmp, mode);
1351 break;
1352 }
1353 default:
1354 gcc_unreachable ();
1355 }
1356 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1357 }
1358
1359 else if (GET_CODE (op) == CONST_DOUBLE
1360 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1361 && GET_MODE_CLASS (mode) == MODE_INT
1362 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1363 {
1364 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1365 operators are intentionally left unspecified (to ease implementation
1366 by target backends), for consistency, this routine implements the
1367 same semantics for constant folding as used by the middle-end. */
1368
1369 /* This was formerly used only for non-IEEE float.
1370 eggert@twinsun.com says it is safe for IEEE also. */
1371 HOST_WIDE_INT xh, xl, th, tl;
1372 REAL_VALUE_TYPE x, t;
1373 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1374 switch (code)
1375 {
1376 case FIX:
1377 if (REAL_VALUE_ISNAN (x))
1378 return const0_rtx;
1379
1380 /* Test against the signed upper bound. */
1381 if (width > HOST_BITS_PER_WIDE_INT)
1382 {
1383 th = ((unsigned HOST_WIDE_INT) 1
1384 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1385 tl = -1;
1386 }
1387 else
1388 {
1389 th = 0;
1390 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1391 }
1392 real_from_integer (&t, VOIDmode, tl, th, 0);
1393 if (REAL_VALUES_LESS (t, x))
1394 {
1395 xh = th;
1396 xl = tl;
1397 break;
1398 }
1399
1400 /* Test against the signed lower bound. */
1401 if (width > HOST_BITS_PER_WIDE_INT)
1402 {
1403 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1404 tl = 0;
1405 }
1406 else
1407 {
1408 th = -1;
1409 tl = (HOST_WIDE_INT) -1 << (width - 1);
1410 }
1411 real_from_integer (&t, VOIDmode, tl, th, 0);
1412 if (REAL_VALUES_LESS (x, t))
1413 {
1414 xh = th;
1415 xl = tl;
1416 break;
1417 }
1418 REAL_VALUE_TO_INT (&xl, &xh, x);
1419 break;
1420
1421 case UNSIGNED_FIX:
1422 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1423 return const0_rtx;
1424
1425 /* Test against the unsigned upper bound. */
1426 if (width == 2*HOST_BITS_PER_WIDE_INT)
1427 {
1428 th = -1;
1429 tl = -1;
1430 }
1431 else if (width >= HOST_BITS_PER_WIDE_INT)
1432 {
1433 th = ((unsigned HOST_WIDE_INT) 1
1434 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1435 tl = -1;
1436 }
1437 else
1438 {
1439 th = 0;
1440 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1441 }
1442 real_from_integer (&t, VOIDmode, tl, th, 1);
1443 if (REAL_VALUES_LESS (t, x))
1444 {
1445 xh = th;
1446 xl = tl;
1447 break;
1448 }
1449
1450 REAL_VALUE_TO_INT (&xl, &xh, x);
1451 break;
1452
1453 default:
1454 gcc_unreachable ();
1455 }
1456 return immed_double_const (xl, xh, mode);
1457 }
1458
1459 return NULL_RTX;
1460 }
1461 \f
1462 /* Subroutine of simplify_binary_operation to simplify a commutative,
1463 associative binary operation CODE with result mode MODE, operating
1464 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1465 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1466 canonicalization is possible. */
1467
1468 static rtx
1469 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1470 rtx op0, rtx op1)
1471 {
1472 rtx tem;
1473
1474 /* Linearize the operator to the left. */
1475 if (GET_CODE (op1) == code)
1476 {
1477       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
1478 if (GET_CODE (op0) == code)
1479 {
1480 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1481 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1482 }
1483
1484 /* "a op (b op c)" becomes "(b op c) op a". */
1485 if (! swap_commutative_operands_p (op1, op0))
1486 return simplify_gen_binary (code, mode, op1, op0);
1487
1488 tem = op0;
1489 op0 = op1;
1490 op1 = tem;
1491 }
1492
1493 if (GET_CODE (op0) == code)
1494 {
1495 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1496 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1497 {
1498 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1499 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1500 }
1501
1502 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1503 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1504 if (tem != 0)
1505 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1506
1507 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1508 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1509 if (tem != 0)
1510 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1511 }
1512
1513 return 0;
1514 }
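/* For example, (plus (plus (reg A) (const_int 3)) (reg B)) is
   re-associated above into (plus (plus (reg A) (reg B)) (const_int 3)),
   keeping the constant outermost where later folding can reach it.  */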
1515
1516
1517 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1518 and OP1. Return 0 if no simplification is possible.
1519
1520 Don't use this for relational operations such as EQ or LT.
1521 Use simplify_relational_operation instead. */
1522 rtx
1523 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1524 rtx op0, rtx op1)
1525 {
1526 rtx trueop0, trueop1;
1527 rtx tem;
1528
1529 /* Relational operations don't work here. We must know the mode
1530 of the operands in order to do the comparison correctly.
1531 Assuming a full word can give incorrect results.
1532 Consider comparing 128 with -128 in QImode. */
1533 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1534 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1535
1536 /* Make sure the constant is second. */
1537 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1538 && swap_commutative_operands_p (op0, op1))
1539 {
1540 tem = op0, op0 = op1, op1 = tem;
1541 }
1542
1543 trueop0 = avoid_constant_pool_reference (op0);
1544 trueop1 = avoid_constant_pool_reference (op1);
1545
1546 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1547 if (tem)
1548 return tem;
1549 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1550 }
1551
1552 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1553 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1554 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1555 actual constants. */
1556
1557 static rtx
1558 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1559 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1560 {
1561 rtx tem, reversed, opleft, opright;
1562 HOST_WIDE_INT val;
1563 unsigned int width = GET_MODE_BITSIZE (mode);
1564
1565 /* Even if we can't compute a constant result,
1566 there are some cases worth simplifying. */
1567
1568 switch (code)
1569 {
1570 case PLUS:
1571 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1572 when x is NaN, infinite, or finite and nonzero. They aren't
1573 when x is -0 and the rounding mode is not towards -infinity,
1574 since (-0) + 0 is then 0. */
1575 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1576 return op0;
1577
1578 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1579 transformations are safe even for IEEE. */
1580 if (GET_CODE (op0) == NEG)
1581 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1582 else if (GET_CODE (op1) == NEG)
1583 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1584
1585 /* (~a) + 1 -> -a */
1586 if (INTEGRAL_MODE_P (mode)
1587 && GET_CODE (op0) == NOT
1588 && trueop1 == const1_rtx)
1589 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1590
1591 /* Handle both-operands-constant cases. We can only add
1592 CONST_INTs to constants since the sum of relocatable symbols
1593 can't be handled by most assemblers. Don't add CONST_INT
1594 to CONST_INT since overflow won't be computed properly if wider
1595 than HOST_BITS_PER_WIDE_INT. */
1596
1597 if ((GET_CODE (op0) == CONST
1598 || GET_CODE (op0) == SYMBOL_REF
1599 || GET_CODE (op0) == LABEL_REF)
1600 && CONST_INT_P (op1))
1601 return plus_constant (op0, INTVAL (op1));
1602 else if ((GET_CODE (op1) == CONST
1603 || GET_CODE (op1) == SYMBOL_REF
1604 || GET_CODE (op1) == LABEL_REF)
1605 && CONST_INT_P (op0))
1606 return plus_constant (op1, INTVAL (op0));
1607
1608 /* See if this is something like X * C - X or vice versa or
1609 if the multiplication is written as a shift. If so, we can
1610 distribute and make a new multiply, shift, or maybe just
1611 have X (if C is 2 in the example above). But don't make
1612 something more expensive than we had before. */
1613
1614 if (SCALAR_INT_MODE_P (mode))
1615 {
1616 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1617 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1618 rtx lhs = op0, rhs = op1;
1619
1620 if (GET_CODE (lhs) == NEG)
1621 {
1622 coeff0l = -1;
1623 coeff0h = -1;
1624 lhs = XEXP (lhs, 0);
1625 }
1626 else if (GET_CODE (lhs) == MULT
1627 && CONST_INT_P (XEXP (lhs, 1)))
1628 {
1629 coeff0l = INTVAL (XEXP (lhs, 1));
1630 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1631 lhs = XEXP (lhs, 0);
1632 }
1633 else if (GET_CODE (lhs) == ASHIFT
1634 && CONST_INT_P (XEXP (lhs, 1))
1635 && INTVAL (XEXP (lhs, 1)) >= 0
1636 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1637 {
1638 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1639 coeff0h = 0;
1640 lhs = XEXP (lhs, 0);
1641 }
1642
1643 if (GET_CODE (rhs) == NEG)
1644 {
1645 coeff1l = -1;
1646 coeff1h = -1;
1647 rhs = XEXP (rhs, 0);
1648 }
1649 else if (GET_CODE (rhs) == MULT
1650 && CONST_INT_P (XEXP (rhs, 1)))
1651 {
1652 coeff1l = INTVAL (XEXP (rhs, 1));
1653 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1654 rhs = XEXP (rhs, 0);
1655 }
1656 else if (GET_CODE (rhs) == ASHIFT
1657 && CONST_INT_P (XEXP (rhs, 1))
1658 && INTVAL (XEXP (rhs, 1)) >= 0
1659 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1660 {
1661 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1662 coeff1h = 0;
1663 rhs = XEXP (rhs, 0);
1664 }
1665
1666 if (rtx_equal_p (lhs, rhs))
1667 {
1668 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1669 rtx coeff;
1670 unsigned HOST_WIDE_INT l;
1671 HOST_WIDE_INT h;
1672 bool speed = optimize_function_for_speed_p (cfun);
1673
1674 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1675 coeff = immed_double_const (l, h, mode);
1676
1677 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1678 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1679 ? tem : 0;
1680 }
1681 }
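      /* For example, the block above combines
         (plus (mult (reg X) (const_int 3)) (reg X)) into
         (mult (reg X) (const_int 4)), provided the new form is no more
         expensive than the original.  */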
1682
1683 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1684 if ((CONST_INT_P (op1)
1685 || GET_CODE (op1) == CONST_DOUBLE)
1686 && GET_CODE (op0) == XOR
1687 && (CONST_INT_P (XEXP (op0, 1))
1688 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1689 && mode_signbit_p (mode, op1))
1690 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1691 simplify_gen_binary (XOR, mode, op1,
1692 XEXP (op0, 1)));
1693
1694 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1695 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1696 && GET_CODE (op0) == MULT
1697 && GET_CODE (XEXP (op0, 0)) == NEG)
1698 {
1699 rtx in1, in2;
1700
1701 in1 = XEXP (XEXP (op0, 0), 0);
1702 in2 = XEXP (op0, 1);
1703 return simplify_gen_binary (MINUS, mode, op1,
1704 simplify_gen_binary (MULT, mode,
1705 in1, in2));
1706 }
1707
1708 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1709 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1710 is 1. */
1711 if (COMPARISON_P (op0)
1712 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1713 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1714 && (reversed = reversed_comparison (op0, mode)))
1715 return
1716 simplify_gen_unary (NEG, mode, reversed, mode);
1717
1718 /* If one of the operands is a PLUS or a MINUS, see if we can
1719 simplify this by the associative law.
1720 Don't use the associative law for floating point.
1721 The inaccuracy makes it nonassociative,
1722 and subtle programs can break if operations are associated. */
1723
1724 if (INTEGRAL_MODE_P (mode)
1725 && (plus_minus_operand_p (op0)
1726 || plus_minus_operand_p (op1))
1727 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1728 return tem;
1729
1730 /* Reassociate floating point addition only when the user
1731 specifies associative math operations. */
1732 if (FLOAT_MODE_P (mode)
1733 && flag_associative_math)
1734 {
1735 tem = simplify_associative_operation (code, mode, op0, op1);
1736 if (tem)
1737 return tem;
1738 }
1739 break;
1740
1741 case COMPARE:
1742 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1743 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1744 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1745 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1746 {
1747 rtx xop00 = XEXP (op0, 0);
1748 rtx xop10 = XEXP (op1, 0);
1749
1750 #ifdef HAVE_cc0
1751 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1752 #else
1753 if (REG_P (xop00) && REG_P (xop10)
1754 && GET_MODE (xop00) == GET_MODE (xop10)
1755 && REGNO (xop00) == REGNO (xop10)
1756 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1757 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1758 #endif
1759 return xop00;
1760 }
1761 break;
1762
1763 case MINUS:
1764 /* We can't assume x-x is 0 even with non-IEEE floating point,
1765 but since it is zero except in very strange circumstances, we
1766 will treat it as zero with -ffinite-math-only. */
1767 if (rtx_equal_p (trueop0, trueop1)
1768 && ! side_effects_p (op0)
1769 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1770 return CONST0_RTX (mode);
1771
1772 /* Change subtraction from zero into negation. (0 - x) is the
1773 same as -x when x is NaN, infinite, or finite and nonzero.
1774 But if the mode has signed zeros, and does not round towards
1775 -infinity, then 0 - 0 is 0, not -0. */
1776 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1777 return simplify_gen_unary (NEG, mode, op1, mode);
1778
1779 /* (-1 - a) is ~a. */
1780 if (trueop0 == constm1_rtx)
1781 return simplify_gen_unary (NOT, mode, op1, mode);
1782
1783 /* Subtracting 0 has no effect unless the mode has signed zeros
1784 and supports rounding towards -infinity. In such a case,
1785 0 - 0 is -0. */
1786 if (!(HONOR_SIGNED_ZEROS (mode)
1787 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1788 && trueop1 == CONST0_RTX (mode))
1789 return op0;
1790
1791 /* See if this is something like X * C - X or vice versa or
1792 if the multiplication is written as a shift. If so, we can
1793 distribute and make a new multiply, shift, or maybe just
1794 have X (if C is 2 in the example above). But don't make
1795 something more expensive than we had before. */
1796
1797 if (SCALAR_INT_MODE_P (mode))
1798 {
1799 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1800 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1801 rtx lhs = op0, rhs = op1;
1802
1803 if (GET_CODE (lhs) == NEG)
1804 {
1805 coeff0l = -1;
1806 coeff0h = -1;
1807 lhs = XEXP (lhs, 0);
1808 }
1809 else if (GET_CODE (lhs) == MULT
1810 && CONST_INT_P (XEXP (lhs, 1)))
1811 {
1812 coeff0l = INTVAL (XEXP (lhs, 1));
1813 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1814 lhs = XEXP (lhs, 0);
1815 }
1816 else if (GET_CODE (lhs) == ASHIFT
1817 && CONST_INT_P (XEXP (lhs, 1))
1818 && INTVAL (XEXP (lhs, 1)) >= 0
1819 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1820 {
1821 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1822 coeff0h = 0;
1823 lhs = XEXP (lhs, 0);
1824 }
1825
1826 if (GET_CODE (rhs) == NEG)
1827 {
1828 negcoeff1l = 1;
1829 negcoeff1h = 0;
1830 rhs = XEXP (rhs, 0);
1831 }
1832 else if (GET_CODE (rhs) == MULT
1833 && CONST_INT_P (XEXP (rhs, 1)))
1834 {
1835 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1836 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1837 rhs = XEXP (rhs, 0);
1838 }
1839 else if (GET_CODE (rhs) == ASHIFT
1840 && CONST_INT_P (XEXP (rhs, 1))
1841 && INTVAL (XEXP (rhs, 1)) >= 0
1842 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1843 {
1844 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1845 negcoeff1h = -1;
1846 rhs = XEXP (rhs, 0);
1847 }
1848
1849 if (rtx_equal_p (lhs, rhs))
1850 {
1851 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1852 rtx coeff;
1853 unsigned HOST_WIDE_INT l;
1854 HOST_WIDE_INT h;
1855 bool speed = optimize_function_for_speed_p (cfun);
1856
1857 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1858 coeff = immed_double_const (l, h, mode);
1859
1860 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1861 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1862 ? tem : 0;
1863 }
1864 }
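      /* For example, the block above rewrites
         (minus (mult (reg X) (const_int 3)) (reg X)) as
         (mult (reg X) (const_int 2)), provided the new form is no more
         expensive than the original.  */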
1865
1866 /* (a - (-b)) -> (a + b). True even for IEEE. */
1867 if (GET_CODE (op1) == NEG)
1868 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1869
1870 /* (-x - c) may be simplified as (-c - x). */
1871 if (GET_CODE (op0) == NEG
1872 && (CONST_INT_P (op1)
1873 || GET_CODE (op1) == CONST_DOUBLE))
1874 {
1875 tem = simplify_unary_operation (NEG, mode, op1, mode);
1876 if (tem)
1877 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1878 }
1879
1880 /* Don't let a relocatable value get a negative coeff. */
1881 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
1882 return simplify_gen_binary (PLUS, mode,
1883 op0,
1884 neg_const_int (mode, op1));
1885
1886 /* (x - (x & y)) -> (x & ~y) */
1887 if (GET_CODE (op1) == AND)
1888 {
1889 if (rtx_equal_p (op0, XEXP (op1, 0)))
1890 {
1891 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1892 GET_MODE (XEXP (op1, 1)));
1893 return simplify_gen_binary (AND, mode, op0, tem);
1894 }
1895 if (rtx_equal_p (op0, XEXP (op1, 1)))
1896 {
1897 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1898 GET_MODE (XEXP (op1, 0)));
1899 return simplify_gen_binary (AND, mode, op0, tem);
1900 }
1901 }
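/* The identity holds because (x & y) can only contain bits that are
   already set in x, so the subtraction merely clears those bits.
   For example, with x = 0b1100 and y = 0b1010: x - (x & y)
   = 0b1100 - 0b1000 = 0b0100, which equals x & ~y.  */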
1902
1903 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1904 by reversing the comparison code if valid. */
1905 if (STORE_FLAG_VALUE == 1
1906 && trueop0 == const1_rtx
1907 && COMPARISON_P (op1)
1908 && (reversed = reversed_comparison (op1, mode)))
1909 return reversed;
1910
1911 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1912 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1913 && GET_CODE (op1) == MULT
1914 && GET_CODE (XEXP (op1, 0)) == NEG)
1915 {
1916 rtx in1, in2;
1917
1918 in1 = XEXP (XEXP (op1, 0), 0);
1919 in2 = XEXP (op1, 1);
1920 return simplify_gen_binary (PLUS, mode,
1921 simplify_gen_binary (MULT, mode,
1922 in1, in2),
1923 op0);
1924 }
1925
1926 /* Canonicalize (minus (neg A) (mult B C)) to
1927 (minus (mult (neg B) C) A). */
1928 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1929 && GET_CODE (op1) == MULT
1930 && GET_CODE (op0) == NEG)
1931 {
1932 rtx in1, in2;
1933
1934 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1935 in2 = XEXP (op1, 1);
1936 return simplify_gen_binary (MINUS, mode,
1937 simplify_gen_binary (MULT, mode,
1938 in1, in2),
1939 XEXP (op0, 0));
1940 }
1941
1942 /* If one of the operands is a PLUS or a MINUS, see if we can
1943 simplify this by the associative law. This will, for example,
1944 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1945 Don't use the associative law for floating point.
1946 The inaccuracy makes it nonassociative,
1947 and subtle programs can break if operations are associated. */
1948
1949 if (INTEGRAL_MODE_P (mode)
1950 && (plus_minus_operand_p (op0)
1951 || plus_minus_operand_p (op1))
1952 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1953 return tem;
1954 break;
1955
1956 case MULT:
1957 if (trueop1 == constm1_rtx)
1958 return simplify_gen_unary (NEG, mode, op0, mode);
1959
1960 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1961 x is NaN, since x * 0 is then also NaN. Nor is it valid
1962 when the mode has signed zeros, since multiplying a negative
1963 number by 0 will give -0, not 0. */
1964 if (!HONOR_NANS (mode)
1965 && !HONOR_SIGNED_ZEROS (mode)
1966 && trueop1 == CONST0_RTX (mode)
1967 && ! side_effects_p (op0))
1968 return op1;
1969
1970 /* In IEEE floating point, x*1 is not equivalent to x for
1971 signalling NaNs. */
1972 if (!HONOR_SNANS (mode)
1973 && trueop1 == CONST1_RTX (mode))
1974 return op0;
1975
1976 /* Convert multiply by constant power of two into shift unless
1977 we are still generating RTL. This test is a kludge. */
1978 if (CONST_INT_P (trueop1)
1979 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1980 /* If the mode is larger than the host word size, and the
1981 uppermost bit is set, then this isn't a power of two due
1982 to implicit sign extension. */
1983 && (width <= HOST_BITS_PER_WIDE_INT
1984 || val != HOST_BITS_PER_WIDE_INT - 1))
1985 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
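/* For example, (mult:SI X (const_int 8)) becomes
   (ashift:SI X (const_int 3)), since exact_log2 (8) == 3.  */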
1986
1987 /* Likewise for multipliers wider than a word. */
1988 if (GET_CODE (trueop1) == CONST_DOUBLE
1989 && (GET_MODE (trueop1) == VOIDmode
1990 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1991 && GET_MODE (op0) == mode
1992 && CONST_DOUBLE_LOW (trueop1) == 0
1993 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1994 return simplify_gen_binary (ASHIFT, mode, op0,
1995 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1996
1997 /* x*2 is x+x and x*(-1) is -x */
1998 if (GET_CODE (trueop1) == CONST_DOUBLE
1999 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2000 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2001 && GET_MODE (op0) == mode)
2002 {
2003 REAL_VALUE_TYPE d;
2004 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2005
2006 if (REAL_VALUES_EQUAL (d, dconst2))
2007 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2008
2009 if (!HONOR_SNANS (mode)
2010 && REAL_VALUES_EQUAL (d, dconstm1))
2011 return simplify_gen_unary (NEG, mode, op0, mode);
2012 }
2013
2014 /* Optimize -x * -x as x * x. */
2015 if (FLOAT_MODE_P (mode)
2016 && GET_CODE (op0) == NEG
2017 && GET_CODE (op1) == NEG
2018 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2019 && !side_effects_p (XEXP (op0, 0)))
2020 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2021
2022 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2023 if (SCALAR_FLOAT_MODE_P (mode)
2024 && GET_CODE (op0) == ABS
2025 && GET_CODE (op1) == ABS
2026 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2027 && !side_effects_p (XEXP (op0, 0)))
2028 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2029
2030 /* Reassociate multiplication, but for floating point MULTs
2031 only when the user specifies unsafe math optimizations. */
2032 if (! FLOAT_MODE_P (mode)
2033 || flag_unsafe_math_optimizations)
2034 {
2035 tem = simplify_associative_operation (code, mode, op0, op1);
2036 if (tem)
2037 return tem;
2038 }
2039 break;
2040
2041 case IOR:
2042 if (trueop1 == const0_rtx)
2043 return op0;
2044 if (CONST_INT_P (trueop1)
2045 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2046 == GET_MODE_MASK (mode)))
2047 return op1;
2048 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2049 return op0;
2050 /* A | (~A) -> -1 */
2051 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2052 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2053 && ! side_effects_p (op0)
2054 && SCALAR_INT_MODE_P (mode))
2055 return constm1_rtx;
2056
2057 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2058 if (CONST_INT_P (op1)
2059 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2060 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2061 return op1;
2062
2063 /* Canonicalize (X & C1) | C2. */
2064 if (GET_CODE (op0) == AND
2065 && CONST_INT_P (trueop1)
2066 && CONST_INT_P (XEXP (op0, 1)))
2067 {
2068 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2069 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2070 HOST_WIDE_INT c2 = INTVAL (trueop1);
2071
2072 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2073 if ((c1 & c2) == c1
2074 && !side_effects_p (XEXP (op0, 0)))
2075 return trueop1;
2076
2077 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2078 if (((c1|c2) & mask) == mask)
2079 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2080
2081 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2082 if (((c1 & ~c2) & mask) != (c1 & mask))
2083 {
2084 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2085 gen_int_mode (c1 & ~c2, mode));
2086 return simplify_gen_binary (IOR, mode, tem, op1);
2087 }
2088 }
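/* Worked example for the last case, assuming an 8-bit mode:
   (ior (and X (const_int 0x0f)) (const_int 0x03)) has
   C1 & ~C2 == 0x0c, so it is rewritten as
   (ior (and X (const_int 0x0c)) (const_int 0x03)), which denotes
   the same value with fewer bits set in C1.  */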
2089
2090 /* Convert (A & B) | A to A. */
2091 if (GET_CODE (op0) == AND
2092 && (rtx_equal_p (XEXP (op0, 0), op1)
2093 || rtx_equal_p (XEXP (op0, 1), op1))
2094 && ! side_effects_p (XEXP (op0, 0))
2095 && ! side_effects_p (XEXP (op0, 1)))
2096 return op1;
2097
2098 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2099 mode size to (rotate A CX). */
2100
2101 if (GET_CODE (op1) == ASHIFT
2102 || GET_CODE (op1) == SUBREG)
2103 {
2104 opleft = op1;
2105 opright = op0;
2106 }
2107 else
2108 {
2109 opright = op1;
2110 opleft = op0;
2111 }
2112
2113 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2114 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2115 && CONST_INT_P (XEXP (opleft, 1))
2116 && CONST_INT_P (XEXP (opright, 1))
2117 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2118 == GET_MODE_BITSIZE (mode)))
2119 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
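/* For example, in a 32-bit mode such as SImode,
   (ior (ashift A (const_int 8)) (lshiftrt A (const_int 24)))
   becomes (rotate A (const_int 8)), since 8 + 24 == 32.  */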
2120
2121 /* Same, but for ashift that has been "simplified" to a wider mode
2122 by simplify_shift_const. */
2123
2124 if (GET_CODE (opleft) == SUBREG
2125 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2126 && GET_CODE (opright) == LSHIFTRT
2127 && GET_CODE (XEXP (opright, 0)) == SUBREG
2128 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2129 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2130 && (GET_MODE_SIZE (GET_MODE (opleft))
2131 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2132 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2133 SUBREG_REG (XEXP (opright, 0)))
2134 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2135 && CONST_INT_P (XEXP (opright, 1))
2136 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2137 == GET_MODE_BITSIZE (mode)))
2138 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2139 XEXP (SUBREG_REG (opleft), 1));
2140
2141 /* If we have (ior (and X C1) C2), simplify this by making
2142 C1 as small as possible if C1 actually changes. */
2143 if (CONST_INT_P (op1)
2144 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2145 || INTVAL (op1) > 0)
2146 && GET_CODE (op0) == AND
2147 && CONST_INT_P (XEXP (op0, 1))
2148 && CONST_INT_P (op1)
2149 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2150 return simplify_gen_binary (IOR, mode,
2151 simplify_gen_binary
2152 (AND, mode, XEXP (op0, 0),
2153 GEN_INT (INTVAL (XEXP (op0, 1))
2154 & ~INTVAL (op1))),
2155 op1);
2156
2157 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2158 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the PLUS
2159 does not affect any of the bits in OP1, we can do the IOR as a
2160 PLUS and then associate. This is valid if OP1 can be safely
2161 shifted left C bits. */
2162 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2163 && GET_CODE (XEXP (op0, 0)) == PLUS
2164 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2165 && CONST_INT_P (XEXP (op0, 1))
2166 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2167 {
2168 int count = INTVAL (XEXP (op0, 1));
2169 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2170
2171 if (mask >> count == INTVAL (trueop1)
2172 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2173 return simplify_gen_binary (ASHIFTRT, mode,
2174 plus_constant (XEXP (op0, 0), mask),
2175 XEXP (op0, 1));
2176 }
2177
2178 tem = simplify_associative_operation (code, mode, op0, op1);
2179 if (tem)
2180 return tem;
2181 break;
2182
2183 case XOR:
2184 if (trueop1 == const0_rtx)
2185 return op0;
2186 if (CONST_INT_P (trueop1)
2187 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2188 == GET_MODE_MASK (mode)))
2189 return simplify_gen_unary (NOT, mode, op0, mode);
2190 if (rtx_equal_p (trueop0, trueop1)
2191 && ! side_effects_p (op0)
2192 && GET_MODE_CLASS (mode) != MODE_CC)
2193 return CONST0_RTX (mode);
2194
2195 /* Canonicalize XOR of the most significant bit to PLUS. */
2196 if ((CONST_INT_P (op1)
2197 || GET_CODE (op1) == CONST_DOUBLE)
2198 && mode_signbit_p (mode, op1))
2199 return simplify_gen_binary (PLUS, mode, op0, op1);
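/* This is valid because adding the sign-bit constant flips the sign
   bit and any carry out of the top bit is discarded; e.g. for an
   8-bit value, x ^ 0x80 and x + 0x80 agree modulo 256.  */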
2200 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2201 if ((CONST_INT_P (op1)
2202 || GET_CODE (op1) == CONST_DOUBLE)
2203 && GET_CODE (op0) == PLUS
2204 && (CONST_INT_P (XEXP (op0, 1))
2205 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2206 && mode_signbit_p (mode, XEXP (op0, 1)))
2207 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2208 simplify_gen_binary (XOR, mode, op1,
2209 XEXP (op0, 1)));
2210
2211 /* If we are XORing two things that have no bits in common,
2212 convert them into an IOR. This helps to detect rotation encoded
2213 using those methods and possibly other simplifications. */
2214
2215 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2216 && (nonzero_bits (op0, mode)
2217 & nonzero_bits (op1, mode)) == 0)
2218 return (simplify_gen_binary (IOR, mode, op0, op1));
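/* E.g. if nonzero_bits shows op0 can only set 0x0f and op1 can only
   set 0xf0, then no bit position is set in both operands, and
   x ^ y == x | y (0x05 ^ 0x30 == 0x05 | 0x30 == 0x35).  */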
2219
2220 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2221 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2222 (NOT y). */
2223 {
2224 int num_negated = 0;
2225
2226 if (GET_CODE (op0) == NOT)
2227 num_negated++, op0 = XEXP (op0, 0);
2228 if (GET_CODE (op1) == NOT)
2229 num_negated++, op1 = XEXP (op1, 0);
2230
2231 if (num_negated == 2)
2232 return simplify_gen_binary (XOR, mode, op0, op1);
2233 else if (num_negated == 1)
2234 return simplify_gen_unary (NOT, mode,
2235 simplify_gen_binary (XOR, mode, op0, op1),
2236 mode);
2237 }
2238
2239 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2240 correspond to a machine insn or result in further simplifications
2241 if B is a constant. */
2242
2243 if (GET_CODE (op0) == AND
2244 && rtx_equal_p (XEXP (op0, 1), op1)
2245 && ! side_effects_p (op1))
2246 return simplify_gen_binary (AND, mode,
2247 simplify_gen_unary (NOT, mode,
2248 XEXP (op0, 0), mode),
2249 op1);
2250
2251 else if (GET_CODE (op0) == AND
2252 && rtx_equal_p (XEXP (op0, 0), op1)
2253 && ! side_effects_p (op1))
2254 return simplify_gen_binary (AND, mode,
2255 simplify_gen_unary (NOT, mode,
2256 XEXP (op0, 1), mode),
2257 op1);
2258
2259 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2260 comparison if STORE_FLAG_VALUE is 1. */
2261 if (STORE_FLAG_VALUE == 1
2262 && trueop1 == const1_rtx
2263 && COMPARISON_P (op0)
2264 && (reversed = reversed_comparison (op0, mode)))
2265 return reversed;
2266
2267 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2268 is (lt foo (const_int 0)), so we can perform the above
2269 simplification if STORE_FLAG_VALUE is 1. */
2270
2271 if (STORE_FLAG_VALUE == 1
2272 && trueop1 == const1_rtx
2273 && GET_CODE (op0) == LSHIFTRT
2274 && CONST_INT_P (XEXP (op0, 1))
2275 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2276 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2277
2278 /* (xor (comparison foo bar) (const_int sign-bit))
2279 when STORE_FLAG_VALUE is the sign bit. */
2280 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2281 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2282 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2283 && trueop1 == const_true_rtx
2284 && COMPARISON_P (op0)
2285 && (reversed = reversed_comparison (op0, mode)))
2286 return reversed;
2287
2288 tem = simplify_associative_operation (code, mode, op0, op1);
2289 if (tem)
2290 return tem;
2291 break;
2292
2293 case AND:
2294 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2295 return trueop1;
2296 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2297 {
2298 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2299 HOST_WIDE_INT nzop1;
2300 if (CONST_INT_P (trueop1))
2301 {
2302 HOST_WIDE_INT val1 = INTVAL (trueop1);
2303 /* If we are turning off bits already known off in OP0, we need
2304 not do an AND. */
2305 if ((nzop0 & ~val1) == 0)
2306 return op0;
2307 }
2308 nzop1 = nonzero_bits (trueop1, mode);
2309 /* If we are clearing all the nonzero bits, the result is zero. */
2310 if ((nzop1 & nzop0) == 0
2311 && !side_effects_p (op0) && !side_effects_p (op1))
2312 return CONST0_RTX (mode);
2313 }
2314 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2315 && GET_MODE_CLASS (mode) != MODE_CC)
2316 return op0;
2317 /* A & (~A) -> 0 */
2318 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2319 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2320 && ! side_effects_p (op0)
2321 && GET_MODE_CLASS (mode) != MODE_CC)
2322 return CONST0_RTX (mode);
2323
2324 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2325 there are no nonzero bits of C outside of X's mode. */
2326 if ((GET_CODE (op0) == SIGN_EXTEND
2327 || GET_CODE (op0) == ZERO_EXTEND)
2328 && CONST_INT_P (trueop1)
2329 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2330 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2331 & INTVAL (trueop1)) == 0)
2332 {
2333 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2334 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2335 gen_int_mode (INTVAL (trueop1),
2336 imode));
2337 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2338 }
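/* For example, (and:SI (sign_extend:SI (reg:QI R)) (const_int 0x7f))
   becomes (zero_extend:SI (and:QI (reg:QI R) (const_int 0x7f))),
   since 0x7f has no bits outside QImode's mask.  */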
2339
2340 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2341 we might be able to further simplify the AND with X and potentially
2342 remove the truncation altogether. */
2343 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2344 {
2345 rtx x = XEXP (op0, 0);
2346 enum machine_mode xmode = GET_MODE (x);
2347 tem = simplify_gen_binary (AND, xmode, x,
2348 gen_int_mode (INTVAL (trueop1), xmode));
2349 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2350 }
2351
2352 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2353 if (GET_CODE (op0) == IOR
2354 && CONST_INT_P (trueop1)
2355 && CONST_INT_P (XEXP (op0, 1)))
2356 {
2357 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2358 return simplify_gen_binary (IOR, mode,
2359 simplify_gen_binary (AND, mode,
2360 XEXP (op0, 0), op1),
2361 gen_int_mode (tmp, mode));
2362 }
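/* E.g. (and (ior A (const_int 0x0f)) (const_int 0x3c)) becomes
   (ior (and A (const_int 0x3c)) (const_int 0x0c)), folding
   C1 & C2 == 0x0f & 0x3c == 0x0c.  */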
2363
2364 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2365 insn (and may simplify more). */
2366 if (GET_CODE (op0) == XOR
2367 && rtx_equal_p (XEXP (op0, 0), op1)
2368 && ! side_effects_p (op1))
2369 return simplify_gen_binary (AND, mode,
2370 simplify_gen_unary (NOT, mode,
2371 XEXP (op0, 1), mode),
2372 op1);
2373
2374 if (GET_CODE (op0) == XOR
2375 && rtx_equal_p (XEXP (op0, 1), op1)
2376 && ! side_effects_p (op1))
2377 return simplify_gen_binary (AND, mode,
2378 simplify_gen_unary (NOT, mode,
2379 XEXP (op0, 0), mode),
2380 op1);
2381
2382 /* Similarly for (~(A ^ B)) & A. */
2383 if (GET_CODE (op0) == NOT
2384 && GET_CODE (XEXP (op0, 0)) == XOR
2385 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2386 && ! side_effects_p (op1))
2387 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2388
2389 if (GET_CODE (op0) == NOT
2390 && GET_CODE (XEXP (op0, 0)) == XOR
2391 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2392 && ! side_effects_p (op1))
2393 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2394
2395 /* Convert (A | B) & A to A. */
2396 if (GET_CODE (op0) == IOR
2397 && (rtx_equal_p (XEXP (op0, 0), op1)
2398 || rtx_equal_p (XEXP (op0, 1), op1))
2399 && ! side_effects_p (XEXP (op0, 0))
2400 && ! side_effects_p (XEXP (op0, 1)))
2401 return op1;
2402
2403 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2404 ((A & N) + B) & M -> (A + B) & M
2405 Similarly if (N & M) == 0,
2406 ((A | N) + B) & M -> (A + B) & M
2407 and for - instead of + and/or ^ instead of |.
2408 Also, if (N & M) == 0, then
2409 (A +- N) & M -> A & M. */
2410 if (CONST_INT_P (trueop1)
2411 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2412 && ~INTVAL (trueop1)
2413 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2414 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2415 {
2416 rtx pmop[2];
2417 int which;
2418
2419 pmop[0] = XEXP (op0, 0);
2420 pmop[1] = XEXP (op0, 1);
2421
2422 if (CONST_INT_P (pmop[1])
2423 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2424 return simplify_gen_binary (AND, mode, pmop[0], op1);
2425
2426 for (which = 0; which < 2; which++)
2427 {
2428 tem = pmop[which];
2429 switch (GET_CODE (tem))
2430 {
2431 case AND:
2432 if (CONST_INT_P (XEXP (tem, 1))
2433 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2434 == INTVAL (trueop1))
2435 pmop[which] = XEXP (tem, 0);
2436 break;
2437 case IOR:
2438 case XOR:
2439 if (CONST_INT_P (XEXP (tem, 1))
2440 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2441 pmop[which] = XEXP (tem, 0);
2442 break;
2443 default:
2444 break;
2445 }
2446 }
2447
2448 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2449 {
2450 tem = simplify_gen_binary (GET_CODE (op0), mode,
2451 pmop[0], pmop[1]);
2452 return simplify_gen_binary (code, mode, tem, op1);
2453 }
2454 }
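/* Worked example: with M == 0xff (a low-bit mask) and N == 0xffff,
   (N & M) == M, so ((A & 0xffff) + B) & 0xff becomes (A + B) & 0xff;
   the inner AND keeps every bit of A that can influence the low
   eight bits of the sum.  Similarly (A + 0x100) & 0xff, where
   (N & M) == 0, reduces to A & 0xff.  */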
2455
2456 /* (and X (ior (not X) Y)) -> (and X Y) */
2457 if (GET_CODE (op1) == IOR
2458 && GET_CODE (XEXP (op1, 0)) == NOT
2459 && op0 == XEXP (XEXP (op1, 0), 0))
2460 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2461
2462 /* (and (ior (not X) Y) X) -> (and X Y) */
2463 if (GET_CODE (op0) == IOR
2464 && GET_CODE (XEXP (op0, 0)) == NOT
2465 && op1 == XEXP (XEXP (op0, 0), 0))
2466 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2467
2468 tem = simplify_associative_operation (code, mode, op0, op1);
2469 if (tem)
2470 return tem;
2471 break;
2472
2473 case UDIV:
2474 /* 0/x is 0 (or x&0 if x has side-effects). */
2475 if (trueop0 == CONST0_RTX (mode))
2476 {
2477 if (side_effects_p (op1))
2478 return simplify_gen_binary (AND, mode, op1, trueop0);
2479 return trueop0;
2480 }
2481 /* x/1 is x. */
2482 if (trueop1 == CONST1_RTX (mode))
2483 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2484 /* Convert divide by power of two into shift. */
2485 if (CONST_INT_P (trueop1)
2486 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2487 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
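/* For example, (udiv X (const_int 16)) becomes
   (lshiftrt X (const_int 4)); this is only valid for unsigned
   division (signed division by a power of two needs a rounding
   adjustment).  */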
2488 break;
2489
2490 case DIV:
2491 /* Handle floating point and integers separately. */
2492 if (SCALAR_FLOAT_MODE_P (mode))
2493 {
2494 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2495 safe for modes with NaNs, since 0.0 / 0.0 will then be
2496 NaN rather than 0.0. Nor is it safe for modes with signed
2497 zeros, since dividing 0 by a negative number gives -0.0 */
2498 if (trueop0 == CONST0_RTX (mode)
2499 && !HONOR_NANS (mode)
2500 && !HONOR_SIGNED_ZEROS (mode)
2501 && ! side_effects_p (op1))
2502 return op0;
2503 /* x/1.0 is x. */
2504 if (trueop1 == CONST1_RTX (mode)
2505 && !HONOR_SNANS (mode))
2506 return op0;
2507
2508 if (GET_CODE (trueop1) == CONST_DOUBLE
2509 && trueop1 != CONST0_RTX (mode))
2510 {
2511 REAL_VALUE_TYPE d;
2512 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2513
2514 /* x/-1.0 is -x. */
2515 if (REAL_VALUES_EQUAL (d, dconstm1)
2516 && !HONOR_SNANS (mode))
2517 return simplify_gen_unary (NEG, mode, op0, mode);
2518
2519 /* Change FP division by a constant into multiplication.
2520 Only do this with -freciprocal-math. */
2521 if (flag_reciprocal_math
2522 && !REAL_VALUES_EQUAL (d, dconst0))
2523 {
2524 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2525 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2526 return simplify_gen_binary (MULT, mode, op0, tem);
2527 }
2528 }
2529 }
2530 else
2531 {
2532 /* 0/x is 0 (or x&0 if x has side-effects). */
2533 if (trueop0 == CONST0_RTX (mode))
2534 {
2535 if (side_effects_p (op1))
2536 return simplify_gen_binary (AND, mode, op1, trueop0);
2537 return trueop0;
2538 }
2539 /* x/1 is x. */
2540 if (trueop1 == CONST1_RTX (mode))
2541 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2542 /* x/-1 is -x. */
2543 if (trueop1 == constm1_rtx)
2544 {
2545 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2546 return simplify_gen_unary (NEG, mode, x, mode);
2547 }
2548 }
2549 break;
2550
2551 case UMOD:
2552 /* 0%x is 0 (or x&0 if x has side-effects). */
2553 if (trueop0 == CONST0_RTX (mode))
2554 {
2555 if (side_effects_p (op1))
2556 return simplify_gen_binary (AND, mode, op1, trueop0);
2557 return trueop0;
2558 }
2559 /* x%1 is 0 (of x&0 if x has side-effects). */
2560 if (trueop1 == CONST1_RTX (mode))
2561 {
2562 if (side_effects_p (op0))
2563 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2564 return CONST0_RTX (mode);
2565 }
2566 /* Implement modulus by power of two as AND. */
2567 if (CONST_INT_P (trueop1)
2568 && exact_log2 (INTVAL (trueop1)) > 0)
2569 return simplify_gen_binary (AND, mode, op0,
2570 GEN_INT (INTVAL (op1) - 1));
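/* For example, (umod X (const_int 8)) becomes (and X (const_int 7)),
   since the remainder of an unsigned division by 8 is just the low
   three bits of X.  */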
2571 break;
2572
2573 case MOD:
2574 /* 0%x is 0 (or x&0 if x has side-effects). */
2575 if (trueop0 == CONST0_RTX (mode))
2576 {
2577 if (side_effects_p (op1))
2578 return simplify_gen_binary (AND, mode, op1, trueop0);
2579 return trueop0;
2580 }
2581 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2582 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2583 {
2584 if (side_effects_p (op0))
2585 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2586 return CONST0_RTX (mode);
2587 }
2588 break;
2589
2590 case ROTATERT:
2591 case ROTATE:
2592 case ASHIFTRT:
2593 if (trueop1 == CONST0_RTX (mode))
2594 return op0;
2595 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2596 return op0;
2597 /* Rotating ~0 always results in ~0. */
2598 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2599 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2600 && ! side_effects_p (op1))
2601 return op0;
2602 canonicalize_shift:
2603 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2604 {
2605 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2606 if (val != INTVAL (op1))
2607 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2608 }
2609 break;
2610
2611 case ASHIFT:
2612 case SS_ASHIFT:
2613 case US_ASHIFT:
2614 if (trueop1 == CONST0_RTX (mode))
2615 return op0;
2616 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2617 return op0;
2618 goto canonicalize_shift;
2619
2620 case LSHIFTRT:
2621 if (trueop1 == CONST0_RTX (mode))
2622 return op0;
2623 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2624 return op0;
2625 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2626 if (GET_CODE (op0) == CLZ
2627 && CONST_INT_P (trueop1)
2628 && STORE_FLAG_VALUE == 1
2629 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2630 {
2631 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2632 unsigned HOST_WIDE_INT zero_val = 0;
2633
2634 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2635 && zero_val == GET_MODE_BITSIZE (imode)
2636 && INTVAL (trueop1) == exact_log2 (zero_val))
2637 return simplify_gen_relational (EQ, mode, imode,
2638 XEXP (op0, 0), const0_rtx);
2639 }
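/* E.g. in a 32-bit mode where CLZ_DEFINED_VALUE_AT_ZERO yields 32,
   (lshiftrt (clz X) (const_int 5)) is 1 exactly when the count is
   32, i.e. when X is zero, so it becomes (eq X (const_int 0)).  */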
2640 goto canonicalize_shift;
2641
2642 case SMIN:
2643 if (width <= HOST_BITS_PER_WIDE_INT
2644 && CONST_INT_P (trueop1)
2645 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2646 && ! side_effects_p (op0))
2647 return op1;
2648 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2649 return op0;
2650 tem = simplify_associative_operation (code, mode, op0, op1);
2651 if (tem)
2652 return tem;
2653 break;
2654
2655 case SMAX:
2656 if (width <= HOST_BITS_PER_WIDE_INT
2657 && CONST_INT_P (trueop1)
2658 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2659 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2660 && ! side_effects_p (op0))
2661 return op1;
2662 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2663 return op0;
2664 tem = simplify_associative_operation (code, mode, op0, op1);
2665 if (tem)
2666 return tem;
2667 break;
2668
2669 case UMIN:
2670 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2671 return op1;
2672 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2673 return op0;
2674 tem = simplify_associative_operation (code, mode, op0, op1);
2675 if (tem)
2676 return tem;
2677 break;
2678
2679 case UMAX:
2680 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2681 return op1;
2682 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2683 return op0;
2684 tem = simplify_associative_operation (code, mode, op0, op1);
2685 if (tem)
2686 return tem;
2687 break;
2688
2689 case SS_PLUS:
2690 case US_PLUS:
2691 case SS_MINUS:
2692 case US_MINUS:
2693 case SS_MULT:
2694 case US_MULT:
2695 case SS_DIV:
2696 case US_DIV:
2697 /* ??? There are simplifications that can be done. */
2698 return 0;
2699
2700 case VEC_SELECT:
2701 if (!VECTOR_MODE_P (mode))
2702 {
2703 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2704 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2705 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2706 gcc_assert (XVECLEN (trueop1, 0) == 1);
2707 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2708
2709 if (GET_CODE (trueop0) == CONST_VECTOR)
2710 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2711 (trueop1, 0, 0)));
2712
2713 /* Extract a scalar element from a nested VEC_SELECT expression
2714 (with an optional nested VEC_CONCAT expression). Some targets
2715 (i386) extract a scalar element from a vector using a chain of
2716 nested VEC_SELECT expressions. When the input operand is a
2717 memory operand, this operation can be simplified to a simple
2718 scalar load from an offset memory address. */
2719 if (GET_CODE (trueop0) == VEC_SELECT)
2720 {
2721 rtx op0 = XEXP (trueop0, 0);
2722 rtx op1 = XEXP (trueop0, 1);
2723
2724 enum machine_mode opmode = GET_MODE (op0);
2725 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2726 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2727
2728 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2729 int elem;
2730
2731 rtvec vec;
2732 rtx tmp_op, tmp;
2733
2734 gcc_assert (GET_CODE (op1) == PARALLEL);
2735 gcc_assert (i < n_elts);
2736
2737 /* Select the element pointed to by the nested selector. */
2738 elem = INTVAL (XVECEXP (op1, 0, i));
2739
2740 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2741 if (GET_CODE (op0) == VEC_CONCAT)
2742 {
2743 rtx op00 = XEXP (op0, 0);
2744 rtx op01 = XEXP (op0, 1);
2745
2746 enum machine_mode mode00, mode01;
2747 int n_elts00, n_elts01;
2748
2749 mode00 = GET_MODE (op00);
2750 mode01 = GET_MODE (op01);
2751
2752 /* Find out number of elements of each operand. */
2753 if (VECTOR_MODE_P (mode00))
2754 {
2755 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2756 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2757 }
2758 else
2759 n_elts00 = 1;
2760
2761 if (VECTOR_MODE_P (mode01))
2762 {
2763 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2764 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2765 }
2766 else
2767 n_elts01 = 1;
2768
2769 gcc_assert (n_elts == n_elts00 + n_elts01);
2770
2771 /* Select the correct operand of the VEC_CONCAT (op00 supplies
2772 the first n_elts00 elements) and adjust the selector. */
2773 if (elem < n_elts00)
2774 tmp_op = op00;
2775 else
2776 {
2777 tmp_op = op01;
2778 elem -= n_elts00;
2779 }
2780 }
2781 else
2782 tmp_op = op0;
2783
2784 vec = rtvec_alloc (1);
2785 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2786
2787 tmp = gen_rtx_fmt_ee (code, mode,
2788 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2789 return tmp;
2790 }
2791 }
2792 else
2793 {
2794 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2795 gcc_assert (GET_MODE_INNER (mode)
2796 == GET_MODE_INNER (GET_MODE (trueop0)));
2797 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2798
2799 if (GET_CODE (trueop0) == CONST_VECTOR)
2800 {
2801 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2802 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2803 rtvec v = rtvec_alloc (n_elts);
2804 unsigned int i;
2805
2806 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2807 for (i = 0; i < n_elts; i++)
2808 {
2809 rtx x = XVECEXP (trueop1, 0, i);
2810
2811 gcc_assert (CONST_INT_P (x));
2812 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2813 INTVAL (x));
2814 }
2815
2816 return gen_rtx_CONST_VECTOR (mode, v);
2817 }
2818 }
2819
2820 if (XVECLEN (trueop1, 0) == 1
2821 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2822 && GET_CODE (trueop0) == VEC_CONCAT)
2823 {
2824 rtx vec = trueop0;
2825 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2826
2827 /* Try to find the element in the VEC_CONCAT. */
2828 while (GET_MODE (vec) != mode
2829 && GET_CODE (vec) == VEC_CONCAT)
2830 {
2831 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2832 if (offset < vec_size)
2833 vec = XEXP (vec, 0);
2834 else
2835 {
2836 offset -= vec_size;
2837 vec = XEXP (vec, 1);
2838 }
2839 vec = avoid_constant_pool_reference (vec);
2840 }
2841
2842 if (GET_MODE (vec) == mode)
2843 return vec;
2844 }
2845
2846 return 0;
2847 case VEC_CONCAT:
2848 {
2849 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2850 ? GET_MODE (trueop0)
2851 : GET_MODE_INNER (mode));
2852 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2853 ? GET_MODE (trueop1)
2854 : GET_MODE_INNER (mode));
2855
2856 gcc_assert (VECTOR_MODE_P (mode));
2857 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2858 == GET_MODE_SIZE (mode));
2859
2860 if (VECTOR_MODE_P (op0_mode))
2861 gcc_assert (GET_MODE_INNER (mode)
2862 == GET_MODE_INNER (op0_mode));
2863 else
2864 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2865
2866 if (VECTOR_MODE_P (op1_mode))
2867 gcc_assert (GET_MODE_INNER (mode)
2868 == GET_MODE_INNER (op1_mode));
2869 else
2870 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2871
2872 if ((GET_CODE (trueop0) == CONST_VECTOR
2873 || CONST_INT_P (trueop0)
2874 || GET_CODE (trueop0) == CONST_DOUBLE)
2875 && (GET_CODE (trueop1) == CONST_VECTOR
2876 || CONST_INT_P (trueop1)
2877 || GET_CODE (trueop1) == CONST_DOUBLE))
2878 {
2879 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2880 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2881 rtvec v = rtvec_alloc (n_elts);
2882 unsigned int i;
2883 unsigned in_n_elts = 1;
2884
2885 if (VECTOR_MODE_P (op0_mode))
2886 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2887 for (i = 0; i < n_elts; i++)
2888 {
2889 if (i < in_n_elts)
2890 {
2891 if (!VECTOR_MODE_P (op0_mode))
2892 RTVEC_ELT (v, i) = trueop0;
2893 else
2894 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2895 }
2896 else
2897 {
2898 if (!VECTOR_MODE_P (op1_mode))
2899 RTVEC_ELT (v, i) = trueop1;
2900 else
2901 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2902 i - in_n_elts);
2903 }
2904 }
2905
2906 return gen_rtx_CONST_VECTOR (mode, v);
2907 }
2908 }
2909 return 0;
2910
2911 default:
2912 gcc_unreachable ();
2913 }
2914
2915 return 0;
2916 }
2917
2918 rtx
2919 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2920 rtx op0, rtx op1)
2921 {
2922 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2923 HOST_WIDE_INT val;
2924 unsigned int width = GET_MODE_BITSIZE (mode);
2925
2926 if (VECTOR_MODE_P (mode)
2927 && code != VEC_CONCAT
2928 && GET_CODE (op0) == CONST_VECTOR
2929 && GET_CODE (op1) == CONST_VECTOR)
2930 {
2931 unsigned n_elts = GET_MODE_NUNITS (mode);
2932 enum machine_mode op0mode = GET_MODE (op0);
2933 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2934 enum machine_mode op1mode = GET_MODE (op1);
2935 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2936 rtvec v = rtvec_alloc (n_elts);
2937 unsigned int i;
2938
2939 gcc_assert (op0_n_elts == n_elts);
2940 gcc_assert (op1_n_elts == n_elts);
2941 for (i = 0; i < n_elts; i++)
2942 {
2943 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2944 CONST_VECTOR_ELT (op0, i),
2945 CONST_VECTOR_ELT (op1, i));
2946 if (!x)
2947 return 0;
2948 RTVEC_ELT (v, i) = x;
2949 }
2950
2951 return gen_rtx_CONST_VECTOR (mode, v);
2952 }
2953
2954 if (VECTOR_MODE_P (mode)
2955 && code == VEC_CONCAT
2956 && (CONST_INT_P (op0)
2957 || GET_CODE (op0) == CONST_DOUBLE
2958 || GET_CODE (op0) == CONST_FIXED)
2959 && (CONST_INT_P (op1)
2960 || GET_CODE (op1) == CONST_DOUBLE
2961 || GET_CODE (op1) == CONST_FIXED))
2962 {
2963 unsigned n_elts = GET_MODE_NUNITS (mode);
2964 rtvec v = rtvec_alloc (n_elts);
2965
2966 gcc_assert (n_elts >= 2);
2967 if (n_elts == 2)
2968 {
2969 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2970 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2971
2972 RTVEC_ELT (v, 0) = op0;
2973 RTVEC_ELT (v, 1) = op1;
2974 }
2975 else
2976 {
2977 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2978 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2979 unsigned i;
2980
2981 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2982 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2983 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2984
2985 for (i = 0; i < op0_n_elts; ++i)
2986 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2987 for (i = 0; i < op1_n_elts; ++i)
2988 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2989 }
2990
2991 return gen_rtx_CONST_VECTOR (mode, v);
2992 }
2993
2994 if (SCALAR_FLOAT_MODE_P (mode)
2995 && GET_CODE (op0) == CONST_DOUBLE
2996 && GET_CODE (op1) == CONST_DOUBLE
2997 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2998 {
2999 if (code == AND
3000 || code == IOR
3001 || code == XOR)
3002 {
3003 long tmp0[4];
3004 long tmp1[4];
3005 REAL_VALUE_TYPE r;
3006 int i;
3007
3008 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3009 GET_MODE (op0));
3010 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3011 GET_MODE (op1));
3012 for (i = 0; i < 4; i++)
3013 {
3014 switch (code)
3015 {
3016 case AND:
3017 tmp0[i] &= tmp1[i];
3018 break;
3019 case IOR:
3020 tmp0[i] |= tmp1[i];
3021 break;
3022 case XOR:
3023 tmp0[i] ^= tmp1[i];
3024 break;
3025 default:
3026 gcc_unreachable ();
3027 }
3028 }
3029 real_from_target (&r, tmp0, mode);
3030 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3031 }
3032 else
3033 {
3034 REAL_VALUE_TYPE f0, f1, value, result;
3035 bool inexact;
3036
3037 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3038 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3039 real_convert (&f0, mode, &f0);
3040 real_convert (&f1, mode, &f1);
3041
3042 if (HONOR_SNANS (mode)
3043 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3044 return 0;
3045
3046 if (code == DIV
3047 && REAL_VALUES_EQUAL (f1, dconst0)
3048 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3049 return 0;
3050
3051 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3052 && flag_trapping_math
3053 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3054 {
3055 int s0 = REAL_VALUE_NEGATIVE (f0);
3056 int s1 = REAL_VALUE_NEGATIVE (f1);
3057
3058 switch (code)
3059 {
3060 case PLUS:
3061 /* Inf + -Inf = NaN plus exception. */
3062 if (s0 != s1)
3063 return 0;
3064 break;
3065 case MINUS:
3066 /* Inf - Inf = NaN plus exception. */
3067 if (s0 == s1)
3068 return 0;
3069 break;
3070 case DIV:
3071 /* Inf / Inf = NaN plus exception. */
3072 return 0;
3073 default:
3074 break;
3075 }
3076 }
3077
3078 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3079 && flag_trapping_math
3080 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3081 || (REAL_VALUE_ISINF (f1)
3082 && REAL_VALUES_EQUAL (f0, dconst0))))
3083 /* Inf * 0 = NaN plus exception. */
3084 return 0;
3085
3086 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3087 &f0, &f1);
3088 real_convert (&result, mode, &value);
3089
3090 /* Don't constant fold this floating point operation if
3091 the result has overflowed and flag_trapping_math is set. */
3092
3093 if (flag_trapping_math
3094 && MODE_HAS_INFINITIES (mode)
3095 && REAL_VALUE_ISINF (result)
3096 && !REAL_VALUE_ISINF (f0)
3097 && !REAL_VALUE_ISINF (f1))
3098 /* Overflow plus exception. */
3099 return 0;
3100
3101 /* Don't constant fold this floating point operation if the
3102 result may depend upon the run-time rounding mode and
3103 flag_rounding_math is set, or if GCC's software emulation
3104 is unable to accurately represent the result. */
3105
3106 if ((flag_rounding_math
3107 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3108 && (inexact || !real_identical (&result, &value)))
3109 return NULL_RTX;
3110
3111 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3112 }
3113 }
3114
3115 /* We can fold some multi-word operations. */
3116 if (GET_MODE_CLASS (mode) == MODE_INT
3117 && width == HOST_BITS_PER_WIDE_INT * 2
3118 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3119 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3120 {
3121 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3122 HOST_WIDE_INT h1, h2, hv, ht;
3123
3124 if (GET_CODE (op0) == CONST_DOUBLE)
3125 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3126 else
3127 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3128
3129 if (GET_CODE (op1) == CONST_DOUBLE)
3130 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3131 else
3132 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3133
3134 switch (code)
3135 {
3136 case MINUS:
3137 /* A - B == A + (-B). */
3138 neg_double (l2, h2, &lv, &hv);
3139 l2 = lv, h2 = hv;
3140
3141 /* Fall through.... */
3142
3143 case PLUS:
3144 add_double (l1, h1, l2, h2, &lv, &hv);
3145 break;
3146
3147 case MULT:
3148 mul_double (l1, h1, l2, h2, &lv, &hv);
3149 break;
3150
3151 case DIV:
3152 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3153 &lv, &hv, &lt, &ht))
3154 return 0;
3155 break;
3156
3157 case MOD:
3158 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3159 &lt, &ht, &lv, &hv))
3160 return 0;
3161 break;
3162
3163 case UDIV:
3164 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3165 &lv, &hv, &lt, &ht))
3166 return 0;
3167 break;
3168
3169 case UMOD:
3170 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3171 &lt, &ht, &lv, &hv))
3172 return 0;
3173 break;
3174
3175 case AND:
3176 lv = l1 & l2, hv = h1 & h2;
3177 break;
3178
3179 case IOR:
3180 lv = l1 | l2, hv = h1 | h2;
3181 break;
3182
3183 case XOR:
3184 lv = l1 ^ l2, hv = h1 ^ h2;
3185 break;
3186
3187 case SMIN:
3188 if (h1 < h2
3189 || (h1 == h2
3190 && ((unsigned HOST_WIDE_INT) l1
3191 < (unsigned HOST_WIDE_INT) l2)))
3192 lv = l1, hv = h1;
3193 else
3194 lv = l2, hv = h2;
3195 break;
3196
3197 case SMAX:
3198 if (h1 > h2
3199 || (h1 == h2
3200 && ((unsigned HOST_WIDE_INT) l1
3201 > (unsigned HOST_WIDE_INT) l2)))
3202 lv = l1, hv = h1;
3203 else
3204 lv = l2, hv = h2;
3205 break;
3206
3207 case UMIN:
3208 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3209 || (h1 == h2
3210 && ((unsigned HOST_WIDE_INT) l1
3211 < (unsigned HOST_WIDE_INT) l2)))
3212 lv = l1, hv = h1;
3213 else
3214 lv = l2, hv = h2;
3215 break;
3216
3217 case UMAX:
3218 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3219 || (h1 == h2
3220 && ((unsigned HOST_WIDE_INT) l1
3221 > (unsigned HOST_WIDE_INT) l2)))
3222 lv = l1, hv = h1;
3223 else
3224 lv = l2, hv = h2;
3225 break;
3226
3227 case LSHIFTRT: case ASHIFTRT:
3228 case ASHIFT:
3229 case ROTATE: case ROTATERT:
3230 if (SHIFT_COUNT_TRUNCATED)
3231 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3232
3233 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3234 return 0;
3235
3236 if (code == LSHIFTRT || code == ASHIFTRT)
3237 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3238 code == ASHIFTRT);
3239 else if (code == ASHIFT)
3240 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3241 else if (code == ROTATE)
3242 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3243 else /* code == ROTATERT */
3244 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3245 break;
3246
3247 default:
3248 return 0;
3249 }
3250
3251 return immed_double_const (lv, hv, mode);
3252 }
3253
3254 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3255 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3256 {
3257 /* Get the integer argument values in two forms:
3258 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3259
3260 arg0 = INTVAL (op0);
3261 arg1 = INTVAL (op1);
3262
3263 if (width < HOST_BITS_PER_WIDE_INT)
3264 {
3265 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3266 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3267
3268 arg0s = arg0;
3269 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3270 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3271
3272 arg1s = arg1;
3273 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3274 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3275 }
3276 else
3277 {
3278 arg0s = arg0;
3279 arg1s = arg1;
3280 }
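/* For example, with width == 8 an operand whose low byte is 0xff
   gives ARG0 == 255 (zero-extended) but ARG0S == -1 (sign-extended);
   the signed cases below (DIV, MOD, SMIN, ...) use the ARG0S/ARG1S
   forms, while the unsigned ones use ARG0/ARG1.  */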
3281
3282 /* Compute the value of the arithmetic. */
3283
3284 switch (code)
3285 {
3286 case PLUS:
3287 val = arg0s + arg1s;
3288 break;
3289
3290 case MINUS:
3291 val = arg0s - arg1s;
3292 break;
3293
3294 case MULT:
3295 val = arg0s * arg1s;
3296 break;
3297
3298 case DIV:
3299 if (arg1s == 0
3300 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3301 && arg1s == -1))
3302 return 0;
3303 val = arg0s / arg1s;
3304 break;
3305
3306 case MOD:
3307 if (arg1s == 0
3308 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3309 && arg1s == -1))
3310 return 0;
3311 val = arg0s % arg1s;
3312 break;
3313
3314 case UDIV:
3315 if (arg1 == 0
3316 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3317 && arg1s == -1))
3318 return 0;
3319 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3320 break;
3321
3322 case UMOD:
3323 if (arg1 == 0
3324 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3325 && arg1s == -1))
3326 return 0;
3327 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3328 break;
3329
3330 case AND:
3331 val = arg0 & arg1;
3332 break;
3333
3334 case IOR:
3335 val = arg0 | arg1;
3336 break;
3337
3338 case XOR:
3339 val = arg0 ^ arg1;
3340 break;
3341
3342 case LSHIFTRT:
3343 case ASHIFT:
3344 case ASHIFTRT:
3345 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3346 the value is in range. We can't return any old value for
3347 out-of-range arguments because either the middle-end (via
3348 shift_truncation_mask) or the back-end might be relying on
3349 target-specific knowledge. Nor can we rely on
3350 shift_truncation_mask, since the shift might not be part of an
3351 ashlM3, lshrM3 or ashrM3 instruction. */
3352 if (SHIFT_COUNT_TRUNCATED)
3353 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3354 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3355 return 0;
3356
3357 val = (code == ASHIFT
3358 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3359 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3360
3361 /* Sign-extend the result for arithmetic right shifts. */
3362 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3363 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3364 break;
3365
3366 case ROTATERT:
3367 if (arg1 < 0)
3368 return 0;
3369
3370 arg1 %= width;
3371 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3372 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3373 break;
3374
3375 case ROTATE:
3376 if (arg1 < 0)
3377 return 0;
3378
3379 arg1 %= width;
3380 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3381 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3382 break;
3383
3384 case COMPARE:
3385 /* Do nothing here. */
3386 return 0;
3387
3388 case SMIN:
3389 val = arg0s <= arg1s ? arg0s : arg1s;
3390 break;
3391
3392 case UMIN:
3393 val = ((unsigned HOST_WIDE_INT) arg0
3394 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3395 break;
3396
3397 case SMAX:
3398 val = arg0s > arg1s ? arg0s : arg1s;
3399 break;
3400
3401 case UMAX:
3402 val = ((unsigned HOST_WIDE_INT) arg0
3403 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3404 break;
3405
3406 case SS_PLUS:
3407 case US_PLUS:
3408 case SS_MINUS:
3409 case US_MINUS:
3410 case SS_MULT:
3411 case US_MULT:
3412 case SS_DIV:
3413 case US_DIV:
3414 case SS_ASHIFT:
3415 case US_ASHIFT:
3416 /* ??? There are simplifications that can be done. */
3417 return 0;
3418
3419 default:
3420 gcc_unreachable ();
3421 }
3422
3423 return gen_int_mode (val, mode);
3424 }
3425
3426 return NULL_RTX;
3427 }
3428
3429
3430 \f
3431 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3432 PLUS or MINUS.
3433
3434 Rather than test for specific cases, we do this by a brute-force method
3435 and do all possible simplifications until no more changes occur. Then
3436 we rebuild the operation. */
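/* For example, (minus (plus A B) (minus A C)) is first flattened into
   the signed operand list +A +B -A +C; the pairwise simplification
   pass then cancels the two copies of A, and the remaining operands
   are rebuilt as (plus B C).  */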
3437
3438 struct simplify_plus_minus_op_data
3439 {
3440 rtx op;
3441 short neg;
3442 };
3443
3444 static bool
3445 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3446 {
3447 int result;
3448
3449 result = (commutative_operand_precedence (y)
3450 - commutative_operand_precedence (x));
3451 if (result)
3452 return result > 0;
3453
3454 /* Group together equal REGs to do more simplification. */
3455 if (REG_P (x) && REG_P (y))
3456 return REGNO (x) > REGNO (y);
3457 else
3458 return false;
3459 }
3460
3461 static rtx
3462 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3463 rtx op1)
3464 {
3465 struct simplify_plus_minus_op_data ops[8];
3466 rtx result, tem;
3467 int n_ops = 2, input_ops = 2;
3468 int changed, n_constants = 0, canonicalized = 0;
3469 int i, j;
3470
3471 memset (ops, 0, sizeof ops);
3472
3473 /* Set up the two operands and then expand them until nothing has been
3474 changed. If we run out of room in our array, give up; this should
3475 almost never happen. */
3476
3477 ops[0].op = op0;
3478 ops[0].neg = 0;
3479 ops[1].op = op1;
3480 ops[1].neg = (code == MINUS);
3481
3482 do
3483 {
3484 changed = 0;
3485
3486 for (i = 0; i < n_ops; i++)
3487 {
3488 rtx this_op = ops[i].op;
3489 int this_neg = ops[i].neg;
3490 enum rtx_code this_code = GET_CODE (this_op);
3491
3492 switch (this_code)
3493 {
3494 case PLUS:
3495 case MINUS:
3496 if (n_ops == 7)
3497 return NULL_RTX;
3498
3499 ops[n_ops].op = XEXP (this_op, 1);
3500 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3501 n_ops++;
3502
3503 ops[i].op = XEXP (this_op, 0);
3504 input_ops++;
3505 changed = 1;
3506 canonicalized |= this_neg;
3507 break;
3508
3509 case NEG:
3510 ops[i].op = XEXP (this_op, 0);
3511 ops[i].neg = ! this_neg;
3512 changed = 1;
3513 canonicalized = 1;
3514 break;
3515
3516 case CONST:
3517 if (n_ops < 7
3518 && GET_CODE (XEXP (this_op, 0)) == PLUS
3519 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3520 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3521 {
3522 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3523 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3524 ops[n_ops].neg = this_neg;
3525 n_ops++;
3526 changed = 1;
3527 canonicalized = 1;
3528 }
3529 break;
3530
3531 case NOT:
3532 /* ~a -> (-a - 1) */
3533 if (n_ops != 7)
3534 {
3535 ops[n_ops].op = constm1_rtx;
3536 ops[n_ops++].neg = this_neg;
3537 ops[i].op = XEXP (this_op, 0);
3538 ops[i].neg = !this_neg;
3539 changed = 1;
3540 canonicalized = 1;
3541 }
3542 break;
3543
3544 case CONST_INT:
3545 n_constants++;
3546 if (this_neg)
3547 {
3548 ops[i].op = neg_const_int (mode, this_op);
3549 ops[i].neg = 0;
3550 changed = 1;
3551 canonicalized = 1;
3552 }
3553 break;
3554
3555 default:
3556 break;
3557 }
3558 }
3559 }
3560 while (changed);
3561
3562 if (n_constants > 1)
3563 canonicalized = 1;
3564
3565 gcc_assert (n_ops >= 2);
3566
3567 /* If we only have two operands, we can avoid the loops. */
3568 if (n_ops == 2)
3569 {
3570 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3571 rtx lhs, rhs;
3572
3573 /* Get the two operands. Be careful with the order, especially for
3574 the cases where code == MINUS. */
3575 if (ops[0].neg && ops[1].neg)
3576 {
3577 lhs = gen_rtx_NEG (mode, ops[0].op);
3578 rhs = ops[1].op;
3579 }
3580 else if (ops[0].neg)
3581 {
3582 lhs = ops[1].op;
3583 rhs = ops[0].op;
3584 }
3585 else
3586 {
3587 lhs = ops[0].op;
3588 rhs = ops[1].op;
3589 }
3590
3591 return simplify_const_binary_operation (code, mode, lhs, rhs);
3592 }
3593
3594 /* Now simplify each pair of operands until nothing changes. */
3595 do
3596 {
3597 /* Insertion sort is good enough for an eight-element array. */
3598 for (i = 1; i < n_ops; i++)
3599 {
3600 struct simplify_plus_minus_op_data save;
3601 j = i - 1;
3602 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3603 continue;
3604
3605 canonicalized = 1;
3606 save = ops[i];
3607 do
3608 ops[j + 1] = ops[j];
3609 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3610 ops[j + 1] = save;
3611 }
3612
3613 changed = 0;
3614 for (i = n_ops - 1; i > 0; i--)
3615 for (j = i - 1; j >= 0; j--)
3616 {
3617 rtx lhs = ops[j].op, rhs = ops[i].op;
3618 int lneg = ops[j].neg, rneg = ops[i].neg;
3619
3620 if (lhs != 0 && rhs != 0)
3621 {
3622 enum rtx_code ncode = PLUS;
3623
3624 if (lneg != rneg)
3625 {
3626 ncode = MINUS;
3627 if (lneg)
3628 tem = lhs, lhs = rhs, rhs = tem;
3629 }
3630 else if (swap_commutative_operands_p (lhs, rhs))
3631 tem = lhs, lhs = rhs, rhs = tem;
3632
3633 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3634 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3635 {
3636 rtx tem_lhs, tem_rhs;
3637
3638 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3639 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3640 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3641
3642 if (tem && !CONSTANT_P (tem))
3643 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3644 }
3645 else
3646 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3647
3648 /* Reject "simplifications" that just wrap the two
3649 arguments in a CONST. Failure to do so can result
3650 in infinite recursion with simplify_binary_operation
3651 when it calls us to simplify CONST operations. */
3652 if (tem
3653 && ! (GET_CODE (tem) == CONST
3654 && GET_CODE (XEXP (tem, 0)) == ncode
3655 && XEXP (XEXP (tem, 0), 0) == lhs
3656 && XEXP (XEXP (tem, 0), 1) == rhs))
3657 {
3658 lneg &= rneg;
3659 if (GET_CODE (tem) == NEG)
3660 tem = XEXP (tem, 0), lneg = !lneg;
3661 if (CONST_INT_P (tem) && lneg)
3662 tem = neg_const_int (mode, tem), lneg = 0;
3663
3664 ops[i].op = tem;
3665 ops[i].neg = lneg;
3666 ops[j].op = NULL_RTX;
3667 changed = 1;
3668 canonicalized = 1;
3669 }
3670 }
3671 }
3672
3673 /* If nothing changed, fail. */
3674 if (!canonicalized)
3675 return NULL_RTX;
3676
3677 /* Pack all the operands to the lower-numbered entries. */
3678 for (i = 0, j = 0; j < n_ops; j++)
3679 if (ops[j].op)
3680 {
3681 ops[i] = ops[j];
3682 i++;
3683 }
3684 n_ops = i;
3685 }
3686 while (changed);
3687
3688 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3689 if (n_ops == 2
3690 && CONST_INT_P (ops[1].op)
3691 && CONSTANT_P (ops[0].op)
3692 && ops[0].neg)
3693 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3694
3695 /* We suppressed creation of trivial CONST expressions in the
3696 combination loop to avoid recursion. Create one manually now.
3697 The combination loop should have ensured that there is exactly
3698 one CONST_INT, and the sort will have ensured that it is last
3699 in the array and that any other constant will be next-to-last. */
3700
3701 if (n_ops > 1
3702 && CONST_INT_P (ops[n_ops - 1].op)
3703 && CONSTANT_P (ops[n_ops - 2].op))
3704 {
3705 rtx value = ops[n_ops - 1].op;
3706 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3707 value = neg_const_int (mode, value);
3708 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3709 n_ops--;
3710 }
3711
3712 /* Put a non-negated operand first, if possible. */
3713
3714 for (i = 0; i < n_ops && ops[i].neg; i++)
3715 continue;
3716 if (i == n_ops)
3717 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3718 else if (i != 0)
3719 {
3720 tem = ops[0].op;
3721 ops[0] = ops[i];
3722 ops[i].op = tem;
3723 ops[i].neg = 1;
3724 }
3725
3726 /* Now make the result by performing the requested operations. */
3727 result = ops[0].op;
3728 for (i = 1; i < n_ops; i++)
3729 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3730 mode, result, ops[i].op);
3731
3732 return result;
3733 }
3734
3735 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3736 static bool
3737 plus_minus_operand_p (const_rtx x)
3738 {
3739 return GET_CODE (x) == PLUS
3740 || GET_CODE (x) == MINUS
3741 || (GET_CODE (x) == CONST
3742 && GET_CODE (XEXP (x, 0)) == PLUS
3743 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3744 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3745 }
3746
3747 /* Like simplify_binary_operation except used for relational operators.
3748 MODE is the mode of the result. If MODE is VOIDmode, the operands
3749 must not both be VOIDmode.
3750
3751 CMP_MODE specifies the mode in which the comparison is done, so it is
3752 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3753 the operands or, if both are VOIDmode, the operands are compared in
3754 "infinite precision". */
3755 rtx
3756 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3757 enum machine_mode cmp_mode, rtx op0, rtx op1)
3758 {
3759 rtx tem, trueop0, trueop1;
3760
3761 if (cmp_mode == VOIDmode)
3762 cmp_mode = GET_MODE (op0);
3763 if (cmp_mode == VOIDmode)
3764 cmp_mode = GET_MODE (op1);
3765
3766 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3767 if (tem)
3768 {
3769 if (SCALAR_FLOAT_MODE_P (mode))
3770 {
3771 if (tem == const0_rtx)
3772 return CONST0_RTX (mode);
3773 #ifdef FLOAT_STORE_FLAG_VALUE
3774 {
3775 REAL_VALUE_TYPE val;
3776 val = FLOAT_STORE_FLAG_VALUE (mode);
3777 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3778 }
3779 #else
3780 return NULL_RTX;
3781 #endif
3782 }
3783 if (VECTOR_MODE_P (mode))
3784 {
3785 if (tem == const0_rtx)
3786 return CONST0_RTX (mode);
3787 #ifdef VECTOR_STORE_FLAG_VALUE
3788 {
3789 int i, units;
3790 rtvec v;
3791
3792 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3793 if (val == NULL_RTX)
3794 return NULL_RTX;
3795 if (val == const1_rtx)
3796 return CONST1_RTX (mode);
3797
3798 units = GET_MODE_NUNITS (mode);
3799 v = rtvec_alloc (units);
3800 for (i = 0; i < units; i++)
3801 RTVEC_ELT (v, i) = val;
3802 return gen_rtx_raw_CONST_VECTOR (mode, v);
3803 }
3804 #else
3805 return NULL_RTX;
3806 #endif
3807 }
3808
3809 return tem;
3810 }
3811
3812 /* For the following tests, ensure const0_rtx is op1. */
3813 if (swap_commutative_operands_p (op0, op1)
3814 || (op0 == const0_rtx && op1 != const0_rtx))
3815 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3816
3817 /* If op0 is a compare, extract the comparison arguments from it. */
3818 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3819 return simplify_gen_relational (code, mode, VOIDmode,
3820 XEXP (op0, 0), XEXP (op0, 1));
3821
3822 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3823 || CC0_P (op0))
3824 return NULL_RTX;
3825
3826 trueop0 = avoid_constant_pool_reference (op0);
3827 trueop1 = avoid_constant_pool_reference (op1);
3828 return simplify_relational_operation_1 (code, mode, cmp_mode,
3829 trueop0, trueop1);
3830 }
3831
3832 /* This part of simplify_relational_operation is only used when CMP_MODE
3833 is not in class MODE_CC (i.e. it is a real comparison).
3834
3835 MODE is the mode of the result, while CMP_MODE specifies the mode in
3836 which the comparison is done, so it is the mode of the operands. */
3837
3838 static rtx
3839 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3840 enum machine_mode cmp_mode, rtx op0, rtx op1)
3841 {
3842 enum rtx_code op0code = GET_CODE (op0);
3843
3844 if (op1 == const0_rtx && COMPARISON_P (op0))
3845 {
3846 /* If op0 is a comparison, extract the comparison arguments
3847 from it. */
3848 if (code == NE)
3849 {
3850 if (GET_MODE (op0) == mode)
3851 return simplify_rtx (op0);
3852 else
3853 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3854 XEXP (op0, 0), XEXP (op0, 1));
3855 }
3856 else if (code == EQ)
3857 {
3858 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3859 if (new_code != UNKNOWN)
3860 return simplify_gen_relational (new_code, mode, VOIDmode,
3861 XEXP (op0, 0), XEXP (op0, 1));
3862 }
3863 }
3864
3865 /* (LTU/GEU (PLUS a C) C), where C is a constant, can be simplified to
3866 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
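/* This is the usual unsigned overflow test for a + C: for instance,
   (ltu (plus x (const_int 1)) (const_int 1)) holds exactly when the
   addition wrapped, i.e. when x is the all-ones value, and it is
   rewritten here as (geu x (const_int -1)) (illustrative example).  */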
3867 if ((code == LTU || code == GEU)
3868 && GET_CODE (op0) == PLUS
3869 && CONST_INT_P (XEXP (op0, 1))
3870 && (rtx_equal_p (op1, XEXP (op0, 0))
3871 || rtx_equal_p (op1, XEXP (op0, 1))))
3872 {
3873 rtx new_cmp
3874 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
3875 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
3876 cmp_mode, XEXP (op0, 0), new_cmp);
3877 }
3878
3879 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3880 if ((code == LTU || code == GEU)
3881 && GET_CODE (op0) == PLUS
3882 && rtx_equal_p (op1, XEXP (op0, 1))
3883 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3884 && !rtx_equal_p (op1, XEXP (op0, 0)))
3885 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
3886
3887 if (op1 == const0_rtx)
3888 {
3889 /* Canonicalize (GTU x 0) as (NE x 0). */
3890 if (code == GTU)
3891 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3892 /* Canonicalize (LEU x 0) as (EQ x 0). */
3893 if (code == LEU)
3894 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3895 }
3896 else if (op1 == const1_rtx)
3897 {
3898 switch (code)
3899 {
3900 case GE:
3901 /* Canonicalize (GE x 1) as (GT x 0). */
3902 return simplify_gen_relational (GT, mode, cmp_mode,
3903 op0, const0_rtx);
3904 case GEU:
3905 /* Canonicalize (GEU x 1) as (NE x 0). */
3906 return simplify_gen_relational (NE, mode, cmp_mode,
3907 op0, const0_rtx);
3908 case LT:
3909 /* Canonicalize (LT x 1) as (LE x 0). */
3910 return simplify_gen_relational (LE, mode, cmp_mode,
3911 op0, const0_rtx);
3912 case LTU:
3913 /* Canonicalize (LTU x 1) as (EQ x 0). */
3914 return simplify_gen_relational (EQ, mode, cmp_mode,
3915 op0, const0_rtx);
3916 default:
3917 break;
3918 }
3919 }
3920 else if (op1 == constm1_rtx)
3921 {
3922 /* Canonicalize (LE x -1) as (LT x 0). */
3923 if (code == LE)
3924 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3925 /* Canonicalize (GT x -1) as (GE x 0). */
3926 if (code == GT)
3927 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3928 }
3929
3930 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3931 if ((code == EQ || code == NE)
3932 && (op0code == PLUS || op0code == MINUS)
3933 && CONSTANT_P (op1)
3934 && CONSTANT_P (XEXP (op0, 1))
3935 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3936 {
3937 rtx x = XEXP (op0, 0);
3938 rtx c = XEXP (op0, 1);
3939
3940 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3941 cmp_mode, op1, c);
3942 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3943 }
3944
3945 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3946 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3947 if (code == NE
3948 && op1 == const0_rtx
3949 && GET_MODE_CLASS (mode) == MODE_INT
3950 && cmp_mode != VOIDmode
3951 /* ??? Work-around BImode bugs in the ia64 backend. */
3952 && mode != BImode
3953 && cmp_mode != BImode
3954 && nonzero_bits (op0, cmp_mode) == 1
3955 && STORE_FLAG_VALUE == 1)
3956 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3957 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3958 : lowpart_subreg (mode, op0, cmp_mode);
3959
3960 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3961 if ((code == EQ || code == NE)
3962 && op1 == const0_rtx
3963 && op0code == XOR)
3964 return simplify_gen_relational (code, mode, cmp_mode,
3965 XEXP (op0, 0), XEXP (op0, 1));
3966
3967 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3968 if ((code == EQ || code == NE)
3969 && op0code == XOR
3970 && rtx_equal_p (XEXP (op0, 0), op1)
3971 && !side_effects_p (XEXP (op0, 0)))
3972 return simplify_gen_relational (code, mode, cmp_mode,
3973 XEXP (op0, 1), const0_rtx);
3974
3975 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3976 if ((code == EQ || code == NE)
3977 && op0code == XOR
3978 && rtx_equal_p (XEXP (op0, 1), op1)
3979 && !side_effects_p (XEXP (op0, 1)))
3980 return simplify_gen_relational (code, mode, cmp_mode,
3981 XEXP (op0, 0), const0_rtx);
3982
3983 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3984 if ((code == EQ || code == NE)
3985 && op0code == XOR
3986 && (CONST_INT_P (op1)
3987 || GET_CODE (op1) == CONST_DOUBLE)
3988 && (CONST_INT_P (XEXP (op0, 1))
3989 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3990 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3991 simplify_gen_binary (XOR, cmp_mode,
3992 XEXP (op0, 1), op1));
3993
3994 if (op0code == POPCOUNT && op1 == const0_rtx)
3995 switch (code)
3996 {
3997 case EQ:
3998 case LE:
3999 case LEU:
4000 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4001 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4002 XEXP (op0, 0), const0_rtx);
4003
4004 case NE:
4005 case GT:
4006 case GTU:
4007 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4008 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4009 XEXP (op0, 0), const0_rtx);
4010
4011 default:
4012 break;
4013 }
4014
4015 return NULL_RTX;
4016 }
4017
4018 enum
4019 {
4020 CMP_EQ = 1,
4021 CMP_LT = 2,
4022 CMP_GT = 4,
4023 CMP_LTU = 8,
4024 CMP_GTU = 16
4025 };
4026
4027
4028 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4029 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4030 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4031 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4032 For floating-point comparisons, assume that the operands were ordered. */
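/* For instance, comparison_result (LE, CMP_GT) yields const0_rtx, while
   comparison_result (GEU, CMP_GT | CMP_GTU) yields const_true_rtx
   (illustrative examples).  */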
4033
4034 static rtx
4035 comparison_result (enum rtx_code code, int known_results)
4036 {
4037 switch (code)
4038 {
4039 case EQ:
4040 case UNEQ:
4041 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4042 case NE:
4043 case LTGT:
4044 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4045
4046 case LT:
4047 case UNLT:
4048 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4049 case GE:
4050 case UNGE:
4051 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4052
4053 case GT:
4054 case UNGT:
4055 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4056 case LE:
4057 case UNLE:
4058 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4059
4060 case LTU:
4061 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4062 case GEU:
4063 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4064
4065 case GTU:
4066 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4067 case LEU:
4068 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4069
4070 case ORDERED:
4071 return const_true_rtx;
4072 case UNORDERED:
4073 return const0_rtx;
4074 default:
4075 gcc_unreachable ();
4076 }
4077 }
4078
4079 /* Check if the given comparison (done in the given MODE) is actually a
4080 tautology or a contradiction.
4081 If no simplification is possible, this function returns zero.
4082 Otherwise, it returns either const_true_rtx or const0_rtx. */
4083
4084 rtx
4085 simplify_const_relational_operation (enum rtx_code code,
4086 enum machine_mode mode,
4087 rtx op0, rtx op1)
4088 {
4089 rtx tem;
4090 rtx trueop0;
4091 rtx trueop1;
4092
4093 gcc_assert (mode != VOIDmode
4094 || (GET_MODE (op0) == VOIDmode
4095 && GET_MODE (op1) == VOIDmode));
4096
4097 /* If op0 is a compare, extract the comparison arguments from it. */
4098 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4099 {
4100 op1 = XEXP (op0, 1);
4101 op0 = XEXP (op0, 0);
4102
4103 if (GET_MODE (op0) != VOIDmode)
4104 mode = GET_MODE (op0);
4105 else if (GET_MODE (op1) != VOIDmode)
4106 mode = GET_MODE (op1);
4107 else
4108 return 0;
4109 }
4110
4111 /* We can't simplify MODE_CC values since we don't know what the
4112 actual comparison is. */
4113 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4114 return 0;
4115
4116 /* Make sure the constant is second. */
4117 if (swap_commutative_operands_p (op0, op1))
4118 {
4119 tem = op0, op0 = op1, op1 = tem;
4120 code = swap_condition (code);
4121 }
4122
4123 trueop0 = avoid_constant_pool_reference (op0);
4124 trueop1 = avoid_constant_pool_reference (op1);
4125
4126 /* For integer comparisons of A and B maybe we can simplify A - B and can
4127 then simplify a comparison of that with zero. If A and B are both either
4128 a register or a CONST_INT, this can't help; testing for these cases will
4129 prevent infinite recursion here and speed things up.
4130
4131 We can only do this for EQ and NE comparisons as otherwise we may
4132 lose or introduce overflow which we cannot disregard as undefined as
4133 we do not know the signedness of the operation on either the left or
4134 the right hand side of the comparison. */
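/* For instance, for (ne (plus x (const_int 1)) x) the subtraction below
   folds to (const_int 1), and the recursive call then folds the whole
   comparison to const_true_rtx (illustrative example).  */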
4135
4136 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4137 && (code == EQ || code == NE)
4138 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4139 && (REG_P (op1) || CONST_INT_P (trueop1)))
4140 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4141 /* We cannot do this if tem is a nonzero address. */
4142 && ! nonzero_address_p (tem))
4143 return simplify_const_relational_operation (signed_condition (code),
4144 mode, tem, const0_rtx);
4145
4146 if (! HONOR_NANS (mode) && code == ORDERED)
4147 return const_true_rtx;
4148
4149 if (! HONOR_NANS (mode) && code == UNORDERED)
4150 return const0_rtx;
4151
4152 /* For modes without NaNs, if the two operands are equal, we know the
4153 result except if they have side-effects. Even with NaNs we know
4154 the result of unordered comparisons and, if signaling NaNs are
4155 irrelevant, also the result of LT/GT/LTGT. */
4156 if ((! HONOR_NANS (GET_MODE (trueop0))
4157 || code == UNEQ || code == UNLE || code == UNGE
4158 || ((code == LT || code == GT || code == LTGT)
4159 && ! HONOR_SNANS (GET_MODE (trueop0))))
4160 && rtx_equal_p (trueop0, trueop1)
4161 && ! side_effects_p (trueop0))
4162 return comparison_result (code, CMP_EQ);
4163
4164 /* If the operands are floating-point constants, see if we can fold
4165 the result. */
4166 if (GET_CODE (trueop0) == CONST_DOUBLE
4167 && GET_CODE (trueop1) == CONST_DOUBLE
4168 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4169 {
4170 REAL_VALUE_TYPE d0, d1;
4171
4172 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4173 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4174
4175 /* Comparisons are unordered iff at least one of the values is NaN. */
4176 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4177 switch (code)
4178 {
4179 case UNEQ:
4180 case UNLT:
4181 case UNGT:
4182 case UNLE:
4183 case UNGE:
4184 case NE:
4185 case UNORDERED:
4186 return const_true_rtx;
4187 case EQ:
4188 case LT:
4189 case GT:
4190 case LE:
4191 case GE:
4192 case LTGT:
4193 case ORDERED:
4194 return const0_rtx;
4195 default:
4196 return 0;
4197 }
4198
4199 return comparison_result (code,
4200 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4201 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4202 }
4203
4204 /* Otherwise, see if the operands are both integers. */
4205 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4206 && (GET_CODE (trueop0) == CONST_DOUBLE
4207 || CONST_INT_P (trueop0))
4208 && (GET_CODE (trueop1) == CONST_DOUBLE
4209 || CONST_INT_P (trueop1)))
4210 {
4211 int width = GET_MODE_BITSIZE (mode);
4212 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4213 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4214
4215 /* Get the two words comprising each integer constant. */
4216 if (GET_CODE (trueop0) == CONST_DOUBLE)
4217 {
4218 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4219 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4220 }
4221 else
4222 {
4223 l0u = l0s = INTVAL (trueop0);
4224 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4225 }
4226
4227 if (GET_CODE (trueop1) == CONST_DOUBLE)
4228 {
4229 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4230 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4231 }
4232 else
4233 {
4234 l1u = l1s = INTVAL (trueop1);
4235 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4236 }
4237
4238 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4239 we have to sign or zero-extend the values. */
4240 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4241 {
4242 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4243 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4244
4245 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4246 l0s |= ((HOST_WIDE_INT) (-1) << width);
4247
4248 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4249 l1s |= ((HOST_WIDE_INT) (-1) << width);
4250 }
4251 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4252 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4253
4254 if (h0u == h1u && l0u == l1u)
4255 return comparison_result (code, CMP_EQ);
4256 else
4257 {
4258 int cr;
4259 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4260 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4261 return comparison_result (code, cr);
4262 }
4263 }
4264
4265 /* Optimize comparisons with upper and lower bounds. */
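/* For instance, if nonzero_bits shows that only the low 8 bits of
   TRUEOP0 can be set, MMAX is at most 255, so (gtu x (const_int 255))
   folds to const0_rtx and (leu x (const_int 255)) folds to
   const_true_rtx (illustrative, assuming a mode wider than 8 bits).  */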
4266 if (SCALAR_INT_MODE_P (mode)
4267 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4268 && CONST_INT_P (trueop1))
4269 {
4270 int sign;
4271 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4272 HOST_WIDE_INT val = INTVAL (trueop1);
4273 HOST_WIDE_INT mmin, mmax;
4274
4275 if (code == GEU
4276 || code == LEU
4277 || code == GTU
4278 || code == LTU)
4279 sign = 0;
4280 else
4281 sign = 1;
4282
4283 /* Get a reduced range if the sign bit is zero. */
4284 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4285 {
4286 mmin = 0;
4287 mmax = nonzero;
4288 }
4289 else
4290 {
4291 rtx mmin_rtx, mmax_rtx;
4292 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4293
4294 mmin = INTVAL (mmin_rtx);
4295 mmax = INTVAL (mmax_rtx);
4296 if (sign)
4297 {
4298 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4299
4300 mmin >>= (sign_copies - 1);
4301 mmax >>= (sign_copies - 1);
4302 }
4303 }
4304
4305 switch (code)
4306 {
4307 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4308 case GEU:
4309 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4310 return const_true_rtx;
4311 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4312 return const0_rtx;
4313 break;
4314 case GE:
4315 if (val <= mmin)
4316 return const_true_rtx;
4317 if (val > mmax)
4318 return const0_rtx;
4319 break;
4320
4321 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4322 case LEU:
4323 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4324 return const_true_rtx;
4325 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4326 return const0_rtx;
4327 break;
4328 case LE:
4329 if (val >= mmax)
4330 return const_true_rtx;
4331 if (val < mmin)
4332 return const0_rtx;
4333 break;
4334
4335 case EQ:
4336 /* x == y is always false for y out of range. */
4337 if (val < mmin || val > mmax)
4338 return const0_rtx;
4339 break;
4340
4341 /* x > y is always false for y >= mmax, always true for y < mmin. */
4342 case GTU:
4343 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4344 return const0_rtx;
4345 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4346 return const_true_rtx;
4347 break;
4348 case GT:
4349 if (val >= mmax)
4350 return const0_rtx;
4351 if (val < mmin)
4352 return const_true_rtx;
4353 break;
4354
4355 /* x < y is always false for y <= mmin, always true for y > mmax. */
4356 case LTU:
4357 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4358 return const0_rtx;
4359 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4360 return const_true_rtx;
4361 break;
4362 case LT:
4363 if (val <= mmin)
4364 return const0_rtx;
4365 if (val > mmax)
4366 return const_true_rtx;
4367 break;
4368
4369 case NE:
4370 /* x != y is always true for y out of range. */
4371 if (val < mmin || val > mmax)
4372 return const_true_rtx;
4373 break;
4374
4375 default:
4376 break;
4377 }
4378 }
4379
4380 /* Optimize integer comparisons with zero. */
4381 if (trueop1 == const0_rtx)
4382 {
4383 /* Some addresses are known to be nonzero. We don't know
4384 their sign, but equality comparisons are known. */
4385 if (nonzero_address_p (trueop0))
4386 {
4387 if (code == EQ || code == LEU)
4388 return const0_rtx;
4389 if (code == NE || code == GTU)
4390 return const_true_rtx;
4391 }
4392
4393 /* See if the first operand is an IOR with a constant. If so, we
4394 may be able to determine the result of this comparison. */
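/* For instance, (ior x (const_int 4)) is always nonzero, so an EQ
   against zero folds to const0_rtx and an NE to const_true_rtx; if
   instead the constant has the sign bit set, LT and LE against zero
   are known to be true as well (illustrative example).  */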
4395 if (GET_CODE (op0) == IOR)
4396 {
4397 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4398 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4399 {
4400 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4401 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4402 && (INTVAL (inner_const)
4403 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4404
4405 switch (code)
4406 {
4407 case EQ:
4408 case LEU:
4409 return const0_rtx;
4410 case NE:
4411 case GTU:
4412 return const_true_rtx;
4413 case LT:
4414 case LE:
4415 if (has_sign)
4416 return const_true_rtx;
4417 break;
4418 case GT:
4419 case GE:
4420 if (has_sign)
4421 return const0_rtx;
4422 break;
4423 default:
4424 break;
4425 }
4426 }
4427 }
4428 }
4429
4430 /* Optimize comparison of ABS with zero. */
4431 if (trueop1 == CONST0_RTX (mode)
4432 && (GET_CODE (trueop0) == ABS
4433 || (GET_CODE (trueop0) == FLOAT_EXTEND
4434 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4435 {
4436 switch (code)
4437 {
4438 case LT:
4439 /* Optimize abs(x) < 0.0. */
4440 if (!HONOR_SNANS (mode)
4441 && (!INTEGRAL_MODE_P (mode)
4442 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4443 {
4444 if (INTEGRAL_MODE_P (mode)
4445 && (issue_strict_overflow_warning
4446 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4447 warning (OPT_Wstrict_overflow,
4448 ("assuming signed overflow does not occur when "
4449 "assuming abs (x) < 0 is false"));
4450 return const0_rtx;
4451 }
4452 break;
4453
4454 case GE:
4455 /* Optimize abs(x) >= 0.0. */
4456 if (!HONOR_NANS (mode)
4457 && (!INTEGRAL_MODE_P (mode)
4458 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4459 {
4460 if (INTEGRAL_MODE_P (mode)
4461 && (issue_strict_overflow_warning
4462 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4463 warning (OPT_Wstrict_overflow,
4464 ("assuming signed overflow does not occur when "
4465 "assuming abs (x) >= 0 is true"));
4466 return const_true_rtx;
4467 }
4468 break;
4469
4470 case UNGE:
4471 /* Optimize ! (abs(x) < 0.0). */
4472 return const_true_rtx;
4473
4474 default:
4475 break;
4476 }
4477 }
4478
4479 return 0;
4480 }
4481 \f
4482 /* Simplify CODE, an operation with result mode MODE and three operands,
4483 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4484 a constant. Return 0 if no simplification is possible. */
4485
4486 rtx
4487 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4488 enum machine_mode op0_mode, rtx op0, rtx op1,
4489 rtx op2)
4490 {
4491 unsigned int width = GET_MODE_BITSIZE (mode);
4492
4493 /* VOIDmode means "infinite" precision. */
4494 if (width == 0)
4495 width = HOST_BITS_PER_WIDE_INT;
4496
4497 switch (code)
4498 {
4499 case SIGN_EXTRACT:
4500 case ZERO_EXTRACT:
4501 if (CONST_INT_P (op0)
4502 && CONST_INT_P (op1)
4503 && CONST_INT_P (op2)
4504 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4505 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4506 {
4507 /* Extracting a bit-field from a constant. */
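/* For instance, with !BITS_BIG_ENDIAN,
   (zero_extract (const_int 0xab) (const_int 4) (const_int 4))
   extracts the upper nibble and yields (const_int 0xa)
   (illustrative example).  */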
4508 HOST_WIDE_INT val = INTVAL (op0);
4509
4510 if (BITS_BIG_ENDIAN)
4511 val >>= (GET_MODE_BITSIZE (op0_mode)
4512 - INTVAL (op2) - INTVAL (op1));
4513 else
4514 val >>= INTVAL (op2);
4515
4516 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4517 {
4518 /* First zero-extend. */
4519 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4520 /* If desired, propagate sign bit. */
4521 if (code == SIGN_EXTRACT
4522 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4523 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4524 }
4525
4526 /* Clear the bits that don't belong in our mode,
4527 unless they and our sign bit are all one.
4528 So we get either a reasonable negative value or a reasonable
4529 unsigned value for this mode. */
4530 if (width < HOST_BITS_PER_WIDE_INT
4531 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4532 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4533 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4534
4535 return gen_int_mode (val, mode);
4536 }
4537 break;
4538
4539 case IF_THEN_ELSE:
4540 if (CONST_INT_P (op0))
4541 return op0 != const0_rtx ? op1 : op2;
4542
4543 /* Convert c ? a : a into "a". */
4544 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4545 return op1;
4546
4547 /* Convert a != b ? a : b into "a". */
4548 if (GET_CODE (op0) == NE
4549 && ! side_effects_p (op0)
4550 && ! HONOR_NANS (mode)
4551 && ! HONOR_SIGNED_ZEROS (mode)
4552 && ((rtx_equal_p (XEXP (op0, 0), op1)
4553 && rtx_equal_p (XEXP (op0, 1), op2))
4554 || (rtx_equal_p (XEXP (op0, 0), op2)
4555 && rtx_equal_p (XEXP (op0, 1), op1))))
4556 return op1;
4557
4558 /* Convert a == b ? a : b into "b". */
4559 if (GET_CODE (op0) == EQ
4560 && ! side_effects_p (op0)
4561 && ! HONOR_NANS (mode)
4562 && ! HONOR_SIGNED_ZEROS (mode)
4563 && ((rtx_equal_p (XEXP (op0, 0), op1)
4564 && rtx_equal_p (XEXP (op0, 1), op2))
4565 || (rtx_equal_p (XEXP (op0, 0), op2)
4566 && rtx_equal_p (XEXP (op0, 1), op1))))
4567 return op2;
4568
4569 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4570 {
4571 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4572 ? GET_MODE (XEXP (op0, 1))
4573 : GET_MODE (XEXP (op0, 0)));
4574 rtx temp;
4575
4576 /* Look for happy constants in op1 and op2. */
4577 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4578 {
4579 HOST_WIDE_INT t = INTVAL (op1);
4580 HOST_WIDE_INT f = INTVAL (op2);
4581
4582 if (t == STORE_FLAG_VALUE && f == 0)
4583 code = GET_CODE (op0);
4584 else if (t == 0 && f == STORE_FLAG_VALUE)
4585 {
4586 enum rtx_code tmp;
4587 tmp = reversed_comparison_code (op0, NULL_RTX);
4588 if (tmp == UNKNOWN)
4589 break;
4590 code = tmp;
4591 }
4592 else
4593 break;
4594
4595 return simplify_gen_relational (code, mode, cmp_mode,
4596 XEXP (op0, 0), XEXP (op0, 1));
4597 }
4598
4599 if (cmp_mode == VOIDmode)
4600 cmp_mode = op0_mode;
4601 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4602 cmp_mode, XEXP (op0, 0),
4603 XEXP (op0, 1));
4604
4605 /* See if any simplifications were possible. */
4606 if (temp)
4607 {
4608 if (CONST_INT_P (temp))
4609 return temp == const0_rtx ? op2 : op1;
4610 else if (temp)
4611 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4612 }
4613 }
4614 break;
4615
4616 case VEC_MERGE:
4617 gcc_assert (GET_MODE (op0) == mode);
4618 gcc_assert (GET_MODE (op1) == mode);
4619 gcc_assert (VECTOR_MODE_P (mode));
4620 op2 = avoid_constant_pool_reference (op2);
4621 if (CONST_INT_P (op2))
4622 {
4623 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4624 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4625 int mask = (1 << n_elts) - 1;
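/* Bit I of OP2 selects element I of the result: a set bit takes the
   element from OP0 and a clear bit takes it from OP1, as the
   CONST_VECTOR loop below does explicitly.  */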
4626
4627 if (!(INTVAL (op2) & mask))
4628 return op1;
4629 if ((INTVAL (op2) & mask) == mask)
4630 return op0;
4631
4632 op0 = avoid_constant_pool_reference (op0);
4633 op1 = avoid_constant_pool_reference (op1);
4634 if (GET_CODE (op0) == CONST_VECTOR
4635 && GET_CODE (op1) == CONST_VECTOR)
4636 {
4637 rtvec v = rtvec_alloc (n_elts);
4638 unsigned int i;
4639
4640 for (i = 0; i < n_elts; i++)
4641 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4642 ? CONST_VECTOR_ELT (op0, i)
4643 : CONST_VECTOR_ELT (op1, i));
4644 return gen_rtx_CONST_VECTOR (mode, v);
4645 }
4646 }
4647 break;
4648
4649 default:
4650 gcc_unreachable ();
4651 }
4652
4653 return 0;
4654 }
4655
4656 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4657 or CONST_VECTOR,
4658 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4659
4660 Works by unpacking OP into a collection of 8-bit values
4661 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4662 and then repacking them again for OUTERMODE. */
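/* For instance, taking the HImode lowpart (BYTE 0) of the SImode
   constant 0x12345678 on a little-endian target unpacks the bytes as
   {0x78, 0x56, 0x34, 0x12}, keeps the low two, and repacks them as
   (const_int 0x5678) (illustrative, little-endian assumed).  */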
4663
4664 static rtx
4665 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4666 enum machine_mode innermode, unsigned int byte)
4667 {
4668 /* We support up to 512-bit values (for V8DFmode). */
4669 enum {
4670 max_bitsize = 512,
4671 value_bit = 8,
4672 value_mask = (1 << value_bit) - 1
4673 };
4674 unsigned char value[max_bitsize / value_bit];
4675 int value_start;
4676 int i;
4677 int elem;
4678
4679 int num_elem;
4680 rtx * elems;
4681 int elem_bitsize;
4682 rtx result_s;
4683 rtvec result_v = NULL;
4684 enum mode_class outer_class;
4685 enum machine_mode outer_submode;
4686
4687 /* Some ports misuse CCmode. */
4688 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4689 return op;
4690
4691 /* We have no way to represent a complex constant at the rtl level. */
4692 if (COMPLEX_MODE_P (outermode))
4693 return NULL_RTX;
4694
4695 /* Unpack the value. */
4696
4697 if (GET_CODE (op) == CONST_VECTOR)
4698 {
4699 num_elem = CONST_VECTOR_NUNITS (op);
4700 elems = &CONST_VECTOR_ELT (op, 0);
4701 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4702 }
4703 else
4704 {
4705 num_elem = 1;
4706 elems = &op;
4707 elem_bitsize = max_bitsize;
4708 }
4709 /* If this asserts, it is too complicated; reducing value_bit may help. */
4710 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4711 /* I don't know how to handle endianness of sub-units. */
4712 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4713
4714 for (elem = 0; elem < num_elem; elem++)
4715 {
4716 unsigned char * vp;
4717 rtx el = elems[elem];
4718
4719 /* Vectors are kept in target memory order. (This is probably
4720 a mistake.) */
4721 {
4722 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4723 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4724 / BITS_PER_UNIT);
4725 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4726 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4727 unsigned bytele = (subword_byte % UNITS_PER_WORD
4728 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4729 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4730 }
4731
4732 switch (GET_CODE (el))
4733 {
4734 case CONST_INT:
4735 for (i = 0;
4736 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4737 i += value_bit)
4738 *vp++ = INTVAL (el) >> i;
4739 /* CONST_INTs are always logically sign-extended. */
4740 for (; i < elem_bitsize; i += value_bit)
4741 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4742 break;
4743
4744 case CONST_DOUBLE:
4745 if (GET_MODE (el) == VOIDmode)
4746 {
4747 /* If this triggers, someone should have generated a
4748 CONST_INT instead. */
4749 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4750
4751 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4752 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4753 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4754 {
4755 *vp++
4756 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4757 i += value_bit;
4758 }
4759 /* It shouldn't matter what's done here, so fill it with
4760 zero. */
4761 for (; i < elem_bitsize; i += value_bit)
4762 *vp++ = 0;
4763 }
4764 else
4765 {
4766 long tmp[max_bitsize / 32];
4767 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4768
4769 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4770 gcc_assert (bitsize <= elem_bitsize);
4771 gcc_assert (bitsize % value_bit == 0);
4772
4773 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4774 GET_MODE (el));
4775
4776 /* real_to_target produces its result in words affected by
4777 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4778 and use WORDS_BIG_ENDIAN instead; see the documentation
4779 of SUBREG in rtl.texi. */
4780 for (i = 0; i < bitsize; i += value_bit)
4781 {
4782 int ibase;
4783 if (WORDS_BIG_ENDIAN)
4784 ibase = bitsize - 1 - i;
4785 else
4786 ibase = i;
4787 *vp++ = tmp[ibase / 32] >> i % 32;
4788 }
4789
4790 /* It shouldn't matter what's done here, so fill it with
4791 zero. */
4792 for (; i < elem_bitsize; i += value_bit)
4793 *vp++ = 0;
4794 }
4795 break;
4796
4797 case CONST_FIXED:
4798 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4799 {
4800 for (i = 0; i < elem_bitsize; i += value_bit)
4801 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4802 }
4803 else
4804 {
4805 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4806 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4807 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4808 i += value_bit)
4809 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4810 >> (i - HOST_BITS_PER_WIDE_INT);
4811 for (; i < elem_bitsize; i += value_bit)
4812 *vp++ = 0;
4813 }
4814 break;
4815
4816 default:
4817 gcc_unreachable ();
4818 }
4819 }
4820
4821 /* Now, pick the right byte to start with. */
4822 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4823 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4824 will already have offset 0. */
4825 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4826 {
4827 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4828 - byte);
4829 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4830 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4831 byte = (subword_byte % UNITS_PER_WORD
4832 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4833 }
4834
4835 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4836 so if it's become negative it will instead be very large.) */
4837 gcc_assert (byte < GET_MODE_SIZE (innermode));
4838
4839 /* Convert from bytes to chunks of size value_bit. */
4840 value_start = byte * (BITS_PER_UNIT / value_bit);
4841
4842 /* Re-pack the value. */
4843
4844 if (VECTOR_MODE_P (outermode))
4845 {
4846 num_elem = GET_MODE_NUNITS (outermode);
4847 result_v = rtvec_alloc (num_elem);
4848 elems = &RTVEC_ELT (result_v, 0);
4849 outer_submode = GET_MODE_INNER (outermode);
4850 }
4851 else
4852 {
4853 num_elem = 1;
4854 elems = &result_s;
4855 outer_submode = outermode;
4856 }
4857
4858 outer_class = GET_MODE_CLASS (outer_submode);
4859 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4860
4861 gcc_assert (elem_bitsize % value_bit == 0);
4862 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4863
4864 for (elem = 0; elem < num_elem; elem++)
4865 {
4866 unsigned char *vp;
4867
4868 /* Vectors are stored in target memory order. (This is probably
4869 a mistake.) */
4870 {
4871 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4872 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4873 / BITS_PER_UNIT);
4874 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4875 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4876 unsigned bytele = (subword_byte % UNITS_PER_WORD
4877 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4878 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4879 }
4880
4881 switch (outer_class)
4882 {
4883 case MODE_INT:
4884 case MODE_PARTIAL_INT:
4885 {
4886 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4887
4888 for (i = 0;
4889 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4890 i += value_bit)
4891 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4892 for (; i < elem_bitsize; i += value_bit)
4893 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4894 << (i - HOST_BITS_PER_WIDE_INT));
4895
4896 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4897 know why. */
4898 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4899 elems[elem] = gen_int_mode (lo, outer_submode);
4900 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4901 elems[elem] = immed_double_const (lo, hi, outer_submode);
4902 else
4903 return NULL_RTX;
4904 }
4905 break;
4906
4907 case MODE_FLOAT:
4908 case MODE_DECIMAL_FLOAT:
4909 {
4910 REAL_VALUE_TYPE r;
4911 long tmp[max_bitsize / 32];
4912
4913 /* real_from_target wants its input in words affected by
4914 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4915 and use WORDS_BIG_ENDIAN instead; see the documentation
4916 of SUBREG in rtl.texi. */
4917 for (i = 0; i < max_bitsize / 32; i++)
4918 tmp[i] = 0;
4919 for (i = 0; i < elem_bitsize; i += value_bit)
4920 {
4921 int ibase;
4922 if (WORDS_BIG_ENDIAN)
4923 ibase = elem_bitsize - 1 - i;
4924 else
4925 ibase = i;
4926 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4927 }
4928
4929 real_from_target (&r, tmp, outer_submode);
4930 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4931 }
4932 break;
4933
4934 case MODE_FRACT:
4935 case MODE_UFRACT:
4936 case MODE_ACCUM:
4937 case MODE_UACCUM:
4938 {
4939 FIXED_VALUE_TYPE f;
4940 f.data.low = 0;
4941 f.data.high = 0;
4942 f.mode = outer_submode;
4943
4944 for (i = 0;
4945 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4946 i += value_bit)
4947 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4948 for (; i < elem_bitsize; i += value_bit)
4949 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4950 << (i - HOST_BITS_PER_WIDE_INT));
4951
4952 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
4953 }
4954 break;
4955
4956 default:
4957 gcc_unreachable ();
4958 }
4959 }
4960 if (VECTOR_MODE_P (outermode))
4961 return gen_rtx_CONST_VECTOR (outermode, result_v);
4962 else
4963 return result_s;
4964 }
4965
4966 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4967 Return 0 if no simplifications are possible. */
4968 rtx
4969 simplify_subreg (enum machine_mode outermode, rtx op,
4970 enum machine_mode innermode, unsigned int byte)
4971 {
4972 /* Little bit of sanity checking. */
4973 gcc_assert (innermode != VOIDmode);
4974 gcc_assert (outermode != VOIDmode);
4975 gcc_assert (innermode != BLKmode);
4976 gcc_assert (outermode != BLKmode);
4977
4978 gcc_assert (GET_MODE (op) == innermode
4979 || GET_MODE (op) == VOIDmode);
4980
4981 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4982 gcc_assert (byte < GET_MODE_SIZE (innermode));
4983
4984 if (outermode == innermode && !byte)
4985 return op;
4986
4987 if (CONST_INT_P (op)
4988 || GET_CODE (op) == CONST_DOUBLE
4989 || GET_CODE (op) == CONST_FIXED
4990 || GET_CODE (op) == CONST_VECTOR)
4991 return simplify_immed_subreg (outermode, op, innermode, byte);
4992
4993 /* Changing mode twice with SUBREG => just change it once,
4994 or not at all if changing back to op's starting mode. */
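/* For instance, (subreg:SI (subreg:HI (reg:SI R) 0) 0) collapses back
   to (reg:SI R), while (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes
   the single (subreg:QI (reg:SI R) 0) (illustrative examples).  */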
4995 if (GET_CODE (op) == SUBREG)
4996 {
4997 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4998 int final_offset = byte + SUBREG_BYTE (op);
4999 rtx newx;
5000
5001 if (outermode == innermostmode
5002 && byte == 0 && SUBREG_BYTE (op) == 0)
5003 return SUBREG_REG (op);
5004
5005 /* The SUBREG_BYTE represents the offset, as if the value were stored
5006 in memory. An irritating exception is the paradoxical subreg, where
5007 we define SUBREG_BYTE to be 0; on big endian machines, this
5008 value should really be negative. For a moment, undo this exception. */
5009 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5010 {
5011 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5012 if (WORDS_BIG_ENDIAN)
5013 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5014 if (BYTES_BIG_ENDIAN)
5015 final_offset += difference % UNITS_PER_WORD;
5016 }
5017 if (SUBREG_BYTE (op) == 0
5018 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5019 {
5020 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5021 if (WORDS_BIG_ENDIAN)
5022 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5023 if (BYTES_BIG_ENDIAN)
5024 final_offset += difference % UNITS_PER_WORD;
5025 }
5026
5027 /* See whether resulting subreg will be paradoxical. */
5028 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5029 {
5030 /* In nonparadoxical subregs we can't handle negative offsets. */
5031 if (final_offset < 0)
5032 return NULL_RTX;
5033 /* Bail out in case resulting subreg would be incorrect. */
5034 if (final_offset % GET_MODE_SIZE (outermode)
5035 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5036 return NULL_RTX;
5037 }
5038 else
5039 {
5040 int offset = 0;
5041 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5042
5043 /* For a paradoxical subreg, see if we are still looking at the lower
5044 part. If so, our SUBREG_BYTE will be 0. */
5045 if (WORDS_BIG_ENDIAN)
5046 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5047 if (BYTES_BIG_ENDIAN)
5048 offset += difference % UNITS_PER_WORD;
5049 if (offset == final_offset)
5050 final_offset = 0;
5051 else
5052 return NULL_RTX;
5053 }
5054
5055 /* Recurse for further possible simplifications. */
5056 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5057 final_offset);
5058 if (newx)
5059 return newx;
5060 if (validate_subreg (outermode, innermostmode,
5061 SUBREG_REG (op), final_offset))
5062 {
5063 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5064 if (SUBREG_PROMOTED_VAR_P (op)
5065 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5066 && GET_MODE_CLASS (outermode) == MODE_INT
5067 && IN_RANGE (GET_MODE_SIZE (outermode),
5068 GET_MODE_SIZE (innermode),
5069 GET_MODE_SIZE (innermostmode))
5070 && subreg_lowpart_p (newx))
5071 {
5072 SUBREG_PROMOTED_VAR_P (newx) = 1;
5073 SUBREG_PROMOTED_UNSIGNED_SET
5074 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5075 }
5076 return newx;
5077 }
5078 return NULL_RTX;
5079 }
5080
5081 /* Merge implicit and explicit truncations. */
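/* For instance, (subreg:QI (truncate:HI (reg:SI R)) 0), when byte 0 is
   the lowpart offset, becomes a direct (truncate:QI (reg:SI R))
   (illustrative example).  */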
5082
5083 if (GET_CODE (op) == TRUNCATE
5084 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5085 && subreg_lowpart_offset (outermode, innermode) == byte)
5086 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5087 GET_MODE (XEXP (op, 0)));
5088
5089 /* SUBREG of a hard register => just change the register number
5090 and/or mode. If the hard register is not valid in that mode,
5091 suppress this simplification. If the hard register is the stack,
5092 frame, or argument pointer, leave this as a SUBREG. */
5093
5094 if (REG_P (op) && HARD_REGISTER_P (op))
5095 {
5096 unsigned int regno, final_regno;
5097
5098 regno = REGNO (op);
5099 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5100 if (HARD_REGISTER_NUM_P (final_regno))
5101 {
5102 rtx x;
5103 int final_offset = byte;
5104
5105 /* Adjust offset for paradoxical subregs. */
5106 if (byte == 0
5107 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5108 {
5109 int difference = (GET_MODE_SIZE (innermode)
5110 - GET_MODE_SIZE (outermode));
5111 if (WORDS_BIG_ENDIAN)
5112 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5113 if (BYTES_BIG_ENDIAN)
5114 final_offset += difference % UNITS_PER_WORD;
5115 }
5116
5117 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5118
5119 /* Propagate the original regno. We don't have any way to specify
5120 the offset inside the original regno, so do so only for the lowpart.
5121 The information is used only by alias analysis, which cannot
5122 grok a partial register anyway. */
5123
5124 if (subreg_lowpart_offset (outermode, innermode) == byte)
5125 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5126 return x;
5127 }
5128 }
5129
5130 /* If we have a SUBREG of a register that we are replacing and we are
5131 replacing it with a MEM, make a new MEM and try replacing the
5132 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5133 or if we would be widening it. */
5134
5135 if (MEM_P (op)
5136 && ! mode_dependent_address_p (XEXP (op, 0))
5137 /* Allow splitting of volatile memory references in case we don't
5138 have an instruction to move the whole thing. */
5139 && (! MEM_VOLATILE_P (op)
5140 || ! have_insn_for (SET, innermode))
5141 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5142 return adjust_address_nv (op, outermode, byte);
5143
5144 /* Handle complex values represented as CONCAT
5145 of real and imaginary part. */
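/* For instance, with 4-byte SFmode parts, (subreg:SF (concat:SC A B) 4)
   selects the imaginary half and simplifies to B (illustrative,
   assuming 4-byte SFmode).  */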
5146 if (GET_CODE (op) == CONCAT)
5147 {
5148 unsigned int part_size, final_offset;
5149 rtx part, res;
5150
5151 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5152 if (byte < part_size)
5153 {
5154 part = XEXP (op, 0);
5155 final_offset = byte;
5156 }
5157 else
5158 {
5159 part = XEXP (op, 1);
5160 final_offset = byte - part_size;
5161 }
5162
5163 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5164 return NULL_RTX;
5165
5166 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5167 if (res)
5168 return res;
5169 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5170 return gen_rtx_SUBREG (outermode, part, final_offset);
5171 return NULL_RTX;
5172 }
5173
5174 /* Optimize SUBREG truncations of zero and sign extended values. */
5175 if ((GET_CODE (op) == ZERO_EXTEND
5176 || GET_CODE (op) == SIGN_EXTEND)
5177 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5178 {
5179 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5180
5181 /* If we're requesting the lowpart of a zero or sign extension,
5182 there are three possibilities. If the outermode is the same
5183 as the origmode, we can omit both the extension and the subreg.
5184 If the outermode is not larger than the origmode, we can apply
5185 the truncation without the extension. Finally, if the outermode
5186 is larger than the origmode, but both are integer modes, we
5187 can just extend to the appropriate mode. */
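/* For instance, the QImode lowpart of (zero_extend:SI (reg:QI R)) is
   just (reg:QI R), while its HImode lowpart becomes
   (zero_extend:HI (reg:QI R)) (illustrative examples).  */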
5188 if (bitpos == 0)
5189 {
5190 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5191 if (outermode == origmode)
5192 return XEXP (op, 0);
5193 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5194 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5195 subreg_lowpart_offset (outermode,
5196 origmode));
5197 if (SCALAR_INT_MODE_P (outermode))
5198 return simplify_gen_unary (GET_CODE (op), outermode,
5199 XEXP (op, 0), origmode);
5200 }
5201
5202 /* A SUBREG resulting from a zero extension may fold to zero if
5203 it extracts higher bits than the ZERO_EXTEND's source bits. */
5204 if (GET_CODE (op) == ZERO_EXTEND
5205 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5206 return CONST0_RTX (outermode);
5207 }
5208
5209 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5210 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5211 the outer subreg is effectively a truncation to the original mode. */
5212 if ((GET_CODE (op) == LSHIFTRT
5213 || GET_CODE (op) == ASHIFTRT)
5214 && SCALAR_INT_MODE_P (outermode)
5215 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
5216 to avoid the possibility that an outer LSHIFTRT shifts by more
5217 than the sign extension's sign_bit_copies and introduces zeros
5218 into the high bits of the result. */
5219 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5220 && CONST_INT_P (XEXP (op, 1))
5221 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5222 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5223 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5224 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5225 return simplify_gen_binary (ASHIFTRT, outermode,
5226 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5227
5228 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5229 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5230 the outer subreg is effectively a truncation to the original mode. */
5231 if ((GET_CODE (op) == LSHIFTRT
5232 || GET_CODE (op) == ASHIFTRT)
5233 && SCALAR_INT_MODE_P (outermode)
5234 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5235 && CONST_INT_P (XEXP (op, 1))
5236 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5237 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5238 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5239 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5240 return simplify_gen_binary (LSHIFTRT, outermode,
5241 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5242
5243 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5244 (ashift:QI (x:QI) C), where C is a suitable small constant and
5245 the outer subreg is effectively a truncation to the original mode. */
5246 if (GET_CODE (op) == ASHIFT
5247 && SCALAR_INT_MODE_P (outermode)
5248 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5249 && CONST_INT_P (XEXP (op, 1))
5250 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5251 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5252 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5253 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5254 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5255 return simplify_gen_binary (ASHIFT, outermode,
5256 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5257
5258 /* Recognize a word extraction from a multi-word subreg. */
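/* For instance, on a 32-bit little-endian target,
   (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32)) 0) becomes
   (subreg:SI (reg:DI R) 4), i.e. the high word of R (illustrative,
   assuming 32-bit words and little endian).  */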
5259 if ((GET_CODE (op) == LSHIFTRT
5260 || GET_CODE (op) == ASHIFTRT)
5261 && SCALAR_INT_MODE_P (outermode)
5262 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5263 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5264 && CONST_INT_P (XEXP (op, 1))
5265 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5266 && INTVAL (XEXP (op, 1)) >= 0
5267 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5268 && byte == subreg_lowpart_offset (outermode, innermode))
5269 {
5270 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5271 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5272 (WORDS_BIG_ENDIAN
5273 ? byte - shifted_bytes
5274 : byte + shifted_bytes));
5275 }
5276
5277 return NULL_RTX;
5278 }
5279
5280 /* Make a SUBREG operation or equivalent if it folds. */
5281
5282 rtx
5283 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5284 enum machine_mode innermode, unsigned int byte)
5285 {
5286 rtx newx;
5287
5288 newx = simplify_subreg (outermode, op, innermode, byte);
5289 if (newx)
5290 return newx;
5291
5292 if (GET_CODE (op) == SUBREG
5293 || GET_CODE (op) == CONCAT
5294 || GET_MODE (op) == VOIDmode)
5295 return NULL_RTX;
5296
5297 if (validate_subreg (outermode, innermode, op, byte))
5298 return gen_rtx_SUBREG (outermode, op, byte);
5299
5300 return NULL_RTX;
5301 }
5302
5303 /* Simplify X, an rtx expression.
5304
5305 Return the simplified expression or NULL if no simplifications
5306 were possible.
5307
5308 This is the preferred entry point into the simplification routines;
5309 however, we still allow passes to call the more specific routines.
5310
5311 Right now GCC has three (yes, three) major bodies of RTL simplification
5312 code that need to be unified.
5313
5314 1. fold_rtx in cse.c. This code uses various CSE specific
5315 information to aid in RTL simplification.
5316
5317 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5318 it uses combine specific information to aid in RTL
5319 simplification.
5320
5321 3. The routines in this file.
5322
5323
5324 Long term we want to only have one body of simplification code; to
5325 get to that state I recommend the following steps:
5326
5327 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5328 which do not depend on pass-specific state into these routines.
5329
5330 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5331 use this routine whenever possible.
5332
5333 3. Allow for pass dependent state to be provided to these
5334 routines and add simplifications based on the pass dependent
5335 state. Remove code from cse.c & combine.c that becomes
5336 redundant/dead.
5337
5338 It will take time, but ultimately the compiler will be easier to
5339 maintain and improve. It's totally silly that when we add a
5340 simplification it needs to be added to 4 places (3 for RTL
5341 simplification and 1 for tree simplification). */
5342
5343 rtx
5344 simplify_rtx (const_rtx x)
5345 {
5346 const enum rtx_code code = GET_CODE (x);
5347 const enum machine_mode mode = GET_MODE (x);
5348
5349 switch (GET_RTX_CLASS (code))
5350 {
5351 case RTX_UNARY:
5352 return simplify_unary_operation (code, mode,
5353 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5354 case RTX_COMM_ARITH:
5355 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5356 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5357
5358 /* Fall through.... */
5359
5360 case RTX_BIN_ARITH:
5361 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5362
5363 case RTX_TERNARY:
5364 case RTX_BITFIELD_OPS:
5365 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5366 XEXP (x, 0), XEXP (x, 1),
5367 XEXP (x, 2));
5368
5369 case RTX_COMPARE:
5370 case RTX_COMM_COMPARE:
5371 return simplify_relational_operation (code, mode,
5372 ((GET_MODE (XEXP (x, 0))
5373 != VOIDmode)
5374 ? GET_MODE (XEXP (x, 0))
5375 : GET_MODE (XEXP (x, 1))),
5376 XEXP (x, 0),
5377 XEXP (x, 1));
5378
5379 case RTX_EXTRA:
5380 if (code == SUBREG)
5381 return simplify_subreg (mode, SUBREG_REG (x),
5382 GET_MODE (SUBREG_REG (x)),
5383 SUBREG_BYTE (x));
5384 break;
5385
5386 case RTX_OBJ:
5387 if (code == LO_SUM)
5388 {
5389 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5390 if (GET_CODE (XEXP (x, 0)) == HIGH
5391 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5392 return XEXP (x, 1);
5393 }
5394 break;
5395
5396 default:
5397 break;
5398 }
5399 return NULL;
5400 }