gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
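/* For example, if LOW has its most significant bit set, HWI_SIGN_EXTEND (LOW)
   is (HOST_WIDE_INT) -1, the high half of the sign-extended pair; otherwise
   it is 0.  */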
51
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
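/* For example, negating (const_int -128) in QImode overflows the mode;
   gen_int_mode truncates the result back into QImode, so (const_int -128)
   is returned unchanged.  */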
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
76
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
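/* For example, for MODE == SImode only the sign-bit constant
   gen_int_mode (0x80000000, SImode) satisfies this test; any other
   constant returns false.  */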
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
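/* For example, simplify_gen_binary (PLUS, SImode, const0_rtx, x) with x a
   register folds to x itself, while simplify_gen_binary (PLUS, SImode,
   const1_rtx, x) returns (plus:SI x (const_int 1)) with the constant
   placed second.  */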
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 if (GET_MODE (x) == BLKmode)
162 return x;
163
164 addr = XEXP (x, 0);
165
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
168
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
173 {
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
176 }
177
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
180
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
185 {
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
188
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
193 {
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
197 }
198 else
199 return c;
200 }
201
202 return x;
203 }
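/* For example, a (mem:DF (symbol_ref ...)) whose address refers to a
   constant pool entry is replaced by the CONST_DOUBLE stored in the pool
   (or a simplified subreg of it when the modes differ), so later folding
   can operate on the actual value.  */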
204 \f
205 /* Make a unary operation by first seeing if it folds and otherwise making
206 the specified operation. */
207
208 rtx
209 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
210 enum machine_mode op_mode)
211 {
212 rtx tem;
213
214 /* If this simplifies, use it. */
215 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
216 return tem;
217
218 return gen_rtx_fmt_e (code, mode, op);
219 }
220
221 /* Likewise for ternary operations. */
222
223 rtx
224 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
225 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
226 {
227 rtx tem;
228
229 /* If this simplifies, use it. */
230 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
231 op0, op1, op2)))
232 return tem;
233
234 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
235 }
236
237 /* Likewise, for relational operations.
238 CMP_MODE specifies mode comparison is done in. */
239
240 rtx
241 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
242 enum machine_mode cmp_mode, rtx op0, rtx op1)
243 {
244 rtx tem;
245
246 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
247 op0, op1)))
248 return tem;
249
250 return gen_rtx_fmt_ee (code, mode, op0, op1);
251 }
252 \f
253 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
254 resulting RTX. Return a new RTX which is as simplified as possible. */
255
256 rtx
257 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
258 {
259 enum rtx_code code = GET_CODE (x);
260 enum machine_mode mode = GET_MODE (x);
261 enum machine_mode op_mode;
262 rtx op0, op1, op2;
263
264 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
265 to build a new expression substituting recursively. If we can't do
266 anything, return our input. */
267
268 if (x == old_rtx)
269 return new_rtx;
270
271 switch (GET_RTX_CLASS (code))
272 {
273 case RTX_UNARY:
274 op0 = XEXP (x, 0);
275 op_mode = GET_MODE (op0);
276 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
277 if (op0 == XEXP (x, 0))
278 return x;
279 return simplify_gen_unary (code, mode, op0, op_mode);
280
281 case RTX_BIN_ARITH:
282 case RTX_COMM_ARITH:
283 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
284 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
286 return x;
287 return simplify_gen_binary (code, mode, op0, op1);
288
289 case RTX_COMPARE:
290 case RTX_COMM_COMPARE:
291 op0 = XEXP (x, 0);
292 op1 = XEXP (x, 1);
293 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
296 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
297 return x;
298 return simplify_gen_relational (code, mode, op_mode, op0, op1);
299
300 case RTX_TERNARY:
301 case RTX_BITFIELD_OPS:
302 op0 = XEXP (x, 0);
303 op_mode = GET_MODE (op0);
304 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
305 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
306 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
307 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
308 return x;
309 if (op_mode == VOIDmode)
310 op_mode = GET_MODE (op0);
311 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
312
313 case RTX_EXTRA:
314 /* The only case we try to handle is a SUBREG. */
315 if (code == SUBREG)
316 {
317 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
318 if (op0 == SUBREG_REG (x))
319 return x;
320 op0 = simplify_gen_subreg (GET_MODE (x), op0,
321 GET_MODE (SUBREG_REG (x)),
322 SUBREG_BYTE (x));
323 return op0 ? op0 : x;
324 }
325 break;
326
327 case RTX_OBJ:
328 if (code == MEM)
329 {
330 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
331 if (op0 == XEXP (x, 0))
332 return x;
333 return replace_equiv_address_nv (x, op0);
334 }
335 else if (code == LO_SUM)
336 {
337 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
338 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
339
340 /* (lo_sum (high x) x) -> x */
341 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
342 return op1;
343
344 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
345 return x;
346 return gen_rtx_LO_SUM (mode, op0, op1);
347 }
348 else if (code == REG)
349 {
350 if (rtx_equal_p (x, old_rtx))
351 return new_rtx;
352 }
353 break;
354
355 default:
356 break;
357 }
358 return x;
359 }
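/* For example, replacing (reg:SI 1) with (reg:SI 2) in
   (plus:SI (reg:SI 1) (const_int 4)) rebuilds the expression as
   (plus:SI (reg:SI 2) (const_int 4)), simplifying further where possible.  */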
360 \f
361 /* Try to simplify a unary operation CODE whose output mode is to be
362 MODE with input operand OP whose mode was originally OP_MODE.
363 Return zero if no simplification can be made. */
364 rtx
365 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
366 rtx op, enum machine_mode op_mode)
367 {
368 rtx trueop, tem;
369
370 if (GET_CODE (op) == CONST)
371 op = XEXP (op, 0);
372
373 trueop = avoid_constant_pool_reference (op);
374
375 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
376 if (tem)
377 return tem;
378
379 return simplify_unary_operation_1 (code, mode, op);
380 }
381
382 /* Perform some simplifications we can do even if the operands
383 aren't constant. */
384 static rtx
385 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
386 {
387 enum rtx_code reversed;
388 rtx temp;
389
390 switch (code)
391 {
392 case NOT:
393 /* (not (not X)) == X. */
394 if (GET_CODE (op) == NOT)
395 return XEXP (op, 0);
396
397 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
398 comparison is all ones. */
399 if (COMPARISON_P (op)
400 && (mode == BImode || STORE_FLAG_VALUE == -1)
401 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
402 return simplify_gen_relational (reversed, mode, VOIDmode,
403 XEXP (op, 0), XEXP (op, 1));
404
405 /* (not (plus X -1)) can become (neg X). */
406 if (GET_CODE (op) == PLUS
407 && XEXP (op, 1) == constm1_rtx)
408 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
409
410 /* Similarly, (not (neg X)) is (plus X -1). */
411 if (GET_CODE (op) == NEG)
412 return plus_constant (XEXP (op, 0), -1);
413
414 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
415 if (GET_CODE (op) == XOR
416 && GET_CODE (XEXP (op, 1)) == CONST_INT
417 && (temp = simplify_unary_operation (NOT, mode,
418 XEXP (op, 1), mode)) != 0)
419 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
420
421 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
422 if (GET_CODE (op) == PLUS
423 && GET_CODE (XEXP (op, 1)) == CONST_INT
424 && mode_signbit_p (mode, XEXP (op, 1))
425 && (temp = simplify_unary_operation (NOT, mode,
426 XEXP (op, 1), mode)) != 0)
427 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
428
429
430 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
431 operands other than 1, but that is not valid. We could do a
432 similar simplification for (not (lshiftrt C X)) where C is
433 just the sign bit, but this doesn't seem common enough to
434 bother with. */
435 if (GET_CODE (op) == ASHIFT
436 && XEXP (op, 0) == const1_rtx)
437 {
438 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
439 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
440 }
441
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
445
446 if (STORE_FLAG_VALUE == -1
447 && GET_CODE (op) == ASHIFTRT
448 && GET_CODE (XEXP (op, 1)) == CONST_INT
449 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
450 return simplify_gen_relational (GE, mode, VOIDmode,
451 XEXP (op, 0), const0_rtx);
452
453
454 if (GET_CODE (op) == SUBREG
455 && subreg_lowpart_p (op)
456 && (GET_MODE_SIZE (GET_MODE (op))
457 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
458 && GET_CODE (SUBREG_REG (op)) == ASHIFT
459 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
460 {
461 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
462 rtx x;
463
464 x = gen_rtx_ROTATE (inner_mode,
465 simplify_gen_unary (NOT, inner_mode, const1_rtx,
466 inner_mode),
467 XEXP (SUBREG_REG (op), 1));
468 return rtl_hooks.gen_lowpart_no_emit (mode, x);
469 }
470
471 /* Apply De Morgan's laws to reduce number of patterns for machines
472 with negating logical insns (and-not, nand, etc.). If result has
473 only one NOT, put it first, since that is how the patterns are
474 coded. */
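/* For example, (not (and X Y)) becomes (ior (not X) (not Y)) and
   (not (ior X Y)) becomes (and (not X) (not Y)).  */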
475
476 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
477 {
478 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
479 enum machine_mode op_mode;
480
481 op_mode = GET_MODE (in1);
482 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
483
484 op_mode = GET_MODE (in2);
485 if (op_mode == VOIDmode)
486 op_mode = mode;
487 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
488
489 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
490 {
491 rtx tem = in2;
492 in2 = in1; in1 = tem;
493 }
494
495 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
496 mode, in1, in2);
497 }
498 break;
499
500 case NEG:
501 /* (neg (neg X)) == X. */
502 if (GET_CODE (op) == NEG)
503 return XEXP (op, 0);
504
505 /* (neg (plus X 1)) can become (not X). */
506 if (GET_CODE (op) == PLUS
507 && XEXP (op, 1) == const1_rtx)
508 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
509
510 /* Similarly, (neg (not X)) is (plus X 1). */
511 if (GET_CODE (op) == NOT)
512 return plus_constant (XEXP (op, 0), 1);
513
514 /* (neg (minus X Y)) can become (minus Y X). This transformation
515 isn't safe for modes with signed zeros, since if X and Y are
516 both +0, (minus Y X) is the same as (minus X Y). If the
517 rounding mode is towards +infinity (or -infinity) then the two
518 expressions will be rounded differently. */
519 if (GET_CODE (op) == MINUS
520 && !HONOR_SIGNED_ZEROS (mode)
521 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
522 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
523
524 if (GET_CODE (op) == PLUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 {
528 /* (neg (plus A C)) is simplified to (minus -C A). */
529 if (GET_CODE (XEXP (op, 1)) == CONST_INT
530 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
531 {
532 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
533 if (temp)
534 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
535 }
536
537 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
538 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
540 }
541
542 /* (neg (mult A B)) becomes (mult (neg A) B).
543 This works even for floating-point values. */
544 if (GET_CODE (op) == MULT
545 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
546 {
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
549 }
550
551 /* NEG commutes with ASHIFT since it is multiplication. Only do
552 this if we can then eliminate the NEG (e.g., if the operand
553 is a constant). */
554 if (GET_CODE (op) == ASHIFT)
555 {
556 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
557 if (temp)
558 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
559 }
560
561 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
562 C is equal to the width of MODE minus 1. */
563 if (GET_CODE (op) == ASHIFTRT
564 && GET_CODE (XEXP (op, 1)) == CONST_INT
565 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
566 return simplify_gen_binary (LSHIFTRT, mode,
567 XEXP (op, 0), XEXP (op, 1));
568
569 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
570 C is equal to the width of MODE minus 1. */
571 if (GET_CODE (op) == LSHIFTRT
572 && GET_CODE (XEXP (op, 1)) == CONST_INT
573 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
574 return simplify_gen_binary (ASHIFTRT, mode,
575 XEXP (op, 0), XEXP (op, 1));
576
577 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
578 if (GET_CODE (op) == XOR
579 && XEXP (op, 1) == const1_rtx
580 && nonzero_bits (XEXP (op, 0), mode) == 1)
581 return plus_constant (XEXP (op, 0), -1);
582
583 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
584 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
585 if (GET_CODE (op) == LT
586 && XEXP (op, 1) == const0_rtx
587 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
588 {
589 enum machine_mode inner = GET_MODE (XEXP (op, 0));
590 int isize = GET_MODE_BITSIZE (inner);
591 if (STORE_FLAG_VALUE == 1)
592 {
593 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
594 GEN_INT (isize - 1));
595 if (mode == inner)
596 return temp;
597 if (GET_MODE_BITSIZE (mode) > isize)
598 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
599 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
600 }
601 else if (STORE_FLAG_VALUE == -1)
602 {
603 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
604 GEN_INT (isize - 1));
605 if (mode == inner)
606 return temp;
607 if (GET_MODE_BITSIZE (mode) > isize)
608 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
609 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
610 }
611 }
612 break;
613
614 case TRUNCATE:
615 /* We can't handle truncation to a partial integer mode here
616 because we don't know the real bitsize of the partial
617 integer mode. */
618 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
619 break;
620
621 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
622 if ((GET_CODE (op) == SIGN_EXTEND
623 || GET_CODE (op) == ZERO_EXTEND)
624 && GET_MODE (XEXP (op, 0)) == mode)
625 return XEXP (op, 0);
626
627 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
628 (OP:SI foo:SI) if OP is NEG or ABS. */
629 if ((GET_CODE (op) == ABS
630 || GET_CODE (op) == NEG)
631 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
632 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
633 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
634 return simplify_gen_unary (GET_CODE (op), mode,
635 XEXP (XEXP (op, 0), 0), mode);
636
637 /* (truncate:A (subreg:B (truncate:C X) 0)) is
638 (truncate:A X). */
639 if (GET_CODE (op) == SUBREG
640 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
641 && subreg_lowpart_p (op))
642 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
643 GET_MODE (XEXP (SUBREG_REG (op), 0)));
644
645 /* If we know that the value is already truncated, we can
646 replace the TRUNCATE with a SUBREG. Note that this is also
647 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
 648 modes; we just have to apply a different definition for
649 truncation. But don't do this for an (LSHIFTRT (MULT ...))
650 since this will cause problems with the umulXi3_highpart
651 patterns. */
652 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
653 GET_MODE_BITSIZE (GET_MODE (op)))
654 ? (num_sign_bit_copies (op, GET_MODE (op))
655 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
656 - GET_MODE_BITSIZE (mode)))
657 : truncated_to_mode (mode, op))
658 && ! (GET_CODE (op) == LSHIFTRT
659 && GET_CODE (XEXP (op, 0)) == MULT))
660 return rtl_hooks.gen_lowpart_no_emit (mode, op);
661
662 /* A truncate of a comparison can be replaced with a subreg if
663 STORE_FLAG_VALUE permits. This is like the previous test,
664 but it works even if the comparison is done in a mode larger
665 than HOST_BITS_PER_WIDE_INT. */
666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
667 && COMPARISON_P (op)
668 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
669 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 break;
671
672 case FLOAT_TRUNCATE:
673 if (DECIMAL_FLOAT_MODE_P (mode))
674 break;
675
676 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
677 if (GET_CODE (op) == FLOAT_EXTEND
678 && GET_MODE (XEXP (op, 0)) == mode)
679 return XEXP (op, 0);
680
681 /* (float_truncate:SF (float_truncate:DF foo:XF))
682 = (float_truncate:SF foo:XF).
683 This may eliminate double rounding, so it is unsafe.
684
685 (float_truncate:SF (float_extend:XF foo:DF))
686 = (float_truncate:SF foo:DF).
687
688 (float_truncate:DF (float_extend:XF foo:SF))
 689 = (float_extend:DF foo:SF). */
690 if ((GET_CODE (op) == FLOAT_TRUNCATE
691 && flag_unsafe_math_optimizations)
692 || GET_CODE (op) == FLOAT_EXTEND)
693 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
694 0)))
695 > GET_MODE_SIZE (mode)
696 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
697 mode,
698 XEXP (op, 0), mode);
699
700 /* (float_truncate (float x)) is (float x) */
701 if (GET_CODE (op) == FLOAT
702 && (flag_unsafe_math_optimizations
703 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
704 && ((unsigned)significand_size (GET_MODE (op))
705 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
706 - num_sign_bit_copies (XEXP (op, 0),
707 GET_MODE (XEXP (op, 0))))))))
708 return simplify_gen_unary (FLOAT, mode,
709 XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)));
711
712 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
713 (OP:SF foo:SF) if OP is NEG or ABS. */
714 if ((GET_CODE (op) == ABS
715 || GET_CODE (op) == NEG)
716 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
718 return simplify_gen_unary (GET_CODE (op), mode,
719 XEXP (XEXP (op, 0), 0), mode);
720
721 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
722 is (float_truncate:SF x). */
723 if (GET_CODE (op) == SUBREG
724 && subreg_lowpart_p (op)
725 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
726 return SUBREG_REG (op);
727 break;
728
729 case FLOAT_EXTEND:
730 if (DECIMAL_FLOAT_MODE_P (mode))
731 break;
732
733 /* (float_extend (float_extend x)) is (float_extend x)
734
735 (float_extend (float x)) is (float x) assuming that double
736 rounding can't happen.
737 */
738 if (GET_CODE (op) == FLOAT_EXTEND
739 || (GET_CODE (op) == FLOAT
740 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
741 && ((unsigned)significand_size (GET_MODE (op))
742 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
743 - num_sign_bit_copies (XEXP (op, 0),
744 GET_MODE (XEXP (op, 0)))))))
745 return simplify_gen_unary (GET_CODE (op), mode,
746 XEXP (op, 0),
747 GET_MODE (XEXP (op, 0)));
748
749 break;
750
751 case ABS:
752 /* (abs (neg <foo>)) -> (abs <foo>) */
753 if (GET_CODE (op) == NEG)
754 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
755 GET_MODE (XEXP (op, 0)));
756
757 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
758 do nothing. */
759 if (GET_MODE (op) == VOIDmode)
760 break;
761
762 /* If operand is something known to be positive, ignore the ABS. */
763 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
764 || ((GET_MODE_BITSIZE (GET_MODE (op))
765 <= HOST_BITS_PER_WIDE_INT)
766 && ((nonzero_bits (op, GET_MODE (op))
767 & ((HOST_WIDE_INT) 1
768 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
769 == 0)))
770 return op;
771
772 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
773 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
774 return gen_rtx_NEG (mode, op);
775
776 break;
777
778 case FFS:
779 /* (ffs (*_extend <X>)) = (ffs <X>) */
780 if (GET_CODE (op) == SIGN_EXTEND
781 || GET_CODE (op) == ZERO_EXTEND)
782 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
783 GET_MODE (XEXP (op, 0)));
784 break;
785
786 case POPCOUNT:
787 switch (GET_CODE (op))
788 {
789 case BSWAP:
790 case ZERO_EXTEND:
791 /* (popcount (zero_extend <X>)) = (popcount <X>) */
792 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
794
795 case ROTATE:
796 case ROTATERT:
797 /* Rotations don't affect popcount. */
798 if (!side_effects_p (XEXP (op, 1)))
799 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
802
803 default:
804 break;
805 }
806 break;
807
808 case PARITY:
809 switch (GET_CODE (op))
810 {
811 case NOT:
812 case BSWAP:
813 case ZERO_EXTEND:
814 case SIGN_EXTEND:
815 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
816 GET_MODE (XEXP (op, 0)));
817
818 case ROTATE:
819 case ROTATERT:
820 /* Rotations don't affect parity. */
821 if (!side_effects_p (XEXP (op, 1)))
822 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
823 GET_MODE (XEXP (op, 0)));
824 break;
825
826 default:
827 break;
828 }
829 break;
830
831 case BSWAP:
832 /* (bswap (bswap x)) -> x. */
833 if (GET_CODE (op) == BSWAP)
834 return XEXP (op, 0);
835 break;
836
837 case FLOAT:
838 /* (float (sign_extend <X>)) = (float <X>). */
839 if (GET_CODE (op) == SIGN_EXTEND)
840 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
841 GET_MODE (XEXP (op, 0)));
842 break;
843
844 case SIGN_EXTEND:
845 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
846 becomes just the MINUS if its mode is MODE. This allows
847 folding switch statements on machines using casesi (such as
848 the VAX). */
849 if (GET_CODE (op) == TRUNCATE
850 && GET_MODE (XEXP (op, 0)) == mode
851 && GET_CODE (XEXP (op, 0)) == MINUS
852 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
853 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
854 return XEXP (op, 0);
855
856 /* Check for a sign extension of a subreg of a promoted
857 variable, where the promotion is sign-extended, and the
858 target mode is the same as the variable's promotion. */
859 if (GET_CODE (op) == SUBREG
860 && SUBREG_PROMOTED_VAR_P (op)
861 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
862 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
863 return rtl_hooks.gen_lowpart_no_emit (mode, op);
864
865 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
866 if (! POINTERS_EXTEND_UNSIGNED
867 && mode == Pmode && GET_MODE (op) == ptr_mode
868 && (CONSTANT_P (op)
869 || (GET_CODE (op) == SUBREG
870 && REG_P (SUBREG_REG (op))
871 && REG_POINTER (SUBREG_REG (op))
872 && GET_MODE (SUBREG_REG (op)) == Pmode)))
873 return convert_memory_address (Pmode, op);
874 #endif
875 break;
876
877 case ZERO_EXTEND:
878 /* Check for a zero extension of a subreg of a promoted
879 variable, where the promotion is zero-extended, and the
880 target mode is the same as the variable's promotion. */
881 if (GET_CODE (op) == SUBREG
882 && SUBREG_PROMOTED_VAR_P (op)
883 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
884 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
885 return rtl_hooks.gen_lowpart_no_emit (mode, op);
886
887 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
888 if (POINTERS_EXTEND_UNSIGNED > 0
889 && mode == Pmode && GET_MODE (op) == ptr_mode
890 && (CONSTANT_P (op)
891 || (GET_CODE (op) == SUBREG
892 && REG_P (SUBREG_REG (op))
893 && REG_POINTER (SUBREG_REG (op))
894 && GET_MODE (SUBREG_REG (op)) == Pmode)))
895 return convert_memory_address (Pmode, op);
896 #endif
897 break;
898
899 default:
900 break;
901 }
902
903 return 0;
904 }
905
906 /* Try to compute the value of a unary operation CODE whose output mode is to
907 be MODE with input operand OP whose mode was originally OP_MODE.
908 Return zero if the value cannot be computed. */
909 rtx
910 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
911 rtx op, enum machine_mode op_mode)
912 {
913 unsigned int width = GET_MODE_BITSIZE (mode);
914
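/* For example, (vec_duplicate:V4SI (const_int 5)) folds to the constant
   vector [5 5 5 5].  */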
915 if (code == VEC_DUPLICATE)
916 {
917 gcc_assert (VECTOR_MODE_P (mode));
918 if (GET_MODE (op) != VOIDmode)
919 {
920 if (!VECTOR_MODE_P (GET_MODE (op)))
921 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
922 else
923 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
924 (GET_MODE (op)));
925 }
926 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
927 || GET_CODE (op) == CONST_VECTOR)
928 {
929 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
930 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
931 rtvec v = rtvec_alloc (n_elts);
932 unsigned int i;
933
934 if (GET_CODE (op) != CONST_VECTOR)
935 for (i = 0; i < n_elts; i++)
936 RTVEC_ELT (v, i) = op;
937 else
938 {
939 enum machine_mode inmode = GET_MODE (op);
940 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
941 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
942
943 gcc_assert (in_n_elts < n_elts);
944 gcc_assert ((n_elts % in_n_elts) == 0);
945 for (i = 0; i < n_elts; i++)
946 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
947 }
948 return gen_rtx_CONST_VECTOR (mode, v);
949 }
950 }
951
952 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
953 {
954 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
955 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
956 enum machine_mode opmode = GET_MODE (op);
957 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
958 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
959 rtvec v = rtvec_alloc (n_elts);
960 unsigned int i;
961
962 gcc_assert (op_n_elts == n_elts);
963 for (i = 0; i < n_elts; i++)
964 {
965 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
966 CONST_VECTOR_ELT (op, i),
967 GET_MODE_INNER (opmode));
968 if (!x)
969 return 0;
970 RTVEC_ELT (v, i) = x;
971 }
972 return gen_rtx_CONST_VECTOR (mode, v);
973 }
974
975 /* The order of these tests is critical so that, for example, we don't
976 check the wrong mode (input vs. output) for a conversion operation,
977 such as FIX. At some point, this should be simplified. */
978
979 if (code == FLOAT && GET_MODE (op) == VOIDmode
980 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
981 {
982 HOST_WIDE_INT hv, lv;
983 REAL_VALUE_TYPE d;
984
985 if (GET_CODE (op) == CONST_INT)
986 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
987 else
988 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
989
990 REAL_VALUE_FROM_INT (d, lv, hv, mode);
991 d = real_value_truncate (mode, d);
992 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
993 }
994 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
995 && (GET_CODE (op) == CONST_DOUBLE
996 || GET_CODE (op) == CONST_INT))
997 {
998 HOST_WIDE_INT hv, lv;
999 REAL_VALUE_TYPE d;
1000
1001 if (GET_CODE (op) == CONST_INT)
1002 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1003 else
1004 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1005
1006 if (op_mode == VOIDmode)
1007 {
1008 /* We don't know how to interpret negative-looking numbers in
1009 this case, so don't try to fold those. */
1010 if (hv < 0)
1011 return 0;
1012 }
1013 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1014 ;
1015 else
1016 hv = 0, lv &= GET_MODE_MASK (op_mode);
1017
1018 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1019 d = real_value_truncate (mode, d);
1020 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1021 }
1022
1023 if (GET_CODE (op) == CONST_INT
1024 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1025 {
1026 HOST_WIDE_INT arg0 = INTVAL (op);
1027 HOST_WIDE_INT val;
1028
1029 switch (code)
1030 {
1031 case NOT:
1032 val = ~ arg0;
1033 break;
1034
1035 case NEG:
1036 val = - arg0;
1037 break;
1038
1039 case ABS:
1040 val = (arg0 >= 0 ? arg0 : - arg0);
1041 break;
1042
1043 case FFS:
1044 /* Don't use ffs here. Instead, get low order bit and then its
1045 number. If arg0 is zero, this will return 0, as desired. */
1046 arg0 &= GET_MODE_MASK (mode);
1047 val = exact_log2 (arg0 & (- arg0)) + 1;
1048 break;
1049
1050 case CLZ:
1051 arg0 &= GET_MODE_MASK (mode);
1052 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1053 ;
1054 else
1055 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1056 break;
1057
1058 case CTZ:
1059 arg0 &= GET_MODE_MASK (mode);
1060 if (arg0 == 0)
1061 {
1062 /* Even if the value at zero is undefined, we have to come
1063 up with some replacement. Seems good enough. */
1064 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1065 val = GET_MODE_BITSIZE (mode);
1066 }
1067 else
1068 val = exact_log2 (arg0 & -arg0);
1069 break;
1070
1071 case POPCOUNT:
1072 arg0 &= GET_MODE_MASK (mode);
1073 val = 0;
1074 while (arg0)
1075 val++, arg0 &= arg0 - 1;
1076 break;
1077
1078 case PARITY:
1079 arg0 &= GET_MODE_MASK (mode);
1080 val = 0;
1081 while (arg0)
1082 val++, arg0 &= arg0 - 1;
1083 val &= 1;
1084 break;
1085
1086 case BSWAP:
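/* For example, with a 32-bit mode the value 0x12345678 becomes
   0x78563412.  */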
1087 {
1088 unsigned int s;
1089
1090 val = 0;
1091 for (s = 0; s < width; s += 8)
1092 {
1093 unsigned int d = width - s - 8;
1094 unsigned HOST_WIDE_INT byte;
1095 byte = (arg0 >> s) & 0xff;
1096 val |= byte << d;
1097 }
1098 }
1099 break;
1100
1101 case TRUNCATE:
1102 val = arg0;
1103 break;
1104
1105 case ZERO_EXTEND:
1106 /* When zero-extending a CONST_INT, we need to know its
1107 original mode. */
1108 gcc_assert (op_mode != VOIDmode);
1109 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1110 {
1111 /* If we were really extending the mode,
1112 we would have to distinguish between zero-extension
1113 and sign-extension. */
1114 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1115 val = arg0;
1116 }
1117 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1118 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1119 else
1120 return 0;
1121 break;
1122
1123 case SIGN_EXTEND:
1124 if (op_mode == VOIDmode)
1125 op_mode = mode;
1126 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1127 {
1128 /* If we were really extending the mode,
1129 we would have to distinguish between zero-extension
1130 and sign-extension. */
1131 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1132 val = arg0;
1133 }
1134 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1135 {
1136 val
1137 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1138 if (val
1139 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1140 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1141 }
1142 else
1143 return 0;
1144 break;
1145
1146 case SQRT:
1147 case FLOAT_EXTEND:
1148 case FLOAT_TRUNCATE:
1149 case SS_TRUNCATE:
1150 case US_TRUNCATE:
1151 case SS_NEG:
1152 case US_NEG:
1153 return 0;
1154
1155 default:
1156 gcc_unreachable ();
1157 }
1158
1159 return gen_int_mode (val, mode);
1160 }
1161
1162 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1163 for a DImode operation on a CONST_INT. */
1164 else if (GET_MODE (op) == VOIDmode
1165 && width <= HOST_BITS_PER_WIDE_INT * 2
1166 && (GET_CODE (op) == CONST_DOUBLE
1167 || GET_CODE (op) == CONST_INT))
1168 {
1169 unsigned HOST_WIDE_INT l1, lv;
1170 HOST_WIDE_INT h1, hv;
1171
1172 if (GET_CODE (op) == CONST_DOUBLE)
1173 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1174 else
1175 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1176
1177 switch (code)
1178 {
1179 case NOT:
1180 lv = ~ l1;
1181 hv = ~ h1;
1182 break;
1183
1184 case NEG:
1185 neg_double (l1, h1, &lv, &hv);
1186 break;
1187
1188 case ABS:
1189 if (h1 < 0)
1190 neg_double (l1, h1, &lv, &hv);
1191 else
1192 lv = l1, hv = h1;
1193 break;
1194
1195 case FFS:
1196 hv = 0;
1197 if (l1 == 0)
1198 {
1199 if (h1 == 0)
1200 lv = 0;
1201 else
1202 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1203 }
1204 else
1205 lv = exact_log2 (l1 & -l1) + 1;
1206 break;
1207
1208 case CLZ:
1209 hv = 0;
1210 if (h1 != 0)
1211 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1212 - HOST_BITS_PER_WIDE_INT;
1213 else if (l1 != 0)
1214 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1215 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1216 lv = GET_MODE_BITSIZE (mode);
1217 break;
1218
1219 case CTZ:
1220 hv = 0;
1221 if (l1 != 0)
1222 lv = exact_log2 (l1 & -l1);
1223 else if (h1 != 0)
1224 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1225 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1226 lv = GET_MODE_BITSIZE (mode);
1227 break;
1228
1229 case POPCOUNT:
1230 hv = 0;
1231 lv = 0;
1232 while (l1)
1233 lv++, l1 &= l1 - 1;
1234 while (h1)
1235 lv++, h1 &= h1 - 1;
1236 break;
1237
1238 case PARITY:
1239 hv = 0;
1240 lv = 0;
1241 while (l1)
1242 lv++, l1 &= l1 - 1;
1243 while (h1)
1244 lv++, h1 &= h1 - 1;
1245 lv &= 1;
1246 break;
1247
1248 case BSWAP:
1249 {
1250 unsigned int s;
1251
1252 hv = 0;
1253 lv = 0;
1254 for (s = 0; s < width; s += 8)
1255 {
1256 unsigned int d = width - s - 8;
1257 unsigned HOST_WIDE_INT byte;
1258
1259 if (s < HOST_BITS_PER_WIDE_INT)
1260 byte = (l1 >> s) & 0xff;
1261 else
1262 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1263
1264 if (d < HOST_BITS_PER_WIDE_INT)
1265 lv |= byte << d;
1266 else
1267 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1268 }
1269 }
1270 break;
1271
1272 case TRUNCATE:
1273 /* This is just a change-of-mode, so do nothing. */
1274 lv = l1, hv = h1;
1275 break;
1276
1277 case ZERO_EXTEND:
1278 gcc_assert (op_mode != VOIDmode);
1279
1280 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1281 return 0;
1282
1283 hv = 0;
1284 lv = l1 & GET_MODE_MASK (op_mode);
1285 break;
1286
1287 case SIGN_EXTEND:
1288 if (op_mode == VOIDmode
1289 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1290 return 0;
1291 else
1292 {
1293 lv = l1 & GET_MODE_MASK (op_mode);
1294 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1295 && (lv & ((HOST_WIDE_INT) 1
1296 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1297 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1298
1299 hv = HWI_SIGN_EXTEND (lv);
1300 }
1301 break;
1302
1303 case SQRT:
1304 return 0;
1305
1306 default:
1307 return 0;
1308 }
1309
1310 return immed_double_const (lv, hv, mode);
1311 }
1312
1313 else if (GET_CODE (op) == CONST_DOUBLE
1314 && SCALAR_FLOAT_MODE_P (mode))
1315 {
1316 REAL_VALUE_TYPE d, t;
1317 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1318
1319 switch (code)
1320 {
1321 case SQRT:
1322 if (HONOR_SNANS (mode) && real_isnan (&d))
1323 return 0;
1324 real_sqrt (&t, mode, &d);
1325 d = t;
1326 break;
1327 case ABS:
1328 d = REAL_VALUE_ABS (d);
1329 break;
1330 case NEG:
1331 d = REAL_VALUE_NEGATE (d);
1332 break;
1333 case FLOAT_TRUNCATE:
1334 d = real_value_truncate (mode, d);
1335 break;
1336 case FLOAT_EXTEND:
1337 /* All this does is change the mode. */
1338 break;
1339 case FIX:
1340 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1341 break;
1342 case NOT:
1343 {
1344 long tmp[4];
1345 int i;
1346
1347 real_to_target (tmp, &d, GET_MODE (op));
1348 for (i = 0; i < 4; i++)
1349 tmp[i] = ~tmp[i];
1350 real_from_target (&d, tmp, mode);
1351 break;
1352 }
1353 default:
1354 gcc_unreachable ();
1355 }
1356 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1357 }
1358
1359 else if (GET_CODE (op) == CONST_DOUBLE
1360 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1361 && GET_MODE_CLASS (mode) == MODE_INT
1362 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1363 {
1364 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1365 operators are intentionally left unspecified (to ease implementation
1366 by target backends), for consistency, this routine implements the
1367 same semantics for constant folding as used by the middle-end. */
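/* For example, folding (fix:SI x) for x larger than 2**31 - 1 saturates
   to the signed maximum 0x7fffffff, an out-of-range UNSIGNED_FIX
   saturates to the unsigned maximum of MODE, and NaN inputs fold
   to zero.  */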
1368
1369 /* This was formerly used only for non-IEEE float.
1370 eggert@twinsun.com says it is safe for IEEE also. */
1371 HOST_WIDE_INT xh, xl, th, tl;
1372 REAL_VALUE_TYPE x, t;
1373 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1374 switch (code)
1375 {
1376 case FIX:
1377 if (REAL_VALUE_ISNAN (x))
1378 return const0_rtx;
1379
1380 /* Test against the signed upper bound. */
1381 if (width > HOST_BITS_PER_WIDE_INT)
1382 {
1383 th = ((unsigned HOST_WIDE_INT) 1
1384 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1385 tl = -1;
1386 }
1387 else
1388 {
1389 th = 0;
1390 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1391 }
1392 real_from_integer (&t, VOIDmode, tl, th, 0);
1393 if (REAL_VALUES_LESS (t, x))
1394 {
1395 xh = th;
1396 xl = tl;
1397 break;
1398 }
1399
1400 /* Test against the signed lower bound. */
1401 if (width > HOST_BITS_PER_WIDE_INT)
1402 {
1403 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1404 tl = 0;
1405 }
1406 else
1407 {
1408 th = -1;
1409 tl = (HOST_WIDE_INT) -1 << (width - 1);
1410 }
1411 real_from_integer (&t, VOIDmode, tl, th, 0);
1412 if (REAL_VALUES_LESS (x, t))
1413 {
1414 xh = th;
1415 xl = tl;
1416 break;
1417 }
1418 REAL_VALUE_TO_INT (&xl, &xh, x);
1419 break;
1420
1421 case UNSIGNED_FIX:
1422 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1423 return const0_rtx;
1424
1425 /* Test against the unsigned upper bound. */
1426 if (width == 2*HOST_BITS_PER_WIDE_INT)
1427 {
1428 th = -1;
1429 tl = -1;
1430 }
1431 else if (width >= HOST_BITS_PER_WIDE_INT)
1432 {
1433 th = ((unsigned HOST_WIDE_INT) 1
1434 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1435 tl = -1;
1436 }
1437 else
1438 {
1439 th = 0;
1440 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1441 }
1442 real_from_integer (&t, VOIDmode, tl, th, 1);
1443 if (REAL_VALUES_LESS (t, x))
1444 {
1445 xh = th;
1446 xl = tl;
1447 break;
1448 }
1449
1450 REAL_VALUE_TO_INT (&xl, &xh, x);
1451 break;
1452
1453 default:
1454 gcc_unreachable ();
1455 }
1456 return immed_double_const (xl, xh, mode);
1457 }
1458
1459 return NULL_RTX;
1460 }
1461 \f
1462 /* Subroutine of simplify_binary_operation to simplify a commutative,
1463 associative binary operation CODE with result mode MODE, operating
1464 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1465 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1466 canonicalization is possible. */
1467
1468 static rtx
1469 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1470 rtx op0, rtx op1)
1471 {
1472 rtx tem;
1473
1474 /* Linearize the operator to the left. */
1475 if (GET_CODE (op1) == code)
1476 {
1477 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1478 if (GET_CODE (op0) == code)
1479 {
1480 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1481 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1482 }
1483
1484 /* "a op (b op c)" becomes "(b op c) op a". */
1485 if (! swap_commutative_operands_p (op1, op0))
1486 return simplify_gen_binary (code, mode, op1, op0);
1487
1488 tem = op0;
1489 op0 = op1;
1490 op1 = tem;
1491 }
1492
1493 if (GET_CODE (op0) == code)
1494 {
1495 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1496 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1497 {
1498 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1499 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1500 }
1501
1502 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1503 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1504 if (tem != 0)
1505 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1506
1507 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1508 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1509 if (tem != 0)
1510 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1511 }
1512
1513 return 0;
1514 }
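/* For example, (plus (plus x (const_int 3)) y) is re-associated as
   (plus (plus x y) (const_int 3)), moving the constant outermost so it
   can combine with other constants.  */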
1515
1516
1517 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1518 and OP1. Return 0 if no simplification is possible.
1519
1520 Don't use this for relational operations such as EQ or LT.
1521 Use simplify_relational_operation instead. */
1522 rtx
1523 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1524 rtx op0, rtx op1)
1525 {
1526 rtx trueop0, trueop1;
1527 rtx tem;
1528
1529 /* Relational operations don't work here. We must know the mode
1530 of the operands in order to do the comparison correctly.
1531 Assuming a full word can give incorrect results.
1532 Consider comparing 128 with -128 in QImode. */
1533 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1534 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1535
1536 /* Make sure the constant is second. */
1537 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1538 && swap_commutative_operands_p (op0, op1))
1539 {
1540 tem = op0, op0 = op1, op1 = tem;
1541 }
1542
1543 trueop0 = avoid_constant_pool_reference (op0);
1544 trueop1 = avoid_constant_pool_reference (op1);
1545
1546 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1547 if (tem)
1548 return tem;
1549 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1550 }
1551
1552 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1553 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1554 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1555 actual constants. */
1556
1557 static rtx
1558 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1559 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1560 {
1561 rtx tem, reversed, opleft, opright;
1562 HOST_WIDE_INT val;
1563 unsigned int width = GET_MODE_BITSIZE (mode);
1564
1565 /* Even if we can't compute a constant result,
1566 there are some cases worth simplifying. */
1567
1568 switch (code)
1569 {
1570 case PLUS:
1571 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1572 when x is NaN, infinite, or finite and nonzero. They aren't
1573 when x is -0 and the rounding mode is not towards -infinity,
1574 since (-0) + 0 is then 0. */
1575 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1576 return op0;
1577
1578 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1579 transformations are safe even for IEEE. */
1580 if (GET_CODE (op0) == NEG)
1581 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1582 else if (GET_CODE (op1) == NEG)
1583 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1584
1585 /* (~a) + 1 -> -a */
1586 if (INTEGRAL_MODE_P (mode)
1587 && GET_CODE (op0) == NOT
1588 && trueop1 == const1_rtx)
1589 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1590
1591 /* Handle both-operands-constant cases. We can only add
1592 CONST_INTs to constants since the sum of relocatable symbols
1593 can't be handled by most assemblers. Don't add CONST_INT
1594 to CONST_INT since overflow won't be computed properly if wider
1595 than HOST_BITS_PER_WIDE_INT. */
1596
1597 if ((GET_CODE (op0) == CONST
1598 || GET_CODE (op0) == SYMBOL_REF
1599 || GET_CODE (op0) == LABEL_REF)
1600 && GET_CODE (op1) == CONST_INT)
1601 return plus_constant (op0, INTVAL (op1));
1602 else if ((GET_CODE (op1) == CONST
1603 || GET_CODE (op1) == SYMBOL_REF
1604 || GET_CODE (op1) == LABEL_REF)
1605 && GET_CODE (op0) == CONST_INT)
1606 return plus_constant (op1, INTVAL (op0));
1607
1608 /* See if this is something like X * C - X or vice versa or
1609 if the multiplication is written as a shift. If so, we can
1610 distribute and make a new multiply, shift, or maybe just
1611 have X (if C is 2 in the example above). But don't make
1612 something more expensive than we had before. */
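/* For example, (plus (mult x (const_int 3)) x) can fold to
   (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x) to
   (mult x (const_int 5)), when the new form is no more expensive.  */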
1613
1614 if (SCALAR_INT_MODE_P (mode))
1615 {
1616 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1617 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1618 rtx lhs = op0, rhs = op1;
1619
1620 if (GET_CODE (lhs) == NEG)
1621 {
1622 coeff0l = -1;
1623 coeff0h = -1;
1624 lhs = XEXP (lhs, 0);
1625 }
1626 else if (GET_CODE (lhs) == MULT
1627 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1628 {
1629 coeff0l = INTVAL (XEXP (lhs, 1));
1630 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1631 lhs = XEXP (lhs, 0);
1632 }
1633 else if (GET_CODE (lhs) == ASHIFT
1634 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1635 && INTVAL (XEXP (lhs, 1)) >= 0
1636 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1637 {
1638 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1639 coeff0h = 0;
1640 lhs = XEXP (lhs, 0);
1641 }
1642
1643 if (GET_CODE (rhs) == NEG)
1644 {
1645 coeff1l = -1;
1646 coeff1h = -1;
1647 rhs = XEXP (rhs, 0);
1648 }
1649 else if (GET_CODE (rhs) == MULT
1650 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1651 {
1652 coeff1l = INTVAL (XEXP (rhs, 1));
1653 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1654 rhs = XEXP (rhs, 0);
1655 }
1656 else if (GET_CODE (rhs) == ASHIFT
1657 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1658 && INTVAL (XEXP (rhs, 1)) >= 0
1659 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1660 {
1661 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1662 coeff1h = 0;
1663 rhs = XEXP (rhs, 0);
1664 }
1665
1666 if (rtx_equal_p (lhs, rhs))
1667 {
1668 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1669 rtx coeff;
1670 unsigned HOST_WIDE_INT l;
1671 HOST_WIDE_INT h;
1672 bool speed = optimize_function_for_speed_p (cfun);
1673
1674 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1675 coeff = immed_double_const (l, h, mode);
1676
1677 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1678 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1679 ? tem : 0;
1680 }
1681 }
1682
1683 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1684 if ((GET_CODE (op1) == CONST_INT
1685 || GET_CODE (op1) == CONST_DOUBLE)
1686 && GET_CODE (op0) == XOR
1687 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1688 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1689 && mode_signbit_p (mode, op1))
1690 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1691 simplify_gen_binary (XOR, mode, op1,
1692 XEXP (op0, 1)));
1693
1694 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1695 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1696 && GET_CODE (op0) == MULT
1697 && GET_CODE (XEXP (op0, 0)) == NEG)
1698 {
1699 rtx in1, in2;
1700
1701 in1 = XEXP (XEXP (op0, 0), 0);
1702 in2 = XEXP (op0, 1);
1703 return simplify_gen_binary (MINUS, mode, op1,
1704 simplify_gen_binary (MULT, mode,
1705 in1, in2));
1706 }
1707
1708 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1709 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1710 is 1. */
1711 if (COMPARISON_P (op0)
1712 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1713 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1714 && (reversed = reversed_comparison (op0, mode)))
1715 return
1716 simplify_gen_unary (NEG, mode, reversed, mode);
1717
1718 /* If one of the operands is a PLUS or a MINUS, see if we can
1719 simplify this by the associative law.
1720 Don't use the associative law for floating point.
1721 The inaccuracy makes it nonassociative,
1722 and subtle programs can break if operations are associated. */
1723
1724 if (INTEGRAL_MODE_P (mode)
1725 && (plus_minus_operand_p (op0)
1726 || plus_minus_operand_p (op1))
1727 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1728 return tem;
1729
1730 /* Reassociate floating point addition only when the user
1731 specifies associative math operations. */
1732 if (FLOAT_MODE_P (mode)
1733 && flag_associative_math)
1734 {
1735 tem = simplify_associative_operation (code, mode, op0, op1);
1736 if (tem)
1737 return tem;
1738 }
1739 break;
1740
1741 case COMPARE:
1742 #ifdef HAVE_cc0
1743 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1744 using cc0, in which case we want to leave it as a COMPARE
1745 so we can distinguish it from a register-register-copy.
1746
1747 In IEEE floating point, x-0 is not the same as x. */
1748 if (!(HONOR_SIGNED_ZEROS (mode)
1749 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1750 && trueop1 == CONST0_RTX (mode))
1751 return op0;
1752 #endif
1753
1754 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1755 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1756 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1757 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1758 {
1759 rtx xop00 = XEXP (op0, 0);
1760 rtx xop10 = XEXP (op1, 0);
1761
1762 #ifdef HAVE_cc0
1763 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1764 #else
1765 if (REG_P (xop00) && REG_P (xop10)
1766 && GET_MODE (xop00) == GET_MODE (xop10)
1767 && REGNO (xop00) == REGNO (xop10)
1768 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1769 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1770 #endif
1771 return xop00;
1772 }
1773 break;
1774
1775 case MINUS:
1776 /* We can't assume x-x is 0 even with non-IEEE floating point,
1777 but since it is zero except in very strange circumstances, we
1778 will treat it as zero with -ffinite-math-only. */
1779 if (rtx_equal_p (trueop0, trueop1)
1780 && ! side_effects_p (op0)
1781 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1782 return CONST0_RTX (mode);
1783
1784 /* Change subtraction from zero into negation. (0 - x) is the
1785 same as -x when x is NaN, infinite, or finite and nonzero.
1786 But if the mode has signed zeros, and does not round towards
1787 -infinity, then 0 - 0 is 0, not -0. */
1788 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1789 return simplify_gen_unary (NEG, mode, op1, mode);
1790
1791 /* (-1 - a) is ~a. */
1792 if (trueop0 == constm1_rtx)
1793 return simplify_gen_unary (NOT, mode, op1, mode);
1794
1795 /* Subtracting 0 has no effect unless the mode has signed zeros
1796 and supports rounding towards -infinity. In such a case,
1797 0 - 0 is -0. */
1798 if (!(HONOR_SIGNED_ZEROS (mode)
1799 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1800 && trueop1 == CONST0_RTX (mode))
1801 return op0;
1802
1803 /* See if this is something like X * C - X or vice versa or
1804 if the multiplication is written as a shift. If so, we can
1805 distribute and make a new multiply, shift, or maybe just
1806 have X (if C is 2 in the example above). But don't make
1807 something more expensive than we had before. */
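/* For example, (minus (mult x (const_int 3)) x) can fold to
   (mult x (const_int 2)) when the new form is no more expensive.  */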
1808
1809 if (SCALAR_INT_MODE_P (mode))
1810 {
1811 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1812 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1813 rtx lhs = op0, rhs = op1;
1814
1815 if (GET_CODE (lhs) == NEG)
1816 {
1817 coeff0l = -1;
1818 coeff0h = -1;
1819 lhs = XEXP (lhs, 0);
1820 }
1821 else if (GET_CODE (lhs) == MULT
1822 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1823 {
1824 coeff0l = INTVAL (XEXP (lhs, 1));
1825 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1826 lhs = XEXP (lhs, 0);
1827 }
1828 else if (GET_CODE (lhs) == ASHIFT
1829 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1830 && INTVAL (XEXP (lhs, 1)) >= 0
1831 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1832 {
1833 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1834 coeff0h = 0;
1835 lhs = XEXP (lhs, 0);
1836 }
1837
1838 if (GET_CODE (rhs) == NEG)
1839 {
1840 negcoeff1l = 1;
1841 negcoeff1h = 0;
1842 rhs = XEXP (rhs, 0);
1843 }
1844 else if (GET_CODE (rhs) == MULT
1845 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1846 {
1847 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1848 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1849 rhs = XEXP (rhs, 0);
1850 }
1851 else if (GET_CODE (rhs) == ASHIFT
1852 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1853 && INTVAL (XEXP (rhs, 1)) >= 0
1854 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1855 {
1856 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1857 negcoeff1h = -1;
1858 rhs = XEXP (rhs, 0);
1859 }
1860
1861 if (rtx_equal_p (lhs, rhs))
1862 {
1863 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1864 rtx coeff;
1865 unsigned HOST_WIDE_INT l;
1866 HOST_WIDE_INT h;
1867 bool speed = optimize_function_for_speed_p (cfun);
1868
1869 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1870 coeff = immed_double_const (l, h, mode);
1871
1872 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1873 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1874 ? tem : 0;
1875 }
1876 }
1877
1878 /* (a - (-b)) -> (a + b). True even for IEEE. */
1879 if (GET_CODE (op1) == NEG)
1880 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1881
1882 /* (-x - c) may be simplified as (-c - x). */
1883 if (GET_CODE (op0) == NEG
1884 && (GET_CODE (op1) == CONST_INT
1885 || GET_CODE (op1) == CONST_DOUBLE))
1886 {
1887 tem = simplify_unary_operation (NEG, mode, op1, mode);
1888 if (tem)
1889 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1890 }
1891
1892 /* Don't let a relocatable value get a negative coeff. */
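/* For example, (minus X (const_int 4)) is rewritten as
   (plus X (const_int -4)), the canonical form for such expressions.  */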
1893 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1894 return simplify_gen_binary (PLUS, mode,
1895 op0,
1896 neg_const_int (mode, op1));
1897
1898 /* (x - (x & y)) -> (x & ~y) */
1899 if (GET_CODE (op1) == AND)
1900 {
1901 if (rtx_equal_p (op0, XEXP (op1, 0)))
1902 {
1903 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1904 GET_MODE (XEXP (op1, 1)));
1905 return simplify_gen_binary (AND, mode, op0, tem);
1906 }
1907 if (rtx_equal_p (op0, XEXP (op1, 1)))
1908 {
1909 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1910 GET_MODE (XEXP (op1, 0)));
1911 return simplify_gen_binary (AND, mode, op0, tem);
1912 }
1913 }
1914
1915 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1916 by reversing the comparison code if valid. */
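/* For example, (minus (const_int 1) (ltu X Y)) can become (geu X Y)
   when comparison results are 0 or 1.  */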
1917 if (STORE_FLAG_VALUE == 1
1918 && trueop0 == const1_rtx
1919 && COMPARISON_P (op1)
1920 && (reversed = reversed_comparison (op1, mode)))
1921 return reversed;
1922
1923 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1924 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1925 && GET_CODE (op1) == MULT
1926 && GET_CODE (XEXP (op1, 0)) == NEG)
1927 {
1928 rtx in1, in2;
1929
1930 in1 = XEXP (XEXP (op1, 0), 0);
1931 in2 = XEXP (op1, 1);
1932 return simplify_gen_binary (PLUS, mode,
1933 simplify_gen_binary (MULT, mode,
1934 in1, in2),
1935 op0);
1936 }
1937
1938 /* Canonicalize (minus (neg A) (mult B C)) to
1939 (minus (mult (neg B) C) A). */
1940 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1941 && GET_CODE (op1) == MULT
1942 && GET_CODE (op0) == NEG)
1943 {
1944 rtx in1, in2;
1945
1946 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1947 in2 = XEXP (op1, 1);
1948 return simplify_gen_binary (MINUS, mode,
1949 simplify_gen_binary (MULT, mode,
1950 in1, in2),
1951 XEXP (op0, 0));
1952 }
1953
1954 /* If one of the operands is a PLUS or a MINUS, see if we can
1955 simplify this by the associative law. This will, for example,
1956 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1957 Don't use the associative law for floating point.
1958 The inaccuracy makes it nonassociative,
1959 and subtle programs can break if operations are associated. */
1960
1961 if (INTEGRAL_MODE_P (mode)
1962 && (plus_minus_operand_p (op0)
1963 || plus_minus_operand_p (op1))
1964 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1965 return tem;
1966 break;
1967
1968 case MULT:
1969 if (trueop1 == constm1_rtx)
1970 return simplify_gen_unary (NEG, mode, op0, mode);
1971
1972 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1973 x is NaN, since x * 0 is then also NaN. Nor is it valid
1974 when the mode has signed zeros, since multiplying a negative
1975 number by 0 will give -0, not 0. */
1976 if (!HONOR_NANS (mode)
1977 && !HONOR_SIGNED_ZEROS (mode)
1978 && trueop1 == CONST0_RTX (mode)
1979 && ! side_effects_p (op0))
1980 return op1;
1981
1982 /* In IEEE floating point, x*1 is not equivalent to x for
1983 signalling NaNs. */
1984 if (!HONOR_SNANS (mode)
1985 && trueop1 == CONST1_RTX (mode))
1986 return op0;
1987
1988 /* Convert multiply by constant power of two into shift unless
1989 we are still generating RTL. This test is a kludge. */
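/* For example, (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */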
1990 if (GET_CODE (trueop1) == CONST_INT
1991 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1992 /* If the mode is larger than the host word size, and the
1993 uppermost bit is set, then this isn't a power of two due
1994 to implicit sign extension. */
1995 && (width <= HOST_BITS_PER_WIDE_INT
1996 || val != HOST_BITS_PER_WIDE_INT - 1))
1997 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1998
1999 /* Likewise for multipliers wider than a word. */
2000 if (GET_CODE (trueop1) == CONST_DOUBLE
2001 && (GET_MODE (trueop1) == VOIDmode
2002 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2003 && GET_MODE (op0) == mode
2004 && CONST_DOUBLE_LOW (trueop1) == 0
2005 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2006 return simplify_gen_binary (ASHIFT, mode, op0,
2007 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2008
2009 /* x*2 is x+x and x*(-1) is -x */
2010 if (GET_CODE (trueop1) == CONST_DOUBLE
2011 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2012 && GET_MODE (op0) == mode)
2013 {
2014 REAL_VALUE_TYPE d;
2015 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2016
2017 if (REAL_VALUES_EQUAL (d, dconst2))
2018 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2019
2020 if (!HONOR_SNANS (mode)
2021 && REAL_VALUES_EQUAL (d, dconstm1))
2022 return simplify_gen_unary (NEG, mode, op0, mode);
2023 }
2024
2025 /* Optimize -x * -x as x * x. */
2026 if (FLOAT_MODE_P (mode)
2027 && GET_CODE (op0) == NEG
2028 && GET_CODE (op1) == NEG
2029 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2030 && !side_effects_p (XEXP (op0, 0)))
2031 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2032
2033 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2034 if (SCALAR_FLOAT_MODE_P (mode)
2035 && GET_CODE (op0) == ABS
2036 && GET_CODE (op1) == ABS
2037 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2038 && !side_effects_p (XEXP (op0, 0)))
2039 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2040
2041 /* Reassociate multiplication, but for floating point MULTs
2042 only when the user specifies unsafe math optimizations. */
2043 if (! FLOAT_MODE_P (mode)
2044 || flag_unsafe_math_optimizations)
2045 {
2046 tem = simplify_associative_operation (code, mode, op0, op1);
2047 if (tem)
2048 return tem;
2049 }
2050 break;
2051
2052 case IOR:
2053 if (trueop1 == const0_rtx)
2054 return op0;
2055 if (GET_CODE (trueop1) == CONST_INT
2056 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2057 == GET_MODE_MASK (mode)))
2058 return op1;
2059 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2060 return op0;
2061 /* A | (~A) -> -1 */
2062 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2063 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2064 && ! side_effects_p (op0)
2065 && SCALAR_INT_MODE_P (mode))
2066 return constm1_rtx;
2067
2068 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2069 if (GET_CODE (op1) == CONST_INT
2070 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2071 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2072 return op1;
2073
2074 /* Canonicalize (X & C1) | C2. */
2075 if (GET_CODE (op0) == AND
2076 && GET_CODE (trueop1) == CONST_INT
2077 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2078 {
2079 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2080 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2081 HOST_WIDE_INT c2 = INTVAL (trueop1);
2082
2083 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2084 if ((c1 & c2) == c1
2085 && !side_effects_p (XEXP (op0, 0)))
2086 return trueop1;
2087
2088 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2089 if (((c1|c2) & mask) == mask)
2090 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2091
2092 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
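/* E.g. (ior (and X (const_int 0x0f)) (const_int 0x3c)) becomes
   (ior (and X (const_int 0x03)) (const_int 0x3c)).  */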
2093 if (((c1 & ~c2) & mask) != (c1 & mask))
2094 {
2095 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2096 gen_int_mode (c1 & ~c2, mode));
2097 return simplify_gen_binary (IOR, mode, tem, op1);
2098 }
2099 }
2100
2101 /* Convert (A & B) | A to A. */
2102 if (GET_CODE (op0) == AND
2103 && (rtx_equal_p (XEXP (op0, 0), op1)
2104 || rtx_equal_p (XEXP (op0, 1), op1))
2105 && ! side_effects_p (XEXP (op0, 0))
2106 && ! side_effects_p (XEXP (op0, 1)))
2107 return op1;
2108
2109 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2110 mode size to (rotate A CX). */
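/* For example, in SImode (ior (ashift A (const_int 8))
   (lshiftrt A (const_int 24))) becomes (rotate A (const_int 8)).  */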
2111
2112 if (GET_CODE (op1) == ASHIFT
2113 || GET_CODE (op1) == SUBREG)
2114 {
2115 opleft = op1;
2116 opright = op0;
2117 }
2118 else
2119 {
2120 opright = op1;
2121 opleft = op0;
2122 }
2123
2124 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2125 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2126 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2127 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2128 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2129 == GET_MODE_BITSIZE (mode)))
2130 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2131
2132 /* Same, but for ashift that has been "simplified" to a wider mode
2133 by simplify_shift_const. */
2134
2135 if (GET_CODE (opleft) == SUBREG
2136 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2137 && GET_CODE (opright) == LSHIFTRT
2138 && GET_CODE (XEXP (opright, 0)) == SUBREG
2139 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2140 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2141 && (GET_MODE_SIZE (GET_MODE (opleft))
2142 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2143 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2144 SUBREG_REG (XEXP (opright, 0)))
2145 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2146 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2147 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2148 == GET_MODE_BITSIZE (mode)))
2149 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2150 XEXP (SUBREG_REG (opleft), 1));
2151
2152 /* If we have (ior (and X C1) C2), simplify this by making
2153 C1 as small as possible if C1 actually changes. */
2154 if (GET_CODE (op1) == CONST_INT
2155 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2156 || INTVAL (op1) > 0)
2157 && GET_CODE (op0) == AND
2158 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2159 && GET_CODE (op1) == CONST_INT
2160 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2161 return simplify_gen_binary (IOR, mode,
2162 simplify_gen_binary
2163 (AND, mode, XEXP (op0, 0),
2164 GEN_INT (INTVAL (XEXP (op0, 1))
2165 & ~INTVAL (op1))),
2166 op1);
2167
2168 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2169 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2170 the PLUS does not affect any of the bits in OP1: then we can do
2171 the IOR as a PLUS and we can associate. This is valid if OP1
2172 can be safely shifted left C bits. */
2173 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2174 && GET_CODE (XEXP (op0, 0)) == PLUS
2175 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2176 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2177 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2178 {
2179 int count = INTVAL (XEXP (op0, 1));
2180 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2181
2182 if (mask >> count == INTVAL (trueop1)
2183 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2184 return simplify_gen_binary (ASHIFTRT, mode,
2185 plus_constant (XEXP (op0, 0), mask),
2186 XEXP (op0, 1));
2187 }
2188
2189 tem = simplify_associative_operation (code, mode, op0, op1);
2190 if (tem)
2191 return tem;
2192 break;
2193
2194 case XOR:
2195 if (trueop1 == const0_rtx)
2196 return op0;
2197 if (GET_CODE (trueop1) == CONST_INT
2198 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2199 == GET_MODE_MASK (mode)))
2200 return simplify_gen_unary (NOT, mode, op0, mode);
2201 if (rtx_equal_p (trueop0, trueop1)
2202 && ! side_effects_p (op0)
2203 && GET_MODE_CLASS (mode) != MODE_CC)
2204 return CONST0_RTX (mode);
2205
2206 /* Canonicalize XOR of the most significant bit to PLUS. */
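/* E.g. in QImode, (xor X (const_int -128)) becomes (plus X (const_int -128)):
   flipping the sign bit is the same as adding it modulo 256.  */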
2207 if ((GET_CODE (op1) == CONST_INT
2208 || GET_CODE (op1) == CONST_DOUBLE)
2209 && mode_signbit_p (mode, op1))
2210 return simplify_gen_binary (PLUS, mode, op0, op1);
2211 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2212 if ((GET_CODE (op1) == CONST_INT
2213 || GET_CODE (op1) == CONST_DOUBLE)
2214 && GET_CODE (op0) == PLUS
2215 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2216 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2217 && mode_signbit_p (mode, XEXP (op0, 1)))
2218 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2219 simplify_gen_binary (XOR, mode, op1,
2220 XEXP (op0, 1)));
2221
2222 /* If we are XORing two things that have no bits in common,
2223 convert them into an IOR. This helps to detect rotation encoded
2224 using those methods and possibly other simplifications. */
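/* When nonzero_bits shows the operands use disjoint bits, XOR, IOR and
   PLUS all compute the same value.  */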
2225
2226 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2227 && (nonzero_bits (op0, mode)
2228 & nonzero_bits (op1, mode)) == 0)
2229 return (simplify_gen_binary (IOR, mode, op0, op1));
2230
2231 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2232 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2233 (NOT y). */
2234 {
2235 int num_negated = 0;
2236
2237 if (GET_CODE (op0) == NOT)
2238 num_negated++, op0 = XEXP (op0, 0);
2239 if (GET_CODE (op1) == NOT)
2240 num_negated++, op1 = XEXP (op1, 0);
2241
2242 if (num_negated == 2)
2243 return simplify_gen_binary (XOR, mode, op0, op1);
2244 else if (num_negated == 1)
2245 return simplify_gen_unary (NOT, mode,
2246 simplify_gen_binary (XOR, mode, op0, op1),
2247 mode);
2248 }
2249
2250 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2251 correspond to a machine insn or result in further simplifications
2252 if B is a constant. */
2253
2254 if (GET_CODE (op0) == AND
2255 && rtx_equal_p (XEXP (op0, 1), op1)
2256 && ! side_effects_p (op1))
2257 return simplify_gen_binary (AND, mode,
2258 simplify_gen_unary (NOT, mode,
2259 XEXP (op0, 0), mode),
2260 op1);
2261
2262 else if (GET_CODE (op0) == AND
2263 && rtx_equal_p (XEXP (op0, 0), op1)
2264 && ! side_effects_p (op1))
2265 return simplify_gen_binary (AND, mode,
2266 simplify_gen_unary (NOT, mode,
2267 XEXP (op0, 1), mode),
2268 op1);
2269
2270 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2271 comparison if STORE_FLAG_VALUE is 1. */
2272 if (STORE_FLAG_VALUE == 1
2273 && trueop1 == const1_rtx
2274 && COMPARISON_P (op0)
2275 && (reversed = reversed_comparison (op0, mode)))
2276 return reversed;
2277
2278 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2279 is (lt foo (const_int 0)), so we can perform the above
2280 simplification if STORE_FLAG_VALUE is 1. */
2281
2282 if (STORE_FLAG_VALUE == 1
2283 && trueop1 == const1_rtx
2284 && GET_CODE (op0) == LSHIFTRT
2285 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2286 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2287 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2288
2289 /* (xor (comparison foo bar) (const_int sign-bit))
2290 when STORE_FLAG_VALUE is the sign bit. */
2291 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2292 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2293 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2294 && trueop1 == const_true_rtx
2295 && COMPARISON_P (op0)
2296 && (reversed = reversed_comparison (op0, mode)))
2297 return reversed;
2298
2299 tem = simplify_associative_operation (code, mode, op0, op1);
2300 if (tem)
2301 return tem;
2302 break;
2303
2304 case AND:
2305 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2306 return trueop1;
2307 /* If we are turning off bits already known off in OP0, we need
2308 not do an AND. */
2309 if (GET_CODE (trueop1) == CONST_INT
2310 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2311 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2312 return op0;
2313 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2314 && GET_MODE_CLASS (mode) != MODE_CC)
2315 return op0;
2316 /* A & (~A) -> 0 */
2317 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2318 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2319 && ! side_effects_p (op0)
2320 && GET_MODE_CLASS (mode) != MODE_CC)
2321 return CONST0_RTX (mode);
2322
2323 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2324 there are no nonzero bits of C outside of X's mode. */
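/* For example, (and:SI (sign_extend:SI X) (const_int 0x7f)) with X in
   QImode becomes (zero_extend:SI (and:QI X (const_int 0x7f))).  */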
2325 if ((GET_CODE (op0) == SIGN_EXTEND
2326 || GET_CODE (op0) == ZERO_EXTEND)
2327 && GET_CODE (trueop1) == CONST_INT
2328 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2329 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2330 & INTVAL (trueop1)) == 0)
2331 {
2332 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2333 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2334 gen_int_mode (INTVAL (trueop1),
2335 imode));
2336 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2337 }
2338
2339 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2340 if (GET_CODE (op0) == IOR
2341 && GET_CODE (trueop1) == CONST_INT
2342 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2343 {
2344 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2345 return simplify_gen_binary (IOR, mode,
2346 simplify_gen_binary (AND, mode,
2347 XEXP (op0, 0), op1),
2348 gen_int_mode (tmp, mode));
2349 }
2350
2351 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2352 insn (and may simplify more). */
2353 if (GET_CODE (op0) == XOR
2354 && rtx_equal_p (XEXP (op0, 0), op1)
2355 && ! side_effects_p (op1))
2356 return simplify_gen_binary (AND, mode,
2357 simplify_gen_unary (NOT, mode,
2358 XEXP (op0, 1), mode),
2359 op1);
2360
2361 if (GET_CODE (op0) == XOR
2362 && rtx_equal_p (XEXP (op0, 1), op1)
2363 && ! side_effects_p (op1))
2364 return simplify_gen_binary (AND, mode,
2365 simplify_gen_unary (NOT, mode,
2366 XEXP (op0, 0), mode),
2367 op1);
2368
2369 /* Similarly for (~(A ^ B)) & A. */
2370 if (GET_CODE (op0) == NOT
2371 && GET_CODE (XEXP (op0, 0)) == XOR
2372 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2373 && ! side_effects_p (op1))
2374 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2375
2376 if (GET_CODE (op0) == NOT
2377 && GET_CODE (XEXP (op0, 0)) == XOR
2378 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2379 && ! side_effects_p (op1))
2380 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2381
2382 /* Convert (A | B) & A to A. */
2383 if (GET_CODE (op0) == IOR
2384 && (rtx_equal_p (XEXP (op0, 0), op1)
2385 || rtx_equal_p (XEXP (op0, 1), op1))
2386 && ! side_effects_p (XEXP (op0, 0))
2387 && ! side_effects_p (XEXP (op0, 1)))
2388 return op1;
2389
2390 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2391 ((A & N) + B) & M -> (A + B) & M
2392 Similarly if (N & M) == 0,
2393 ((A | N) + B) & M -> (A + B) & M
2394 and for - instead of + and/or ^ instead of |. */
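/* For example, with M == 0x0f and N == 0xff,
   (and (plus (and A (const_int 0xff)) B) (const_int 0x0f)) becomes
   (and (plus A B) (const_int 0x0f)): clearing bits of A above bit 7
   cannot change the low 8 bits of the sum, and the outer mask keeps
   only the low 4.  */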
2395 if (GET_CODE (trueop1) == CONST_INT
2396 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2397 && ~INTVAL (trueop1)
2398 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2399 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2400 {
2401 rtx pmop[2];
2402 int which;
2403
2404 pmop[0] = XEXP (op0, 0);
2405 pmop[1] = XEXP (op0, 1);
2406
2407 for (which = 0; which < 2; which++)
2408 {
2409 tem = pmop[which];
2410 switch (GET_CODE (tem))
2411 {
2412 case AND:
2413 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2414 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2415 == INTVAL (trueop1))
2416 pmop[which] = XEXP (tem, 0);
2417 break;
2418 case IOR:
2419 case XOR:
2420 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2421 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2422 pmop[which] = XEXP (tem, 0);
2423 break;
2424 default:
2425 break;
2426 }
2427 }
2428
2429 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2430 {
2431 tem = simplify_gen_binary (GET_CODE (op0), mode,
2432 pmop[0], pmop[1]);
2433 return simplify_gen_binary (code, mode, tem, op1);
2434 }
2435 }
2436
2437 /* (and X (ior (not X) Y)) -> (and X Y) */
2438 if (GET_CODE (op1) == IOR
2439 && GET_CODE (XEXP (op1, 0)) == NOT
2440 && op0 == XEXP (XEXP (op1, 0), 0))
2441 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2442
2443 /* (and (ior (not X) Y) X) -> (and X Y) */
2444 if (GET_CODE (op0) == IOR
2445 && GET_CODE (XEXP (op0, 0)) == NOT
2446 && op1 == XEXP (XEXP (op0, 0), 0))
2447 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2448
2449 tem = simplify_associative_operation (code, mode, op0, op1);
2450 if (tem)
2451 return tem;
2452 break;
2453
2454 case UDIV:
2455 /* 0/x is 0 (or x&0 if x has side-effects). */
2456 if (trueop0 == CONST0_RTX (mode))
2457 {
2458 if (side_effects_p (op1))
2459 return simplify_gen_binary (AND, mode, op1, trueop0);
2460 return trueop0;
2461 }
2462 /* x/1 is x. */
2463 if (trueop1 == CONST1_RTX (mode))
2464 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2465 /* Convert divide by power of two into shift. */
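/* For example, (udiv X (const_int 16)) becomes (lshiftrt X (const_int 4)).  */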
2466 if (GET_CODE (trueop1) == CONST_INT
2467 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2468 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2469 break;
2470
2471 case DIV:
2472 /* Handle floating point and integers separately. */
2473 if (SCALAR_FLOAT_MODE_P (mode))
2474 {
2475 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2476 safe for modes with NaNs, since 0.0 / 0.0 will then be
2477 NaN rather than 0.0. Nor is it safe for modes with signed
2478 zeros, since dividing 0 by a negative number gives -0.0 */
2479 if (trueop0 == CONST0_RTX (mode)
2480 && !HONOR_NANS (mode)
2481 && !HONOR_SIGNED_ZEROS (mode)
2482 && ! side_effects_p (op1))
2483 return op0;
2484 /* x/1.0 is x. */
2485 if (trueop1 == CONST1_RTX (mode)
2486 && !HONOR_SNANS (mode))
2487 return op0;
2488
2489 if (GET_CODE (trueop1) == CONST_DOUBLE
2490 && trueop1 != CONST0_RTX (mode))
2491 {
2492 REAL_VALUE_TYPE d;
2493 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2494
2495 /* x/-1.0 is -x. */
2496 if (REAL_VALUES_EQUAL (d, dconstm1)
2497 && !HONOR_SNANS (mode))
2498 return simplify_gen_unary (NEG, mode, op0, mode);
2499
2500 /* Change FP division by a constant into multiplication.
2501 Only do this with -freciprocal-math. */
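/* E.g. (div X (const_double 4.0)) becomes (mult X (const_double 0.25));
   the flag also permits reciprocals that are not exactly representable.  */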
2502 if (flag_reciprocal_math
2503 && !REAL_VALUES_EQUAL (d, dconst0))
2504 {
2505 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2506 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2507 return simplify_gen_binary (MULT, mode, op0, tem);
2508 }
2509 }
2510 }
2511 else
2512 {
2513 /* 0/x is 0 (or x&0 if x has side-effects). */
2514 if (trueop0 == CONST0_RTX (mode))
2515 {
2516 if (side_effects_p (op1))
2517 return simplify_gen_binary (AND, mode, op1, trueop0);
2518 return trueop0;
2519 }
2520 /* x/1 is x. */
2521 if (trueop1 == CONST1_RTX (mode))
2522 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2523 /* x/-1 is -x. */
2524 if (trueop1 == constm1_rtx)
2525 {
2526 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2527 return simplify_gen_unary (NEG, mode, x, mode);
2528 }
2529 }
2530 break;
2531
2532 case UMOD:
2533 /* 0%x is 0 (or x&0 if x has side-effects). */
2534 if (trueop0 == CONST0_RTX (mode))
2535 {
2536 if (side_effects_p (op1))
2537 return simplify_gen_binary (AND, mode, op1, trueop0);
2538 return trueop0;
2539 }
2540 /* x%1 is 0 (or x&0 if x has side-effects). */
2541 if (trueop1 == CONST1_RTX (mode))
2542 {
2543 if (side_effects_p (op0))
2544 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2545 return CONST0_RTX (mode);
2546 }
2547 /* Implement modulus by power of two as AND. */
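/* For example, (umod X (const_int 8)) becomes (and X (const_int 7)).  */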
2548 if (GET_CODE (trueop1) == CONST_INT
2549 && exact_log2 (INTVAL (trueop1)) > 0)
2550 return simplify_gen_binary (AND, mode, op0,
2551 GEN_INT (INTVAL (op1) - 1));
2552 break;
2553
2554 case MOD:
2555 /* 0%x is 0 (or x&0 if x has side-effects). */
2556 if (trueop0 == CONST0_RTX (mode))
2557 {
2558 if (side_effects_p (op1))
2559 return simplify_gen_binary (AND, mode, op1, trueop0);
2560 return trueop0;
2561 }
2562 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2563 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2564 {
2565 if (side_effects_p (op0))
2566 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2567 return CONST0_RTX (mode);
2568 }
2569 break;
2570
2571 case ROTATERT:
2572 case ROTATE:
2573 case ASHIFTRT:
2574 if (trueop1 == CONST0_RTX (mode))
2575 return op0;
2576 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2577 return op0;
2578 /* Rotating ~0 always results in ~0. */
2579 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2580 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2581 && ! side_effects_p (op1))
2582 return op0;
2583 canonicalize_shift:
2584 if (SHIFT_COUNT_TRUNCATED && GET_CODE (op1) == CONST_INT)
2585 {
2586 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2587 if (val != INTVAL (op1))
2588 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2589 }
2590 break;
2591
2592 case ASHIFT:
2593 case SS_ASHIFT:
2594 case US_ASHIFT:
2595 if (trueop1 == CONST0_RTX (mode))
2596 return op0;
2597 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2598 return op0;
2599 goto canonicalize_shift;
2600
2601 case LSHIFTRT:
2602 if (trueop1 == CONST0_RTX (mode))
2603 return op0;
2604 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2605 return op0;
2606 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
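/* E.g. in SImode, assuming CLZ_DEFINED_VALUE_AT_ZERO yields 32,
   (lshiftrt (clz X) (const_int 5)) is 1 exactly when X is zero, so it
   becomes (eq X (const_int 0)).  */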
2607 if (GET_CODE (op0) == CLZ
2608 && GET_CODE (trueop1) == CONST_INT
2609 && STORE_FLAG_VALUE == 1
2610 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2611 {
2612 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2613 unsigned HOST_WIDE_INT zero_val = 0;
2614
2615 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2616 && zero_val == GET_MODE_BITSIZE (imode)
2617 && INTVAL (trueop1) == exact_log2 (zero_val))
2618 return simplify_gen_relational (EQ, mode, imode,
2619 XEXP (op0, 0), const0_rtx);
2620 }
2621 goto canonicalize_shift;
2622
2623 case SMIN:
2624 if (width <= HOST_BITS_PER_WIDE_INT
2625 && GET_CODE (trueop1) == CONST_INT
2626 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2627 && ! side_effects_p (op0))
2628 return op1;
2629 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2630 return op0;
2631 tem = simplify_associative_operation (code, mode, op0, op1);
2632 if (tem)
2633 return tem;
2634 break;
2635
2636 case SMAX:
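/* (smax X C) is C when C is the largest signed value of the mode.  */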
2637 if (width <= HOST_BITS_PER_WIDE_INT
2638 && GET_CODE (trueop1) == CONST_INT
2639 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2640 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2641 && ! side_effects_p (op0))
2642 return op1;
2643 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2644 return op0;
2645 tem = simplify_associative_operation (code, mode, op0, op1);
2646 if (tem)
2647 return tem;
2648 break;
2649
2650 case UMIN:
2651 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2652 return op1;
2653 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2654 return op0;
2655 tem = simplify_associative_operation (code, mode, op0, op1);
2656 if (tem)
2657 return tem;
2658 break;
2659
2660 case UMAX:
2661 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2662 return op1;
2663 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2664 return op0;
2665 tem = simplify_associative_operation (code, mode, op0, op1);
2666 if (tem)
2667 return tem;
2668 break;
2669
2670 case SS_PLUS:
2671 case US_PLUS:
2672 case SS_MINUS:
2673 case US_MINUS:
2674 case SS_MULT:
2675 case US_MULT:
2676 case SS_DIV:
2677 case US_DIV:
2678 /* ??? There are simplifications that can be done. */
2679 return 0;
2680
2681 case VEC_SELECT:
2682 if (!VECTOR_MODE_P (mode))
2683 {
2684 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2685 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2686 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2687 gcc_assert (XVECLEN (trueop1, 0) == 1);
2688 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2689
2690 if (GET_CODE (trueop0) == CONST_VECTOR)
2691 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2692 (trueop1, 0, 0)));
2693
2694 /* Extract a scalar element from a nested VEC_SELECT expression
2695 (with optional nested VEC_CONCAT expression). Some targets
2696 (i386) extract a scalar element from a vector using a chain of
2697 nested VEC_SELECT expressions. When the input operand is a memory
2698 operand, this operation can be simplified to a simple scalar
2699 load from an offsetted memory address. */
2700 if (GET_CODE (trueop0) == VEC_SELECT)
2701 {
2702 rtx op0 = XEXP (trueop0, 0);
2703 rtx op1 = XEXP (trueop0, 1);
2704
2705 enum machine_mode opmode = GET_MODE (op0);
2706 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2707 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2708
2709 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2710 int elem;
2711
2712 rtvec vec;
2713 rtx tmp_op, tmp;
2714
2715 gcc_assert (GET_CODE (op1) == PARALLEL);
2716 gcc_assert (i < n_elts);
2717
2718 /* Select the element pointed to by the nested selector. */
2719 elem = INTVAL (XVECEXP (op1, 0, i));
2720
2721 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2722 if (GET_CODE (op0) == VEC_CONCAT)
2723 {
2724 rtx op00 = XEXP (op0, 0);
2725 rtx op01 = XEXP (op0, 1);
2726
2727 enum machine_mode mode00, mode01;
2728 int n_elts00, n_elts01;
2729
2730 mode00 = GET_MODE (op00);
2731 mode01 = GET_MODE (op01);
2732
2733 /* Find out number of elements of each operand. */
2734 if (VECTOR_MODE_P (mode00))
2735 {
2736 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2737 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2738 }
2739 else
2740 n_elts00 = 1;
2741
2742 if (VECTOR_MODE_P (mode01))
2743 {
2744 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2745 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2746 }
2747 else
2748 n_elts01 = 1;
2749
2750 gcc_assert (n_elts == n_elts00 + n_elts01);
2751
2752 /* Select correct operand of VEC_CONCAT
2753 and adjust selector. */
2754 if (elem < n_elts01)
2755 tmp_op = op00;
2756 else
2757 {
2758 tmp_op = op01;
2759 elem -= n_elts00;
2760 }
2761 }
2762 else
2763 tmp_op = op0;
2764
2765 vec = rtvec_alloc (1);
2766 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2767
2768 tmp = gen_rtx_fmt_ee (code, mode,
2769 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2770 return tmp;
2771 }
2772 }
2773 else
2774 {
2775 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2776 gcc_assert (GET_MODE_INNER (mode)
2777 == GET_MODE_INNER (GET_MODE (trueop0)));
2778 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2779
2780 if (GET_CODE (trueop0) == CONST_VECTOR)
2781 {
2782 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2783 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2784 rtvec v = rtvec_alloc (n_elts);
2785 unsigned int i;
2786
2787 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2788 for (i = 0; i < n_elts; i++)
2789 {
2790 rtx x = XVECEXP (trueop1, 0, i);
2791
2792 gcc_assert (GET_CODE (x) == CONST_INT);
2793 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2794 INTVAL (x));
2795 }
2796
2797 return gen_rtx_CONST_VECTOR (mode, v);
2798 }
2799 }
2800
2801 if (XVECLEN (trueop1, 0) == 1
2802 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2803 && GET_CODE (trueop0) == VEC_CONCAT)
2804 {
2805 rtx vec = trueop0;
2806 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2807
2808 /* Try to find the element in the VEC_CONCAT. */
2809 while (GET_MODE (vec) != mode
2810 && GET_CODE (vec) == VEC_CONCAT)
2811 {
2812 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2813 if (offset < vec_size)
2814 vec = XEXP (vec, 0);
2815 else
2816 {
2817 offset -= vec_size;
2818 vec = XEXP (vec, 1);
2819 }
2820 vec = avoid_constant_pool_reference (vec);
2821 }
2822
2823 if (GET_MODE (vec) == mode)
2824 return vec;
2825 }
2826
2827 return 0;
2828 case VEC_CONCAT:
2829 {
2830 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2831 ? GET_MODE (trueop0)
2832 : GET_MODE_INNER (mode));
2833 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2834 ? GET_MODE (trueop1)
2835 : GET_MODE_INNER (mode));
2836
2837 gcc_assert (VECTOR_MODE_P (mode));
2838 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2839 == GET_MODE_SIZE (mode));
2840
2841 if (VECTOR_MODE_P (op0_mode))
2842 gcc_assert (GET_MODE_INNER (mode)
2843 == GET_MODE_INNER (op0_mode));
2844 else
2845 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2846
2847 if (VECTOR_MODE_P (op1_mode))
2848 gcc_assert (GET_MODE_INNER (mode)
2849 == GET_MODE_INNER (op1_mode));
2850 else
2851 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2852
2853 if ((GET_CODE (trueop0) == CONST_VECTOR
2854 || GET_CODE (trueop0) == CONST_INT
2855 || GET_CODE (trueop0) == CONST_DOUBLE)
2856 && (GET_CODE (trueop1) == CONST_VECTOR
2857 || GET_CODE (trueop1) == CONST_INT
2858 || GET_CODE (trueop1) == CONST_DOUBLE))
2859 {
2860 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2861 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2862 rtvec v = rtvec_alloc (n_elts);
2863 unsigned int i;
2864 unsigned in_n_elts = 1;
2865
2866 if (VECTOR_MODE_P (op0_mode))
2867 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2868 for (i = 0; i < n_elts; i++)
2869 {
2870 if (i < in_n_elts)
2871 {
2872 if (!VECTOR_MODE_P (op0_mode))
2873 RTVEC_ELT (v, i) = trueop0;
2874 else
2875 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2876 }
2877 else
2878 {
2879 if (!VECTOR_MODE_P (op1_mode))
2880 RTVEC_ELT (v, i) = trueop1;
2881 else
2882 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2883 i - in_n_elts);
2884 }
2885 }
2886
2887 return gen_rtx_CONST_VECTOR (mode, v);
2888 }
2889 }
2890 return 0;
2891
2892 default:
2893 gcc_unreachable ();
2894 }
2895
2896 return 0;
2897 }
2898
2899 rtx
2900 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2901 rtx op0, rtx op1)
2902 {
2903 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2904 HOST_WIDE_INT val;
2905 unsigned int width = GET_MODE_BITSIZE (mode);
2906
2907 if (VECTOR_MODE_P (mode)
2908 && code != VEC_CONCAT
2909 && GET_CODE (op0) == CONST_VECTOR
2910 && GET_CODE (op1) == CONST_VECTOR)
2911 {
2912 unsigned n_elts = GET_MODE_NUNITS (mode);
2913 enum machine_mode op0mode = GET_MODE (op0);
2914 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2915 enum machine_mode op1mode = GET_MODE (op1);
2916 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2917 rtvec v = rtvec_alloc (n_elts);
2918 unsigned int i;
2919
2920 gcc_assert (op0_n_elts == n_elts);
2921 gcc_assert (op1_n_elts == n_elts);
2922 for (i = 0; i < n_elts; i++)
2923 {
2924 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2925 CONST_VECTOR_ELT (op0, i),
2926 CONST_VECTOR_ELT (op1, i));
2927 if (!x)
2928 return 0;
2929 RTVEC_ELT (v, i) = x;
2930 }
2931
2932 return gen_rtx_CONST_VECTOR (mode, v);
2933 }
2934
2935 if (VECTOR_MODE_P (mode)
2936 && code == VEC_CONCAT
2937 && (CONST_INT_P (op0)
2938 || GET_CODE (op0) == CONST_DOUBLE
2939 || GET_CODE (op0) == CONST_FIXED)
2940 && (CONST_INT_P (op1)
2941 || GET_CODE (op1) == CONST_DOUBLE
2942 || GET_CODE (op1) == CONST_FIXED))
2943 {
2944 unsigned n_elts = GET_MODE_NUNITS (mode);
2945 rtvec v = rtvec_alloc (n_elts);
2946
2947 gcc_assert (n_elts >= 2);
2948 if (n_elts == 2)
2949 {
2950 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2951 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2952
2953 RTVEC_ELT (v, 0) = op0;
2954 RTVEC_ELT (v, 1) = op1;
2955 }
2956 else
2957 {
2958 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2959 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2960 unsigned i;
2961
2962 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2963 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2964 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2965
2966 for (i = 0; i < op0_n_elts; ++i)
2967 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2968 for (i = 0; i < op1_n_elts; ++i)
2969 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2970 }
2971
2972 return gen_rtx_CONST_VECTOR (mode, v);
2973 }
2974
2975 if (SCALAR_FLOAT_MODE_P (mode)
2976 && GET_CODE (op0) == CONST_DOUBLE
2977 && GET_CODE (op1) == CONST_DOUBLE
2978 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2979 {
2980 if (code == AND
2981 || code == IOR
2982 || code == XOR)
2983 {
2984 long tmp0[4];
2985 long tmp1[4];
2986 REAL_VALUE_TYPE r;
2987 int i;
2988
2989 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2990 GET_MODE (op0));
2991 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2992 GET_MODE (op1));
2993 for (i = 0; i < 4; i++)
2994 {
2995 switch (code)
2996 {
2997 case AND:
2998 tmp0[i] &= tmp1[i];
2999 break;
3000 case IOR:
3001 tmp0[i] |= tmp1[i];
3002 break;
3003 case XOR:
3004 tmp0[i] ^= tmp1[i];
3005 break;
3006 default:
3007 gcc_unreachable ();
3008 }
3009 }
3010 real_from_target (&r, tmp0, mode);
3011 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3012 }
3013 else
3014 {
3015 REAL_VALUE_TYPE f0, f1, value, result;
3016 bool inexact;
3017
3018 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3019 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3020 real_convert (&f0, mode, &f0);
3021 real_convert (&f1, mode, &f1);
3022
3023 if (HONOR_SNANS (mode)
3024 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3025 return 0;
3026
3027 if (code == DIV
3028 && REAL_VALUES_EQUAL (f1, dconst0)
3029 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3030 return 0;
3031
3032 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3033 && flag_trapping_math
3034 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3035 {
3036 int s0 = REAL_VALUE_NEGATIVE (f0);
3037 int s1 = REAL_VALUE_NEGATIVE (f1);
3038
3039 switch (code)
3040 {
3041 case PLUS:
3042 /* Inf + -Inf = NaN plus exception. */
3043 if (s0 != s1)
3044 return 0;
3045 break;
3046 case MINUS:
3047 /* Inf - Inf = NaN plus exception. */
3048 if (s0 == s1)
3049 return 0;
3050 break;
3051 case DIV:
3052 /* Inf / Inf = NaN plus exception. */
3053 return 0;
3054 default:
3055 break;
3056 }
3057 }
3058
3059 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3060 && flag_trapping_math
3061 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3062 || (REAL_VALUE_ISINF (f1)
3063 && REAL_VALUES_EQUAL (f0, dconst0))))
3064 /* Inf * 0 = NaN plus exception. */
3065 return 0;
3066
3067 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3068 &f0, &f1);
3069 real_convert (&result, mode, &value);
3070
3071 /* Don't constant fold this floating point operation if
3072 the result has overflowed and flag_trapping_math is set. */
3073
3074 if (flag_trapping_math
3075 && MODE_HAS_INFINITIES (mode)
3076 && REAL_VALUE_ISINF (result)
3077 && !REAL_VALUE_ISINF (f0)
3078 && !REAL_VALUE_ISINF (f1))
3079 /* Overflow plus exception. */
3080 return 0;
3081
3082 /* Don't constant fold this floating point operation if the
3083 result may depend upon the run-time rounding mode and
3084 flag_rounding_math is set, or if GCC's software emulation
3085 is unable to accurately represent the result. */
3086
3087 if ((flag_rounding_math
3088 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3089 && (inexact || !real_identical (&result, &value)))
3090 return NULL_RTX;
3091
3092 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3093 }
3094 }
3095
3096 /* We can fold some multi-word operations. */
3097 if (GET_MODE_CLASS (mode) == MODE_INT
3098 && width == HOST_BITS_PER_WIDE_INT * 2
3099 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3100 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3101 {
3102 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3103 HOST_WIDE_INT h1, h2, hv, ht;
3104
3105 if (GET_CODE (op0) == CONST_DOUBLE)
3106 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3107 else
3108 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3109
3110 if (GET_CODE (op1) == CONST_DOUBLE)
3111 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3112 else
3113 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3114
3115 switch (code)
3116 {
3117 case MINUS:
3118 /* A - B == A + (-B). */
3119 neg_double (l2, h2, &lv, &hv);
3120 l2 = lv, h2 = hv;
3121
3122 /* Fall through.... */
3123
3124 case PLUS:
3125 add_double (l1, h1, l2, h2, &lv, &hv);
3126 break;
3127
3128 case MULT:
3129 mul_double (l1, h1, l2, h2, &lv, &hv);
3130 break;
3131
3132 case DIV:
3133 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3134 &lv, &hv, &lt, &ht))
3135 return 0;
3136 break;
3137
3138 case MOD:
3139 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3140 &lt, &ht, &lv, &hv))
3141 return 0;
3142 break;
3143
3144 case UDIV:
3145 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3146 &lv, &hv, &lt, &ht))
3147 return 0;
3148 break;
3149
3150 case UMOD:
3151 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3152 &lt, &ht, &lv, &hv))
3153 return 0;
3154 break;
3155
3156 case AND:
3157 lv = l1 & l2, hv = h1 & h2;
3158 break;
3159
3160 case IOR:
3161 lv = l1 | l2, hv = h1 | h2;
3162 break;
3163
3164 case XOR:
3165 lv = l1 ^ l2, hv = h1 ^ h2;
3166 break;
3167
3168 case SMIN:
3169 if (h1 < h2
3170 || (h1 == h2
3171 && ((unsigned HOST_WIDE_INT) l1
3172 < (unsigned HOST_WIDE_INT) l2)))
3173 lv = l1, hv = h1;
3174 else
3175 lv = l2, hv = h2;
3176 break;
3177
3178 case SMAX:
3179 if (h1 > h2
3180 || (h1 == h2
3181 && ((unsigned HOST_WIDE_INT) l1
3182 > (unsigned HOST_WIDE_INT) l2)))
3183 lv = l1, hv = h1;
3184 else
3185 lv = l2, hv = h2;
3186 break;
3187
3188 case UMIN:
3189 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3190 || (h1 == h2
3191 && ((unsigned HOST_WIDE_INT) l1
3192 < (unsigned HOST_WIDE_INT) l2)))
3193 lv = l1, hv = h1;
3194 else
3195 lv = l2, hv = h2;
3196 break;
3197
3198 case UMAX:
3199 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3200 || (h1 == h2
3201 && ((unsigned HOST_WIDE_INT) l1
3202 > (unsigned HOST_WIDE_INT) l2)))
3203 lv = l1, hv = h1;
3204 else
3205 lv = l2, hv = h2;
3206 break;
3207
3208 case LSHIFTRT: case ASHIFTRT:
3209 case ASHIFT:
3210 case ROTATE: case ROTATERT:
3211 if (SHIFT_COUNT_TRUNCATED)
3212 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3213
3214 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3215 return 0;
3216
3217 if (code == LSHIFTRT || code == ASHIFTRT)
3218 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3219 code == ASHIFTRT);
3220 else if (code == ASHIFT)
3221 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3222 else if (code == ROTATE)
3223 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3224 else /* code == ROTATERT */
3225 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3226 break;
3227
3228 default:
3229 return 0;
3230 }
3231
3232 return immed_double_const (lv, hv, mode);
3233 }
3234
3235 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3236 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3237 {
3238 /* Get the integer argument values in two forms:
3239 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3240
3241 arg0 = INTVAL (op0);
3242 arg1 = INTVAL (op1);
3243
3244 if (width < HOST_BITS_PER_WIDE_INT)
3245 {
3246 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3247 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3248
3249 arg0s = arg0;
3250 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3251 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3252
3253 arg1s = arg1;
3254 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3255 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3256 }
3257 else
3258 {
3259 arg0s = arg0;
3260 arg1s = arg1;
3261 }
3262
3263 /* Compute the value of the arithmetic. */
3264
3265 switch (code)
3266 {
3267 case PLUS:
3268 val = arg0s + arg1s;
3269 break;
3270
3271 case MINUS:
3272 val = arg0s - arg1s;
3273 break;
3274
3275 case MULT:
3276 val = arg0s * arg1s;
3277 break;
3278
3279 case DIV:
3280 if (arg1s == 0
3281 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3282 && arg1s == -1))
3283 return 0;
3284 val = arg0s / arg1s;
3285 break;
3286
3287 case MOD:
3288 if (arg1s == 0
3289 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3290 && arg1s == -1))
3291 return 0;
3292 val = arg0s % arg1s;
3293 break;
3294
3295 case UDIV:
3296 if (arg1 == 0
3297 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3298 && arg1s == -1))
3299 return 0;
3300 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3301 break;
3302
3303 case UMOD:
3304 if (arg1 == 0
3305 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3306 && arg1s == -1))
3307 return 0;
3308 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3309 break;
3310
3311 case AND:
3312 val = arg0 & arg1;
3313 break;
3314
3315 case IOR:
3316 val = arg0 | arg1;
3317 break;
3318
3319 case XOR:
3320 val = arg0 ^ arg1;
3321 break;
3322
3323 case LSHIFTRT:
3324 case ASHIFT:
3325 case ASHIFTRT:
3326 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3327 the value is in range. We can't return any old value for
3328 out-of-range arguments because either the middle-end (via
3329 shift_truncation_mask) or the back-end might be relying on
3330 target-specific knowledge. Nor can we rely on
3331 shift_truncation_mask, since the shift might not be part of an
3332 ashlM3, lshrM3 or ashrM3 instruction. */
3333 if (SHIFT_COUNT_TRUNCATED)
3334 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3335 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3336 return 0;
3337
3338 val = (code == ASHIFT
3339 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3340 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3341
3342 /* Sign-extend the result for arithmetic right shifts. */
3343 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3344 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3345 break;
3346
3347 case ROTATERT:
3348 if (arg1 < 0)
3349 return 0;
3350
3351 arg1 %= width;
3352 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3353 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3354 break;
3355
3356 case ROTATE:
3357 if (arg1 < 0)
3358 return 0;
3359
3360 arg1 %= width;
3361 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3362 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3363 break;
3364
3365 case COMPARE:
3366 /* Do nothing here. */
3367 return 0;
3368
3369 case SMIN:
3370 val = arg0s <= arg1s ? arg0s : arg1s;
3371 break;
3372
3373 case UMIN:
3374 val = ((unsigned HOST_WIDE_INT) arg0
3375 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3376 break;
3377
3378 case SMAX:
3379 val = arg0s > arg1s ? arg0s : arg1s;
3380 break;
3381
3382 case UMAX:
3383 val = ((unsigned HOST_WIDE_INT) arg0
3384 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3385 break;
3386
3387 case SS_PLUS:
3388 case US_PLUS:
3389 case SS_MINUS:
3390 case US_MINUS:
3391 case SS_MULT:
3392 case US_MULT:
3393 case SS_DIV:
3394 case US_DIV:
3395 case SS_ASHIFT:
3396 case US_ASHIFT:
3397 /* ??? There are simplifications that can be done. */
3398 return 0;
3399
3400 default:
3401 gcc_unreachable ();
3402 }
3403
3404 return gen_int_mode (val, mode);
3405 }
3406
3407 return NULL_RTX;
3408 }
3409
3410
3411 \f
3412 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3413 PLUS or MINUS.
3414
3415 Rather than test for specific cases, we do this by a brute-force method
3416 and do all possible simplifications until no more changes occur. Then
3417 we rebuild the operation. */
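/* For example, (minus (plus A B) (plus A C)) expands to the operand list
   {+A, +B, -A, -C}; the two A terms cancel, and the result is rebuilt as
   the equivalent of (minus B C).  */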
3418
3419 struct simplify_plus_minus_op_data
3420 {
3421 rtx op;
3422 short neg;
3423 };
3424
3425 static bool
3426 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3427 {
3428 int result;
3429
3430 result = (commutative_operand_precedence (y)
3431 - commutative_operand_precedence (x));
3432 if (result)
3433 return result > 0;
3434
3435 /* Group together equal REGs to do more simplification. */
3436 if (REG_P (x) && REG_P (y))
3437 return REGNO (x) > REGNO (y);
3438 else
3439 return false;
3440 }
3441
3442 static rtx
3443 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3444 rtx op1)
3445 {
3446 struct simplify_plus_minus_op_data ops[8];
3447 rtx result, tem;
3448 int n_ops = 2, input_ops = 2;
3449 int changed, n_constants = 0, canonicalized = 0;
3450 int i, j;
3451
3452 memset (ops, 0, sizeof ops);
3453
3454 /* Set up the two operands and then expand them until nothing has been
3455 changed. If we run out of room in our array, give up; this should
3456 almost never happen. */
3457
3458 ops[0].op = op0;
3459 ops[0].neg = 0;
3460 ops[1].op = op1;
3461 ops[1].neg = (code == MINUS);
3462
3463 do
3464 {
3465 changed = 0;
3466
3467 for (i = 0; i < n_ops; i++)
3468 {
3469 rtx this_op = ops[i].op;
3470 int this_neg = ops[i].neg;
3471 enum rtx_code this_code = GET_CODE (this_op);
3472
3473 switch (this_code)
3474 {
3475 case PLUS:
3476 case MINUS:
3477 if (n_ops == 7)
3478 return NULL_RTX;
3479
3480 ops[n_ops].op = XEXP (this_op, 1);
3481 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3482 n_ops++;
3483
3484 ops[i].op = XEXP (this_op, 0);
3485 input_ops++;
3486 changed = 1;
3487 canonicalized |= this_neg;
3488 break;
3489
3490 case NEG:
3491 ops[i].op = XEXP (this_op, 0);
3492 ops[i].neg = ! this_neg;
3493 changed = 1;
3494 canonicalized = 1;
3495 break;
3496
3497 case CONST:
3498 if (n_ops < 7
3499 && GET_CODE (XEXP (this_op, 0)) == PLUS
3500 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3501 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3502 {
3503 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3504 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3505 ops[n_ops].neg = this_neg;
3506 n_ops++;
3507 changed = 1;
3508 canonicalized = 1;
3509 }
3510 break;
3511
3512 case NOT:
3513 /* ~a -> (-a - 1) */
3514 if (n_ops != 7)
3515 {
3516 ops[n_ops].op = constm1_rtx;
3517 ops[n_ops++].neg = this_neg;
3518 ops[i].op = XEXP (this_op, 0);
3519 ops[i].neg = !this_neg;
3520 changed = 1;
3521 canonicalized = 1;
3522 }
3523 break;
3524
3525 case CONST_INT:
3526 n_constants++;
3527 if (this_neg)
3528 {
3529 ops[i].op = neg_const_int (mode, this_op);
3530 ops[i].neg = 0;
3531 changed = 1;
3532 canonicalized = 1;
3533 }
3534 break;
3535
3536 default:
3537 break;
3538 }
3539 }
3540 }
3541 while (changed);
3542
3543 if (n_constants > 1)
3544 canonicalized = 1;
3545
3546 gcc_assert (n_ops >= 2);
3547
3548 /* If we only have two operands, we can avoid the loops. */
3549 if (n_ops == 2)
3550 {
3551 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3552 rtx lhs, rhs;
3553
3554 /* Get the two operands. Be careful with the order, especially for
3555 the cases where code == MINUS. */
3556 if (ops[0].neg && ops[1].neg)
3557 {
3558 lhs = gen_rtx_NEG (mode, ops[0].op);
3559 rhs = ops[1].op;
3560 }
3561 else if (ops[0].neg)
3562 {
3563 lhs = ops[1].op;
3564 rhs = ops[0].op;
3565 }
3566 else
3567 {
3568 lhs = ops[0].op;
3569 rhs = ops[1].op;
3570 }
3571
3572 return simplify_const_binary_operation (code, mode, lhs, rhs);
3573 }
3574
3575 /* Now simplify each pair of operands until nothing changes. */
3576 do
3577 {
3578 /* Insertion sort is good enough for an eight-element array. */
3579 for (i = 1; i < n_ops; i++)
3580 {
3581 struct simplify_plus_minus_op_data save;
3582 j = i - 1;
3583 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3584 continue;
3585
3586 canonicalized = 1;
3587 save = ops[i];
3588 do
3589 ops[j + 1] = ops[j];
3590 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3591 ops[j + 1] = save;
3592 }
3593
3594 /* This is only useful the first time through. */
3595 if (!canonicalized)
3596 return NULL_RTX;
3597
3598 changed = 0;
3599 for (i = n_ops - 1; i > 0; i--)
3600 for (j = i - 1; j >= 0; j--)
3601 {
3602 rtx lhs = ops[j].op, rhs = ops[i].op;
3603 int lneg = ops[j].neg, rneg = ops[i].neg;
3604
3605 if (lhs != 0 && rhs != 0)
3606 {
3607 enum rtx_code ncode = PLUS;
3608
3609 if (lneg != rneg)
3610 {
3611 ncode = MINUS;
3612 if (lneg)
3613 tem = lhs, lhs = rhs, rhs = tem;
3614 }
3615 else if (swap_commutative_operands_p (lhs, rhs))
3616 tem = lhs, lhs = rhs, rhs = tem;
3617
3618 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3619 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3620 {
3621 rtx tem_lhs, tem_rhs;
3622
3623 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3624 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3625 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3626
3627 if (tem && !CONSTANT_P (tem))
3628 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3629 }
3630 else
3631 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3632
3633 /* Reject "simplifications" that just wrap the two
3634 arguments in a CONST. Failure to do so can result
3635 in infinite recursion with simplify_binary_operation
3636 when it calls us to simplify CONST operations. */
3637 if (tem
3638 && ! (GET_CODE (tem) == CONST
3639 && GET_CODE (XEXP (tem, 0)) == ncode
3640 && XEXP (XEXP (tem, 0), 0) == lhs
3641 && XEXP (XEXP (tem, 0), 1) == rhs))
3642 {
3643 lneg &= rneg;
3644 if (GET_CODE (tem) == NEG)
3645 tem = XEXP (tem, 0), lneg = !lneg;
3646 if (GET_CODE (tem) == CONST_INT && lneg)
3647 tem = neg_const_int (mode, tem), lneg = 0;
3648
3649 ops[i].op = tem;
3650 ops[i].neg = lneg;
3651 ops[j].op = NULL_RTX;
3652 changed = 1;
3653 }
3654 }
3655 }
3656
3657 /* Pack all the operands to the lower-numbered entries. */
3658 for (i = 0, j = 0; j < n_ops; j++)
3659 if (ops[j].op)
3660 {
3661 ops[i] = ops[j];
3662 i++;
3663 }
3664 n_ops = i;
3665 }
3666 while (changed);
3667
3668 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3669 if (n_ops == 2
3670 && GET_CODE (ops[1].op) == CONST_INT
3671 && CONSTANT_P (ops[0].op)
3672 && ops[0].neg)
3673 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3674
3675 /* We suppressed creation of trivial CONST expressions in the
3676 combination loop to avoid recursion. Create one manually now.
3677 The combination loop should have ensured that there is exactly
3678 one CONST_INT, and the sort will have ensured that it is last
3679 in the array and that any other constant will be next-to-last. */
3680
3681 if (GET_CODE (ops[n_ops - 1].op) == CONST_INT)
3682 i = n_ops - 2;
3683 else
3684 i = n_ops - 1;
3685
3686 if (i >= 1
3687 && ops[i].neg
3688 && !ops[i - 1].neg
3689 && CONSTANT_P (ops[i].op)
3690 && GET_CODE (ops[i].op) == GET_CODE (ops[i - 1].op))
3691 {
3692 ops[i - 1].op = gen_rtx_MINUS (mode, ops[i - 1].op, ops[i].op);
3693 ops[i - 1].op = gen_rtx_CONST (mode, ops[i - 1].op);
3694 if (i < n_ops - 1)
3695 ops[i] = ops[i + 1];
3696 n_ops--;
3697 }
3698
3699 if (n_ops > 1
3700 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3701 && CONSTANT_P (ops[n_ops - 2].op))
3702 {
3703 rtx value = ops[n_ops - 1].op;
3704 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3705 value = neg_const_int (mode, value);
3706 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3707 n_ops--;
3708 }
3709
3710 /* Put a non-negated operand first, if possible. */
3711
3712 for (i = 0; i < n_ops && ops[i].neg; i++)
3713 continue;
3714 if (i == n_ops)
3715 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3716 else if (i != 0)
3717 {
3718 tem = ops[0].op;
3719 ops[0] = ops[i];
3720 ops[i].op = tem;
3721 ops[i].neg = 1;
3722 }
3723
3724 /* Now make the result by performing the requested operations. */
3725 result = ops[0].op;
3726 for (i = 1; i < n_ops; i++)
3727 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3728 mode, result, ops[i].op);
3729
3730 return result;
3731 }
3732
3733 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3734 static bool
3735 plus_minus_operand_p (const_rtx x)
3736 {
3737 return GET_CODE (x) == PLUS
3738 || GET_CODE (x) == MINUS
3739 || (GET_CODE (x) == CONST
3740 && GET_CODE (XEXP (x, 0)) == PLUS
3741 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3742 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3743 }
3744
3745 /* Like simplify_binary_operation except used for relational operators.
3746 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3747 not both be VOIDmode as well.
3748
3749 CMP_MODE specifies the mode in which the comparison is done, so it is
3750 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3751 the operands or, if both are VOIDmode, the operands are compared in
3752 "infinite precision". */
3753 rtx
3754 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3755 enum machine_mode cmp_mode, rtx op0, rtx op1)
3756 {
3757 rtx tem, trueop0, trueop1;
3758
3759 if (cmp_mode == VOIDmode)
3760 cmp_mode = GET_MODE (op0);
3761 if (cmp_mode == VOIDmode)
3762 cmp_mode = GET_MODE (op1);
3763
3764 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3765 if (tem)
3766 {
3767 if (SCALAR_FLOAT_MODE_P (mode))
3768 {
3769 if (tem == const0_rtx)
3770 return CONST0_RTX (mode);
3771 #ifdef FLOAT_STORE_FLAG_VALUE
3772 {
3773 REAL_VALUE_TYPE val;
3774 val = FLOAT_STORE_FLAG_VALUE (mode);
3775 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3776 }
3777 #else
3778 return NULL_RTX;
3779 #endif
3780 }
3781 if (VECTOR_MODE_P (mode))
3782 {
3783 if (tem == const0_rtx)
3784 return CONST0_RTX (mode);
3785 #ifdef VECTOR_STORE_FLAG_VALUE
3786 {
3787 int i, units;
3788 rtvec v;
3789
3790 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3791 if (val == NULL_RTX)
3792 return NULL_RTX;
3793 if (val == const1_rtx)
3794 return CONST1_RTX (mode);
3795
3796 units = GET_MODE_NUNITS (mode);
3797 v = rtvec_alloc (units);
3798 for (i = 0; i < units; i++)
3799 RTVEC_ELT (v, i) = val;
3800 return gen_rtx_raw_CONST_VECTOR (mode, v);
3801 }
3802 #else
3803 return NULL_RTX;
3804 #endif
3805 }
3806
3807 return tem;
3808 }
3809
3810 /* For the following tests, ensure const0_rtx is op1. */
3811 if (swap_commutative_operands_p (op0, op1)
3812 || (op0 == const0_rtx && op1 != const0_rtx))
3813 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3814
3815 /* If op0 is a compare, extract the comparison arguments from it. */
3816 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3817 return simplify_relational_operation (code, mode, VOIDmode,
3818 XEXP (op0, 0), XEXP (op0, 1));
3819
3820 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3821 || CC0_P (op0))
3822 return NULL_RTX;
3823
3824 trueop0 = avoid_constant_pool_reference (op0);
3825 trueop1 = avoid_constant_pool_reference (op1);
3826 return simplify_relational_operation_1 (code, mode, cmp_mode,
3827 trueop0, trueop1);
3828 }
3829
3830 /* This part of simplify_relational_operation is only used when CMP_MODE
3831 is not in class MODE_CC (i.e. it is a real comparison).
3832
3833 MODE is the mode of the result, while CMP_MODE specifies the mode in
3834 which the comparison is done, so it is the mode of the operands. */
3835
3836 static rtx
3837 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3838 enum machine_mode cmp_mode, rtx op0, rtx op1)
3839 {
3840 enum rtx_code op0code = GET_CODE (op0);
3841
3842 if (op1 == const0_rtx && COMPARISON_P (op0))
3843 {
3844 /* If op0 is a comparison, extract the comparison arguments
3845 from it. */
3846 if (code == NE)
3847 {
3848 if (GET_MODE (op0) == mode)
3849 return simplify_rtx (op0);
3850 else
3851 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3852 XEXP (op0, 0), XEXP (op0, 1));
3853 }
3854 else if (code == EQ)
3855 {
3856 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3857 if (new_code != UNKNOWN)
3858 return simplify_gen_relational (new_code, mode, VOIDmode,
3859 XEXP (op0, 0), XEXP (op0, 1));
3860 }
3861 }
3862
3863 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
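/* Both forms compute the carry out of the addition: the truncated sum
   (plus a b) is less than B (unsigned) exactly when it is also less than A,
   i.e. exactly when the addition wrapped around. */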
3864 if ((code == LTU || code == GEU)
3865 && GET_CODE (op0) == PLUS
3866 && rtx_equal_p (op1, XEXP (op0, 1))
3867 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3868 && !rtx_equal_p (op1, XEXP (op0, 0)))
3869 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
3870
3871 if (op1 == const0_rtx)
3872 {
3873 /* Canonicalize (GTU x 0) as (NE x 0). */
3874 if (code == GTU)
3875 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3876 /* Canonicalize (LEU x 0) as (EQ x 0). */
3877 if (code == LEU)
3878 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3879 }
3880 else if (op1 == const1_rtx)
3881 {
3882 switch (code)
3883 {
3884 case GE:
3885 /* Canonicalize (GE x 1) as (GT x 0). */
3886 return simplify_gen_relational (GT, mode, cmp_mode,
3887 op0, const0_rtx);
3888 case GEU:
3889 /* Canonicalize (GEU x 1) as (NE x 0). */
3890 return simplify_gen_relational (NE, mode, cmp_mode,
3891 op0, const0_rtx);
3892 case LT:
3893 /* Canonicalize (LT x 1) as (LE x 0). */
3894 return simplify_gen_relational (LE, mode, cmp_mode,
3895 op0, const0_rtx);
3896 case LTU:
3897 /* Canonicalize (LTU x 1) as (EQ x 0). */
3898 return simplify_gen_relational (EQ, mode, cmp_mode,
3899 op0, const0_rtx);
3900 default:
3901 break;
3902 }
3903 }
3904 else if (op1 == constm1_rtx)
3905 {
3906 /* Canonicalize (LE x -1) as (LT x 0). */
3907 if (code == LE)
3908 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3909 /* Canonicalize (GT x -1) as (GE x 0). */
3910 if (code == GT)
3911 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3912 }
3913
3914 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
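/* For example, (eq (plus x (const_int 4)) (const_int 9)) becomes
   (eq x (const_int 5)). */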
3915 if ((code == EQ || code == NE)
3916 && (op0code == PLUS || op0code == MINUS)
3917 && CONSTANT_P (op1)
3918 && CONSTANT_P (XEXP (op0, 1))
3919 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3920 {
3921 rtx x = XEXP (op0, 0);
3922 rtx c = XEXP (op0, 1);
3923
3924 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3925 cmp_mode, op1, c);
3926 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3927 }
3928
3929 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3930 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3931 if (code == NE
3932 && op1 == const0_rtx
3933 && GET_MODE_CLASS (mode) == MODE_INT
3934 && cmp_mode != VOIDmode
3935 /* ??? Work-around BImode bugs in the ia64 backend. */
3936 && mode != BImode
3937 && cmp_mode != BImode
3938 && nonzero_bits (op0, cmp_mode) == 1
3939 && STORE_FLAG_VALUE == 1)
3940 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3941 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3942 : lowpart_subreg (mode, op0, cmp_mode);
3943
3944 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3945 if ((code == EQ || code == NE)
3946 && op1 == const0_rtx
3947 && op0code == XOR)
3948 return simplify_gen_relational (code, mode, cmp_mode,
3949 XEXP (op0, 0), XEXP (op0, 1));
3950
3951 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3952 if ((code == EQ || code == NE)
3953 && op0code == XOR
3954 && rtx_equal_p (XEXP (op0, 0), op1)
3955 && !side_effects_p (XEXP (op0, 0)))
3956 return simplify_gen_relational (code, mode, cmp_mode,
3957 XEXP (op0, 1), const0_rtx);
3958
3959 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3960 if ((code == EQ || code == NE)
3961 && op0code == XOR
3962 && rtx_equal_p (XEXP (op0, 1), op1)
3963 && !side_effects_p (XEXP (op0, 1)))
3964 return simplify_gen_relational (code, mode, cmp_mode,
3965 XEXP (op0, 0), const0_rtx);
3966
3967 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
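/* For example, (eq (xor x (const_int 3)) (const_int 1)) becomes
   (eq x (const_int 2)). */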
3968 if ((code == EQ || code == NE)
3969 && op0code == XOR
3970 && (GET_CODE (op1) == CONST_INT
3971 || GET_CODE (op1) == CONST_DOUBLE)
3972 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3973 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3974 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3975 simplify_gen_binary (XOR, cmp_mode,
3976 XEXP (op0, 1), op1));
3977
3978 if (op0code == POPCOUNT && op1 == const0_rtx)
3979 switch (code)
3980 {
3981 case EQ:
3982 case LE:
3983 case LEU:
3984 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3985 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3986 XEXP (op0, 0), const0_rtx);
3987
3988 case NE:
3989 case GT:
3990 case GTU:
3991 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3992 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3993 XEXP (op0, 0), const0_rtx);
3994
3995 default:
3996 break;
3997 }
3998
3999 return NULL_RTX;
4000 }
4001
4002 enum
4003 {
4004 CMP_EQ = 1,
4005 CMP_LT = 2,
4006 CMP_GT = 4,
4007 CMP_LTU = 8,
4008 CMP_GTU = 16
4009 };
4010
4011
4012 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4013 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4014 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4015 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4016 For floating-point comparisons, assume that the operands were ordered. */
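/* For example, comparing -1 with 1 gives CMP_LT | CMP_GTU: less than as a
   signed value, but greater than as an unsigned one. */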
4017
4018 static rtx
4019 comparison_result (enum rtx_code code, int known_results)
4020 {
4021 switch (code)
4022 {
4023 case EQ:
4024 case UNEQ:
4025 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4026 case NE:
4027 case LTGT:
4028 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4029
4030 case LT:
4031 case UNLT:
4032 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4033 case GE:
4034 case UNGE:
4035 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4036
4037 case GT:
4038 case UNGT:
4039 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4040 case LE:
4041 case UNLE:
4042 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4043
4044 case LTU:
4045 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4046 case GEU:
4047 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4048
4049 case GTU:
4050 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4051 case LEU:
4052 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4053
4054 case ORDERED:
4055 return const_true_rtx;
4056 case UNORDERED:
4057 return const0_rtx;
4058 default:
4059 gcc_unreachable ();
4060 }
4061 }
4062
4063 /* Check if the given comparison (done in the given MODE) is actually a
4064 tautology or a contradiction.
4065 If no simplification is possible, this function returns zero.
4066 Otherwise, it returns either const_true_rtx or const0_rtx. */
4067
4068 rtx
4069 simplify_const_relational_operation (enum rtx_code code,
4070 enum machine_mode mode,
4071 rtx op0, rtx op1)
4072 {
4073 rtx tem;
4074 rtx trueop0;
4075 rtx trueop1;
4076
4077 gcc_assert (mode != VOIDmode
4078 || (GET_MODE (op0) == VOIDmode
4079 && GET_MODE (op1) == VOIDmode));
4080
4081 /* If op0 is a compare, extract the comparison arguments from it. */
4082 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4083 {
4084 op1 = XEXP (op0, 1);
4085 op0 = XEXP (op0, 0);
4086
4087 if (GET_MODE (op0) != VOIDmode)
4088 mode = GET_MODE (op0);
4089 else if (GET_MODE (op1) != VOIDmode)
4090 mode = GET_MODE (op1);
4091 else
4092 return 0;
4093 }
4094
4095 /* We can't simplify MODE_CC values since we don't know what the
4096 actual comparison is. */
4097 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4098 return 0;
4099
4100 /* Make sure the constant is second. */
4101 if (swap_commutative_operands_p (op0, op1))
4102 {
4103 tem = op0, op0 = op1, op1 = tem;
4104 code = swap_condition (code);
4105 }
4106
4107 trueop0 = avoid_constant_pool_reference (op0);
4108 trueop1 = avoid_constant_pool_reference (op1);
4109
4110 /* For integer comparisons of A and B maybe we can simplify A - B and can
4111 then simplify a comparison of that with zero. If A and B are both either
4112 a register or a CONST_INT, this can't help; testing for these cases will
4113 prevent infinite recursion here and speed things up.
4114
4115 We can only do this for EQ and NE comparisons as otherwise we may
4116 lose or introduce overflow which we cannot disregard as undefined as
4117 we do not know the signedness of the operation on either the left or
4118 the right hand side of the comparison. */
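/* For example, comparing (plus x (const_int 1)) with x for equality reduces
   to comparing (const_int 1) with zero, which folds to false. */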
4119
4120 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4121 && (code == EQ || code == NE)
4122 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
4123 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
4124 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4125 /* We cannot do this if tem is a nonzero address. */
4126 && ! nonzero_address_p (tem))
4127 return simplify_const_relational_operation (signed_condition (code),
4128 mode, tem, const0_rtx);
4129
4130 if (! HONOR_NANS (mode) && code == ORDERED)
4131 return const_true_rtx;
4132
4133 if (! HONOR_NANS (mode) && code == UNORDERED)
4134 return const0_rtx;
4135
4136 /* For modes without NaNs, if the two operands are equal, we know the
4137 result except if they have side-effects. Even with NaNs we know
4138 the result of unordered comparisons and, if signaling NaNs are
4139 irrelevant, also the result of LT/GT/LTGT. */
4140 if ((! HONOR_NANS (GET_MODE (trueop0))
4141 || code == UNEQ || code == UNLE || code == UNGE
4142 || ((code == LT || code == GT || code == LTGT)
4143 && ! HONOR_SNANS (GET_MODE (trueop0))))
4144 && rtx_equal_p (trueop0, trueop1)
4145 && ! side_effects_p (trueop0))
4146 return comparison_result (code, CMP_EQ);
4147
4148 /* If the operands are floating-point constants, see if we can fold
4149 the result. */
4150 if (GET_CODE (trueop0) == CONST_DOUBLE
4151 && GET_CODE (trueop1) == CONST_DOUBLE
4152 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4153 {
4154 REAL_VALUE_TYPE d0, d1;
4155
4156 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4157 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4158
4159 /* Comparisons are unordered iff at least one of the values is NaN. */
4160 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4161 switch (code)
4162 {
4163 case UNEQ:
4164 case UNLT:
4165 case UNGT:
4166 case UNLE:
4167 case UNGE:
4168 case NE:
4169 case UNORDERED:
4170 return const_true_rtx;
4171 case EQ:
4172 case LT:
4173 case GT:
4174 case LE:
4175 case GE:
4176 case LTGT:
4177 case ORDERED:
4178 return const0_rtx;
4179 default:
4180 return 0;
4181 }
4182
4183 return comparison_result (code,
4184 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4185 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4186 }
4187
4188 /* Otherwise, see if the operands are both integers. */
4189 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4190 && (GET_CODE (trueop0) == CONST_DOUBLE
4191 || GET_CODE (trueop0) == CONST_INT)
4192 && (GET_CODE (trueop1) == CONST_DOUBLE
4193 || GET_CODE (trueop1) == CONST_INT))
4194 {
4195 int width = GET_MODE_BITSIZE (mode);
4196 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4197 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4198
4199 /* Get the two words comprising each integer constant. */
4200 if (GET_CODE (trueop0) == CONST_DOUBLE)
4201 {
4202 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4203 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4204 }
4205 else
4206 {
4207 l0u = l0s = INTVAL (trueop0);
4208 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4209 }
4210
4211 if (GET_CODE (trueop1) == CONST_DOUBLE)
4212 {
4213 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4214 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4215 }
4216 else
4217 {
4218 l1u = l1s = INTVAL (trueop1);
4219 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4220 }
4221
4222 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4223 we have to sign or zero-extend the values. */
4224 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4225 {
4226 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4227 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4228
4229 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4230 l0s |= ((HOST_WIDE_INT) (-1) << width);
4231
4232 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4233 l1s |= ((HOST_WIDE_INT) (-1) << width);
4234 }
4235 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4236 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4237
4238 if (h0u == h1u && l0u == l1u)
4239 return comparison_result (code, CMP_EQ);
4240 else
4241 {
4242 int cr;
4243 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4244 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4245 return comparison_result (code, cr);
4246 }
4247 }
4248
4249 /* Optimize comparisons with upper and lower bounds. */
4250 if (SCALAR_INT_MODE_P (mode)
4251 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4252 && GET_CODE (trueop1) == CONST_INT)
4253 {
4254 int sign;
4255 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4256 HOST_WIDE_INT val = INTVAL (trueop1);
4257 HOST_WIDE_INT mmin, mmax;
4258
4259 if (code == GEU
4260 || code == LEU
4261 || code == GTU
4262 || code == LTU)
4263 sign = 0;
4264 else
4265 sign = 1;
4266
4267 /* Get a reduced range if the sign bit is zero. */
4268 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4269 {
4270 mmin = 0;
4271 mmax = nonzero;
4272 }
4273 else
4274 {
4275 rtx mmin_rtx, mmax_rtx;
4276 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4277
4278 mmin = INTVAL (mmin_rtx);
4279 mmax = INTVAL (mmax_rtx);
4280 if (sign)
4281 {
4282 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4283
4284 mmin >>= (sign_copies - 1);
4285 mmax >>= (sign_copies - 1);
4286 }
4287 }
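/* For instance, if nonzero_bits shows that only the low 8 bits of TRUEOP0
   can be set, then MMIN..MMAX is 0..255 and a comparison such as
   (gtu x (const_int 255)) folds to false below. */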
4288
4289 switch (code)
4290 {
4291 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4292 case GEU:
4293 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4294 return const_true_rtx;
4295 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4296 return const0_rtx;
4297 break;
4298 case GE:
4299 if (val <= mmin)
4300 return const_true_rtx;
4301 if (val > mmax)
4302 return const0_rtx;
4303 break;
4304
4305 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4306 case LEU:
4307 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4308 return const_true_rtx;
4309 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4310 return const0_rtx;
4311 break;
4312 case LE:
4313 if (val >= mmax)
4314 return const_true_rtx;
4315 if (val < mmin)
4316 return const0_rtx;
4317 break;
4318
4319 case EQ:
4320 /* x == y is always false for y out of range. */
4321 if (val < mmin || val > mmax)
4322 return const0_rtx;
4323 break;
4324
4325 /* x > y is always false for y >= mmax, always true for y < mmin. */
4326 case GTU:
4327 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4328 return const0_rtx;
4329 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4330 return const_true_rtx;
4331 break;
4332 case GT:
4333 if (val >= mmax)
4334 return const0_rtx;
4335 if (val < mmin)
4336 return const_true_rtx;
4337 break;
4338
4339 /* x < y is always false for y <= mmin, always true for y > mmax. */
4340 case LTU:
4341 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4342 return const0_rtx;
4343 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4344 return const_true_rtx;
4345 break;
4346 case LT:
4347 if (val <= mmin)
4348 return const0_rtx;
4349 if (val > mmax)
4350 return const_true_rtx;
4351 break;
4352
4353 case NE:
4354 /* x != y is always true for y out of range. */
4355 if (val < mmin || val > mmax)
4356 return const_true_rtx;
4357 break;
4358
4359 default:
4360 break;
4361 }
4362 }
4363
4364 /* Optimize integer comparisons with zero. */
4365 if (trueop1 == const0_rtx)
4366 {
4367 /* Some addresses are known to be nonzero. We don't know
4368 their sign, but equality comparisons are known. */
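/* E.g. (ne (symbol_ref FOO) (const_int 0)) folds to true when FOO's
   address is known to be nonzero. */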
4369 if (nonzero_address_p (trueop0))
4370 {
4371 if (code == EQ || code == LEU)
4372 return const0_rtx;
4373 if (code == NE || code == GTU)
4374 return const_true_rtx;
4375 }
4376
4377 /* See if the first operand is an IOR with a constant. If so, we
4378 may be able to determine the result of this comparison. */
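/* For example, (ne (ior x (const_int 4)) (const_int 0)) is always true,
   because bit 2 of the IOR is known to be set. */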
4379 if (GET_CODE (op0) == IOR)
4380 {
4381 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4382 if (GET_CODE (inner_const) == CONST_INT && inner_const != const0_rtx)
4383 {
4384 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4385 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4386 && (INTVAL (inner_const)
4387 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4388
4389 switch (code)
4390 {
4391 case EQ:
4392 case LEU:
4393 return const0_rtx;
4394 case NE:
4395 case GTU:
4396 return const_true_rtx;
4397 case LT:
4398 case LE:
4399 if (has_sign)
4400 return const_true_rtx;
4401 break;
4402 case GT:
4403 case GE:
4404 if (has_sign)
4405 return const0_rtx;
4406 break;
4407 default:
4408 break;
4409 }
4410 }
4411 }
4412 }
4413
4414 /* Optimize comparison of ABS with zero. */
4415 if (trueop1 == CONST0_RTX (mode)
4416 && (GET_CODE (trueop0) == ABS
4417 || (GET_CODE (trueop0) == FLOAT_EXTEND
4418 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4419 {
4420 switch (code)
4421 {
4422 case LT:
4423 /* Optimize abs(x) < 0.0. */
4424 if (!HONOR_SNANS (mode)
4425 && (!INTEGRAL_MODE_P (mode)
4426 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4427 {
4428 if (INTEGRAL_MODE_P (mode)
4429 && (issue_strict_overflow_warning
4430 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4431 warning (OPT_Wstrict_overflow,
4432 ("assuming signed overflow does not occur when "
4433 "assuming abs (x) < 0 is false"));
4434 return const0_rtx;
4435 }
4436 break;
4437
4438 case GE:
4439 /* Optimize abs(x) >= 0.0. */
4440 if (!HONOR_NANS (mode)
4441 && (!INTEGRAL_MODE_P (mode)
4442 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4443 {
4444 if (INTEGRAL_MODE_P (mode)
4445 && (issue_strict_overflow_warning
4446 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4447 warning (OPT_Wstrict_overflow,
4448 ("assuming signed overflow does not occur when "
4449 "assuming abs (x) >= 0 is true"));
4450 return const_true_rtx;
4451 }
4452 break;
4453
4454 case UNGE:
4455 /* Optimize ! (abs(x) < 0.0). */
4456 return const_true_rtx;
4457
4458 default:
4459 break;
4460 }
4461 }
4462
4463 return 0;
4464 }
4465 \f
4466 /* Simplify CODE, an operation with result mode MODE and three operands,
4467 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4468 a constant. Return 0 if no simplification is possible. */
4469
4470 rtx
4471 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4472 enum machine_mode op0_mode, rtx op0, rtx op1,
4473 rtx op2)
4474 {
4475 unsigned int width = GET_MODE_BITSIZE (mode);
4476
4477 /* VOIDmode means "infinite" precision. */
4478 if (width == 0)
4479 width = HOST_BITS_PER_WIDE_INT;
4480
4481 switch (code)
4482 {
4483 case SIGN_EXTRACT:
4484 case ZERO_EXTRACT:
4485 if (GET_CODE (op0) == CONST_INT
4486 && GET_CODE (op1) == CONST_INT
4487 && GET_CODE (op2) == CONST_INT
4488 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4489 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4490 {
4491 /* Extracting a bit-field from a constant */
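/* For example, with !BITS_BIG_ENDIAN, (zero_extract (const_int 0x3c)
   (const_int 3) (const_int 2)) extracts bits 2..4 of 0b111100 and
   yields (const_int 7). */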
4492 HOST_WIDE_INT val = INTVAL (op0);
4493
4494 if (BITS_BIG_ENDIAN)
4495 val >>= (GET_MODE_BITSIZE (op0_mode)
4496 - INTVAL (op2) - INTVAL (op1));
4497 else
4498 val >>= INTVAL (op2);
4499
4500 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4501 {
4502 /* First zero-extend. */
4503 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4504 /* If desired, propagate sign bit. */
4505 if (code == SIGN_EXTRACT
4506 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4507 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4508 }
4509
4510 /* Clear the bits that don't belong in our mode,
4511 unless they and our sign bit are all one.
4512 So we get either a reasonable negative value or a reasonable
4513 unsigned value for this mode. */
4514 if (width < HOST_BITS_PER_WIDE_INT
4515 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4516 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4517 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4518
4519 return gen_int_mode (val, mode);
4520 }
4521 break;
4522
4523 case IF_THEN_ELSE:
4524 if (GET_CODE (op0) == CONST_INT)
4525 return op0 != const0_rtx ? op1 : op2;
4526
4527 /* Convert c ? a : a into "a". */
4528 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4529 return op1;
4530
4531 /* Convert a != b ? a : b into "a". */
4532 if (GET_CODE (op0) == NE
4533 && ! side_effects_p (op0)
4534 && ! HONOR_NANS (mode)
4535 && ! HONOR_SIGNED_ZEROS (mode)
4536 && ((rtx_equal_p (XEXP (op0, 0), op1)
4537 && rtx_equal_p (XEXP (op0, 1), op2))
4538 || (rtx_equal_p (XEXP (op0, 0), op2)
4539 && rtx_equal_p (XEXP (op0, 1), op1))))
4540 return op1;
4541
4542 /* Convert a == b ? a : b into "b". */
4543 if (GET_CODE (op0) == EQ
4544 && ! side_effects_p (op0)
4545 && ! HONOR_NANS (mode)
4546 && ! HONOR_SIGNED_ZEROS (mode)
4547 && ((rtx_equal_p (XEXP (op0, 0), op1)
4548 && rtx_equal_p (XEXP (op0, 1), op2))
4549 || (rtx_equal_p (XEXP (op0, 0), op2)
4550 && rtx_equal_p (XEXP (op0, 1), op1))))
4551 return op2;
4552
4553 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4554 {
4555 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4556 ? GET_MODE (XEXP (op0, 1))
4557 : GET_MODE (XEXP (op0, 0)));
4558 rtx temp;
4559
4560 /* Look for happy constants in op1 and op2. */
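/* E.g. assuming STORE_FLAG_VALUE is 1, (if_then_else (lt x y) (const_int 1)
   (const_int 0)) becomes (lt x y), and with the two constants swapped it
   becomes the reversed comparison. */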
4561 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4562 {
4563 HOST_WIDE_INT t = INTVAL (op1);
4564 HOST_WIDE_INT f = INTVAL (op2);
4565
4566 if (t == STORE_FLAG_VALUE && f == 0)
4567 code = GET_CODE (op0);
4568 else if (t == 0 && f == STORE_FLAG_VALUE)
4569 {
4570 enum rtx_code tmp;
4571 tmp = reversed_comparison_code (op0, NULL_RTX);
4572 if (tmp == UNKNOWN)
4573 break;
4574 code = tmp;
4575 }
4576 else
4577 break;
4578
4579 return simplify_gen_relational (code, mode, cmp_mode,
4580 XEXP (op0, 0), XEXP (op0, 1));
4581 }
4582
4583 if (cmp_mode == VOIDmode)
4584 cmp_mode = op0_mode;
4585 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4586 cmp_mode, XEXP (op0, 0),
4587 XEXP (op0, 1));
4588
4589 /* See if any simplifications were possible. */
4590 if (temp)
4591 {
4592 if (GET_CODE (temp) == CONST_INT)
4593 return temp == const0_rtx ? op2 : op1;
4594 else if (temp)
4595 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4596 }
4597 }
4598 break;
4599
4600 case VEC_MERGE:
4601 gcc_assert (GET_MODE (op0) == mode);
4602 gcc_assert (GET_MODE (op1) == mode);
4603 gcc_assert (VECTOR_MODE_P (mode));
4604 op2 = avoid_constant_pool_reference (op2);
4605 if (GET_CODE (op2) == CONST_INT)
4606 {
4607 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4608 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4609 int mask = (1 << n_elts) - 1;
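/* Bit I of OP2 selects element I of the result from OP0; a clear bit
   selects it from OP1. */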
4610
4611 if (!(INTVAL (op2) & mask))
4612 return op1;
4613 if ((INTVAL (op2) & mask) == mask)
4614 return op0;
4615
4616 op0 = avoid_constant_pool_reference (op0);
4617 op1 = avoid_constant_pool_reference (op1);
4618 if (GET_CODE (op0) == CONST_VECTOR
4619 && GET_CODE (op1) == CONST_VECTOR)
4620 {
4621 rtvec v = rtvec_alloc (n_elts);
4622 unsigned int i;
4623
4624 for (i = 0; i < n_elts; i++)
4625 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4626 ? CONST_VECTOR_ELT (op0, i)
4627 : CONST_VECTOR_ELT (op1, i));
4628 return gen_rtx_CONST_VECTOR (mode, v);
4629 }
4630 }
4631 break;
4632
4633 default:
4634 gcc_unreachable ();
4635 }
4636
4637 return 0;
4638 }
4639
4640 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4641 or CONST_VECTOR,
4642 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4643
4644 Works by unpacking OP into a collection of 8-bit values
4645 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4646 and then repacking them again for OUTERMODE. */
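/* For example, on a little-endian target a SImode subreg at byte 4 of the
   DImode constant 0x1122334455667788 yields the upper half,
   (const_int 0x11223344). */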
4647
4648 static rtx
4649 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4650 enum machine_mode innermode, unsigned int byte)
4651 {
4652 /* We support up to 512-bit values (for V8DFmode). */
4653 enum {
4654 max_bitsize = 512,
4655 value_bit = 8,
4656 value_mask = (1 << value_bit) - 1
4657 };
4658 unsigned char value[max_bitsize / value_bit];
4659 int value_start;
4660 int i;
4661 int elem;
4662
4663 int num_elem;
4664 rtx * elems;
4665 int elem_bitsize;
4666 rtx result_s;
4667 rtvec result_v = NULL;
4668 enum mode_class outer_class;
4669 enum machine_mode outer_submode;
4670
4671 /* Some ports misuse CCmode. */
4672 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4673 return op;
4674
4675 /* We have no way to represent a complex constant at the rtl level. */
4676 if (COMPLEX_MODE_P (outermode))
4677 return NULL_RTX;
4678
4679 /* Unpack the value. */
4680
4681 if (GET_CODE (op) == CONST_VECTOR)
4682 {
4683 num_elem = CONST_VECTOR_NUNITS (op);
4684 elems = &CONST_VECTOR_ELT (op, 0);
4685 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4686 }
4687 else
4688 {
4689 num_elem = 1;
4690 elems = &op;
4691 elem_bitsize = max_bitsize;
4692 }
4693 /* If this asserts, it is too complicated; reducing value_bit may help. */
4694 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4695 /* I don't know how to handle endianness of sub-units. */
4696 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4697
4698 for (elem = 0; elem < num_elem; elem++)
4699 {
4700 unsigned char * vp;
4701 rtx el = elems[elem];
4702
4703 /* Vectors are kept in target memory order. (This is probably
4704 a mistake.) */
4705 {
4706 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4707 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4708 / BITS_PER_UNIT);
4709 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4710 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4711 unsigned bytele = (subword_byte % UNITS_PER_WORD
4712 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4713 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4714 }
4715
4716 switch (GET_CODE (el))
4717 {
4718 case CONST_INT:
4719 for (i = 0;
4720 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4721 i += value_bit)
4722 *vp++ = INTVAL (el) >> i;
4723 /* CONST_INTs are always logically sign-extended. */
4724 for (; i < elem_bitsize; i += value_bit)
4725 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4726 break;
4727
4728 case CONST_DOUBLE:
4729 if (GET_MODE (el) == VOIDmode)
4730 {
4731 /* If this triggers, someone should have generated a
4732 CONST_INT instead. */
4733 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4734
4735 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4736 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4737 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4738 {
4739 *vp++
4740 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4741 i += value_bit;
4742 }
4743 /* It shouldn't matter what's done here, so fill it with
4744 zero. */
4745 for (; i < elem_bitsize; i += value_bit)
4746 *vp++ = 0;
4747 }
4748 else
4749 {
4750 long tmp[max_bitsize / 32];
4751 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4752
4753 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4754 gcc_assert (bitsize <= elem_bitsize);
4755 gcc_assert (bitsize % value_bit == 0);
4756
4757 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4758 GET_MODE (el));
4759
4760 /* real_to_target produces its result in words affected by
4761 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4762 and use WORDS_BIG_ENDIAN instead; see the documentation
4763 of SUBREG in rtl.texi. */
4764 for (i = 0; i < bitsize; i += value_bit)
4765 {
4766 int ibase;
4767 if (WORDS_BIG_ENDIAN)
4768 ibase = bitsize - 1 - i;
4769 else
4770 ibase = i;
4771 *vp++ = tmp[ibase / 32] >> i % 32;
4772 }
4773
4774 /* It shouldn't matter what's done here, so fill it with
4775 zero. */
4776 for (; i < elem_bitsize; i += value_bit)
4777 *vp++ = 0;
4778 }
4779 break;
4780
4781 case CONST_FIXED:
4782 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4783 {
4784 for (i = 0; i < elem_bitsize; i += value_bit)
4785 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4786 }
4787 else
4788 {
4789 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4790 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4791 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4792 i += value_bit)
4793 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4794 >> (i - HOST_BITS_PER_WIDE_INT);
4795 for (; i < elem_bitsize; i += value_bit)
4796 *vp++ = 0;
4797 }
4798 break;
4799
4800 default:
4801 gcc_unreachable ();
4802 }
4803 }
4804
4805 /* Now, pick the right byte to start with. */
4806 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4807 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4808 will already have offset 0. */
4809 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4810 {
4811 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4812 - byte);
4813 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4814 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4815 byte = (subword_byte % UNITS_PER_WORD
4816 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4817 }
4818
4819 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4820 so if it's become negative it will instead be very large.) */
4821 gcc_assert (byte < GET_MODE_SIZE (innermode));
4822
4823 /* Convert from bytes to chunks of size value_bit. */
4824 value_start = byte * (BITS_PER_UNIT / value_bit);
4825
4826 /* Re-pack the value. */
4827
4828 if (VECTOR_MODE_P (outermode))
4829 {
4830 num_elem = GET_MODE_NUNITS (outermode);
4831 result_v = rtvec_alloc (num_elem);
4832 elems = &RTVEC_ELT (result_v, 0);
4833 outer_submode = GET_MODE_INNER (outermode);
4834 }
4835 else
4836 {
4837 num_elem = 1;
4838 elems = &result_s;
4839 outer_submode = outermode;
4840 }
4841
4842 outer_class = GET_MODE_CLASS (outer_submode);
4843 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4844
4845 gcc_assert (elem_bitsize % value_bit == 0);
4846 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4847
4848 for (elem = 0; elem < num_elem; elem++)
4849 {
4850 unsigned char *vp;
4851
4852 /* Vectors are stored in target memory order. (This is probably
4853 a mistake.) */
4854 {
4855 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4856 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4857 / BITS_PER_UNIT);
4858 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4859 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4860 unsigned bytele = (subword_byte % UNITS_PER_WORD
4861 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4862 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4863 }
4864
4865 switch (outer_class)
4866 {
4867 case MODE_INT:
4868 case MODE_PARTIAL_INT:
4869 {
4870 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4871
4872 for (i = 0;
4873 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4874 i += value_bit)
4875 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4876 for (; i < elem_bitsize; i += value_bit)
4877 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4878 << (i - HOST_BITS_PER_WIDE_INT));
4879
4880 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4881 know why. */
4882 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4883 elems[elem] = gen_int_mode (lo, outer_submode);
4884 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4885 elems[elem] = immed_double_const (lo, hi, outer_submode);
4886 else
4887 return NULL_RTX;
4888 }
4889 break;
4890
4891 case MODE_FLOAT:
4892 case MODE_DECIMAL_FLOAT:
4893 {
4894 REAL_VALUE_TYPE r;
4895 long tmp[max_bitsize / 32];
4896
4897 /* real_from_target wants its input in words affected by
4898 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4899 and use WORDS_BIG_ENDIAN instead; see the documentation
4900 of SUBREG in rtl.texi. */
4901 for (i = 0; i < max_bitsize / 32; i++)
4902 tmp[i] = 0;
4903 for (i = 0; i < elem_bitsize; i += value_bit)
4904 {
4905 int ibase;
4906 if (WORDS_BIG_ENDIAN)
4907 ibase = elem_bitsize - 1 - i;
4908 else
4909 ibase = i;
4910 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4911 }
4912
4913 real_from_target (&r, tmp, outer_submode);
4914 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4915 }
4916 break;
4917
4918 case MODE_FRACT:
4919 case MODE_UFRACT:
4920 case MODE_ACCUM:
4921 case MODE_UACCUM:
4922 {
4923 FIXED_VALUE_TYPE f;
4924 f.data.low = 0;
4925 f.data.high = 0;
4926 f.mode = outer_submode;
4927
4928 for (i = 0;
4929 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4930 i += value_bit)
4931 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4932 for (; i < elem_bitsize; i += value_bit)
4933 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4934 << (i - HOST_BITS_PER_WIDE_INT));
4935
4936 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
4937 }
4938 break;
4939
4940 default:
4941 gcc_unreachable ();
4942 }
4943 }
4944 if (VECTOR_MODE_P (outermode))
4945 return gen_rtx_CONST_VECTOR (outermode, result_v);
4946 else
4947 return result_s;
4948 }
4949
4950 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4951 Return 0 if no simplifications are possible. */
4952 rtx
4953 simplify_subreg (enum machine_mode outermode, rtx op,
4954 enum machine_mode innermode, unsigned int byte)
4955 {
4956 /* Little bit of sanity checking. */
4957 gcc_assert (innermode != VOIDmode);
4958 gcc_assert (outermode != VOIDmode);
4959 gcc_assert (innermode != BLKmode);
4960 gcc_assert (outermode != BLKmode);
4961
4962 gcc_assert (GET_MODE (op) == innermode
4963 || GET_MODE (op) == VOIDmode);
4964
4965 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4966 gcc_assert (byte < GET_MODE_SIZE (innermode));
4967
4968 if (outermode == innermode && !byte)
4969 return op;
4970
4971 if (GET_CODE (op) == CONST_INT
4972 || GET_CODE (op) == CONST_DOUBLE
4973 || GET_CODE (op) == CONST_FIXED
4974 || GET_CODE (op) == CONST_VECTOR)
4975 return simplify_immed_subreg (outermode, op, innermode, byte);
4976
4977 /* Changing mode twice with SUBREG => just change it once,
4978 or not at all if changing back to the starting mode of OP. */
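/* E.g. on a little-endian target, (subreg:QI (subreg:HI (reg:SI x) 0) 0)
   becomes (subreg:QI (reg:SI x) 0), while (subreg:SI (subreg:HI (reg:SI x) 0) 0)
   becomes (reg:SI x) again. */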
4979 if (GET_CODE (op) == SUBREG)
4980 {
4981 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4982 int final_offset = byte + SUBREG_BYTE (op);
4983 rtx newx;
4984
4985 if (outermode == innermostmode
4986 && byte == 0 && SUBREG_BYTE (op) == 0)
4987 return SUBREG_REG (op);
4988
4989 /* The SUBREG_BYTE represents the offset, as if the value were stored
4990 in memory. The irritating exception is a paradoxical subreg, where
4991 we define SUBREG_BYTE to be 0; on big-endian machines this value
4992 would otherwise be negative. For a moment, undo this exception. */
4993 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4994 {
4995 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4996 if (WORDS_BIG_ENDIAN)
4997 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4998 if (BYTES_BIG_ENDIAN)
4999 final_offset += difference % UNITS_PER_WORD;
5000 }
5001 if (SUBREG_BYTE (op) == 0
5002 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5003 {
5004 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5005 if (WORDS_BIG_ENDIAN)
5006 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5007 if (BYTES_BIG_ENDIAN)
5008 final_offset += difference % UNITS_PER_WORD;
5009 }
5010
5011 /* See whether resulting subreg will be paradoxical. */
5012 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5013 {
5014 /* In nonparadoxical subregs we can't handle negative offsets. */
5015 if (final_offset < 0)
5016 return NULL_RTX;
5017 /* Bail out in case resulting subreg would be incorrect. */
5018 if (final_offset % GET_MODE_SIZE (outermode)
5019 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5020 return NULL_RTX;
5021 }
5022 else
5023 {
5024 int offset = 0;
5025 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5026
5027 /* In a paradoxical subreg, see if we are still looking at the lower part.
5028 If so, our SUBREG_BYTE will be 0. */
5029 if (WORDS_BIG_ENDIAN)
5030 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5031 if (BYTES_BIG_ENDIAN)
5032 offset += difference % UNITS_PER_WORD;
5033 if (offset == final_offset)
5034 final_offset = 0;
5035 else
5036 return NULL_RTX;
5037 }
5038
5039 /* Recurse for further possible simplifications. */
5040 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5041 final_offset);
5042 if (newx)
5043 return newx;
5044 if (validate_subreg (outermode, innermostmode,
5045 SUBREG_REG (op), final_offset))
5046 {
5047 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5048 if (SUBREG_PROMOTED_VAR_P (op)
5049 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5050 && GET_MODE_CLASS (outermode) == MODE_INT
5051 && IN_RANGE (GET_MODE_SIZE (outermode),
5052 GET_MODE_SIZE (innermode),
5053 GET_MODE_SIZE (innermostmode))
5054 && subreg_lowpart_p (newx))
5055 {
5056 SUBREG_PROMOTED_VAR_P (newx) = 1;
5057 SUBREG_PROMOTED_UNSIGNED_SET
5058 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5059 }
5060 return newx;
5061 }
5062 return NULL_RTX;
5063 }
5064
5065 /* Merge implicit and explicit truncations. */
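/* For instance, taking the lowpart with (subreg:HI (truncate:SI (reg:DI x)) 0)
   on a little-endian target gives (truncate:HI (reg:DI x)). */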
5066
5067 if (GET_CODE (op) == TRUNCATE
5068 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5069 && subreg_lowpart_offset (outermode, innermode) == byte)
5070 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5071 GET_MODE (XEXP (op, 0)));
5072
5073 /* SUBREG of a hard register => just change the register number
5074 and/or mode. If the hard register is not valid in that mode,
5075 suppress this simplification. If the hard register is the stack,
5076 frame, or argument pointer, leave this as a SUBREG. */
5077
5078 if (REG_P (op) && HARD_REGISTER_P (op))
5079 {
5080 unsigned int regno, final_regno;
5081
5082 regno = REGNO (op);
5083 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5084 if (HARD_REGISTER_NUM_P (final_regno))
5085 {
5086 rtx x;
5087 int final_offset = byte;
5088
5089 /* Adjust offset for paradoxical subregs. */
5090 if (byte == 0
5091 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5092 {
5093 int difference = (GET_MODE_SIZE (innermode)
5094 - GET_MODE_SIZE (outermode));
5095 if (WORDS_BIG_ENDIAN)
5096 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5097 if (BYTES_BIG_ENDIAN)
5098 final_offset += difference % UNITS_PER_WORD;
5099 }
5100
5101 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5102
5103 /* Propagate the original regno. We don't have any way to specify
5104 the offset inside the original regno, so do so only for the lowpart.
5105 The information is used only by alias analysis, which cannot
5106 grok a partial register anyway. */
5107
5108 if (subreg_lowpart_offset (outermode, innermode) == byte)
5109 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5110 return x;
5111 }
5112 }
5113
5114 /* If we have a SUBREG of a register that we are replacing and we are
5115 replacing it with a MEM, make a new MEM and try replacing the
5116 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5117 or if we would be widening it. */
5118
5119 if (MEM_P (op)
5120 && ! mode_dependent_address_p (XEXP (op, 0))
5121 /* Allow splitting of volatile memory references in case we don't
5122 have an instruction to move the whole thing. */
5123 && (! MEM_VOLATILE_P (op)
5124 || ! have_insn_for (SET, innermode))
5125 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5126 return adjust_address_nv (op, outermode, byte);
5127
5128 /* Handle complex values represented as CONCAT
5129 of real and imaginary part. */
5130 if (GET_CODE (op) == CONCAT)
5131 {
5132 unsigned int part_size, final_offset;
5133 rtx part, res;
5134
5135 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5136 if (byte < part_size)
5137 {
5138 part = XEXP (op, 0);
5139 final_offset = byte;
5140 }
5141 else
5142 {
5143 part = XEXP (op, 1);
5144 final_offset = byte - part_size;
5145 }
5146
5147 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5148 return NULL_RTX;
5149
5150 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5151 if (res)
5152 return res;
5153 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5154 return gen_rtx_SUBREG (outermode, part, final_offset);
5155 return NULL_RTX;
5156 }
5157
5158 /* Optimize SUBREG truncations of zero and sign extended values. */
5159 if ((GET_CODE (op) == ZERO_EXTEND
5160 || GET_CODE (op) == SIGN_EXTEND)
5161 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5162 {
5163 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5164
5165 /* If we're requesting the lowpart of a zero or sign extension,
5166 there are three possibilities. If the outermode is the same
5167 as the origmode, we can omit both the extension and the subreg.
5168 If the outermode is not larger than the origmode, we can apply
5169 the truncation without the extension. Finally, if the outermode
5170 is larger than the origmode, but both are integer modes, we
5171 can just extend to the appropriate mode. */
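/* For example, (subreg:HI (zero_extend:SI (reg:HI x)) 0) is just (reg:HI x),
   while (subreg:HI (zero_extend:SI (reg:QI x)) 0) becomes
   (zero_extend:HI (reg:QI x)), assuming byte 0 is the lowpart. */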
5172 if (bitpos == 0)
5173 {
5174 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5175 if (outermode == origmode)
5176 return XEXP (op, 0);
5177 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5178 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5179 subreg_lowpart_offset (outermode,
5180 origmode));
5181 if (SCALAR_INT_MODE_P (outermode))
5182 return simplify_gen_unary (GET_CODE (op), outermode,
5183 XEXP (op, 0), origmode);
5184 }
5185
5186 /* A SUBREG resulting from a zero extension may fold to zero if
5187 it extracts bits above the ZERO_EXTEND's source bits. */
5188 if (GET_CODE (op) == ZERO_EXTEND
5189 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5190 return CONST0_RTX (outermode);
5191 }
5192
5193 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5194 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5195 the outer subreg is effectively a truncation to the original mode. */
5196 if ((GET_CODE (op) == LSHIFTRT
5197 || GET_CODE (op) == ASHIFTRT)
5198 && SCALAR_INT_MODE_P (outermode)
5199 /* Ensure that OUTERMODE is at least twice as wide as INNERMODE
5200 to avoid the possibility that an outer LSHIFTRT shifts by more
5201 than the sign extension's sign_bit_copies and introduces zeros
5202 into the high bits of the result. */
5203 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5204 && GET_CODE (XEXP (op, 1)) == CONST_INT
5205 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5206 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5207 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5208 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5209 return simplify_gen_binary (ASHIFTRT, outermode,
5210 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5211
5212 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5213 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5214 the outer subreg is effectively a truncation to the original mode. */
5215 if ((GET_CODE (op) == LSHIFTRT
5216 || GET_CODE (op) == ASHIFTRT)
5217 && SCALAR_INT_MODE_P (outermode)
5218 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5219 && GET_CODE (XEXP (op, 1)) == CONST_INT
5220 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5221 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5222 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5223 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5224 return simplify_gen_binary (LSHIFTRT, outermode,
5225 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5226
5227 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5228 (ashift:QI (x:QI) C), where C is a suitable small constant and
5229 the outer subreg is effectively a truncation to the original mode. */
5230 if (GET_CODE (op) == ASHIFT
5231 && SCALAR_INT_MODE_P (outermode)
5232 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5233 && GET_CODE (XEXP (op, 1)) == CONST_INT
5234 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5235 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5236 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5237 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5238 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5239 return simplify_gen_binary (ASHIFT, outermode,
5240 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5241
5242 /* Recognize a word extraction from a multi-word subreg. */
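/* E.g. on a 32-bit little-endian target, (subreg:SI (lshiftrt:DI (reg:DI x)
   (const_int 32)) 0) becomes (subreg:SI (reg:DI x) 4). */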
5243 if ((GET_CODE (op) == LSHIFTRT
5244 || GET_CODE (op) == ASHIFTRT)
5245 && SCALAR_INT_MODE_P (outermode)
5246 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5247 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5248 && GET_CODE (XEXP (op, 1)) == CONST_INT
5249 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5250 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5251 && byte == subreg_lowpart_offset (outermode, innermode))
5252 {
5253 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5254 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5255 (WORDS_BIG_ENDIAN
5256 ? byte - shifted_bytes : byte + shifted_bytes));
5257 }
5258
5259 return NULL_RTX;
5260 }
5261
5262 /* Make a SUBREG operation or equivalent if it folds. */
5263
5264 rtx
5265 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5266 enum machine_mode innermode, unsigned int byte)
5267 {
5268 rtx newx;
5269
5270 newx = simplify_subreg (outermode, op, innermode, byte);
5271 if (newx)
5272 return newx;
5273
5274 if (GET_CODE (op) == SUBREG
5275 || GET_CODE (op) == CONCAT
5276 || GET_MODE (op) == VOIDmode)
5277 return NULL_RTX;
5278
5279 if (validate_subreg (outermode, innermode, op, byte))
5280 return gen_rtx_SUBREG (outermode, op, byte);
5281
5282 return NULL_RTX;
5283 }
5284
5285 /* Simplify X, an rtx expression.
5286
5287 Return the simplified expression or NULL if no simplifications
5288 were possible.
5289
5290 This is the preferred entry point into the simplification routines;
5291 however, we still allow passes to call the more specific routines.
5292
5293 Right now GCC has three (yes, three) major bodies of RTL simplification
5294 code that need to be unified.
5295
5296 1. fold_rtx in cse.c. This code uses various CSE specific
5297 information to aid in RTL simplification.
5298
5299 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5300 it uses combine specific information to aid in RTL
5301 simplification.
5302
5303 3. The routines in this file.
5304
5305
5306 Long term we want to only have one body of simplification code; to
5307 get to that state I recommend the following steps:
5308
5309 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5310 that do not depend on pass-specific state into these routines.
5311
5312 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5313 use this routine whenever possible.
5314
5315 3. Allow for pass dependent state to be provided to these
5316 routines and add simplifications based on the pass dependent
5317 state. Remove code from cse.c & combine.c that becomes
5318 redundant/dead.
5319
5320 It will take time, but ultimately the compiler will be easier to
5321 maintain and improve. It's totally silly that when we add a
5322 simplification it needs to be added to 4 places (3 for RTL
5323 simplification and 1 for tree simplification). */
5324
5325 rtx
5326 simplify_rtx (const_rtx x)
5327 {
5328 const enum rtx_code code = GET_CODE (x);
5329 const enum machine_mode mode = GET_MODE (x);
5330
5331 switch (GET_RTX_CLASS (code))
5332 {
5333 case RTX_UNARY:
5334 return simplify_unary_operation (code, mode,
5335 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5336 case RTX_COMM_ARITH:
5337 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5338 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5339
5340 /* Fall through.... */
5341
5342 case RTX_BIN_ARITH:
5343 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5344
5345 case RTX_TERNARY:
5346 case RTX_BITFIELD_OPS:
5347 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5348 XEXP (x, 0), XEXP (x, 1),
5349 XEXP (x, 2));
5350
5351 case RTX_COMPARE:
5352 case RTX_COMM_COMPARE:
5353 return simplify_relational_operation (code, mode,
5354 ((GET_MODE (XEXP (x, 0))
5355 != VOIDmode)
5356 ? GET_MODE (XEXP (x, 0))
5357 : GET_MODE (XEXP (x, 1))),
5358 XEXP (x, 0),
5359 XEXP (x, 1));
5360
5361 case RTX_EXTRA:
5362 if (code == SUBREG)
5363 return simplify_subreg (mode, SUBREG_REG (x),
5364 GET_MODE (SUBREG_REG (x)),
5365 SUBREG_BYTE (x));
5366 break;
5367
5368 case RTX_OBJ:
5369 if (code == LO_SUM)
5370 {
5371 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5372 if (GET_CODE (XEXP (x, 0)) == HIGH
5373 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5374 return XEXP (x, 1);
5375 }
5376 break;
5377
5378 default:
5379 break;
5380 }
5381 return NULL;
5382 }