gcc.git: gcc/simplify-rtx.c (revision 4e87d04abaf680bf3dd7bcf3df4ce015d67684ee)
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
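/* For example, with a 64-bit HOST_WIDE_INT, a low word whose sign bit is
   set extends to a high word of -1, while any other low word extends to a
   high word of 0.  */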
51
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
76
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
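/* For example, for SImode this holds only for the constant whose low 32
   bits are 0x80000000, i.e. the value with just the most significant bit
   of the mode set.  */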
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
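/* For instance, simplify_gen_binary (PLUS, SImode, (const_int 4), (reg X))
   folds nothing but still returns the canonical (plus (reg X) (const_int 4)),
   with the constant placed second.  */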
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 if (GET_MODE (x) == BLKmode)
162 return x;
163
164 addr = XEXP (x, 0);
165
 166   /* Call target hook to avoid the effects of -fpic etc.  */
167 addr = targetm.delegitimize_address (addr);
168
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 {
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
176 }
177
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
180
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
185 {
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
188
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
193 {
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
197 }
198 else
199 return c;
200 }
201
202 return x;
203 }
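/* For example, a (mem (symbol_ref ...)) that addresses a pooled DFmode
   constant is replaced by the pooled (const_double ...) itself, letting
   later folding see the actual value.  */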
204 \f
205 /* Simplify a MEM based on its attributes. This is the default
206 delegitimize_address target hook, and it's recommended that every
207 overrider call it. */
208
209 rtx
210 delegitimize_mem_from_attrs (rtx x)
211 {
212 if (MEM_P (x)
213 && MEM_EXPR (x)
214 && (!MEM_OFFSET (x)
215 || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
216 {
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
220
221 switch (TREE_CODE (decl))
222 {
223 default:
224 decl = NULL;
225 break;
226
227 case VAR_DECL:
228 break;
229
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
237 {
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
241
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
249 {
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
253 }
254 break;
255 }
256 }
257
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
265 {
266 rtx newx;
267
268 if (MEM_OFFSET (x))
269 offset += INTVAL (MEM_OFFSET (x));
270
271 newx = DECL_RTL (decl);
272
273 if (MEM_P (newx))
274 {
275 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
276
277 /* Avoid creating a new MEM needlessly if we already had
278 the same address. We do if there's no OFFSET and the
279 old address X is identical to NEWX, or if X is of the
280 form (plus NEWX OFFSET), or the NEWX is of the form
281 (plus Y (const_int Z)) and X is that with the offset
282 added: (plus Y (const_int Z+OFFSET)). */
283 if (!((offset == 0
284 || (GET_CODE (o) == PLUS
285 && GET_CODE (XEXP (o, 1)) == CONST_INT
286 && (offset == INTVAL (XEXP (o, 1))
287 || (GET_CODE (n) == PLUS
288 && GET_CODE (XEXP (n, 1)) == CONST_INT
289 && (INTVAL (XEXP (n, 1)) + offset
290 == INTVAL (XEXP (o, 1)))
291 && (n = XEXP (n, 0))))
292 && (o = XEXP (o, 0))))
293 && rtx_equal_p (o, n)))
294 x = adjust_address_nv (newx, mode, offset);
295 }
296 else if (GET_MODE (x) == GET_MODE (newx)
297 && offset == 0)
298 x = newx;
299 }
300 }
301
302 return x;
303 }
304 \f
305 /* Make a unary operation by first seeing if it folds and otherwise making
306 the specified operation. */
307
308 rtx
309 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
310 enum machine_mode op_mode)
311 {
312 rtx tem;
313
314 /* If this simplifies, use it. */
315 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
316 return tem;
317
318 return gen_rtx_fmt_e (code, mode, op);
319 }
320
321 /* Likewise for ternary operations. */
322
323 rtx
324 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
325 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
326 {
327 rtx tem;
328
329 /* If this simplifies, use it. */
330 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
331 op0, op1, op2)))
332 return tem;
333
334 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
335 }
336
337 /* Likewise, for relational operations.
338 CMP_MODE specifies mode comparison is done in. */
339
340 rtx
341 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
342 enum machine_mode cmp_mode, rtx op0, rtx op1)
343 {
344 rtx tem;
345
346 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
347 op0, op1)))
348 return tem;
349
350 return gen_rtx_fmt_ee (code, mode, op0, op1);
351 }
352 \f
353 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
354 resulting RTX. Return a new RTX which is as simplified as possible. */
355
356 rtx
357 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
358 {
359 enum rtx_code code = GET_CODE (x);
360 enum machine_mode mode = GET_MODE (x);
361 enum machine_mode op_mode;
362 rtx op0, op1, op2;
363
364 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
365 to build a new expression substituting recursively. If we can't do
366 anything, return our input. */
367
368 if (rtx_equal_p (x, old_rtx))
369 return copy_rtx (new_rtx);
370
371 switch (GET_RTX_CLASS (code))
372 {
373 case RTX_UNARY:
374 op0 = XEXP (x, 0);
375 op_mode = GET_MODE (op0);
376 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
377 if (op0 == XEXP (x, 0))
378 return x;
379 return simplify_gen_unary (code, mode, op0, op_mode);
380
381 case RTX_BIN_ARITH:
382 case RTX_COMM_ARITH:
383 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
384 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
385 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
386 return x;
387 return simplify_gen_binary (code, mode, op0, op1);
388
389 case RTX_COMPARE:
390 case RTX_COMM_COMPARE:
391 op0 = XEXP (x, 0);
392 op1 = XEXP (x, 1);
393 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
394 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
395 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
396 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
397 return x;
398 return simplify_gen_relational (code, mode, op_mode, op0, op1);
399
400 case RTX_TERNARY:
401 case RTX_BITFIELD_OPS:
402 op0 = XEXP (x, 0);
403 op_mode = GET_MODE (op0);
404 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
405 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
406 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
407 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
408 return x;
409 if (op_mode == VOIDmode)
410 op_mode = GET_MODE (op0);
411 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
412
413 case RTX_EXTRA:
414 /* The only case we try to handle is a SUBREG. */
415 if (code == SUBREG)
416 {
417 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
418 if (op0 == SUBREG_REG (x))
419 return x;
420 op0 = simplify_gen_subreg (GET_MODE (x), op0,
421 GET_MODE (SUBREG_REG (x)),
422 SUBREG_BYTE (x));
423 return op0 ? op0 : x;
424 }
425 break;
426
427 case RTX_OBJ:
428 if (code == MEM)
429 {
430 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
431 if (op0 == XEXP (x, 0))
432 return x;
433 return replace_equiv_address_nv (x, op0);
434 }
435 else if (code == LO_SUM)
436 {
437 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
438 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
439
440 /* (lo_sum (high x) x) -> x */
441 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
442 return op1;
443
444 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
445 return x;
446 return gen_rtx_LO_SUM (mode, op0, op1);
447 }
448 break;
449
450 default:
451 break;
452 }
453 return x;
454 }
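/* For example, replacing (reg X) with (const_int 2) in
   (plus (reg X) (const_int 3)) yields (const_int 5), since every rebuilt
   subexpression is passed back through the simplifiers.  */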
455 \f
456 /* Try to simplify a unary operation CODE whose output mode is to be
457 MODE with input operand OP whose mode was originally OP_MODE.
458 Return zero if no simplification can be made. */
459 rtx
460 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
461 rtx op, enum machine_mode op_mode)
462 {
463 rtx trueop, tem;
464
465 if (GET_CODE (op) == CONST)
466 op = XEXP (op, 0);
467
468 trueop = avoid_constant_pool_reference (op);
469
470 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
471 if (tem)
472 return tem;
473
474 return simplify_unary_operation_1 (code, mode, op);
475 }
476
477 /* Perform some simplifications we can do even if the operands
478 aren't constant. */
479 static rtx
480 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
481 {
482 enum rtx_code reversed;
483 rtx temp;
484
485 switch (code)
486 {
487 case NOT:
488 /* (not (not X)) == X. */
489 if (GET_CODE (op) == NOT)
490 return XEXP (op, 0);
491
492 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
493 comparison is all ones. */
494 if (COMPARISON_P (op)
495 && (mode == BImode || STORE_FLAG_VALUE == -1)
496 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
497 return simplify_gen_relational (reversed, mode, VOIDmode,
498 XEXP (op, 0), XEXP (op, 1));
499
500 /* (not (plus X -1)) can become (neg X). */
501 if (GET_CODE (op) == PLUS
502 && XEXP (op, 1) == constm1_rtx)
503 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
504
505 /* Similarly, (not (neg X)) is (plus X -1). */
506 if (GET_CODE (op) == NEG)
507 return plus_constant (XEXP (op, 0), -1);
508
509 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
510 if (GET_CODE (op) == XOR
511 && CONST_INT_P (XEXP (op, 1))
512 && (temp = simplify_unary_operation (NOT, mode,
513 XEXP (op, 1), mode)) != 0)
514 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
515
516 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
517 if (GET_CODE (op) == PLUS
518 && CONST_INT_P (XEXP (op, 1))
519 && mode_signbit_p (mode, XEXP (op, 1))
520 && (temp = simplify_unary_operation (NOT, mode,
521 XEXP (op, 1), mode)) != 0)
522 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
523
524
525 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
526 operands other than 1, but that is not valid. We could do a
527 similar simplification for (not (lshiftrt C X)) where C is
528 just the sign bit, but this doesn't seem common enough to
529 bother with. */
530 if (GET_CODE (op) == ASHIFT
531 && XEXP (op, 0) == const1_rtx)
532 {
533 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
534 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
535 }
536
537 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
538 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
539 so we can perform the above simplification. */
540
541 if (STORE_FLAG_VALUE == -1
542 && GET_CODE (op) == ASHIFTRT
 543 	  && CONST_INT_P (XEXP (op, 1))
544 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
545 return simplify_gen_relational (GE, mode, VOIDmode,
546 XEXP (op, 0), const0_rtx);
547
548
549 if (GET_CODE (op) == SUBREG
550 && subreg_lowpart_p (op)
551 && (GET_MODE_SIZE (GET_MODE (op))
552 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
553 && GET_CODE (SUBREG_REG (op)) == ASHIFT
554 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
555 {
556 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
557 rtx x;
558
559 x = gen_rtx_ROTATE (inner_mode,
560 simplify_gen_unary (NOT, inner_mode, const1_rtx,
561 inner_mode),
562 XEXP (SUBREG_REG (op), 1));
563 return rtl_hooks.gen_lowpart_no_emit (mode, x);
564 }
565
566 /* Apply De Morgan's laws to reduce number of patterns for machines
567 with negating logical insns (and-not, nand, etc.). If result has
568 only one NOT, put it first, since that is how the patterns are
569 coded. */
570
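      /* For example, (not (and X Y)) becomes (ior (not X) (not Y)), and
	 (not (ior X Y)) becomes (and (not X) (not Y)).  */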
571 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
572 {
573 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
574 enum machine_mode op_mode;
575
576 op_mode = GET_MODE (in1);
577 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
578
579 op_mode = GET_MODE (in2);
580 if (op_mode == VOIDmode)
581 op_mode = mode;
582 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
583
584 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
585 {
586 rtx tem = in2;
587 in2 = in1; in1 = tem;
588 }
589
590 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
591 mode, in1, in2);
592 }
593 break;
594
595 case NEG:
596 /* (neg (neg X)) == X. */
597 if (GET_CODE (op) == NEG)
598 return XEXP (op, 0);
599
600 /* (neg (plus X 1)) can become (not X). */
601 if (GET_CODE (op) == PLUS
602 && XEXP (op, 1) == const1_rtx)
603 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
604
605 /* Similarly, (neg (not X)) is (plus X 1). */
606 if (GET_CODE (op) == NOT)
607 return plus_constant (XEXP (op, 0), 1);
608
609 /* (neg (minus X Y)) can become (minus Y X). This transformation
610 isn't safe for modes with signed zeros, since if X and Y are
611 both +0, (minus Y X) is the same as (minus X Y). If the
612 rounding mode is towards +infinity (or -infinity) then the two
613 expressions will be rounded differently. */
614 if (GET_CODE (op) == MINUS
615 && !HONOR_SIGNED_ZEROS (mode)
616 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
617 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
618
619 if (GET_CODE (op) == PLUS
620 && !HONOR_SIGNED_ZEROS (mode)
621 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
622 {
623 /* (neg (plus A C)) is simplified to (minus -C A). */
624 if (CONST_INT_P (XEXP (op, 1))
625 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
626 {
627 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
628 if (temp)
629 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
630 }
631
632 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
633 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
634 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
635 }
636
637 /* (neg (mult A B)) becomes (mult (neg A) B).
638 This works even for floating-point values. */
639 if (GET_CODE (op) == MULT
640 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
641 {
642 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
643 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
644 }
645
646 /* NEG commutes with ASHIFT since it is multiplication. Only do
647 this if we can then eliminate the NEG (e.g., if the operand
648 is a constant). */
649 if (GET_CODE (op) == ASHIFT)
650 {
651 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
652 if (temp)
653 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
654 }
655
656 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
657 C is equal to the width of MODE minus 1. */
658 if (GET_CODE (op) == ASHIFTRT
659 && CONST_INT_P (XEXP (op, 1))
660 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
661 return simplify_gen_binary (LSHIFTRT, mode,
662 XEXP (op, 0), XEXP (op, 1));
663
664 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
665 C is equal to the width of MODE minus 1. */
666 if (GET_CODE (op) == LSHIFTRT
667 && CONST_INT_P (XEXP (op, 1))
668 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
669 return simplify_gen_binary (ASHIFTRT, mode,
670 XEXP (op, 0), XEXP (op, 1));
671
672 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
673 if (GET_CODE (op) == XOR
674 && XEXP (op, 1) == const1_rtx
675 && nonzero_bits (XEXP (op, 0), mode) == 1)
676 return plus_constant (XEXP (op, 0), -1);
677
678 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
679 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
680 if (GET_CODE (op) == LT
681 && XEXP (op, 1) == const0_rtx
682 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
683 {
684 enum machine_mode inner = GET_MODE (XEXP (op, 0));
685 int isize = GET_MODE_BITSIZE (inner);
686 if (STORE_FLAG_VALUE == 1)
687 {
688 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
689 GEN_INT (isize - 1));
690 if (mode == inner)
691 return temp;
692 if (GET_MODE_BITSIZE (mode) > isize)
693 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
694 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
695 }
696 else if (STORE_FLAG_VALUE == -1)
697 {
698 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
699 GEN_INT (isize - 1));
700 if (mode == inner)
701 return temp;
702 if (GET_MODE_BITSIZE (mode) > isize)
703 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
704 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
705 }
706 }
707 break;
708
709 case TRUNCATE:
710 /* We can't handle truncation to a partial integer mode here
711 because we don't know the real bitsize of the partial
712 integer mode. */
713 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
714 break;
715
716 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
717 if ((GET_CODE (op) == SIGN_EXTEND
718 || GET_CODE (op) == ZERO_EXTEND)
719 && GET_MODE (XEXP (op, 0)) == mode)
720 return XEXP (op, 0);
721
722 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
723 (OP:SI foo:SI) if OP is NEG or ABS. */
724 if ((GET_CODE (op) == ABS
725 || GET_CODE (op) == NEG)
726 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
727 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
728 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
729 return simplify_gen_unary (GET_CODE (op), mode,
730 XEXP (XEXP (op, 0), 0), mode);
731
732 /* (truncate:A (subreg:B (truncate:C X) 0)) is
733 (truncate:A X). */
734 if (GET_CODE (op) == SUBREG
735 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
736 && subreg_lowpart_p (op))
737 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
738 GET_MODE (XEXP (SUBREG_REG (op), 0)));
739
740 /* If we know that the value is already truncated, we can
741 replace the TRUNCATE with a SUBREG. Note that this is also
742 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
 743 	 modes; we just have to apply a different definition for
744 truncation. But don't do this for an (LSHIFTRT (MULT ...))
745 since this will cause problems with the umulXi3_highpart
746 patterns. */
747 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
748 GET_MODE_BITSIZE (GET_MODE (op)))
749 ? (num_sign_bit_copies (op, GET_MODE (op))
750 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
751 - GET_MODE_BITSIZE (mode)))
752 : truncated_to_mode (mode, op))
753 && ! (GET_CODE (op) == LSHIFTRT
754 && GET_CODE (XEXP (op, 0)) == MULT))
755 return rtl_hooks.gen_lowpart_no_emit (mode, op);
756
757 /* A truncate of a comparison can be replaced with a subreg if
758 STORE_FLAG_VALUE permits. This is like the previous test,
759 but it works even if the comparison is done in a mode larger
760 than HOST_BITS_PER_WIDE_INT. */
761 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
762 && COMPARISON_P (op)
763 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
764 return rtl_hooks.gen_lowpart_no_emit (mode, op);
765 break;
766
767 case FLOAT_TRUNCATE:
768 if (DECIMAL_FLOAT_MODE_P (mode))
769 break;
770
771 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
772 if (GET_CODE (op) == FLOAT_EXTEND
773 && GET_MODE (XEXP (op, 0)) == mode)
774 return XEXP (op, 0);
775
776 /* (float_truncate:SF (float_truncate:DF foo:XF))
777 = (float_truncate:SF foo:XF).
 778 	 This may eliminate double rounding and change the result, so it is unsafe.
779
780 (float_truncate:SF (float_extend:XF foo:DF))
781 = (float_truncate:SF foo:DF).
782
783 (float_truncate:DF (float_extend:XF foo:SF))
 784 	 = (float_extend:DF foo:SF).  */
785 if ((GET_CODE (op) == FLOAT_TRUNCATE
786 && flag_unsafe_math_optimizations)
787 || GET_CODE (op) == FLOAT_EXTEND)
788 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
789 0)))
790 > GET_MODE_SIZE (mode)
791 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
792 mode,
793 XEXP (op, 0), mode);
794
795 /* (float_truncate (float x)) is (float x) */
796 if (GET_CODE (op) == FLOAT
797 && (flag_unsafe_math_optimizations
798 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
799 && ((unsigned)significand_size (GET_MODE (op))
800 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
801 - num_sign_bit_copies (XEXP (op, 0),
802 GET_MODE (XEXP (op, 0))))))))
803 return simplify_gen_unary (FLOAT, mode,
804 XEXP (op, 0),
805 GET_MODE (XEXP (op, 0)));
806
 807       /*  (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
808 (OP:SF foo:SF) if OP is NEG or ABS. */
809 if ((GET_CODE (op) == ABS
810 || GET_CODE (op) == NEG)
811 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
812 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
813 return simplify_gen_unary (GET_CODE (op), mode,
814 XEXP (XEXP (op, 0), 0), mode);
815
816 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
817 is (float_truncate:SF x). */
818 if (GET_CODE (op) == SUBREG
819 && subreg_lowpart_p (op)
820 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
821 return SUBREG_REG (op);
822 break;
823
824 case FLOAT_EXTEND:
825 if (DECIMAL_FLOAT_MODE_P (mode))
826 break;
827
828 /* (float_extend (float_extend x)) is (float_extend x)
829
830 (float_extend (float x)) is (float x) assuming that double
831 rounding can't happen.
832 */
833 if (GET_CODE (op) == FLOAT_EXTEND
834 || (GET_CODE (op) == FLOAT
835 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
836 && ((unsigned)significand_size (GET_MODE (op))
837 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
838 - num_sign_bit_copies (XEXP (op, 0),
839 GET_MODE (XEXP (op, 0)))))))
840 return simplify_gen_unary (GET_CODE (op), mode,
841 XEXP (op, 0),
842 GET_MODE (XEXP (op, 0)));
843
844 break;
845
846 case ABS:
847 /* (abs (neg <foo>)) -> (abs <foo>) */
848 if (GET_CODE (op) == NEG)
849 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
850 GET_MODE (XEXP (op, 0)));
851
852 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
853 do nothing. */
854 if (GET_MODE (op) == VOIDmode)
855 break;
856
857 /* If operand is something known to be positive, ignore the ABS. */
858 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
859 || ((GET_MODE_BITSIZE (GET_MODE (op))
860 <= HOST_BITS_PER_WIDE_INT)
861 && ((nonzero_bits (op, GET_MODE (op))
862 & ((HOST_WIDE_INT) 1
863 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
864 == 0)))
865 return op;
866
867 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
868 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
869 return gen_rtx_NEG (mode, op);
870
871 break;
872
873 case FFS:
874 /* (ffs (*_extend <X>)) = (ffs <X>) */
875 if (GET_CODE (op) == SIGN_EXTEND
876 || GET_CODE (op) == ZERO_EXTEND)
877 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
878 GET_MODE (XEXP (op, 0)));
879 break;
880
881 case POPCOUNT:
882 switch (GET_CODE (op))
883 {
884 case BSWAP:
885 case ZERO_EXTEND:
886 /* (popcount (zero_extend <X>)) = (popcount <X>) */
887 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
888 GET_MODE (XEXP (op, 0)));
889
890 case ROTATE:
891 case ROTATERT:
892 /* Rotations don't affect popcount. */
893 if (!side_effects_p (XEXP (op, 1)))
894 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
895 GET_MODE (XEXP (op, 0)));
896 break;
897
898 default:
899 break;
900 }
901 break;
902
903 case PARITY:
904 switch (GET_CODE (op))
905 {
906 case NOT:
907 case BSWAP:
908 case ZERO_EXTEND:
909 case SIGN_EXTEND:
910 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
911 GET_MODE (XEXP (op, 0)));
912
913 case ROTATE:
914 case ROTATERT:
915 /* Rotations don't affect parity. */
916 if (!side_effects_p (XEXP (op, 1)))
917 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
918 GET_MODE (XEXP (op, 0)));
919 break;
920
921 default:
922 break;
923 }
924 break;
925
926 case BSWAP:
927 /* (bswap (bswap x)) -> x. */
928 if (GET_CODE (op) == BSWAP)
929 return XEXP (op, 0);
930 break;
931
932 case FLOAT:
933 /* (float (sign_extend <X>)) = (float <X>). */
934 if (GET_CODE (op) == SIGN_EXTEND)
935 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
936 GET_MODE (XEXP (op, 0)));
937 break;
938
939 case SIGN_EXTEND:
940 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
941 becomes just the MINUS if its mode is MODE. This allows
942 folding switch statements on machines using casesi (such as
943 the VAX). */
944 if (GET_CODE (op) == TRUNCATE
945 && GET_MODE (XEXP (op, 0)) == mode
946 && GET_CODE (XEXP (op, 0)) == MINUS
947 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
948 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
949 return XEXP (op, 0);
950
951 /* Check for a sign extension of a subreg of a promoted
952 variable, where the promotion is sign-extended, and the
953 target mode is the same as the variable's promotion. */
954 if (GET_CODE (op) == SUBREG
955 && SUBREG_PROMOTED_VAR_P (op)
956 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
957 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
958 return rtl_hooks.gen_lowpart_no_emit (mode, op);
959
960 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
961 if (! POINTERS_EXTEND_UNSIGNED
962 && mode == Pmode && GET_MODE (op) == ptr_mode
963 && (CONSTANT_P (op)
964 || (GET_CODE (op) == SUBREG
965 && REG_P (SUBREG_REG (op))
966 && REG_POINTER (SUBREG_REG (op))
967 && GET_MODE (SUBREG_REG (op)) == Pmode)))
968 return convert_memory_address (Pmode, op);
969 #endif
970 break;
971
972 case ZERO_EXTEND:
973 /* Check for a zero extension of a subreg of a promoted
974 variable, where the promotion is zero-extended, and the
975 target mode is the same as the variable's promotion. */
976 if (GET_CODE (op) == SUBREG
977 && SUBREG_PROMOTED_VAR_P (op)
978 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
979 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
980 return rtl_hooks.gen_lowpart_no_emit (mode, op);
981
982 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
983 if (POINTERS_EXTEND_UNSIGNED > 0
984 && mode == Pmode && GET_MODE (op) == ptr_mode
985 && (CONSTANT_P (op)
986 || (GET_CODE (op) == SUBREG
987 && REG_P (SUBREG_REG (op))
988 && REG_POINTER (SUBREG_REG (op))
989 && GET_MODE (SUBREG_REG (op)) == Pmode)))
990 return convert_memory_address (Pmode, op);
991 #endif
992 break;
993
994 default:
995 break;
996 }
997
998 return 0;
999 }
1000
1001 /* Try to compute the value of a unary operation CODE whose output mode is to
1002 be MODE with input operand OP whose mode was originally OP_MODE.
1003 Return zero if the value cannot be computed. */
1004 rtx
1005 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1006 rtx op, enum machine_mode op_mode)
1007 {
1008 unsigned int width = GET_MODE_BITSIZE (mode);
1009
1010 if (code == VEC_DUPLICATE)
1011 {
1012 gcc_assert (VECTOR_MODE_P (mode));
1013 if (GET_MODE (op) != VOIDmode)
1014 {
1015 if (!VECTOR_MODE_P (GET_MODE (op)))
1016 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1017 else
1018 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1019 (GET_MODE (op)));
1020 }
1021 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1022 || GET_CODE (op) == CONST_VECTOR)
1023 {
1024 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1025 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1026 rtvec v = rtvec_alloc (n_elts);
1027 unsigned int i;
1028
1029 if (GET_CODE (op) != CONST_VECTOR)
1030 for (i = 0; i < n_elts; i++)
1031 RTVEC_ELT (v, i) = op;
1032 else
1033 {
1034 enum machine_mode inmode = GET_MODE (op);
1035 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1036 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1037
1038 gcc_assert (in_n_elts < n_elts);
1039 gcc_assert ((n_elts % in_n_elts) == 0);
1040 for (i = 0; i < n_elts; i++)
1041 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1042 }
1043 return gen_rtx_CONST_VECTOR (mode, v);
1044 }
1045 }
1046
1047 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1048 {
1049 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1050 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1051 enum machine_mode opmode = GET_MODE (op);
1052 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1053 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1054 rtvec v = rtvec_alloc (n_elts);
1055 unsigned int i;
1056
1057 gcc_assert (op_n_elts == n_elts);
1058 for (i = 0; i < n_elts; i++)
1059 {
1060 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1061 CONST_VECTOR_ELT (op, i),
1062 GET_MODE_INNER (opmode));
1063 if (!x)
1064 return 0;
1065 RTVEC_ELT (v, i) = x;
1066 }
1067 return gen_rtx_CONST_VECTOR (mode, v);
1068 }
1069
1070 /* The order of these tests is critical so that, for example, we don't
1071 check the wrong mode (input vs. output) for a conversion operation,
1072 such as FIX. At some point, this should be simplified. */
1073
1074 if (code == FLOAT && GET_MODE (op) == VOIDmode
1075 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1076 {
1077 HOST_WIDE_INT hv, lv;
1078 REAL_VALUE_TYPE d;
1079
1080 if (CONST_INT_P (op))
1081 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1082 else
1083 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1084
1085 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1086 d = real_value_truncate (mode, d);
1087 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1088 }
1089 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1090 && (GET_CODE (op) == CONST_DOUBLE
1091 || CONST_INT_P (op)))
1092 {
1093 HOST_WIDE_INT hv, lv;
1094 REAL_VALUE_TYPE d;
1095
1096 if (CONST_INT_P (op))
1097 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1098 else
1099 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1100
1101 if (op_mode == VOIDmode)
1102 {
1103 /* We don't know how to interpret negative-looking numbers in
1104 this case, so don't try to fold those. */
1105 if (hv < 0)
1106 return 0;
1107 }
1108 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1109 ;
1110 else
1111 hv = 0, lv &= GET_MODE_MASK (op_mode);
1112
1113 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1114 d = real_value_truncate (mode, d);
1115 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1116 }
1117
1118 if (CONST_INT_P (op)
1119 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1120 {
1121 HOST_WIDE_INT arg0 = INTVAL (op);
1122 HOST_WIDE_INT val;
1123
1124 switch (code)
1125 {
1126 case NOT:
1127 val = ~ arg0;
1128 break;
1129
1130 case NEG:
1131 val = - arg0;
1132 break;
1133
1134 case ABS:
1135 val = (arg0 >= 0 ? arg0 : - arg0);
1136 break;
1137
1138 case FFS:
1139 /* Don't use ffs here. Instead, get low order bit and then its
1140 number. If arg0 is zero, this will return 0, as desired. */
1141 arg0 &= GET_MODE_MASK (mode);
1142 val = exact_log2 (arg0 & (- arg0)) + 1;
1143 break;
1144
1145 case CLZ:
1146 arg0 &= GET_MODE_MASK (mode);
1147 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1148 ;
1149 else
1150 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1151 break;
1152
1153 case CTZ:
1154 arg0 &= GET_MODE_MASK (mode);
1155 if (arg0 == 0)
1156 {
1157 /* Even if the value at zero is undefined, we have to come
1158 up with some replacement. Seems good enough. */
1159 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1160 val = GET_MODE_BITSIZE (mode);
1161 }
1162 else
1163 val = exact_log2 (arg0 & -arg0);
1164 break;
1165
1166 case POPCOUNT:
1167 arg0 &= GET_MODE_MASK (mode);
1168 val = 0;
1169 while (arg0)
1170 val++, arg0 &= arg0 - 1;
1171 break;
1172
1173 case PARITY:
1174 arg0 &= GET_MODE_MASK (mode);
1175 val = 0;
1176 while (arg0)
1177 val++, arg0 &= arg0 - 1;
1178 val &= 1;
1179 break;
1180
1181 case BSWAP:
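	  /* For example, byte-swapping the SImode value 0x11223344
	     yields 0x44332211.  */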
1182 {
1183 unsigned int s;
1184
1185 val = 0;
1186 for (s = 0; s < width; s += 8)
1187 {
1188 unsigned int d = width - s - 8;
1189 unsigned HOST_WIDE_INT byte;
1190 byte = (arg0 >> s) & 0xff;
1191 val |= byte << d;
1192 }
1193 }
1194 break;
1195
1196 case TRUNCATE:
1197 val = arg0;
1198 break;
1199
1200 case ZERO_EXTEND:
1201 /* When zero-extending a CONST_INT, we need to know its
1202 original mode. */
1203 gcc_assert (op_mode != VOIDmode);
1204 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1205 {
1206 /* If we were really extending the mode,
1207 we would have to distinguish between zero-extension
1208 and sign-extension. */
1209 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1210 val = arg0;
1211 }
1212 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1213 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1214 else
1215 return 0;
1216 break;
1217
1218 case SIGN_EXTEND:
1219 if (op_mode == VOIDmode)
1220 op_mode = mode;
1221 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1222 {
1223 /* If we were really extending the mode,
1224 we would have to distinguish between zero-extension
1225 and sign-extension. */
1226 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1227 val = arg0;
1228 }
1229 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1230 {
1231 val
1232 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1233 if (val
1234 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1235 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1236 }
1237 else
1238 return 0;
1239 break;
1240
1241 case SQRT:
1242 case FLOAT_EXTEND:
1243 case FLOAT_TRUNCATE:
1244 case SS_TRUNCATE:
1245 case US_TRUNCATE:
1246 case SS_NEG:
1247 case US_NEG:
1248 return 0;
1249
1250 default:
1251 gcc_unreachable ();
1252 }
1253
1254 return gen_int_mode (val, mode);
1255 }
1256
1257 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1258 for a DImode operation on a CONST_INT. */
1259 else if (GET_MODE (op) == VOIDmode
1260 && width <= HOST_BITS_PER_WIDE_INT * 2
1261 && (GET_CODE (op) == CONST_DOUBLE
1262 || CONST_INT_P (op)))
1263 {
1264 unsigned HOST_WIDE_INT l1, lv;
1265 HOST_WIDE_INT h1, hv;
1266
1267 if (GET_CODE (op) == CONST_DOUBLE)
1268 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1269 else
1270 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1271
1272 switch (code)
1273 {
1274 case NOT:
1275 lv = ~ l1;
1276 hv = ~ h1;
1277 break;
1278
1279 case NEG:
1280 neg_double (l1, h1, &lv, &hv);
1281 break;
1282
1283 case ABS:
1284 if (h1 < 0)
1285 neg_double (l1, h1, &lv, &hv);
1286 else
1287 lv = l1, hv = h1;
1288 break;
1289
1290 case FFS:
1291 hv = 0;
1292 if (l1 == 0)
1293 {
1294 if (h1 == 0)
1295 lv = 0;
1296 else
1297 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1298 }
1299 else
1300 lv = exact_log2 (l1 & -l1) + 1;
1301 break;
1302
1303 case CLZ:
1304 hv = 0;
1305 if (h1 != 0)
1306 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1307 - HOST_BITS_PER_WIDE_INT;
1308 else if (l1 != 0)
1309 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1310 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1311 lv = GET_MODE_BITSIZE (mode);
1312 break;
1313
1314 case CTZ:
1315 hv = 0;
1316 if (l1 != 0)
1317 lv = exact_log2 (l1 & -l1);
1318 else if (h1 != 0)
1319 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1320 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1321 lv = GET_MODE_BITSIZE (mode);
1322 break;
1323
1324 case POPCOUNT:
1325 hv = 0;
1326 lv = 0;
1327 while (l1)
1328 lv++, l1 &= l1 - 1;
1329 while (h1)
1330 lv++, h1 &= h1 - 1;
1331 break;
1332
1333 case PARITY:
1334 hv = 0;
1335 lv = 0;
1336 while (l1)
1337 lv++, l1 &= l1 - 1;
1338 while (h1)
1339 lv++, h1 &= h1 - 1;
1340 lv &= 1;
1341 break;
1342
1343 case BSWAP:
1344 {
1345 unsigned int s;
1346
1347 hv = 0;
1348 lv = 0;
1349 for (s = 0; s < width; s += 8)
1350 {
1351 unsigned int d = width - s - 8;
1352 unsigned HOST_WIDE_INT byte;
1353
1354 if (s < HOST_BITS_PER_WIDE_INT)
1355 byte = (l1 >> s) & 0xff;
1356 else
1357 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1358
1359 if (d < HOST_BITS_PER_WIDE_INT)
1360 lv |= byte << d;
1361 else
1362 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1363 }
1364 }
1365 break;
1366
1367 case TRUNCATE:
1368 /* This is just a change-of-mode, so do nothing. */
1369 lv = l1, hv = h1;
1370 break;
1371
1372 case ZERO_EXTEND:
1373 gcc_assert (op_mode != VOIDmode);
1374
1375 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1376 return 0;
1377
1378 hv = 0;
1379 lv = l1 & GET_MODE_MASK (op_mode);
1380 break;
1381
1382 case SIGN_EXTEND:
1383 if (op_mode == VOIDmode
1384 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1385 return 0;
1386 else
1387 {
1388 lv = l1 & GET_MODE_MASK (op_mode);
1389 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1390 && (lv & ((HOST_WIDE_INT) 1
1391 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1392 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1393
1394 hv = HWI_SIGN_EXTEND (lv);
1395 }
1396 break;
1397
1398 case SQRT:
1399 return 0;
1400
1401 default:
1402 return 0;
1403 }
1404
1405 return immed_double_const (lv, hv, mode);
1406 }
1407
1408 else if (GET_CODE (op) == CONST_DOUBLE
1409 && SCALAR_FLOAT_MODE_P (mode))
1410 {
1411 REAL_VALUE_TYPE d, t;
1412 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1413
1414 switch (code)
1415 {
1416 case SQRT:
1417 if (HONOR_SNANS (mode) && real_isnan (&d))
1418 return 0;
1419 real_sqrt (&t, mode, &d);
1420 d = t;
1421 break;
1422 case ABS:
1423 d = REAL_VALUE_ABS (d);
1424 break;
1425 case NEG:
1426 d = REAL_VALUE_NEGATE (d);
1427 break;
1428 case FLOAT_TRUNCATE:
1429 d = real_value_truncate (mode, d);
1430 break;
1431 case FLOAT_EXTEND:
1432 /* All this does is change the mode. */
1433 break;
1434 case FIX:
1435 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1436 break;
1437 case NOT:
1438 {
1439 long tmp[4];
1440 int i;
1441
1442 real_to_target (tmp, &d, GET_MODE (op));
1443 for (i = 0; i < 4; i++)
1444 tmp[i] = ~tmp[i];
1445 real_from_target (&d, tmp, mode);
1446 break;
1447 }
1448 default:
1449 gcc_unreachable ();
1450 }
1451 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1452 }
1453
1454 else if (GET_CODE (op) == CONST_DOUBLE
1455 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1456 && GET_MODE_CLASS (mode) == MODE_INT
1457 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1458 {
1459 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1460 operators are intentionally left unspecified (to ease implementation
1461 by target backends), for consistency, this routine implements the
1462 same semantics for constant folding as used by the middle-end. */
1463
1464 /* This was formerly used only for non-IEEE float.
1465 eggert@twinsun.com says it is safe for IEEE also. */
1466 HOST_WIDE_INT xh, xl, th, tl;
1467 REAL_VALUE_TYPE x, t;
1468 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1469 switch (code)
1470 {
1471 case FIX:
1472 if (REAL_VALUE_ISNAN (x))
1473 return const0_rtx;
1474
1475 /* Test against the signed upper bound. */
1476 if (width > HOST_BITS_PER_WIDE_INT)
1477 {
1478 th = ((unsigned HOST_WIDE_INT) 1
1479 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1480 tl = -1;
1481 }
1482 else
1483 {
1484 th = 0;
1485 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1486 }
1487 real_from_integer (&t, VOIDmode, tl, th, 0);
1488 if (REAL_VALUES_LESS (t, x))
1489 {
1490 xh = th;
1491 xl = tl;
1492 break;
1493 }
1494
1495 /* Test against the signed lower bound. */
1496 if (width > HOST_BITS_PER_WIDE_INT)
1497 {
1498 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1499 tl = 0;
1500 }
1501 else
1502 {
1503 th = -1;
1504 tl = (HOST_WIDE_INT) -1 << (width - 1);
1505 }
1506 real_from_integer (&t, VOIDmode, tl, th, 0);
1507 if (REAL_VALUES_LESS (x, t))
1508 {
1509 xh = th;
1510 xl = tl;
1511 break;
1512 }
1513 REAL_VALUE_TO_INT (&xl, &xh, x);
1514 break;
1515
1516 case UNSIGNED_FIX:
1517 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1518 return const0_rtx;
1519
1520 /* Test against the unsigned upper bound. */
1521 if (width == 2*HOST_BITS_PER_WIDE_INT)
1522 {
1523 th = -1;
1524 tl = -1;
1525 }
1526 else if (width >= HOST_BITS_PER_WIDE_INT)
1527 {
1528 th = ((unsigned HOST_WIDE_INT) 1
1529 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1530 tl = -1;
1531 }
1532 else
1533 {
1534 th = 0;
1535 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1536 }
1537 real_from_integer (&t, VOIDmode, tl, th, 1);
1538 if (REAL_VALUES_LESS (t, x))
1539 {
1540 xh = th;
1541 xl = tl;
1542 break;
1543 }
1544
1545 REAL_VALUE_TO_INT (&xl, &xh, x);
1546 break;
1547
1548 default:
1549 gcc_unreachable ();
1550 }
1551 return immed_double_const (xl, xh, mode);
1552 }
1553
1554 return NULL_RTX;
1555 }
1556 \f
1557 /* Subroutine of simplify_binary_operation to simplify a commutative,
1558 associative binary operation CODE with result mode MODE, operating
1559 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1560 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1561 canonicalization is possible. */
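/* For example, (and (and X (const_int 255)) Y) is canonicalized to
   (and (and X Y) (const_int 255)), moving the constant outermost so it
   can later combine with other constants.  */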
1562
1563 static rtx
1564 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1565 rtx op0, rtx op1)
1566 {
1567 rtx tem;
1568
1569 /* Linearize the operator to the left. */
1570 if (GET_CODE (op1) == code)
1571 {
1572 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1573 if (GET_CODE (op0) == code)
1574 {
1575 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1576 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1577 }
1578
1579 /* "a op (b op c)" becomes "(b op c) op a". */
1580 if (! swap_commutative_operands_p (op1, op0))
1581 return simplify_gen_binary (code, mode, op1, op0);
1582
1583 tem = op0;
1584 op0 = op1;
1585 op1 = tem;
1586 }
1587
1588 if (GET_CODE (op0) == code)
1589 {
1590 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1591 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1592 {
1593 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1594 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1595 }
1596
1597 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1598 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1599 if (tem != 0)
1600 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1601
1602 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1603 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1604 if (tem != 0)
1605 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1606 }
1607
1608 return 0;
1609 }
1610
1611
1612 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1613 and OP1. Return 0 if no simplification is possible.
1614
1615 Don't use this for relational operations such as EQ or LT.
1616 Use simplify_relational_operation instead. */
1617 rtx
1618 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1619 rtx op0, rtx op1)
1620 {
1621 rtx trueop0, trueop1;
1622 rtx tem;
1623
1624 /* Relational operations don't work here. We must know the mode
1625 of the operands in order to do the comparison correctly.
1626 Assuming a full word can give incorrect results.
1627 Consider comparing 128 with -128 in QImode. */
1628 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1629 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1630
1631 /* Make sure the constant is second. */
1632 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1633 && swap_commutative_operands_p (op0, op1))
1634 {
1635 tem = op0, op0 = op1, op1 = tem;
1636 }
1637
1638 trueop0 = avoid_constant_pool_reference (op0);
1639 trueop1 = avoid_constant_pool_reference (op1);
1640
1641 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1642 if (tem)
1643 return tem;
1644 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1645 }
1646
1647 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1648 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1649 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1650 actual constants. */
1651
1652 static rtx
1653 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1654 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1655 {
1656 rtx tem, reversed, opleft, opright;
1657 HOST_WIDE_INT val;
1658 unsigned int width = GET_MODE_BITSIZE (mode);
1659
1660 /* Even if we can't compute a constant result,
1661 there are some cases worth simplifying. */
1662
1663 switch (code)
1664 {
1665 case PLUS:
1666 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1667 when x is NaN, infinite, or finite and nonzero. They aren't
1668 when x is -0 and the rounding mode is not towards -infinity,
1669 since (-0) + 0 is then 0. */
1670 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1671 return op0;
1672
1673 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1674 transformations are safe even for IEEE. */
1675 if (GET_CODE (op0) == NEG)
1676 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1677 else if (GET_CODE (op1) == NEG)
1678 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1679
1680 /* (~a) + 1 -> -a */
1681 if (INTEGRAL_MODE_P (mode)
1682 && GET_CODE (op0) == NOT
1683 && trueop1 == const1_rtx)
1684 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1685
1686 /* Handle both-operands-constant cases. We can only add
1687 CONST_INTs to constants since the sum of relocatable symbols
1688 can't be handled by most assemblers. Don't add CONST_INT
1689 to CONST_INT since overflow won't be computed properly if wider
1690 than HOST_BITS_PER_WIDE_INT. */
1691
1692 if ((GET_CODE (op0) == CONST
1693 || GET_CODE (op0) == SYMBOL_REF
1694 || GET_CODE (op0) == LABEL_REF)
1695 && CONST_INT_P (op1))
1696 return plus_constant (op0, INTVAL (op1));
1697 else if ((GET_CODE (op1) == CONST
1698 || GET_CODE (op1) == SYMBOL_REF
1699 || GET_CODE (op1) == LABEL_REF)
1700 && CONST_INT_P (op0))
1701 return plus_constant (op1, INTVAL (op0));
1702
1703 /* See if this is something like X * C - X or vice versa or
1704 if the multiplication is written as a shift. If so, we can
1705 distribute and make a new multiply, shift, or maybe just
1706 have X (if C is 2 in the example above). But don't make
1707 something more expensive than we had before. */
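      /* For example, (plus (mult X 3) X) can become (mult X 4), and
	 (plus (ashift X 2) X) can become (mult X 5), provided the result
	 is no more expensive than the original.  */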
1708
1709 if (SCALAR_INT_MODE_P (mode))
1710 {
1711 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1712 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1713 rtx lhs = op0, rhs = op1;
1714
1715 if (GET_CODE (lhs) == NEG)
1716 {
1717 coeff0l = -1;
1718 coeff0h = -1;
1719 lhs = XEXP (lhs, 0);
1720 }
1721 else if (GET_CODE (lhs) == MULT
1722 && CONST_INT_P (XEXP (lhs, 1)))
1723 {
1724 coeff0l = INTVAL (XEXP (lhs, 1));
1725 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1726 lhs = XEXP (lhs, 0);
1727 }
1728 else if (GET_CODE (lhs) == ASHIFT
1729 && CONST_INT_P (XEXP (lhs, 1))
1730 && INTVAL (XEXP (lhs, 1)) >= 0
1731 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1732 {
1733 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1734 coeff0h = 0;
1735 lhs = XEXP (lhs, 0);
1736 }
1737
1738 if (GET_CODE (rhs) == NEG)
1739 {
1740 coeff1l = -1;
1741 coeff1h = -1;
1742 rhs = XEXP (rhs, 0);
1743 }
1744 else if (GET_CODE (rhs) == MULT
1745 && CONST_INT_P (XEXP (rhs, 1)))
1746 {
1747 coeff1l = INTVAL (XEXP (rhs, 1));
1748 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1749 rhs = XEXP (rhs, 0);
1750 }
1751 else if (GET_CODE (rhs) == ASHIFT
1752 && CONST_INT_P (XEXP (rhs, 1))
1753 && INTVAL (XEXP (rhs, 1)) >= 0
1754 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1755 {
1756 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1757 coeff1h = 0;
1758 rhs = XEXP (rhs, 0);
1759 }
1760
1761 if (rtx_equal_p (lhs, rhs))
1762 {
1763 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1764 rtx coeff;
1765 unsigned HOST_WIDE_INT l;
1766 HOST_WIDE_INT h;
1767 bool speed = optimize_function_for_speed_p (cfun);
1768
1769 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1770 coeff = immed_double_const (l, h, mode);
1771
1772 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1773 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1774 ? tem : 0;
1775 }
1776 }
1777
1778 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1779 if ((CONST_INT_P (op1)
1780 || GET_CODE (op1) == CONST_DOUBLE)
1781 && GET_CODE (op0) == XOR
1782 && (CONST_INT_P (XEXP (op0, 1))
1783 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1784 && mode_signbit_p (mode, op1))
1785 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1786 simplify_gen_binary (XOR, mode, op1,
1787 XEXP (op0, 1)));
1788
1789 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1790 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1791 && GET_CODE (op0) == MULT
1792 && GET_CODE (XEXP (op0, 0)) == NEG)
1793 {
1794 rtx in1, in2;
1795
1796 in1 = XEXP (XEXP (op0, 0), 0);
1797 in2 = XEXP (op0, 1);
1798 return simplify_gen_binary (MINUS, mode, op1,
1799 simplify_gen_binary (MULT, mode,
1800 in1, in2));
1801 }
1802
1803 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1804 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1805 is 1. */
1806 if (COMPARISON_P (op0)
1807 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1808 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1809 && (reversed = reversed_comparison (op0, mode)))
1810 return
1811 simplify_gen_unary (NEG, mode, reversed, mode);
1812
1813 /* If one of the operands is a PLUS or a MINUS, see if we can
1814 simplify this by the associative law.
1815 Don't use the associative law for floating point.
1816 The inaccuracy makes it nonassociative,
1817 and subtle programs can break if operations are associated. */
1818
1819 if (INTEGRAL_MODE_P (mode)
1820 && (plus_minus_operand_p (op0)
1821 || plus_minus_operand_p (op1))
1822 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1823 return tem;
1824
1825 /* Reassociate floating point addition only when the user
1826 specifies associative math operations. */
1827 if (FLOAT_MODE_P (mode)
1828 && flag_associative_math)
1829 {
1830 tem = simplify_associative_operation (code, mode, op0, op1);
1831 if (tem)
1832 return tem;
1833 }
1834 break;
1835
1836 case COMPARE:
1837 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1838 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1839 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1840 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1841 {
1842 rtx xop00 = XEXP (op0, 0);
1843 rtx xop10 = XEXP (op1, 0);
1844
1845 #ifdef HAVE_cc0
1846 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1847 #else
1848 if (REG_P (xop00) && REG_P (xop10)
1849 && GET_MODE (xop00) == GET_MODE (xop10)
1850 && REGNO (xop00) == REGNO (xop10)
1851 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1852 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1853 #endif
1854 return xop00;
1855 }
1856 break;
1857
1858 case MINUS:
1859 /* We can't assume x-x is 0 even with non-IEEE floating point,
1860 but since it is zero except in very strange circumstances, we
1861 will treat it as zero with -ffinite-math-only. */
1862 if (rtx_equal_p (trueop0, trueop1)
1863 && ! side_effects_p (op0)
1864 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1865 return CONST0_RTX (mode);
1866
1867 /* Change subtraction from zero into negation. (0 - x) is the
1868 same as -x when x is NaN, infinite, or finite and nonzero.
1869 But if the mode has signed zeros, and does not round towards
1870 -infinity, then 0 - 0 is 0, not -0. */
1871 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1872 return simplify_gen_unary (NEG, mode, op1, mode);
1873
1874 /* (-1 - a) is ~a. */
1875 if (trueop0 == constm1_rtx)
1876 return simplify_gen_unary (NOT, mode, op1, mode);
1877
1878 /* Subtracting 0 has no effect unless the mode has signed zeros
1879 and supports rounding towards -infinity. In such a case,
1880 0 - 0 is -0. */
1881 if (!(HONOR_SIGNED_ZEROS (mode)
1882 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1883 && trueop1 == CONST0_RTX (mode))
1884 return op0;
1885
1886 /* See if this is something like X * C - X or vice versa or
1887 if the multiplication is written as a shift. If so, we can
1888 distribute and make a new multiply, shift, or maybe just
1889 have X (if C is 2 in the example above). But don't make
1890 something more expensive than we had before. */
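     /* For example, (minus (mult X (const_int 4)) X) can become
        (mult X (const_int 3)), and (minus (mult X (const_int 2)) X)
        becomes just X, provided the new form is no more expensive
        than the original.  */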
1891
1892 if (SCALAR_INT_MODE_P (mode))
1893 {
1894 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1895 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1896 rtx lhs = op0, rhs = op1;
1897
1898 if (GET_CODE (lhs) == NEG)
1899 {
1900 coeff0l = -1;
1901 coeff0h = -1;
1902 lhs = XEXP (lhs, 0);
1903 }
1904 else if (GET_CODE (lhs) == MULT
1905 && CONST_INT_P (XEXP (lhs, 1)))
1906 {
1907 coeff0l = INTVAL (XEXP (lhs, 1));
1908 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1909 lhs = XEXP (lhs, 0);
1910 }
1911 else if (GET_CODE (lhs) == ASHIFT
1912 && CONST_INT_P (XEXP (lhs, 1))
1913 && INTVAL (XEXP (lhs, 1)) >= 0
1914 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1915 {
1916 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1917 coeff0h = 0;
1918 lhs = XEXP (lhs, 0);
1919 }
1920
1921 if (GET_CODE (rhs) == NEG)
1922 {
1923 negcoeff1l = 1;
1924 negcoeff1h = 0;
1925 rhs = XEXP (rhs, 0);
1926 }
1927 else if (GET_CODE (rhs) == MULT
1928 && CONST_INT_P (XEXP (rhs, 1)))
1929 {
1930 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1931 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1932 rhs = XEXP (rhs, 0);
1933 }
1934 else if (GET_CODE (rhs) == ASHIFT
1935 && CONST_INT_P (XEXP (rhs, 1))
1936 && INTVAL (XEXP (rhs, 1)) >= 0
1937 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1938 {
1939 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1940 negcoeff1h = -1;
1941 rhs = XEXP (rhs, 0);
1942 }
1943
1944 if (rtx_equal_p (lhs, rhs))
1945 {
1946 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1947 rtx coeff;
1948 unsigned HOST_WIDE_INT l;
1949 HOST_WIDE_INT h;
1950 bool speed = optimize_function_for_speed_p (cfun);
1951
1952 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1953 coeff = immed_double_const (l, h, mode);
1954
1955 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1956 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1957 ? tem : 0;
1958 }
1959 }
1960
1961 /* (a - (-b)) -> (a + b). True even for IEEE. */
1962 if (GET_CODE (op1) == NEG)
1963 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1964
1965 /* (-x - c) may be simplified as (-c - x). */
1966 if (GET_CODE (op0) == NEG
1967 && (CONST_INT_P (op1)
1968 || GET_CODE (op1) == CONST_DOUBLE))
1969 {
1970 tem = simplify_unary_operation (NEG, mode, op1, mode);
1971 if (tem)
1972 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1973 }
1974
1975 /* Don't let a relocatable value get a negative coeff. */
1976 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
1977 return simplify_gen_binary (PLUS, mode,
1978 op0,
1979 neg_const_int (mode, op1));
1980
1981 /* (x - (x & y)) -> (x & ~y) */
1982 if (GET_CODE (op1) == AND)
1983 {
1984 if (rtx_equal_p (op0, XEXP (op1, 0)))
1985 {
1986 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1987 GET_MODE (XEXP (op1, 1)));
1988 return simplify_gen_binary (AND, mode, op0, tem);
1989 }
1990 if (rtx_equal_p (op0, XEXP (op1, 1)))
1991 {
1992 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1993 GET_MODE (XEXP (op1, 0)));
1994 return simplify_gen_binary (AND, mode, op0, tem);
1995 }
1996 }
1997
1998 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1999 by reversing the comparison code if valid. */
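     /* For example, (minus (const_int 1) (eq A B)) becomes (ne A B)
        when EQ and NE yield 0 or 1.  */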
2000 if (STORE_FLAG_VALUE == 1
2001 && trueop0 == const1_rtx
2002 && COMPARISON_P (op1)
2003 && (reversed = reversed_comparison (op1, mode)))
2004 return reversed;
2005
2006 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2007 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2008 && GET_CODE (op1) == MULT
2009 && GET_CODE (XEXP (op1, 0)) == NEG)
2010 {
2011 rtx in1, in2;
2012
2013 in1 = XEXP (XEXP (op1, 0), 0);
2014 in2 = XEXP (op1, 1);
2015 return simplify_gen_binary (PLUS, mode,
2016 simplify_gen_binary (MULT, mode,
2017 in1, in2),
2018 op0);
2019 }
2020
2021 /* Canonicalize (minus (neg A) (mult B C)) to
2022 (minus (mult (neg B) C) A). */
2023 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2024 && GET_CODE (op1) == MULT
2025 && GET_CODE (op0) == NEG)
2026 {
2027 rtx in1, in2;
2028
2029 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2030 in2 = XEXP (op1, 1);
2031 return simplify_gen_binary (MINUS, mode,
2032 simplify_gen_binary (MULT, mode,
2033 in1, in2),
2034 XEXP (op0, 0));
2035 }
2036
2037 /* If one of the operands is a PLUS or a MINUS, see if we can
2038 simplify this by the associative law. This will, for example,
2039 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2040 Don't use the associative law for floating point.
2041 The inaccuracy makes it nonassociative,
2042 and subtle programs can break if operations are associated. */
2043
2044 if (INTEGRAL_MODE_P (mode)
2045 && (plus_minus_operand_p (op0)
2046 || plus_minus_operand_p (op1))
2047 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2048 return tem;
2049 break;
2050
2051 case MULT:
2052 if (trueop1 == constm1_rtx)
2053 return simplify_gen_unary (NEG, mode, op0, mode);
2054
2055 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2056 x is NaN, since x * 0 is then also NaN. Nor is it valid
2057 when the mode has signed zeros, since multiplying a negative
2058 number by 0 will give -0, not 0. */
2059 if (!HONOR_NANS (mode)
2060 && !HONOR_SIGNED_ZEROS (mode)
2061 && trueop1 == CONST0_RTX (mode)
2062 && ! side_effects_p (op0))
2063 return op1;
2064
2065 /* In IEEE floating point, x*1 is not equivalent to x for
2066 signalling NaNs. */
2067 if (!HONOR_SNANS (mode)
2068 && trueop1 == CONST1_RTX (mode))
2069 return op0;
2070
2071 /* Convert multiply by constant power of two into shift unless
2072 we are still generating RTL. This test is a kludge. */
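     /* For example, (mult X (const_int 8)) becomes
        (ashift X (const_int 3)).  */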
2073 if (CONST_INT_P (trueop1)
2074 && (val = exact_log2 (INTVAL (trueop1))) >= 0
2075 /* If the mode is larger than the host word size, and the
2076 uppermost bit is set, then this isn't a power of two due
2077 to implicit sign extension. */
2078 && (width <= HOST_BITS_PER_WIDE_INT
2079 || val != HOST_BITS_PER_WIDE_INT - 1))
2080 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2081
2082 /* Likewise for multipliers wider than a word. */
2083 if (GET_CODE (trueop1) == CONST_DOUBLE
2084 && (GET_MODE (trueop1) == VOIDmode
2085 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2086 && GET_MODE (op0) == mode
2087 && CONST_DOUBLE_LOW (trueop1) == 0
2088 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2089 return simplify_gen_binary (ASHIFT, mode, op0,
2090 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2091
2092 /* x*2 is x+x and x*(-1) is -x */
2093 if (GET_CODE (trueop1) == CONST_DOUBLE
2094 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2095 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2096 && GET_MODE (op0) == mode)
2097 {
2098 REAL_VALUE_TYPE d;
2099 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2100
2101 if (REAL_VALUES_EQUAL (d, dconst2))
2102 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2103
2104 if (!HONOR_SNANS (mode)
2105 && REAL_VALUES_EQUAL (d, dconstm1))
2106 return simplify_gen_unary (NEG, mode, op0, mode);
2107 }
2108
2109 /* Optimize -x * -x as x * x. */
2110 if (FLOAT_MODE_P (mode)
2111 && GET_CODE (op0) == NEG
2112 && GET_CODE (op1) == NEG
2113 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2114 && !side_effects_p (XEXP (op0, 0)))
2115 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2116
2117 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2118 if (SCALAR_FLOAT_MODE_P (mode)
2119 && GET_CODE (op0) == ABS
2120 && GET_CODE (op1) == ABS
2121 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2122 && !side_effects_p (XEXP (op0, 0)))
2123 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2124
2125 /* Reassociate multiplication, but for floating point MULTs
2126 only when the user specifies unsafe math optimizations. */
2127 if (! FLOAT_MODE_P (mode)
2128 || flag_unsafe_math_optimizations)
2129 {
2130 tem = simplify_associative_operation (code, mode, op0, op1);
2131 if (tem)
2132 return tem;
2133 }
2134 break;
2135
2136 case IOR:
2137 if (trueop1 == const0_rtx)
2138 return op0;
2139 if (CONST_INT_P (trueop1)
2140 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2141 == GET_MODE_MASK (mode)))
2142 return op1;
2143 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2144 return op0;
2145 /* A | (~A) -> -1 */
2146 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2147 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2148 && ! side_effects_p (op0)
2149 && SCALAR_INT_MODE_P (mode))
2150 return constm1_rtx;
2151
2152 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2153 if (CONST_INT_P (op1)
2154 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2155 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2156 return op1;
2157
2158 /* Canonicalize (X & C1) | C2. */
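     /* For example, in SImode (ior (and X (const_int 0xff)) (const_int 0x0f))
        becomes (ior (and X (const_int 0xf0)) (const_int 0x0f)), and
        (ior (and X (const_int 0x0f)) (const_int 0xff)) becomes
        (const_int 0xff) when X has no side effects.  */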
2159 if (GET_CODE (op0) == AND
2160 && CONST_INT_P (trueop1)
2161 && CONST_INT_P (XEXP (op0, 1)))
2162 {
2163 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2164 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2165 HOST_WIDE_INT c2 = INTVAL (trueop1);
2166
2167 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2168 if ((c1 & c2) == c1
2169 && !side_effects_p (XEXP (op0, 0)))
2170 return trueop1;
2171
2172 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2173 if (((c1|c2) & mask) == mask)
2174 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2175
2176 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2177 if (((c1 & ~c2) & mask) != (c1 & mask))
2178 {
2179 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2180 gen_int_mode (c1 & ~c2, mode));
2181 return simplify_gen_binary (IOR, mode, tem, op1);
2182 }
2183 }
2184
2185 /* Convert (A & B) | A to A. */
2186 if (GET_CODE (op0) == AND
2187 && (rtx_equal_p (XEXP (op0, 0), op1)
2188 || rtx_equal_p (XEXP (op0, 1), op1))
2189 && ! side_effects_p (XEXP (op0, 0))
2190 && ! side_effects_p (XEXP (op0, 1)))
2191 return op1;
2192
2193 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2194 mode size to (rotate A CX). */
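     /* For example, in SImode
        (ior (ashift A (const_int 8)) (lshiftrt A (const_int 24)))
        becomes (rotate A (const_int 8)).  */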
2195
2196 if (GET_CODE (op1) == ASHIFT
2197 || GET_CODE (op1) == SUBREG)
2198 {
2199 opleft = op1;
2200 opright = op0;
2201 }
2202 else
2203 {
2204 opright = op1;
2205 opleft = op0;
2206 }
2207
2208 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2209 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2210 && CONST_INT_P (XEXP (opleft, 1))
2211 && CONST_INT_P (XEXP (opright, 1))
2212 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2213 == GET_MODE_BITSIZE (mode)))
2214 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2215
2216 /* Same, but for ashift that has been "simplified" to a wider mode
2217 by simplify_shift_const. */
2218
2219 if (GET_CODE (opleft) == SUBREG
2220 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2221 && GET_CODE (opright) == LSHIFTRT
2222 && GET_CODE (XEXP (opright, 0)) == SUBREG
2223 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2224 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2225 && (GET_MODE_SIZE (GET_MODE (opleft))
2226 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2227 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2228 SUBREG_REG (XEXP (opright, 0)))
2229 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2230 && CONST_INT_P (XEXP (opright, 1))
2231 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2232 == GET_MODE_BITSIZE (mode)))
2233 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2234 XEXP (SUBREG_REG (opleft), 1));
2235
2236 /* If we have (ior (and X C1) C2), simplify this by making
2237 C1 as small as possible if C1 actually changes. */
2238 if (CONST_INT_P (op1)
2239 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2240 || INTVAL (op1) > 0)
2241 && GET_CODE (op0) == AND
2242 && CONST_INT_P (XEXP (op0, 1))
2243 && CONST_INT_P (op1)
2244 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2245 return simplify_gen_binary (IOR, mode,
2246 simplify_gen_binary
2247 (AND, mode, XEXP (op0, 0),
2248 GEN_INT (INTVAL (XEXP (op0, 1))
2249 & ~INTVAL (op1))),
2250 op1);
2251
2252 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2253 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2254 the PLUS does not affect any of the bits in OP1: then we can do
2255 the IOR as a PLUS and we can associate. This is valid if OP1
2256 can be safely shifted left C bits. */
2257 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2258 && GET_CODE (XEXP (op0, 0)) == PLUS
2259 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2260 && CONST_INT_P (XEXP (op0, 1))
2261 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2262 {
2263 int count = INTVAL (XEXP (op0, 1));
2264 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2265
2266 if (mask >> count == INTVAL (trueop1)
2267 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2268 return simplify_gen_binary (ASHIFTRT, mode,
2269 plus_constant (XEXP (op0, 0), mask),
2270 XEXP (op0, 1));
2271 }
2272
2273 tem = simplify_associative_operation (code, mode, op0, op1);
2274 if (tem)
2275 return tem;
2276 break;
2277
2278 case XOR:
2279 if (trueop1 == const0_rtx)
2280 return op0;
2281 if (CONST_INT_P (trueop1)
2282 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2283 == GET_MODE_MASK (mode)))
2284 return simplify_gen_unary (NOT, mode, op0, mode);
2285 if (rtx_equal_p (trueop0, trueop1)
2286 && ! side_effects_p (op0)
2287 && GET_MODE_CLASS (mode) != MODE_CC)
2288 return CONST0_RTX (mode);
2289
2290 /* Canonicalize XOR of the most significant bit to PLUS. */
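     /* Adding the sign bit and XORing it are equivalent, because any
        carry out of the sign bit is discarded by the mode's modular
        arithmetic.  */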
2291 if ((CONST_INT_P (op1)
2292 || GET_CODE (op1) == CONST_DOUBLE)
2293 && mode_signbit_p (mode, op1))
2294 return simplify_gen_binary (PLUS, mode, op0, op1);
2295 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2296 if ((CONST_INT_P (op1)
2297 || GET_CODE (op1) == CONST_DOUBLE)
2298 && GET_CODE (op0) == PLUS
2299 && (CONST_INT_P (XEXP (op0, 1))
2300 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2301 && mode_signbit_p (mode, XEXP (op0, 1)))
2302 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2303 simplify_gen_binary (XOR, mode, op1,
2304 XEXP (op0, 1)));
2305
2306 /* If we are XORing two things that have no bits in common,
2307 convert them into an IOR. This helps to detect rotation encoded
2308 using those methods and possibly other simplifications. */
2309
2310 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2311 && (nonzero_bits (op0, mode)
2312 & nonzero_bits (op1, mode)) == 0)
2313 return (simplify_gen_binary (IOR, mode, op0, op1));
2314
2315 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2316 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2317 (NOT y). */
2318 {
2319 int num_negated = 0;
2320
2321 if (GET_CODE (op0) == NOT)
2322 num_negated++, op0 = XEXP (op0, 0);
2323 if (GET_CODE (op1) == NOT)
2324 num_negated++, op1 = XEXP (op1, 0);
2325
2326 if (num_negated == 2)
2327 return simplify_gen_binary (XOR, mode, op0, op1);
2328 else if (num_negated == 1)
2329 return simplify_gen_unary (NOT, mode,
2330 simplify_gen_binary (XOR, mode, op0, op1),
2331 mode);
2332 }
2333
2334 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2335 correspond to a machine insn or result in further simplifications
2336 if B is a constant. */
2337
2338 if (GET_CODE (op0) == AND
2339 && rtx_equal_p (XEXP (op0, 1), op1)
2340 && ! side_effects_p (op1))
2341 return simplify_gen_binary (AND, mode,
2342 simplify_gen_unary (NOT, mode,
2343 XEXP (op0, 0), mode),
2344 op1);
2345
2346 else if (GET_CODE (op0) == AND
2347 && rtx_equal_p (XEXP (op0, 0), op1)
2348 && ! side_effects_p (op1))
2349 return simplify_gen_binary (AND, mode,
2350 simplify_gen_unary (NOT, mode,
2351 XEXP (op0, 1), mode),
2352 op1);
2353
2354 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2355 comparison if STORE_FLAG_VALUE is 1. */
2356 if (STORE_FLAG_VALUE == 1
2357 && trueop1 == const1_rtx
2358 && COMPARISON_P (op0)
2359 && (reversed = reversed_comparison (op0, mode)))
2360 return reversed;
2361
2362 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2363 is (lt foo (const_int 0)), so we can perform the above
2364 simplification if STORE_FLAG_VALUE is 1. */
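     /* For example, in SImode
        (xor (lshiftrt X (const_int 31)) (const_int 1))
        becomes (ge X (const_int 0)).  */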
2365
2366 if (STORE_FLAG_VALUE == 1
2367 && trueop1 == const1_rtx
2368 && GET_CODE (op0) == LSHIFTRT
2369 && CONST_INT_P (XEXP (op0, 1))
2370 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2371 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2372
2373 /* (xor (comparison foo bar) (const_int sign-bit))
2374 when STORE_FLAG_VALUE is the sign bit. */
2375 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2376 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2377 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2378 && trueop1 == const_true_rtx
2379 && COMPARISON_P (op0)
2380 && (reversed = reversed_comparison (op0, mode)))
2381 return reversed;
2382
2383 tem = simplify_associative_operation (code, mode, op0, op1);
2384 if (tem)
2385 return tem;
2386 break;
2387
2388 case AND:
2389 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2390 return trueop1;
2391 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2392 {
2393 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2394 HOST_WIDE_INT nzop1;
2395 if (CONST_INT_P (trueop1))
2396 {
2397 HOST_WIDE_INT val1 = INTVAL (trueop1);
2398 /* If we are turning off bits already known off in OP0, we need
2399 not do an AND. */
2400 if ((nzop0 & ~val1) == 0)
2401 return op0;
2402 }
2403 nzop1 = nonzero_bits (trueop1, mode);
2404 /* If we are clearing all the nonzero bits, the result is zero. */
2405 if ((nzop1 & nzop0) == 0
2406 && !side_effects_p (op0) && !side_effects_p (op1))
2407 return CONST0_RTX (mode);
2408 }
2409 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2410 && GET_MODE_CLASS (mode) != MODE_CC)
2411 return op0;
2412 /* A & (~A) -> 0 */
2413 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2414 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2415 && ! side_effects_p (op0)
2416 && GET_MODE_CLASS (mode) != MODE_CC)
2417 return CONST0_RTX (mode);
2418
2419 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2420 there are no nonzero bits of C outside of X's mode. */
2421 if ((GET_CODE (op0) == SIGN_EXTEND
2422 || GET_CODE (op0) == ZERO_EXTEND)
2423 && CONST_INT_P (trueop1)
2424 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2425 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2426 & INTVAL (trueop1)) == 0)
2427 {
2428 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2429 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2430 gen_int_mode (INTVAL (trueop1),
2431 imode));
2432 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2433 }
2434
2435 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2436 we might be able to further simplify the AND with X and potentially
2437 remove the truncation altogether. */
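     /* For example, if X has DImode, (and (truncate:SI X) (const_int 0xff))
        becomes (truncate:SI (and:DI X (const_int 0xff))).  */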
2438 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2439 {
2440 rtx x = XEXP (op0, 0);
2441 enum machine_mode xmode = GET_MODE (x);
2442 tem = simplify_gen_binary (AND, xmode, x,
2443 gen_int_mode (INTVAL (trueop1), xmode));
2444 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2445 }
2446
2447 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2448 if (GET_CODE (op0) == IOR
2449 && CONST_INT_P (trueop1)
2450 && CONST_INT_P (XEXP (op0, 1)))
2451 {
2452 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2453 return simplify_gen_binary (IOR, mode,
2454 simplify_gen_binary (AND, mode,
2455 XEXP (op0, 0), op1),
2456 gen_int_mode (tmp, mode));
2457 }
2458
2459 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2460 insn (and may simplify more). */
2461 if (GET_CODE (op0) == XOR
2462 && rtx_equal_p (XEXP (op0, 0), op1)
2463 && ! side_effects_p (op1))
2464 return simplify_gen_binary (AND, mode,
2465 simplify_gen_unary (NOT, mode,
2466 XEXP (op0, 1), mode),
2467 op1);
2468
2469 if (GET_CODE (op0) == XOR
2470 && rtx_equal_p (XEXP (op0, 1), op1)
2471 && ! side_effects_p (op1))
2472 return simplify_gen_binary (AND, mode,
2473 simplify_gen_unary (NOT, mode,
2474 XEXP (op0, 0), mode),
2475 op1);
2476
2477 /* Similarly for (~(A ^ B)) & A. */
2478 if (GET_CODE (op0) == NOT
2479 && GET_CODE (XEXP (op0, 0)) == XOR
2480 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2481 && ! side_effects_p (op1))
2482 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2483
2484 if (GET_CODE (op0) == NOT
2485 && GET_CODE (XEXP (op0, 0)) == XOR
2486 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2487 && ! side_effects_p (op1))
2488 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2489
2490 /* Convert (A | B) & A to A. */
2491 if (GET_CODE (op0) == IOR
2492 && (rtx_equal_p (XEXP (op0, 0), op1)
2493 || rtx_equal_p (XEXP (op0, 1), op1))
2494 && ! side_effects_p (XEXP (op0, 0))
2495 && ! side_effects_p (XEXP (op0, 1)))
2496 return op1;
2497
2498 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2499 ((A & N) + B) & M -> (A + B) & M
2500 Similarly if (N & M) == 0,
2501 ((A | N) + B) & M -> (A + B) & M
2502 and for - instead of + and/or ^ instead of |.
2503 Also, if (N & M) == 0, then
2504 (A +- N) & M -> A & M. */
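     /* For example, with M == 0xff and N == 0x100,
        (and (plus A (const_int 0x100)) (const_int 0xff))
        becomes (and A (const_int 0xff)).  */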
2505 if (CONST_INT_P (trueop1)
2506 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2507 && ~INTVAL (trueop1)
2508 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2509 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2510 {
2511 rtx pmop[2];
2512 int which;
2513
2514 pmop[0] = XEXP (op0, 0);
2515 pmop[1] = XEXP (op0, 1);
2516
2517 if (CONST_INT_P (pmop[1])
2518 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2519 return simplify_gen_binary (AND, mode, pmop[0], op1);
2520
2521 for (which = 0; which < 2; which++)
2522 {
2523 tem = pmop[which];
2524 switch (GET_CODE (tem))
2525 {
2526 case AND:
2527 if (CONST_INT_P (XEXP (tem, 1))
2528 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2529 == INTVAL (trueop1))
2530 pmop[which] = XEXP (tem, 0);
2531 break;
2532 case IOR:
2533 case XOR:
2534 if (CONST_INT_P (XEXP (tem, 1))
2535 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2536 pmop[which] = XEXP (tem, 0);
2537 break;
2538 default:
2539 break;
2540 }
2541 }
2542
2543 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2544 {
2545 tem = simplify_gen_binary (GET_CODE (op0), mode,
2546 pmop[0], pmop[1]);
2547 return simplify_gen_binary (code, mode, tem, op1);
2548 }
2549 }
2550
2551 /* (and X (ior (not X) Y)) -> (and X Y) */
2552 if (GET_CODE (op1) == IOR
2553 && GET_CODE (XEXP (op1, 0)) == NOT
2554 && op0 == XEXP (XEXP (op1, 0), 0))
2555 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2556
2557 /* (and (ior (not X) Y) X) -> (and X Y) */
2558 if (GET_CODE (op0) == IOR
2559 && GET_CODE (XEXP (op0, 0)) == NOT
2560 && op1 == XEXP (XEXP (op0, 0), 0))
2561 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2562
2563 tem = simplify_associative_operation (code, mode, op0, op1);
2564 if (tem)
2565 return tem;
2566 break;
2567
2568 case UDIV:
2569 /* 0/x is 0 (or x&0 if x has side-effects). */
2570 if (trueop0 == CONST0_RTX (mode))
2571 {
2572 if (side_effects_p (op1))
2573 return simplify_gen_binary (AND, mode, op1, trueop0);
2574 return trueop0;
2575 }
2576 /* x/1 is x. */
2577 if (trueop1 == CONST1_RTX (mode))
2578 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2579 /* Convert divide by power of two into shift. */
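     /* For example, (udiv X (const_int 8)) becomes
        (lshiftrt X (const_int 3)).  */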
2580 if (CONST_INT_P (trueop1)
2581 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2582 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2583 break;
2584
2585 case DIV:
2586 /* Handle floating point and integers separately. */
2587 if (SCALAR_FLOAT_MODE_P (mode))
2588 {
2589 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2590 safe for modes with NaNs, since 0.0 / 0.0 will then be
2591 NaN rather than 0.0. Nor is it safe for modes with signed
2592 zeros, since dividing 0 by a negative number gives -0.0 */
2593 if (trueop0 == CONST0_RTX (mode)
2594 && !HONOR_NANS (mode)
2595 && !HONOR_SIGNED_ZEROS (mode)
2596 && ! side_effects_p (op1))
2597 return op0;
2598 /* x/1.0 is x. */
2599 if (trueop1 == CONST1_RTX (mode)
2600 && !HONOR_SNANS (mode))
2601 return op0;
2602
2603 if (GET_CODE (trueop1) == CONST_DOUBLE
2604 && trueop1 != CONST0_RTX (mode))
2605 {
2606 REAL_VALUE_TYPE d;
2607 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2608
2609 /* x/-1.0 is -x. */
2610 if (REAL_VALUES_EQUAL (d, dconstm1)
2611 && !HONOR_SNANS (mode))
2612 return simplify_gen_unary (NEG, mode, op0, mode);
2613
2614 /* Change FP division by a constant into multiplication.
2615 Only do this with -freciprocal-math. */
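     /* For example, x/4.0 becomes x*0.25.  */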
2616 if (flag_reciprocal_math
2617 && !REAL_VALUES_EQUAL (d, dconst0))
2618 {
2619 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2620 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2621 return simplify_gen_binary (MULT, mode, op0, tem);
2622 }
2623 }
2624 }
2625 else
2626 {
2627 /* 0/x is 0 (or x&0 if x has side-effects). */
2628 if (trueop0 == CONST0_RTX (mode))
2629 {
2630 if (side_effects_p (op1))
2631 return simplify_gen_binary (AND, mode, op1, trueop0);
2632 return trueop0;
2633 }
2634 /* x/1 is x. */
2635 if (trueop1 == CONST1_RTX (mode))
2636 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2637 /* x/-1 is -x. */
2638 if (trueop1 == constm1_rtx)
2639 {
2640 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2641 return simplify_gen_unary (NEG, mode, x, mode);
2642 }
2643 }
2644 break;
2645
2646 case UMOD:
2647 /* 0%x is 0 (or x&0 if x has side-effects). */
2648 if (trueop0 == CONST0_RTX (mode))
2649 {
2650 if (side_effects_p (op1))
2651 return simplify_gen_binary (AND, mode, op1, trueop0);
2652 return trueop0;
2653 }
2654 /* x%1 is 0 (or x&0 if x has side-effects). */
2655 if (trueop1 == CONST1_RTX (mode))
2656 {
2657 if (side_effects_p (op0))
2658 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2659 return CONST0_RTX (mode);
2660 }
2661 /* Implement modulus by power of two as AND. */
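     /* For example, (umod X (const_int 8)) becomes
        (and X (const_int 7)).  */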
2662 if (CONST_INT_P (trueop1)
2663 && exact_log2 (INTVAL (trueop1)) > 0)
2664 return simplify_gen_binary (AND, mode, op0,
2665 GEN_INT (INTVAL (op1) - 1));
2666 break;
2667
2668 case MOD:
2669 /* 0%x is 0 (or x&0 if x has side-effects). */
2670 if (trueop0 == CONST0_RTX (mode))
2671 {
2672 if (side_effects_p (op1))
2673 return simplify_gen_binary (AND, mode, op1, trueop0);
2674 return trueop0;
2675 }
2676 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2677 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2678 {
2679 if (side_effects_p (op0))
2680 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2681 return CONST0_RTX (mode);
2682 }
2683 break;
2684
2685 case ROTATERT:
2686 case ROTATE:
2687 case ASHIFTRT:
2688 if (trueop1 == CONST0_RTX (mode))
2689 return op0;
2690 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2691 return op0;
2692 /* Rotating ~0 always results in ~0. */
2693 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2694 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2695 && ! side_effects_p (op1))
2696 return op0;
2697 canonicalize_shift:
2698 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2699 {
2700 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2701 if (val != INTVAL (op1))
2702 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2703 }
2704 break;
2705
2706 case ASHIFT:
2707 case SS_ASHIFT:
2708 case US_ASHIFT:
2709 if (trueop1 == CONST0_RTX (mode))
2710 return op0;
2711 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2712 return op0;
2713 goto canonicalize_shift;
2714
2715 case LSHIFTRT:
2716 if (trueop1 == CONST0_RTX (mode))
2717 return op0;
2718 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2719 return op0;
2720 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
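     /* When CLZ of zero is defined to be the mode bitsize (say 32), CLZ of
        any nonzero value is at most bitsize - 1, so shifting the CLZ result
        right by log2 of the bitsize (5 in that case) yields 1 exactly when
        X is zero.  */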
2721 if (GET_CODE (op0) == CLZ
2722 && CONST_INT_P (trueop1)
2723 && STORE_FLAG_VALUE == 1
2724 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2725 {
2726 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2727 unsigned HOST_WIDE_INT zero_val = 0;
2728
2729 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2730 && zero_val == GET_MODE_BITSIZE (imode)
2731 && INTVAL (trueop1) == exact_log2 (zero_val))
2732 return simplify_gen_relational (EQ, mode, imode,
2733 XEXP (op0, 0), const0_rtx);
2734 }
2735 goto canonicalize_shift;
2736
2737 case SMIN:
2738 if (width <= HOST_BITS_PER_WIDE_INT
2739 && CONST_INT_P (trueop1)
2740 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2741 && ! side_effects_p (op0))
2742 return op1;
2743 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2744 return op0;
2745 tem = simplify_associative_operation (code, mode, op0, op1);
2746 if (tem)
2747 return tem;
2748 break;
2749
2750 case SMAX:
2751 if (width <= HOST_BITS_PER_WIDE_INT
2752 && CONST_INT_P (trueop1)
2753 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2754 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2755 && ! side_effects_p (op0))
2756 return op1;
2757 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2758 return op0;
2759 tem = simplify_associative_operation (code, mode, op0, op1);
2760 if (tem)
2761 return tem;
2762 break;
2763
2764 case UMIN:
2765 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2766 return op1;
2767 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2768 return op0;
2769 tem = simplify_associative_operation (code, mode, op0, op1);
2770 if (tem)
2771 return tem;
2772 break;
2773
2774 case UMAX:
2775 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2776 return op1;
2777 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2778 return op0;
2779 tem = simplify_associative_operation (code, mode, op0, op1);
2780 if (tem)
2781 return tem;
2782 break;
2783
2784 case SS_PLUS:
2785 case US_PLUS:
2786 case SS_MINUS:
2787 case US_MINUS:
2788 case SS_MULT:
2789 case US_MULT:
2790 case SS_DIV:
2791 case US_DIV:
2792 /* ??? There are simplifications that can be done. */
2793 return 0;
2794
2795 case VEC_SELECT:
2796 if (!VECTOR_MODE_P (mode))
2797 {
2798 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2799 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2800 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2801 gcc_assert (XVECLEN (trueop1, 0) == 1);
2802 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2803
2804 if (GET_CODE (trueop0) == CONST_VECTOR)
2805 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2806 (trueop1, 0, 0)));
2807
2808 /* Extract a scalar element from a nested VEC_SELECT expression
2809 (with an optional nested VEC_CONCAT expression). Some targets
2810 (i386) extract a scalar element from a vector using a chain of
2811 nested VEC_SELECT expressions. When the input operand is a memory
2812 operand, this operation can be simplified to a simple scalar
2813 load from the memory operand at the appropriate offset. */
2814 if (GET_CODE (trueop0) == VEC_SELECT)
2815 {
2816 rtx op0 = XEXP (trueop0, 0);
2817 rtx op1 = XEXP (trueop0, 1);
2818
2819 enum machine_mode opmode = GET_MODE (op0);
2820 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2821 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2822
2823 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2824 int elem;
2825
2826 rtvec vec;
2827 rtx tmp_op, tmp;
2828
2829 gcc_assert (GET_CODE (op1) == PARALLEL);
2830 gcc_assert (i < n_elts);
2831
2832 /* Select the element pointed to by the nested selector. */
2833 elem = INTVAL (XVECEXP (op1, 0, i));
2834
2835 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2836 if (GET_CODE (op0) == VEC_CONCAT)
2837 {
2838 rtx op00 = XEXP (op0, 0);
2839 rtx op01 = XEXP (op0, 1);
2840
2841 enum machine_mode mode00, mode01;
2842 int n_elts00, n_elts01;
2843
2844 mode00 = GET_MODE (op00);
2845 mode01 = GET_MODE (op01);
2846
2847 /* Find out the number of elements of each operand. */
2848 if (VECTOR_MODE_P (mode00))
2849 {
2850 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2851 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2852 }
2853 else
2854 n_elts00 = 1;
2855
2856 if (VECTOR_MODE_P (mode01))
2857 {
2858 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2859 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2860 }
2861 else
2862 n_elts01 = 1;
2863
2864 gcc_assert (n_elts == n_elts00 + n_elts01);
2865
2866 /* Select correct operand of VEC_CONCAT
2867 and adjust selector. */
2868 if (elem < n_elts01)
2869 tmp_op = op00;
2870 else
2871 {
2872 tmp_op = op01;
2873 elem -= n_elts00;
2874 }
2875 }
2876 else
2877 tmp_op = op0;
2878
2879 vec = rtvec_alloc (1);
2880 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2881
2882 tmp = gen_rtx_fmt_ee (code, mode,
2883 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2884 return tmp;
2885 }
2886 }
2887 else
2888 {
2889 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2890 gcc_assert (GET_MODE_INNER (mode)
2891 == GET_MODE_INNER (GET_MODE (trueop0)));
2892 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2893
2894 if (GET_CODE (trueop0) == CONST_VECTOR)
2895 {
2896 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2897 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2898 rtvec v = rtvec_alloc (n_elts);
2899 unsigned int i;
2900
2901 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2902 for (i = 0; i < n_elts; i++)
2903 {
2904 rtx x = XVECEXP (trueop1, 0, i);
2905
2906 gcc_assert (CONST_INT_P (x));
2907 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2908 INTVAL (x));
2909 }
2910
2911 return gen_rtx_CONST_VECTOR (mode, v);
2912 }
2913 }
2914
2915 if (XVECLEN (trueop1, 0) == 1
2916 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2917 && GET_CODE (trueop0) == VEC_CONCAT)
2918 {
2919 rtx vec = trueop0;
2920 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2921
2922 /* Try to find the element in the VEC_CONCAT. */
2923 while (GET_MODE (vec) != mode
2924 && GET_CODE (vec) == VEC_CONCAT)
2925 {
2926 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2927 if (offset < vec_size)
2928 vec = XEXP (vec, 0);
2929 else
2930 {
2931 offset -= vec_size;
2932 vec = XEXP (vec, 1);
2933 }
2934 vec = avoid_constant_pool_reference (vec);
2935 }
2936
2937 if (GET_MODE (vec) == mode)
2938 return vec;
2939 }
2940
2941 return 0;
2942 case VEC_CONCAT:
2943 {
2944 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2945 ? GET_MODE (trueop0)
2946 : GET_MODE_INNER (mode));
2947 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2948 ? GET_MODE (trueop1)
2949 : GET_MODE_INNER (mode));
2950
2951 gcc_assert (VECTOR_MODE_P (mode));
2952 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2953 == GET_MODE_SIZE (mode));
2954
2955 if (VECTOR_MODE_P (op0_mode))
2956 gcc_assert (GET_MODE_INNER (mode)
2957 == GET_MODE_INNER (op0_mode));
2958 else
2959 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2960
2961 if (VECTOR_MODE_P (op1_mode))
2962 gcc_assert (GET_MODE_INNER (mode)
2963 == GET_MODE_INNER (op1_mode));
2964 else
2965 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2966
2967 if ((GET_CODE (trueop0) == CONST_VECTOR
2968 || CONST_INT_P (trueop0)
2969 || GET_CODE (trueop0) == CONST_DOUBLE)
2970 && (GET_CODE (trueop1) == CONST_VECTOR
2971 || CONST_INT_P (trueop1)
2972 || GET_CODE (trueop1) == CONST_DOUBLE))
2973 {
2974 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2975 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2976 rtvec v = rtvec_alloc (n_elts);
2977 unsigned int i;
2978 unsigned in_n_elts = 1;
2979
2980 if (VECTOR_MODE_P (op0_mode))
2981 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2982 for (i = 0; i < n_elts; i++)
2983 {
2984 if (i < in_n_elts)
2985 {
2986 if (!VECTOR_MODE_P (op0_mode))
2987 RTVEC_ELT (v, i) = trueop0;
2988 else
2989 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2990 }
2991 else
2992 {
2993 if (!VECTOR_MODE_P (op1_mode))
2994 RTVEC_ELT (v, i) = trueop1;
2995 else
2996 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2997 i - in_n_elts);
2998 }
2999 }
3000
3001 return gen_rtx_CONST_VECTOR (mode, v);
3002 }
3003 }
3004 return 0;
3005
3006 default:
3007 gcc_unreachable ();
3008 }
3009
3010 return 0;
3011 }
3012
3013 rtx
3014 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3015 rtx op0, rtx op1)
3016 {
3017 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3018 HOST_WIDE_INT val;
3019 unsigned int width = GET_MODE_BITSIZE (mode);
3020
3021 if (VECTOR_MODE_P (mode)
3022 && code != VEC_CONCAT
3023 && GET_CODE (op0) == CONST_VECTOR
3024 && GET_CODE (op1) == CONST_VECTOR)
3025 {
3026 unsigned n_elts = GET_MODE_NUNITS (mode);
3027 enum machine_mode op0mode = GET_MODE (op0);
3028 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3029 enum machine_mode op1mode = GET_MODE (op1);
3030 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3031 rtvec v = rtvec_alloc (n_elts);
3032 unsigned int i;
3033
3034 gcc_assert (op0_n_elts == n_elts);
3035 gcc_assert (op1_n_elts == n_elts);
3036 for (i = 0; i < n_elts; i++)
3037 {
3038 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3039 CONST_VECTOR_ELT (op0, i),
3040 CONST_VECTOR_ELT (op1, i));
3041 if (!x)
3042 return 0;
3043 RTVEC_ELT (v, i) = x;
3044 }
3045
3046 return gen_rtx_CONST_VECTOR (mode, v);
3047 }
3048
3049 if (VECTOR_MODE_P (mode)
3050 && code == VEC_CONCAT
3051 && (CONST_INT_P (op0)
3052 || GET_CODE (op0) == CONST_DOUBLE
3053 || GET_CODE (op0) == CONST_FIXED)
3054 && (CONST_INT_P (op1)
3055 || GET_CODE (op1) == CONST_DOUBLE
3056 || GET_CODE (op1) == CONST_FIXED))
3057 {
3058 unsigned n_elts = GET_MODE_NUNITS (mode);
3059 rtvec v = rtvec_alloc (n_elts);
3060
3061 gcc_assert (n_elts >= 2);
3062 if (n_elts == 2)
3063 {
3064 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3065 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3066
3067 RTVEC_ELT (v, 0) = op0;
3068 RTVEC_ELT (v, 1) = op1;
3069 }
3070 else
3071 {
3072 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3073 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3074 unsigned i;
3075
3076 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3077 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3078 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3079
3080 for (i = 0; i < op0_n_elts; ++i)
3081 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3082 for (i = 0; i < op1_n_elts; ++i)
3083 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3084 }
3085
3086 return gen_rtx_CONST_VECTOR (mode, v);
3087 }
3088
3089 if (SCALAR_FLOAT_MODE_P (mode)
3090 && GET_CODE (op0) == CONST_DOUBLE
3091 && GET_CODE (op1) == CONST_DOUBLE
3092 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3093 {
3094 if (code == AND
3095 || code == IOR
3096 || code == XOR)
3097 {
3098 long tmp0[4];
3099 long tmp1[4];
3100 REAL_VALUE_TYPE r;
3101 int i;
3102
3103 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3104 GET_MODE (op0));
3105 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3106 GET_MODE (op1));
3107 for (i = 0; i < 4; i++)
3108 {
3109 switch (code)
3110 {
3111 case AND:
3112 tmp0[i] &= tmp1[i];
3113 break;
3114 case IOR:
3115 tmp0[i] |= tmp1[i];
3116 break;
3117 case XOR:
3118 tmp0[i] ^= tmp1[i];
3119 break;
3120 default:
3121 gcc_unreachable ();
3122 }
3123 }
3124 real_from_target (&r, tmp0, mode);
3125 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3126 }
3127 else
3128 {
3129 REAL_VALUE_TYPE f0, f1, value, result;
3130 bool inexact;
3131
3132 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3133 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3134 real_convert (&f0, mode, &f0);
3135 real_convert (&f1, mode, &f1);
3136
3137 if (HONOR_SNANS (mode)
3138 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3139 return 0;
3140
3141 if (code == DIV
3142 && REAL_VALUES_EQUAL (f1, dconst0)
3143 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3144 return 0;
3145
3146 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3147 && flag_trapping_math
3148 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3149 {
3150 int s0 = REAL_VALUE_NEGATIVE (f0);
3151 int s1 = REAL_VALUE_NEGATIVE (f1);
3152
3153 switch (code)
3154 {
3155 case PLUS:
3156 /* Inf + -Inf = NaN plus exception. */
3157 if (s0 != s1)
3158 return 0;
3159 break;
3160 case MINUS:
3161 /* Inf - Inf = NaN plus exception. */
3162 if (s0 == s1)
3163 return 0;
3164 break;
3165 case DIV:
3166 /* Inf / Inf = NaN plus exception. */
3167 return 0;
3168 default:
3169 break;
3170 }
3171 }
3172
3173 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3174 && flag_trapping_math
3175 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3176 || (REAL_VALUE_ISINF (f1)
3177 && REAL_VALUES_EQUAL (f0, dconst0))))
3178 /* Inf * 0 = NaN plus exception. */
3179 return 0;
3180
3181 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3182 &f0, &f1);
3183 real_convert (&result, mode, &value);
3184
3185 /* Don't constant fold this floating point operation if
3186 the result has overflowed and flag_trapping_math is set. */
3187
3188 if (flag_trapping_math
3189 && MODE_HAS_INFINITIES (mode)
3190 && REAL_VALUE_ISINF (result)
3191 && !REAL_VALUE_ISINF (f0)
3192 && !REAL_VALUE_ISINF (f1))
3193 /* Overflow plus exception. */
3194 return 0;
3195
3196 /* Don't constant fold this floating point operation if the
3197 result may depend upon the run-time rounding mode and
3198 flag_rounding_math is set, or if GCC's software emulation
3199 is unable to accurately represent the result. */
3200
3201 if ((flag_rounding_math
3202 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3203 && (inexact || !real_identical (&result, &value)))
3204 return NULL_RTX;
3205
3206 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3207 }
3208 }
3209
3210 /* We can fold some multi-word operations. */
3211 if (GET_MODE_CLASS (mode) == MODE_INT
3212 && width == HOST_BITS_PER_WIDE_INT * 2
3213 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3214 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3215 {
3216 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3217 HOST_WIDE_INT h1, h2, hv, ht;
3218
3219 if (GET_CODE (op0) == CONST_DOUBLE)
3220 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3221 else
3222 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3223
3224 if (GET_CODE (op1) == CONST_DOUBLE)
3225 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3226 else
3227 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3228
3229 switch (code)
3230 {
3231 case MINUS:
3232 /* A - B == A + (-B). */
3233 neg_double (l2, h2, &lv, &hv);
3234 l2 = lv, h2 = hv;
3235
3236 /* Fall through.... */
3237
3238 case PLUS:
3239 add_double (l1, h1, l2, h2, &lv, &hv);
3240 break;
3241
3242 case MULT:
3243 mul_double (l1, h1, l2, h2, &lv, &hv);
3244 break;
3245
3246 case DIV:
3247 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3248 &lv, &hv, &lt, &ht))
3249 return 0;
3250 break;
3251
3252 case MOD:
3253 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3254 &lt, &ht, &lv, &hv))
3255 return 0;
3256 break;
3257
3258 case UDIV:
3259 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3260 &lv, &hv, &lt, &ht))
3261 return 0;
3262 break;
3263
3264 case UMOD:
3265 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3266 &lt, &ht, &lv, &hv))
3267 return 0;
3268 break;
3269
3270 case AND:
3271 lv = l1 & l2, hv = h1 & h2;
3272 break;
3273
3274 case IOR:
3275 lv = l1 | l2, hv = h1 | h2;
3276 break;
3277
3278 case XOR:
3279 lv = l1 ^ l2, hv = h1 ^ h2;
3280 break;
3281
3282 case SMIN:
3283 if (h1 < h2
3284 || (h1 == h2
3285 && ((unsigned HOST_WIDE_INT) l1
3286 < (unsigned HOST_WIDE_INT) l2)))
3287 lv = l1, hv = h1;
3288 else
3289 lv = l2, hv = h2;
3290 break;
3291
3292 case SMAX:
3293 if (h1 > h2
3294 || (h1 == h2
3295 && ((unsigned HOST_WIDE_INT) l1
3296 > (unsigned HOST_WIDE_INT) l2)))
3297 lv = l1, hv = h1;
3298 else
3299 lv = l2, hv = h2;
3300 break;
3301
3302 case UMIN:
3303 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3304 || (h1 == h2
3305 && ((unsigned HOST_WIDE_INT) l1
3306 < (unsigned HOST_WIDE_INT) l2)))
3307 lv = l1, hv = h1;
3308 else
3309 lv = l2, hv = h2;
3310 break;
3311
3312 case UMAX:
3313 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3314 || (h1 == h2
3315 && ((unsigned HOST_WIDE_INT) l1
3316 > (unsigned HOST_WIDE_INT) l2)))
3317 lv = l1, hv = h1;
3318 else
3319 lv = l2, hv = h2;
3320 break;
3321
3322 case LSHIFTRT: case ASHIFTRT:
3323 case ASHIFT:
3324 case ROTATE: case ROTATERT:
3325 if (SHIFT_COUNT_TRUNCATED)
3326 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3327
3328 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3329 return 0;
3330
3331 if (code == LSHIFTRT || code == ASHIFTRT)
3332 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3333 code == ASHIFTRT);
3334 else if (code == ASHIFT)
3335 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3336 else if (code == ROTATE)
3337 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3338 else /* code == ROTATERT */
3339 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3340 break;
3341
3342 default:
3343 return 0;
3344 }
3345
3346 return immed_double_const (lv, hv, mode);
3347 }
3348
3349 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3350 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3351 {
3352 /* Get the integer argument values in two forms:
3353 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3354
3355 arg0 = INTVAL (op0);
3356 arg1 = INTVAL (op1);
3357
3358 if (width < HOST_BITS_PER_WIDE_INT)
3359 {
3360 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3361 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3362
3363 arg0s = arg0;
3364 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3365 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3366
3367 arg1s = arg1;
3368 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3369 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3370 }
3371 else
3372 {
3373 arg0s = arg0;
3374 arg1s = arg1;
3375 }
3376
3377 /* Compute the value of the arithmetic. */
3378
3379 switch (code)
3380 {
3381 case PLUS:
3382 val = arg0s + arg1s;
3383 break;
3384
3385 case MINUS:
3386 val = arg0s - arg1s;
3387 break;
3388
3389 case MULT:
3390 val = arg0s * arg1s;
3391 break;
3392
3393 case DIV:
3394 if (arg1s == 0
3395 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3396 && arg1s == -1))
3397 return 0;
3398 val = arg0s / arg1s;
3399 break;
3400
3401 case MOD:
3402 if (arg1s == 0
3403 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3404 && arg1s == -1))
3405 return 0;
3406 val = arg0s % arg1s;
3407 break;
3408
3409 case UDIV:
3410 if (arg1 == 0
3411 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3412 && arg1s == -1))
3413 return 0;
3414 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3415 break;
3416
3417 case UMOD:
3418 if (arg1 == 0
3419 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3420 && arg1s == -1))
3421 return 0;
3422 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3423 break;
3424
3425 case AND:
3426 val = arg0 & arg1;
3427 break;
3428
3429 case IOR:
3430 val = arg0 | arg1;
3431 break;
3432
3433 case XOR:
3434 val = arg0 ^ arg1;
3435 break;
3436
3437 case LSHIFTRT:
3438 case ASHIFT:
3439 case ASHIFTRT:
3440 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3441 the value is in range. We can't return any old value for
3442 out-of-range arguments because either the middle-end (via
3443 shift_truncation_mask) or the back-end might be relying on
3444 target-specific knowledge. Nor can we rely on
3445 shift_truncation_mask, since the shift might not be part of an
3446 ashlM3, lshrM3 or ashrM3 instruction. */
3447 if (SHIFT_COUNT_TRUNCATED)
3448 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3449 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3450 return 0;
3451
3452 val = (code == ASHIFT
3453 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3454 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3455
3456 /* Sign-extend the result for arithmetic right shifts. */
3457 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3458 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3459 break;
3460
3461 case ROTATERT:
3462 if (arg1 < 0)
3463 return 0;
3464
3465 arg1 %= width;
3466 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3467 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3468 break;
3469
3470 case ROTATE:
3471 if (arg1 < 0)
3472 return 0;
3473
3474 arg1 %= width;
3475 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3476 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3477 break;
3478
3479 case COMPARE:
3480 /* Do nothing here. */
3481 return 0;
3482
3483 case SMIN:
3484 val = arg0s <= arg1s ? arg0s : arg1s;
3485 break;
3486
3487 case UMIN:
3488 val = ((unsigned HOST_WIDE_INT) arg0
3489 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3490 break;
3491
3492 case SMAX:
3493 val = arg0s > arg1s ? arg0s : arg1s;
3494 break;
3495
3496 case UMAX:
3497 val = ((unsigned HOST_WIDE_INT) arg0
3498 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3499 break;
3500
3501 case SS_PLUS:
3502 case US_PLUS:
3503 case SS_MINUS:
3504 case US_MINUS:
3505 case SS_MULT:
3506 case US_MULT:
3507 case SS_DIV:
3508 case US_DIV:
3509 case SS_ASHIFT:
3510 case US_ASHIFT:
3511 /* ??? There are simplifications that can be done. */
3512 return 0;
3513
3514 default:
3515 gcc_unreachable ();
3516 }
3517
3518 return gen_int_mode (val, mode);
3519 }
3520
3521 return NULL_RTX;
3522 }
3523
3524
3525 \f
3526 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3527 PLUS or MINUS.
3528
3529 Rather than test for specific cases, we do this by a brute-force method
3530 and do all possible simplifications until no more changes occur. Then
3531 we rebuild the operation. */
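     /* For example, (plus (minus A B) (plus B C)) simplifies to
        (plus A C).  */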
3532
3533 struct simplify_plus_minus_op_data
3534 {
3535 rtx op;
3536 short neg;
3537 };
3538
3539 static bool
3540 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3541 {
3542 int result;
3543
3544 result = (commutative_operand_precedence (y)
3545 - commutative_operand_precedence (x));
3546 if (result)
3547 return result > 0;
3548
3549 /* Group together equal REGs to do more simplification. */
3550 if (REG_P (x) && REG_P (y))
3551 return REGNO (x) > REGNO (y);
3552 else
3553 return false;
3554 }
3555
3556 static rtx
3557 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3558 rtx op1)
3559 {
3560 struct simplify_plus_minus_op_data ops[8];
3561 rtx result, tem;
3562 int n_ops = 2, input_ops = 2;
3563 int changed, n_constants = 0, canonicalized = 0;
3564 int i, j;
3565
3566 memset (ops, 0, sizeof ops);
3567
3568 /* Set up the two operands and then expand them until nothing has been
3569 changed. If we run out of room in our array, give up; this should
3570 almost never happen. */
3571
3572 ops[0].op = op0;
3573 ops[0].neg = 0;
3574 ops[1].op = op1;
3575 ops[1].neg = (code == MINUS);
3576
3577 do
3578 {
3579 changed = 0;
3580
3581 for (i = 0; i < n_ops; i++)
3582 {
3583 rtx this_op = ops[i].op;
3584 int this_neg = ops[i].neg;
3585 enum rtx_code this_code = GET_CODE (this_op);
3586
3587 switch (this_code)
3588 {
3589 case PLUS:
3590 case MINUS:
3591 if (n_ops == 7)
3592 return NULL_RTX;
3593
3594 ops[n_ops].op = XEXP (this_op, 1);
3595 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3596 n_ops++;
3597
3598 ops[i].op = XEXP (this_op, 0);
3599 input_ops++;
3600 changed = 1;
3601 canonicalized |= this_neg;
3602 break;
3603
3604 case NEG:
3605 ops[i].op = XEXP (this_op, 0);
3606 ops[i].neg = ! this_neg;
3607 changed = 1;
3608 canonicalized = 1;
3609 break;
3610
3611 case CONST:
3612 if (n_ops < 7
3613 && GET_CODE (XEXP (this_op, 0)) == PLUS
3614 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3615 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3616 {
3617 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3618 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3619 ops[n_ops].neg = this_neg;
3620 n_ops++;
3621 changed = 1;
3622 canonicalized = 1;
3623 }
3624 break;
3625
3626 case NOT:
3627 /* ~a -> (-a - 1) */
3628 if (n_ops != 7)
3629 {
3630 ops[n_ops].op = constm1_rtx;
3631 ops[n_ops++].neg = this_neg;
3632 ops[i].op = XEXP (this_op, 0);
3633 ops[i].neg = !this_neg;
3634 changed = 1;
3635 canonicalized = 1;
3636 }
3637 break;
3638
3639 case CONST_INT:
3640 n_constants++;
3641 if (this_neg)
3642 {
3643 ops[i].op = neg_const_int (mode, this_op);
3644 ops[i].neg = 0;
3645 changed = 1;
3646 canonicalized = 1;
3647 }
3648 break;
3649
3650 default:
3651 break;
3652 }
3653 }
3654 }
3655 while (changed);
3656
3657 if (n_constants > 1)
3658 canonicalized = 1;
3659
3660 gcc_assert (n_ops >= 2);
3661
3662 /* If we only have two operands, we can avoid the loops. */
3663 if (n_ops == 2)
3664 {
3665 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3666 rtx lhs, rhs;
3667
3668 /* Get the two operands. Be careful with the order, especially for
3669 the cases where code == MINUS. */
3670 if (ops[0].neg && ops[1].neg)
3671 {
3672 lhs = gen_rtx_NEG (mode, ops[0].op);
3673 rhs = ops[1].op;
3674 }
3675 else if (ops[0].neg)
3676 {
3677 lhs = ops[1].op;
3678 rhs = ops[0].op;
3679 }
3680 else
3681 {
3682 lhs = ops[0].op;
3683 rhs = ops[1].op;
3684 }
3685
3686 return simplify_const_binary_operation (code, mode, lhs, rhs);
3687 }
3688
3689 /* Now simplify each pair of operands until nothing changes. */
3690 do
3691 {
3692 /* Insertion sort is good enough for an eight-element array. */
3693 for (i = 1; i < n_ops; i++)
3694 {
3695 struct simplify_plus_minus_op_data save;
3696 j = i - 1;
3697 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3698 continue;
3699
3700 canonicalized = 1;
3701 save = ops[i];
3702 do
3703 ops[j + 1] = ops[j];
3704 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3705 ops[j + 1] = save;
3706 }
3707
3708 changed = 0;
3709 for (i = n_ops - 1; i > 0; i--)
3710 for (j = i - 1; j >= 0; j--)
3711 {
3712 rtx lhs = ops[j].op, rhs = ops[i].op;
3713 int lneg = ops[j].neg, rneg = ops[i].neg;
3714
3715 if (lhs != 0 && rhs != 0)
3716 {
3717 enum rtx_code ncode = PLUS;
3718
3719 if (lneg != rneg)
3720 {
3721 ncode = MINUS;
3722 if (lneg)
3723 tem = lhs, lhs = rhs, rhs = tem;
3724 }
3725 else if (swap_commutative_operands_p (lhs, rhs))
3726 tem = lhs, lhs = rhs, rhs = tem;
3727
3728 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3729 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3730 {
3731 rtx tem_lhs, tem_rhs;
3732
3733 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3734 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3735 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3736
3737 if (tem && !CONSTANT_P (tem))
3738 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3739 }
3740 else
3741 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3742
3743 /* Reject "simplifications" that just wrap the two
3744 arguments in a CONST. Failure to do so can result
3745 in infinite recursion with simplify_binary_operation
3746 when it calls us to simplify CONST operations. */
3747 if (tem
3748 && ! (GET_CODE (tem) == CONST
3749 && GET_CODE (XEXP (tem, 0)) == ncode
3750 && XEXP (XEXP (tem, 0), 0) == lhs
3751 && XEXP (XEXP (tem, 0), 1) == rhs))
3752 {
3753 lneg &= rneg;
3754 if (GET_CODE (tem) == NEG)
3755 tem = XEXP (tem, 0), lneg = !lneg;
3756 if (CONST_INT_P (tem) && lneg)
3757 tem = neg_const_int (mode, tem), lneg = 0;
3758
3759 ops[i].op = tem;
3760 ops[i].neg = lneg;
3761 ops[j].op = NULL_RTX;
3762 changed = 1;
3763 canonicalized = 1;
3764 }
3765 }
3766 }
3767
3768 /* If nothing changed, fail. */
3769 if (!canonicalized)
3770 return NULL_RTX;
3771
3772 /* Pack all the operands to the lower-numbered entries. */
3773 for (i = 0, j = 0; j < n_ops; j++)
3774 if (ops[j].op)
3775 {
3776 ops[i] = ops[j];
3777 i++;
3778 }
3779 n_ops = i;
3780 }
3781 while (changed);
3782
3783 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3784 if (n_ops == 2
3785 && CONST_INT_P (ops[1].op)
3786 && CONSTANT_P (ops[0].op)
3787 && ops[0].neg)
3788 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3789
3790 /* We suppressed creation of trivial CONST expressions in the
3791 combination loop to avoid recursion. Create one manually now.
3792 The combination loop should have ensured that there is exactly
3793 one CONST_INT, and the sort will have ensured that it is last
3794 in the array and that any other constant will be next-to-last. */
3795
3796 if (n_ops > 1
3797 && CONST_INT_P (ops[n_ops - 1].op)
3798 && CONSTANT_P (ops[n_ops - 2].op))
3799 {
3800 rtx value = ops[n_ops - 1].op;
3801 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3802 value = neg_const_int (mode, value);
3803 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3804 n_ops--;
3805 }
3806
3807 /* Put a non-negated operand first, if possible. */
3808
3809 for (i = 0; i < n_ops && ops[i].neg; i++)
3810 continue;
3811 if (i == n_ops)
3812 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3813 else if (i != 0)
3814 {
3815 tem = ops[0].op;
3816 ops[0] = ops[i];
3817 ops[i].op = tem;
3818 ops[i].neg = 1;
3819 }
3820
3821 /* Now make the result by performing the requested operations. */
3822 result = ops[0].op;
3823 for (i = 1; i < n_ops; i++)
3824 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3825 mode, result, ops[i].op);
3826
3827 return result;
3828 }
3829
3830 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3831 static bool
3832 plus_minus_operand_p (const_rtx x)
3833 {
3834 return GET_CODE (x) == PLUS
3835 || GET_CODE (x) == MINUS
3836 || (GET_CODE (x) == CONST
3837 && GET_CODE (XEXP (x, 0)) == PLUS
3838 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3839 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3840 }
3841
3842 /* Like simplify_binary_operation except used for relational operators.
3843 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3844 not both be VOIDmode as well.
3845
3846 CMP_MODE specifies the mode in which the comparison is done, so it is
3847 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3848 the operands or, if both are VOIDmode, the operands are compared in
3849 "infinite precision". */
3850 rtx
3851 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3852 enum machine_mode cmp_mode, rtx op0, rtx op1)
3853 {
3854 rtx tem, trueop0, trueop1;
3855
3856 if (cmp_mode == VOIDmode)
3857 cmp_mode = GET_MODE (op0);
3858 if (cmp_mode == VOIDmode)
3859 cmp_mode = GET_MODE (op1);
3860
3861 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3862 if (tem)
3863 {
3864 if (SCALAR_FLOAT_MODE_P (mode))
3865 {
3866 if (tem == const0_rtx)
3867 return CONST0_RTX (mode);
3868 #ifdef FLOAT_STORE_FLAG_VALUE
3869 {
3870 REAL_VALUE_TYPE val;
3871 val = FLOAT_STORE_FLAG_VALUE (mode);
3872 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3873 }
3874 #else
3875 return NULL_RTX;
3876 #endif
3877 }
3878 if (VECTOR_MODE_P (mode))
3879 {
3880 if (tem == const0_rtx)
3881 return CONST0_RTX (mode);
3882 #ifdef VECTOR_STORE_FLAG_VALUE
3883 {
3884 int i, units;
3885 rtvec v;
3886
3887 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3888 if (val == NULL_RTX)
3889 return NULL_RTX;
3890 if (val == const1_rtx)
3891 return CONST1_RTX (mode);
3892
3893 units = GET_MODE_NUNITS (mode);
3894 v = rtvec_alloc (units);
3895 for (i = 0; i < units; i++)
3896 RTVEC_ELT (v, i) = val;
3897 return gen_rtx_raw_CONST_VECTOR (mode, v);
3898 }
3899 #else
3900 return NULL_RTX;
3901 #endif
3902 }
3903
3904 return tem;
3905 }
3906
3907 /* For the following tests, ensure const0_rtx is op1. */
3908 if (swap_commutative_operands_p (op0, op1)
3909 || (op0 == const0_rtx && op1 != const0_rtx))
3910 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3911
3912 /* If op0 is a compare, extract the comparison arguments from it. */
3913 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3914 return simplify_gen_relational (code, mode, VOIDmode,
3915 XEXP (op0, 0), XEXP (op0, 1));
3916
3917 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3918 || CC0_P (op0))
3919 return NULL_RTX;
3920
3921 trueop0 = avoid_constant_pool_reference (op0);
3922 trueop1 = avoid_constant_pool_reference (op1);
3923 return simplify_relational_operation_1 (code, mode, cmp_mode,
3924 trueop0, trueop1);
3925 }
3926
3927 /* This part of simplify_relational_operation is only used when CMP_MODE
3928 is not in class MODE_CC (i.e. it is a real comparison).
3929
3930 MODE is the mode of the result, while CMP_MODE specifies the mode in
3931 which the comparison is done, so it is the mode of the operands. */
3932
3933 static rtx
3934 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3935 enum machine_mode cmp_mode, rtx op0, rtx op1)
3936 {
3937 enum rtx_code op0code = GET_CODE (op0);
3938
3939 if (op1 == const0_rtx && COMPARISON_P (op0))
3940 {
3941 /* If op0 is a comparison, extract the comparison arguments
3942 from it. */
3943 if (code == NE)
3944 {
3945 if (GET_MODE (op0) == mode)
3946 return simplify_rtx (op0);
3947 else
3948 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3949 XEXP (op0, 0), XEXP (op0, 1));
3950 }
3951 else if (code == EQ)
3952 {
3953 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3954 if (new_code != UNKNOWN)
3955 return simplify_gen_relational (new_code, mode, VOIDmode,
3956 XEXP (op0, 0), XEXP (op0, 1));
3957 }
3958 }
3959
3960 /* (LTU/GEU (PLUS a C) C), where C is a constant, can be simplified to
3961 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
3962 if ((code == LTU || code == GEU)
3963 && GET_CODE (op0) == PLUS
3964 && CONST_INT_P (XEXP (op0, 1))
3965 && (rtx_equal_p (op1, XEXP (op0, 0))
3966 || rtx_equal_p (op1, XEXP (op0, 1))))
3967 {
3968 rtx new_cmp
3969 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
3970 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
3971 cmp_mode, XEXP (op0, 0), new_cmp);
3972 }
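/* For example, (ltu (plus a (const_int 4)) (const_int 4)) becomes
(geu a (const_int -4)): the unsigned sum wraps around exactly when A is
one of the four largest values of the mode. */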
3973
3974 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3975 if ((code == LTU || code == GEU)
3976 && GET_CODE (op0) == PLUS
3977 && rtx_equal_p (op1, XEXP (op0, 1))
3978 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3979 && !rtx_equal_p (op1, XEXP (op0, 0)))
3980 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
3981
3982 if (op1 == const0_rtx)
3983 {
3984 /* Canonicalize (GTU x 0) as (NE x 0). */
3985 if (code == GTU)
3986 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3987 /* Canonicalize (LEU x 0) as (EQ x 0). */
3988 if (code == LEU)
3989 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3990 }
3991 else if (op1 == const1_rtx)
3992 {
3993 switch (code)
3994 {
3995 case GE:
3996 /* Canonicalize (GE x 1) as (GT x 0). */
3997 return simplify_gen_relational (GT, mode, cmp_mode,
3998 op0, const0_rtx);
3999 case GEU:
4000 /* Canonicalize (GEU x 1) as (NE x 0). */
4001 return simplify_gen_relational (NE, mode, cmp_mode,
4002 op0, const0_rtx);
4003 case LT:
4004 /* Canonicalize (LT x 1) as (LE x 0). */
4005 return simplify_gen_relational (LE, mode, cmp_mode,
4006 op0, const0_rtx);
4007 case LTU:
4008 /* Canonicalize (LTU x 1) as (EQ x 0). */
4009 return simplify_gen_relational (EQ, mode, cmp_mode,
4010 op0, const0_rtx);
4011 default:
4012 break;
4013 }
4014 }
4015 else if (op1 == constm1_rtx)
4016 {
4017 /* Canonicalize (LE x -1) as (LT x 0). */
4018 if (code == LE)
4019 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4020 /* Canonicalize (GT x -1) as (GE x 0). */
4021 if (code == GT)
4022 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4023 }
4024
4025 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4026 if ((code == EQ || code == NE)
4027 && (op0code == PLUS || op0code == MINUS)
4028 && CONSTANT_P (op1)
4029 && CONSTANT_P (XEXP (op0, 1))
4030 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4031 {
4032 rtx x = XEXP (op0, 0);
4033 rtx c = XEXP (op0, 1);
4034
4035 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4036 cmp_mode, op1, c);
4037 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4038 }
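/* For example, (eq (plus x (const_int 3)) (const_int 10)) becomes
(eq x (const_int 7)), since the constant is moved across the comparison
and folded as 10 - 3. */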
4039
4040 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4041 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4042 if (code == NE
4043 && op1 == const0_rtx
4044 && GET_MODE_CLASS (mode) == MODE_INT
4045 && cmp_mode != VOIDmode
4046 /* ??? Work-around BImode bugs in the ia64 backend. */
4047 && mode != BImode
4048 && cmp_mode != BImode
4049 && nonzero_bits (op0, cmp_mode) == 1
4050 && STORE_FLAG_VALUE == 1)
4051 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4052 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4053 : lowpart_subreg (mode, op0, cmp_mode);
4054
4055 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4056 if ((code == EQ || code == NE)
4057 && op1 == const0_rtx
4058 && op0code == XOR)
4059 return simplify_gen_relational (code, mode, cmp_mode,
4060 XEXP (op0, 0), XEXP (op0, 1));
4061
4062 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4063 if ((code == EQ || code == NE)
4064 && op0code == XOR
4065 && rtx_equal_p (XEXP (op0, 0), op1)
4066 && !side_effects_p (XEXP (op0, 0)))
4067 return simplify_gen_relational (code, mode, cmp_mode,
4068 XEXP (op0, 1), const0_rtx);
4069
4070 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4071 if ((code == EQ || code == NE)
4072 && op0code == XOR
4073 && rtx_equal_p (XEXP (op0, 1), op1)
4074 && !side_effects_p (XEXP (op0, 1)))
4075 return simplify_gen_relational (code, mode, cmp_mode,
4076 XEXP (op0, 0), const0_rtx);
4077
4078 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4079 if ((code == EQ || code == NE)
4080 && op0code == XOR
4081 && (CONST_INT_P (op1)
4082 || GET_CODE (op1) == CONST_DOUBLE)
4083 && (CONST_INT_P (XEXP (op0, 1))
4084 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4085 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4086 simplify_gen_binary (XOR, cmp_mode,
4087 XEXP (op0, 1), op1));
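/* For example, (eq (xor x (const_int 5)) (const_int 3)) becomes
(eq x (const_int 6)), since 5 ^ 3 == 6. */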
4088
4089 if (op0code == POPCOUNT && op1 == const0_rtx)
4090 switch (code)
4091 {
4092 case EQ:
4093 case LE:
4094 case LEU:
4095 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4096 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4097 XEXP (op0, 0), const0_rtx);
4098
4099 case NE:
4100 case GT:
4101 case GTU:
4102 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4103 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4104 XEXP (op0, 0), const0_rtx);
4105
4106 default:
4107 break;
4108 }
4109
4110 return NULL_RTX;
4111 }
4112
4113 enum
4114 {
4115 CMP_EQ = 1,
4116 CMP_LT = 2,
4117 CMP_GT = 4,
4118 CMP_LTU = 8,
4119 CMP_GTU = 16
4120 };
4121
4122
4123 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4124 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4125 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4126 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4127 For floating-point comparisons, assume that the operands were ordered. */
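/* For example, comparing the values -1 and 1 corresponds to
CMP_LT | CMP_GTU (signed less-than, unsigned greater-than), so LT maps
to const_true_rtx while LTU maps to const0_rtx. */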
4128
4129 static rtx
4130 comparison_result (enum rtx_code code, int known_results)
4131 {
4132 switch (code)
4133 {
4134 case EQ:
4135 case UNEQ:
4136 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4137 case NE:
4138 case LTGT:
4139 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4140
4141 case LT:
4142 case UNLT:
4143 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4144 case GE:
4145 case UNGE:
4146 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4147
4148 case GT:
4149 case UNGT:
4150 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4151 case LE:
4152 case UNLE:
4153 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4154
4155 case LTU:
4156 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4157 case GEU:
4158 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4159
4160 case GTU:
4161 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4162 case LEU:
4163 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4164
4165 case ORDERED:
4166 return const_true_rtx;
4167 case UNORDERED:
4168 return const0_rtx;
4169 default:
4170 gcc_unreachable ();
4171 }
4172 }
4173
4174 /* Check if the given comparison (done in the given MODE) is actually a
4175 tautology or a contradiction.
4176 If no simplification is possible, this function returns zero.
4177 Otherwise, it returns either const_true_rtx or const0_rtx. */
4178
4179 rtx
4180 simplify_const_relational_operation (enum rtx_code code,
4181 enum machine_mode mode,
4182 rtx op0, rtx op1)
4183 {
4184 rtx tem;
4185 rtx trueop0;
4186 rtx trueop1;
4187
4188 gcc_assert (mode != VOIDmode
4189 || (GET_MODE (op0) == VOIDmode
4190 && GET_MODE (op1) == VOIDmode));
4191
4192 /* If op0 is a compare, extract the comparison arguments from it. */
4193 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4194 {
4195 op1 = XEXP (op0, 1);
4196 op0 = XEXP (op0, 0);
4197
4198 if (GET_MODE (op0) != VOIDmode)
4199 mode = GET_MODE (op0);
4200 else if (GET_MODE (op1) != VOIDmode)
4201 mode = GET_MODE (op1);
4202 else
4203 return 0;
4204 }
4205
4206 /* We can't simplify MODE_CC values since we don't know what the
4207 actual comparison is. */
4208 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4209 return 0;
4210
4211 /* Make sure the constant is second. */
4212 if (swap_commutative_operands_p (op0, op1))
4213 {
4214 tem = op0, op0 = op1, op1 = tem;
4215 code = swap_condition (code);
4216 }
4217
4218 trueop0 = avoid_constant_pool_reference (op0);
4219 trueop1 = avoid_constant_pool_reference (op1);
4220
4221 /* For integer comparisons of A and B maybe we can simplify A - B and can
4222 then simplify a comparison of that with zero. If A and B are both either
4223 a register or a CONST_INT, this can't help; testing for these cases will
4224 prevent infinite recursion here and speed things up.
4225
4226 We can only do this for EQ and NE comparisons, as otherwise we may
4227 lose or introduce overflow that we cannot disregard as undefined, since
4228 we do not know the signedness of the operation on either the left- or
4229 the right-hand side of the comparison.
4230
4231 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4232 && (code == EQ || code == NE)
4233 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4234 && (REG_P (op1) || CONST_INT_P (trueop1)))
4235 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4236 /* We cannot do this if tem is a nonzero address. */
4237 && ! nonzero_address_p (tem))
4238 return simplify_const_relational_operation (signed_condition (code),
4239 mode, tem, const0_rtx);
4240
4241 if (! HONOR_NANS (mode) && code == ORDERED)
4242 return const_true_rtx;
4243
4244 if (! HONOR_NANS (mode) && code == UNORDERED)
4245 return const0_rtx;
4246
4247 /* For modes without NaNs, if the two operands are equal, we know the
4248 result except if they have side-effects. Even with NaNs we know
4249 the result of unordered comparisons and, if signaling NaNs are
4250 irrelevant, also the result of LT/GT/LTGT. */
4251 if ((! HONOR_NANS (GET_MODE (trueop0))
4252 || code == UNEQ || code == UNLE || code == UNGE
4253 || ((code == LT || code == GT || code == LTGT)
4254 && ! HONOR_SNANS (GET_MODE (trueop0))))
4255 && rtx_equal_p (trueop0, trueop1)
4256 && ! side_effects_p (trueop0))
4257 return comparison_result (code, CMP_EQ);
4258
4259 /* If the operands are floating-point constants, see if we can fold
4260 the result. */
4261 if (GET_CODE (trueop0) == CONST_DOUBLE
4262 && GET_CODE (trueop1) == CONST_DOUBLE
4263 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4264 {
4265 REAL_VALUE_TYPE d0, d1;
4266
4267 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4268 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4269
4270 /* Comparisons are unordered iff at least one of the values is NaN. */
4271 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4272 switch (code)
4273 {
4274 case UNEQ:
4275 case UNLT:
4276 case UNGT:
4277 case UNLE:
4278 case UNGE:
4279 case NE:
4280 case UNORDERED:
4281 return const_true_rtx;
4282 case EQ:
4283 case LT:
4284 case GT:
4285 case LE:
4286 case GE:
4287 case LTGT:
4288 case ORDERED:
4289 return const0_rtx;
4290 default:
4291 return 0;
4292 }
4293
4294 return comparison_result (code,
4295 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4296 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4297 }
4298
4299 /* Otherwise, see if the operands are both integers. */
4300 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4301 && (GET_CODE (trueop0) == CONST_DOUBLE
4302 || CONST_INT_P (trueop0))
4303 && (GET_CODE (trueop1) == CONST_DOUBLE
4304 || CONST_INT_P (trueop1)))
4305 {
4306 int width = GET_MODE_BITSIZE (mode);
4307 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4308 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4309
4310 /* Get the two words comprising each integer constant. */
4311 if (GET_CODE (trueop0) == CONST_DOUBLE)
4312 {
4313 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4314 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4315 }
4316 else
4317 {
4318 l0u = l0s = INTVAL (trueop0);
4319 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4320 }
4321
4322 if (GET_CODE (trueop1) == CONST_DOUBLE)
4323 {
4324 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4325 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4326 }
4327 else
4328 {
4329 l1u = l1s = INTVAL (trueop1);
4330 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4331 }
4332
4333 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4334 we have to sign or zero-extend the values. */
4335 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4336 {
4337 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4338 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4339
4340 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4341 l0s |= ((HOST_WIDE_INT) (-1) << width);
4342
4343 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4344 l1s |= ((HOST_WIDE_INT) (-1) << width);
4345 }
4346 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4347 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4348
4349 if (h0u == h1u && l0u == l1u)
4350 return comparison_result (code, CMP_EQ);
4351 else
4352 {
4353 int cr;
4354 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4355 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4356 return comparison_result (code, cr);
4357 }
4358 }
4359
4360 /* Optimize comparisons with upper and lower bounds. */
4361 if (SCALAR_INT_MODE_P (mode)
4362 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4363 && CONST_INT_P (trueop1))
4364 {
4365 int sign;
4366 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4367 HOST_WIDE_INT val = INTVAL (trueop1);
4368 HOST_WIDE_INT mmin, mmax;
4369
4370 if (code == GEU
4371 || code == LEU
4372 || code == GTU
4373 || code == LTU)
4374 sign = 0;
4375 else
4376 sign = 1;
4377
4378 /* Get a reduced range if the sign bit is zero. */
4379 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4380 {
4381 mmin = 0;
4382 mmax = nonzero;
4383 }
4384 else
4385 {
4386 rtx mmin_rtx, mmax_rtx;
4387 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4388
4389 mmin = INTVAL (mmin_rtx);
4390 mmax = INTVAL (mmax_rtx);
4391 if (sign)
4392 {
4393 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4394
4395 mmin >>= (sign_copies - 1);
4396 mmax >>= (sign_copies - 1);
4397 }
4398 }
4399
4400 switch (code)
4401 {
4402 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4403 case GEU:
4404 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4405 return const_true_rtx;
4406 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4407 return const0_rtx;
4408 break;
4409 case GE:
4410 if (val <= mmin)
4411 return const_true_rtx;
4412 if (val > mmax)
4413 return const0_rtx;
4414 break;
4415
4416 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4417 case LEU:
4418 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4419 return const_true_rtx;
4420 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4421 return const0_rtx;
4422 break;
4423 case LE:
4424 if (val >= mmax)
4425 return const_true_rtx;
4426 if (val < mmin)
4427 return const0_rtx;
4428 break;
4429
4430 case EQ:
4431 /* x == y is always false for y out of range. */
4432 if (val < mmin || val > mmax)
4433 return const0_rtx;
4434 break;
4435
4436 /* x > y is always false for y >= mmax, always true for y < mmin. */
4437 case GTU:
4438 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4439 return const0_rtx;
4440 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4441 return const_true_rtx;
4442 break;
4443 case GT:
4444 if (val >= mmax)
4445 return const0_rtx;
4446 if (val < mmin)
4447 return const_true_rtx;
4448 break;
4449
4450 /* x < y is always false for y <= mmin, always true for y > mmax. */
4451 case LTU:
4452 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4453 return const0_rtx;
4454 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4455 return const_true_rtx;
4456 break;
4457 case LT:
4458 if (val <= mmin)
4459 return const0_rtx;
4460 if (val > mmax)
4461 return const_true_rtx;
4462 break;
4463
4464 case NE:
4465 /* x != y is always true for y out of range. */
4466 if (val < mmin || val > mmax)
4467 return const_true_rtx;
4468 break;
4469
4470 default:
4471 break;
4472 }
4473 }
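/* For example, if nonzero_bits proves that TRUEOP0 fits in four bits
(mmin == 0, mmax == 15), then (gtu trueop0 (const_int 15)) folds to
const0_rtx and (leu trueop0 (const_int 15)) folds to const_true_rtx. */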
4474
4475 /* Optimize integer comparisons with zero. */
4476 if (trueop1 == const0_rtx)
4477 {
4478 /* Some addresses are known to be nonzero. We don't know
4479 their sign, but equality comparisons are known. */
4480 if (nonzero_address_p (trueop0))
4481 {
4482 if (code == EQ || code == LEU)
4483 return const0_rtx;
4484 if (code == NE || code == GTU)
4485 return const_true_rtx;
4486 }
4487
4488 /* See if the first operand is an IOR with a constant. If so, we
4489 may be able to determine the result of this comparison. */
4490 if (GET_CODE (op0) == IOR)
4491 {
4492 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4493 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4494 {
4495 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4496 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4497 && (INTVAL (inner_const)
4498 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4499
4500 switch (code)
4501 {
4502 case EQ:
4503 case LEU:
4504 return const0_rtx;
4505 case NE:
4506 case GTU:
4507 return const_true_rtx;
4508 case LT:
4509 case LE:
4510 if (has_sign)
4511 return const_true_rtx;
4512 break;
4513 case GT:
4514 case GE:
4515 if (has_sign)
4516 return const0_rtx;
4517 break;
4518 default:
4519 break;
4520 }
4521 }
4522 }
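/* For example, x | 4 is provably nonzero, so (ne (ior x (const_int 4))
(const_int 0)) folds to const_true_rtx and the corresponding EQ folds
to const0_rtx. */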
4523 }
4524
4525 /* Optimize comparison of ABS with zero. */
4526 if (trueop1 == CONST0_RTX (mode)
4527 && (GET_CODE (trueop0) == ABS
4528 || (GET_CODE (trueop0) == FLOAT_EXTEND
4529 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4530 {
4531 switch (code)
4532 {
4533 case LT:
4534 /* Optimize abs(x) < 0.0. */
4535 if (!HONOR_SNANS (mode)
4536 && (!INTEGRAL_MODE_P (mode)
4537 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4538 {
4539 if (INTEGRAL_MODE_P (mode)
4540 && (issue_strict_overflow_warning
4541 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4542 warning (OPT_Wstrict_overflow,
4543 ("assuming signed overflow does not occur when "
4544 "assuming abs (x) < 0 is false"));
4545 return const0_rtx;
4546 }
4547 break;
4548
4549 case GE:
4550 /* Optimize abs(x) >= 0.0. */
4551 if (!HONOR_NANS (mode)
4552 && (!INTEGRAL_MODE_P (mode)
4553 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4554 {
4555 if (INTEGRAL_MODE_P (mode)
4556 && (issue_strict_overflow_warning
4557 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4558 warning (OPT_Wstrict_overflow,
4559 ("assuming signed overflow does not occur when "
4560 "assuming abs (x) >= 0 is true"));
4561 return const_true_rtx;
4562 }
4563 break;
4564
4565 case UNGE:
4566 /* Optimize ! (abs(x) < 0.0). */
4567 return const_true_rtx;
4568
4569 default:
4570 break;
4571 }
4572 }
4573
4574 return 0;
4575 }
4576 \f
4577 /* Simplify CODE, an operation with result mode MODE and three operands,
4578 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4579 a constant. Return 0 if no simplification is possible. */
4580
4581 rtx
4582 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4583 enum machine_mode op0_mode, rtx op0, rtx op1,
4584 rtx op2)
4585 {
4586 unsigned int width = GET_MODE_BITSIZE (mode);
4587
4588 /* VOIDmode means "infinite" precision. */
4589 if (width == 0)
4590 width = HOST_BITS_PER_WIDE_INT;
4591
4592 switch (code)
4593 {
4594 case SIGN_EXTRACT:
4595 case ZERO_EXTRACT:
4596 if (CONST_INT_P (op0)
4597 && CONST_INT_P (op1)
4598 && CONST_INT_P (op2)
4599 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4600 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4601 {
4602 /* Extracting a bit-field from a constant. */
4603 HOST_WIDE_INT val = INTVAL (op0);
4604
4605 if (BITS_BIG_ENDIAN)
4606 val >>= (GET_MODE_BITSIZE (op0_mode)
4607 - INTVAL (op2) - INTVAL (op1));
4608 else
4609 val >>= INTVAL (op2);
4610
4611 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4612 {
4613 /* First zero-extend. */
4614 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4615 /* If desired, propagate sign bit. */
4616 if (code == SIGN_EXTRACT
4617 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4618 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4619 }
4620
4621 /* Clear the bits that don't belong in our mode,
4622 unless they and our sign bit are all one.
4623 So we get either a reasonable negative value or a reasonable
4624 unsigned value for this mode. */
4625 if (width < HOST_BITS_PER_WIDE_INT
4626 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4627 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4628 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4629
4630 return gen_int_mode (val, mode);
4631 }
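/* For example, assuming !BITS_BIG_ENDIAN,
(zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 4))
extracts bits 4..7, i.e. (0x1234 >> 4) & 0xf, and folds to (const_int 3). */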
4632 break;
4633
4634 case IF_THEN_ELSE:
4635 if (CONST_INT_P (op0))
4636 return op0 != const0_rtx ? op1 : op2;
4637
4638 /* Convert c ? a : a into "a". */
4639 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4640 return op1;
4641
4642 /* Convert a != b ? a : b into "a". */
4643 if (GET_CODE (op0) == NE
4644 && ! side_effects_p (op0)
4645 && ! HONOR_NANS (mode)
4646 && ! HONOR_SIGNED_ZEROS (mode)
4647 && ((rtx_equal_p (XEXP (op0, 0), op1)
4648 && rtx_equal_p (XEXP (op0, 1), op2))
4649 || (rtx_equal_p (XEXP (op0, 0), op2)
4650 && rtx_equal_p (XEXP (op0, 1), op1))))
4651 return op1;
4652
4653 /* Convert a == b ? a : b into "b". */
4654 if (GET_CODE (op0) == EQ
4655 && ! side_effects_p (op0)
4656 && ! HONOR_NANS (mode)
4657 && ! HONOR_SIGNED_ZEROS (mode)
4658 && ((rtx_equal_p (XEXP (op0, 0), op1)
4659 && rtx_equal_p (XEXP (op0, 1), op2))
4660 || (rtx_equal_p (XEXP (op0, 0), op2)
4661 && rtx_equal_p (XEXP (op0, 1), op1))))
4662 return op2;
4663
4664 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4665 {
4666 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4667 ? GET_MODE (XEXP (op0, 1))
4668 : GET_MODE (XEXP (op0, 0)));
4669 rtx temp;
4670
4671 /* Look for happy constants in op1 and op2. */
4672 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4673 {
4674 HOST_WIDE_INT t = INTVAL (op1);
4675 HOST_WIDE_INT f = INTVAL (op2);
4676
4677 if (t == STORE_FLAG_VALUE && f == 0)
4678 code = GET_CODE (op0);
4679 else if (t == 0 && f == STORE_FLAG_VALUE)
4680 {
4681 enum rtx_code tmp;
4682 tmp = reversed_comparison_code (op0, NULL_RTX);
4683 if (tmp == UNKNOWN)
4684 break;
4685 code = tmp;
4686 }
4687 else
4688 break;
4689
4690 return simplify_gen_relational (code, mode, cmp_mode,
4691 XEXP (op0, 0), XEXP (op0, 1));
4692 }
4693
4694 if (cmp_mode == VOIDmode)
4695 cmp_mode = op0_mode;
4696 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4697 cmp_mode, XEXP (op0, 0),
4698 XEXP (op0, 1));
4699
4700 /* See if any simplifications were possible. */
4701 if (temp)
4702 {
4703 if (CONST_INT_P (temp))
4704 return temp == const0_rtx ? op2 : op1;
4705 else if (temp)
4706 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4707 }
4708 }
4709 break;
4710
4711 case VEC_MERGE:
4712 gcc_assert (GET_MODE (op0) == mode);
4713 gcc_assert (GET_MODE (op1) == mode);
4714 gcc_assert (VECTOR_MODE_P (mode));
4715 op2 = avoid_constant_pool_reference (op2);
4716 if (CONST_INT_P (op2))
4717 {
4718 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4719 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4720 int mask = (1 << n_elts) - 1;
4721
4722 if (!(INTVAL (op2) & mask))
4723 return op1;
4724 if ((INTVAL (op2) & mask) == mask)
4725 return op0;
4726
4727 op0 = avoid_constant_pool_reference (op0);
4728 op1 = avoid_constant_pool_reference (op1);
4729 if (GET_CODE (op0) == CONST_VECTOR
4730 && GET_CODE (op1) == CONST_VECTOR)
4731 {
4732 rtvec v = rtvec_alloc (n_elts);
4733 unsigned int i;
4734
4735 for (i = 0; i < n_elts; i++)
4736 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4737 ? CONST_VECTOR_ELT (op0, i)
4738 : CONST_VECTOR_ELT (op1, i));
4739 return gen_rtx_CONST_VECTOR (mode, v);
4740 }
4741 }
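/* For example, with four-element constant vectors,
(vec_merge op0 op1 (const_int 5)) takes elements 0 and 2 from op0
(mask bits set) and elements 1 and 3 from op1. */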
4742 break;
4743
4744 default:
4745 gcc_unreachable ();
4746 }
4747
4748 return 0;
4749 }
4750
4751 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4752 or CONST_VECTOR,
4753 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4754
4755 Works by unpacking OP into a collection of 8-bit values
4756 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4757 and then repacking them again for OUTERMODE. */
4758
4759 static rtx
4760 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4761 enum machine_mode innermode, unsigned int byte)
4762 {
4763 /* We support up to 512-bit values (for V8DFmode). */
4764 enum {
4765 max_bitsize = 512,
4766 value_bit = 8,
4767 value_mask = (1 << value_bit) - 1
4768 };
4769 unsigned char value[max_bitsize / value_bit];
4770 int value_start;
4771 int i;
4772 int elem;
4773
4774 int num_elem;
4775 rtx * elems;
4776 int elem_bitsize;
4777 rtx result_s;
4778 rtvec result_v = NULL;
4779 enum mode_class outer_class;
4780 enum machine_mode outer_submode;
4781
4782 /* Some ports misuse CCmode. */
4783 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4784 return op;
4785
4786 /* We have no way to represent a complex constant at the rtl level. */
4787 if (COMPLEX_MODE_P (outermode))
4788 return NULL_RTX;
4789
4790 /* Unpack the value. */
4791
4792 if (GET_CODE (op) == CONST_VECTOR)
4793 {
4794 num_elem = CONST_VECTOR_NUNITS (op);
4795 elems = &CONST_VECTOR_ELT (op, 0);
4796 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4797 }
4798 else
4799 {
4800 num_elem = 1;
4801 elems = &op;
4802 elem_bitsize = max_bitsize;
4803 }
4804 /* If this asserts, it is too complicated; reducing value_bit may help. */
4805 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4806 /* I don't know how to handle endianness of sub-units. */
4807 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4808
4809 for (elem = 0; elem < num_elem; elem++)
4810 {
4811 unsigned char * vp;
4812 rtx el = elems[elem];
4813
4814 /* Vectors are kept in target memory order. (This is probably
4815 a mistake.) */
4816 {
4817 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4818 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4819 / BITS_PER_UNIT);
4820 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4821 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4822 unsigned bytele = (subword_byte % UNITS_PER_WORD
4823 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4824 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4825 }
4826
4827 switch (GET_CODE (el))
4828 {
4829 case CONST_INT:
4830 for (i = 0;
4831 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4832 i += value_bit)
4833 *vp++ = INTVAL (el) >> i;
4834 /* CONST_INTs are always logically sign-extended. */
4835 for (; i < elem_bitsize; i += value_bit)
4836 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4837 break;
4838
4839 case CONST_DOUBLE:
4840 if (GET_MODE (el) == VOIDmode)
4841 {
4842 /* If this triggers, someone should have generated a
4843 CONST_INT instead. */
4844 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4845
4846 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4847 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4848 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4849 {
4850 *vp++
4851 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4852 i += value_bit;
4853 }
4854 /* It shouldn't matter what's done here, so fill it with
4855 zero. */
4856 for (; i < elem_bitsize; i += value_bit)
4857 *vp++ = 0;
4858 }
4859 else
4860 {
4861 long tmp[max_bitsize / 32];
4862 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4863
4864 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4865 gcc_assert (bitsize <= elem_bitsize);
4866 gcc_assert (bitsize % value_bit == 0);
4867
4868 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4869 GET_MODE (el));
4870
4871 /* real_to_target produces its result in words affected by
4872 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4873 and use WORDS_BIG_ENDIAN instead; see the documentation
4874 of SUBREG in rtl.texi. */
4875 for (i = 0; i < bitsize; i += value_bit)
4876 {
4877 int ibase;
4878 if (WORDS_BIG_ENDIAN)
4879 ibase = bitsize - 1 - i;
4880 else
4881 ibase = i;
4882 *vp++ = tmp[ibase / 32] >> i % 32;
4883 }
4884
4885 /* It shouldn't matter what's done here, so fill it with
4886 zero. */
4887 for (; i < elem_bitsize; i += value_bit)
4888 *vp++ = 0;
4889 }
4890 break;
4891
4892 case CONST_FIXED:
4893 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4894 {
4895 for (i = 0; i < elem_bitsize; i += value_bit)
4896 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4897 }
4898 else
4899 {
4900 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4901 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4902 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4903 i += value_bit)
4904 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4905 >> (i - HOST_BITS_PER_WIDE_INT);
4906 for (; i < elem_bitsize; i += value_bit)
4907 *vp++ = 0;
4908 }
4909 break;
4910
4911 default:
4912 gcc_unreachable ();
4913 }
4914 }
4915
4916 /* Now, pick the right byte to start with. */
4917 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4918 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4919 will already have offset 0. */
4920 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4921 {
4922 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4923 - byte);
4924 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4925 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4926 byte = (subword_byte % UNITS_PER_WORD
4927 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4928 }
4929
4930 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4931 so if it's become negative it will instead be very large.) */
4932 gcc_assert (byte < GET_MODE_SIZE (innermode));
4933
4934 /* Convert from bytes to chunks of size value_bit. */
4935 value_start = byte * (BITS_PER_UNIT / value_bit);
4936
4937 /* Re-pack the value. */
4938
4939 if (VECTOR_MODE_P (outermode))
4940 {
4941 num_elem = GET_MODE_NUNITS (outermode);
4942 result_v = rtvec_alloc (num_elem);
4943 elems = &RTVEC_ELT (result_v, 0);
4944 outer_submode = GET_MODE_INNER (outermode);
4945 }
4946 else
4947 {
4948 num_elem = 1;
4949 elems = &result_s;
4950 outer_submode = outermode;
4951 }
4952
4953 outer_class = GET_MODE_CLASS (outer_submode);
4954 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4955
4956 gcc_assert (elem_bitsize % value_bit == 0);
4957 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4958
4959 for (elem = 0; elem < num_elem; elem++)
4960 {
4961 unsigned char *vp;
4962
4963 /* Vectors are stored in target memory order. (This is probably
4964 a mistake.) */
4965 {
4966 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4967 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4968 / BITS_PER_UNIT);
4969 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4970 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4971 unsigned bytele = (subword_byte % UNITS_PER_WORD
4972 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4973 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4974 }
4975
4976 switch (outer_class)
4977 {
4978 case MODE_INT:
4979 case MODE_PARTIAL_INT:
4980 {
4981 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4982
4983 for (i = 0;
4984 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4985 i += value_bit)
4986 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4987 for (; i < elem_bitsize; i += value_bit)
4988 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4989 << (i - HOST_BITS_PER_WIDE_INT));
4990
4991 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4992 know why. */
4993 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4994 elems[elem] = gen_int_mode (lo, outer_submode);
4995 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4996 elems[elem] = immed_double_const (lo, hi, outer_submode);
4997 else
4998 return NULL_RTX;
4999 }
5000 break;
5001
5002 case MODE_FLOAT:
5003 case MODE_DECIMAL_FLOAT:
5004 {
5005 REAL_VALUE_TYPE r;
5006 long tmp[max_bitsize / 32];
5007
5008 /* real_from_target wants its input in words affected by
5009 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5010 and use WORDS_BIG_ENDIAN instead; see the documentation
5011 of SUBREG in rtl.texi. */
5012 for (i = 0; i < max_bitsize / 32; i++)
5013 tmp[i] = 0;
5014 for (i = 0; i < elem_bitsize; i += value_bit)
5015 {
5016 int ibase;
5017 if (WORDS_BIG_ENDIAN)
5018 ibase = elem_bitsize - 1 - i;
5019 else
5020 ibase = i;
5021 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5022 }
5023
5024 real_from_target (&r, tmp, outer_submode);
5025 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5026 }
5027 break;
5028
5029 case MODE_FRACT:
5030 case MODE_UFRACT:
5031 case MODE_ACCUM:
5032 case MODE_UACCUM:
5033 {
5034 FIXED_VALUE_TYPE f;
5035 f.data.low = 0;
5036 f.data.high = 0;
5037 f.mode = outer_submode;
5038
5039 for (i = 0;
5040 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5041 i += value_bit)
5042 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5043 for (; i < elem_bitsize; i += value_bit)
5044 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5045 << (i - HOST_BITS_PER_WIDE_INT));
5046
5047 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5048 }
5049 break;
5050
5051 default:
5052 gcc_unreachable ();
5053 }
5054 }
5055 if (VECTOR_MODE_P (outermode))
5056 return gen_rtx_CONST_VECTOR (outermode, result_v);
5057 else
5058 return result_s;
5059 }
5060
5061 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5062 Return 0 if no simplifications are possible. */
5063 rtx
5064 simplify_subreg (enum machine_mode outermode, rtx op,
5065 enum machine_mode innermode, unsigned int byte)
5066 {
5067 /* Little bit of sanity checking. */
5068 gcc_assert (innermode != VOIDmode);
5069 gcc_assert (outermode != VOIDmode);
5070 gcc_assert (innermode != BLKmode);
5071 gcc_assert (outermode != BLKmode);
5072
5073 gcc_assert (GET_MODE (op) == innermode
5074 || GET_MODE (op) == VOIDmode);
5075
5076 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5077 gcc_assert (byte < GET_MODE_SIZE (innermode));
5078
5079 if (outermode == innermode && !byte)
5080 return op;
5081
5082 if (CONST_INT_P (op)
5083 || GET_CODE (op) == CONST_DOUBLE
5084 || GET_CODE (op) == CONST_FIXED
5085 || GET_CODE (op) == CONST_VECTOR)
5086 return simplify_immed_subreg (outermode, op, innermode, byte);
5087
5088 /* Changing mode twice with SUBREG => just change it once,
5089 or not at all if changing back to OP's starting mode. */
5090 if (GET_CODE (op) == SUBREG)
5091 {
5092 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5093 int final_offset = byte + SUBREG_BYTE (op);
5094 rtx newx;
5095
5096 if (outermode == innermostmode
5097 && byte == 0 && SUBREG_BYTE (op) == 0)
5098 return SUBREG_REG (op);
5099
5100 /* The SUBREG_BYTE represents the offset, as if the value were stored
5101 in memory. An irritating exception is the paradoxical subreg, where
5102 we define SUBREG_BYTE to be 0; on big-endian machines, this
5103 value should really be negative. For a moment, undo this exception. */
5104 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5105 {
5106 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5107 if (WORDS_BIG_ENDIAN)
5108 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5109 if (BYTES_BIG_ENDIAN)
5110 final_offset += difference % UNITS_PER_WORD;
5111 }
5112 if (SUBREG_BYTE (op) == 0
5113 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5114 {
5115 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5116 if (WORDS_BIG_ENDIAN)
5117 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5118 if (BYTES_BIG_ENDIAN)
5119 final_offset += difference % UNITS_PER_WORD;
5120 }
5121
5122 /* See whether resulting subreg will be paradoxical. */
5123 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5124 {
5125 /* In nonparadoxical subregs we can't handle negative offsets. */
5126 if (final_offset < 0)
5127 return NULL_RTX;
5128 /* Bail out in case resulting subreg would be incorrect. */
5129 if (final_offset % GET_MODE_SIZE (outermode)
5130 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5131 return NULL_RTX;
5132 }
5133 else
5134 {
5135 int offset = 0;
5136 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5137
5138 /* For a paradoxical subreg, see if we are still looking at the lower part.
5139 If so, our SUBREG_BYTE will be 0. */
5140 if (WORDS_BIG_ENDIAN)
5141 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5142 if (BYTES_BIG_ENDIAN)
5143 offset += difference % UNITS_PER_WORD;
5144 if (offset == final_offset)
5145 final_offset = 0;
5146 else
5147 return NULL_RTX;
5148 }
5149
5150 /* Recurse for further possible simplifications. */
5151 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5152 final_offset);
5153 if (newx)
5154 return newx;
5155 if (validate_subreg (outermode, innermostmode,
5156 SUBREG_REG (op), final_offset))
5157 {
5158 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5159 if (SUBREG_PROMOTED_VAR_P (op)
5160 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5161 && GET_MODE_CLASS (outermode) == MODE_INT
5162 && IN_RANGE (GET_MODE_SIZE (outermode),
5163 GET_MODE_SIZE (innermode),
5164 GET_MODE_SIZE (innermostmode))
5165 && subreg_lowpart_p (newx))
5166 {
5167 SUBREG_PROMOTED_VAR_P (newx) = 1;
5168 SUBREG_PROMOTED_UNSIGNED_SET
5169 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5170 }
5171 return newx;
5172 }
5173 return NULL_RTX;
5174 }
5175
5176 /* Merge implicit and explicit truncations. */
5177
5178 if (GET_CODE (op) == TRUNCATE
5179 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5180 && subreg_lowpart_offset (outermode, innermode) == byte)
5181 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5182 GET_MODE (XEXP (op, 0)));
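/* For example, a lowpart (subreg:QI ...) of (truncate:HI (x:SI)) is
merged into the single truncation (truncate:QI (x:SI)). */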
5183
5184 /* SUBREG of a hard register => just change the register number
5185 and/or mode. If the hard register is not valid in that mode,
5186 suppress this simplification. If the hard register is the stack,
5187 frame, or argument pointer, leave this as a SUBREG. */
5188
5189 if (REG_P (op) && HARD_REGISTER_P (op))
5190 {
5191 unsigned int regno, final_regno;
5192
5193 regno = REGNO (op);
5194 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5195 if (HARD_REGISTER_NUM_P (final_regno))
5196 {
5197 rtx x;
5198 int final_offset = byte;
5199
5200 /* Adjust offset for paradoxical subregs. */
5201 if (byte == 0
5202 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5203 {
5204 int difference = (GET_MODE_SIZE (innermode)
5205 - GET_MODE_SIZE (outermode));
5206 if (WORDS_BIG_ENDIAN)
5207 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5208 if (BYTES_BIG_ENDIAN)
5209 final_offset += difference % UNITS_PER_WORD;
5210 }
5211
5212 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5213
5214 /* Propagate the original regno. We don't have any way to specify
5215 the offset inside the original regno, so do so only for the lowpart.
5216 The information is used only by alias analysis, which cannot
5217 grok a partial register anyway. */
5218
5219 if (subreg_lowpart_offset (outermode, innermode) == byte)
5220 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5221 return x;
5222 }
5223 }
5224
5225 /* If we have a SUBREG of a register that we are replacing and we are
5226 replacing it with a MEM, make a new MEM and try replacing the
5227 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5228 or if we would be widening it. */
5229
5230 if (MEM_P (op)
5231 && ! mode_dependent_address_p (XEXP (op, 0))
5232 /* Allow splitting of volatile memory references in case we don't
5233 have an instruction to move the whole thing. */
5234 && (! MEM_VOLATILE_P (op)
5235 || ! have_insn_for (SET, innermode))
5236 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5237 return adjust_address_nv (op, outermode, byte);
5238
5239 /* Handle complex values represented as CONCAT
5240 of real and imaginary part. */
5241 if (GET_CODE (op) == CONCAT)
5242 {
5243 unsigned int part_size, final_offset;
5244 rtx part, res;
5245
5246 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5247 if (byte < part_size)
5248 {
5249 part = XEXP (op, 0);
5250 final_offset = byte;
5251 }
5252 else
5253 {
5254 part = XEXP (op, 1);
5255 final_offset = byte - part_size;
5256 }
5257
5258 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5259 return NULL_RTX;
5260
5261 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5262 if (res)
5263 return res;
5264 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5265 return gen_rtx_SUBREG (outermode, part, final_offset);
5266 return NULL_RTX;
5267 }
5268
5269 /* Optimize SUBREG truncations of zero and sign extended values. */
5270 if ((GET_CODE (op) == ZERO_EXTEND
5271 || GET_CODE (op) == SIGN_EXTEND)
5272 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5273 {
5274 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5275
5276 /* If we're requesting the lowpart of a zero or sign extension,
5277 there are three possibilities. If the outermode is the same
5278 as the origmode, we can omit both the extension and the subreg.
5279 If the outermode is not larger than the origmode, we can apply
5280 the truncation without the extension. Finally, if the outermode
5281 is larger than the origmode, but both are integer modes, we
5282 can just extend to the appropriate mode. */
5283 if (bitpos == 0)
5284 {
5285 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5286 if (outermode == origmode)
5287 return XEXP (op, 0);
5288 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5289 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5290 subreg_lowpart_offset (outermode,
5291 origmode));
5292 if (SCALAR_INT_MODE_P (outermode))
5293 return simplify_gen_unary (GET_CODE (op), outermode,
5294 XEXP (op, 0), origmode);
5295 }
5296
5297 /* A SUBREG resulting from a zero extension may fold to zero if
5298 it extracts higher bits than the ZERO_EXTEND's source provides. */
5299 if (GET_CODE (op) == ZERO_EXTEND
5300 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5301 return CONST0_RTX (outermode);
5302 }
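/* For example, when BYTE selects the lowpart,
(subreg:QI (zero_extend:SI (x:QI)) ...) reduces to X itself, while
(subreg:HI (zero_extend:SI (x:QI)) ...) becomes (zero_extend:HI (x:QI)). */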
5303
5304 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5305 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5306 the outer subreg is effectively a truncation to the original mode. */
5307 if ((GET_CODE (op) == LSHIFTRT
5308 || GET_CODE (op) == ASHIFTRT)
5309 && SCALAR_INT_MODE_P (outermode)
5310 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
5311 to avoid the possibility that an outer LSHIFTRT shifts by more
5312 than the sign extension's sign_bit_copies and introduces zeros
5313 into the high bits of the result. */
5314 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5315 && CONST_INT_P (XEXP (op, 1))
5316 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5317 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5318 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5319 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5320 return simplify_gen_binary (ASHIFTRT, outermode,
5321 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5322
5323 /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5324 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5325 the outer subreg is effectively a truncation to the original mode. */
5326 if ((GET_CODE (op) == LSHIFTRT
5327 || GET_CODE (op) == ASHIFTRT)
5328 && SCALAR_INT_MODE_P (outermode)
5329 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5330 && CONST_INT_P (XEXP (op, 1))
5331 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5332 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5333 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5334 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5335 return simplify_gen_binary (LSHIFTRT, outermode,
5336 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5337
5338 /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5339 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5340 the outer subreg is effectively a truncation to the original mode. */
5341 if (GET_CODE (op) == ASHIFT
5342 && SCALAR_INT_MODE_P (outermode)
5343 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5344 && CONST_INT_P (XEXP (op, 1))
5345 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5346 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5347 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5348 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5349 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5350 return simplify_gen_binary (ASHIFT, outermode,
5351 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5352
5353 /* Recognize a word extraction from a multi-word subreg. */
5354 if ((GET_CODE (op) == LSHIFTRT
5355 || GET_CODE (op) == ASHIFTRT)
5356 && SCALAR_INT_MODE_P (outermode)
5357 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5358 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5359 && CONST_INT_P (XEXP (op, 1))
5360 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5361 && INTVAL (XEXP (op, 1)) >= 0
5362 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5363 && byte == subreg_lowpart_offset (outermode, innermode))
5364 {
5365 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5366 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5367 (WORDS_BIG_ENDIAN
5368 ? byte - shifted_bytes
5369 : byte + shifted_bytes));
5370 }
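/* For example, assuming a little-endian target with 32-bit words,
(subreg:SI (lshiftrt:DI (x:DI) (const_int 32)) 0) becomes
(subreg:SI (x:DI) 4), i.e. the upper word of X is extracted directly. */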
5371
5372 return NULL_RTX;
5373 }
5374
5375 /* Make a SUBREG operation or equivalent if it folds. */
5376
5377 rtx
5378 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5379 enum machine_mode innermode, unsigned int byte)
5380 {
5381 rtx newx;
5382
5383 newx = simplify_subreg (outermode, op, innermode, byte);
5384 if (newx)
5385 return newx;
5386
5387 if (GET_CODE (op) == SUBREG
5388 || GET_CODE (op) == CONCAT
5389 || GET_MODE (op) == VOIDmode)
5390 return NULL_RTX;
5391
5392 if (validate_subreg (outermode, innermode, op, byte))
5393 return gen_rtx_SUBREG (outermode, op, byte);
5394
5395 return NULL_RTX;
5396 }
5397
5398 /* Simplify X, an rtx expression.
5399
5400 Return the simplified expression or NULL if no simplifications
5401 were possible.
5402
5403 This is the preferred entry point into the simplification routines;
5404 however, we still allow passes to call the more specific routines.
5405
5406 Right now GCC has three (yes, three) major bodies of RTL simplification
5407 code that need to be unified.
5408
5409 1. fold_rtx in cse.c. This code uses various CSE specific
5410 information to aid in RTL simplification.
5411
5412 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5413 it uses combine specific information to aid in RTL
5414 simplification.
5415
5416 3. The routines in this file.
5417
5418
5419 Long term we want to only have one body of simplification code; to
5420 get to that state I recommend the following steps:
5421
5422 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5423 which do not depend on pass-specific state into these routines.
5424
5425 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5426 use this routine whenever possible.
5427
5428 3. Allow for pass dependent state to be provided to these
5429 routines and add simplifications based on the pass dependent
5430 state. Remove code from cse.c & combine.c that becomes
5431 redundant/dead.
5432
5433 It will take time, but ultimately the compiler will be easier to
5434 maintain and improve. It's totally silly that when we add a
5435 simplification it needs to be added to 4 places (3 for RTL
5436 simplification and 1 for tree simplification). */
5437
5438 rtx
5439 simplify_rtx (const_rtx x)
5440 {
5441 const enum rtx_code code = GET_CODE (x);
5442 const enum machine_mode mode = GET_MODE (x);
5443
5444 switch (GET_RTX_CLASS (code))
5445 {
5446 case RTX_UNARY:
5447 return simplify_unary_operation (code, mode,
5448 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5449 case RTX_COMM_ARITH:
5450 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5451 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5452
5453 /* Fall through.... */
5454
5455 case RTX_BIN_ARITH:
5456 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5457
5458 case RTX_TERNARY:
5459 case RTX_BITFIELD_OPS:
5460 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5461 XEXP (x, 0), XEXP (x, 1),
5462 XEXP (x, 2));
5463
5464 case RTX_COMPARE:
5465 case RTX_COMM_COMPARE:
5466 return simplify_relational_operation (code, mode,
5467 ((GET_MODE (XEXP (x, 0))
5468 != VOIDmode)
5469 ? GET_MODE (XEXP (x, 0))
5470 : GET_MODE (XEXP (x, 1))),
5471 XEXP (x, 0),
5472 XEXP (x, 1));
5473
5474 case RTX_EXTRA:
5475 if (code == SUBREG)
5476 return simplify_subreg (mode, SUBREG_REG (x),
5477 GET_MODE (SUBREG_REG (x)),
5478 SUBREG_BYTE (x));
5479 break;
5480
5481 case RTX_OBJ:
5482 if (code == LO_SUM)
5483 {
5484 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5485 if (GET_CODE (XEXP (x, 0)) == HIGH
5486 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5487 return XEXP (x, 1);
5488 }
5489 break;
5490
5491 default:
5492 break;
5493 }
5494 return NULL;
5495 }