simplify-rtx.c (simplify_const_unary_operation): Handle SS_ABS.
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
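/* For example, a CONST_INT whose low word is -2 (all ones except the
   lowest bit) is treated as having a high word of HWI_SIGN_EXTEND (-2),
   i.e. -1 (all ones), while a low word of 2 gets a high word of 0.  */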
51
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
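/* For example, negating QImode (const_int -128) yields (const_int -128)
   again: +128 is not representable in QImode, so gen_int_mode truncates
   it back to the most negative value.  */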
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
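/* For example, in SImode only the value 0x80000000 qualifies.  On a host
   with 64-bit HOST_WIDE_INT such a CONST_INT is stored sign-extended, and
   the masking below strips the sign-extension copies before the test.  */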
76
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
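/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, x), where X
   is a REG, puts the constant second and returns (plus:SI x (const_int 1)),
   while two CONST_INT operands fold down to a single CONST_INT.  */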
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 if (GET_MODE (x) == BLKmode)
162 return x;
163
164 addr = XEXP (x, 0);
165
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
168
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 {
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
176 }
177
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
180
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
185 {
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
188
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
193 {
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
197 }
198 else
199 return c;
200 }
201
202 return x;
203 }
204 \f
205 /* Simplify a MEM based on its attributes. This is the default
206 delegitimize_address target hook, and it's recommended that every
207 overrider call it. */
208
209 rtx
210 delegitimize_mem_from_attrs (rtx x)
211 {
212 if (MEM_P (x)
213 && MEM_EXPR (x)
214 && (!MEM_OFFSET (x)
215 || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
216 {
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
220
221 switch (TREE_CODE (decl))
222 {
223 default:
224 decl = NULL;
225 break;
226
227 case VAR_DECL:
228 break;
229
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
237 {
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
241
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
249 {
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
253 }
254 break;
255 }
256 }
257
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
265 {
266 rtx newx;
267
268 if (MEM_OFFSET (x))
269 offset += INTVAL (MEM_OFFSET (x));
270
271 newx = DECL_RTL (decl);
272
273 if (MEM_P (newx))
274 {
275 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
276
277 /* Avoid creating a new MEM needlessly if we already had
278 the same address. We do if there's no OFFSET and the
279 old address X is identical to NEWX, or if X is of the
280 form (plus NEWX OFFSET), or the NEWX is of the form
281 (plus Y (const_int Z)) and X is that with the offset
282 added: (plus Y (const_int Z+OFFSET)). */
283 if (!((offset == 0
284 || (GET_CODE (o) == PLUS
285 && GET_CODE (XEXP (o, 1)) == CONST_INT
286 && (offset == INTVAL (XEXP (o, 1))
287 || (GET_CODE (n) == PLUS
288 && GET_CODE (XEXP (n, 1)) == CONST_INT
289 && (INTVAL (XEXP (n, 1)) + offset
290 == INTVAL (XEXP (o, 1)))
291 && (n = XEXP (n, 0))))
292 && (o = XEXP (o, 0))))
293 && rtx_equal_p (o, n)))
294 x = adjust_address_nv (newx, mode, offset);
295 }
296 else if (GET_MODE (x) == GET_MODE (newx)
297 && offset == 0)
298 x = newx;
299 }
300 }
301
302 return x;
303 }
304 \f
305 /* Make a unary operation by first seeing if it folds and otherwise making
306 the specified operation. */
307
308 rtx
309 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
310 enum machine_mode op_mode)
311 {
312 rtx tem;
313
314 /* If this simplifies, use it. */
315 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
316 return tem;
317
318 return gen_rtx_fmt_e (code, mode, op);
319 }
320
321 /* Likewise for ternary operations. */
322
323 rtx
324 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
325 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
326 {
327 rtx tem;
328
329 /* If this simplifies, use it. */
330 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
331 op0, op1, op2)))
332 return tem;
333
334 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
335 }
336
337 /* Likewise, for relational operations.
 338 CMP_MODE specifies the mode in which the comparison is done. */
339
340 rtx
341 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
342 enum machine_mode cmp_mode, rtx op0, rtx op1)
343 {
344 rtx tem;
345
346 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
347 op0, op1)))
348 return tem;
349
350 return gen_rtx_fmt_ee (code, mode, op0, op1);
351 }
352 \f
353 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
354 resulting RTX. Return a new RTX which is as simplified as possible. */
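/* For example, replacing (reg:SI 1) with (const_int 6) in
   (plus:SI (reg:SI 1) (const_int 4)) simplifies all the way down to
   (const_int 10).  */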
355
356 rtx
357 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
358 {
359 enum rtx_code code = GET_CODE (x);
360 enum machine_mode mode = GET_MODE (x);
361 enum machine_mode op_mode;
362 rtx op0, op1, op2;
363
364 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
365 to build a new expression substituting recursively. If we can't do
366 anything, return our input. */
367
368 if (rtx_equal_p (x, old_rtx))
369 return copy_rtx (new_rtx);
370
371 switch (GET_RTX_CLASS (code))
372 {
373 case RTX_UNARY:
374 op0 = XEXP (x, 0);
375 op_mode = GET_MODE (op0);
376 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
377 if (op0 == XEXP (x, 0))
378 return x;
379 return simplify_gen_unary (code, mode, op0, op_mode);
380
381 case RTX_BIN_ARITH:
382 case RTX_COMM_ARITH:
383 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
384 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
385 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
386 return x;
387 return simplify_gen_binary (code, mode, op0, op1);
388
389 case RTX_COMPARE:
390 case RTX_COMM_COMPARE:
391 op0 = XEXP (x, 0);
392 op1 = XEXP (x, 1);
393 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
394 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
395 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
396 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
397 return x;
398 return simplify_gen_relational (code, mode, op_mode, op0, op1);
399
400 case RTX_TERNARY:
401 case RTX_BITFIELD_OPS:
402 op0 = XEXP (x, 0);
403 op_mode = GET_MODE (op0);
404 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
405 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
406 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
407 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
408 return x;
409 if (op_mode == VOIDmode)
410 op_mode = GET_MODE (op0);
411 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
412
413 case RTX_EXTRA:
414 /* The only case we try to handle is a SUBREG. */
415 if (code == SUBREG)
416 {
417 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
418 if (op0 == SUBREG_REG (x))
419 return x;
420 op0 = simplify_gen_subreg (GET_MODE (x), op0,
421 GET_MODE (SUBREG_REG (x)),
422 SUBREG_BYTE (x));
423 return op0 ? op0 : x;
424 }
425 break;
426
427 case RTX_OBJ:
428 if (code == MEM)
429 {
430 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
431 if (op0 == XEXP (x, 0))
432 return x;
433 return replace_equiv_address_nv (x, op0);
434 }
435 else if (code == LO_SUM)
436 {
437 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
438 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
439
440 /* (lo_sum (high x) x) -> x */
441 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
442 return op1;
443
444 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
445 return x;
446 return gen_rtx_LO_SUM (mode, op0, op1);
447 }
448 break;
449
450 default:
451 break;
452 }
453 return x;
454 }
455 \f
456 /* Try to simplify a unary operation CODE whose output mode is to be
457 MODE with input operand OP whose mode was originally OP_MODE.
458 Return zero if no simplification can be made. */
459 rtx
460 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
461 rtx op, enum machine_mode op_mode)
462 {
463 rtx trueop, tem;
464
465 if (GET_CODE (op) == CONST)
466 op = XEXP (op, 0);
467
468 trueop = avoid_constant_pool_reference (op);
469
470 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
471 if (tem)
472 return tem;
473
474 return simplify_unary_operation_1 (code, mode, op);
475 }
476
477 /* Perform some simplifications we can do even if the operands
478 aren't constant. */
479 static rtx
480 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
481 {
482 enum rtx_code reversed;
483 rtx temp;
484
485 switch (code)
486 {
487 case NOT:
488 /* (not (not X)) == X. */
489 if (GET_CODE (op) == NOT)
490 return XEXP (op, 0);
491
492 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
493 comparison is all ones. */
494 if (COMPARISON_P (op)
495 && (mode == BImode || STORE_FLAG_VALUE == -1)
496 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
497 return simplify_gen_relational (reversed, mode, VOIDmode,
498 XEXP (op, 0), XEXP (op, 1));
499
500 /* (not (plus X -1)) can become (neg X). */
501 if (GET_CODE (op) == PLUS
502 && XEXP (op, 1) == constm1_rtx)
503 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
504
505 /* Similarly, (not (neg X)) is (plus X -1). */
506 if (GET_CODE (op) == NEG)
507 return plus_constant (XEXP (op, 0), -1);
508
509 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
510 if (GET_CODE (op) == XOR
511 && CONST_INT_P (XEXP (op, 1))
512 && (temp = simplify_unary_operation (NOT, mode,
513 XEXP (op, 1), mode)) != 0)
514 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
515
516 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
517 if (GET_CODE (op) == PLUS
518 && CONST_INT_P (XEXP (op, 1))
519 && mode_signbit_p (mode, XEXP (op, 1))
520 && (temp = simplify_unary_operation (NOT, mode,
521 XEXP (op, 1), mode)) != 0)
522 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
523
524
525 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
526 operands other than 1, but that is not valid. We could do a
527 similar simplification for (not (lshiftrt C X)) where C is
528 just the sign bit, but this doesn't seem common enough to
529 bother with. */
530 if (GET_CODE (op) == ASHIFT
531 && XEXP (op, 0) == const1_rtx)
532 {
533 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
534 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
535 }
536
537 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
538 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
539 so we can perform the above simplification. */
540
541 if (STORE_FLAG_VALUE == -1
542 && GET_CODE (op) == ASHIFTRT
 543 && CONST_INT_P (XEXP (op, 1))
544 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
545 return simplify_gen_relational (GE, mode, VOIDmode,
546 XEXP (op, 0), const0_rtx);
547
548
549 if (GET_CODE (op) == SUBREG
550 && subreg_lowpart_p (op)
551 && (GET_MODE_SIZE (GET_MODE (op))
552 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
553 && GET_CODE (SUBREG_REG (op)) == ASHIFT
554 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
555 {
556 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
557 rtx x;
558
559 x = gen_rtx_ROTATE (inner_mode,
560 simplify_gen_unary (NOT, inner_mode, const1_rtx,
561 inner_mode),
562 XEXP (SUBREG_REG (op), 1));
563 return rtl_hooks.gen_lowpart_no_emit (mode, x);
564 }
565
566 /* Apply De Morgan's laws to reduce number of patterns for machines
567 with negating logical insns (and-not, nand, etc.). If result has
568 only one NOT, put it first, since that is how the patterns are
569 coded. */
570
571 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
572 {
573 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
574 enum machine_mode op_mode;
575
576 op_mode = GET_MODE (in1);
577 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
578
579 op_mode = GET_MODE (in2);
580 if (op_mode == VOIDmode)
581 op_mode = mode;
582 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
583
584 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
585 {
586 rtx tem = in2;
587 in2 = in1; in1 = tem;
588 }
589
590 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
591 mode, in1, in2);
592 }
593 break;
594
595 case NEG:
596 /* (neg (neg X)) == X. */
597 if (GET_CODE (op) == NEG)
598 return XEXP (op, 0);
599
600 /* (neg (plus X 1)) can become (not X). */
601 if (GET_CODE (op) == PLUS
602 && XEXP (op, 1) == const1_rtx)
603 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
604
605 /* Similarly, (neg (not X)) is (plus X 1). */
606 if (GET_CODE (op) == NOT)
607 return plus_constant (XEXP (op, 0), 1);
608
609 /* (neg (minus X Y)) can become (minus Y X). This transformation
610 isn't safe for modes with signed zeros, since if X and Y are
611 both +0, (minus Y X) is the same as (minus X Y). If the
612 rounding mode is towards +infinity (or -infinity) then the two
613 expressions will be rounded differently. */
614 if (GET_CODE (op) == MINUS
615 && !HONOR_SIGNED_ZEROS (mode)
616 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
617 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
618
619 if (GET_CODE (op) == PLUS
620 && !HONOR_SIGNED_ZEROS (mode)
621 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
622 {
623 /* (neg (plus A C)) is simplified to (minus -C A). */
624 if (CONST_INT_P (XEXP (op, 1))
625 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
626 {
627 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
628 if (temp)
629 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
630 }
631
632 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
633 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
634 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
635 }
636
637 /* (neg (mult A B)) becomes (mult (neg A) B).
638 This works even for floating-point values. */
639 if (GET_CODE (op) == MULT
640 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
641 {
642 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
643 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
644 }
645
646 /* NEG commutes with ASHIFT since it is multiplication. Only do
647 this if we can then eliminate the NEG (e.g., if the operand
648 is a constant). */
649 if (GET_CODE (op) == ASHIFT)
650 {
651 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
652 if (temp)
653 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
654 }
655
656 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
657 C is equal to the width of MODE minus 1. */
658 if (GET_CODE (op) == ASHIFTRT
659 && CONST_INT_P (XEXP (op, 1))
660 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
661 return simplify_gen_binary (LSHIFTRT, mode,
662 XEXP (op, 0), XEXP (op, 1));
663
664 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
665 C is equal to the width of MODE minus 1. */
666 if (GET_CODE (op) == LSHIFTRT
667 && CONST_INT_P (XEXP (op, 1))
668 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
669 return simplify_gen_binary (ASHIFTRT, mode,
670 XEXP (op, 0), XEXP (op, 1));
671
672 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
673 if (GET_CODE (op) == XOR
674 && XEXP (op, 1) == const1_rtx
675 && nonzero_bits (XEXP (op, 0), mode) == 1)
676 return plus_constant (XEXP (op, 0), -1);
677
678 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
679 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
680 if (GET_CODE (op) == LT
681 && XEXP (op, 1) == const0_rtx
682 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
683 {
684 enum machine_mode inner = GET_MODE (XEXP (op, 0));
685 int isize = GET_MODE_BITSIZE (inner);
686 if (STORE_FLAG_VALUE == 1)
687 {
688 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
689 GEN_INT (isize - 1));
690 if (mode == inner)
691 return temp;
692 if (GET_MODE_BITSIZE (mode) > isize)
693 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
694 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
695 }
696 else if (STORE_FLAG_VALUE == -1)
697 {
698 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
699 GEN_INT (isize - 1));
700 if (mode == inner)
701 return temp;
702 if (GET_MODE_BITSIZE (mode) > isize)
703 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
704 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
705 }
706 }
707 break;
708
709 case TRUNCATE:
710 /* We can't handle truncation to a partial integer mode here
711 because we don't know the real bitsize of the partial
712 integer mode. */
713 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
714 break;
715
716 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
717 if ((GET_CODE (op) == SIGN_EXTEND
718 || GET_CODE (op) == ZERO_EXTEND)
719 && GET_MODE (XEXP (op, 0)) == mode)
720 return XEXP (op, 0);
721
722 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
723 (OP:SI foo:SI) if OP is NEG or ABS. */
724 if ((GET_CODE (op) == ABS
725 || GET_CODE (op) == NEG)
726 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
727 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
728 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
729 return simplify_gen_unary (GET_CODE (op), mode,
730 XEXP (XEXP (op, 0), 0), mode);
731
732 /* (truncate:A (subreg:B (truncate:C X) 0)) is
733 (truncate:A X). */
734 if (GET_CODE (op) == SUBREG
735 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
736 && subreg_lowpart_p (op))
737 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
738 GET_MODE (XEXP (SUBREG_REG (op), 0)));
739
740 /* If we know that the value is already truncated, we can
741 replace the TRUNCATE with a SUBREG. Note that this is also
742 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
743 modes we just have to apply a different definition for
744 truncation. But don't do this for an (LSHIFTRT (MULT ...))
745 since this will cause problems with the umulXi3_highpart
746 patterns. */
747 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
748 GET_MODE_BITSIZE (GET_MODE (op)))
749 ? (num_sign_bit_copies (op, GET_MODE (op))
750 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
751 - GET_MODE_BITSIZE (mode)))
752 : truncated_to_mode (mode, op))
753 && ! (GET_CODE (op) == LSHIFTRT
754 && GET_CODE (XEXP (op, 0)) == MULT))
755 return rtl_hooks.gen_lowpart_no_emit (mode, op);
756
757 /* A truncate of a comparison can be replaced with a subreg if
758 STORE_FLAG_VALUE permits. This is like the previous test,
759 but it works even if the comparison is done in a mode larger
760 than HOST_BITS_PER_WIDE_INT. */
761 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
762 && COMPARISON_P (op)
763 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
764 return rtl_hooks.gen_lowpart_no_emit (mode, op);
765 break;
766
767 case FLOAT_TRUNCATE:
768 if (DECIMAL_FLOAT_MODE_P (mode))
769 break;
770
771 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
772 if (GET_CODE (op) == FLOAT_EXTEND
773 && GET_MODE (XEXP (op, 0)) == mode)
774 return XEXP (op, 0);
775
776 /* (float_truncate:SF (float_truncate:DF foo:XF))
777 = (float_truncate:SF foo:XF).
778 This may eliminate double rounding, so it is unsafe.
779
780 (float_truncate:SF (float_extend:XF foo:DF))
781 = (float_truncate:SF foo:DF).
782
783 (float_truncate:DF (float_extend:XF foo:SF))
 784 = (float_extend:DF foo:SF). */
785 if ((GET_CODE (op) == FLOAT_TRUNCATE
786 && flag_unsafe_math_optimizations)
787 || GET_CODE (op) == FLOAT_EXTEND)
788 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
789 0)))
790 > GET_MODE_SIZE (mode)
791 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
792 mode,
793 XEXP (op, 0), mode);
794
795 /* (float_truncate (float x)) is (float x) */
796 if (GET_CODE (op) == FLOAT
797 && (flag_unsafe_math_optimizations
798 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
799 && ((unsigned)significand_size (GET_MODE (op))
800 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
801 - num_sign_bit_copies (XEXP (op, 0),
802 GET_MODE (XEXP (op, 0))))))))
803 return simplify_gen_unary (FLOAT, mode,
804 XEXP (op, 0),
805 GET_MODE (XEXP (op, 0)));
806
 807 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
808 (OP:SF foo:SF) if OP is NEG or ABS. */
809 if ((GET_CODE (op) == ABS
810 || GET_CODE (op) == NEG)
811 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
812 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
813 return simplify_gen_unary (GET_CODE (op), mode,
814 XEXP (XEXP (op, 0), 0), mode);
815
816 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
817 is (float_truncate:SF x). */
818 if (GET_CODE (op) == SUBREG
819 && subreg_lowpart_p (op)
820 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
821 return SUBREG_REG (op);
822 break;
823
824 case FLOAT_EXTEND:
825 if (DECIMAL_FLOAT_MODE_P (mode))
826 break;
827
828 /* (float_extend (float_extend x)) is (float_extend x)
829
830 (float_extend (float x)) is (float x) assuming that double
831 rounding can't happen.
832 */
833 if (GET_CODE (op) == FLOAT_EXTEND
834 || (GET_CODE (op) == FLOAT
835 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
836 && ((unsigned)significand_size (GET_MODE (op))
837 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
838 - num_sign_bit_copies (XEXP (op, 0),
839 GET_MODE (XEXP (op, 0)))))))
840 return simplify_gen_unary (GET_CODE (op), mode,
841 XEXP (op, 0),
842 GET_MODE (XEXP (op, 0)));
843
844 break;
845
846 case ABS:
847 /* (abs (neg <foo>)) -> (abs <foo>) */
848 if (GET_CODE (op) == NEG)
849 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
850 GET_MODE (XEXP (op, 0)));
851
852 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
853 do nothing. */
854 if (GET_MODE (op) == VOIDmode)
855 break;
856
857 /* If operand is something known to be positive, ignore the ABS. */
858 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
859 || ((GET_MODE_BITSIZE (GET_MODE (op))
860 <= HOST_BITS_PER_WIDE_INT)
861 && ((nonzero_bits (op, GET_MODE (op))
862 & ((HOST_WIDE_INT) 1
863 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
864 == 0)))
865 return op;
866
867 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
868 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
869 return gen_rtx_NEG (mode, op);
870
871 break;
872
873 case FFS:
874 /* (ffs (*_extend <X>)) = (ffs <X>) */
875 if (GET_CODE (op) == SIGN_EXTEND
876 || GET_CODE (op) == ZERO_EXTEND)
877 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
878 GET_MODE (XEXP (op, 0)));
879 break;
880
881 case POPCOUNT:
882 switch (GET_CODE (op))
883 {
884 case BSWAP:
885 case ZERO_EXTEND:
886 /* (popcount (zero_extend <X>)) = (popcount <X>) */
887 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
888 GET_MODE (XEXP (op, 0)));
889
890 case ROTATE:
891 case ROTATERT:
892 /* Rotations don't affect popcount. */
893 if (!side_effects_p (XEXP (op, 1)))
894 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
895 GET_MODE (XEXP (op, 0)));
896 break;
897
898 default:
899 break;
900 }
901 break;
902
903 case PARITY:
904 switch (GET_CODE (op))
905 {
906 case NOT:
907 case BSWAP:
908 case ZERO_EXTEND:
909 case SIGN_EXTEND:
910 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
911 GET_MODE (XEXP (op, 0)));
912
913 case ROTATE:
914 case ROTATERT:
915 /* Rotations don't affect parity. */
916 if (!side_effects_p (XEXP (op, 1)))
917 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
918 GET_MODE (XEXP (op, 0)));
919 break;
920
921 default:
922 break;
923 }
924 break;
925
926 case BSWAP:
927 /* (bswap (bswap x)) -> x. */
928 if (GET_CODE (op) == BSWAP)
929 return XEXP (op, 0);
930 break;
931
932 case FLOAT:
933 /* (float (sign_extend <X>)) = (float <X>). */
934 if (GET_CODE (op) == SIGN_EXTEND)
935 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
936 GET_MODE (XEXP (op, 0)));
937 break;
938
939 case SIGN_EXTEND:
940 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
941 becomes just the MINUS if its mode is MODE. This allows
942 folding switch statements on machines using casesi (such as
943 the VAX). */
944 if (GET_CODE (op) == TRUNCATE
945 && GET_MODE (XEXP (op, 0)) == mode
946 && GET_CODE (XEXP (op, 0)) == MINUS
947 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
948 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
949 return XEXP (op, 0);
950
951 /* Check for a sign extension of a subreg of a promoted
952 variable, where the promotion is sign-extended, and the
953 target mode is the same as the variable's promotion. */
954 if (GET_CODE (op) == SUBREG
955 && SUBREG_PROMOTED_VAR_P (op)
956 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
957 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
958 return rtl_hooks.gen_lowpart_no_emit (mode, op);
959
960 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
961 if (! POINTERS_EXTEND_UNSIGNED
962 && mode == Pmode && GET_MODE (op) == ptr_mode
963 && (CONSTANT_P (op)
964 || (GET_CODE (op) == SUBREG
965 && REG_P (SUBREG_REG (op))
966 && REG_POINTER (SUBREG_REG (op))
967 && GET_MODE (SUBREG_REG (op)) == Pmode)))
968 return convert_memory_address (Pmode, op);
969 #endif
970 break;
971
972 case ZERO_EXTEND:
973 /* Check for a zero extension of a subreg of a promoted
974 variable, where the promotion is zero-extended, and the
975 target mode is the same as the variable's promotion. */
976 if (GET_CODE (op) == SUBREG
977 && SUBREG_PROMOTED_VAR_P (op)
978 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
979 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
980 return rtl_hooks.gen_lowpart_no_emit (mode, op);
981
982 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
983 if (POINTERS_EXTEND_UNSIGNED > 0
984 && mode == Pmode && GET_MODE (op) == ptr_mode
985 && (CONSTANT_P (op)
986 || (GET_CODE (op) == SUBREG
987 && REG_P (SUBREG_REG (op))
988 && REG_POINTER (SUBREG_REG (op))
989 && GET_MODE (SUBREG_REG (op)) == Pmode)))
990 return convert_memory_address (Pmode, op);
991 #endif
992 break;
993
994 default:
995 break;
996 }
997
998 return 0;
999 }
1000
1001 /* Try to compute the value of a unary operation CODE whose output mode is to
1002 be MODE with input operand OP whose mode was originally OP_MODE.
1003 Return zero if the value cannot be computed. */
1004 rtx
1005 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1006 rtx op, enum machine_mode op_mode)
1007 {
1008 unsigned int width = GET_MODE_BITSIZE (mode);
1009
1010 if (code == VEC_DUPLICATE)
1011 {
1012 gcc_assert (VECTOR_MODE_P (mode));
1013 if (GET_MODE (op) != VOIDmode)
1014 {
1015 if (!VECTOR_MODE_P (GET_MODE (op)))
1016 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1017 else
1018 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1019 (GET_MODE (op)));
1020 }
1021 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1022 || GET_CODE (op) == CONST_VECTOR)
1023 {
1024 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1025 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1026 rtvec v = rtvec_alloc (n_elts);
1027 unsigned int i;
1028
1029 if (GET_CODE (op) != CONST_VECTOR)
1030 for (i = 0; i < n_elts; i++)
1031 RTVEC_ELT (v, i) = op;
1032 else
1033 {
1034 enum machine_mode inmode = GET_MODE (op);
1035 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1036 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1037
1038 gcc_assert (in_n_elts < n_elts);
1039 gcc_assert ((n_elts % in_n_elts) == 0);
1040 for (i = 0; i < n_elts; i++)
1041 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1042 }
1043 return gen_rtx_CONST_VECTOR (mode, v);
1044 }
1045 }
1046
1047 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1048 {
1049 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1050 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1051 enum machine_mode opmode = GET_MODE (op);
1052 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1053 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1054 rtvec v = rtvec_alloc (n_elts);
1055 unsigned int i;
1056
1057 gcc_assert (op_n_elts == n_elts);
1058 for (i = 0; i < n_elts; i++)
1059 {
1060 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1061 CONST_VECTOR_ELT (op, i),
1062 GET_MODE_INNER (opmode));
1063 if (!x)
1064 return 0;
1065 RTVEC_ELT (v, i) = x;
1066 }
1067 return gen_rtx_CONST_VECTOR (mode, v);
1068 }
1069
1070 /* The order of these tests is critical so that, for example, we don't
1071 check the wrong mode (input vs. output) for a conversion operation,
1072 such as FIX. At some point, this should be simplified. */
1073
1074 if (code == FLOAT && GET_MODE (op) == VOIDmode
1075 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1076 {
1077 HOST_WIDE_INT hv, lv;
1078 REAL_VALUE_TYPE d;
1079
1080 if (CONST_INT_P (op))
1081 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1082 else
1083 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1084
1085 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1086 d = real_value_truncate (mode, d);
1087 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1088 }
1089 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1090 && (GET_CODE (op) == CONST_DOUBLE
1091 || CONST_INT_P (op)))
1092 {
1093 HOST_WIDE_INT hv, lv;
1094 REAL_VALUE_TYPE d;
1095
1096 if (CONST_INT_P (op))
1097 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1098 else
1099 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1100
1101 if (op_mode == VOIDmode)
1102 {
1103 /* We don't know how to interpret negative-looking numbers in
1104 this case, so don't try to fold those. */
1105 if (hv < 0)
1106 return 0;
1107 }
1108 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1109 ;
1110 else
1111 hv = 0, lv &= GET_MODE_MASK (op_mode);
1112
1113 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1114 d = real_value_truncate (mode, d);
1115 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1116 }
1117
1118 if (CONST_INT_P (op)
1119 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1120 {
1121 HOST_WIDE_INT arg0 = INTVAL (op);
1122 HOST_WIDE_INT val;
1123
1124 switch (code)
1125 {
1126 case NOT:
1127 val = ~ arg0;
1128 break;
1129
1130 case NEG:
1131 val = - arg0;
1132 break;
1133
1134 case ABS:
1135 val = (arg0 >= 0 ? arg0 : - arg0);
1136 break;
1137
1138 case FFS:
1139 /* Don't use ffs here. Instead, get low order bit and then its
1140 number. If arg0 is zero, this will return 0, as desired. */
1141 arg0 &= GET_MODE_MASK (mode);
1142 val = exact_log2 (arg0 & (- arg0)) + 1;
1143 break;
1144
1145 case CLZ:
1146 arg0 &= GET_MODE_MASK (mode);
1147 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1148 ;
1149 else
1150 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1151 break;
1152
1153 case CTZ:
1154 arg0 &= GET_MODE_MASK (mode);
1155 if (arg0 == 0)
1156 {
1157 /* Even if the value at zero is undefined, we have to come
1158 up with some replacement. Seems good enough. */
1159 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1160 val = GET_MODE_BITSIZE (mode);
1161 }
1162 else
1163 val = exact_log2 (arg0 & -arg0);
1164 break;
1165
1166 case POPCOUNT:
1167 arg0 &= GET_MODE_MASK (mode);
1168 val = 0;
1169 while (arg0)
1170 val++, arg0 &= arg0 - 1;
1171 break;
1172
1173 case PARITY:
1174 arg0 &= GET_MODE_MASK (mode);
1175 val = 0;
1176 while (arg0)
1177 val++, arg0 &= arg0 - 1;
1178 val &= 1;
1179 break;
1180
1181 case BSWAP:
1182 {
1183 unsigned int s;
1184
1185 val = 0;
1186 for (s = 0; s < width; s += 8)
1187 {
1188 unsigned int d = width - s - 8;
1189 unsigned HOST_WIDE_INT byte;
1190 byte = (arg0 >> s) & 0xff;
1191 val |= byte << d;
1192 }
1193 }
1194 break;
1195
1196 case TRUNCATE:
1197 val = arg0;
1198 break;
1199
1200 case ZERO_EXTEND:
1201 /* When zero-extending a CONST_INT, we need to know its
1202 original mode. */
1203 gcc_assert (op_mode != VOIDmode);
1204 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1205 {
1206 /* If we were really extending the mode,
1207 we would have to distinguish between zero-extension
1208 and sign-extension. */
1209 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1210 val = arg0;
1211 }
1212 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1213 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1214 else
1215 return 0;
1216 break;
1217
1218 case SIGN_EXTEND:
1219 if (op_mode == VOIDmode)
1220 op_mode = mode;
1221 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1222 {
1223 /* If we were really extending the mode,
1224 we would have to distinguish between zero-extension
1225 and sign-extension. */
1226 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1227 val = arg0;
1228 }
1229 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1230 {
1231 val
1232 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1233 if (val
1234 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1235 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1236 }
1237 else
1238 return 0;
1239 break;
1240
1241 case SQRT:
1242 case FLOAT_EXTEND:
1243 case FLOAT_TRUNCATE:
1244 case SS_TRUNCATE:
1245 case US_TRUNCATE:
1246 case SS_NEG:
1247 case US_NEG:
1248 case SS_ABS:
1249 return 0;
1250
1251 default:
1252 gcc_unreachable ();
1253 }
1254
1255 return gen_int_mode (val, mode);
1256 }
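  /* For example, (zero_extend:SI (const_int -1)) with op_mode QImode is
     folded by the ZERO_EXTEND case above to (const_int 255).  */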
1257
1258 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1259 for a DImode operation on a CONST_INT. */
1260 else if (GET_MODE (op) == VOIDmode
1261 && width <= HOST_BITS_PER_WIDE_INT * 2
1262 && (GET_CODE (op) == CONST_DOUBLE
1263 || CONST_INT_P (op)))
1264 {
1265 unsigned HOST_WIDE_INT l1, lv;
1266 HOST_WIDE_INT h1, hv;
1267
1268 if (GET_CODE (op) == CONST_DOUBLE)
1269 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1270 else
1271 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1272
1273 switch (code)
1274 {
1275 case NOT:
1276 lv = ~ l1;
1277 hv = ~ h1;
1278 break;
1279
1280 case NEG:
1281 neg_double (l1, h1, &lv, &hv);
1282 break;
1283
1284 case ABS:
1285 if (h1 < 0)
1286 neg_double (l1, h1, &lv, &hv);
1287 else
1288 lv = l1, hv = h1;
1289 break;
1290
1291 case FFS:
1292 hv = 0;
1293 if (l1 == 0)
1294 {
1295 if (h1 == 0)
1296 lv = 0;
1297 else
1298 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1299 }
1300 else
1301 lv = exact_log2 (l1 & -l1) + 1;
1302 break;
1303
1304 case CLZ:
1305 hv = 0;
1306 if (h1 != 0)
1307 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1308 - HOST_BITS_PER_WIDE_INT;
1309 else if (l1 != 0)
1310 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1311 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1312 lv = GET_MODE_BITSIZE (mode);
1313 break;
1314
1315 case CTZ:
1316 hv = 0;
1317 if (l1 != 0)
1318 lv = exact_log2 (l1 & -l1);
1319 else if (h1 != 0)
1320 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1321 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1322 lv = GET_MODE_BITSIZE (mode);
1323 break;
1324
1325 case POPCOUNT:
1326 hv = 0;
1327 lv = 0;
1328 while (l1)
1329 lv++, l1 &= l1 - 1;
1330 while (h1)
1331 lv++, h1 &= h1 - 1;
1332 break;
1333
1334 case PARITY:
1335 hv = 0;
1336 lv = 0;
1337 while (l1)
1338 lv++, l1 &= l1 - 1;
1339 while (h1)
1340 lv++, h1 &= h1 - 1;
1341 lv &= 1;
1342 break;
1343
1344 case BSWAP:
1345 {
1346 unsigned int s;
1347
1348 hv = 0;
1349 lv = 0;
1350 for (s = 0; s < width; s += 8)
1351 {
1352 unsigned int d = width - s - 8;
1353 unsigned HOST_WIDE_INT byte;
1354
1355 if (s < HOST_BITS_PER_WIDE_INT)
1356 byte = (l1 >> s) & 0xff;
1357 else
1358 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1359
1360 if (d < HOST_BITS_PER_WIDE_INT)
1361 lv |= byte << d;
1362 else
1363 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1364 }
1365 }
1366 break;
1367
1368 case TRUNCATE:
1369 /* This is just a change-of-mode, so do nothing. */
1370 lv = l1, hv = h1;
1371 break;
1372
1373 case ZERO_EXTEND:
1374 gcc_assert (op_mode != VOIDmode);
1375
1376 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1377 return 0;
1378
1379 hv = 0;
1380 lv = l1 & GET_MODE_MASK (op_mode);
1381 break;
1382
1383 case SIGN_EXTEND:
1384 if (op_mode == VOIDmode
1385 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1386 return 0;
1387 else
1388 {
1389 lv = l1 & GET_MODE_MASK (op_mode);
1390 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1391 && (lv & ((HOST_WIDE_INT) 1
1392 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1393 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1394
1395 hv = HWI_SIGN_EXTEND (lv);
1396 }
1397 break;
1398
1399 case SQRT:
1400 return 0;
1401
1402 default:
1403 return 0;
1404 }
1405
1406 return immed_double_const (lv, hv, mode);
1407 }
1408
1409 else if (GET_CODE (op) == CONST_DOUBLE
1410 && SCALAR_FLOAT_MODE_P (mode))
1411 {
1412 REAL_VALUE_TYPE d, t;
1413 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1414
1415 switch (code)
1416 {
1417 case SQRT:
1418 if (HONOR_SNANS (mode) && real_isnan (&d))
1419 return 0;
1420 real_sqrt (&t, mode, &d);
1421 d = t;
1422 break;
1423 case ABS:
1424 d = REAL_VALUE_ABS (d);
1425 break;
1426 case NEG:
1427 d = REAL_VALUE_NEGATE (d);
1428 break;
1429 case FLOAT_TRUNCATE:
1430 d = real_value_truncate (mode, d);
1431 break;
1432 case FLOAT_EXTEND:
1433 /* All this does is change the mode. */
1434 break;
1435 case FIX:
1436 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1437 break;
1438 case NOT:
1439 {
1440 long tmp[4];
1441 int i;
1442
1443 real_to_target (tmp, &d, GET_MODE (op));
1444 for (i = 0; i < 4; i++)
1445 tmp[i] = ~tmp[i];
1446 real_from_target (&d, tmp, mode);
1447 break;
1448 }
1449 default:
1450 gcc_unreachable ();
1451 }
1452 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1453 }
1454
1455 else if (GET_CODE (op) == CONST_DOUBLE
1456 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1457 && GET_MODE_CLASS (mode) == MODE_INT
1458 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1459 {
1460 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1461 operators are intentionally left unspecified (to ease implementation
1462 by target backends), for consistency, this routine implements the
1463 same semantics for constant folding as used by the middle-end. */
1464
1465 /* This was formerly used only for non-IEEE float.
1466 eggert@twinsun.com says it is safe for IEEE also. */
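      /* For example, folding (fix:QI X), where X is the SFmode constant
         300.0, yields (const_int 127): values beyond the signed bounds of
         MODE are clamped to them below.  */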
1467 HOST_WIDE_INT xh, xl, th, tl;
1468 REAL_VALUE_TYPE x, t;
1469 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1470 switch (code)
1471 {
1472 case FIX:
1473 if (REAL_VALUE_ISNAN (x))
1474 return const0_rtx;
1475
1476 /* Test against the signed upper bound. */
1477 if (width > HOST_BITS_PER_WIDE_INT)
1478 {
1479 th = ((unsigned HOST_WIDE_INT) 1
1480 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1481 tl = -1;
1482 }
1483 else
1484 {
1485 th = 0;
1486 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1487 }
1488 real_from_integer (&t, VOIDmode, tl, th, 0);
1489 if (REAL_VALUES_LESS (t, x))
1490 {
1491 xh = th;
1492 xl = tl;
1493 break;
1494 }
1495
1496 /* Test against the signed lower bound. */
1497 if (width > HOST_BITS_PER_WIDE_INT)
1498 {
1499 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1500 tl = 0;
1501 }
1502 else
1503 {
1504 th = -1;
1505 tl = (HOST_WIDE_INT) -1 << (width - 1);
1506 }
1507 real_from_integer (&t, VOIDmode, tl, th, 0);
1508 if (REAL_VALUES_LESS (x, t))
1509 {
1510 xh = th;
1511 xl = tl;
1512 break;
1513 }
1514 REAL_VALUE_TO_INT (&xl, &xh, x);
1515 break;
1516
1517 case UNSIGNED_FIX:
1518 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1519 return const0_rtx;
1520
1521 /* Test against the unsigned upper bound. */
1522 if (width == 2*HOST_BITS_PER_WIDE_INT)
1523 {
1524 th = -1;
1525 tl = -1;
1526 }
1527 else if (width >= HOST_BITS_PER_WIDE_INT)
1528 {
1529 th = ((unsigned HOST_WIDE_INT) 1
1530 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1531 tl = -1;
1532 }
1533 else
1534 {
1535 th = 0;
1536 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1537 }
1538 real_from_integer (&t, VOIDmode, tl, th, 1);
1539 if (REAL_VALUES_LESS (t, x))
1540 {
1541 xh = th;
1542 xl = tl;
1543 break;
1544 }
1545
1546 REAL_VALUE_TO_INT (&xl, &xh, x);
1547 break;
1548
1549 default:
1550 gcc_unreachable ();
1551 }
1552 return immed_double_const (xl, xh, mode);
1553 }
1554
1555 return NULL_RTX;
1556 }
1557 \f
1558 /* Subroutine of simplify_binary_operation to simplify a commutative,
1559 associative binary operation CODE with result mode MODE, operating
1560 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1561 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1562 canonicalization is possible. */
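/* For example, with OP0 = (plus:SI (reg:SI 1) (const_int 3)) and
   OP1 = (const_int 4), the "(a op b) op c" -> "a op (b op c)" step below
   combines the two constants, giving (plus:SI (reg:SI 1) (const_int 7)).  */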
1563
1564 static rtx
1565 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1566 rtx op0, rtx op1)
1567 {
1568 rtx tem;
1569
1570 /* Linearize the operator to the left. */
1571 if (GET_CODE (op1) == code)
1572 {
1573 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1574 if (GET_CODE (op0) == code)
1575 {
1576 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1577 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1578 }
1579
1580 /* "a op (b op c)" becomes "(b op c) op a". */
1581 if (! swap_commutative_operands_p (op1, op0))
1582 return simplify_gen_binary (code, mode, op1, op0);
1583
1584 tem = op0;
1585 op0 = op1;
1586 op1 = tem;
1587 }
1588
1589 if (GET_CODE (op0) == code)
1590 {
1591 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1592 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1593 {
1594 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1595 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1596 }
1597
1598 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1599 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1600 if (tem != 0)
1601 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1602
1603 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1604 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1605 if (tem != 0)
1606 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1607 }
1608
1609 return 0;
1610 }
1611
1612
1613 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1614 and OP1. Return 0 if no simplification is possible.
1615
1616 Don't use this for relational operations such as EQ or LT.
1617 Use simplify_relational_operation instead. */
1618 rtx
1619 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1620 rtx op0, rtx op1)
1621 {
1622 rtx trueop0, trueop1;
1623 rtx tem;
1624
1625 /* Relational operations don't work here. We must know the mode
1626 of the operands in order to do the comparison correctly.
1627 Assuming a full word can give incorrect results.
1628 Consider comparing 128 with -128 in QImode. */
1629 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1630 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1631
1632 /* Make sure the constant is second. */
1633 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1634 && swap_commutative_operands_p (op0, op1))
1635 {
1636 tem = op0, op0 = op1, op1 = tem;
1637 }
1638
1639 trueop0 = avoid_constant_pool_reference (op0);
1640 trueop1 = avoid_constant_pool_reference (op1);
1641
1642 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1643 if (tem)
1644 return tem;
1645 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1646 }
1647
1648 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1649 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1650 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1651 actual constants. */
1652
1653 static rtx
1654 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1655 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1656 {
1657 rtx tem, reversed, opleft, opright;
1658 HOST_WIDE_INT val;
1659 unsigned int width = GET_MODE_BITSIZE (mode);
1660
1661 /* Even if we can't compute a constant result,
1662 there are some cases worth simplifying. */
1663
1664 switch (code)
1665 {
1666 case PLUS:
1667 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1668 when x is NaN, infinite, or finite and nonzero. They aren't
1669 when x is -0 and the rounding mode is not towards -infinity,
1670 since (-0) + 0 is then 0. */
1671 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1672 return op0;
1673
1674 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1675 transformations are safe even for IEEE. */
1676 if (GET_CODE (op0) == NEG)
1677 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1678 else if (GET_CODE (op1) == NEG)
1679 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1680
1681 /* (~a) + 1 -> -a */
1682 if (INTEGRAL_MODE_P (mode)
1683 && GET_CODE (op0) == NOT
1684 && trueop1 == const1_rtx)
1685 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1686
1687 /* Handle both-operands-constant cases. We can only add
1688 CONST_INTs to constants since the sum of relocatable symbols
1689 can't be handled by most assemblers. Don't add CONST_INT
1690 to CONST_INT since overflow won't be computed properly if wider
1691 than HOST_BITS_PER_WIDE_INT. */
1692
1693 if ((GET_CODE (op0) == CONST
1694 || GET_CODE (op0) == SYMBOL_REF
1695 || GET_CODE (op0) == LABEL_REF)
1696 && CONST_INT_P (op1))
1697 return plus_constant (op0, INTVAL (op1));
1698 else if ((GET_CODE (op1) == CONST
1699 || GET_CODE (op1) == SYMBOL_REF
1700 || GET_CODE (op1) == LABEL_REF)
1701 && CONST_INT_P (op0))
1702 return plus_constant (op1, INTVAL (op0));
1703
1704 /* See if this is something like X * C - X or vice versa or
1705 if the multiplication is written as a shift. If so, we can
1706 distribute and make a new multiply, shift, or maybe just
1707 have X (if C is 2 in the example above). But don't make
1708 something more expensive than we had before. */
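      /* For example, (plus (mult X (const_int 3)) (mult X (const_int 2)))
         combines the coefficients into (mult X (const_int 5)), provided the
         result is no more expensive than the original expression.  */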
1709
1710 if (SCALAR_INT_MODE_P (mode))
1711 {
1712 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1713 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1714 rtx lhs = op0, rhs = op1;
1715
1716 if (GET_CODE (lhs) == NEG)
1717 {
1718 coeff0l = -1;
1719 coeff0h = -1;
1720 lhs = XEXP (lhs, 0);
1721 }
1722 else if (GET_CODE (lhs) == MULT
1723 && CONST_INT_P (XEXP (lhs, 1)))
1724 {
1725 coeff0l = INTVAL (XEXP (lhs, 1));
1726 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1727 lhs = XEXP (lhs, 0);
1728 }
1729 else if (GET_CODE (lhs) == ASHIFT
1730 && CONST_INT_P (XEXP (lhs, 1))
1731 && INTVAL (XEXP (lhs, 1)) >= 0
1732 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1733 {
1734 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1735 coeff0h = 0;
1736 lhs = XEXP (lhs, 0);
1737 }
1738
1739 if (GET_CODE (rhs) == NEG)
1740 {
1741 coeff1l = -1;
1742 coeff1h = -1;
1743 rhs = XEXP (rhs, 0);
1744 }
1745 else if (GET_CODE (rhs) == MULT
1746 && CONST_INT_P (XEXP (rhs, 1)))
1747 {
1748 coeff1l = INTVAL (XEXP (rhs, 1));
1749 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1750 rhs = XEXP (rhs, 0);
1751 }
1752 else if (GET_CODE (rhs) == ASHIFT
1753 && CONST_INT_P (XEXP (rhs, 1))
1754 && INTVAL (XEXP (rhs, 1)) >= 0
1755 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1756 {
1757 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1758 coeff1h = 0;
1759 rhs = XEXP (rhs, 0);
1760 }
1761
1762 if (rtx_equal_p (lhs, rhs))
1763 {
1764 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1765 rtx coeff;
1766 unsigned HOST_WIDE_INT l;
1767 HOST_WIDE_INT h;
1768 bool speed = optimize_function_for_speed_p (cfun);
1769
1770 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1771 coeff = immed_double_const (l, h, mode);
1772
1773 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1774 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1775 ? tem : 0;
1776 }
1777 }
1778
1779 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1780 if ((CONST_INT_P (op1)
1781 || GET_CODE (op1) == CONST_DOUBLE)
1782 && GET_CODE (op0) == XOR
1783 && (CONST_INT_P (XEXP (op0, 1))
1784 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1785 && mode_signbit_p (mode, op1))
1786 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1787 simplify_gen_binary (XOR, mode, op1,
1788 XEXP (op0, 1)));
1789
1790 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1791 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1792 && GET_CODE (op0) == MULT
1793 && GET_CODE (XEXP (op0, 0)) == NEG)
1794 {
1795 rtx in1, in2;
1796
1797 in1 = XEXP (XEXP (op0, 0), 0);
1798 in2 = XEXP (op0, 1);
1799 return simplify_gen_binary (MINUS, mode, op1,
1800 simplify_gen_binary (MULT, mode,
1801 in1, in2));
1802 }
1803
1804 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1805 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1806 is 1. */
1807 if (COMPARISON_P (op0)
1808 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1809 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1810 && (reversed = reversed_comparison (op0, mode)))
1811 return
1812 simplify_gen_unary (NEG, mode, reversed, mode);
1813
1814 /* If one of the operands is a PLUS or a MINUS, see if we can
1815 simplify this by the associative law.
1816 Don't use the associative law for floating point.
1817 The inaccuracy makes it nonassociative,
1818 and subtle programs can break if operations are associated. */
1819
1820 if (INTEGRAL_MODE_P (mode)
1821 && (plus_minus_operand_p (op0)
1822 || plus_minus_operand_p (op1))
1823 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1824 return tem;
1825
1826 /* Reassociate floating point addition only when the user
1827 specifies associative math operations. */
1828 if (FLOAT_MODE_P (mode)
1829 && flag_associative_math)
1830 {
1831 tem = simplify_associative_operation (code, mode, op0, op1);
1832 if (tem)
1833 return tem;
1834 }
1835 break;
1836
1837 case COMPARE:
1838 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1839 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1840 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1841 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1842 {
1843 rtx xop00 = XEXP (op0, 0);
1844 rtx xop10 = XEXP (op1, 0);
1845
1846 #ifdef HAVE_cc0
1847 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1848 #else
1849 if (REG_P (xop00) && REG_P (xop10)
1850 && GET_MODE (xop00) == GET_MODE (xop10)
1851 && REGNO (xop00) == REGNO (xop10)
1852 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1853 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1854 #endif
1855 return xop00;
1856 }
1857 break;
1858
1859 case MINUS:
1860 /* We can't assume x-x is 0 even with non-IEEE floating point,
1861 but since it is zero except in very strange circumstances, we
1862 will treat it as zero with -ffinite-math-only. */
1863 if (rtx_equal_p (trueop0, trueop1)
1864 && ! side_effects_p (op0)
1865 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1866 return CONST0_RTX (mode);
1867
1868 /* Change subtraction from zero into negation. (0 - x) is the
1869 same as -x when x is NaN, infinite, or finite and nonzero.
1870 But if the mode has signed zeros, and does not round towards
1871 -infinity, then 0 - 0 is 0, not -0. */
1872 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1873 return simplify_gen_unary (NEG, mode, op1, mode);
1874
1875 /* (-1 - a) is ~a. */
1876 if (trueop0 == constm1_rtx)
1877 return simplify_gen_unary (NOT, mode, op1, mode);
1878
1879 /* Subtracting 0 has no effect unless the mode has signed zeros
1880 and supports rounding towards -infinity. In such a case,
1881 0 - 0 is -0. */
1882 if (!(HONOR_SIGNED_ZEROS (mode)
1883 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1884 && trueop1 == CONST0_RTX (mode))
1885 return op0;
1886
1887 /* See if this is something like X * C - X or vice versa or
1888 if the multiplication is written as a shift. If so, we can
1889 distribute and make a new multiply, shift, or maybe just
1890 have X (if C is 2 in the example above). But don't make
1891 something more expensive than we had before. */
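	 /* For example, (minus (mult X (const_int 4)) X) can become
	    (mult X (const_int 3)) when that is not more expensive.  */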
1892
1893 if (SCALAR_INT_MODE_P (mode))
1894 {
1895 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1896 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1897 rtx lhs = op0, rhs = op1;
1898
1899 if (GET_CODE (lhs) == NEG)
1900 {
1901 coeff0l = -1;
1902 coeff0h = -1;
1903 lhs = XEXP (lhs, 0);
1904 }
1905 else if (GET_CODE (lhs) == MULT
1906 && CONST_INT_P (XEXP (lhs, 1)))
1907 {
1908 coeff0l = INTVAL (XEXP (lhs, 1));
1909 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1910 lhs = XEXP (lhs, 0);
1911 }
1912 else if (GET_CODE (lhs) == ASHIFT
1913 && CONST_INT_P (XEXP (lhs, 1))
1914 && INTVAL (XEXP (lhs, 1)) >= 0
1915 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1916 {
1917 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1918 coeff0h = 0;
1919 lhs = XEXP (lhs, 0);
1920 }
1921
1922 if (GET_CODE (rhs) == NEG)
1923 {
1924 negcoeff1l = 1;
1925 negcoeff1h = 0;
1926 rhs = XEXP (rhs, 0);
1927 }
1928 else if (GET_CODE (rhs) == MULT
1929 && CONST_INT_P (XEXP (rhs, 1)))
1930 {
1931 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1932 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1933 rhs = XEXP (rhs, 0);
1934 }
1935 else if (GET_CODE (rhs) == ASHIFT
1936 && CONST_INT_P (XEXP (rhs, 1))
1937 && INTVAL (XEXP (rhs, 1)) >= 0
1938 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1939 {
1940 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1941 negcoeff1h = -1;
1942 rhs = XEXP (rhs, 0);
1943 }
1944
1945 if (rtx_equal_p (lhs, rhs))
1946 {
1947 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1948 rtx coeff;
1949 unsigned HOST_WIDE_INT l;
1950 HOST_WIDE_INT h;
1951 bool speed = optimize_function_for_speed_p (cfun);
1952
1953 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1954 coeff = immed_double_const (l, h, mode);
1955
1956 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1957 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1958 ? tem : 0;
1959 }
1960 }
1961
1962 /* (a - (-b)) -> (a + b). True even for IEEE. */
1963 if (GET_CODE (op1) == NEG)
1964 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1965
1966 /* (-x - c) may be simplified as (-c - x). */
1967 if (GET_CODE (op0) == NEG
1968 && (CONST_INT_P (op1)
1969 || GET_CODE (op1) == CONST_DOUBLE))
1970 {
1971 tem = simplify_unary_operation (NEG, mode, op1, mode);
1972 if (tem)
1973 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1974 }
1975
1976 /* Don't let a relocatable value get a negative coeff. */
1977 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
1978 return simplify_gen_binary (PLUS, mode,
1979 op0,
1980 neg_const_int (mode, op1));
1981
1982 /* (x - (x & y)) -> (x & ~y) */
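	 /* For example, (minus X (and X (const_int 4))) becomes
	    (and X (const_int -5)).  */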
1983 if (GET_CODE (op1) == AND)
1984 {
1985 if (rtx_equal_p (op0, XEXP (op1, 0)))
1986 {
1987 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1988 GET_MODE (XEXP (op1, 1)));
1989 return simplify_gen_binary (AND, mode, op0, tem);
1990 }
1991 if (rtx_equal_p (op0, XEXP (op1, 1)))
1992 {
1993 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1994 GET_MODE (XEXP (op1, 0)));
1995 return simplify_gen_binary (AND, mode, op0, tem);
1996 }
1997 }
1998
1999 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2000 by reversing the comparison code if valid. */
2001 if (STORE_FLAG_VALUE == 1
2002 && trueop0 == const1_rtx
2003 && COMPARISON_P (op1)
2004 && (reversed = reversed_comparison (op1, mode)))
2005 return reversed;
2006
2007 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2008 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2009 && GET_CODE (op1) == MULT
2010 && GET_CODE (XEXP (op1, 0)) == NEG)
2011 {
2012 rtx in1, in2;
2013
2014 in1 = XEXP (XEXP (op1, 0), 0);
2015 in2 = XEXP (op1, 1);
2016 return simplify_gen_binary (PLUS, mode,
2017 simplify_gen_binary (MULT, mode,
2018 in1, in2),
2019 op0);
2020 }
2021
2022 /* Canonicalize (minus (neg A) (mult B C)) to
2023 (minus (mult (neg B) C) A). */
2024 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2025 && GET_CODE (op1) == MULT
2026 && GET_CODE (op0) == NEG)
2027 {
2028 rtx in1, in2;
2029
2030 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2031 in2 = XEXP (op1, 1);
2032 return simplify_gen_binary (MINUS, mode,
2033 simplify_gen_binary (MULT, mode,
2034 in1, in2),
2035 XEXP (op0, 0));
2036 }
2037
2038 /* If one of the operands is a PLUS or a MINUS, see if we can
2039 simplify this by the associative law. This will, for example,
2040 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2041 Don't use the associative law for floating point.
2042 The inaccuracy makes it nonassociative,
2043 and subtle programs can break if operations are associated. */
2044
2045 if (INTEGRAL_MODE_P (mode)
2046 && (plus_minus_operand_p (op0)
2047 || plus_minus_operand_p (op1))
2048 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2049 return tem;
2050 break;
2051
2052 case MULT:
2053 if (trueop1 == constm1_rtx)
2054 return simplify_gen_unary (NEG, mode, op0, mode);
2055
2056 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2057 x is NaN, since x * 0 is then also NaN. Nor is it valid
2058 when the mode has signed zeros, since multiplying a negative
2059 number by 0 will give -0, not 0. */
2060 if (!HONOR_NANS (mode)
2061 && !HONOR_SIGNED_ZEROS (mode)
2062 && trueop1 == CONST0_RTX (mode)
2063 && ! side_effects_p (op0))
2064 return op1;
2065
2066 /* In IEEE floating point, x*1 is not equivalent to x for
2067 signalling NaNs. */
2068 if (!HONOR_SNANS (mode)
2069 && trueop1 == CONST1_RTX (mode))
2070 return op0;
2071
2072 /* Convert multiply by constant power of two into shift unless
2073 we are still generating RTL. This test is a kludge. */
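	 /* For example, (mult X (const_int 8)) becomes
	    (ashift X (const_int 3)).  */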
2074 if (CONST_INT_P (trueop1)
2075 && (val = exact_log2 (INTVAL (trueop1))) >= 0
2076 /* If the mode is larger than the host word size, and the
2077 uppermost bit is set, then this isn't a power of two due
2078 to implicit sign extension. */
2079 && (width <= HOST_BITS_PER_WIDE_INT
2080 || val != HOST_BITS_PER_WIDE_INT - 1))
2081 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2082
2083 /* Likewise for multipliers wider than a word. */
2084 if (GET_CODE (trueop1) == CONST_DOUBLE
2085 && (GET_MODE (trueop1) == VOIDmode
2086 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2087 && GET_MODE (op0) == mode
2088 && CONST_DOUBLE_LOW (trueop1) == 0
2089 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2090 return simplify_gen_binary (ASHIFT, mode, op0,
2091 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2092
2093 /* x*2 is x+x and x*(-1) is -x */
2094 if (GET_CODE (trueop1) == CONST_DOUBLE
2095 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2096 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2097 && GET_MODE (op0) == mode)
2098 {
2099 REAL_VALUE_TYPE d;
2100 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2101
2102 if (REAL_VALUES_EQUAL (d, dconst2))
2103 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2104
2105 if (!HONOR_SNANS (mode)
2106 && REAL_VALUES_EQUAL (d, dconstm1))
2107 return simplify_gen_unary (NEG, mode, op0, mode);
2108 }
2109
2110 /* Optimize -x * -x as x * x. */
2111 if (FLOAT_MODE_P (mode)
2112 && GET_CODE (op0) == NEG
2113 && GET_CODE (op1) == NEG
2114 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2115 && !side_effects_p (XEXP (op0, 0)))
2116 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2117
2118 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2119 if (SCALAR_FLOAT_MODE_P (mode)
2120 && GET_CODE (op0) == ABS
2121 && GET_CODE (op1) == ABS
2122 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2123 && !side_effects_p (XEXP (op0, 0)))
2124 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2125
2126 /* Reassociate multiplication, but for floating point MULTs
2127 only when the user specifies unsafe math optimizations. */
2128 if (! FLOAT_MODE_P (mode)
2129 || flag_unsafe_math_optimizations)
2130 {
2131 tem = simplify_associative_operation (code, mode, op0, op1);
2132 if (tem)
2133 return tem;
2134 }
2135 break;
2136
2137 case IOR:
2138 if (trueop1 == const0_rtx)
2139 return op0;
2140 if (CONST_INT_P (trueop1)
2141 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2142 == GET_MODE_MASK (mode)))
2143 return op1;
2144 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2145 return op0;
2146 /* A | (~A) -> -1 */
2147 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2148 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2149 && ! side_effects_p (op0)
2150 && SCALAR_INT_MODE_P (mode))
2151 return constm1_rtx;
2152
2153 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2154 if (CONST_INT_P (op1)
2155 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2156 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2157 return op1;
2158
2159 /* Canonicalize (X & C1) | C2. */
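	 /* For example, (ior (and X (const_int 0x0f)) (const_int 0x05)) becomes
	    (ior (and X (const_int 0x0a)) (const_int 0x05)) by the last rule
	    below.  */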
2160 if (GET_CODE (op0) == AND
2161 && CONST_INT_P (trueop1)
2162 && CONST_INT_P (XEXP (op0, 1)))
2163 {
2164 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2165 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2166 HOST_WIDE_INT c2 = INTVAL (trueop1);
2167
2168 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2169 if ((c1 & c2) == c1
2170 && !side_effects_p (XEXP (op0, 0)))
2171 return trueop1;
2172
2173 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2174 if (((c1|c2) & mask) == mask)
2175 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2176
2177 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2178 if (((c1 & ~c2) & mask) != (c1 & mask))
2179 {
2180 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2181 gen_int_mode (c1 & ~c2, mode));
2182 return simplify_gen_binary (IOR, mode, tem, op1);
2183 }
2184 }
2185
2186 /* Convert (A & B) | A to A. */
2187 if (GET_CODE (op0) == AND
2188 && (rtx_equal_p (XEXP (op0, 0), op1)
2189 || rtx_equal_p (XEXP (op0, 1), op1))
2190 && ! side_effects_p (XEXP (op0, 0))
2191 && ! side_effects_p (XEXP (op0, 1)))
2192 return op1;
2193
2194 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2195 mode size to (rotate A CX). */
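	 /* For example, in SImode (ior (ashift A (const_int 3))
	    (lshiftrt A (const_int 29))) becomes (rotate A (const_int 3)).  */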
2196
2197 if (GET_CODE (op1) == ASHIFT
2198 || GET_CODE (op1) == SUBREG)
2199 {
2200 opleft = op1;
2201 opright = op0;
2202 }
2203 else
2204 {
2205 opright = op1;
2206 opleft = op0;
2207 }
2208
2209 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2210 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2211 && CONST_INT_P (XEXP (opleft, 1))
2212 && CONST_INT_P (XEXP (opright, 1))
2213 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2214 == GET_MODE_BITSIZE (mode)))
2215 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2216
2217 /* Same, but for ashift that has been "simplified" to a wider mode
2218 by simplify_shift_const. */
2219
2220 if (GET_CODE (opleft) == SUBREG
2221 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2222 && GET_CODE (opright) == LSHIFTRT
2223 && GET_CODE (XEXP (opright, 0)) == SUBREG
2224 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2225 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2226 && (GET_MODE_SIZE (GET_MODE (opleft))
2227 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2228 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2229 SUBREG_REG (XEXP (opright, 0)))
2230 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2231 && CONST_INT_P (XEXP (opright, 1))
2232 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2233 == GET_MODE_BITSIZE (mode)))
2234 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2235 XEXP (SUBREG_REG (opleft), 1));
2236
2237 /* If we have (ior (and X C1) C2), simplify this by making
2238 C1 as small as possible if C1 actually changes. */
2239 if (CONST_INT_P (op1)
2240 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2241 || INTVAL (op1) > 0)
2242 && GET_CODE (op0) == AND
2243 && CONST_INT_P (XEXP (op0, 1))
2244 && CONST_INT_P (op1)
2245 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2246 return simplify_gen_binary (IOR, mode,
2247 simplify_gen_binary
2248 (AND, mode, XEXP (op0, 0),
2249 GEN_INT (INTVAL (XEXP (op0, 1))
2250 & ~INTVAL (op1))),
2251 op1);
2252
2253 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2254 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2255 the PLUS does not affect any of the bits in OP1: then we can do
2256 the IOR as a PLUS and we can associate. This is valid if OP1
2257 can be safely shifted left C bits. */
2258 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2259 && GET_CODE (XEXP (op0, 0)) == PLUS
2260 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2261 && CONST_INT_P (XEXP (op0, 1))
2262 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2263 {
2264 int count = INTVAL (XEXP (op0, 1));
2265 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2266
2267 if (mask >> count == INTVAL (trueop1)
2268 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2269 return simplify_gen_binary (ASHIFTRT, mode,
2270 plus_constant (XEXP (op0, 0), mask),
2271 XEXP (op0, 1));
2272 }
2273
2274 tem = simplify_associative_operation (code, mode, op0, op1);
2275 if (tem)
2276 return tem;
2277 break;
2278
2279 case XOR:
2280 if (trueop1 == const0_rtx)
2281 return op0;
2282 if (CONST_INT_P (trueop1)
2283 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2284 == GET_MODE_MASK (mode)))
2285 return simplify_gen_unary (NOT, mode, op0, mode);
2286 if (rtx_equal_p (trueop0, trueop1)
2287 && ! side_effects_p (op0)
2288 && GET_MODE_CLASS (mode) != MODE_CC)
2289 return CONST0_RTX (mode);
2290
2291 /* Canonicalize XOR of the most significant bit to PLUS. */
2292 if ((CONST_INT_P (op1)
2293 || GET_CODE (op1) == CONST_DOUBLE)
2294 && mode_signbit_p (mode, op1))
2295 return simplify_gen_binary (PLUS, mode, op0, op1);
2296 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2297 if ((CONST_INT_P (op1)
2298 || GET_CODE (op1) == CONST_DOUBLE)
2299 && GET_CODE (op0) == PLUS
2300 && (CONST_INT_P (XEXP (op0, 1))
2301 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2302 && mode_signbit_p (mode, XEXP (op0, 1)))
2303 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2304 simplify_gen_binary (XOR, mode, op1,
2305 XEXP (op0, 1)));
2306
2307 /* If we are XORing two things that have no bits in common,
2308 convert them into an IOR. This helps to detect rotation encoded
2309 using those methods and possibly other simplifications. */
2310
2311 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2312 && (nonzero_bits (op0, mode)
2313 & nonzero_bits (op1, mode)) == 0)
2314 return (simplify_gen_binary (IOR, mode, op0, op1));
2315
2316 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2317 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2318 (NOT y). */
2319 {
2320 int num_negated = 0;
2321
2322 if (GET_CODE (op0) == NOT)
2323 num_negated++, op0 = XEXP (op0, 0);
2324 if (GET_CODE (op1) == NOT)
2325 num_negated++, op1 = XEXP (op1, 0);
2326
2327 if (num_negated == 2)
2328 return simplify_gen_binary (XOR, mode, op0, op1);
2329 else if (num_negated == 1)
2330 return simplify_gen_unary (NOT, mode,
2331 simplify_gen_binary (XOR, mode, op0, op1),
2332 mode);
2333 }
2334
2335 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2336 correspond to a machine insn or result in further simplifications
2337 if B is a constant. */
2338
2339 if (GET_CODE (op0) == AND
2340 && rtx_equal_p (XEXP (op0, 1), op1)
2341 && ! side_effects_p (op1))
2342 return simplify_gen_binary (AND, mode,
2343 simplify_gen_unary (NOT, mode,
2344 XEXP (op0, 0), mode),
2345 op1);
2346
2347 else if (GET_CODE (op0) == AND
2348 && rtx_equal_p (XEXP (op0, 0), op1)
2349 && ! side_effects_p (op1))
2350 return simplify_gen_binary (AND, mode,
2351 simplify_gen_unary (NOT, mode,
2352 XEXP (op0, 1), mode),
2353 op1);
2354
2355 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2356 comparison if STORE_FLAG_VALUE is 1. */
2357 if (STORE_FLAG_VALUE == 1
2358 && trueop1 == const1_rtx
2359 && COMPARISON_P (op0)
2360 && (reversed = reversed_comparison (op0, mode)))
2361 return reversed;
2362
2363 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2364 is (lt foo (const_int 0)), so we can perform the above
2365 simplification if STORE_FLAG_VALUE is 1. */
2366
2367 if (STORE_FLAG_VALUE == 1
2368 && trueop1 == const1_rtx
2369 && GET_CODE (op0) == LSHIFTRT
2370 && CONST_INT_P (XEXP (op0, 1))
2371 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2372 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2373
2374 /* (xor (comparison foo bar) (const_int sign-bit))
2375 when STORE_FLAG_VALUE is the sign bit. */
2376 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2377 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2378 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2379 && trueop1 == const_true_rtx
2380 && COMPARISON_P (op0)
2381 && (reversed = reversed_comparison (op0, mode)))
2382 return reversed;
2383
2384 tem = simplify_associative_operation (code, mode, op0, op1);
2385 if (tem)
2386 return tem;
2387 break;
2388
2389 case AND:
2390 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2391 return trueop1;
2392 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2393 {
2394 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2395 HOST_WIDE_INT nzop1;
2396 if (CONST_INT_P (trueop1))
2397 {
2398 HOST_WIDE_INT val1 = INTVAL (trueop1);
2399 /* If we are turning off bits already known off in OP0, we need
2400 not do an AND. */
2401 if ((nzop0 & ~val1) == 0)
2402 return op0;
2403 }
2404 nzop1 = nonzero_bits (trueop1, mode);
2405 /* If we are clearing all the nonzero bits, the result is zero. */
2406 if ((nzop1 & nzop0) == 0
2407 && !side_effects_p (op0) && !side_effects_p (op1))
2408 return CONST0_RTX (mode);
2409 }
2410 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2411 && GET_MODE_CLASS (mode) != MODE_CC)
2412 return op0;
2413 /* A & (~A) -> 0 */
2414 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2415 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2416 && ! side_effects_p (op0)
2417 && GET_MODE_CLASS (mode) != MODE_CC)
2418 return CONST0_RTX (mode);
2419
2420 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2421 there are no nonzero bits of C outside of X's mode. */
2422 if ((GET_CODE (op0) == SIGN_EXTEND
2423 || GET_CODE (op0) == ZERO_EXTEND)
2424 && CONST_INT_P (trueop1)
2425 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2426 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2427 & INTVAL (trueop1)) == 0)
2428 {
2429 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2430 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2431 gen_int_mode (INTVAL (trueop1),
2432 imode));
2433 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2434 }
2435
2436 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2437 we might be able to further simplify the AND with X and potentially
2438 remove the truncation altogether. */
2439 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2440 {
2441 rtx x = XEXP (op0, 0);
2442 enum machine_mode xmode = GET_MODE (x);
2443 tem = simplify_gen_binary (AND, xmode, x,
2444 gen_int_mode (INTVAL (trueop1), xmode));
2445 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2446 }
2447
2448 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2449 if (GET_CODE (op0) == IOR
2450 && CONST_INT_P (trueop1)
2451 && CONST_INT_P (XEXP (op0, 1)))
2452 {
2453 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2454 return simplify_gen_binary (IOR, mode,
2455 simplify_gen_binary (AND, mode,
2456 XEXP (op0, 0), op1),
2457 gen_int_mode (tmp, mode));
2458 }
2459
2460 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2461 insn (and may simplify more). */
2462 if (GET_CODE (op0) == XOR
2463 && rtx_equal_p (XEXP (op0, 0), op1)
2464 && ! side_effects_p (op1))
2465 return simplify_gen_binary (AND, mode,
2466 simplify_gen_unary (NOT, mode,
2467 XEXP (op0, 1), mode),
2468 op1);
2469
2470 if (GET_CODE (op0) == XOR
2471 && rtx_equal_p (XEXP (op0, 1), op1)
2472 && ! side_effects_p (op1))
2473 return simplify_gen_binary (AND, mode,
2474 simplify_gen_unary (NOT, mode,
2475 XEXP (op0, 0), mode),
2476 op1);
2477
2478 /* Similarly for (~(A ^ B)) & A. */
2479 if (GET_CODE (op0) == NOT
2480 && GET_CODE (XEXP (op0, 0)) == XOR
2481 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2482 && ! side_effects_p (op1))
2483 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2484
2485 if (GET_CODE (op0) == NOT
2486 && GET_CODE (XEXP (op0, 0)) == XOR
2487 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2488 && ! side_effects_p (op1))
2489 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2490
2491 /* Convert (A | B) & A to A. */
2492 if (GET_CODE (op0) == IOR
2493 && (rtx_equal_p (XEXP (op0, 0), op1)
2494 || rtx_equal_p (XEXP (op0, 1), op1))
2495 && ! side_effects_p (XEXP (op0, 0))
2496 && ! side_effects_p (XEXP (op0, 1)))
2497 return op1;
2498
2499 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2500 ((A & N) + B) & M -> (A + B) & M
2501 Similarly if (N & M) == 0,
2502 ((A | N) + B) & M -> (A + B) & M
2503 and for - instead of + and/or ^ instead of |.
2504 Also, if (N & M) == 0, then
2505 (A +- N) & M -> A & M. */
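	 For example, with M == 0xff and N == 0x1ff, ((A & N) + B) & M becomes
	 (A + B) & M since (N & M) == M; with N == 0x100, (A + N) & M becomes
	 A & M since (N & M) == 0.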
2506 if (CONST_INT_P (trueop1)
2507 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2508 && ~INTVAL (trueop1)
2509 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2510 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2511 {
2512 rtx pmop[2];
2513 int which;
2514
2515 pmop[0] = XEXP (op0, 0);
2516 pmop[1] = XEXP (op0, 1);
2517
2518 if (CONST_INT_P (pmop[1])
2519 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2520 return simplify_gen_binary (AND, mode, pmop[0], op1);
2521
2522 for (which = 0; which < 2; which++)
2523 {
2524 tem = pmop[which];
2525 switch (GET_CODE (tem))
2526 {
2527 case AND:
2528 if (CONST_INT_P (XEXP (tem, 1))
2529 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2530 == INTVAL (trueop1))
2531 pmop[which] = XEXP (tem, 0);
2532 break;
2533 case IOR:
2534 case XOR:
2535 if (CONST_INT_P (XEXP (tem, 1))
2536 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2537 pmop[which] = XEXP (tem, 0);
2538 break;
2539 default:
2540 break;
2541 }
2542 }
2543
2544 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2545 {
2546 tem = simplify_gen_binary (GET_CODE (op0), mode,
2547 pmop[0], pmop[1]);
2548 return simplify_gen_binary (code, mode, tem, op1);
2549 }
2550 }
2551
2552 /* (and X (ior (not X) Y)) -> (and X Y) */
2553 if (GET_CODE (op1) == IOR
2554 && GET_CODE (XEXP (op1, 0)) == NOT
2555 && op0 == XEXP (XEXP (op1, 0), 0))
2556 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2557
2558 /* (and (ior (not X) Y) X) -> (and X Y) */
2559 if (GET_CODE (op0) == IOR
2560 && GET_CODE (XEXP (op0, 0)) == NOT
2561 && op1 == XEXP (XEXP (op0, 0), 0))
2562 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2563
2564 tem = simplify_associative_operation (code, mode, op0, op1);
2565 if (tem)
2566 return tem;
2567 break;
2568
2569 case UDIV:
2570 /* 0/x is 0 (or x&0 if x has side-effects). */
2571 if (trueop0 == CONST0_RTX (mode))
2572 {
2573 if (side_effects_p (op1))
2574 return simplify_gen_binary (AND, mode, op1, trueop0);
2575 return trueop0;
2576 }
2577 /* x/1 is x. */
2578 if (trueop1 == CONST1_RTX (mode))
2579 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2580 /* Convert divide by power of two into shift. */
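	 /* For example, (udiv X (const_int 16)) becomes
	    (lshiftrt X (const_int 4)).  */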
2581 if (CONST_INT_P (trueop1)
2582 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2583 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2584 break;
2585
2586 case DIV:
2587 /* Handle floating point and integers separately. */
2588 if (SCALAR_FLOAT_MODE_P (mode))
2589 {
2590 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2591 safe for modes with NaNs, since 0.0 / 0.0 will then be
2592 NaN rather than 0.0. Nor is it safe for modes with signed
2593 zeros, since dividing 0 by a negative number gives -0.0. */
2594 if (trueop0 == CONST0_RTX (mode)
2595 && !HONOR_NANS (mode)
2596 && !HONOR_SIGNED_ZEROS (mode)
2597 && ! side_effects_p (op1))
2598 return op0;
2599 /* x/1.0 is x. */
2600 if (trueop1 == CONST1_RTX (mode)
2601 && !HONOR_SNANS (mode))
2602 return op0;
2603
2604 if (GET_CODE (trueop1) == CONST_DOUBLE
2605 && trueop1 != CONST0_RTX (mode))
2606 {
2607 REAL_VALUE_TYPE d;
2608 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2609
2610 /* x/-1.0 is -x. */
2611 if (REAL_VALUES_EQUAL (d, dconstm1)
2612 && !HONOR_SNANS (mode))
2613 return simplify_gen_unary (NEG, mode, op0, mode);
2614
2615 /* Change FP division by a constant into multiplication.
2616 Only do this with -freciprocal-math. */
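	     /* For example, (div X 4.0) becomes (mult X 0.25).  */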
2617 if (flag_reciprocal_math
2618 && !REAL_VALUES_EQUAL (d, dconst0))
2619 {
2620 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2621 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2622 return simplify_gen_binary (MULT, mode, op0, tem);
2623 }
2624 }
2625 }
2626 else
2627 {
2628 /* 0/x is 0 (or x&0 if x has side-effects). */
2629 if (trueop0 == CONST0_RTX (mode))
2630 {
2631 if (side_effects_p (op1))
2632 return simplify_gen_binary (AND, mode, op1, trueop0);
2633 return trueop0;
2634 }
2635 /* x/1 is x. */
2636 if (trueop1 == CONST1_RTX (mode))
2637 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2638 /* x/-1 is -x. */
2639 if (trueop1 == constm1_rtx)
2640 {
2641 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2642 return simplify_gen_unary (NEG, mode, x, mode);
2643 }
2644 }
2645 break;
2646
2647 case UMOD:
2648 /* 0%x is 0 (or x&0 if x has side-effects). */
2649 if (trueop0 == CONST0_RTX (mode))
2650 {
2651 if (side_effects_p (op1))
2652 return simplify_gen_binary (AND, mode, op1, trueop0);
2653 return trueop0;
2654 }
2655 /* x%1 is 0 (or x&0 if x has side-effects). */
2656 if (trueop1 == CONST1_RTX (mode))
2657 {
2658 if (side_effects_p (op0))
2659 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2660 return CONST0_RTX (mode);
2661 }
2662 /* Implement modulus by power of two as AND. */
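	 /* For example, (umod X (const_int 8)) becomes
	    (and X (const_int 7)).  */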
2663 if (CONST_INT_P (trueop1)
2664 && exact_log2 (INTVAL (trueop1)) > 0)
2665 return simplify_gen_binary (AND, mode, op0,
2666 GEN_INT (INTVAL (op1) - 1));
2667 break;
2668
2669 case MOD:
2670 /* 0%x is 0 (or x&0 if x has side-effects). */
2671 if (trueop0 == CONST0_RTX (mode))
2672 {
2673 if (side_effects_p (op1))
2674 return simplify_gen_binary (AND, mode, op1, trueop0);
2675 return trueop0;
2676 }
2677 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2678 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2679 {
2680 if (side_effects_p (op0))
2681 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2682 return CONST0_RTX (mode);
2683 }
2684 break;
2685
2686 case ROTATERT:
2687 case ROTATE:
2688 case ASHIFTRT:
2689 if (trueop1 == CONST0_RTX (mode))
2690 return op0;
2691 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2692 return op0;
2693 /* Rotating ~0 always results in ~0. */
2694 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2695 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2696 && ! side_effects_p (op1))
2697 return op0;
2698 canonicalize_shift:
2699 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2700 {
2701 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2702 if (val != INTVAL (op1))
2703 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2704 }
2705 break;
2706
2707 case ASHIFT:
2708 case SS_ASHIFT:
2709 case US_ASHIFT:
2710 if (trueop1 == CONST0_RTX (mode))
2711 return op0;
2712 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2713 return op0;
2714 goto canonicalize_shift;
2715
2716 case LSHIFTRT:
2717 if (trueop1 == CONST0_RTX (mode))
2718 return op0;
2719 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2720 return op0;
2721 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
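	 /* If CLZ of zero is defined to be the mode bitsize, e.g. 32, then
	    (lshiftrt (clz X) (const_int 5)) is 1 exactly when X is zero.  */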
2722 if (GET_CODE (op0) == CLZ
2723 && CONST_INT_P (trueop1)
2724 && STORE_FLAG_VALUE == 1
2725 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2726 {
2727 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2728 unsigned HOST_WIDE_INT zero_val = 0;
2729
2730 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2731 && zero_val == GET_MODE_BITSIZE (imode)
2732 && INTVAL (trueop1) == exact_log2 (zero_val))
2733 return simplify_gen_relational (EQ, mode, imode,
2734 XEXP (op0, 0), const0_rtx);
2735 }
2736 goto canonicalize_shift;
2737
2738 case SMIN:
2739 if (width <= HOST_BITS_PER_WIDE_INT
2740 && CONST_INT_P (trueop1)
2741 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2742 && ! side_effects_p (op0))
2743 return op1;
2744 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2745 return op0;
2746 tem = simplify_associative_operation (code, mode, op0, op1);
2747 if (tem)
2748 return tem;
2749 break;
2750
2751 case SMAX:
2752 if (width <= HOST_BITS_PER_WIDE_INT
2753 && CONST_INT_P (trueop1)
2754 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2755 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2756 && ! side_effects_p (op0))
2757 return op1;
2758 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2759 return op0;
2760 tem = simplify_associative_operation (code, mode, op0, op1);
2761 if (tem)
2762 return tem;
2763 break;
2764
2765 case UMIN:
2766 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2767 return op1;
2768 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2769 return op0;
2770 tem = simplify_associative_operation (code, mode, op0, op1);
2771 if (tem)
2772 return tem;
2773 break;
2774
2775 case UMAX:
2776 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2777 return op1;
2778 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2779 return op0;
2780 tem = simplify_associative_operation (code, mode, op0, op1);
2781 if (tem)
2782 return tem;
2783 break;
2784
2785 case SS_PLUS:
2786 case US_PLUS:
2787 case SS_MINUS:
2788 case US_MINUS:
2789 case SS_MULT:
2790 case US_MULT:
2791 case SS_DIV:
2792 case US_DIV:
2793 /* ??? There are simplifications that can be done. */
2794 return 0;
2795
2796 case VEC_SELECT:
2797 if (!VECTOR_MODE_P (mode))
2798 {
2799 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2800 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2801 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2802 gcc_assert (XVECLEN (trueop1, 0) == 1);
2803 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2804
2805 if (GET_CODE (trueop0) == CONST_VECTOR)
2806 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2807 (trueop1, 0, 0)));
2808
2809 /* Extract a scalar element from a nested VEC_SELECT expression
2810 (with optional nested VEC_CONCAT expression). Some targets
2811 (i386) extract a scalar element from a vector using a chain of
2812 nested VEC_SELECT expressions. When the input operand is a memory
2813 operand, this operation can be simplified to a simple scalar
2814 load from an offset memory address. */
2815 if (GET_CODE (trueop0) == VEC_SELECT)
2816 {
2817 rtx op0 = XEXP (trueop0, 0);
2818 rtx op1 = XEXP (trueop0, 1);
2819
2820 enum machine_mode opmode = GET_MODE (op0);
2821 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2822 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2823
2824 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2825 int elem;
2826
2827 rtvec vec;
2828 rtx tmp_op, tmp;
2829
2830 gcc_assert (GET_CODE (op1) == PARALLEL);
2831 gcc_assert (i < n_elts);
2832
2833 /* Select the element pointed to by the nested selector. */
2834 elem = INTVAL (XVECEXP (op1, 0, i));
2835
2836 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2837 if (GET_CODE (op0) == VEC_CONCAT)
2838 {
2839 rtx op00 = XEXP (op0, 0);
2840 rtx op01 = XEXP (op0, 1);
2841
2842 enum machine_mode mode00, mode01;
2843 int n_elts00, n_elts01;
2844
2845 mode00 = GET_MODE (op00);
2846 mode01 = GET_MODE (op01);
2847
2848 /* Find out number of elements of each operand. */
2849 if (VECTOR_MODE_P (mode00))
2850 {
2851 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2852 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2853 }
2854 else
2855 n_elts00 = 1;
2856
2857 if (VECTOR_MODE_P (mode01))
2858 {
2859 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2860 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2861 }
2862 else
2863 n_elts01 = 1;
2864
2865 gcc_assert (n_elts == n_elts00 + n_elts01);
2866
2867 /* Select correct operand of VEC_CONCAT
2868 and adjust selector. */
2869 if (elem < n_elts00)
2870 tmp_op = op00;
2871 else
2872 {
2873 tmp_op = op01;
2874 elem -= n_elts00;
2875 }
2876 }
2877 else
2878 tmp_op = op0;
2879
2880 vec = rtvec_alloc (1);
2881 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2882
2883 tmp = gen_rtx_fmt_ee (code, mode,
2884 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2885 return tmp;
2886 }
2887 }
2888 else
2889 {
2890 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2891 gcc_assert (GET_MODE_INNER (mode)
2892 == GET_MODE_INNER (GET_MODE (trueop0)));
2893 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2894
2895 if (GET_CODE (trueop0) == CONST_VECTOR)
2896 {
2897 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2898 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2899 rtvec v = rtvec_alloc (n_elts);
2900 unsigned int i;
2901
2902 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2903 for (i = 0; i < n_elts; i++)
2904 {
2905 rtx x = XVECEXP (trueop1, 0, i);
2906
2907 gcc_assert (CONST_INT_P (x));
2908 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2909 INTVAL (x));
2910 }
2911
2912 return gen_rtx_CONST_VECTOR (mode, v);
2913 }
2914 }
2915
2916 if (XVECLEN (trueop1, 0) == 1
2917 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2918 && GET_CODE (trueop0) == VEC_CONCAT)
2919 {
2920 rtx vec = trueop0;
2921 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2922
2923 /* Try to find the element in the VEC_CONCAT. */
2924 while (GET_MODE (vec) != mode
2925 && GET_CODE (vec) == VEC_CONCAT)
2926 {
2927 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2928 if (offset < vec_size)
2929 vec = XEXP (vec, 0);
2930 else
2931 {
2932 offset -= vec_size;
2933 vec = XEXP (vec, 1);
2934 }
2935 vec = avoid_constant_pool_reference (vec);
2936 }
2937
2938 if (GET_MODE (vec) == mode)
2939 return vec;
2940 }
2941
2942 return 0;
2943 case VEC_CONCAT:
2944 {
2945 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2946 ? GET_MODE (trueop0)
2947 : GET_MODE_INNER (mode));
2948 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2949 ? GET_MODE (trueop1)
2950 : GET_MODE_INNER (mode));
2951
2952 gcc_assert (VECTOR_MODE_P (mode));
2953 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2954 == GET_MODE_SIZE (mode));
2955
2956 if (VECTOR_MODE_P (op0_mode))
2957 gcc_assert (GET_MODE_INNER (mode)
2958 == GET_MODE_INNER (op0_mode));
2959 else
2960 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2961
2962 if (VECTOR_MODE_P (op1_mode))
2963 gcc_assert (GET_MODE_INNER (mode)
2964 == GET_MODE_INNER (op1_mode));
2965 else
2966 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2967
2968 if ((GET_CODE (trueop0) == CONST_VECTOR
2969 || CONST_INT_P (trueop0)
2970 || GET_CODE (trueop0) == CONST_DOUBLE)
2971 && (GET_CODE (trueop1) == CONST_VECTOR
2972 || CONST_INT_P (trueop1)
2973 || GET_CODE (trueop1) == CONST_DOUBLE))
2974 {
2975 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2976 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2977 rtvec v = rtvec_alloc (n_elts);
2978 unsigned int i;
2979 unsigned in_n_elts = 1;
2980
2981 if (VECTOR_MODE_P (op0_mode))
2982 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2983 for (i = 0; i < n_elts; i++)
2984 {
2985 if (i < in_n_elts)
2986 {
2987 if (!VECTOR_MODE_P (op0_mode))
2988 RTVEC_ELT (v, i) = trueop0;
2989 else
2990 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2991 }
2992 else
2993 {
2994 if (!VECTOR_MODE_P (op1_mode))
2995 RTVEC_ELT (v, i) = trueop1;
2996 else
2997 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2998 i - in_n_elts);
2999 }
3000 }
3001
3002 return gen_rtx_CONST_VECTOR (mode, v);
3003 }
3004 }
3005 return 0;
3006
3007 default:
3008 gcc_unreachable ();
3009 }
3010
3011 return 0;
3012 }
3013
3014 rtx
3015 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3016 rtx op0, rtx op1)
3017 {
3018 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3019 HOST_WIDE_INT val;
3020 unsigned int width = GET_MODE_BITSIZE (mode);
3021
3022 if (VECTOR_MODE_P (mode)
3023 && code != VEC_CONCAT
3024 && GET_CODE (op0) == CONST_VECTOR
3025 && GET_CODE (op1) == CONST_VECTOR)
3026 {
3027 unsigned n_elts = GET_MODE_NUNITS (mode);
3028 enum machine_mode op0mode = GET_MODE (op0);
3029 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3030 enum machine_mode op1mode = GET_MODE (op1);
3031 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3032 rtvec v = rtvec_alloc (n_elts);
3033 unsigned int i;
3034
3035 gcc_assert (op0_n_elts == n_elts);
3036 gcc_assert (op1_n_elts == n_elts);
3037 for (i = 0; i < n_elts; i++)
3038 {
3039 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3040 CONST_VECTOR_ELT (op0, i),
3041 CONST_VECTOR_ELT (op1, i));
3042 if (!x)
3043 return 0;
3044 RTVEC_ELT (v, i) = x;
3045 }
3046
3047 return gen_rtx_CONST_VECTOR (mode, v);
3048 }
3049
3050 if (VECTOR_MODE_P (mode)
3051 && code == VEC_CONCAT
3052 && (CONST_INT_P (op0)
3053 || GET_CODE (op0) == CONST_DOUBLE
3054 || GET_CODE (op0) == CONST_FIXED)
3055 && (CONST_INT_P (op1)
3056 || GET_CODE (op1) == CONST_DOUBLE
3057 || GET_CODE (op1) == CONST_FIXED))
3058 {
3059 unsigned n_elts = GET_MODE_NUNITS (mode);
3060 rtvec v = rtvec_alloc (n_elts);
3061
3062 gcc_assert (n_elts >= 2);
3063 if (n_elts == 2)
3064 {
3065 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3066 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3067
3068 RTVEC_ELT (v, 0) = op0;
3069 RTVEC_ELT (v, 1) = op1;
3070 }
3071 else
3072 {
3073 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3074 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3075 unsigned i;
3076
3077 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3078 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3079 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3080
3081 for (i = 0; i < op0_n_elts; ++i)
3082 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3083 for (i = 0; i < op1_n_elts; ++i)
3084 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3085 }
3086
3087 return gen_rtx_CONST_VECTOR (mode, v);
3088 }
3089
3090 if (SCALAR_FLOAT_MODE_P (mode)
3091 && GET_CODE (op0) == CONST_DOUBLE
3092 && GET_CODE (op1) == CONST_DOUBLE
3093 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3094 {
3095 if (code == AND
3096 || code == IOR
3097 || code == XOR)
3098 {
3099 long tmp0[4];
3100 long tmp1[4];
3101 REAL_VALUE_TYPE r;
3102 int i;
3103
3104 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3105 GET_MODE (op0));
3106 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3107 GET_MODE (op1));
3108 for (i = 0; i < 4; i++)
3109 {
3110 switch (code)
3111 {
3112 case AND:
3113 tmp0[i] &= tmp1[i];
3114 break;
3115 case IOR:
3116 tmp0[i] |= tmp1[i];
3117 break;
3118 case XOR:
3119 tmp0[i] ^= tmp1[i];
3120 break;
3121 default:
3122 gcc_unreachable ();
3123 }
3124 }
3125 real_from_target (&r, tmp0, mode);
3126 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3127 }
3128 else
3129 {
3130 REAL_VALUE_TYPE f0, f1, value, result;
3131 bool inexact;
3132
3133 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3134 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3135 real_convert (&f0, mode, &f0);
3136 real_convert (&f1, mode, &f1);
3137
3138 if (HONOR_SNANS (mode)
3139 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3140 return 0;
3141
3142 if (code == DIV
3143 && REAL_VALUES_EQUAL (f1, dconst0)
3144 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3145 return 0;
3146
3147 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3148 && flag_trapping_math
3149 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3150 {
3151 int s0 = REAL_VALUE_NEGATIVE (f0);
3152 int s1 = REAL_VALUE_NEGATIVE (f1);
3153
3154 switch (code)
3155 {
3156 case PLUS:
3157 /* Inf + -Inf = NaN plus exception. */
3158 if (s0 != s1)
3159 return 0;
3160 break;
3161 case MINUS:
3162 /* Inf - Inf = NaN plus exception. */
3163 if (s0 == s1)
3164 return 0;
3165 break;
3166 case DIV:
3167 /* Inf / Inf = NaN plus exception. */
3168 return 0;
3169 default:
3170 break;
3171 }
3172 }
3173
3174 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3175 && flag_trapping_math
3176 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3177 || (REAL_VALUE_ISINF (f1)
3178 && REAL_VALUES_EQUAL (f0, dconst0))))
3179 /* Inf * 0 = NaN plus exception. */
3180 return 0;
3181
3182 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3183 &f0, &f1);
3184 real_convert (&result, mode, &value);
3185
3186 /* Don't constant fold this floating point operation if
3187 the result has overflowed and flag_trapping_math. */
3188
3189 if (flag_trapping_math
3190 && MODE_HAS_INFINITIES (mode)
3191 && REAL_VALUE_ISINF (result)
3192 && !REAL_VALUE_ISINF (f0)
3193 && !REAL_VALUE_ISINF (f1))
3194 /* Overflow plus exception. */
3195 return 0;
3196
3197 /* Don't constant fold this floating point operation if the
3198 result may depend upon the run-time rounding mode and
3199 flag_rounding_math is set, or if GCC's software emulation
3200 is unable to accurately represent the result. */
3201
3202 if ((flag_rounding_math
3203 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3204 && (inexact || !real_identical (&result, &value)))
3205 return NULL_RTX;
3206
3207 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3208 }
3209 }
3210
3211 /* We can fold some multi-word operations. */
3212 if (GET_MODE_CLASS (mode) == MODE_INT
3213 && width == HOST_BITS_PER_WIDE_INT * 2
3214 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3215 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3216 {
3217 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3218 HOST_WIDE_INT h1, h2, hv, ht;
3219
3220 if (GET_CODE (op0) == CONST_DOUBLE)
3221 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3222 else
3223 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3224
3225 if (GET_CODE (op1) == CONST_DOUBLE)
3226 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3227 else
3228 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3229
3230 switch (code)
3231 {
3232 case MINUS:
3233 /* A - B == A + (-B). */
3234 neg_double (l2, h2, &lv, &hv);
3235 l2 = lv, h2 = hv;
3236
3237 /* Fall through.... */
3238
3239 case PLUS:
3240 add_double (l1, h1, l2, h2, &lv, &hv);
3241 break;
3242
3243 case MULT:
3244 mul_double (l1, h1, l2, h2, &lv, &hv);
3245 break;
3246
3247 case DIV:
3248 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3249 &lv, &hv, &lt, &ht))
3250 return 0;
3251 break;
3252
3253 case MOD:
3254 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3255 &lt, &ht, &lv, &hv))
3256 return 0;
3257 break;
3258
3259 case UDIV:
3260 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3261 &lv, &hv, &lt, &ht))
3262 return 0;
3263 break;
3264
3265 case UMOD:
3266 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3267 &lt, &ht, &lv, &hv))
3268 return 0;
3269 break;
3270
3271 case AND:
3272 lv = l1 & l2, hv = h1 & h2;
3273 break;
3274
3275 case IOR:
3276 lv = l1 | l2, hv = h1 | h2;
3277 break;
3278
3279 case XOR:
3280 lv = l1 ^ l2, hv = h1 ^ h2;
3281 break;
3282
3283 case SMIN:
3284 if (h1 < h2
3285 || (h1 == h2
3286 && ((unsigned HOST_WIDE_INT) l1
3287 < (unsigned HOST_WIDE_INT) l2)))
3288 lv = l1, hv = h1;
3289 else
3290 lv = l2, hv = h2;
3291 break;
3292
3293 case SMAX:
3294 if (h1 > h2
3295 || (h1 == h2
3296 && ((unsigned HOST_WIDE_INT) l1
3297 > (unsigned HOST_WIDE_INT) l2)))
3298 lv = l1, hv = h1;
3299 else
3300 lv = l2, hv = h2;
3301 break;
3302
3303 case UMIN:
3304 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3305 || (h1 == h2
3306 && ((unsigned HOST_WIDE_INT) l1
3307 < (unsigned HOST_WIDE_INT) l2)))
3308 lv = l1, hv = h1;
3309 else
3310 lv = l2, hv = h2;
3311 break;
3312
3313 case UMAX:
3314 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3315 || (h1 == h2
3316 && ((unsigned HOST_WIDE_INT) l1
3317 > (unsigned HOST_WIDE_INT) l2)))
3318 lv = l1, hv = h1;
3319 else
3320 lv = l2, hv = h2;
3321 break;
3322
3323 case LSHIFTRT: case ASHIFTRT:
3324 case ASHIFT:
3325 case ROTATE: case ROTATERT:
3326 if (SHIFT_COUNT_TRUNCATED)
3327 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3328
3329 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3330 return 0;
3331
3332 if (code == LSHIFTRT || code == ASHIFTRT)
3333 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3334 code == ASHIFTRT);
3335 else if (code == ASHIFT)
3336 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3337 else if (code == ROTATE)
3338 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3339 else /* code == ROTATERT */
3340 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3341 break;
3342
3343 default:
3344 return 0;
3345 }
3346
3347 return immed_double_const (lv, hv, mode);
3348 }
3349
3350 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3351 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3352 {
3353 /* Get the integer argument values in two forms:
3354 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3355
3356 arg0 = INTVAL (op0);
3357 arg1 = INTVAL (op1);
3358
3359 if (width < HOST_BITS_PER_WIDE_INT)
3360 {
3361 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3362 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3363
3364 arg0s = arg0;
3365 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3366 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3367
3368 arg1s = arg1;
3369 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3370 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3371 }
3372 else
3373 {
3374 arg0s = arg0;
3375 arg1s = arg1;
3376 }
3377
3378 /* Compute the value of the arithmetic. */
3379
3380 switch (code)
3381 {
3382 case PLUS:
3383 val = arg0s + arg1s;
3384 break;
3385
3386 case MINUS:
3387 val = arg0s - arg1s;
3388 break;
3389
3390 case MULT:
3391 val = arg0s * arg1s;
3392 break;
3393
3394 case DIV:
3395 if (arg1s == 0
3396 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3397 && arg1s == -1))
3398 return 0;
3399 val = arg0s / arg1s;
3400 break;
3401
3402 case MOD:
3403 if (arg1s == 0
3404 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3405 && arg1s == -1))
3406 return 0;
3407 val = arg0s % arg1s;
3408 break;
3409
3410 case UDIV:
3411 if (arg1 == 0
3412 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3413 && arg1s == -1))
3414 return 0;
3415 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3416 break;
3417
3418 case UMOD:
3419 if (arg1 == 0
3420 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3421 && arg1s == -1))
3422 return 0;
3423 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3424 break;
3425
3426 case AND:
3427 val = arg0 & arg1;
3428 break;
3429
3430 case IOR:
3431 val = arg0 | arg1;
3432 break;
3433
3434 case XOR:
3435 val = arg0 ^ arg1;
3436 break;
3437
3438 case LSHIFTRT:
3439 case ASHIFT:
3440 case ASHIFTRT:
3441 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3442 the value is in range. We can't return any old value for
3443 out-of-range arguments because either the middle-end (via
3444 shift_truncation_mask) or the back-end might be relying on
3445 target-specific knowledge. Nor can we rely on
3446 shift_truncation_mask, since the shift might not be part of an
3447 ashlM3, lshrM3 or ashrM3 instruction. */
3448 if (SHIFT_COUNT_TRUNCATED)
3449 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3450 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3451 return 0;
3452
3453 val = (code == ASHIFT
3454 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3455 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3456
3457 /* Sign-extend the result for arithmetic right shifts. */
3458 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3459 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3460 break;
3461
3462 case ROTATERT:
3463 if (arg1 < 0)
3464 return 0;
3465
3466 arg1 %= width;
3467 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3468 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3469 break;
3470
3471 case ROTATE:
3472 if (arg1 < 0)
3473 return 0;
3474
3475 arg1 %= width;
3476 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3477 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3478 break;
3479
3480 case COMPARE:
3481 /* Do nothing here. */
3482 return 0;
3483
3484 case SMIN:
3485 val = arg0s <= arg1s ? arg0s : arg1s;
3486 break;
3487
3488 case UMIN:
3489 val = ((unsigned HOST_WIDE_INT) arg0
3490 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3491 break;
3492
3493 case SMAX:
3494 val = arg0s > arg1s ? arg0s : arg1s;
3495 break;
3496
3497 case UMAX:
3498 val = ((unsigned HOST_WIDE_INT) arg0
3499 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3500 break;
3501
3502 case SS_PLUS:
3503 case US_PLUS:
3504 case SS_MINUS:
3505 case US_MINUS:
3506 case SS_MULT:
3507 case US_MULT:
3508 case SS_DIV:
3509 case US_DIV:
3510 case SS_ASHIFT:
3511 case US_ASHIFT:
3512 /* ??? There are simplifications that can be done. */
3513 return 0;
3514
3515 default:
3516 gcc_unreachable ();
3517 }
3518
3519 return gen_int_mode (val, mode);
3520 }
3521
3522 return NULL_RTX;
3523 }
3524
3525
3526 \f
3527 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3528 PLUS or MINUS.
3529
3530 Rather than test for specific cases, we do this by a brute-force method
3531 and do all possible simplifications until no more changes occur. Then
3532 we rebuild the operation. */
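   For example, in an integer mode (plus (minus A B) (plus B C)) is
   flattened into the operand list A, -B, B, C; B and -B cancel and the
   result is rebuilt as the sum of A and C.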
3533
3534 struct simplify_plus_minus_op_data
3535 {
3536 rtx op;
3537 short neg;
3538 };
3539
3540 static bool
3541 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3542 {
3543 int result;
3544
3545 result = (commutative_operand_precedence (y)
3546 - commutative_operand_precedence (x));
3547 if (result)
3548 return result > 0;
3549
3550 /* Group together equal REGs to do more simplification. */
3551 if (REG_P (x) && REG_P (y))
3552 return REGNO (x) > REGNO (y);
3553 else
3554 return false;
3555 }
3556
3557 static rtx
3558 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3559 rtx op1)
3560 {
3561 struct simplify_plus_minus_op_data ops[8];
3562 rtx result, tem;
3563 int n_ops = 2, input_ops = 2;
3564 int changed, n_constants = 0, canonicalized = 0;
3565 int i, j;
3566
3567 memset (ops, 0, sizeof ops);
3568
3569 /* Set up the two operands and then expand them until nothing has been
3570 changed. If we run out of room in our array, give up; this should
3571 almost never happen. */
3572
3573 ops[0].op = op0;
3574 ops[0].neg = 0;
3575 ops[1].op = op1;
3576 ops[1].neg = (code == MINUS);
3577
3578 do
3579 {
3580 changed = 0;
3581
3582 for (i = 0; i < n_ops; i++)
3583 {
3584 rtx this_op = ops[i].op;
3585 int this_neg = ops[i].neg;
3586 enum rtx_code this_code = GET_CODE (this_op);
3587
3588 switch (this_code)
3589 {
3590 case PLUS:
3591 case MINUS:
3592 if (n_ops == 7)
3593 return NULL_RTX;
3594
3595 ops[n_ops].op = XEXP (this_op, 1);
3596 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3597 n_ops++;
3598
3599 ops[i].op = XEXP (this_op, 0);
3600 input_ops++;
3601 changed = 1;
3602 canonicalized |= this_neg;
3603 break;
3604
3605 case NEG:
3606 ops[i].op = XEXP (this_op, 0);
3607 ops[i].neg = ! this_neg;
3608 changed = 1;
3609 canonicalized = 1;
3610 break;
3611
3612 case CONST:
3613 if (n_ops < 7
3614 && GET_CODE (XEXP (this_op, 0)) == PLUS
3615 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3616 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3617 {
3618 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3619 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3620 ops[n_ops].neg = this_neg;
3621 n_ops++;
3622 changed = 1;
3623 canonicalized = 1;
3624 }
3625 break;
3626
3627 case NOT:
3628 /* ~a -> (-a - 1) */
3629 if (n_ops != 7)
3630 {
3631 ops[n_ops].op = constm1_rtx;
3632 ops[n_ops++].neg = this_neg;
3633 ops[i].op = XEXP (this_op, 0);
3634 ops[i].neg = !this_neg;
3635 changed = 1;
3636 canonicalized = 1;
3637 }
3638 break;
3639
3640 case CONST_INT:
3641 n_constants++;
3642 if (this_neg)
3643 {
3644 ops[i].op = neg_const_int (mode, this_op);
3645 ops[i].neg = 0;
3646 changed = 1;
3647 canonicalized = 1;
3648 }
3649 break;
3650
3651 default:
3652 break;
3653 }
3654 }
3655 }
3656 while (changed);
3657
3658 if (n_constants > 1)
3659 canonicalized = 1;
3660
3661 gcc_assert (n_ops >= 2);
3662
3663 /* If we only have two operands, we can avoid the loops. */
3664 if (n_ops == 2)
3665 {
3666 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3667 rtx lhs, rhs;
3668
3669 /* Get the two operands. Be careful with the order, especially for
3670 the cases where code == MINUS. */
3671 if (ops[0].neg && ops[1].neg)
3672 {
3673 lhs = gen_rtx_NEG (mode, ops[0].op);
3674 rhs = ops[1].op;
3675 }
3676 else if (ops[0].neg)
3677 {
3678 lhs = ops[1].op;
3679 rhs = ops[0].op;
3680 }
3681 else
3682 {
3683 lhs = ops[0].op;
3684 rhs = ops[1].op;
3685 }
3686
3687 return simplify_const_binary_operation (code, mode, lhs, rhs);
3688 }
3689
3690 /* Now simplify each pair of operands until nothing changes. */
3691 do
3692 {
3693 /* Insertion sort is good enough for an eight-element array. */
3694 for (i = 1; i < n_ops; i++)
3695 {
3696 struct simplify_plus_minus_op_data save;
3697 j = i - 1;
3698 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3699 continue;
3700
3701 canonicalized = 1;
3702 save = ops[i];
3703 do
3704 ops[j + 1] = ops[j];
3705 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3706 ops[j + 1] = save;
3707 }
3708
3709 changed = 0;
3710 for (i = n_ops - 1; i > 0; i--)
3711 for (j = i - 1; j >= 0; j--)
3712 {
3713 rtx lhs = ops[j].op, rhs = ops[i].op;
3714 int lneg = ops[j].neg, rneg = ops[i].neg;
3715
3716 if (lhs != 0 && rhs != 0)
3717 {
3718 enum rtx_code ncode = PLUS;
3719
3720 if (lneg != rneg)
3721 {
3722 ncode = MINUS;
3723 if (lneg)
3724 tem = lhs, lhs = rhs, rhs = tem;
3725 }
3726 else if (swap_commutative_operands_p (lhs, rhs))
3727 tem = lhs, lhs = rhs, rhs = tem;
3728
3729 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3730 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3731 {
3732 rtx tem_lhs, tem_rhs;
3733
3734 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3735 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3736 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3737
3738 if (tem && !CONSTANT_P (tem))
3739 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3740 }
3741 else
3742 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3743
3744 /* Reject "simplifications" that just wrap the two
3745 arguments in a CONST. Failure to do so can result
3746 in infinite recursion with simplify_binary_operation
3747 when it calls us to simplify CONST operations. */
3748 if (tem
3749 && ! (GET_CODE (tem) == CONST
3750 && GET_CODE (XEXP (tem, 0)) == ncode
3751 && XEXP (XEXP (tem, 0), 0) == lhs
3752 && XEXP (XEXP (tem, 0), 1) == rhs))
3753 {
3754 lneg &= rneg;
3755 if (GET_CODE (tem) == NEG)
3756 tem = XEXP (tem, 0), lneg = !lneg;
3757 if (CONST_INT_P (tem) && lneg)
3758 tem = neg_const_int (mode, tem), lneg = 0;
3759
3760 ops[i].op = tem;
3761 ops[i].neg = lneg;
3762 ops[j].op = NULL_RTX;
3763 changed = 1;
3764 canonicalized = 1;
3765 }
3766 }
3767 }
3768
3769 /* If nothing changed, fail. */
3770 if (!canonicalized)
3771 return NULL_RTX;
3772
3773 /* Pack all the operands to the lower-numbered entries. */
3774 for (i = 0, j = 0; j < n_ops; j++)
3775 if (ops[j].op)
3776 {
3777 ops[i] = ops[j];
3778 i++;
3779 }
3780 n_ops = i;
3781 }
3782 while (changed);
3783
3784 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3785 if (n_ops == 2
3786 && CONST_INT_P (ops[1].op)
3787 && CONSTANT_P (ops[0].op)
3788 && ops[0].neg)
3789 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3790
3791 /* We suppressed creation of trivial CONST expressions in the
3792 combination loop to avoid recursion. Create one manually now.
3793 The combination loop should have ensured that there is exactly
3794 one CONST_INT, and the sort will have ensured that it is last
3795 in the array and that any other constant will be next-to-last. */
3796
3797 if (n_ops > 1
3798 && CONST_INT_P (ops[n_ops - 1].op)
3799 && CONSTANT_P (ops[n_ops - 2].op))
3800 {
3801 rtx value = ops[n_ops - 1].op;
3802 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3803 value = neg_const_int (mode, value);
3804 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3805 n_ops--;
3806 }
3807
3808 /* Put a non-negated operand first, if possible. */
3809
3810 for (i = 0; i < n_ops && ops[i].neg; i++)
3811 continue;
3812 if (i == n_ops)
3813 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3814 else if (i != 0)
3815 {
3816 tem = ops[0].op;
3817 ops[0] = ops[i];
3818 ops[i].op = tem;
3819 ops[i].neg = 1;
3820 }
3821
3822 /* Now make the result by performing the requested operations. */
3823 result = ops[0].op;
3824 for (i = 1; i < n_ops; i++)
3825 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3826 mode, result, ops[i].op);
3827
3828 return result;
3829 }
3830
3831 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3832 static bool
3833 plus_minus_operand_p (const_rtx x)
3834 {
3835 return GET_CODE (x) == PLUS
3836 || GET_CODE (x) == MINUS
3837 || (GET_CODE (x) == CONST
3838 && GET_CODE (XEXP (x, 0)) == PLUS
3839 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3840 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3841 }
3842
3843 /* Like simplify_binary_operation except used for relational operators.
3844 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3845 not both be VOIDmode as well.
3846
3847 CMP_MODE specifies the mode in which the comparison is done, so it is
3848 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3849 the operands or, if both are VOIDmode, the operands are compared in
3850 "infinite precision". */
3851 rtx
3852 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3853 enum machine_mode cmp_mode, rtx op0, rtx op1)
3854 {
3855 rtx tem, trueop0, trueop1;
3856
3857 if (cmp_mode == VOIDmode)
3858 cmp_mode = GET_MODE (op0);
3859 if (cmp_mode == VOIDmode)
3860 cmp_mode = GET_MODE (op1);
3861
3862 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3863 if (tem)
3864 {
3865 if (SCALAR_FLOAT_MODE_P (mode))
3866 {
3867 if (tem == const0_rtx)
3868 return CONST0_RTX (mode);
3869 #ifdef FLOAT_STORE_FLAG_VALUE
3870 {
3871 REAL_VALUE_TYPE val;
3872 val = FLOAT_STORE_FLAG_VALUE (mode);
3873 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3874 }
3875 #else
3876 return NULL_RTX;
3877 #endif
3878 }
3879 if (VECTOR_MODE_P (mode))
3880 {
3881 if (tem == const0_rtx)
3882 return CONST0_RTX (mode);
3883 #ifdef VECTOR_STORE_FLAG_VALUE
3884 {
3885 int i, units;
3886 rtvec v;
3887
3888 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3889 if (val == NULL_RTX)
3890 return NULL_RTX;
3891 if (val == const1_rtx)
3892 return CONST1_RTX (mode);
3893
3894 units = GET_MODE_NUNITS (mode);
3895 v = rtvec_alloc (units);
3896 for (i = 0; i < units; i++)
3897 RTVEC_ELT (v, i) = val;
3898 return gen_rtx_raw_CONST_VECTOR (mode, v);
3899 }
3900 #else
3901 return NULL_RTX;
3902 #endif
3903 }
3904
3905 return tem;
3906 }
3907
3908 /* For the following tests, ensure const0_rtx is op1. */
3909 if (swap_commutative_operands_p (op0, op1)
3910 || (op0 == const0_rtx && op1 != const0_rtx))
3911 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3912
3913 /* If op0 is a compare, extract the comparison arguments from it. */
3914 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3915 return simplify_gen_relational (code, mode, VOIDmode,
3916 XEXP (op0, 0), XEXP (op0, 1));
3917
3918 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3919 || CC0_P (op0))
3920 return NULL_RTX;
3921
3922 trueop0 = avoid_constant_pool_reference (op0);
3923 trueop1 = avoid_constant_pool_reference (op1);
3924 return simplify_relational_operation_1 (code, mode, cmp_mode,
3925 trueop0, trueop1);
3926 }
3927
3928 /* This part of simplify_relational_operation is only used when CMP_MODE
3929 is not in class MODE_CC (i.e. it is a real comparison).
3930
3931 MODE is the mode of the result, while CMP_MODE specifies the mode in
3932 which the comparison is done, so it is the mode of the operands. */
3933
3934 static rtx
3935 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3936 enum machine_mode cmp_mode, rtx op0, rtx op1)
3937 {
3938 enum rtx_code op0code = GET_CODE (op0);
3939
3940 if (op1 == const0_rtx && COMPARISON_P (op0))
3941 {
3942 /* If op0 is a comparison, extract the comparison arguments
3943 from it. */
3944 if (code == NE)
3945 {
3946 if (GET_MODE (op0) == mode)
3947 return simplify_rtx (op0);
3948 else
3949 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3950 XEXP (op0, 0), XEXP (op0, 1));
3951 }
3952 else if (code == EQ)
3953 {
3954 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3955 if (new_code != UNKNOWN)
3956 return simplify_gen_relational (new_code, mode, VOIDmode,
3957 XEXP (op0, 0), XEXP (op0, 1));
3958 }
3959 }
3960
3961 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
3962 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
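/* For example, (ltu:SI (plus:SI a (const_int 7)) (const_int 7))
   becomes (geu:SI a (const_int -7)). */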
3963 if ((code == LTU || code == GEU)
3964 && GET_CODE (op0) == PLUS
3965 && CONST_INT_P (XEXP (op0, 1))
3966 && (rtx_equal_p (op1, XEXP (op0, 0))
3967 || rtx_equal_p (op1, XEXP (op0, 1))))
3968 {
3969 rtx new_cmp
3970 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
3971 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
3972 cmp_mode, XEXP (op0, 0), new_cmp);
3973 }
3974
3975 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3976 if ((code == LTU || code == GEU)
3977 && GET_CODE (op0) == PLUS
3978 && rtx_equal_p (op1, XEXP (op0, 1))
3979 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3980 && !rtx_equal_p (op1, XEXP (op0, 0)))
3981 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
3982
3983 if (op1 == const0_rtx)
3984 {
3985 /* Canonicalize (GTU x 0) as (NE x 0). */
3986 if (code == GTU)
3987 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3988 /* Canonicalize (LEU x 0) as (EQ x 0). */
3989 if (code == LEU)
3990 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3991 }
3992 else if (op1 == const1_rtx)
3993 {
3994 switch (code)
3995 {
3996 case GE:
3997 /* Canonicalize (GE x 1) as (GT x 0). */
3998 return simplify_gen_relational (GT, mode, cmp_mode,
3999 op0, const0_rtx);
4000 case GEU:
4001 /* Canonicalize (GEU x 1) as (NE x 0). */
4002 return simplify_gen_relational (NE, mode, cmp_mode,
4003 op0, const0_rtx);
4004 case LT:
4005 /* Canonicalize (LT x 1) as (LE x 0). */
4006 return simplify_gen_relational (LE, mode, cmp_mode,
4007 op0, const0_rtx);
4008 case LTU:
4009 /* Canonicalize (LTU x 1) as (EQ x 0). */
4010 return simplify_gen_relational (EQ, mode, cmp_mode,
4011 op0, const0_rtx);
4012 default:
4013 break;
4014 }
4015 }
4016 else if (op1 == constm1_rtx)
4017 {
4018 /* Canonicalize (LE x -1) as (LT x 0). */
4019 if (code == LE)
4020 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4021 /* Canonicalize (GT x -1) as (GE x 0). */
4022 if (code == GT)
4023 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4024 }
4025
4026 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
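/* E.g. (eq:SI (plus:SI x (const_int 3)) (const_int 10)) becomes
   (eq:SI x (const_int 7)). */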
4027 if ((code == EQ || code == NE)
4028 && (op0code == PLUS || op0code == MINUS)
4029 && CONSTANT_P (op1)
4030 && CONSTANT_P (XEXP (op0, 1))
4031 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4032 {
4033 rtx x = XEXP (op0, 0);
4034 rtx c = XEXP (op0, 1);
4035
4036 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4037 cmp_mode, op1, c);
4038 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4039 }
4040
4041 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4042 the same as (zero_extract:SI FOO (const_int 1) BAR). */
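/* This is valid because nonzero_bits shows OP0 is either 0 or 1, so
   with STORE_FLAG_VALUE == 1 the comparison result has the same value
   as OP0 itself, merely zero-extended or truncated to MODE. */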
4043 if (code == NE
4044 && op1 == const0_rtx
4045 && GET_MODE_CLASS (mode) == MODE_INT
4046 && cmp_mode != VOIDmode
4047 /* ??? Work-around BImode bugs in the ia64 backend. */
4048 && mode != BImode
4049 && cmp_mode != BImode
4050 && nonzero_bits (op0, cmp_mode) == 1
4051 && STORE_FLAG_VALUE == 1)
4052 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4053 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4054 : lowpart_subreg (mode, op0, cmp_mode);
4055
4056 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4057 if ((code == EQ || code == NE)
4058 && op1 == const0_rtx
4059 && op0code == XOR)
4060 return simplify_gen_relational (code, mode, cmp_mode,
4061 XEXP (op0, 0), XEXP (op0, 1));
4062
4063 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4064 if ((code == EQ || code == NE)
4065 && op0code == XOR
4066 && rtx_equal_p (XEXP (op0, 0), op1)
4067 && !side_effects_p (XEXP (op0, 0)))
4068 return simplify_gen_relational (code, mode, cmp_mode,
4069 XEXP (op0, 1), const0_rtx);
4070
4071 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4072 if ((code == EQ || code == NE)
4073 && op0code == XOR
4074 && rtx_equal_p (XEXP (op0, 1), op1)
4075 && !side_effects_p (XEXP (op0, 1)))
4076 return simplify_gen_relational (code, mode, cmp_mode,
4077 XEXP (op0, 0), const0_rtx);
4078
4079 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
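/* E.g. (ne:SI (xor:SI x (const_int 5)) (const_int 3)) becomes
   (ne:SI x (const_int 6)). */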
4080 if ((code == EQ || code == NE)
4081 && op0code == XOR
4082 && (CONST_INT_P (op1)
4083 || GET_CODE (op1) == CONST_DOUBLE)
4084 && (CONST_INT_P (XEXP (op0, 1))
4085 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4086 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4087 simplify_gen_binary (XOR, cmp_mode,
4088 XEXP (op0, 1), op1));
4089
4090 if (op0code == POPCOUNT && op1 == const0_rtx)
4091 switch (code)
4092 {
4093 case EQ:
4094 case LE:
4095 case LEU:
4096 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4097 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4098 XEXP (op0, 0), const0_rtx);
4099
4100 case NE:
4101 case GT:
4102 case GTU:
4103 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4104 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4105 XEXP (op0, 0), const0_rtx);
4106
4107 default:
4108 break;
4109 }
4110
4111 return NULL_RTX;
4112 }
4113
4114 enum
4115 {
4116 CMP_EQ = 1,
4117 CMP_LT = 2,
4118 CMP_GT = 4,
4119 CMP_LTU = 8,
4120 CMP_GTU = 16
4121 };
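/* For example, comparing -1 with 1 is signed-less-than but
   unsigned-greater-than, which is expressed as CMP_LT | CMP_GTU. */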
4122
4123
4124 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4125 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4126 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4127 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4128 For floating-point comparisons, assume that the operands were ordered. */
4129
4130 static rtx
4131 comparison_result (enum rtx_code code, int known_results)
4132 {
4133 switch (code)
4134 {
4135 case EQ:
4136 case UNEQ:
4137 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4138 case NE:
4139 case LTGT:
4140 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4141
4142 case LT:
4143 case UNLT:
4144 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4145 case GE:
4146 case UNGE:
4147 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4148
4149 case GT:
4150 case UNGT:
4151 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4152 case LE:
4153 case UNLE:
4154 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4155
4156 case LTU:
4157 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4158 case GEU:
4159 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4160
4161 case GTU:
4162 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4163 case LEU:
4164 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4165
4166 case ORDERED:
4167 return const_true_rtx;
4168 case UNORDERED:
4169 return const0_rtx;
4170 default:
4171 gcc_unreachable ();
4172 }
4173 }
4174
4175 /* Check if the given comparison (done in the given MODE) is actually a
4176 tautology or a contradiction.
4177 If no simplification is possible, this function returns zero.
4178 Otherwise, it returns either const_true_rtx or const0_rtx. */
4179
4180 rtx
4181 simplify_const_relational_operation (enum rtx_code code,
4182 enum machine_mode mode,
4183 rtx op0, rtx op1)
4184 {
4185 rtx tem;
4186 rtx trueop0;
4187 rtx trueop1;
4188
4189 gcc_assert (mode != VOIDmode
4190 || (GET_MODE (op0) == VOIDmode
4191 && GET_MODE (op1) == VOIDmode));
4192
4193 /* If op0 is a compare, extract the comparison arguments from it. */
4194 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4195 {
4196 op1 = XEXP (op0, 1);
4197 op0 = XEXP (op0, 0);
4198
4199 if (GET_MODE (op0) != VOIDmode)
4200 mode = GET_MODE (op0);
4201 else if (GET_MODE (op1) != VOIDmode)
4202 mode = GET_MODE (op1);
4203 else
4204 return 0;
4205 }
4206
4207 /* We can't simplify MODE_CC values since we don't know what the
4208 actual comparison is. */
4209 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4210 return 0;
4211
4212 /* Make sure the constant is second. */
4213 if (swap_commutative_operands_p (op0, op1))
4214 {
4215 tem = op0, op0 = op1, op1 = tem;
4216 code = swap_condition (code);
4217 }
4218
4219 trueop0 = avoid_constant_pool_reference (op0);
4220 trueop1 = avoid_constant_pool_reference (op1);
4221
4222 /* For integer comparisons of A and B maybe we can simplify A - B and can
4223 then simplify a comparison of that with zero. If A and B are both either
4224 a register or a CONST_INT, this can't help; testing for these cases will
4225 prevent infinite recursion here and speed things up.
4226
4227 We can only do this for EQ and NE comparisons, as otherwise we may
4228 lose or introduce overflow which we cannot disregard as undefined, since
4229 we do not know the signedness of the operation on either the left or
4230 the right-hand side of the comparison. */
4231
4232 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4233 && (code == EQ || code == NE)
4234 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4235 && (REG_P (op1) || CONST_INT_P (trueop1)))
4236 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4237 /* We cannot do this if tem is a nonzero address. */
4238 && ! nonzero_address_p (tem))
4239 return simplify_const_relational_operation (signed_condition (code),
4240 mode, tem, const0_rtx);
4241
4242 if (! HONOR_NANS (mode) && code == ORDERED)
4243 return const_true_rtx;
4244
4245 if (! HONOR_NANS (mode) && code == UNORDERED)
4246 return const0_rtx;
4247
4248 /* For modes without NaNs, if the two operands are equal, we know the
4249 result except if they have side-effects. Even with NaNs we know
4250 the result of unordered comparisons and, if signaling NaNs are
4251 irrelevant, also the result of LT/GT/LTGT. */
4252 if ((! HONOR_NANS (GET_MODE (trueop0))
4253 || code == UNEQ || code == UNLE || code == UNGE
4254 || ((code == LT || code == GT || code == LTGT)
4255 && ! HONOR_SNANS (GET_MODE (trueop0))))
4256 && rtx_equal_p (trueop0, trueop1)
4257 && ! side_effects_p (trueop0))
4258 return comparison_result (code, CMP_EQ);
4259
4260 /* If the operands are floating-point constants, see if we can fold
4261 the result. */
4262 if (GET_CODE (trueop0) == CONST_DOUBLE
4263 && GET_CODE (trueop1) == CONST_DOUBLE
4264 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4265 {
4266 REAL_VALUE_TYPE d0, d1;
4267
4268 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4269 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4270
4271 /* Comparisons are unordered iff at least one of the values is NaN. */
4272 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4273 switch (code)
4274 {
4275 case UNEQ:
4276 case UNLT:
4277 case UNGT:
4278 case UNLE:
4279 case UNGE:
4280 case NE:
4281 case UNORDERED:
4282 return const_true_rtx;
4283 case EQ:
4284 case LT:
4285 case GT:
4286 case LE:
4287 case GE:
4288 case LTGT:
4289 case ORDERED:
4290 return const0_rtx;
4291 default:
4292 return 0;
4293 }
4294
4295 return comparison_result (code,
4296 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4297 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4298 }
4299
4300 /* Otherwise, see if the operands are both integers. */
4301 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4302 && (GET_CODE (trueop0) == CONST_DOUBLE
4303 || CONST_INT_P (trueop0))
4304 && (GET_CODE (trueop1) == CONST_DOUBLE
4305 || CONST_INT_P (trueop1)))
4306 {
4307 int width = GET_MODE_BITSIZE (mode);
4308 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4309 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4310
4311 /* Get the two words comprising each integer constant. */
4312 if (GET_CODE (trueop0) == CONST_DOUBLE)
4313 {
4314 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4315 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4316 }
4317 else
4318 {
4319 l0u = l0s = INTVAL (trueop0);
4320 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4321 }
4322
4323 if (GET_CODE (trueop1) == CONST_DOUBLE)
4324 {
4325 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4326 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4327 }
4328 else
4329 {
4330 l1u = l1s = INTVAL (trueop1);
4331 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4332 }
4333
4334 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4335 we have to sign or zero-extend the values. */
4336 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4337 {
4338 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4339 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4340
4341 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4342 l0s |= ((HOST_WIDE_INT) (-1) << width);
4343
4344 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4345 l1s |= ((HOST_WIDE_INT) (-1) << width);
4346 }
4347 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4348 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4349
4350 if (h0u == h1u && l0u == l1u)
4351 return comparison_result (code, CMP_EQ);
4352 else
4353 {
4354 int cr;
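/* Compare the high words signed for CMP_LT/CMP_GT and unsigned for
   CMP_LTU/CMP_GTU; equal high words fall back to the low words,
   which are compared unsigned in both cases. */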
4355 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4356 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4357 return comparison_result (code, cr);
4358 }
4359 }
4360
4361 /* Optimize comparisons with upper and lower bounds. */
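/* For instance, if nonzero_bits reports 0xff for TRUEOP0, MMIN/MMAX
   become 0/255 and (gtu:SI trueop0 (const_int 255)) folds to
   const0_rtx below. */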
4362 if (SCALAR_INT_MODE_P (mode)
4363 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4364 && CONST_INT_P (trueop1))
4365 {
4366 int sign;
4367 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4368 HOST_WIDE_INT val = INTVAL (trueop1);
4369 HOST_WIDE_INT mmin, mmax;
4370
4371 if (code == GEU
4372 || code == LEU
4373 || code == GTU
4374 || code == LTU)
4375 sign = 0;
4376 else
4377 sign = 1;
4378
4379 /* Get a reduced range if the sign bit is zero. */
4380 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4381 {
4382 mmin = 0;
4383 mmax = nonzero;
4384 }
4385 else
4386 {
4387 rtx mmin_rtx, mmax_rtx;
4388 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4389
4390 mmin = INTVAL (mmin_rtx);
4391 mmax = INTVAL (mmax_rtx);
4392 if (sign)
4393 {
4394 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4395
4396 mmin >>= (sign_copies - 1);
4397 mmax >>= (sign_copies - 1);
4398 }
4399 }
4400
4401 switch (code)
4402 {
4403 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4404 case GEU:
4405 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4406 return const_true_rtx;
4407 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4408 return const0_rtx;
4409 break;
4410 case GE:
4411 if (val <= mmin)
4412 return const_true_rtx;
4413 if (val > mmax)
4414 return const0_rtx;
4415 break;
4416
4417 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4418 case LEU:
4419 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4420 return const_true_rtx;
4421 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4422 return const0_rtx;
4423 break;
4424 case LE:
4425 if (val >= mmax)
4426 return const_true_rtx;
4427 if (val < mmin)
4428 return const0_rtx;
4429 break;
4430
4431 case EQ:
4432 /* x == y is always false for y out of range. */
4433 if (val < mmin || val > mmax)
4434 return const0_rtx;
4435 break;
4436
4437 /* x > y is always false for y >= mmax, always true for y < mmin. */
4438 case GTU:
4439 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4440 return const0_rtx;
4441 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4442 return const_true_rtx;
4443 break;
4444 case GT:
4445 if (val >= mmax)
4446 return const0_rtx;
4447 if (val < mmin)
4448 return const_true_rtx;
4449 break;
4450
4451 /* x < y is always false for y <= mmin, always true for y > mmax. */
4452 case LTU:
4453 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4454 return const0_rtx;
4455 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4456 return const_true_rtx;
4457 break;
4458 case LT:
4459 if (val <= mmin)
4460 return const0_rtx;
4461 if (val > mmax)
4462 return const_true_rtx;
4463 break;
4464
4465 case NE:
4466 /* x != y is always true for y out of range. */
4467 if (val < mmin || val > mmax)
4468 return const_true_rtx;
4469 break;
4470
4471 default:
4472 break;
4473 }
4474 }
4475
4476 /* Optimize integer comparisons with zero. */
4477 if (trueop1 == const0_rtx)
4478 {
4479 /* Some addresses are known to be nonzero. We don't know
4480 their sign, but equality comparisons are known. */
4481 if (nonzero_address_p (trueop0))
4482 {
4483 if (code == EQ || code == LEU)
4484 return const0_rtx;
4485 if (code == NE || code == GTU)
4486 return const_true_rtx;
4487 }
4488
4489 /* See if the first operand is an IOR with a constant. If so, we
4490 may be able to determine the result of this comparison. */
4491 if (GET_CODE (op0) == IOR)
4492 {
4493 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4494 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4495 {
4496 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4497 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4498 && (INTVAL (inner_const)
4499 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4500
4501 switch (code)
4502 {
4503 case EQ:
4504 case LEU:
4505 return const0_rtx;
4506 case NE:
4507 case GTU:
4508 return const_true_rtx;
4509 case LT:
4510 case LE:
4511 if (has_sign)
4512 return const_true_rtx;
4513 break;
4514 case GT:
4515 case GE:
4516 if (has_sign)
4517 return const0_rtx;
4518 break;
4519 default:
4520 break;
4521 }
4522 }
4523 }
4524 }
4525
4526 /* Optimize comparison of ABS with zero. */
4527 if (trueop1 == CONST0_RTX (mode)
4528 && (GET_CODE (trueop0) == ABS
4529 || (GET_CODE (trueop0) == FLOAT_EXTEND
4530 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4531 {
4532 switch (code)
4533 {
4534 case LT:
4535 /* Optimize abs(x) < 0.0. */
4536 if (!HONOR_SNANS (mode)
4537 && (!INTEGRAL_MODE_P (mode)
4538 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4539 {
4540 if (INTEGRAL_MODE_P (mode)
4541 && (issue_strict_overflow_warning
4542 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4543 warning (OPT_Wstrict_overflow,
4544 ("assuming signed overflow does not occur when "
4545 "assuming abs (x) < 0 is false"));
4546 return const0_rtx;
4547 }
4548 break;
4549
4550 case GE:
4551 /* Optimize abs(x) >= 0.0. */
4552 if (!HONOR_NANS (mode)
4553 && (!INTEGRAL_MODE_P (mode)
4554 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4555 {
4556 if (INTEGRAL_MODE_P (mode)
4557 && (issue_strict_overflow_warning
4558 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4559 warning (OPT_Wstrict_overflow,
4560 ("assuming signed overflow does not occur when "
4561 "assuming abs (x) >= 0 is true"));
4562 return const_true_rtx;
4563 }
4564 break;
4565
4566 case UNGE:
4567 /* Optimize ! (abs(x) < 0.0). */
4568 return const_true_rtx;
4569
4570 default:
4571 break;
4572 }
4573 }
4574
4575 return 0;
4576 }
4577 \f
4578 /* Simplify CODE, an operation with result mode MODE and three operands,
4579 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4580 a constant. Return 0 if no simplifications is possible. */
4581
4582 rtx
4583 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4584 enum machine_mode op0_mode, rtx op0, rtx op1,
4585 rtx op2)
4586 {
4587 unsigned int width = GET_MODE_BITSIZE (mode);
4588
4589 /* VOIDmode means "infinite" precision. */
4590 if (width == 0)
4591 width = HOST_BITS_PER_WIDE_INT;
4592
4593 switch (code)
4594 {
4595 case SIGN_EXTRACT:
4596 case ZERO_EXTRACT:
4597 if (CONST_INT_P (op0)
4598 && CONST_INT_P (op1)
4599 && CONST_INT_P (op2)
4600 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4601 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4602 {
4603 /* Extracting a bit-field from a constant */
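/* E.g. with BITS_BIG_ENDIAN clear, (zero_extract:SI (const_int 0xa5)
   (const_int 4) (const_int 0)) folds to (const_int 5). */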
4604 HOST_WIDE_INT val = INTVAL (op0);
4605
4606 if (BITS_BIG_ENDIAN)
4607 val >>= (GET_MODE_BITSIZE (op0_mode)
4608 - INTVAL (op2) - INTVAL (op1));
4609 else
4610 val >>= INTVAL (op2);
4611
4612 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4613 {
4614 /* First zero-extend. */
4615 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4616 /* If desired, propagate sign bit. */
4617 if (code == SIGN_EXTRACT
4618 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4619 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4620 }
4621
4622 /* Clear the bits that don't belong in our mode,
4623 unless they and our sign bit are all one.
4624 So we get either a reasonable negative value or a reasonable
4625 unsigned value for this mode. */
4626 if (width < HOST_BITS_PER_WIDE_INT
4627 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4628 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4629 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4630
4631 return gen_int_mode (val, mode);
4632 }
4633 break;
4634
4635 case IF_THEN_ELSE:
4636 if (CONST_INT_P (op0))
4637 return op0 != const0_rtx ? op1 : op2;
4638
4639 /* Convert c ? a : a into "a". */
4640 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4641 return op1;
4642
4643 /* Convert a != b ? a : b into "a". */
4644 if (GET_CODE (op0) == NE
4645 && ! side_effects_p (op0)
4646 && ! HONOR_NANS (mode)
4647 && ! HONOR_SIGNED_ZEROS (mode)
4648 && ((rtx_equal_p (XEXP (op0, 0), op1)
4649 && rtx_equal_p (XEXP (op0, 1), op2))
4650 || (rtx_equal_p (XEXP (op0, 0), op2)
4651 && rtx_equal_p (XEXP (op0, 1), op1))))
4652 return op1;
4653
4654 /* Convert a == b ? a : b into "b". */
4655 if (GET_CODE (op0) == EQ
4656 && ! side_effects_p (op0)
4657 && ! HONOR_NANS (mode)
4658 && ! HONOR_SIGNED_ZEROS (mode)
4659 && ((rtx_equal_p (XEXP (op0, 0), op1)
4660 && rtx_equal_p (XEXP (op0, 1), op2))
4661 || (rtx_equal_p (XEXP (op0, 0), op2)
4662 && rtx_equal_p (XEXP (op0, 1), op1))))
4663 return op2;
4664
4665 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4666 {
4667 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4668 ? GET_MODE (XEXP (op0, 1))
4669 : GET_MODE (XEXP (op0, 0)));
4670 rtx temp;
4671
4672 /* Look for happy constants in op1 and op2. */
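/* E.g. when STORE_FLAG_VALUE is 1, (if_then_else (lt x y) (const_int 1)
   (const_int 0)) becomes (lt x y), and swapped constants use the
   reversed comparison. */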
4673 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4674 {
4675 HOST_WIDE_INT t = INTVAL (op1);
4676 HOST_WIDE_INT f = INTVAL (op2);
4677
4678 if (t == STORE_FLAG_VALUE && f == 0)
4679 code = GET_CODE (op0);
4680 else if (t == 0 && f == STORE_FLAG_VALUE)
4681 {
4682 enum rtx_code tmp;
4683 tmp = reversed_comparison_code (op0, NULL_RTX);
4684 if (tmp == UNKNOWN)
4685 break;
4686 code = tmp;
4687 }
4688 else
4689 break;
4690
4691 return simplify_gen_relational (code, mode, cmp_mode,
4692 XEXP (op0, 0), XEXP (op0, 1));
4693 }
4694
4695 if (cmp_mode == VOIDmode)
4696 cmp_mode = op0_mode;
4697 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4698 cmp_mode, XEXP (op0, 0),
4699 XEXP (op0, 1));
4700
4701 /* See if any simplifications were possible. */
4702 if (temp)
4703 {
4704 if (CONST_INT_P (temp))
4705 return temp == const0_rtx ? op2 : op1;
4706 else if (temp)
4707 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4708 }
4709 }
4710 break;
4711
4712 case VEC_MERGE:
4713 gcc_assert (GET_MODE (op0) == mode);
4714 gcc_assert (GET_MODE (op1) == mode);
4715 gcc_assert (VECTOR_MODE_P (mode));
4716 op2 = avoid_constant_pool_reference (op2);
4717 if (CONST_INT_P (op2))
4718 {
4719 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4720 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4721 int mask = (1 << n_elts) - 1;
4722
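/* Each bit of the OP2 mask selects an element from OP0 (bit set) or
   OP1 (bit clear), so an all-zero mask yields OP1 and an all-ones
   mask yields OP0. */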
4723 if (!(INTVAL (op2) & mask))
4724 return op1;
4725 if ((INTVAL (op2) & mask) == mask)
4726 return op0;
4727
4728 op0 = avoid_constant_pool_reference (op0);
4729 op1 = avoid_constant_pool_reference (op1);
4730 if (GET_CODE (op0) == CONST_VECTOR
4731 && GET_CODE (op1) == CONST_VECTOR)
4732 {
4733 rtvec v = rtvec_alloc (n_elts);
4734 unsigned int i;
4735
4736 for (i = 0; i < n_elts; i++)
4737 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4738 ? CONST_VECTOR_ELT (op0, i)
4739 : CONST_VECTOR_ELT (op1, i));
4740 return gen_rtx_CONST_VECTOR (mode, v);
4741 }
4742 }
4743 break;
4744
4745 default:
4746 gcc_unreachable ();
4747 }
4748
4749 return 0;
4750 }
4751
4752 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4753 or CONST_VECTOR,
4754 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4755
4756 Works by unpacking OP into a collection of 8-bit values
4757 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4758 and then repacking them again for OUTERMODE. */
4759
4760 static rtx
4761 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4762 enum machine_mode innermode, unsigned int byte)
4763 {
4764 /* We support up to 512-bit values (for V8DFmode). */
4765 enum {
4766 max_bitsize = 512,
4767 value_bit = 8,
4768 value_mask = (1 << value_bit) - 1
4769 };
4770 unsigned char value[max_bitsize / value_bit];
4771 int value_start;
4772 int i;
4773 int elem;
4774
4775 int num_elem;
4776 rtx * elems;
4777 int elem_bitsize;
4778 rtx result_s;
4779 rtvec result_v = NULL;
4780 enum mode_class outer_class;
4781 enum machine_mode outer_submode;
4782
4783 /* Some ports misuse CCmode. */
4784 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4785 return op;
4786
4787 /* We have no way to represent a complex constant at the rtl level. */
4788 if (COMPLEX_MODE_P (outermode))
4789 return NULL_RTX;
4790
4791 /* Unpack the value. */
4792
4793 if (GET_CODE (op) == CONST_VECTOR)
4794 {
4795 num_elem = CONST_VECTOR_NUNITS (op);
4796 elems = &CONST_VECTOR_ELT (op, 0);
4797 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4798 }
4799 else
4800 {
4801 num_elem = 1;
4802 elems = &op;
4803 elem_bitsize = max_bitsize;
4804 }
4805 /* If this asserts, it is too complicated; reducing value_bit may help. */
4806 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4807 /* I don't know how to handle endianness of sub-units. */
4808 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4809
4810 for (elem = 0; elem < num_elem; elem++)
4811 {
4812 unsigned char * vp;
4813 rtx el = elems[elem];
4814
4815 /* Vectors are kept in target memory order. (This is probably
4816 a mistake.) */
4817 {
4818 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4819 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4820 / BITS_PER_UNIT);
4821 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4822 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4823 unsigned bytele = (subword_byte % UNITS_PER_WORD
4824 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4825 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4826 }
4827
4828 switch (GET_CODE (el))
4829 {
4830 case CONST_INT:
4831 for (i = 0;
4832 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4833 i += value_bit)
4834 *vp++ = INTVAL (el) >> i;
4835 /* CONST_INTs are always logically sign-extended. */
4836 for (; i < elem_bitsize; i += value_bit)
4837 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4838 break;
4839
4840 case CONST_DOUBLE:
4841 if (GET_MODE (el) == VOIDmode)
4842 {
4843 /* If this triggers, someone should have generated a
4844 CONST_INT instead. */
4845 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4846
4847 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4848 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4849 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4850 {
4851 *vp++
4852 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4853 i += value_bit;
4854 }
4855 /* It shouldn't matter what's done here, so fill it with
4856 zero. */
4857 for (; i < elem_bitsize; i += value_bit)
4858 *vp++ = 0;
4859 }
4860 else
4861 {
4862 long tmp[max_bitsize / 32];
4863 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4864
4865 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4866 gcc_assert (bitsize <= elem_bitsize);
4867 gcc_assert (bitsize % value_bit == 0);
4868
4869 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4870 GET_MODE (el));
4871
4872 /* real_to_target produces its result in words affected by
4873 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4874 and use WORDS_BIG_ENDIAN instead; see the documentation
4875 of SUBREG in rtl.texi. */
4876 for (i = 0; i < bitsize; i += value_bit)
4877 {
4878 int ibase;
4879 if (WORDS_BIG_ENDIAN)
4880 ibase = bitsize - 1 - i;
4881 else
4882 ibase = i;
4883 *vp++ = tmp[ibase / 32] >> i % 32;
4884 }
4885
4886 /* It shouldn't matter what's done here, so fill it with
4887 zero. */
4888 for (; i < elem_bitsize; i += value_bit)
4889 *vp++ = 0;
4890 }
4891 break;
4892
4893 case CONST_FIXED:
4894 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4895 {
4896 for (i = 0; i < elem_bitsize; i += value_bit)
4897 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4898 }
4899 else
4900 {
4901 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4902 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4903 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4904 i += value_bit)
4905 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4906 >> (i - HOST_BITS_PER_WIDE_INT);
4907 for (; i < elem_bitsize; i += value_bit)
4908 *vp++ = 0;
4909 }
4910 break;
4911
4912 default:
4913 gcc_unreachable ();
4914 }
4915 }
4916
4917 /* Now, pick the right byte to start with. */
4918 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4919 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4920 will already have offset 0. */
4921 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4922 {
4923 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4924 - byte);
4925 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4926 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4927 byte = (subword_byte % UNITS_PER_WORD
4928 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4929 }
4930
4931 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4932 so if it's become negative it will instead be very large.) */
4933 gcc_assert (byte < GET_MODE_SIZE (innermode));
4934
4935 /* Convert from bytes to chunks of size value_bit. */
4936 value_start = byte * (BITS_PER_UNIT / value_bit);
4937
4938 /* Re-pack the value. */
4939
4940 if (VECTOR_MODE_P (outermode))
4941 {
4942 num_elem = GET_MODE_NUNITS (outermode);
4943 result_v = rtvec_alloc (num_elem);
4944 elems = &RTVEC_ELT (result_v, 0);
4945 outer_submode = GET_MODE_INNER (outermode);
4946 }
4947 else
4948 {
4949 num_elem = 1;
4950 elems = &result_s;
4951 outer_submode = outermode;
4952 }
4953
4954 outer_class = GET_MODE_CLASS (outer_submode);
4955 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4956
4957 gcc_assert (elem_bitsize % value_bit == 0);
4958 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4959
4960 for (elem = 0; elem < num_elem; elem++)
4961 {
4962 unsigned char *vp;
4963
4964 /* Vectors are stored in target memory order. (This is probably
4965 a mistake.) */
4966 {
4967 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4968 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4969 / BITS_PER_UNIT);
4970 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4971 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4972 unsigned bytele = (subword_byte % UNITS_PER_WORD
4973 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4974 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4975 }
4976
4977 switch (outer_class)
4978 {
4979 case MODE_INT:
4980 case MODE_PARTIAL_INT:
4981 {
4982 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4983
4984 for (i = 0;
4985 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4986 i += value_bit)
4987 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4988 for (; i < elem_bitsize; i += value_bit)
4989 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4990 << (i - HOST_BITS_PER_WIDE_INT));
4991
4992 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4993 know why. */
4994 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4995 elems[elem] = gen_int_mode (lo, outer_submode);
4996 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4997 elems[elem] = immed_double_const (lo, hi, outer_submode);
4998 else
4999 return NULL_RTX;
5000 }
5001 break;
5002
5003 case MODE_FLOAT:
5004 case MODE_DECIMAL_FLOAT:
5005 {
5006 REAL_VALUE_TYPE r;
5007 long tmp[max_bitsize / 32];
5008
5009 /* real_from_target wants its input in words affected by
5010 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5011 and use WORDS_BIG_ENDIAN instead; see the documentation
5012 of SUBREG in rtl.texi. */
5013 for (i = 0; i < max_bitsize / 32; i++)
5014 tmp[i] = 0;
5015 for (i = 0; i < elem_bitsize; i += value_bit)
5016 {
5017 int ibase;
5018 if (WORDS_BIG_ENDIAN)
5019 ibase = elem_bitsize - 1 - i;
5020 else
5021 ibase = i;
5022 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5023 }
5024
5025 real_from_target (&r, tmp, outer_submode);
5026 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5027 }
5028 break;
5029
5030 case MODE_FRACT:
5031 case MODE_UFRACT:
5032 case MODE_ACCUM:
5033 case MODE_UACCUM:
5034 {
5035 FIXED_VALUE_TYPE f;
5036 f.data.low = 0;
5037 f.data.high = 0;
5038 f.mode = outer_submode;
5039
5040 for (i = 0;
5041 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5042 i += value_bit)
5043 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5044 for (; i < elem_bitsize; i += value_bit)
5045 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5046 << (i - HOST_BITS_PER_WIDE_INT));
5047
5048 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5049 }
5050 break;
5051
5052 default:
5053 gcc_unreachable ();
5054 }
5055 }
5056 if (VECTOR_MODE_P (outermode))
5057 return gen_rtx_CONST_VECTOR (outermode, result_v);
5058 else
5059 return result_s;
5060 }
5061
5062 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
5063 Return 0 if no simplifications are possible. */
5064 rtx
5065 simplify_subreg (enum machine_mode outermode, rtx op,
5066 enum machine_mode innermode, unsigned int byte)
5067 {
5068 /* Little bit of sanity checking. */
5069 gcc_assert (innermode != VOIDmode);
5070 gcc_assert (outermode != VOIDmode);
5071 gcc_assert (innermode != BLKmode);
5072 gcc_assert (outermode != BLKmode);
5073
5074 gcc_assert (GET_MODE (op) == innermode
5075 || GET_MODE (op) == VOIDmode);
5076
5077 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5078 gcc_assert (byte < GET_MODE_SIZE (innermode));
5079
5080 if (outermode == innermode && !byte)
5081 return op;
5082
5083 if (CONST_INT_P (op)
5084 || GET_CODE (op) == CONST_DOUBLE
5085 || GET_CODE (op) == CONST_FIXED
5086 || GET_CODE (op) == CONST_VECTOR)
5087 return simplify_immed_subreg (outermode, op, innermode, byte);
5088
5089 /* Changing mode twice with SUBREG => just change it once,
5090 or not at all if changing back to op's starting mode. */
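/* For instance, with X a pseudo on a little-endian target,
   (subreg:QI (subreg:HI (reg:SI X) 0) 0) becomes (subreg:QI (reg:SI X) 0). */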
5091 if (GET_CODE (op) == SUBREG)
5092 {
5093 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5094 int final_offset = byte + SUBREG_BYTE (op);
5095 rtx newx;
5096
5097 if (outermode == innermostmode
5098 && byte == 0 && SUBREG_BYTE (op) == 0)
5099 return SUBREG_REG (op);
5100
5101 /* The SUBREG_BYTE represents the offset, as if the value were stored
5102 in memory. The irritating exception is a paradoxical subreg, where
5103 we define SUBREG_BYTE to be 0. On big-endian machines, this
5104 value should be negative. For a moment, undo this exception. */
5105 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5106 {
5107 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5108 if (WORDS_BIG_ENDIAN)
5109 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5110 if (BYTES_BIG_ENDIAN)
5111 final_offset += difference % UNITS_PER_WORD;
5112 }
5113 if (SUBREG_BYTE (op) == 0
5114 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5115 {
5116 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5117 if (WORDS_BIG_ENDIAN)
5118 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5119 if (BYTES_BIG_ENDIAN)
5120 final_offset += difference % UNITS_PER_WORD;
5121 }
5122
5123 /* See whether resulting subreg will be paradoxical. */
5124 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5125 {
5126 /* In nonparadoxical subregs we can't handle negative offsets. */
5127 if (final_offset < 0)
5128 return NULL_RTX;
5129 /* Bail out in case resulting subreg would be incorrect. */
5130 if (final_offset % GET_MODE_SIZE (outermode)
5131 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5132 return NULL_RTX;
5133 }
5134 else
5135 {
5136 int offset = 0;
5137 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5138
5139 /* In a paradoxical subreg, see if we are still looking at the lower part.
5140 If so, our SUBREG_BYTE will be 0. */
5141 if (WORDS_BIG_ENDIAN)
5142 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5143 if (BYTES_BIG_ENDIAN)
5144 offset += difference % UNITS_PER_WORD;
5145 if (offset == final_offset)
5146 final_offset = 0;
5147 else
5148 return NULL_RTX;
5149 }
5150
5151 /* Recurse for further possible simplifications. */
5152 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5153 final_offset);
5154 if (newx)
5155 return newx;
5156 if (validate_subreg (outermode, innermostmode,
5157 SUBREG_REG (op), final_offset))
5158 {
5159 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5160 if (SUBREG_PROMOTED_VAR_P (op)
5161 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5162 && GET_MODE_CLASS (outermode) == MODE_INT
5163 && IN_RANGE (GET_MODE_SIZE (outermode),
5164 GET_MODE_SIZE (innermode),
5165 GET_MODE_SIZE (innermostmode))
5166 && subreg_lowpart_p (newx))
5167 {
5168 SUBREG_PROMOTED_VAR_P (newx) = 1;
5169 SUBREG_PROMOTED_UNSIGNED_SET
5170 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5171 }
5172 return newx;
5173 }
5174 return NULL_RTX;
5175 }
5176
5177 /* Merge implicit and explicit truncations. */
5178
5179 if (GET_CODE (op) == TRUNCATE
5180 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5181 && subreg_lowpart_offset (outermode, innermode) == byte)
5182 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5183 GET_MODE (XEXP (op, 0)));
5184
5185 /* SUBREG of a hard register => just change the register number
5186 and/or mode. If the hard register is not valid in that mode,
5187 suppress this simplification. If the hard register is the stack,
5188 frame, or argument pointer, leave this as a SUBREG. */
5189
5190 if (REG_P (op) && HARD_REGISTER_P (op))
5191 {
5192 unsigned int regno, final_regno;
5193
5194 regno = REGNO (op);
5195 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5196 if (HARD_REGISTER_NUM_P (final_regno))
5197 {
5198 rtx x;
5199 int final_offset = byte;
5200
5201 /* Adjust offset for paradoxical subregs. */
5202 if (byte == 0
5203 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5204 {
5205 int difference = (GET_MODE_SIZE (innermode)
5206 - GET_MODE_SIZE (outermode));
5207 if (WORDS_BIG_ENDIAN)
5208 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5209 if (BYTES_BIG_ENDIAN)
5210 final_offset += difference % UNITS_PER_WORD;
5211 }
5212
5213 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5214
5215 /* Propagate the original regno. We don't have any way to specify
5216 the offset inside the original regno, so do so only for the lowpart.
5217 The information is used only by alias analysis, which cannot
5218 grok partial registers anyway. */
5219
5220 if (subreg_lowpart_offset (outermode, innermode) == byte)
5221 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5222 return x;
5223 }
5224 }
5225
5226 /* If we have a SUBREG of a register that we are replacing and we are
5227 replacing it with a MEM, make a new MEM and try replacing the
5228 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5229 or if we would be widening it. */
5230
5231 if (MEM_P (op)
5232 && ! mode_dependent_address_p (XEXP (op, 0))
5233 /* Allow splitting of volatile memory references in case we don't
5234 have an instruction to move the whole thing. */
5235 && (! MEM_VOLATILE_P (op)
5236 || ! have_insn_for (SET, innermode))
5237 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5238 return adjust_address_nv (op, outermode, byte);
5239
5240 /* Handle complex values represented as CONCAT
5241 of real and imaginary part. */
5242 if (GET_CODE (op) == CONCAT)
5243 {
5244 unsigned int part_size, final_offset;
5245 rtx part, res;
5246
5247 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5248 if (byte < part_size)
5249 {
5250 part = XEXP (op, 0);
5251 final_offset = byte;
5252 }
5253 else
5254 {
5255 part = XEXP (op, 1);
5256 final_offset = byte - part_size;
5257 }
5258
5259 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5260 return NULL_RTX;
5261
5262 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5263 if (res)
5264 return res;
5265 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5266 return gen_rtx_SUBREG (outermode, part, final_offset);
5267 return NULL_RTX;
5268 }
5269
5270 /* Optimize SUBREG truncations of zero and sign extended values. */
5271 if ((GET_CODE (op) == ZERO_EXTEND
5272 || GET_CODE (op) == SIGN_EXTEND)
5273 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5274 {
5275 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5276
5277 /* If we're requesting the lowpart of a zero or sign extension,
5278 there are three possibilities. If the outermode is the same
5279 as the origmode, we can omit both the extension and the subreg.
5280 If the outermode is not larger than the origmode, we can apply
5281 the truncation without the extension. Finally, if the outermode
5282 is larger than the origmode, but both are integer modes, we
5283 can just extend to the appropriate mode. */
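/* E.g. (subreg:HI (zero_extend:SI (reg:HI x)) 0) with BITPOS 0 folds
   straight back to (reg:HI x). */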
5284 if (bitpos == 0)
5285 {
5286 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5287 if (outermode == origmode)
5288 return XEXP (op, 0);
5289 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5290 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5291 subreg_lowpart_offset (outermode,
5292 origmode));
5293 if (SCALAR_INT_MODE_P (outermode))
5294 return simplify_gen_unary (GET_CODE (op), outermode,
5295 XEXP (op, 0), origmode);
5296 }
5297
5298 /* A SUBREG resulting from a zero extension may fold to zero if
5299 it extracts bits higher than the ZERO_EXTEND's source bits. */
5300 if (GET_CODE (op) == ZERO_EXTEND
5301 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5302 return CONST0_RTX (outermode);
5303 }
5304
5305 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5306 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5307 the outer subreg is effectively a truncation to the original mode. */
5308 if ((GET_CODE (op) == LSHIFTRT
5309 || GET_CODE (op) == ASHIFTRT)
5310 && SCALAR_INT_MODE_P (outermode)
5311 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
5312 to avoid the possibility that an outer LSHIFTRT shifts by more
5313 than the sign extension's sign_bit_copies and introduces zeros
5314 into the high bits of the result. */
5315 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5316 && CONST_INT_P (XEXP (op, 1))
5317 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5318 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5319 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5320 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5321 return simplify_gen_binary (ASHIFTRT, outermode,
5322 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5323
5324 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5325 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5326 the outer subreg is effectively a truncation to the original mode. */
5327 if ((GET_CODE (op) == LSHIFTRT
5328 || GET_CODE (op) == ASHIFTRT)
5329 && SCALAR_INT_MODE_P (outermode)
5330 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5331 && CONST_INT_P (XEXP (op, 1))
5332 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5333 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5334 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5335 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5336 return simplify_gen_binary (LSHIFTRT, outermode,
5337 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5338
5339 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5340 (ashift:QI (x:QI) C), where C is a suitable small constant and
5341 the outer subreg is effectively a truncation to the original mode. */
5342 if (GET_CODE (op) == ASHIFT
5343 && SCALAR_INT_MODE_P (outermode)
5344 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5345 && CONST_INT_P (XEXP (op, 1))
5346 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5347 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5348 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5349 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5350 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5351 return simplify_gen_binary (ASHIFT, outermode,
5352 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5353
5354 /* Recognize a word extraction from a multi-word subreg. */
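/* E.g. on a 32-bit little-endian target with X a pseudo,
   (subreg:SI (lshiftrt:DI (reg:DI X) (const_int 32)) 0) becomes
   (subreg:SI (reg:DI X) 4). */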
5355 if ((GET_CODE (op) == LSHIFTRT
5356 || GET_CODE (op) == ASHIFTRT)
5357 && SCALAR_INT_MODE_P (outermode)
5358 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5359 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5360 && CONST_INT_P (XEXP (op, 1))
5361 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5362 && INTVAL (XEXP (op, 1)) >= 0
5363 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5364 && byte == subreg_lowpart_offset (outermode, innermode))
5365 {
5366 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5367 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5368 (WORDS_BIG_ENDIAN
5369 ? byte - shifted_bytes
5370 : byte + shifted_bytes));
5371 }
5372
5373 return NULL_RTX;
5374 }
5375
5376 /* Make a SUBREG operation or equivalent if it folds. */
5377
5378 rtx
5379 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5380 enum machine_mode innermode, unsigned int byte)
5381 {
5382 rtx newx;
5383
5384 newx = simplify_subreg (outermode, op, innermode, byte);
5385 if (newx)
5386 return newx;
5387
5388 if (GET_CODE (op) == SUBREG
5389 || GET_CODE (op) == CONCAT
5390 || GET_MODE (op) == VOIDmode)
5391 return NULL_RTX;
5392
5393 if (validate_subreg (outermode, innermode, op, byte))
5394 return gen_rtx_SUBREG (outermode, op, byte);
5395
5396 return NULL_RTX;
5397 }
5398
5399 /* Simplify X, an rtx expression.
5400
5401 Return the simplified expression or NULL if no simplifications
5402 were possible.
5403
5404 This is the preferred entry point into the simplification routines;
5405 however, we still allow passes to call the more specific routines.
5406
5407 Right now GCC has three (yes, three) major bodies of RTL simplification
5408 code that need to be unified.
5409
5410 1. fold_rtx in cse.c. This code uses various CSE specific
5411 information to aid in RTL simplification.
5412
5413 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5414 it uses combine specific information to aid in RTL
5415 simplification.
5416
5417 3. The routines in this file.
5418
5419
5420 Long term we want to only have one body of simplification code; to
5421 get to that state I recommend the following steps:
5422
5423 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5424 which do not rely on pass-dependent state into these routines.
5425
5426 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5427 use this routine whenever possible.
5428
5429 3. Allow for pass dependent state to be provided to these
5430 routines and add simplifications based on the pass dependent
5431 state. Remove code from cse.c & combine.c that becomes
5432 redundant/dead.
5433
5434 It will take time, but ultimately the compiler will be easier to
5435 maintain and improve. It's totally silly that when we add a
5436 simplification it needs to be added to 4 places (3 for RTL
5437 simplification and 1 for tree simplification). */
5438
5439 rtx
5440 simplify_rtx (const_rtx x)
5441 {
5442 const enum rtx_code code = GET_CODE (x);
5443 const enum machine_mode mode = GET_MODE (x);
5444
5445 switch (GET_RTX_CLASS (code))
5446 {
5447 case RTX_UNARY:
5448 return simplify_unary_operation (code, mode,
5449 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5450 case RTX_COMM_ARITH:
5451 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5452 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5453
5454 /* Fall through.... */
5455
5456 case RTX_BIN_ARITH:
5457 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5458
5459 case RTX_TERNARY:
5460 case RTX_BITFIELD_OPS:
5461 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5462 XEXP (x, 0), XEXP (x, 1),
5463 XEXP (x, 2));
5464
5465 case RTX_COMPARE:
5466 case RTX_COMM_COMPARE:
5467 return simplify_relational_operation (code, mode,
5468 ((GET_MODE (XEXP (x, 0))
5469 != VOIDmode)
5470 ? GET_MODE (XEXP (x, 0))
5471 : GET_MODE (XEXP (x, 1))),
5472 XEXP (x, 0),
5473 XEXP (x, 1));
5474
5475 case RTX_EXTRA:
5476 if (code == SUBREG)
5477 return simplify_subreg (mode, SUBREG_REG (x),
5478 GET_MODE (SUBREG_REG (x)),
5479 SUBREG_BYTE (x));
5480 break;
5481
5482 case RTX_OBJ:
5483 if (code == LO_SUM)
5484 {
5485 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5486 if (GET_CODE (XEXP (x, 0)) == HIGH
5487 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5488 return XEXP (x, 1);
5489 }
5490 break;
5491
5492 default:
5493 break;
5494 }
5495 return NULL;
5496 }