1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "diagnostic-core.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
51
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
76
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
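 	  /* Mask off any bits above WIDTH, then require that exactly the
 	     most significant bit within WIDTH is set.  */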
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 if (GET_MODE (x) == BLKmode)
162 return x;
163
164 addr = XEXP (x, 0);
165
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
168
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 {
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
176 }
177
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
180
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
185 {
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
188
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
193 {
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
197 }
198 else
199 return c;
200 }
201
202 return x;
203 }
204 \f
205 /* Simplify a MEM based on its attributes. This is the default
206 delegitimize_address target hook, and it's recommended that every
207 overrider call it. */
208
209 rtx
210 delegitimize_mem_from_attrs (rtx x)
211 {
212 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
213 use their base addresses as equivalent. */
214 if (MEM_P (x)
215 && MEM_EXPR (x)
216 && MEM_OFFSET (x))
217 {
218 tree decl = MEM_EXPR (x);
219 enum machine_mode mode = GET_MODE (x);
220 HOST_WIDE_INT offset = 0;
221
222 switch (TREE_CODE (decl))
223 {
224 default:
225 decl = NULL;
226 break;
227
228 case VAR_DECL:
229 break;
230
231 case ARRAY_REF:
232 case ARRAY_RANGE_REF:
233 case COMPONENT_REF:
234 case BIT_FIELD_REF:
235 case REALPART_EXPR:
236 case IMAGPART_EXPR:
237 case VIEW_CONVERT_EXPR:
238 {
239 HOST_WIDE_INT bitsize, bitpos;
240 tree toffset;
241 int unsignedp = 0, volatilep = 0;
242
243 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
244 &mode, &unsignedp, &volatilep, false);
245 if (bitsize != GET_MODE_BITSIZE (mode)
246 || (bitpos % BITS_PER_UNIT)
247 || (toffset && !host_integerp (toffset, 0)))
248 decl = NULL;
249 else
250 {
251 offset += bitpos / BITS_PER_UNIT;
252 if (toffset)
253 offset += TREE_INT_CST_LOW (toffset);
254 }
255 break;
256 }
257 }
258
259 if (decl
260 && mode == GET_MODE (x)
261 && TREE_CODE (decl) == VAR_DECL
262 && (TREE_STATIC (decl)
263 || DECL_THREAD_LOCAL_P (decl))
264 && DECL_RTL_SET_P (decl)
265 && MEM_P (DECL_RTL (decl)))
266 {
267 rtx newx;
268
269 offset += INTVAL (MEM_OFFSET (x));
270
271 newx = DECL_RTL (decl);
272
273 if (MEM_P (newx))
274 {
275 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
276
277 /* Avoid creating a new MEM needlessly if we already had
278 the same address. We do if there's no OFFSET and the
279 old address X is identical to NEWX, or if X is of the
280 form (plus NEWX OFFSET), or the NEWX is of the form
281 (plus Y (const_int Z)) and X is that with the offset
282 added: (plus Y (const_int Z+OFFSET)). */
283 if (!((offset == 0
284 || (GET_CODE (o) == PLUS
285 && GET_CODE (XEXP (o, 1)) == CONST_INT
286 && (offset == INTVAL (XEXP (o, 1))
287 || (GET_CODE (n) == PLUS
288 && GET_CODE (XEXP (n, 1)) == CONST_INT
289 && (INTVAL (XEXP (n, 1)) + offset
290 == INTVAL (XEXP (o, 1)))
291 && (n = XEXP (n, 0))))
292 && (o = XEXP (o, 0))))
293 && rtx_equal_p (o, n)))
294 x = adjust_address_nv (newx, mode, offset);
295 }
296 else if (GET_MODE (x) == GET_MODE (newx)
297 && offset == 0)
298 x = newx;
299 }
300 }
301
302 return x;
303 }
304 \f
305 /* Make a unary operation by first seeing if it folds and otherwise making
306 the specified operation. */
307
308 rtx
309 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
310 enum machine_mode op_mode)
311 {
312 rtx tem;
313
314 /* If this simplifies, use it. */
315 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
316 return tem;
317
318 return gen_rtx_fmt_e (code, mode, op);
319 }
320
321 /* Likewise for ternary operations. */
322
323 rtx
324 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
325 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
326 {
327 rtx tem;
328
329 /* If this simplifies, use it. */
330 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
331 op0, op1, op2)))
332 return tem;
333
334 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
335 }
336
337 /* Likewise, for relational operations.
338 CMP_MODE specifies mode comparison is done in. */
339
340 rtx
341 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
342 enum machine_mode cmp_mode, rtx op0, rtx op1)
343 {
344 rtx tem;
345
346 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
347 op0, op1)))
348 return tem;
349
350 return gen_rtx_fmt_ee (code, mode, op0, op1);
351 }
352 \f
353 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
354 and simplify the result. If FN is non-NULL, call this callback on each
355 X, if it returns non-NULL, replace X with its return value and simplify the
356 result. */
357
358 rtx
359 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
360 rtx (*fn) (rtx, const_rtx, void *), void *data)
361 {
362 enum rtx_code code = GET_CODE (x);
363 enum machine_mode mode = GET_MODE (x);
364 enum machine_mode op_mode;
365 const char *fmt;
366 rtx op0, op1, op2, newx, op;
367 rtvec vec, newvec;
368 int i, j;
369
370 if (__builtin_expect (fn != NULL, 0))
371 {
372 newx = fn (x, old_rtx, data);
373 if (newx)
374 return newx;
375 }
376 else if (rtx_equal_p (x, old_rtx))
377 return copy_rtx ((rtx) data);
378
379 switch (GET_RTX_CLASS (code))
380 {
381 case RTX_UNARY:
382 op0 = XEXP (x, 0);
383 op_mode = GET_MODE (op0);
384 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
385 if (op0 == XEXP (x, 0))
386 return x;
387 return simplify_gen_unary (code, mode, op0, op_mode);
388
389 case RTX_BIN_ARITH:
390 case RTX_COMM_ARITH:
391 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
392 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
393 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
394 return x;
395 return simplify_gen_binary (code, mode, op0, op1);
396
397 case RTX_COMPARE:
398 case RTX_COMM_COMPARE:
399 op0 = XEXP (x, 0);
400 op1 = XEXP (x, 1);
401 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
402 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
403 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
404 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
405 return x;
406 return simplify_gen_relational (code, mode, op_mode, op0, op1);
407
408 case RTX_TERNARY:
409 case RTX_BITFIELD_OPS:
410 op0 = XEXP (x, 0);
411 op_mode = GET_MODE (op0);
412 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
413 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
414 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
415 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
416 return x;
417 if (op_mode == VOIDmode)
418 op_mode = GET_MODE (op0);
419 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
420
421 case RTX_EXTRA:
422 if (code == SUBREG)
423 {
424 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
425 if (op0 == SUBREG_REG (x))
426 return x;
427 op0 = simplify_gen_subreg (GET_MODE (x), op0,
428 GET_MODE (SUBREG_REG (x)),
429 SUBREG_BYTE (x));
430 return op0 ? op0 : x;
431 }
432 break;
433
434 case RTX_OBJ:
435 if (code == MEM)
436 {
437 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
438 if (op0 == XEXP (x, 0))
439 return x;
440 return replace_equiv_address_nv (x, op0);
441 }
442 else if (code == LO_SUM)
443 {
444 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
445 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
446
447 /* (lo_sum (high x) x) -> x */
448 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
449 return op1;
450
451 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
452 return x;
453 return gen_rtx_LO_SUM (mode, op0, op1);
454 }
455 break;
456
457 default:
458 break;
459 }
460
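 	  /* X was not handled by one of the classes above.  Walk its operands
 	     generically, copying X and any contained rtvecs lazily, i.e. only
 	     when an operand actually changes.  */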
461 newx = x;
462 fmt = GET_RTX_FORMAT (code);
463 for (i = 0; fmt[i]; i++)
464 switch (fmt[i])
465 {
466 case 'E':
467 vec = XVEC (x, i);
468 newvec = XVEC (newx, i);
469 for (j = 0; j < GET_NUM_ELEM (vec); j++)
470 {
471 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
472 old_rtx, fn, data);
473 if (op != RTVEC_ELT (vec, j))
474 {
475 if (newvec == vec)
476 {
477 newvec = shallow_copy_rtvec (vec);
478 if (x == newx)
479 newx = shallow_copy_rtx (x);
480 XVEC (newx, i) = newvec;
481 }
482 RTVEC_ELT (newvec, j) = op;
483 }
484 }
485 break;
486
487 case 'e':
488 if (XEXP (x, i))
489 {
490 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
491 if (op != XEXP (x, i))
492 {
493 if (x == newx)
494 newx = shallow_copy_rtx (x);
495 XEXP (newx, i) = op;
496 }
497 }
498 break;
499 }
500 return newx;
501 }
502
503 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
504 resulting RTX. Return a new RTX which is as simplified as possible. */
505
506 rtx
507 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
508 {
509 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
510 }
511 \f
512 /* Try to simplify a unary operation CODE whose output mode is to be
513 MODE with input operand OP whose mode was originally OP_MODE.
514 Return zero if no simplification can be made. */
515 rtx
516 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
517 rtx op, enum machine_mode op_mode)
518 {
519 rtx trueop, tem;
520
521 trueop = avoid_constant_pool_reference (op);
522
523 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
524 if (tem)
525 return tem;
526
527 return simplify_unary_operation_1 (code, mode, op);
528 }
529
530 /* Perform some simplifications we can do even if the operands
531 aren't constant. */
532 static rtx
533 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
534 {
535 enum rtx_code reversed;
536 rtx temp;
537
538 switch (code)
539 {
540 case NOT:
541 /* (not (not X)) == X. */
542 if (GET_CODE (op) == NOT)
543 return XEXP (op, 0);
544
545 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
546 comparison is all ones. */
547 if (COMPARISON_P (op)
548 && (mode == BImode || STORE_FLAG_VALUE == -1)
549 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
550 return simplify_gen_relational (reversed, mode, VOIDmode,
551 XEXP (op, 0), XEXP (op, 1));
552
553 /* (not (plus X -1)) can become (neg X). */
554 if (GET_CODE (op) == PLUS
555 && XEXP (op, 1) == constm1_rtx)
556 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557
558 /* Similarly, (not (neg X)) is (plus X -1). */
559 if (GET_CODE (op) == NEG)
560 return plus_constant (XEXP (op, 0), -1);
561
562 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
563 if (GET_CODE (op) == XOR
564 && CONST_INT_P (XEXP (op, 1))
565 && (temp = simplify_unary_operation (NOT, mode,
566 XEXP (op, 1), mode)) != 0)
567 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
568
569 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
570 if (GET_CODE (op) == PLUS
571 && CONST_INT_P (XEXP (op, 1))
572 && mode_signbit_p (mode, XEXP (op, 1))
573 && (temp = simplify_unary_operation (NOT, mode,
574 XEXP (op, 1), mode)) != 0)
575 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
576
577
578 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
579 operands other than 1, but that is not valid. We could do a
580 similar simplification for (not (lshiftrt C X)) where C is
581 just the sign bit, but this doesn't seem common enough to
582 bother with. */
583 if (GET_CODE (op) == ASHIFT
584 && XEXP (op, 0) == const1_rtx)
585 {
586 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
587 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
588 }
589
590 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
591 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
592 so we can perform the above simplification. */
593
594 if (STORE_FLAG_VALUE == -1
595 && GET_CODE (op) == ASHIFTRT
596 	  && CONST_INT_P (XEXP (op, 1))
597 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
598 return simplify_gen_relational (GE, mode, VOIDmode,
599 XEXP (op, 0), const0_rtx);
600
601
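 	      /* Likewise, (not (subreg (ashift 1 X) 0)), where the subreg is
 	         a narrowing lowpart, is (subreg (rotate ~1 X) 0).  */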
602 if (GET_CODE (op) == SUBREG
603 && subreg_lowpart_p (op)
604 && (GET_MODE_SIZE (GET_MODE (op))
605 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
606 && GET_CODE (SUBREG_REG (op)) == ASHIFT
607 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
608 {
609 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
610 rtx x;
611
612 x = gen_rtx_ROTATE (inner_mode,
613 simplify_gen_unary (NOT, inner_mode, const1_rtx,
614 inner_mode),
615 XEXP (SUBREG_REG (op), 1));
616 return rtl_hooks.gen_lowpart_no_emit (mode, x);
617 }
618
619 /* Apply De Morgan's laws to reduce number of patterns for machines
620 with negating logical insns (and-not, nand, etc.). If result has
621 only one NOT, put it first, since that is how the patterns are
622 coded. */
623
624 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
625 {
626 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
627 enum machine_mode op_mode;
628
629 op_mode = GET_MODE (in1);
630 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
631
632 op_mode = GET_MODE (in2);
633 if (op_mode == VOIDmode)
634 op_mode = mode;
635 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
636
637 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
638 {
639 rtx tem = in2;
640 in2 = in1; in1 = tem;
641 }
642
643 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
644 mode, in1, in2);
645 }
646 break;
647
648 case NEG:
649 /* (neg (neg X)) == X. */
650 if (GET_CODE (op) == NEG)
651 return XEXP (op, 0);
652
653 /* (neg (plus X 1)) can become (not X). */
654 if (GET_CODE (op) == PLUS
655 && XEXP (op, 1) == const1_rtx)
656 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
657
658 /* Similarly, (neg (not X)) is (plus X 1). */
659 if (GET_CODE (op) == NOT)
660 return plus_constant (XEXP (op, 0), 1);
661
662 /* (neg (minus X Y)) can become (minus Y X). This transformation
663 isn't safe for modes with signed zeros, since if X and Y are
664 both +0, (minus Y X) is the same as (minus X Y). If the
665 rounding mode is towards +infinity (or -infinity) then the two
666 expressions will be rounded differently. */
667 if (GET_CODE (op) == MINUS
668 && !HONOR_SIGNED_ZEROS (mode)
669 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
670 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
671
672 if (GET_CODE (op) == PLUS
673 && !HONOR_SIGNED_ZEROS (mode)
674 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
675 {
676 /* (neg (plus A C)) is simplified to (minus -C A). */
677 if (CONST_INT_P (XEXP (op, 1))
678 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
679 {
680 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
681 if (temp)
682 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
683 }
684
685 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
686 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
687 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
688 }
689
690 /* (neg (mult A B)) becomes (mult (neg A) B).
691 This works even for floating-point values. */
692 if (GET_CODE (op) == MULT
693 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
694 {
695 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
696 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
697 }
698
699 /* NEG commutes with ASHIFT since it is multiplication. Only do
700 this if we can then eliminate the NEG (e.g., if the operand
701 is a constant). */
702 if (GET_CODE (op) == ASHIFT)
703 {
704 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
705 if (temp)
706 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
707 }
708
709 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
710 C is equal to the width of MODE minus 1. */
711 if (GET_CODE (op) == ASHIFTRT
712 && CONST_INT_P (XEXP (op, 1))
713 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
714 return simplify_gen_binary (LSHIFTRT, mode,
715 XEXP (op, 0), XEXP (op, 1));
716
717 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
718 C is equal to the width of MODE minus 1. */
719 if (GET_CODE (op) == LSHIFTRT
720 && CONST_INT_P (XEXP (op, 1))
721 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
722 return simplify_gen_binary (ASHIFTRT, mode,
723 XEXP (op, 0), XEXP (op, 1));
724
725 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
726 if (GET_CODE (op) == XOR
727 && XEXP (op, 1) == const1_rtx
728 && nonzero_bits (XEXP (op, 0), mode) == 1)
729 return plus_constant (XEXP (op, 0), -1);
730
731 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
732 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
733 if (GET_CODE (op) == LT
734 && XEXP (op, 1) == const0_rtx
735 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
736 {
737 enum machine_mode inner = GET_MODE (XEXP (op, 0));
738 int isize = GET_MODE_BITSIZE (inner);
739 if (STORE_FLAG_VALUE == 1)
740 {
741 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
742 GEN_INT (isize - 1));
743 if (mode == inner)
744 return temp;
745 if (GET_MODE_BITSIZE (mode) > isize)
746 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
747 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
748 }
749 else if (STORE_FLAG_VALUE == -1)
750 {
751 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
752 GEN_INT (isize - 1));
753 if (mode == inner)
754 return temp;
755 if (GET_MODE_BITSIZE (mode) > isize)
756 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
757 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
758 }
759 }
760 break;
761
762 case TRUNCATE:
763 /* We can't handle truncation to a partial integer mode here
764 because we don't know the real bitsize of the partial
765 integer mode. */
766 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
767 break;
768
769 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
770 if ((GET_CODE (op) == SIGN_EXTEND
771 || GET_CODE (op) == ZERO_EXTEND)
772 && GET_MODE (XEXP (op, 0)) == mode)
773 return XEXP (op, 0);
774
775 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
776 (OP:SI foo:SI) if OP is NEG or ABS. */
777 if ((GET_CODE (op) == ABS
778 || GET_CODE (op) == NEG)
779 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
780 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
781 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
782 return simplify_gen_unary (GET_CODE (op), mode,
783 XEXP (XEXP (op, 0), 0), mode);
784
785 /* (truncate:A (subreg:B (truncate:C X) 0)) is
786 (truncate:A X). */
787 if (GET_CODE (op) == SUBREG
788 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
789 && subreg_lowpart_p (op))
790 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
791 GET_MODE (XEXP (SUBREG_REG (op), 0)));
792
793 /* If we know that the value is already truncated, we can
794 replace the TRUNCATE with a SUBREG. Note that this is also
795 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
796 	 modes; we just have to apply a different definition for
797 truncation. But don't do this for an (LSHIFTRT (MULT ...))
798 since this will cause problems with the umulXi3_highpart
799 patterns. */
800 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
801 GET_MODE_BITSIZE (GET_MODE (op)))
802 ? (num_sign_bit_copies (op, GET_MODE (op))
803 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
804 - GET_MODE_BITSIZE (mode)))
805 : truncated_to_mode (mode, op))
806 && ! (GET_CODE (op) == LSHIFTRT
807 && GET_CODE (XEXP (op, 0)) == MULT))
808 return rtl_hooks.gen_lowpart_no_emit (mode, op);
809
810 /* A truncate of a comparison can be replaced with a subreg if
811 STORE_FLAG_VALUE permits. This is like the previous test,
812 but it works even if the comparison is done in a mode larger
813 than HOST_BITS_PER_WIDE_INT. */
814 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
815 && COMPARISON_P (op)
816 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
817 return rtl_hooks.gen_lowpart_no_emit (mode, op);
818 break;
819
820 case FLOAT_TRUNCATE:
821 if (DECIMAL_FLOAT_MODE_P (mode))
822 break;
823
824 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
825 if (GET_CODE (op) == FLOAT_EXTEND
826 && GET_MODE (XEXP (op, 0)) == mode)
827 return XEXP (op, 0);
828
829 /* (float_truncate:SF (float_truncate:DF foo:XF))
830 = (float_truncate:SF foo:XF).
831 This may eliminate double rounding, so it is unsafe.
832
833 (float_truncate:SF (float_extend:XF foo:DF))
834 = (float_truncate:SF foo:DF).
835
836 (float_truncate:DF (float_extend:XF foo:SF))
837 	 = (float_extend:DF foo:SF).  */
838 if ((GET_CODE (op) == FLOAT_TRUNCATE
839 && flag_unsafe_math_optimizations)
840 || GET_CODE (op) == FLOAT_EXTEND)
841 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
842 0)))
843 > GET_MODE_SIZE (mode)
844 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
845 mode,
846 XEXP (op, 0), mode);
847
848 /* (float_truncate (float x)) is (float x) */
849 if (GET_CODE (op) == FLOAT
850 && (flag_unsafe_math_optimizations
851 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
852 && ((unsigned)significand_size (GET_MODE (op))
853 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
854 - num_sign_bit_copies (XEXP (op, 0),
855 GET_MODE (XEXP (op, 0))))))))
856 return simplify_gen_unary (FLOAT, mode,
857 XEXP (op, 0),
858 GET_MODE (XEXP (op, 0)));
859
860 	 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
861 (OP:SF foo:SF) if OP is NEG or ABS. */
862 if ((GET_CODE (op) == ABS
863 || GET_CODE (op) == NEG)
864 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
865 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
866 return simplify_gen_unary (GET_CODE (op), mode,
867 XEXP (XEXP (op, 0), 0), mode);
868
869 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
870 is (float_truncate:SF x). */
871 if (GET_CODE (op) == SUBREG
872 && subreg_lowpart_p (op)
873 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
874 return SUBREG_REG (op);
875 break;
876
877 case FLOAT_EXTEND:
878 if (DECIMAL_FLOAT_MODE_P (mode))
879 break;
880
881 /* (float_extend (float_extend x)) is (float_extend x)
882
883 (float_extend (float x)) is (float x) assuming that double
884 rounding can't happen.
885 */
886 if (GET_CODE (op) == FLOAT_EXTEND
887 || (GET_CODE (op) == FLOAT
888 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
889 && ((unsigned)significand_size (GET_MODE (op))
890 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
891 - num_sign_bit_copies (XEXP (op, 0),
892 GET_MODE (XEXP (op, 0)))))))
893 return simplify_gen_unary (GET_CODE (op), mode,
894 XEXP (op, 0),
895 GET_MODE (XEXP (op, 0)));
896
897 break;
898
899 case ABS:
900 /* (abs (neg <foo>)) -> (abs <foo>) */
901 if (GET_CODE (op) == NEG)
902 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
903 GET_MODE (XEXP (op, 0)));
904
905 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
906 do nothing. */
907 if (GET_MODE (op) == VOIDmode)
908 break;
909
910 /* If operand is something known to be positive, ignore the ABS. */
911 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
912 || ((GET_MODE_BITSIZE (GET_MODE (op))
913 <= HOST_BITS_PER_WIDE_INT)
914 && ((nonzero_bits (op, GET_MODE (op))
915 & ((unsigned HOST_WIDE_INT) 1
916 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
917 == 0)))
918 return op;
919
920 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
921 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
922 return gen_rtx_NEG (mode, op);
923
924 break;
925
926 case FFS:
927 /* (ffs (*_extend <X>)) = (ffs <X>) */
928 if (GET_CODE (op) == SIGN_EXTEND
929 || GET_CODE (op) == ZERO_EXTEND)
930 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
931 GET_MODE (XEXP (op, 0)));
932 break;
933
934 case POPCOUNT:
935 switch (GET_CODE (op))
936 {
937 case BSWAP:
938 case ZERO_EXTEND:
939 /* (popcount (zero_extend <X>)) = (popcount <X>) */
940 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
941 GET_MODE (XEXP (op, 0)));
942
943 case ROTATE:
944 case ROTATERT:
945 /* Rotations don't affect popcount. */
946 if (!side_effects_p (XEXP (op, 1)))
947 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
948 GET_MODE (XEXP (op, 0)));
949 break;
950
951 default:
952 break;
953 }
954 break;
955
956 case PARITY:
957 switch (GET_CODE (op))
958 {
959 case NOT:
960 case BSWAP:
961 case ZERO_EXTEND:
962 case SIGN_EXTEND:
963 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
964 GET_MODE (XEXP (op, 0)));
965
966 case ROTATE:
967 case ROTATERT:
968 /* Rotations don't affect parity. */
969 if (!side_effects_p (XEXP (op, 1)))
970 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
971 GET_MODE (XEXP (op, 0)));
972 break;
973
974 default:
975 break;
976 }
977 break;
978
979 case BSWAP:
980 /* (bswap (bswap x)) -> x. */
981 if (GET_CODE (op) == BSWAP)
982 return XEXP (op, 0);
983 break;
984
985 case FLOAT:
986 /* (float (sign_extend <X>)) = (float <X>). */
987 if (GET_CODE (op) == SIGN_EXTEND)
988 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
989 GET_MODE (XEXP (op, 0)));
990 break;
991
992 case SIGN_EXTEND:
993 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
994 becomes just the MINUS if its mode is MODE. This allows
995 folding switch statements on machines using casesi (such as
996 the VAX). */
997 if (GET_CODE (op) == TRUNCATE
998 && GET_MODE (XEXP (op, 0)) == mode
999 && GET_CODE (XEXP (op, 0)) == MINUS
1000 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1001 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1002 return XEXP (op, 0);
1003
1004 /* Check for a sign extension of a subreg of a promoted
1005 variable, where the promotion is sign-extended, and the
1006 target mode is the same as the variable's promotion. */
1007 if (GET_CODE (op) == SUBREG
1008 && SUBREG_PROMOTED_VAR_P (op)
1009 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1010 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1011 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1012
1013 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1014 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1015 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1016 {
1017 gcc_assert (GET_MODE_BITSIZE (mode)
1018 > GET_MODE_BITSIZE (GET_MODE (op)));
1019 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1020 GET_MODE (XEXP (op, 0)));
1021 }
1022
1023 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1024 is (sign_extend:M (subreg:O <X>)) if there is mode with
1025 GET_MODE_BITSIZE (N) - I bits.
1026 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1027 is similarly (zero_extend:M (subreg:O <X>)). */
1028 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1029 && GET_CODE (XEXP (op, 0)) == ASHIFT
1030 && CONST_INT_P (XEXP (op, 1))
1031 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1032 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1033 {
1034 enum machine_mode tmode
1035 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1036 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1037 gcc_assert (GET_MODE_BITSIZE (mode)
1038 > GET_MODE_BITSIZE (GET_MODE (op)));
1039 if (tmode != BLKmode)
1040 {
1041 rtx inner =
1042 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1043 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1044 ? SIGN_EXTEND : ZERO_EXTEND,
1045 mode, inner, tmode);
1046 }
1047 }
1048
1049 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1050 	      /* As we do not know which address space the pointer is referring to,
1051 we can do this only if the target does not support different pointer
1052 or address modes depending on the address space. */
1053 if (target_default_pointer_address_modes_p ()
1054 && ! POINTERS_EXTEND_UNSIGNED
1055 && mode == Pmode && GET_MODE (op) == ptr_mode
1056 && (CONSTANT_P (op)
1057 || (GET_CODE (op) == SUBREG
1058 && REG_P (SUBREG_REG (op))
1059 && REG_POINTER (SUBREG_REG (op))
1060 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1061 return convert_memory_address (Pmode, op);
1062 #endif
1063 break;
1064
1065 case ZERO_EXTEND:
1066 /* Check for a zero extension of a subreg of a promoted
1067 variable, where the promotion is zero-extended, and the
1068 target mode is the same as the variable's promotion. */
1069 if (GET_CODE (op) == SUBREG
1070 && SUBREG_PROMOTED_VAR_P (op)
1071 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1072 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1073 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1074
1075 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1076 if (GET_CODE (op) == ZERO_EXTEND)
1077 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1078 GET_MODE (XEXP (op, 0)));
1079
1080 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1081 is (zero_extend:M (subreg:O <X>)) if there is mode with
1082 GET_MODE_BITSIZE (N) - I bits. */
1083 if (GET_CODE (op) == LSHIFTRT
1084 && GET_CODE (XEXP (op, 0)) == ASHIFT
1085 && CONST_INT_P (XEXP (op, 1))
1086 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1087 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1088 {
1089 enum machine_mode tmode
1090 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1091 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1092 if (tmode != BLKmode)
1093 {
1094 rtx inner =
1095 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1096 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1097 }
1098 }
1099
1100 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1101 	      /* As we do not know which address space the pointer is referring to,
1102 we can do this only if the target does not support different pointer
1103 or address modes depending on the address space. */
1104 if (target_default_pointer_address_modes_p ()
1105 && POINTERS_EXTEND_UNSIGNED > 0
1106 && mode == Pmode && GET_MODE (op) == ptr_mode
1107 && (CONSTANT_P (op)
1108 || (GET_CODE (op) == SUBREG
1109 && REG_P (SUBREG_REG (op))
1110 && REG_POINTER (SUBREG_REG (op))
1111 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1112 return convert_memory_address (Pmode, op);
1113 #endif
1114 break;
1115
1116 default:
1117 break;
1118 }
1119
1120 return 0;
1121 }
1122
1123 /* Try to compute the value of a unary operation CODE whose output mode is to
1124 be MODE with input operand OP whose mode was originally OP_MODE.
1125 Return zero if the value cannot be computed. */
1126 rtx
1127 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1128 rtx op, enum machine_mode op_mode)
1129 {
1130 unsigned int width = GET_MODE_BITSIZE (mode);
1131
1132 if (code == VEC_DUPLICATE)
1133 {
1134 gcc_assert (VECTOR_MODE_P (mode));
1135 if (GET_MODE (op) != VOIDmode)
1136 {
1137 if (!VECTOR_MODE_P (GET_MODE (op)))
1138 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1139 else
1140 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1141 (GET_MODE (op)));
1142 }
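 	      /* Replicate a constant scalar, or the elements of a narrower
 	         constant vector, across every element of the result vector.  */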
1143 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1144 || GET_CODE (op) == CONST_VECTOR)
1145 {
1146 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1147 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1148 rtvec v = rtvec_alloc (n_elts);
1149 unsigned int i;
1150
1151 if (GET_CODE (op) != CONST_VECTOR)
1152 for (i = 0; i < n_elts; i++)
1153 RTVEC_ELT (v, i) = op;
1154 else
1155 {
1156 enum machine_mode inmode = GET_MODE (op);
1157 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1158 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1159
1160 gcc_assert (in_n_elts < n_elts);
1161 gcc_assert ((n_elts % in_n_elts) == 0);
1162 for (i = 0; i < n_elts; i++)
1163 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1164 }
1165 return gen_rtx_CONST_VECTOR (mode, v);
1166 }
1167 }
1168
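 	  /* For a constant vector operand, fold the operation element by element
 	     and rebuild the result vector; give up if any element fails to fold.  */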
1169 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1170 {
1171 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1172 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1173 enum machine_mode opmode = GET_MODE (op);
1174 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1175 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1176 rtvec v = rtvec_alloc (n_elts);
1177 unsigned int i;
1178
1179 gcc_assert (op_n_elts == n_elts);
1180 for (i = 0; i < n_elts; i++)
1181 {
1182 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1183 CONST_VECTOR_ELT (op, i),
1184 GET_MODE_INNER (opmode));
1185 if (!x)
1186 return 0;
1187 RTVEC_ELT (v, i) = x;
1188 }
1189 return gen_rtx_CONST_VECTOR (mode, v);
1190 }
1191
1192 /* The order of these tests is critical so that, for example, we don't
1193 check the wrong mode (input vs. output) for a conversion operation,
1194 such as FIX. At some point, this should be simplified. */
1195
1196 if (code == FLOAT && GET_MODE (op) == VOIDmode
1197 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1198 {
1199 HOST_WIDE_INT hv, lv;
1200 REAL_VALUE_TYPE d;
1201
1202 if (CONST_INT_P (op))
1203 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1204 else
1205 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1206
1207 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1208 d = real_value_truncate (mode, d);
1209 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1210 }
1211 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1212 && (GET_CODE (op) == CONST_DOUBLE
1213 || CONST_INT_P (op)))
1214 {
1215 HOST_WIDE_INT hv, lv;
1216 REAL_VALUE_TYPE d;
1217
1218 if (CONST_INT_P (op))
1219 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1220 else
1221 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1222
1223 if (op_mode == VOIDmode)
1224 {
1225 /* We don't know how to interpret negative-looking numbers in
1226 this case, so don't try to fold those. */
1227 if (hv < 0)
1228 return 0;
1229 }
1230 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1231 ;
1232 else
1233 hv = 0, lv &= GET_MODE_MASK (op_mode);
1234
1235 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1236 d = real_value_truncate (mode, d);
1237 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1238 }
1239
1240 if (CONST_INT_P (op)
1241 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1242 {
1243 HOST_WIDE_INT arg0 = INTVAL (op);
1244 HOST_WIDE_INT val;
1245
1246 switch (code)
1247 {
1248 case NOT:
1249 val = ~ arg0;
1250 break;
1251
1252 case NEG:
1253 val = - arg0;
1254 break;
1255
1256 case ABS:
1257 val = (arg0 >= 0 ? arg0 : - arg0);
1258 break;
1259
1260 case FFS:
1261 arg0 &= GET_MODE_MASK (mode);
1262 val = ffs_hwi (arg0);
1263 break;
1264
1265 case CLZ:
1266 arg0 &= GET_MODE_MASK (mode);
1267 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1268 ;
1269 else
1270 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1271 break;
1272
1273 case CTZ:
1274 arg0 &= GET_MODE_MASK (mode);
1275 if (arg0 == 0)
1276 {
1277 /* Even if the value at zero is undefined, we have to come
1278 up with some replacement. Seems good enough. */
1279 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1280 val = GET_MODE_BITSIZE (mode);
1281 }
1282 else
1283 val = ctz_hwi (arg0);
1284 break;
1285
1286 case POPCOUNT:
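 	  /* Count the set bits of ARG0 by repeatedly clearing its least
 	     significant set bit.  */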
1287 arg0 &= GET_MODE_MASK (mode);
1288 val = 0;
1289 while (arg0)
1290 val++, arg0 &= arg0 - 1;
1291 break;
1292
1293 case PARITY:
1294 arg0 &= GET_MODE_MASK (mode);
1295 val = 0;
1296 while (arg0)
1297 val++, arg0 &= arg0 - 1;
1298 val &= 1;
1299 break;
1300
1301 case BSWAP:
1302 {
1303 unsigned int s;
1304
1305 val = 0;
1306 for (s = 0; s < width; s += 8)
1307 {
1308 unsigned int d = width - s - 8;
1309 unsigned HOST_WIDE_INT byte;
1310 byte = (arg0 >> s) & 0xff;
1311 val |= byte << d;
1312 }
1313 }
1314 break;
1315
1316 case TRUNCATE:
1317 val = arg0;
1318 break;
1319
1320 case ZERO_EXTEND:
1321 /* When zero-extending a CONST_INT, we need to know its
1322 original mode. */
1323 gcc_assert (op_mode != VOIDmode);
1324 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1325 {
1326 /* If we were really extending the mode,
1327 we would have to distinguish between zero-extension
1328 and sign-extension. */
1329 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1330 val = arg0;
1331 }
1332 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1333 val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1334 << GET_MODE_BITSIZE (op_mode));
1335 else
1336 return 0;
1337 break;
1338
1339 case SIGN_EXTEND:
1340 if (op_mode == VOIDmode)
1341 op_mode = mode;
1342 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1343 {
1344 /* If we were really extending the mode,
1345 we would have to distinguish between zero-extension
1346 and sign-extension. */
1347 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1348 val = arg0;
1349 }
1350 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1351 {
1352 val
1353 = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1354 << GET_MODE_BITSIZE (op_mode));
1355 if (val & ((unsigned HOST_WIDE_INT) 1
1356 << (GET_MODE_BITSIZE (op_mode) - 1)))
1357 val
1358 -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1359 }
1360 else
1361 return 0;
1362 break;
1363
1364 case SQRT:
1365 case FLOAT_EXTEND:
1366 case FLOAT_TRUNCATE:
1367 case SS_TRUNCATE:
1368 case US_TRUNCATE:
1369 case SS_NEG:
1370 case US_NEG:
1371 case SS_ABS:
1372 return 0;
1373
1374 default:
1375 gcc_unreachable ();
1376 }
1377
1378 return gen_int_mode (val, mode);
1379 }
1380
1381 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1382 for a DImode operation on a CONST_INT. */
1383 else if (GET_MODE (op) == VOIDmode
1384 && width <= HOST_BITS_PER_WIDE_INT * 2
1385 && (GET_CODE (op) == CONST_DOUBLE
1386 || CONST_INT_P (op)))
1387 {
1388 unsigned HOST_WIDE_INT l1, lv;
1389 HOST_WIDE_INT h1, hv;
1390
1391 if (GET_CODE (op) == CONST_DOUBLE)
1392 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1393 else
1394 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1395
1396 switch (code)
1397 {
1398 case NOT:
1399 lv = ~ l1;
1400 hv = ~ h1;
1401 break;
1402
1403 case NEG:
1404 neg_double (l1, h1, &lv, &hv);
1405 break;
1406
1407 case ABS:
1408 if (h1 < 0)
1409 neg_double (l1, h1, &lv, &hv);
1410 else
1411 lv = l1, hv = h1;
1412 break;
1413
1414 case FFS:
1415 hv = 0;
1416 if (l1 != 0)
1417 lv = ffs_hwi (l1);
1418 else if (h1 != 0)
1419 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1420 else
1421 lv = 0;
1422 break;
1423
1424 case CLZ:
1425 hv = 0;
1426 if (h1 != 0)
1427 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1428 - HOST_BITS_PER_WIDE_INT;
1429 else if (l1 != 0)
1430 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1431 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1432 lv = GET_MODE_BITSIZE (mode);
1433 break;
1434
1435 case CTZ:
1436 hv = 0;
1437 if (l1 != 0)
1438 lv = ctz_hwi (l1);
1439 else if (h1 != 0)
1440 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1441 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1442 lv = GET_MODE_BITSIZE (mode);
1443 break;
1444
1445 case POPCOUNT:
1446 hv = 0;
1447 lv = 0;
1448 while (l1)
1449 lv++, l1 &= l1 - 1;
1450 while (h1)
1451 lv++, h1 &= h1 - 1;
1452 break;
1453
1454 case PARITY:
1455 hv = 0;
1456 lv = 0;
1457 while (l1)
1458 lv++, l1 &= l1 - 1;
1459 while (h1)
1460 lv++, h1 &= h1 - 1;
1461 lv &= 1;
1462 break;
1463
1464 case BSWAP:
1465 {
1466 unsigned int s;
1467
1468 hv = 0;
1469 lv = 0;
1470 for (s = 0; s < width; s += 8)
1471 {
1472 unsigned int d = width - s - 8;
1473 unsigned HOST_WIDE_INT byte;
1474
1475 if (s < HOST_BITS_PER_WIDE_INT)
1476 byte = (l1 >> s) & 0xff;
1477 else
1478 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1479
1480 if (d < HOST_BITS_PER_WIDE_INT)
1481 lv |= byte << d;
1482 else
1483 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1484 }
1485 }
1486 break;
1487
1488 case TRUNCATE:
1489 /* This is just a change-of-mode, so do nothing. */
1490 lv = l1, hv = h1;
1491 break;
1492
1493 case ZERO_EXTEND:
1494 gcc_assert (op_mode != VOIDmode);
1495
1496 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1497 return 0;
1498
1499 hv = 0;
1500 lv = l1 & GET_MODE_MASK (op_mode);
1501 break;
1502
1503 case SIGN_EXTEND:
1504 if (op_mode == VOIDmode
1505 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1506 return 0;
1507 else
1508 {
1509 lv = l1 & GET_MODE_MASK (op_mode);
1510 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1511 && (lv & ((unsigned HOST_WIDE_INT) 1
1512 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1513 lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1514
1515 hv = HWI_SIGN_EXTEND (lv);
1516 }
1517 break;
1518
1519 case SQRT:
1520 return 0;
1521
1522 default:
1523 return 0;
1524 }
1525
1526 return immed_double_const (lv, hv, mode);
1527 }
1528
1529 else if (GET_CODE (op) == CONST_DOUBLE
1530 && SCALAR_FLOAT_MODE_P (mode))
1531 {
1532 REAL_VALUE_TYPE d, t;
1533 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1534
1535 switch (code)
1536 {
1537 case SQRT:
1538 if (HONOR_SNANS (mode) && real_isnan (&d))
1539 return 0;
1540 real_sqrt (&t, mode, &d);
1541 d = t;
1542 break;
1543 case ABS:
1544 d = real_value_abs (&d);
1545 break;
1546 case NEG:
1547 d = real_value_negate (&d);
1548 break;
1549 case FLOAT_TRUNCATE:
1550 d = real_value_truncate (mode, d);
1551 break;
1552 case FLOAT_EXTEND:
1553 /* All this does is change the mode. */
1554 break;
1555 case FIX:
1556 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1557 break;
1558 case NOT:
1559 {
1560 long tmp[4];
1561 int i;
1562
1563 real_to_target (tmp, &d, GET_MODE (op));
1564 for (i = 0; i < 4; i++)
1565 tmp[i] = ~tmp[i];
1566 real_from_target (&d, tmp, mode);
1567 break;
1568 }
1569 default:
1570 gcc_unreachable ();
1571 }
1572 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1573 }
1574
1575 else if (GET_CODE (op) == CONST_DOUBLE
1576 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1577 && GET_MODE_CLASS (mode) == MODE_INT
1578 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1579 {
1580 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1581 operators are intentionally left unspecified (to ease implementation
1582 by target backends), for consistency, this routine implements the
1583 same semantics for constant folding as used by the middle-end. */
1584
1585 /* This was formerly used only for non-IEEE float.
1586 eggert@twinsun.com says it is safe for IEEE also. */
1587 HOST_WIDE_INT xh, xl, th, tl;
1588 REAL_VALUE_TYPE x, t;
1589 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1590 switch (code)
1591 {
1592 case FIX:
1593 if (REAL_VALUE_ISNAN (x))
1594 return const0_rtx;
1595
1596 /* Test against the signed upper bound. */
1597 if (width > HOST_BITS_PER_WIDE_INT)
1598 {
1599 th = ((unsigned HOST_WIDE_INT) 1
1600 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1601 tl = -1;
1602 }
1603 else
1604 {
1605 th = 0;
1606 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1607 }
1608 real_from_integer (&t, VOIDmode, tl, th, 0);
1609 if (REAL_VALUES_LESS (t, x))
1610 {
1611 xh = th;
1612 xl = tl;
1613 break;
1614 }
1615
1616 /* Test against the signed lower bound. */
1617 if (width > HOST_BITS_PER_WIDE_INT)
1618 {
1619 th = (unsigned HOST_WIDE_INT) (-1)
1620 << (width - HOST_BITS_PER_WIDE_INT - 1);
1621 tl = 0;
1622 }
1623 else
1624 {
1625 th = -1;
1626 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1627 }
1628 real_from_integer (&t, VOIDmode, tl, th, 0);
1629 if (REAL_VALUES_LESS (x, t))
1630 {
1631 xh = th;
1632 xl = tl;
1633 break;
1634 }
1635 REAL_VALUE_TO_INT (&xl, &xh, x);
1636 break;
1637
1638 case UNSIGNED_FIX:
1639 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1640 return const0_rtx;
1641
1642 /* Test against the unsigned upper bound. */
1643 if (width == 2*HOST_BITS_PER_WIDE_INT)
1644 {
1645 th = -1;
1646 tl = -1;
1647 }
1648 else if (width >= HOST_BITS_PER_WIDE_INT)
1649 {
1650 th = ((unsigned HOST_WIDE_INT) 1
1651 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1652 tl = -1;
1653 }
1654 else
1655 {
1656 th = 0;
1657 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1658 }
1659 real_from_integer (&t, VOIDmode, tl, th, 1);
1660 if (REAL_VALUES_LESS (t, x))
1661 {
1662 xh = th;
1663 xl = tl;
1664 break;
1665 }
1666
1667 REAL_VALUE_TO_INT (&xl, &xh, x);
1668 break;
1669
1670 default:
1671 gcc_unreachable ();
1672 }
1673 return immed_double_const (xl, xh, mode);
1674 }
1675
1676 return NULL_RTX;
1677 }
1678 \f
1679 /* Subroutine of simplify_binary_operation to simplify a commutative,
1680 associative binary operation CODE with result mode MODE, operating
1681 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1682 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1683 canonicalization is possible. */
1684
1685 static rtx
1686 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1687 rtx op0, rtx op1)
1688 {
1689 rtx tem;
1690
1691 /* Linearize the operator to the left. */
1692 if (GET_CODE (op1) == code)
1693 {
1694 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1695 if (GET_CODE (op0) == code)
1696 {
1697 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1698 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1699 }
1700
1701 /* "a op (b op c)" becomes "(b op c) op a". */
1702 if (! swap_commutative_operands_p (op1, op0))
1703 return simplify_gen_binary (code, mode, op1, op0);
1704
1705 tem = op0;
1706 op0 = op1;
1707 op1 = tem;
1708 }
1709
1710 if (GET_CODE (op0) == code)
1711 {
1712 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1713 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1714 {
1715 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1716 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1717 }
1718
1719 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1720 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1721 if (tem != 0)
1722 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1723
1724 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1725 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1726 if (tem != 0)
1727 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1728 }
1729
1730 return 0;
1731 }
1732
1733
1734 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1735 and OP1. Return 0 if no simplification is possible.
1736
1737 Don't use this for relational operations such as EQ or LT.
1738 Use simplify_relational_operation instead. */
1739 rtx
1740 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1741 rtx op0, rtx op1)
1742 {
1743 rtx trueop0, trueop1;
1744 rtx tem;
1745
1746 /* Relational operations don't work here. We must know the mode
1747 of the operands in order to do the comparison correctly.
1748 Assuming a full word can give incorrect results.
1749 Consider comparing 128 with -128 in QImode. */
1750 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1751 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1752
1753 /* Make sure the constant is second. */
1754 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1755 && swap_commutative_operands_p (op0, op1))
1756 {
1757 tem = op0, op0 = op1, op1 = tem;
1758 }
1759
1760 trueop0 = avoid_constant_pool_reference (op0);
1761 trueop1 = avoid_constant_pool_reference (op1);
1762
1763 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1764 if (tem)
1765 return tem;
1766 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1767 }
1768
1769 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1770 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1771 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1772 actual constants. */
1773
1774 static rtx
1775 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1776 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1777 {
1778 rtx tem, reversed, opleft, opright;
1779 HOST_WIDE_INT val;
1780 unsigned int width = GET_MODE_BITSIZE (mode);
1781
1782 /* Even if we can't compute a constant result,
1783 there are some cases worth simplifying. */
1784
1785 switch (code)
1786 {
1787 case PLUS:
1788 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1789 when x is NaN, infinite, or finite and nonzero. They aren't
1790 when x is -0 and the rounding mode is not towards -infinity,
1791 since (-0) + 0 is then 0. */
1792 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1793 return op0;
1794
1795 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1796 transformations are safe even for IEEE. */
1797 if (GET_CODE (op0) == NEG)
1798 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1799 else if (GET_CODE (op1) == NEG)
1800 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1801
1802 /* (~a) + 1 -> -a */
1803 if (INTEGRAL_MODE_P (mode)
1804 && GET_CODE (op0) == NOT
1805 && trueop1 == const1_rtx)
1806 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1807
1808 /* Handle both-operands-constant cases. We can only add
1809 CONST_INTs to constants since the sum of relocatable symbols
1810 can't be handled by most assemblers. Don't add CONST_INT
1811 to CONST_INT since overflow won't be computed properly if wider
1812 than HOST_BITS_PER_WIDE_INT. */
1813
1814 if ((GET_CODE (op0) == CONST
1815 || GET_CODE (op0) == SYMBOL_REF
1816 || GET_CODE (op0) == LABEL_REF)
1817 && CONST_INT_P (op1))
1818 return plus_constant (op0, INTVAL (op1));
1819 else if ((GET_CODE (op1) == CONST
1820 || GET_CODE (op1) == SYMBOL_REF
1821 || GET_CODE (op1) == LABEL_REF)
1822 && CONST_INT_P (op0))
1823 return plus_constant (op1, INTVAL (op0));
1824
1825 /* See if this is something like X * C - X or vice versa or
1826 if the multiplication is written as a shift. If so, we can
1827 distribute and make a new multiply, shift, or maybe just
1828 have X (if C is 2 in the example above). But don't make
1829 something more expensive than we had before. */
1830
1831 if (SCALAR_INT_MODE_P (mode))
1832 {
1833 double_int coeff0, coeff1;
1834 rtx lhs = op0, rhs = op1;
1835
1836 coeff0 = double_int_one;
1837 coeff1 = double_int_one;
1838
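 	      /* Express each operand as COEFF * TERM: a NEG contributes a
 	         coefficient of -1, a MULT by a CONST_INT contributes that
 	         constant, and an ASHIFT by a CONST_INT contributes the
 	         corresponding power of two.  */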
1839 if (GET_CODE (lhs) == NEG)
1840 {
1841 coeff0 = double_int_minus_one;
1842 lhs = XEXP (lhs, 0);
1843 }
1844 else if (GET_CODE (lhs) == MULT
1845 && CONST_INT_P (XEXP (lhs, 1)))
1846 {
1847 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1848 lhs = XEXP (lhs, 0);
1849 }
1850 else if (GET_CODE (lhs) == ASHIFT
1851 && CONST_INT_P (XEXP (lhs, 1))
1852 && INTVAL (XEXP (lhs, 1)) >= 0
1853 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1854 {
1855 coeff0 = double_int_setbit (double_int_zero,
1856 INTVAL (XEXP (lhs, 1)));
1857 lhs = XEXP (lhs, 0);
1858 }
1859
1860 if (GET_CODE (rhs) == NEG)
1861 {
1862 coeff1 = double_int_minus_one;
1863 rhs = XEXP (rhs, 0);
1864 }
1865 else if (GET_CODE (rhs) == MULT
1866 && CONST_INT_P (XEXP (rhs, 1)))
1867 {
1868 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
1869 rhs = XEXP (rhs, 0);
1870 }
1871 else if (GET_CODE (rhs) == ASHIFT
1872 && CONST_INT_P (XEXP (rhs, 1))
1873 && INTVAL (XEXP (rhs, 1)) >= 0
1874 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1875 {
1876 coeff1 = double_int_setbit (double_int_zero,
1877 INTVAL (XEXP (rhs, 1)));
1878 rhs = XEXP (rhs, 0);
1879 }
1880
1881 if (rtx_equal_p (lhs, rhs))
1882 {
1883 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1884 rtx coeff;
1885 double_int val;
1886 bool speed = optimize_function_for_speed_p (cfun);
1887
1888 val = double_int_add (coeff0, coeff1);
1889 coeff = immed_double_int_const (val, mode);
1890
1891 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1892 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1893 ? tem : 0;
1894 }
1895 }
1896
1897 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
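	  /* This holds because adding the sign bit is the same as XORing it
	     in: the carry out of the top bit is discarded in modular
	     arithmetic, so C2 can simply be folded into C1.  */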
1898 if ((CONST_INT_P (op1)
1899 || GET_CODE (op1) == CONST_DOUBLE)
1900 && GET_CODE (op0) == XOR
1901 && (CONST_INT_P (XEXP (op0, 1))
1902 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1903 && mode_signbit_p (mode, op1))
1904 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1905 simplify_gen_binary (XOR, mode, op1,
1906 XEXP (op0, 1)));
1907
1908 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1909 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1910 && GET_CODE (op0) == MULT
1911 && GET_CODE (XEXP (op0, 0)) == NEG)
1912 {
1913 rtx in1, in2;
1914
1915 in1 = XEXP (XEXP (op0, 0), 0);
1916 in2 = XEXP (op0, 1);
1917 return simplify_gen_binary (MINUS, mode, op1,
1918 simplify_gen_binary (MULT, mode,
1919 in1, in2));
1920 }
1921
1922 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1923 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1924 is 1. */
1925 if (COMPARISON_P (op0)
1926 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1927 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1928 && (reversed = reversed_comparison (op0, mode)))
1929 return
1930 simplify_gen_unary (NEG, mode, reversed, mode);
1931
1932 /* If one of the operands is a PLUS or a MINUS, see if we can
1933 simplify this by the associative law.
1934 Don't use the associative law for floating point.
1935 The inaccuracy makes it nonassociative,
1936 and subtle programs can break if operations are associated. */
1937
1938 if (INTEGRAL_MODE_P (mode)
1939 && (plus_minus_operand_p (op0)
1940 || plus_minus_operand_p (op1))
1941 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1942 return tem;
1943
1944 /* Reassociate floating point addition only when the user
1945 specifies associative math operations. */
1946 if (FLOAT_MODE_P (mode)
1947 && flag_associative_math)
1948 {
1949 tem = simplify_associative_operation (code, mode, op0, op1);
1950 if (tem)
1951 return tem;
1952 }
1953 break;
1954
1955 case COMPARE:
1956 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1957 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1958 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1959 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1960 {
1961 rtx xop00 = XEXP (op0, 0);
1962 rtx xop10 = XEXP (op1, 0);
1963
1964 #ifdef HAVE_cc0
1965 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1966 #else
1967 if (REG_P (xop00) && REG_P (xop10)
1968 && GET_MODE (xop00) == GET_MODE (xop10)
1969 && REGNO (xop00) == REGNO (xop10)
1970 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1971 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1972 #endif
1973 return xop00;
1974 }
1975 break;
1976
1977 case MINUS:
1978 /* We can't assume x-x is 0 even with non-IEEE floating point,
1979 but since it is zero except in very strange circumstances, we
1980 will treat it as zero with -ffinite-math-only. */
1981 if (rtx_equal_p (trueop0, trueop1)
1982 && ! side_effects_p (op0)
1983 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1984 return CONST0_RTX (mode);
1985
1986 /* Change subtraction from zero into negation. (0 - x) is the
1987 same as -x when x is NaN, infinite, or finite and nonzero.
1988 But if the mode has signed zeros, and does not round towards
1989 -infinity, then 0 - 0 is 0, not -0. */
1990 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1991 return simplify_gen_unary (NEG, mode, op1, mode);
1992
1993 /* (-1 - a) is ~a. */
1994 if (trueop0 == constm1_rtx)
1995 return simplify_gen_unary (NOT, mode, op1, mode);
1996
1997 /* Subtracting 0 has no effect unless the mode has signed zeros
1998 and supports rounding towards -infinity. In such a case,
1999 0 - 0 is -0. */
2000 if (!(HONOR_SIGNED_ZEROS (mode)
2001 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2002 && trueop1 == CONST0_RTX (mode))
2003 return op0;
2004
2005 /* See if this is something like X * C - X or vice versa or
2006 if the multiplication is written as a shift. If so, we can
2007 distribute and make a new multiply, shift, or maybe just
2008 have X (if C is 2 in the example above). But don't make
2009 something more expensive than we had before. */
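	  /* As an illustration, (minus (mult X (const_int 2)) X) is seen
	     below as coefficients 2 and -1 applied to the same operand X,
	     so it reduces to X itself.  */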
2010
2011 if (SCALAR_INT_MODE_P (mode))
2012 {
2013 double_int coeff0, negcoeff1;
2014 rtx lhs = op0, rhs = op1;
2015
2016 coeff0 = double_int_one;
2017 negcoeff1 = double_int_minus_one;
2018
2019 if (GET_CODE (lhs) == NEG)
2020 {
2021 coeff0 = double_int_minus_one;
2022 lhs = XEXP (lhs, 0);
2023 }
2024 else if (GET_CODE (lhs) == MULT
2025 && CONST_INT_P (XEXP (lhs, 1)))
2026 {
2027 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2028 lhs = XEXP (lhs, 0);
2029 }
2030 else if (GET_CODE (lhs) == ASHIFT
2031 && CONST_INT_P (XEXP (lhs, 1))
2032 && INTVAL (XEXP (lhs, 1)) >= 0
2033 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2034 {
2035 coeff0 = double_int_setbit (double_int_zero,
2036 INTVAL (XEXP (lhs, 1)));
2037 lhs = XEXP (lhs, 0);
2038 }
2039
2040 if (GET_CODE (rhs) == NEG)
2041 {
2042 negcoeff1 = double_int_one;
2043 rhs = XEXP (rhs, 0);
2044 }
2045 else if (GET_CODE (rhs) == MULT
2046 && CONST_INT_P (XEXP (rhs, 1)))
2047 {
2048 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2049 rhs = XEXP (rhs, 0);
2050 }
2051 else if (GET_CODE (rhs) == ASHIFT
2052 && CONST_INT_P (XEXP (rhs, 1))
2053 && INTVAL (XEXP (rhs, 1)) >= 0
2054 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2055 {
2056 negcoeff1 = double_int_setbit (double_int_zero,
2057 INTVAL (XEXP (rhs, 1)));
2058 negcoeff1 = double_int_neg (negcoeff1);
2059 rhs = XEXP (rhs, 0);
2060 }
2061
2062 if (rtx_equal_p (lhs, rhs))
2063 {
2064 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2065 rtx coeff;
2066 double_int val;
2067 bool speed = optimize_function_for_speed_p (cfun);
2068
2069 val = double_int_add (coeff0, negcoeff1);
2070 coeff = immed_double_int_const (val, mode);
2071
2072 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2073 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2074 ? tem : 0;
2075 }
2076 }
2077
2078 /* (a - (-b)) -> (a + b). True even for IEEE. */
2079 if (GET_CODE (op1) == NEG)
2080 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2081
2082 /* (-x - c) may be simplified as (-c - x). */
2083 if (GET_CODE (op0) == NEG
2084 && (CONST_INT_P (op1)
2085 || GET_CODE (op1) == CONST_DOUBLE))
2086 {
2087 tem = simplify_unary_operation (NEG, mode, op1, mode);
2088 if (tem)
2089 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2090 }
2091
2092 /* Don't let a relocatable value get a negative coeff. */
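	  /* For example, (minus (reg R) (const_int 4)) is canonicalized
	     here as (plus (reg R) (const_int -4)).  */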
2093 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2094 return simplify_gen_binary (PLUS, mode,
2095 op0,
2096 neg_const_int (mode, op1));
2097
2098 /* (x - (x & y)) -> (x & ~y) */
2099 if (GET_CODE (op1) == AND)
2100 {
2101 if (rtx_equal_p (op0, XEXP (op1, 0)))
2102 {
2103 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2104 GET_MODE (XEXP (op1, 1)));
2105 return simplify_gen_binary (AND, mode, op0, tem);
2106 }
2107 if (rtx_equal_p (op0, XEXP (op1, 1)))
2108 {
2109 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2110 GET_MODE (XEXP (op1, 0)));
2111 return simplify_gen_binary (AND, mode, op0, tem);
2112 }
2113 }
2114
2115 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2116 by reversing the comparison code if valid. */
2117 if (STORE_FLAG_VALUE == 1
2118 && trueop0 == const1_rtx
2119 && COMPARISON_P (op1)
2120 && (reversed = reversed_comparison (op1, mode)))
2121 return reversed;
2122
2123 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2124 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2125 && GET_CODE (op1) == MULT
2126 && GET_CODE (XEXP (op1, 0)) == NEG)
2127 {
2128 rtx in1, in2;
2129
2130 in1 = XEXP (XEXP (op1, 0), 0);
2131 in2 = XEXP (op1, 1);
2132 return simplify_gen_binary (PLUS, mode,
2133 simplify_gen_binary (MULT, mode,
2134 in1, in2),
2135 op0);
2136 }
2137
2138 /* Canonicalize (minus (neg A) (mult B C)) to
2139 (minus (mult (neg B) C) A). */
2140 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2141 && GET_CODE (op1) == MULT
2142 && GET_CODE (op0) == NEG)
2143 {
2144 rtx in1, in2;
2145
2146 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2147 in2 = XEXP (op1, 1);
2148 return simplify_gen_binary (MINUS, mode,
2149 simplify_gen_binary (MULT, mode,
2150 in1, in2),
2151 XEXP (op0, 0));
2152 }
2153
2154 /* If one of the operands is a PLUS or a MINUS, see if we can
2155 simplify this by the associative law. This will, for example,
2156 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2157 Don't use the associative law for floating point.
2158 The inaccuracy makes it nonassociative,
2159 and subtle programs can break if operations are associated. */
2160
2161 if (INTEGRAL_MODE_P (mode)
2162 && (plus_minus_operand_p (op0)
2163 || plus_minus_operand_p (op1))
2164 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2165 return tem;
2166 break;
2167
2168 case MULT:
2169 if (trueop1 == constm1_rtx)
2170 return simplify_gen_unary (NEG, mode, op0, mode);
2171
2172 if (GET_CODE (op0) == NEG)
2173 {
2174 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2175 if (temp)
2176 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2177 }
2178 if (GET_CODE (op1) == NEG)
2179 {
2180 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2181 if (temp)
2182 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2183 }
2184
2185 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2186 x is NaN, since x * 0 is then also NaN. Nor is it valid
2187 when the mode has signed zeros, since multiplying a negative
2188 number by 0 will give -0, not 0. */
2189 if (!HONOR_NANS (mode)
2190 && !HONOR_SIGNED_ZEROS (mode)
2191 && trueop1 == CONST0_RTX (mode)
2192 && ! side_effects_p (op0))
2193 return op1;
2194
2195 /* In IEEE floating point, x*1 is not equivalent to x for
2196 signalling NaNs. */
2197 if (!HONOR_SNANS (mode)
2198 && trueop1 == CONST1_RTX (mode))
2199 return op0;
2200
2201 /* Convert multiply by constant power of two into shift unless
2202 we are still generating RTL. This test is a kludge. */
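	  /* For instance, (mult X (const_int 8)) becomes
	     (ashift X (const_int 3)), since 8 == 1 << 3.  */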
2203 if (CONST_INT_P (trueop1)
2204 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2205 /* If the mode is larger than the host word size, and the
2206 uppermost bit is set, then this isn't a power of two due
2207 to implicit sign extension. */
2208 && (width <= HOST_BITS_PER_WIDE_INT
2209 || val != HOST_BITS_PER_WIDE_INT - 1))
2210 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2211
2212 /* Likewise for multipliers wider than a word. */
2213 if (GET_CODE (trueop1) == CONST_DOUBLE
2214 && (GET_MODE (trueop1) == VOIDmode
2215 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2216 && GET_MODE (op0) == mode
2217 && CONST_DOUBLE_LOW (trueop1) == 0
2218 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2219 return simplify_gen_binary (ASHIFT, mode, op0,
2220 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2221
2222 /* x*2 is x+x and x*(-1) is -x */
2223 if (GET_CODE (trueop1) == CONST_DOUBLE
2224 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2225 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2226 && GET_MODE (op0) == mode)
2227 {
2228 REAL_VALUE_TYPE d;
2229 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2230
2231 if (REAL_VALUES_EQUAL (d, dconst2))
2232 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2233
2234 if (!HONOR_SNANS (mode)
2235 && REAL_VALUES_EQUAL (d, dconstm1))
2236 return simplify_gen_unary (NEG, mode, op0, mode);
2237 }
2238
2239 /* Optimize -x * -x as x * x. */
2240 if (FLOAT_MODE_P (mode)
2241 && GET_CODE (op0) == NEG
2242 && GET_CODE (op1) == NEG
2243 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2244 && !side_effects_p (XEXP (op0, 0)))
2245 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2246
2247 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2248 if (SCALAR_FLOAT_MODE_P (mode)
2249 && GET_CODE (op0) == ABS
2250 && GET_CODE (op1) == ABS
2251 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2252 && !side_effects_p (XEXP (op0, 0)))
2253 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2254
2255 /* Reassociate multiplication, but for floating point MULTs
2256 only when the user specifies unsafe math optimizations. */
2257 if (! FLOAT_MODE_P (mode)
2258 || flag_unsafe_math_optimizations)
2259 {
2260 tem = simplify_associative_operation (code, mode, op0, op1);
2261 if (tem)
2262 return tem;
2263 }
2264 break;
2265
2266 case IOR:
2267 if (trueop1 == CONST0_RTX (mode))
2268 return op0;
2269 if (CONST_INT_P (trueop1)
2270 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2271 == GET_MODE_MASK (mode)))
2272 return op1;
2273 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2274 return op0;
2275 /* A | (~A) -> -1 */
2276 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2277 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2278 && ! side_effects_p (op0)
2279 && SCALAR_INT_MODE_P (mode))
2280 return constm1_rtx;
2281
2282 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2283 if (CONST_INT_P (op1)
2284 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2285 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2286 return op1;
2287
2288 /* Canonicalize (X & C1) | C2. */
2289 if (GET_CODE (op0) == AND
2290 && CONST_INT_P (trueop1)
2291 && CONST_INT_P (XEXP (op0, 1)))
2292 {
2293 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2294 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2295 HOST_WIDE_INT c2 = INTVAL (trueop1);
2296
 2299 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
2298 if ((c1 & c2) == c1
2299 && !side_effects_p (XEXP (op0, 0)))
2300 return trueop1;
2301
2302 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2303 if (((c1|c2) & mask) == mask)
2304 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2305
2306 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2307 if (((c1 & ~c2) & mask) != (c1 & mask))
2308 {
2309 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2310 gen_int_mode (c1 & ~c2, mode));
2311 return simplify_gen_binary (IOR, mode, tem, op1);
2312 }
2313 }
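	  /* For example, with C1 == 0x3c and C2 == 0x0c the last rule
	     rewrites (ior (and X (const_int 0x3c)) (const_int 0x0c)) as
	     (ior (and X (const_int 0x30)) (const_int 0x0c)).  */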
2314
2315 /* Convert (A & B) | A to A. */
2316 if (GET_CODE (op0) == AND
2317 && (rtx_equal_p (XEXP (op0, 0), op1)
2318 || rtx_equal_p (XEXP (op0, 1), op1))
2319 && ! side_effects_p (XEXP (op0, 0))
2320 && ! side_effects_p (XEXP (op0, 1)))
2321 return op1;
2322
2323 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2324 mode size to (rotate A CX). */
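	  /* For instance, in SImode (ior (ashift X (const_int 24))
	     (lshiftrt X (const_int 8))) becomes (rotate X (const_int 24)),
	     because the two shift counts sum to the mode width of 32.  */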
2325
2326 if (GET_CODE (op1) == ASHIFT
2327 || GET_CODE (op1) == SUBREG)
2328 {
2329 opleft = op1;
2330 opright = op0;
2331 }
2332 else
2333 {
2334 opright = op1;
2335 opleft = op0;
2336 }
2337
2338 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2339 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2340 && CONST_INT_P (XEXP (opleft, 1))
2341 && CONST_INT_P (XEXP (opright, 1))
2342 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2343 == GET_MODE_BITSIZE (mode)))
2344 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2345
2346 /* Same, but for ashift that has been "simplified" to a wider mode
2347 by simplify_shift_const. */
2348
2349 if (GET_CODE (opleft) == SUBREG
2350 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2351 && GET_CODE (opright) == LSHIFTRT
2352 && GET_CODE (XEXP (opright, 0)) == SUBREG
2353 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2354 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2355 && (GET_MODE_SIZE (GET_MODE (opleft))
2356 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2357 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2358 SUBREG_REG (XEXP (opright, 0)))
2359 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2360 && CONST_INT_P (XEXP (opright, 1))
2361 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2362 == GET_MODE_BITSIZE (mode)))
2363 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2364 XEXP (SUBREG_REG (opleft), 1));
2365
 2368 	  /* If we have (ior (and X C1) C2), simplify this by making
2367 C1 as small as possible if C1 actually changes. */
2368 if (CONST_INT_P (op1)
2369 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2370 || INTVAL (op1) > 0)
2371 && GET_CODE (op0) == AND
2372 && CONST_INT_P (XEXP (op0, 1))
2373 && CONST_INT_P (op1)
2374 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2375 return simplify_gen_binary (IOR, mode,
2376 simplify_gen_binary
2377 (AND, mode, XEXP (op0, 0),
2378 GEN_INT (UINTVAL (XEXP (op0, 1))
2379 & ~UINTVAL (op1))),
2380 op1);
2381
2382 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2383 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2384 the PLUS does not affect any of the bits in OP1: then we can do
2385 the IOR as a PLUS and we can associate. This is valid if OP1
2386 can be safely shifted left C bits. */
2387 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2388 && GET_CODE (XEXP (op0, 0)) == PLUS
2389 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2390 && CONST_INT_P (XEXP (op0, 1))
2391 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2392 {
2393 int count = INTVAL (XEXP (op0, 1));
2394 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2395
2396 if (mask >> count == INTVAL (trueop1)
2397 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2398 return simplify_gen_binary (ASHIFTRT, mode,
2399 plus_constant (XEXP (op0, 0), mask),
2400 XEXP (op0, 1));
2401 }
2402
2403 tem = simplify_associative_operation (code, mode, op0, op1);
2404 if (tem)
2405 return tem;
2406 break;
2407
2408 case XOR:
2409 if (trueop1 == CONST0_RTX (mode))
2410 return op0;
2411 if (CONST_INT_P (trueop1)
2412 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2413 == GET_MODE_MASK (mode)))
2414 return simplify_gen_unary (NOT, mode, op0, mode);
2415 if (rtx_equal_p (trueop0, trueop1)
2416 && ! side_effects_p (op0)
2417 && GET_MODE_CLASS (mode) != MODE_CC)
2418 return CONST0_RTX (mode);
2419
2420 /* Canonicalize XOR of the most significant bit to PLUS. */
2421 if ((CONST_INT_P (op1)
2422 || GET_CODE (op1) == CONST_DOUBLE)
2423 && mode_signbit_p (mode, op1))
2424 return simplify_gen_binary (PLUS, mode, op0, op1);
2425 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2426 if ((CONST_INT_P (op1)
2427 || GET_CODE (op1) == CONST_DOUBLE)
2428 && GET_CODE (op0) == PLUS
2429 && (CONST_INT_P (XEXP (op0, 1))
2430 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2431 && mode_signbit_p (mode, XEXP (op0, 1)))
2432 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2433 simplify_gen_binary (XOR, mode, op1,
2434 XEXP (op0, 1)));
2435
2436 /* If we are XORing two things that have no bits in common,
2437 convert them into an IOR. This helps to detect rotation encoded
2438 using those methods and possibly other simplifications. */
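	  /* For instance, if OP0 can only have its low byte nonzero and OP1
	     only higher bits, XOR and IOR compute the same value, and the
	     IOR form exposes the rotate patterns recognized above.  */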
2439
2440 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2441 && (nonzero_bits (op0, mode)
2442 & nonzero_bits (op1, mode)) == 0)
2443 return (simplify_gen_binary (IOR, mode, op0, op1));
2444
2445 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2446 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2447 (NOT y). */
2448 {
2449 int num_negated = 0;
2450
2451 if (GET_CODE (op0) == NOT)
2452 num_negated++, op0 = XEXP (op0, 0);
2453 if (GET_CODE (op1) == NOT)
2454 num_negated++, op1 = XEXP (op1, 0);
2455
2456 if (num_negated == 2)
2457 return simplify_gen_binary (XOR, mode, op0, op1);
2458 else if (num_negated == 1)
2459 return simplify_gen_unary (NOT, mode,
2460 simplify_gen_binary (XOR, mode, op0, op1),
2461 mode);
2462 }
2463
2464 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2465 correspond to a machine insn or result in further simplifications
2466 if B is a constant. */
2467
2468 if (GET_CODE (op0) == AND
2469 && rtx_equal_p (XEXP (op0, 1), op1)
2470 && ! side_effects_p (op1))
2471 return simplify_gen_binary (AND, mode,
2472 simplify_gen_unary (NOT, mode,
2473 XEXP (op0, 0), mode),
2474 op1);
2475
2476 else if (GET_CODE (op0) == AND
2477 && rtx_equal_p (XEXP (op0, 0), op1)
2478 && ! side_effects_p (op1))
2479 return simplify_gen_binary (AND, mode,
2480 simplify_gen_unary (NOT, mode,
2481 XEXP (op0, 1), mode),
2482 op1);
2483
2484 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2485 comparison if STORE_FLAG_VALUE is 1. */
2486 if (STORE_FLAG_VALUE == 1
2487 && trueop1 == const1_rtx
2488 && COMPARISON_P (op0)
2489 && (reversed = reversed_comparison (op0, mode)))
2490 return reversed;
2491
2492 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2493 is (lt foo (const_int 0)), so we can perform the above
2494 simplification if STORE_FLAG_VALUE is 1. */
2495
2496 if (STORE_FLAG_VALUE == 1
2497 && trueop1 == const1_rtx
2498 && GET_CODE (op0) == LSHIFTRT
2499 && CONST_INT_P (XEXP (op0, 1))
2500 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2501 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2502
2503 /* (xor (comparison foo bar) (const_int sign-bit))
2504 when STORE_FLAG_VALUE is the sign bit. */
2505 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2506 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2507 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2508 && trueop1 == const_true_rtx
2509 && COMPARISON_P (op0)
2510 && (reversed = reversed_comparison (op0, mode)))
2511 return reversed;
2512
2513 tem = simplify_associative_operation (code, mode, op0, op1);
2514 if (tem)
2515 return tem;
2516 break;
2517
2518 case AND:
2519 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2520 return trueop1;
2521 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2522 {
2523 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2524 HOST_WIDE_INT nzop1;
2525 if (CONST_INT_P (trueop1))
2526 {
2527 HOST_WIDE_INT val1 = INTVAL (trueop1);
2528 /* If we are turning off bits already known off in OP0, we need
2529 not do an AND. */
2530 if ((nzop0 & ~val1) == 0)
2531 return op0;
2532 }
2533 nzop1 = nonzero_bits (trueop1, mode);
2534 /* If we are clearing all the nonzero bits, the result is zero. */
2535 if ((nzop1 & nzop0) == 0
2536 && !side_effects_p (op0) && !side_effects_p (op1))
2537 return CONST0_RTX (mode);
2538 }
2539 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2540 && GET_MODE_CLASS (mode) != MODE_CC)
2541 return op0;
2542 /* A & (~A) -> 0 */
2543 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2544 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2545 && ! side_effects_p (op0)
2546 && GET_MODE_CLASS (mode) != MODE_CC)
2547 return CONST0_RTX (mode);
2548
2549 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2550 there are no nonzero bits of C outside of X's mode. */
2551 if ((GET_CODE (op0) == SIGN_EXTEND
2552 || GET_CODE (op0) == ZERO_EXTEND)
2553 && CONST_INT_P (trueop1)
2554 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2555 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2556 & UINTVAL (trueop1)) == 0)
2557 {
2558 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2559 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2560 gen_int_mode (INTVAL (trueop1),
2561 imode));
2562 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2563 }
2564
2565 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2566 we might be able to further simplify the AND with X and potentially
2567 remove the truncation altogether. */
2568 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2569 {
2570 rtx x = XEXP (op0, 0);
2571 enum machine_mode xmode = GET_MODE (x);
2572 tem = simplify_gen_binary (AND, xmode, x,
2573 gen_int_mode (INTVAL (trueop1), xmode));
2574 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2575 }
2576
2577 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2578 if (GET_CODE (op0) == IOR
2579 && CONST_INT_P (trueop1)
2580 && CONST_INT_P (XEXP (op0, 1)))
2581 {
2582 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2583 return simplify_gen_binary (IOR, mode,
2584 simplify_gen_binary (AND, mode,
2585 XEXP (op0, 0), op1),
2586 gen_int_mode (tmp, mode));
2587 }
2588
2589 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2590 insn (and may simplify more). */
2591 if (GET_CODE (op0) == XOR
2592 && rtx_equal_p (XEXP (op0, 0), op1)
2593 && ! side_effects_p (op1))
2594 return simplify_gen_binary (AND, mode,
2595 simplify_gen_unary (NOT, mode,
2596 XEXP (op0, 1), mode),
2597 op1);
2598
2599 if (GET_CODE (op0) == XOR
2600 && rtx_equal_p (XEXP (op0, 1), op1)
2601 && ! side_effects_p (op1))
2602 return simplify_gen_binary (AND, mode,
2603 simplify_gen_unary (NOT, mode,
2604 XEXP (op0, 0), mode),
2605 op1);
2606
2607 /* Similarly for (~(A ^ B)) & A. */
2608 if (GET_CODE (op0) == NOT
2609 && GET_CODE (XEXP (op0, 0)) == XOR
2610 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2611 && ! side_effects_p (op1))
2612 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2613
2614 if (GET_CODE (op0) == NOT
2615 && GET_CODE (XEXP (op0, 0)) == XOR
2616 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2617 && ! side_effects_p (op1))
2618 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2619
2620 /* Convert (A | B) & A to A. */
2621 if (GET_CODE (op0) == IOR
2622 && (rtx_equal_p (XEXP (op0, 0), op1)
2623 || rtx_equal_p (XEXP (op0, 1), op1))
2624 && ! side_effects_p (XEXP (op0, 0))
2625 && ! side_effects_p (XEXP (op0, 1)))
2626 return op1;
2627
2628 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2629 ((A & N) + B) & M -> (A + B) & M
2630 Similarly if (N & M) == 0,
2631 ((A | N) + B) & M -> (A + B) & M
2632 and for - instead of + and/or ^ instead of |.
2633 Also, if (N & M) == 0, then
2634 (A +- N) & M -> A & M. */
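	  /* Concretely, with M == 0xff: ((A & 0x1ff) + B) & 0xff can drop
	     the inner mask, and (A + 0x100) & 0xff can drop the addition
	     entirely, since bits above the low eight never propagate
	     carries downward into them.  */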
2635 if (CONST_INT_P (trueop1)
2636 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2637 && ~UINTVAL (trueop1)
2638 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2639 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2640 {
2641 rtx pmop[2];
2642 int which;
2643
2644 pmop[0] = XEXP (op0, 0);
2645 pmop[1] = XEXP (op0, 1);
2646
2647 if (CONST_INT_P (pmop[1])
2648 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2649 return simplify_gen_binary (AND, mode, pmop[0], op1);
2650
2651 for (which = 0; which < 2; which++)
2652 {
2653 tem = pmop[which];
2654 switch (GET_CODE (tem))
2655 {
2656 case AND:
2657 if (CONST_INT_P (XEXP (tem, 1))
2658 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2659 == UINTVAL (trueop1))
2660 pmop[which] = XEXP (tem, 0);
2661 break;
2662 case IOR:
2663 case XOR:
2664 if (CONST_INT_P (XEXP (tem, 1))
2665 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2666 pmop[which] = XEXP (tem, 0);
2667 break;
2668 default:
2669 break;
2670 }
2671 }
2672
2673 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2674 {
2675 tem = simplify_gen_binary (GET_CODE (op0), mode,
2676 pmop[0], pmop[1]);
2677 return simplify_gen_binary (code, mode, tem, op1);
2678 }
2679 }
2680
 2683 	  /* (and X (ior (not X) Y)) -> (and X Y) */
2682 if (GET_CODE (op1) == IOR
2683 && GET_CODE (XEXP (op1, 0)) == NOT
2684 && op0 == XEXP (XEXP (op1, 0), 0))
2685 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2686
2687 /* (and (ior (not X) Y) X) -> (and X Y) */
2688 if (GET_CODE (op0) == IOR
2689 && GET_CODE (XEXP (op0, 0)) == NOT
2690 && op1 == XEXP (XEXP (op0, 0), 0))
2691 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2692
2693 tem = simplify_associative_operation (code, mode, op0, op1);
2694 if (tem)
2695 return tem;
2696 break;
2697
2698 case UDIV:
2699 /* 0/x is 0 (or x&0 if x has side-effects). */
2700 if (trueop0 == CONST0_RTX (mode))
2701 {
2702 if (side_effects_p (op1))
2703 return simplify_gen_binary (AND, mode, op1, trueop0);
2704 return trueop0;
2705 }
2706 /* x/1 is x. */
2707 if (trueop1 == CONST1_RTX (mode))
2708 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2709 /* Convert divide by power of two into shift. */
2710 if (CONST_INT_P (trueop1)
2711 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2712 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2713 break;
2714
2715 case DIV:
2716 /* Handle floating point and integers separately. */
2717 if (SCALAR_FLOAT_MODE_P (mode))
2718 {
2719 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2720 safe for modes with NaNs, since 0.0 / 0.0 will then be
2721 NaN rather than 0.0. Nor is it safe for modes with signed
2722 zeros, since dividing 0 by a negative number gives -0.0 */
2723 if (trueop0 == CONST0_RTX (mode)
2724 && !HONOR_NANS (mode)
2725 && !HONOR_SIGNED_ZEROS (mode)
2726 && ! side_effects_p (op1))
2727 return op0;
2728 /* x/1.0 is x. */
2729 if (trueop1 == CONST1_RTX (mode)
2730 && !HONOR_SNANS (mode))
2731 return op0;
2732
2733 if (GET_CODE (trueop1) == CONST_DOUBLE
2734 && trueop1 != CONST0_RTX (mode))
2735 {
2736 REAL_VALUE_TYPE d;
2737 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2738
2739 /* x/-1.0 is -x. */
2740 if (REAL_VALUES_EQUAL (d, dconstm1)
2741 && !HONOR_SNANS (mode))
2742 return simplify_gen_unary (NEG, mode, op0, mode);
2743
2744 /* Change FP division by a constant into multiplication.
2745 Only do this with -freciprocal-math. */
2746 if (flag_reciprocal_math
2747 && !REAL_VALUES_EQUAL (d, dconst0))
2748 {
2749 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2750 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2751 return simplify_gen_binary (MULT, mode, op0, tem);
2752 }
2753 }
2754 }
2755 else
2756 {
2757 /* 0/x is 0 (or x&0 if x has side-effects). */
2758 if (trueop0 == CONST0_RTX (mode))
2759 {
2760 if (side_effects_p (op1))
2761 return simplify_gen_binary (AND, mode, op1, trueop0);
2762 return trueop0;
2763 }
2764 /* x/1 is x. */
2765 if (trueop1 == CONST1_RTX (mode))
2766 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2767 /* x/-1 is -x. */
2768 if (trueop1 == constm1_rtx)
2769 {
2770 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2771 return simplify_gen_unary (NEG, mode, x, mode);
2772 }
2773 }
2774 break;
2775
2776 case UMOD:
2777 /* 0%x is 0 (or x&0 if x has side-effects). */
2778 if (trueop0 == CONST0_RTX (mode))
2779 {
2780 if (side_effects_p (op1))
2781 return simplify_gen_binary (AND, mode, op1, trueop0);
2782 return trueop0;
2783 }
 2786 	  /* x%1 is 0 (or x&0 if x has side-effects).  */
2785 if (trueop1 == CONST1_RTX (mode))
2786 {
2787 if (side_effects_p (op0))
2788 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2789 return CONST0_RTX (mode);
2790 }
2791 /* Implement modulus by power of two as AND. */
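	  /* E.g. (umod X (const_int 16)) becomes (and X (const_int 15)).  */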
2792 if (CONST_INT_P (trueop1)
2793 && exact_log2 (UINTVAL (trueop1)) > 0)
2794 return simplify_gen_binary (AND, mode, op0,
2795 GEN_INT (INTVAL (op1) - 1));
2796 break;
2797
2798 case MOD:
2799 /* 0%x is 0 (or x&0 if x has side-effects). */
2800 if (trueop0 == CONST0_RTX (mode))
2801 {
2802 if (side_effects_p (op1))
2803 return simplify_gen_binary (AND, mode, op1, trueop0);
2804 return trueop0;
2805 }
 2808 	  /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
2807 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2808 {
2809 if (side_effects_p (op0))
2810 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2811 return CONST0_RTX (mode);
2812 }
2813 break;
2814
2815 case ROTATERT:
2816 case ROTATE:
2817 case ASHIFTRT:
2818 if (trueop1 == CONST0_RTX (mode))
2819 return op0;
2820 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2821 return op0;
2822 /* Rotating ~0 always results in ~0. */
2823 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2824 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
2825 && ! side_effects_p (op1))
2826 return op0;
2827 canonicalize_shift:
2828 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2829 {
2830 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2831 if (val != INTVAL (op1))
2832 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2833 }
2834 break;
2835
2836 case ASHIFT:
2837 case SS_ASHIFT:
2838 case US_ASHIFT:
2839 if (trueop1 == CONST0_RTX (mode))
2840 return op0;
2841 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2842 return op0;
2843 goto canonicalize_shift;
2844
2845 case LSHIFTRT:
2846 if (trueop1 == CONST0_RTX (mode))
2847 return op0;
2848 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2849 return op0;
2850 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2851 if (GET_CODE (op0) == CLZ
2852 && CONST_INT_P (trueop1)
2853 && STORE_FLAG_VALUE == 1
2854 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2855 {
2856 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2857 unsigned HOST_WIDE_INT zero_val = 0;
2858
2859 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2860 && zero_val == GET_MODE_BITSIZE (imode)
2861 && INTVAL (trueop1) == exact_log2 (zero_val))
2862 return simplify_gen_relational (EQ, mode, imode,
2863 XEXP (op0, 0), const0_rtx);
2864 }
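	  /* This works because CLZ equals the mode width only for a zero
	     input; shifting right by log2 of that width therefore yields 1
	     exactly when X is zero, matching (eq X 0) when STORE_FLAG_VALUE
	     is 1.  */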
2865 goto canonicalize_shift;
2866
2867 case SMIN:
2868 if (width <= HOST_BITS_PER_WIDE_INT
2869 && CONST_INT_P (trueop1)
2870 && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width -1)
2871 && ! side_effects_p (op0))
2872 return op1;
2873 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2874 return op0;
2875 tem = simplify_associative_operation (code, mode, op0, op1);
2876 if (tem)
2877 return tem;
2878 break;
2879
2880 case SMAX:
2881 if (width <= HOST_BITS_PER_WIDE_INT
2882 && CONST_INT_P (trueop1)
2883 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
2884 && ! side_effects_p (op0))
2885 return op1;
2886 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2887 return op0;
2888 tem = simplify_associative_operation (code, mode, op0, op1);
2889 if (tem)
2890 return tem;
2891 break;
2892
2893 case UMIN:
2894 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2895 return op1;
2896 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2897 return op0;
2898 tem = simplify_associative_operation (code, mode, op0, op1);
2899 if (tem)
2900 return tem;
2901 break;
2902
2903 case UMAX:
2904 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2905 return op1;
2906 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2907 return op0;
2908 tem = simplify_associative_operation (code, mode, op0, op1);
2909 if (tem)
2910 return tem;
2911 break;
2912
2913 case SS_PLUS:
2914 case US_PLUS:
2915 case SS_MINUS:
2916 case US_MINUS:
2917 case SS_MULT:
2918 case US_MULT:
2919 case SS_DIV:
2920 case US_DIV:
2921 /* ??? There are simplifications that can be done. */
2922 return 0;
2923
2924 case VEC_SELECT:
2925 if (!VECTOR_MODE_P (mode))
2926 {
2927 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2928 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2929 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2930 gcc_assert (XVECLEN (trueop1, 0) == 1);
2931 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2932
2933 if (GET_CODE (trueop0) == CONST_VECTOR)
2934 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2935 (trueop1, 0, 0)));
2936
2937 /* Extract a scalar element from a nested VEC_SELECT expression
2938 (with optional nested VEC_CONCAT expression). Some targets
 2941 	     (i386) extract a scalar element from a vector using a chain of
 2942 	     nested VEC_SELECT expressions.  When the input operand is a
 2943 	     memory operand, this operation can be simplified to a simple
 2944 	     scalar load from an offset memory address.  */
2943 if (GET_CODE (trueop0) == VEC_SELECT)
2944 {
2945 rtx op0 = XEXP (trueop0, 0);
2946 rtx op1 = XEXP (trueop0, 1);
2947
2948 enum machine_mode opmode = GET_MODE (op0);
2949 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2950 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2951
2952 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2953 int elem;
2954
2955 rtvec vec;
2956 rtx tmp_op, tmp;
2957
2958 gcc_assert (GET_CODE (op1) == PARALLEL);
2959 gcc_assert (i < n_elts);
2960
 2963 	      /* Select the element pointed to by the nested selector.  */
2962 elem = INTVAL (XVECEXP (op1, 0, i));
2963
2964 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2965 if (GET_CODE (op0) == VEC_CONCAT)
2966 {
2967 rtx op00 = XEXP (op0, 0);
2968 rtx op01 = XEXP (op0, 1);
2969
2970 enum machine_mode mode00, mode01;
2971 int n_elts00, n_elts01;
2972
2973 mode00 = GET_MODE (op00);
2974 mode01 = GET_MODE (op01);
2975
2976 /* Find out number of elements of each operand. */
2977 if (VECTOR_MODE_P (mode00))
2978 {
2979 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2980 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2981 }
2982 else
2983 n_elts00 = 1;
2984
2985 if (VECTOR_MODE_P (mode01))
2986 {
2987 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2988 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2989 }
2990 else
2991 n_elts01 = 1;
2992
2993 gcc_assert (n_elts == n_elts00 + n_elts01);
2994
2995 /* Select correct operand of VEC_CONCAT
2996 and adjust selector. */
 2999 	      if (elem < n_elts00)
2998 tmp_op = op00;
2999 else
3000 {
3001 tmp_op = op01;
3002 elem -= n_elts00;
3003 }
3004 }
3005 else
3006 tmp_op = op0;
3007
3008 vec = rtvec_alloc (1);
3009 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3010
3011 tmp = gen_rtx_fmt_ee (code, mode,
3012 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3013 return tmp;
3014 }
3015 if (GET_CODE (trueop0) == VEC_DUPLICATE
3016 && GET_MODE (XEXP (trueop0, 0)) == mode)
3017 return XEXP (trueop0, 0);
3018 }
3019 else
3020 {
3021 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3022 gcc_assert (GET_MODE_INNER (mode)
3023 == GET_MODE_INNER (GET_MODE (trueop0)));
3024 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3025
3026 if (GET_CODE (trueop0) == CONST_VECTOR)
3027 {
3028 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3029 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3030 rtvec v = rtvec_alloc (n_elts);
3031 unsigned int i;
3032
3033 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3034 for (i = 0; i < n_elts; i++)
3035 {
3036 rtx x = XVECEXP (trueop1, 0, i);
3037
3038 gcc_assert (CONST_INT_P (x));
3039 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3040 INTVAL (x));
3041 }
3042
3043 return gen_rtx_CONST_VECTOR (mode, v);
3044 }
3045 }
3046
3047 if (XVECLEN (trueop1, 0) == 1
3048 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3049 && GET_CODE (trueop0) == VEC_CONCAT)
3050 {
3051 rtx vec = trueop0;
3052 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3053
3054 /* Try to find the element in the VEC_CONCAT. */
3055 while (GET_MODE (vec) != mode
3056 && GET_CODE (vec) == VEC_CONCAT)
3057 {
3058 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3059 if (offset < vec_size)
3060 vec = XEXP (vec, 0);
3061 else
3062 {
3063 offset -= vec_size;
3064 vec = XEXP (vec, 1);
3065 }
3066 vec = avoid_constant_pool_reference (vec);
3067 }
3068
3069 if (GET_MODE (vec) == mode)
3070 return vec;
3071 }
3072
3073 return 0;
3074 case VEC_CONCAT:
3075 {
3076 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3077 ? GET_MODE (trueop0)
3078 : GET_MODE_INNER (mode));
3079 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3080 ? GET_MODE (trueop1)
3081 : GET_MODE_INNER (mode));
3082
3083 gcc_assert (VECTOR_MODE_P (mode));
3084 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3085 == GET_MODE_SIZE (mode));
3086
3087 if (VECTOR_MODE_P (op0_mode))
3088 gcc_assert (GET_MODE_INNER (mode)
3089 == GET_MODE_INNER (op0_mode));
3090 else
3091 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3092
3093 if (VECTOR_MODE_P (op1_mode))
3094 gcc_assert (GET_MODE_INNER (mode)
3095 == GET_MODE_INNER (op1_mode));
3096 else
3097 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3098
3099 if ((GET_CODE (trueop0) == CONST_VECTOR
3100 || CONST_INT_P (trueop0)
3101 || GET_CODE (trueop0) == CONST_DOUBLE)
3102 && (GET_CODE (trueop1) == CONST_VECTOR
3103 || CONST_INT_P (trueop1)
3104 || GET_CODE (trueop1) == CONST_DOUBLE))
3105 {
3106 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3107 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3108 rtvec v = rtvec_alloc (n_elts);
3109 unsigned int i;
3110 unsigned in_n_elts = 1;
3111
3112 if (VECTOR_MODE_P (op0_mode))
3113 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3114 for (i = 0; i < n_elts; i++)
3115 {
3116 if (i < in_n_elts)
3117 {
3118 if (!VECTOR_MODE_P (op0_mode))
3119 RTVEC_ELT (v, i) = trueop0;
3120 else
3121 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3122 }
3123 else
3124 {
3125 if (!VECTOR_MODE_P (op1_mode))
3126 RTVEC_ELT (v, i) = trueop1;
3127 else
3128 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3129 i - in_n_elts);
3130 }
3131 }
3132
3133 return gen_rtx_CONST_VECTOR (mode, v);
3134 }
3135 }
3136 return 0;
3137
3138 default:
3139 gcc_unreachable ();
3140 }
3141
3142 return 0;
3143 }
3144
3145 rtx
3146 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3147 rtx op0, rtx op1)
3148 {
3149 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3150 HOST_WIDE_INT val;
3151 unsigned int width = GET_MODE_BITSIZE (mode);
3152
3153 if (VECTOR_MODE_P (mode)
3154 && code != VEC_CONCAT
3155 && GET_CODE (op0) == CONST_VECTOR
3156 && GET_CODE (op1) == CONST_VECTOR)
3157 {
3158 unsigned n_elts = GET_MODE_NUNITS (mode);
3159 enum machine_mode op0mode = GET_MODE (op0);
3160 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3161 enum machine_mode op1mode = GET_MODE (op1);
3162 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3163 rtvec v = rtvec_alloc (n_elts);
3164 unsigned int i;
3165
3166 gcc_assert (op0_n_elts == n_elts);
3167 gcc_assert (op1_n_elts == n_elts);
3168 for (i = 0; i < n_elts; i++)
3169 {
3170 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3171 CONST_VECTOR_ELT (op0, i),
3172 CONST_VECTOR_ELT (op1, i));
3173 if (!x)
3174 return 0;
3175 RTVEC_ELT (v, i) = x;
3176 }
3177
3178 return gen_rtx_CONST_VECTOR (mode, v);
3179 }
3180
3181 if (VECTOR_MODE_P (mode)
3182 && code == VEC_CONCAT
3183 && (CONST_INT_P (op0)
3184 || GET_CODE (op0) == CONST_DOUBLE
3185 || GET_CODE (op0) == CONST_FIXED)
3186 && (CONST_INT_P (op1)
3187 || GET_CODE (op1) == CONST_DOUBLE
3188 || GET_CODE (op1) == CONST_FIXED))
3189 {
3190 unsigned n_elts = GET_MODE_NUNITS (mode);
3191 rtvec v = rtvec_alloc (n_elts);
3192
3193 gcc_assert (n_elts >= 2);
3194 if (n_elts == 2)
3195 {
3196 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3197 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3198
3199 RTVEC_ELT (v, 0) = op0;
3200 RTVEC_ELT (v, 1) = op1;
3201 }
3202 else
3203 {
3204 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3205 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3206 unsigned i;
3207
3208 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3209 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3210 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3211
3212 for (i = 0; i < op0_n_elts; ++i)
3213 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3214 for (i = 0; i < op1_n_elts; ++i)
3215 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3216 }
3217
3218 return gen_rtx_CONST_VECTOR (mode, v);
3219 }
3220
3221 if (SCALAR_FLOAT_MODE_P (mode)
3222 && GET_CODE (op0) == CONST_DOUBLE
3223 && GET_CODE (op1) == CONST_DOUBLE
3224 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3225 {
3226 if (code == AND
3227 || code == IOR
3228 || code == XOR)
3229 {
3230 long tmp0[4];
3231 long tmp1[4];
3232 REAL_VALUE_TYPE r;
3233 int i;
3234
3235 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3236 GET_MODE (op0));
3237 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3238 GET_MODE (op1));
3239 for (i = 0; i < 4; i++)
3240 {
3241 switch (code)
3242 {
3243 case AND:
3244 tmp0[i] &= tmp1[i];
3245 break;
3246 case IOR:
3247 tmp0[i] |= tmp1[i];
3248 break;
3249 case XOR:
3250 tmp0[i] ^= tmp1[i];
3251 break;
3252 default:
3253 gcc_unreachable ();
3254 }
3255 }
3256 real_from_target (&r, tmp0, mode);
3257 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3258 }
3259 else
3260 {
3261 REAL_VALUE_TYPE f0, f1, value, result;
3262 bool inexact;
3263
3264 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3265 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3266 real_convert (&f0, mode, &f0);
3267 real_convert (&f1, mode, &f1);
3268
3269 if (HONOR_SNANS (mode)
3270 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3271 return 0;
3272
3273 if (code == DIV
3274 && REAL_VALUES_EQUAL (f1, dconst0)
3275 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3276 return 0;
3277
3278 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3279 && flag_trapping_math
3280 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3281 {
3282 int s0 = REAL_VALUE_NEGATIVE (f0);
3283 int s1 = REAL_VALUE_NEGATIVE (f1);
3284
3285 switch (code)
3286 {
3287 case PLUS:
3288 /* Inf + -Inf = NaN plus exception. */
3289 if (s0 != s1)
3290 return 0;
3291 break;
3292 case MINUS:
3293 /* Inf - Inf = NaN plus exception. */
3294 if (s0 == s1)
3295 return 0;
3296 break;
3297 case DIV:
3298 /* Inf / Inf = NaN plus exception. */
3299 return 0;
3300 default:
3301 break;
3302 }
3303 }
3304
3305 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3306 && flag_trapping_math
3307 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3308 || (REAL_VALUE_ISINF (f1)
3309 && REAL_VALUES_EQUAL (f0, dconst0))))
3310 /* Inf * 0 = NaN plus exception. */
3311 return 0;
3312
3313 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3314 &f0, &f1);
3315 real_convert (&result, mode, &value);
3316
3317 /* Don't constant fold this floating point operation if
 3320 	 the result has overflowed and flag_trapping_math is set.  */
3319
3320 if (flag_trapping_math
3321 && MODE_HAS_INFINITIES (mode)
3322 && REAL_VALUE_ISINF (result)
3323 && !REAL_VALUE_ISINF (f0)
3324 && !REAL_VALUE_ISINF (f1))
3325 /* Overflow plus exception. */
3326 return 0;
3327
3328 /* Don't constant fold this floating point operation if the
 3331 	 result may depend upon the run-time rounding mode and
3330 flag_rounding_math is set, or if GCC's software emulation
3331 is unable to accurately represent the result. */
3332
3333 if ((flag_rounding_math
3334 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3335 && (inexact || !real_identical (&result, &value)))
3336 return NULL_RTX;
3337
3338 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3339 }
3340 }
3341
3342 /* We can fold some multi-word operations. */
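  /* That is, operations in modes exactly twice the width of a host wide
     int, e.g. TImode when HOST_WIDE_INT is 64 bits wide.  */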
3343 if (GET_MODE_CLASS (mode) == MODE_INT
3344 && width == HOST_BITS_PER_DOUBLE_INT
3345 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3346 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3347 {
3348 double_int o0, o1, res, tmp;
3349
3350 o0 = rtx_to_double_int (op0);
3351 o1 = rtx_to_double_int (op1);
3352
3353 switch (code)
3354 {
3355 case MINUS:
3356 /* A - B == A + (-B). */
3357 o1 = double_int_neg (o1);
3358
3359 /* Fall through.... */
3360
3361 case PLUS:
3362 res = double_int_add (o0, o1);
3363 break;
3364
3365 case MULT:
3366 res = double_int_mul (o0, o1);
3367 break;
3368
3369 case DIV:
3370 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3371 o0.low, o0.high, o1.low, o1.high,
3372 &res.low, &res.high,
3373 &tmp.low, &tmp.high))
3374 return 0;
3375 break;
3376
3377 case MOD:
3378 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3379 o0.low, o0.high, o1.low, o1.high,
3380 &tmp.low, &tmp.high,
3381 &res.low, &res.high))
3382 return 0;
3383 break;
3384
3385 case UDIV:
3386 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3387 o0.low, o0.high, o1.low, o1.high,
3388 &res.low, &res.high,
3389 &tmp.low, &tmp.high))
3390 return 0;
3391 break;
3392
3393 case UMOD:
3394 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3395 o0.low, o0.high, o1.low, o1.high,
3396 &tmp.low, &tmp.high,
3397 &res.low, &res.high))
3398 return 0;
3399 break;
3400
3401 case AND:
3402 res = double_int_and (o0, o1);
3403 break;
3404
3405 case IOR:
3406 res = double_int_ior (o0, o1);
3407 break;
3408
3409 case XOR:
3410 res = double_int_xor (o0, o1);
3411 break;
3412
3413 case SMIN:
3414 res = double_int_smin (o0, o1);
3415 break;
3416
3417 case SMAX:
3418 res = double_int_smax (o0, o1);
3419 break;
3420
3421 case UMIN:
3422 res = double_int_umin (o0, o1);
3423 break;
3424
3425 case UMAX:
3426 res = double_int_umax (o0, o1);
3427 break;
3428
3429 case LSHIFTRT: case ASHIFTRT:
3430 case ASHIFT:
3431 case ROTATE: case ROTATERT:
3432 {
3433 unsigned HOST_WIDE_INT cnt;
3434
3435 if (SHIFT_COUNT_TRUNCATED)
3436 o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
3437
3438 if (!double_int_fits_in_uhwi_p (o1)
3439 || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
3440 return 0;
3441
3442 cnt = double_int_to_uhwi (o1);
3443
3444 if (code == LSHIFTRT || code == ASHIFTRT)
3445 res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
3446 code == ASHIFTRT);
3447 else if (code == ASHIFT)
3448 res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
3449 true);
3450 else if (code == ROTATE)
3451 res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3452 else /* code == ROTATERT */
3453 res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3454 }
3455 break;
3456
3457 default:
3458 return 0;
3459 }
3460
3461 return immed_double_int_const (res, mode);
3462 }
3463
3464 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3465 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3466 {
3467 /* Get the integer argument values in two forms:
3468 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3469
3470 arg0 = INTVAL (op0);
3471 arg1 = INTVAL (op1);
3472
3473 if (width < HOST_BITS_PER_WIDE_INT)
3474 {
3475 arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3476 arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3477
3478 arg0s = arg0;
3479 if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3480 arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3481
3482 arg1s = arg1;
3483 if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3484 arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3485 }
3486 else
3487 {
3488 arg0s = arg0;
3489 arg1s = arg1;
3490 }
3491
3492 /* Compute the value of the arithmetic. */
3493
3494 switch (code)
3495 {
3496 case PLUS:
3497 val = arg0s + arg1s;
3498 break;
3499
3500 case MINUS:
3501 val = arg0s - arg1s;
3502 break;
3503
3504 case MULT:
3505 val = arg0s * arg1s;
3506 break;
3507
3508 case DIV:
3509 if (arg1s == 0
3510 || ((unsigned HOST_WIDE_INT) arg0s
3511 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3512 && arg1s == -1))
3513 return 0;
3514 val = arg0s / arg1s;
3515 break;
3516
3517 case MOD:
3518 if (arg1s == 0
3519 || ((unsigned HOST_WIDE_INT) arg0s
3520 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3521 && arg1s == -1))
3522 return 0;
3523 val = arg0s % arg1s;
3524 break;
3525
3526 case UDIV:
3527 if (arg1 == 0
3528 || ((unsigned HOST_WIDE_INT) arg0s
3529 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3530 && arg1s == -1))
3531 return 0;
3532 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3533 break;
3534
3535 case UMOD:
3536 if (arg1 == 0
3537 || ((unsigned HOST_WIDE_INT) arg0s
3538 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3539 && arg1s == -1))
3540 return 0;
3541 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3542 break;
3543
3544 case AND:
3545 val = arg0 & arg1;
3546 break;
3547
3548 case IOR:
3549 val = arg0 | arg1;
3550 break;
3551
3552 case XOR:
3553 val = arg0 ^ arg1;
3554 break;
3555
3556 case LSHIFTRT:
3557 case ASHIFT:
3558 case ASHIFTRT:
3559 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3560 the value is in range. We can't return any old value for
3561 out-of-range arguments because either the middle-end (via
3562 shift_truncation_mask) or the back-end might be relying on
3563 target-specific knowledge. Nor can we rely on
3564 shift_truncation_mask, since the shift might not be part of an
3565 ashlM3, lshrM3 or ashrM3 instruction. */
3566 if (SHIFT_COUNT_TRUNCATED)
3567 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3568 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3569 return 0;
3570
3571 val = (code == ASHIFT
3572 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3573 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3574
3575 /* Sign-extend the result for arithmetic right shifts. */
3576 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3577 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3578 break;
3579
3580 case ROTATERT:
3581 if (arg1 < 0)
3582 return 0;
3583
3584 arg1 %= width;
3585 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3586 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3587 break;
3588
3589 case ROTATE:
3590 if (arg1 < 0)
3591 return 0;
3592
3593 arg1 %= width;
3594 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3595 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3596 break;
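	  /* For example, rotating the QImode value 0x81 left by 1 yields
	     0x03: the top bit wraps around into the bottom position.  */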
3597
3598 case COMPARE:
3599 /* Do nothing here. */
3600 return 0;
3601
3602 case SMIN:
3603 val = arg0s <= arg1s ? arg0s : arg1s;
3604 break;
3605
3606 case UMIN:
3607 val = ((unsigned HOST_WIDE_INT) arg0
3608 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3609 break;
3610
3611 case SMAX:
3612 val = arg0s > arg1s ? arg0s : arg1s;
3613 break;
3614
3615 case UMAX:
3616 val = ((unsigned HOST_WIDE_INT) arg0
3617 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3618 break;
3619
3620 case SS_PLUS:
3621 case US_PLUS:
3622 case SS_MINUS:
3623 case US_MINUS:
3624 case SS_MULT:
3625 case US_MULT:
3626 case SS_DIV:
3627 case US_DIV:
3628 case SS_ASHIFT:
3629 case US_ASHIFT:
3630 /* ??? There are simplifications that can be done. */
3631 return 0;
3632
3633 default:
3634 gcc_unreachable ();
3635 }
3636
3637 return gen_int_mode (val, mode);
3638 }
3639
3640 return NULL_RTX;
3641 }
3642
3643
3644 \f
3645 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3646 PLUS or MINUS.
3647
 3650    Rather than test for specific cases, we do this by a brute-force method
3649 and do all possible simplifications until no more changes occur. Then
3650 we rebuild the operation. */
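/* For example, (minus (plus A B) (neg C)) is first flattened into the
   signed term list {+A, +B, +C}; the pairwise loop below then merges any
   terms it can before the expression is rebuilt.  */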
3651
3652 struct simplify_plus_minus_op_data
3653 {
3654 rtx op;
3655 short neg;
3656 };
3657
3658 static bool
3659 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3660 {
3661 int result;
3662
3663 result = (commutative_operand_precedence (y)
3664 - commutative_operand_precedence (x));
3665 if (result)
3666 return result > 0;
3667
3668 /* Group together equal REGs to do more simplification. */
3669 if (REG_P (x) && REG_P (y))
3670 return REGNO (x) > REGNO (y);
3671 else
3672 return false;
3673 }
3674
3675 static rtx
3676 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3677 rtx op1)
3678 {
3679 struct simplify_plus_minus_op_data ops[8];
3680 rtx result, tem;
3681 int n_ops = 2, input_ops = 2;
3682 int changed, n_constants = 0, canonicalized = 0;
3683 int i, j;
3684
3685 memset (ops, 0, sizeof ops);
3686
3687 /* Set up the two operands and then expand them until nothing has been
3688 changed. If we run out of room in our array, give up; this should
3689 almost never happen. */
3690
3691 ops[0].op = op0;
3692 ops[0].neg = 0;
3693 ops[1].op = op1;
3694 ops[1].neg = (code == MINUS);
3695
3696 do
3697 {
3698 changed = 0;
3699
3700 for (i = 0; i < n_ops; i++)
3701 {
3702 rtx this_op = ops[i].op;
3703 int this_neg = ops[i].neg;
3704 enum rtx_code this_code = GET_CODE (this_op);
3705
3706 switch (this_code)
3707 {
3708 case PLUS:
3709 case MINUS:
3710 if (n_ops == 7)
3711 return NULL_RTX;
3712
3713 ops[n_ops].op = XEXP (this_op, 1);
3714 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3715 n_ops++;
3716
3717 ops[i].op = XEXP (this_op, 0);
3718 input_ops++;
3719 changed = 1;
3720 canonicalized |= this_neg;
3721 break;
3722
3723 case NEG:
3724 ops[i].op = XEXP (this_op, 0);
3725 ops[i].neg = ! this_neg;
3726 changed = 1;
3727 canonicalized = 1;
3728 break;
3729
3730 case CONST:
3731 if (n_ops < 7
3732 && GET_CODE (XEXP (this_op, 0)) == PLUS
3733 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3734 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3735 {
3736 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3737 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3738 ops[n_ops].neg = this_neg;
3739 n_ops++;
3740 changed = 1;
3741 canonicalized = 1;
3742 }
3743 break;
3744
3745 case NOT:
3746 /* ~a -> (-a - 1) */
3747 if (n_ops != 7)
3748 {
3749 ops[n_ops].op = constm1_rtx;
3750 ops[n_ops++].neg = this_neg;
3751 ops[i].op = XEXP (this_op, 0);
3752 ops[i].neg = !this_neg;
3753 changed = 1;
3754 canonicalized = 1;
3755 }
3756 break;
3757
3758 case CONST_INT:
3759 n_constants++;
3760 if (this_neg)
3761 {
3762 ops[i].op = neg_const_int (mode, this_op);
3763 ops[i].neg = 0;
3764 changed = 1;
3765 canonicalized = 1;
3766 }
3767 break;
3768
3769 default:
3770 break;
3771 }
3772 }
3773 }
3774 while (changed);
3775
3776 if (n_constants > 1)
3777 canonicalized = 1;
3778
3779 gcc_assert (n_ops >= 2);
3780
3781 /* If we only have two operands, we can avoid the loops. */
3782 if (n_ops == 2)
3783 {
3784 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3785 rtx lhs, rhs;
3786
3787 /* Get the two operands. Be careful with the order, especially for
3788 the cases where code == MINUS. */
3789 if (ops[0].neg && ops[1].neg)
3790 {
3791 lhs = gen_rtx_NEG (mode, ops[0].op);
3792 rhs = ops[1].op;
3793 }
3794 else if (ops[0].neg)
3795 {
3796 lhs = ops[1].op;
3797 rhs = ops[0].op;
3798 }
3799 else
3800 {
3801 lhs = ops[0].op;
3802 rhs = ops[1].op;
3803 }
3804
3805 return simplify_const_binary_operation (code, mode, lhs, rhs);
3806 }
3807
3808 /* Now simplify each pair of operands until nothing changes. */
3809 do
3810 {
3811 /* Insertion sort is good enough for an eight-element array. */
3812 for (i = 1; i < n_ops; i++)
3813 {
3814 struct simplify_plus_minus_op_data save;
3815 j = i - 1;
3816 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3817 continue;
3818
3819 canonicalized = 1;
3820 save = ops[i];
3821 do
3822 ops[j + 1] = ops[j];
3823 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3824 ops[j + 1] = save;
3825 }
3826
3827 changed = 0;
3828 for (i = n_ops - 1; i > 0; i--)
3829 for (j = i - 1; j >= 0; j--)
3830 {
3831 rtx lhs = ops[j].op, rhs = ops[i].op;
3832 int lneg = ops[j].neg, rneg = ops[i].neg;
3833
3834 if (lhs != 0 && rhs != 0)
3835 {
3836 enum rtx_code ncode = PLUS;
3837
3838 if (lneg != rneg)
3839 {
3840 ncode = MINUS;
3841 if (lneg)
3842 tem = lhs, lhs = rhs, rhs = tem;
3843 }
3844 else if (swap_commutative_operands_p (lhs, rhs))
3845 tem = lhs, lhs = rhs, rhs = tem;
3846
3847 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3848 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3849 {
3850 rtx tem_lhs, tem_rhs;
3851
3852 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3853 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3854 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3855
3856 if (tem && !CONSTANT_P (tem))
3857 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3858 }
3859 else
3860 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3861
3862 /* Reject "simplifications" that just wrap the two
3863 arguments in a CONST. Failure to do so can result
3864 in infinite recursion with simplify_binary_operation
3865 when it calls us to simplify CONST operations. */
3866 if (tem
3867 && ! (GET_CODE (tem) == CONST
3868 && GET_CODE (XEXP (tem, 0)) == ncode
3869 && XEXP (XEXP (tem, 0), 0) == lhs
3870 && XEXP (XEXP (tem, 0), 1) == rhs))
3871 {
3872 lneg &= rneg;
3873 if (GET_CODE (tem) == NEG)
3874 tem = XEXP (tem, 0), lneg = !lneg;
3875 if (CONST_INT_P (tem) && lneg)
3876 tem = neg_const_int (mode, tem), lneg = 0;
3877
3878 ops[i].op = tem;
3879 ops[i].neg = lneg;
3880 ops[j].op = NULL_RTX;
3881 changed = 1;
3882 canonicalized = 1;
3883 }
3884 }
3885 }
3886
3887 /* If nothing changed, fail. */
3888 if (!canonicalized)
3889 return NULL_RTX;
3890
3891 /* Pack all the operands to the lower-numbered entries. */
3892 for (i = 0, j = 0; j < n_ops; j++)
3893 if (ops[j].op)
3894 {
3895 ops[i] = ops[j];
3896 i++;
3897 }
3898 n_ops = i;
3899 }
3900 while (changed);
3901
3902 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3903 if (n_ops == 2
3904 && CONST_INT_P (ops[1].op)
3905 && CONSTANT_P (ops[0].op)
3906 && ops[0].neg)
3907 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3908
3909 /* We suppressed creation of trivial CONST expressions in the
3910 combination loop to avoid recursion. Create one manually now.
3911 The combination loop should have ensured that there is exactly
3912 one CONST_INT, and the sort will have ensured that it is last
3913 in the array and that any other constant will be next-to-last. */
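/* For instance, if the remaining operands are (symbol_ref S) and
   (const_int 8), plus_constant is expected to produce
   (const (plus (symbol_ref S) (const_int 8))) here.  */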
3914
3915 if (n_ops > 1
3916 && CONST_INT_P (ops[n_ops - 1].op)
3917 && CONSTANT_P (ops[n_ops - 2].op))
3918 {
3919 rtx value = ops[n_ops - 1].op;
3920 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3921 value = neg_const_int (mode, value);
3922 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3923 n_ops--;
3924 }
3925
3926 /* Put a non-negated operand first, if possible. */
3927
3928 for (i = 0; i < n_ops && ops[i].neg; i++)
3929 continue;
3930 if (i == n_ops)
3931 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3932 else if (i != 0)
3933 {
3934 tem = ops[0].op;
3935 ops[0] = ops[i];
3936 ops[i].op = tem;
3937 ops[i].neg = 1;
3938 }
3939
3940 /* Now make the result by performing the requested operations. */
3941 result = ops[0].op;
3942 for (i = 1; i < n_ops; i++)
3943 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3944 mode, result, ops[i].op);
3945
3946 return result;
3947 }
3948
3949 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3950 static bool
3951 plus_minus_operand_p (const_rtx x)
3952 {
3953 return GET_CODE (x) == PLUS
3954 || GET_CODE (x) == MINUS
3955 || (GET_CODE (x) == CONST
3956 && GET_CODE (XEXP (x, 0)) == PLUS
3957 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3958 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3959 }
3960
3961 /* Like simplify_binary_operation except used for relational operators.
3962 MODE is the mode of the result. If MODE is VOIDmode, the operands
3963 must not both be VOIDmode.
3964
3965 CMP_MODE specifies the mode in which the comparison is done, so it is
3966 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3967 the operands or, if both are VOIDmode, the operands are compared in
3968 "infinite precision". */
3969 rtx
3970 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3971 enum machine_mode cmp_mode, rtx op0, rtx op1)
3972 {
3973 rtx tem, trueop0, trueop1;
3974
3975 if (cmp_mode == VOIDmode)
3976 cmp_mode = GET_MODE (op0);
3977 if (cmp_mode == VOIDmode)
3978 cmp_mode = GET_MODE (op1);
3979
3980 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3981 if (tem)
3982 {
3983 if (SCALAR_FLOAT_MODE_P (mode))
3984 {
3985 if (tem == const0_rtx)
3986 return CONST0_RTX (mode);
3987 #ifdef FLOAT_STORE_FLAG_VALUE
3988 {
3989 REAL_VALUE_TYPE val;
3990 val = FLOAT_STORE_FLAG_VALUE (mode);
3991 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3992 }
3993 #else
3994 return NULL_RTX;
3995 #endif
3996 }
3997 if (VECTOR_MODE_P (mode))
3998 {
3999 if (tem == const0_rtx)
4000 return CONST0_RTX (mode);
4001 #ifdef VECTOR_STORE_FLAG_VALUE
4002 {
4003 int i, units;
4004 rtvec v;
4005
4006 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4007 if (val == NULL_RTX)
4008 return NULL_RTX;
4009 if (val == const1_rtx)
4010 return CONST1_RTX (mode);
4011
4012 units = GET_MODE_NUNITS (mode);
4013 v = rtvec_alloc (units);
4014 for (i = 0; i < units; i++)
4015 RTVEC_ELT (v, i) = val;
4016 return gen_rtx_raw_CONST_VECTOR (mode, v);
4017 }
4018 #else
4019 return NULL_RTX;
4020 #endif
4021 }
4022
4023 return tem;
4024 }
4025
4026 /* For the following tests, ensure const0_rtx is op1. */
4027 if (swap_commutative_operands_p (op0, op1)
4028 || (op0 == const0_rtx && op1 != const0_rtx))
4029 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4030
4031 /* If op0 is a compare, extract the comparison arguments from it. */
4032 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4033 return simplify_gen_relational (code, mode, VOIDmode,
4034 XEXP (op0, 0), XEXP (op0, 1));
4035
4036 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4037 || CC0_P (op0))
4038 return NULL_RTX;
4039
4040 trueop0 = avoid_constant_pool_reference (op0);
4041 trueop1 = avoid_constant_pool_reference (op1);
4042 return simplify_relational_operation_1 (code, mode, cmp_mode,
4043 trueop0, trueop1);
4044 }
4045
4046 /* This part of simplify_relational_operation is only used when CMP_MODE
4047 is not in class MODE_CC (i.e. it is a real comparison).
4048
4049 MODE is the mode of the result, while CMP_MODE specifies the mode
4050 in which the comparison is done, so it is the mode of the operands. */
4051
4052 static rtx
4053 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4054 enum machine_mode cmp_mode, rtx op0, rtx op1)
4055 {
4056 enum rtx_code op0code = GET_CODE (op0);
4057
4058 if (op1 == const0_rtx && COMPARISON_P (op0))
4059 {
4060 /* If op0 is a comparison, extract the comparison arguments
4061 from it. */
4062 if (code == NE)
4063 {
4064 if (GET_MODE (op0) == mode)
4065 return simplify_rtx (op0);
4066 else
4067 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4068 XEXP (op0, 0), XEXP (op0, 1));
4069 }
4070 else if (code == EQ)
4071 {
4072 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4073 if (new_code != UNKNOWN)
4074 return simplify_gen_relational (new_code, mode, VOIDmode,
4075 XEXP (op0, 0), XEXP (op0, 1));
4076 }
4077 }
4078
4079 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4080 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4081 if ((code == LTU || code == GEU)
4082 && GET_CODE (op0) == PLUS
4083 && CONST_INT_P (XEXP (op0, 1))
4084 && (rtx_equal_p (op1, XEXP (op0, 0))
4085 || rtx_equal_p (op1, XEXP (op0, 1))))
4086 {
4087 rtx new_cmp
4088 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4089 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4090 cmp_mode, XEXP (op0, 0), new_cmp);
4091 }
4092
4093 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4094 if ((code == LTU || code == GEU)
4095 && GET_CODE (op0) == PLUS
4096 && rtx_equal_p (op1, XEXP (op0, 1))
4097 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4098 && !rtx_equal_p (op1, XEXP (op0, 0)))
4099 return simplify_gen_relational (code, mode, cmp_mode, op0,
4100 copy_rtx (XEXP (op0, 0)));
4101
4102 if (op1 == const0_rtx)
4103 {
4104 /* Canonicalize (GTU x 0) as (NE x 0). */
4105 if (code == GTU)
4106 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4107 /* Canonicalize (LEU x 0) as (EQ x 0). */
4108 if (code == LEU)
4109 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4110 }
4111 else if (op1 == const1_rtx)
4112 {
4113 switch (code)
4114 {
4115 case GE:
4116 /* Canonicalize (GE x 1) as (GT x 0). */
4117 return simplify_gen_relational (GT, mode, cmp_mode,
4118 op0, const0_rtx);
4119 case GEU:
4120 /* Canonicalize (GEU x 1) as (NE x 0). */
4121 return simplify_gen_relational (NE, mode, cmp_mode,
4122 op0, const0_rtx);
4123 case LT:
4124 /* Canonicalize (LT x 1) as (LE x 0). */
4125 return simplify_gen_relational (LE, mode, cmp_mode,
4126 op0, const0_rtx);
4127 case LTU:
4128 /* Canonicalize (LTU x 1) as (EQ x 0). */
4129 return simplify_gen_relational (EQ, mode, cmp_mode,
4130 op0, const0_rtx);
4131 default:
4132 break;
4133 }
4134 }
4135 else if (op1 == constm1_rtx)
4136 {
4137 /* Canonicalize (LE x -1) as (LT x 0). */
4138 if (code == LE)
4139 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4140 /* Canonicalize (GT x -1) as (GE x 0). */
4141 if (code == GT)
4142 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4143 }
4144
4145 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4146 if ((code == EQ || code == NE)
4147 && (op0code == PLUS || op0code == MINUS)
4148 && CONSTANT_P (op1)
4149 && CONSTANT_P (XEXP (op0, 1))
4150 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4151 {
4152 rtx x = XEXP (op0, 0);
4153 rtx c = XEXP (op0, 1);
4154
4155 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4156 cmp_mode, op1, c);
4157 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4158 }
4159
4160 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4161 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4162 if (code == NE
4163 && op1 == const0_rtx
4164 && GET_MODE_CLASS (mode) == MODE_INT
4165 && cmp_mode != VOIDmode
4166 /* ??? Work-around BImode bugs in the ia64 backend. */
4167 && mode != BImode
4168 && cmp_mode != BImode
4169 && nonzero_bits (op0, cmp_mode) == 1
4170 && STORE_FLAG_VALUE == 1)
4171 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4172 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4173 : lowpart_subreg (mode, op0, cmp_mode);
4174
4175 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4176 if ((code == EQ || code == NE)
4177 && op1 == const0_rtx
4178 && op0code == XOR)
4179 return simplify_gen_relational (code, mode, cmp_mode,
4180 XEXP (op0, 0), XEXP (op0, 1));
4181
4182 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4183 if ((code == EQ || code == NE)
4184 && op0code == XOR
4185 && rtx_equal_p (XEXP (op0, 0), op1)
4186 && !side_effects_p (XEXP (op0, 0)))
4187 return simplify_gen_relational (code, mode, cmp_mode,
4188 XEXP (op0, 1), const0_rtx);
4189
4190 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4191 if ((code == EQ || code == NE)
4192 && op0code == XOR
4193 && rtx_equal_p (XEXP (op0, 1), op1)
4194 && !side_effects_p (XEXP (op0, 1)))
4195 return simplify_gen_relational (code, mode, cmp_mode,
4196 XEXP (op0, 0), const0_rtx);
4197
4198 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4199 if ((code == EQ || code == NE)
4200 && op0code == XOR
4201 && (CONST_INT_P (op1)
4202 || GET_CODE (op1) == CONST_DOUBLE)
4203 && (CONST_INT_P (XEXP (op0, 1))
4204 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4205 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4206 simplify_gen_binary (XOR, cmp_mode,
4207 XEXP (op0, 1), op1));
4208
4209 if (op0code == POPCOUNT && op1 == const0_rtx)
4210 switch (code)
4211 {
4212 case EQ:
4213 case LE:
4214 case LEU:
4215 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4216 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4217 XEXP (op0, 0), const0_rtx);
4218
4219 case NE:
4220 case GT:
4221 case GTU:
4222 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4223 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4224 XEXP (op0, 0), const0_rtx);
4225
4226 default:
4227 break;
4228 }
4229
4230 return NULL_RTX;
4231 }
4232
4233 enum
4234 {
4235 CMP_EQ = 1,
4236 CMP_LT = 2,
4237 CMP_GT = 4,
4238 CMP_LTU = 8,
4239 CMP_GTU = 16
4240 };
4241
4242
4243 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4244 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4245 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4246 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4247 For floating-point comparisons, assume that the operands were ordered. */
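/* As an illustration, comparing (const_int 3) with (const_int 5) sets
   CMP_LT | CMP_LTU, and comparison_result (LEU, CMP_LT | CMP_LTU) then
   returns const_true_rtx.  */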
4248
4249 static rtx
4250 comparison_result (enum rtx_code code, int known_results)
4251 {
4252 switch (code)
4253 {
4254 case EQ:
4255 case UNEQ:
4256 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4257 case NE:
4258 case LTGT:
4259 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4260
4261 case LT:
4262 case UNLT:
4263 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4264 case GE:
4265 case UNGE:
4266 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4267
4268 case GT:
4269 case UNGT:
4270 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4271 case LE:
4272 case UNLE:
4273 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4274
4275 case LTU:
4276 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4277 case GEU:
4278 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4279
4280 case GTU:
4281 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4282 case LEU:
4283 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4284
4285 case ORDERED:
4286 return const_true_rtx;
4287 case UNORDERED:
4288 return const0_rtx;
4289 default:
4290 gcc_unreachable ();
4291 }
4292 }
4293
4294 /* Check if the given comparison (done in the given MODE) is actually a
4295 tautology or a contradiction.
4296 If no simplification is possible, this function returns zero.
4297 Otherwise, it returns either const_true_rtx or const0_rtx. */
4298
4299 rtx
4300 simplify_const_relational_operation (enum rtx_code code,
4301 enum machine_mode mode,
4302 rtx op0, rtx op1)
4303 {
4304 rtx tem;
4305 rtx trueop0;
4306 rtx trueop1;
4307
4308 gcc_assert (mode != VOIDmode
4309 || (GET_MODE (op0) == VOIDmode
4310 && GET_MODE (op1) == VOIDmode));
4311
4312 /* If op0 is a compare, extract the comparison arguments from it. */
4313 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4314 {
4315 op1 = XEXP (op0, 1);
4316 op0 = XEXP (op0, 0);
4317
4318 if (GET_MODE (op0) != VOIDmode)
4319 mode = GET_MODE (op0);
4320 else if (GET_MODE (op1) != VOIDmode)
4321 mode = GET_MODE (op1);
4322 else
4323 return 0;
4324 }
4325
4326 /* We can't simplify MODE_CC values since we don't know what the
4327 actual comparison is. */
4328 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4329 return 0;
4330
4331 /* Make sure the constant is second. */
4332 if (swap_commutative_operands_p (op0, op1))
4333 {
4334 tem = op0, op0 = op1, op1 = tem;
4335 code = swap_condition (code);
4336 }
4337
4338 trueop0 = avoid_constant_pool_reference (op0);
4339 trueop1 = avoid_constant_pool_reference (op1);
4340
4341 /* For integer comparisons of A and B maybe we can simplify A - B and can
4342 then simplify a comparison of that with zero. If A and B are both either
4343 a register or a CONST_INT, this can't help; testing for these cases will
4344 prevent infinite recursion here and speed things up.
4345
4346 We can only do this for EQ and NE comparisons as otherwise we may
4347 lose or introduce overflow which we cannot disregard as undefined as
4348 we do not know the signedness of the operation on either the left or
4349 the right hand side of the comparison. */
4350
4351 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4352 && (code == EQ || code == NE)
4353 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4354 && (REG_P (op1) || CONST_INT_P (trueop1)))
4355 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4356 /* We cannot do this if tem is a nonzero address. */
4357 && ! nonzero_address_p (tem))
4358 return simplify_const_relational_operation (signed_condition (code),
4359 mode, tem, const0_rtx);
4360
4361 if (! HONOR_NANS (mode) && code == ORDERED)
4362 return const_true_rtx;
4363
4364 if (! HONOR_NANS (mode) && code == UNORDERED)
4365 return const0_rtx;
4366
4367 /* For modes without NaNs, if the two operands are equal, we know the
4368 result except if they have side-effects. Even with NaNs we know
4369 the result of unordered comparisons and, if signaling NaNs are
4370 irrelevant, also the result of LT/GT/LTGT. */
4371 if ((! HONOR_NANS (GET_MODE (trueop0))
4372 || code == UNEQ || code == UNLE || code == UNGE
4373 || ((code == LT || code == GT || code == LTGT)
4374 && ! HONOR_SNANS (GET_MODE (trueop0))))
4375 && rtx_equal_p (trueop0, trueop1)
4376 && ! side_effects_p (trueop0))
4377 return comparison_result (code, CMP_EQ);
4378
4379 /* If the operands are floating-point constants, see if we can fold
4380 the result. */
4381 if (GET_CODE (trueop0) == CONST_DOUBLE
4382 && GET_CODE (trueop1) == CONST_DOUBLE
4383 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4384 {
4385 REAL_VALUE_TYPE d0, d1;
4386
4387 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4388 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4389
4390 /* Comparisons are unordered iff at least one of the values is NaN. */
4391 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4392 switch (code)
4393 {
4394 case UNEQ:
4395 case UNLT:
4396 case UNGT:
4397 case UNLE:
4398 case UNGE:
4399 case NE:
4400 case UNORDERED:
4401 return const_true_rtx;
4402 case EQ:
4403 case LT:
4404 case GT:
4405 case LE:
4406 case GE:
4407 case LTGT:
4408 case ORDERED:
4409 return const0_rtx;
4410 default:
4411 return 0;
4412 }
4413
4414 return comparison_result (code,
4415 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4416 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4417 }
4418
4419 /* Otherwise, see if the operands are both integers. */
4420 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4421 && (GET_CODE (trueop0) == CONST_DOUBLE
4422 || CONST_INT_P (trueop0))
4423 && (GET_CODE (trueop1) == CONST_DOUBLE
4424 || CONST_INT_P (trueop1)))
4425 {
4426 int width = GET_MODE_BITSIZE (mode);
4427 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4428 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4429
4430 /* Get the two words comprising each integer constant. */
4431 if (GET_CODE (trueop0) == CONST_DOUBLE)
4432 {
4433 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4434 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4435 }
4436 else
4437 {
4438 l0u = l0s = INTVAL (trueop0);
4439 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4440 }
4441
4442 if (GET_CODE (trueop1) == CONST_DOUBLE)
4443 {
4444 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4445 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4446 }
4447 else
4448 {
4449 l1u = l1s = INTVAL (trueop1);
4450 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4451 }
4452
4453 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4454 we have to sign or zero-extend the values. */
4455 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4456 {
4457 l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4458 l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4459
4460 if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4461 l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4462
4463 if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4464 l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4465 }
4466 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4467 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4468
4469 if (h0u == h1u && l0u == l1u)
4470 return comparison_result (code, CMP_EQ);
4471 else
4472 {
4473 int cr;
4474 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4475 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4476 return comparison_result (code, cr);
4477 }
4478 }
4479
4480 /* Optimize comparisons with upper and lower bounds. */
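  /* For example, if nonzero_bits shows that TRUEOP0 fits in 8 bits, then
     (gtu TRUEOP0 (const_int 255)) folds to const0_rtx and
     (leu TRUEOP0 (const_int 255)) folds to const_true_rtx.  */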
4481 if (SCALAR_INT_MODE_P (mode)
4482 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4483 && CONST_INT_P (trueop1))
4484 {
4485 int sign;
4486 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4487 HOST_WIDE_INT val = INTVAL (trueop1);
4488 HOST_WIDE_INT mmin, mmax;
4489
4490 if (code == GEU
4491 || code == LEU
4492 || code == GTU
4493 || code == LTU)
4494 sign = 0;
4495 else
4496 sign = 1;
4497
4498 /* Get a reduced range if the sign bit is zero. */
4499 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4500 {
4501 mmin = 0;
4502 mmax = nonzero;
4503 }
4504 else
4505 {
4506 rtx mmin_rtx, mmax_rtx;
4507 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4508
4509 mmin = INTVAL (mmin_rtx);
4510 mmax = INTVAL (mmax_rtx);
4511 if (sign)
4512 {
4513 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4514
4515 mmin >>= (sign_copies - 1);
4516 mmax >>= (sign_copies - 1);
4517 }
4518 }
4519
4520 switch (code)
4521 {
4522 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4523 case GEU:
4524 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4525 return const_true_rtx;
4526 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4527 return const0_rtx;
4528 break;
4529 case GE:
4530 if (val <= mmin)
4531 return const_true_rtx;
4532 if (val > mmax)
4533 return const0_rtx;
4534 break;
4535
4536 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4537 case LEU:
4538 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4539 return const_true_rtx;
4540 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4541 return const0_rtx;
4542 break;
4543 case LE:
4544 if (val >= mmax)
4545 return const_true_rtx;
4546 if (val < mmin)
4547 return const0_rtx;
4548 break;
4549
4550 case EQ:
4551 /* x == y is always false for y out of range. */
4552 if (val < mmin || val > mmax)
4553 return const0_rtx;
4554 break;
4555
4556 /* x > y is always false for y >= mmax, always true for y < mmin. */
4557 case GTU:
4558 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4559 return const0_rtx;
4560 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4561 return const_true_rtx;
4562 break;
4563 case GT:
4564 if (val >= mmax)
4565 return const0_rtx;
4566 if (val < mmin)
4567 return const_true_rtx;
4568 break;
4569
4570 /* x < y is always false for y <= mmin, always true for y > mmax. */
4571 case LTU:
4572 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4573 return const0_rtx;
4574 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4575 return const_true_rtx;
4576 break;
4577 case LT:
4578 if (val <= mmin)
4579 return const0_rtx;
4580 if (val > mmax)
4581 return const_true_rtx;
4582 break;
4583
4584 case NE:
4585 /* x != y is always true for y out of range. */
4586 if (val < mmin || val > mmax)
4587 return const_true_rtx;
4588 break;
4589
4590 default:
4591 break;
4592 }
4593 }
4594
4595 /* Optimize integer comparisons with zero. */
4596 if (trueop1 == const0_rtx)
4597 {
4598 /* Some addresses are known to be nonzero. We don't know
4599 their sign, but equality comparisons are known. */
4600 if (nonzero_address_p (trueop0))
4601 {
4602 if (code == EQ || code == LEU)
4603 return const0_rtx;
4604 if (code == NE || code == GTU)
4605 return const_true_rtx;
4606 }
4607
4608 /* See if the first operand is an IOR with a constant. If so, we
4609 may be able to determine the result of this comparison. */
4610 if (GET_CODE (op0) == IOR)
4611 {
4612 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4613 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4614 {
4615 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4616 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4617 && (UINTVAL (inner_const)
4618 & ((unsigned HOST_WIDE_INT) 1
4619 << sign_bitnum)));
4620
4621 switch (code)
4622 {
4623 case EQ:
4624 case LEU:
4625 return const0_rtx;
4626 case NE:
4627 case GTU:
4628 return const_true_rtx;
4629 case LT:
4630 case LE:
4631 if (has_sign)
4632 return const_true_rtx;
4633 break;
4634 case GT:
4635 case GE:
4636 if (has_sign)
4637 return const0_rtx;
4638 break;
4639 default:
4640 break;
4641 }
4642 }
4643 }
4644 }
4645
4646 /* Optimize comparison of ABS with zero. */
4647 if (trueop1 == CONST0_RTX (mode)
4648 && (GET_CODE (trueop0) == ABS
4649 || (GET_CODE (trueop0) == FLOAT_EXTEND
4650 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4651 {
4652 switch (code)
4653 {
4654 case LT:
4655 /* Optimize abs(x) < 0.0. */
4656 if (!HONOR_SNANS (mode)
4657 && (!INTEGRAL_MODE_P (mode)
4658 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4659 {
4660 if (INTEGRAL_MODE_P (mode)
4661 && (issue_strict_overflow_warning
4662 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4663 warning (OPT_Wstrict_overflow,
4664 ("assuming signed overflow does not occur when "
4665 "assuming abs (x) < 0 is false"));
4666 return const0_rtx;
4667 }
4668 break;
4669
4670 case GE:
4671 /* Optimize abs(x) >= 0.0. */
4672 if (!HONOR_NANS (mode)
4673 && (!INTEGRAL_MODE_P (mode)
4674 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4675 {
4676 if (INTEGRAL_MODE_P (mode)
4677 && (issue_strict_overflow_warning
4678 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4679 warning (OPT_Wstrict_overflow,
4680 ("assuming signed overflow does not occur when "
4681 "assuming abs (x) >= 0 is true"));
4682 return const_true_rtx;
4683 }
4684 break;
4685
4686 case UNGE:
4687 /* Optimize ! (abs(x) < 0.0). */
4688 return const_true_rtx;
4689
4690 default:
4691 break;
4692 }
4693 }
4694
4695 return 0;
4696 }
4697 \f
4698 /* Simplify CODE, an operation with result mode MODE and three operands,
4699 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4700 a constant. Return 0 if no simplification is possible. */
4701
4702 rtx
4703 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4704 enum machine_mode op0_mode, rtx op0, rtx op1,
4705 rtx op2)
4706 {
4707 unsigned int width = GET_MODE_BITSIZE (mode);
4708 bool any_change = false;
4709 rtx tem;
4710
4711 /* VOIDmode means "infinite" precision. */
4712 if (width == 0)
4713 width = HOST_BITS_PER_WIDE_INT;
4714
4715 switch (code)
4716 {
4717 case FMA:
4718 /* Simplify negations around the multiplication. */
4719 /* -a * -b + c => a * b + c. */
4720 if (GET_CODE (op0) == NEG)
4721 {
4722 tem = simplify_unary_operation (NEG, mode, op1, mode);
4723 if (tem)
4724 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4725 }
4726 else if (GET_CODE (op1) == NEG)
4727 {
4728 tem = simplify_unary_operation (NEG, mode, op0, mode);
4729 if (tem)
4730 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4731 }
4732
4733 /* Canonicalize the two multiplication operands. */
4734 /* a * -b + c => -b * a + c. */
4735 if (swap_commutative_operands_p (op0, op1))
4736 tem = op0, op0 = op1, op1 = tem, any_change = true;
4737
4738 if (any_change)
4739 return gen_rtx_FMA (mode, op0, op1, op2);
4740 return NULL_RTX;
4741
4742 case SIGN_EXTRACT:
4743 case ZERO_EXTRACT:
4744 if (CONST_INT_P (op0)
4745 && CONST_INT_P (op1)
4746 && CONST_INT_P (op2)
4747 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4748 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4749 {
4750 /* Extracting a bit-field from a constant.  */
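	    /* E.g., assuming !BITS_BIG_ENDIAN,
	       (zero_extract:SI (const_int 0x76) (const_int 4) (const_int 1))
	       extracts bits 1..4 of 0x76 and yields (const_int 11).  */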
4751 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4752
4753 if (BITS_BIG_ENDIAN)
4754 val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
4755 else
4756 val >>= INTVAL (op2);
4757
4758 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4759 {
4760 /* First zero-extend. */
4761 val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4762 /* If desired, propagate sign bit. */
4763 if (code == SIGN_EXTRACT
4764 && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))
4765 != 0)
4766 val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4767 }
4768
4769 /* Clear the bits that don't belong in our mode,
4770 unless they and our sign bit are all one.
4771 So we get either a reasonable negative value or a reasonable
4772 unsigned value for this mode. */
4773 if (width < HOST_BITS_PER_WIDE_INT
4774 && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
4775 != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
4776 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4777
4778 return gen_int_mode (val, mode);
4779 }
4780 break;
4781
4782 case IF_THEN_ELSE:
4783 if (CONST_INT_P (op0))
4784 return op0 != const0_rtx ? op1 : op2;
4785
4786 /* Convert c ? a : a into "a". */
4787 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4788 return op1;
4789
4790 /* Convert a != b ? a : b into "a". */
4791 if (GET_CODE (op0) == NE
4792 && ! side_effects_p (op0)
4793 && ! HONOR_NANS (mode)
4794 && ! HONOR_SIGNED_ZEROS (mode)
4795 && ((rtx_equal_p (XEXP (op0, 0), op1)
4796 && rtx_equal_p (XEXP (op0, 1), op2))
4797 || (rtx_equal_p (XEXP (op0, 0), op2)
4798 && rtx_equal_p (XEXP (op0, 1), op1))))
4799 return op1;
4800
4801 /* Convert a == b ? a : b into "b". */
4802 if (GET_CODE (op0) == EQ
4803 && ! side_effects_p (op0)
4804 && ! HONOR_NANS (mode)
4805 && ! HONOR_SIGNED_ZEROS (mode)
4806 && ((rtx_equal_p (XEXP (op0, 0), op1)
4807 && rtx_equal_p (XEXP (op0, 1), op2))
4808 || (rtx_equal_p (XEXP (op0, 0), op2)
4809 && rtx_equal_p (XEXP (op0, 1), op1))))
4810 return op2;
4811
4812 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4813 {
4814 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4815 ? GET_MODE (XEXP (op0, 1))
4816 : GET_MODE (XEXP (op0, 0)));
4817 rtx temp;
4818
4819 /* Look for happy constants in op1 and op2. */
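	  /* E.g., on a target where STORE_FLAG_VALUE is 1,
	     (if_then_else (lt X Y) (const_int 1) (const_int 0)) becomes
	     (lt X Y), and with the two constants swapped it becomes
	     (ge X Y) when the comparison can be reversed.  */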
4820 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4821 {
4822 HOST_WIDE_INT t = INTVAL (op1);
4823 HOST_WIDE_INT f = INTVAL (op2);
4824
4825 if (t == STORE_FLAG_VALUE && f == 0)
4826 code = GET_CODE (op0);
4827 else if (t == 0 && f == STORE_FLAG_VALUE)
4828 {
4829 enum rtx_code tmp;
4830 tmp = reversed_comparison_code (op0, NULL_RTX);
4831 if (tmp == UNKNOWN)
4832 break;
4833 code = tmp;
4834 }
4835 else
4836 break;
4837
4838 return simplify_gen_relational (code, mode, cmp_mode,
4839 XEXP (op0, 0), XEXP (op0, 1));
4840 }
4841
4842 if (cmp_mode == VOIDmode)
4843 cmp_mode = op0_mode;
4844 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4845 cmp_mode, XEXP (op0, 0),
4846 XEXP (op0, 1));
4847
4848 /* See if any simplifications were possible. */
4849 if (temp)
4850 {
4851 if (CONST_INT_P (temp))
4852 return temp == const0_rtx ? op2 : op1;
4853 else if (temp)
4854 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4855 }
4856 }
4857 break;
4858
4859 case VEC_MERGE:
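      /* Operand 2 is a bitmask selecting elements: if bit I is set,
	 element I comes from OP0, otherwise from OP1.  E.g.
	 (vec_merge:V4SI A B (const_int 5)) takes elements 0 and 2 from A
	 and elements 1 and 3 from B.  */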
4860 gcc_assert (GET_MODE (op0) == mode);
4861 gcc_assert (GET_MODE (op1) == mode);
4862 gcc_assert (VECTOR_MODE_P (mode));
4863 op2 = avoid_constant_pool_reference (op2);
4864 if (CONST_INT_P (op2))
4865 {
4866 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4867 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4868 int mask = (1 << n_elts) - 1;
4869
4870 if (!(INTVAL (op2) & mask))
4871 return op1;
4872 if ((INTVAL (op2) & mask) == mask)
4873 return op0;
4874
4875 op0 = avoid_constant_pool_reference (op0);
4876 op1 = avoid_constant_pool_reference (op1);
4877 if (GET_CODE (op0) == CONST_VECTOR
4878 && GET_CODE (op1) == CONST_VECTOR)
4879 {
4880 rtvec v = rtvec_alloc (n_elts);
4881 unsigned int i;
4882
4883 for (i = 0; i < n_elts; i++)
4884 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4885 ? CONST_VECTOR_ELT (op0, i)
4886 : CONST_VECTOR_ELT (op1, i));
4887 return gen_rtx_CONST_VECTOR (mode, v);
4888 }
4889 }
4890 break;
4891
4892 default:
4893 gcc_unreachable ();
4894 }
4895
4896 return 0;
4897 }
4898
4899 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4900 or CONST_VECTOR,
4901 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4902
4903 Works by unpacking OP into a collection of 8-bit values
4904 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4905 and then repacking them again for OUTERMODE. */
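/* For instance, on a little-endian target, (subreg:QI (const_int 0x1234) 0)
   with INNERMODE SImode repacks to (const_int 0x34).  */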
4906
4907 static rtx
4908 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4909 enum machine_mode innermode, unsigned int byte)
4910 {
4911 /* We support up to 512-bit values (for V8DFmode). */
4912 enum {
4913 max_bitsize = 512,
4914 value_bit = 8,
4915 value_mask = (1 << value_bit) - 1
4916 };
4917 unsigned char value[max_bitsize / value_bit];
4918 int value_start;
4919 int i;
4920 int elem;
4921
4922 int num_elem;
4923 rtx * elems;
4924 int elem_bitsize;
4925 rtx result_s;
4926 rtvec result_v = NULL;
4927 enum mode_class outer_class;
4928 enum machine_mode outer_submode;
4929
4930 /* Some ports misuse CCmode. */
4931 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4932 return op;
4933
4934 /* We have no way to represent a complex constant at the rtl level. */
4935 if (COMPLEX_MODE_P (outermode))
4936 return NULL_RTX;
4937
4938 /* Unpack the value. */
4939
4940 if (GET_CODE (op) == CONST_VECTOR)
4941 {
4942 num_elem = CONST_VECTOR_NUNITS (op);
4943 elems = &CONST_VECTOR_ELT (op, 0);
4944 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4945 }
4946 else
4947 {
4948 num_elem = 1;
4949 elems = &op;
4950 elem_bitsize = max_bitsize;
4951 }
4952 /* If this asserts, it is too complicated; reducing value_bit may help. */
4953 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4954 /* I don't know how to handle endianness of sub-units. */
4955 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4956
4957 for (elem = 0; elem < num_elem; elem++)
4958 {
4959 unsigned char * vp;
4960 rtx el = elems[elem];
4961
4962 /* Vectors are kept in target memory order. (This is probably
4963 a mistake.) */
4964 {
4965 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4966 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4967 / BITS_PER_UNIT);
4968 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4969 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4970 unsigned bytele = (subword_byte % UNITS_PER_WORD
4971 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4972 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4973 }
4974
4975 switch (GET_CODE (el))
4976 {
4977 case CONST_INT:
4978 for (i = 0;
4979 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4980 i += value_bit)
4981 *vp++ = INTVAL (el) >> i;
4982 /* CONST_INTs are always logically sign-extended. */
4983 for (; i < elem_bitsize; i += value_bit)
4984 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4985 break;
4986
4987 case CONST_DOUBLE:
4988 if (GET_MODE (el) == VOIDmode)
4989 {
4990 /* If this triggers, someone should have generated a
4991 CONST_INT instead. */
4992 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4993
4994 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4995 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4996 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4997 {
4998 *vp++
4999 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5000 i += value_bit;
5001 }
5002 /* It shouldn't matter what's done here, so fill it with
5003 zero. */
5004 for (; i < elem_bitsize; i += value_bit)
5005 *vp++ = 0;
5006 }
5007 else
5008 {
5009 long tmp[max_bitsize / 32];
5010 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5011
5012 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5013 gcc_assert (bitsize <= elem_bitsize);
5014 gcc_assert (bitsize % value_bit == 0);
5015
5016 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5017 GET_MODE (el));
5018
5019 /* real_to_target produces its result in words affected by
5020 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5021 and use WORDS_BIG_ENDIAN instead; see the documentation
5022 of SUBREG in rtl.texi. */
5023 for (i = 0; i < bitsize; i += value_bit)
5024 {
5025 int ibase;
5026 if (WORDS_BIG_ENDIAN)
5027 ibase = bitsize - 1 - i;
5028 else
5029 ibase = i;
5030 *vp++ = tmp[ibase / 32] >> i % 32;
5031 }
5032
5033 /* It shouldn't matter what's done here, so fill it with
5034 zero. */
5035 for (; i < elem_bitsize; i += value_bit)
5036 *vp++ = 0;
5037 }
5038 break;
5039
5040 case CONST_FIXED:
5041 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5042 {
5043 for (i = 0; i < elem_bitsize; i += value_bit)
5044 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5045 }
5046 else
5047 {
5048 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5049 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5050 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5051 i += value_bit)
5052 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5053 >> (i - HOST_BITS_PER_WIDE_INT);
5054 for (; i < elem_bitsize; i += value_bit)
5055 *vp++ = 0;
5056 }
5057 break;
5058
5059 default:
5060 gcc_unreachable ();
5061 }
5062 }
5063
5064 /* Now, pick the right byte to start with. */
5065 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5066 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5067 will already have offset 0. */
5068 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5069 {
5070 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5071 - byte);
5072 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5073 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5074 byte = (subword_byte % UNITS_PER_WORD
5075 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5076 }
5077
5078 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5079 so if it's become negative it will instead be very large.) */
5080 gcc_assert (byte < GET_MODE_SIZE (innermode));
5081
5082 /* Convert from bytes to chunks of size value_bit. */
5083 value_start = byte * (BITS_PER_UNIT / value_bit);
5084
5085 /* Re-pack the value. */
5086
5087 if (VECTOR_MODE_P (outermode))
5088 {
5089 num_elem = GET_MODE_NUNITS (outermode);
5090 result_v = rtvec_alloc (num_elem);
5091 elems = &RTVEC_ELT (result_v, 0);
5092 outer_submode = GET_MODE_INNER (outermode);
5093 }
5094 else
5095 {
5096 num_elem = 1;
5097 elems = &result_s;
5098 outer_submode = outermode;
5099 }
5100
5101 outer_class = GET_MODE_CLASS (outer_submode);
5102 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5103
5104 gcc_assert (elem_bitsize % value_bit == 0);
5105 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5106
5107 for (elem = 0; elem < num_elem; elem++)
5108 {
5109 unsigned char *vp;
5110
5111 /* Vectors are stored in target memory order. (This is probably
5112 a mistake.) */
5113 {
5114 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5115 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5116 / BITS_PER_UNIT);
5117 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5118 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5119 unsigned bytele = (subword_byte % UNITS_PER_WORD
5120 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5121 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5122 }
5123
5124 switch (outer_class)
5125 {
5126 case MODE_INT:
5127 case MODE_PARTIAL_INT:
5128 {
5129 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5130
5131 for (i = 0;
5132 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5133 i += value_bit)
5134 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5135 for (; i < elem_bitsize; i += value_bit)
5136 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5137 << (i - HOST_BITS_PER_WIDE_INT);
5138
5139 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5140 know why. */
5141 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5142 elems[elem] = gen_int_mode (lo, outer_submode);
5143 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5144 elems[elem] = immed_double_const (lo, hi, outer_submode);
5145 else
5146 return NULL_RTX;
5147 }
5148 break;
5149
5150 case MODE_FLOAT:
5151 case MODE_DECIMAL_FLOAT:
5152 {
5153 REAL_VALUE_TYPE r;
5154 long tmp[max_bitsize / 32];
5155
5156 /* real_from_target wants its input in words affected by
5157 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5158 and use WORDS_BIG_ENDIAN instead; see the documentation
5159 of SUBREG in rtl.texi. */
5160 for (i = 0; i < max_bitsize / 32; i++)
5161 tmp[i] = 0;
5162 for (i = 0; i < elem_bitsize; i += value_bit)
5163 {
5164 int ibase;
5165 if (WORDS_BIG_ENDIAN)
5166 ibase = elem_bitsize - 1 - i;
5167 else
5168 ibase = i;
5169 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5170 }
5171
5172 real_from_target (&r, tmp, outer_submode);
5173 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5174 }
5175 break;
5176
5177 case MODE_FRACT:
5178 case MODE_UFRACT:
5179 case MODE_ACCUM:
5180 case MODE_UACCUM:
5181 {
5182 FIXED_VALUE_TYPE f;
5183 f.data.low = 0;
5184 f.data.high = 0;
5185 f.mode = outer_submode;
5186
5187 for (i = 0;
5188 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5189 i += value_bit)
5190 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5191 for (; i < elem_bitsize; i += value_bit)
5192 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5193 << (i - HOST_BITS_PER_WIDE_INT));
5194
5195 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5196 }
5197 break;
5198
5199 default:
5200 gcc_unreachable ();
5201 }
5202 }
5203 if (VECTOR_MODE_P (outermode))
5204 return gen_rtx_CONST_VECTOR (outermode, result_v);
5205 else
5206 return result_s;
5207 }
5208
5209 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5210 Return 0 if no simplifications are possible. */
5211 rtx
5212 simplify_subreg (enum machine_mode outermode, rtx op,
5213 enum machine_mode innermode, unsigned int byte)
5214 {
5215 /* Little bit of sanity checking. */
5216 gcc_assert (innermode != VOIDmode);
5217 gcc_assert (outermode != VOIDmode);
5218 gcc_assert (innermode != BLKmode);
5219 gcc_assert (outermode != BLKmode);
5220
5221 gcc_assert (GET_MODE (op) == innermode
5222 || GET_MODE (op) == VOIDmode);
5223
5224 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5225 gcc_assert (byte < GET_MODE_SIZE (innermode));
5226
5227 if (outermode == innermode && !byte)
5228 return op;
5229
5230 if (CONST_INT_P (op)
5231 || GET_CODE (op) == CONST_DOUBLE
5232 || GET_CODE (op) == CONST_FIXED
5233 || GET_CODE (op) == CONST_VECTOR)
5234 return simplify_immed_subreg (outermode, op, innermode, byte);
5235
5236 /* Changing mode twice with SUBREG => just change it once,
5237 or not at all if changing back to op's starting mode. */
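  /* E.g. (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses to
     (subreg:QI (reg:SI R) 0).  */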
5238 if (GET_CODE (op) == SUBREG)
5239 {
5240 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5241 int final_offset = byte + SUBREG_BYTE (op);
5242 rtx newx;
5243
5244 if (outermode == innermostmode
5245 && byte == 0 && SUBREG_BYTE (op) == 0)
5246 return SUBREG_REG (op);
5247
5248 /* The SUBREG_BYTE represents the offset, as if the value were stored
5249 in memory. An irritating exception is a paradoxical subreg, where
5250 we define SUBREG_BYTE to be 0. On big-endian machines, this
5251 value should be negative. For a moment, undo this exception. */
5252 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5253 {
5254 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5255 if (WORDS_BIG_ENDIAN)
5256 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5257 if (BYTES_BIG_ENDIAN)
5258 final_offset += difference % UNITS_PER_WORD;
5259 }
5260 if (SUBREG_BYTE (op) == 0
5261 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5262 {
5263 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5264 if (WORDS_BIG_ENDIAN)
5265 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5266 if (BYTES_BIG_ENDIAN)
5267 final_offset += difference % UNITS_PER_WORD;
5268 }
5269
5270 /* See whether the resulting subreg will be paradoxical. */
5271 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5272 {
5273 /* In nonparadoxical subregs we can't handle negative offsets. */
5274 if (final_offset < 0)
5275 return NULL_RTX;
5276 /* Bail out in case the resulting subreg would be incorrect. */
5277 if (final_offset % GET_MODE_SIZE (outermode)
5278 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5279 return NULL_RTX;
5280 }
5281 else
5282 {
5283 int offset = 0;
5284 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5285
5286 /* In a paradoxical subreg, see if we are still looking at the lower part.
5287 If so, our SUBREG_BYTE will be 0. */
5288 if (WORDS_BIG_ENDIAN)
5289 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5290 if (BYTES_BIG_ENDIAN)
5291 offset += difference % UNITS_PER_WORD;
5292 if (offset == final_offset)
5293 final_offset = 0;
5294 else
5295 return NULL_RTX;
5296 }
5297
5298 /* Recurse for further possible simplifications. */
5299 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5300 final_offset);
5301 if (newx)
5302 return newx;
5303 if (validate_subreg (outermode, innermostmode,
5304 SUBREG_REG (op), final_offset))
5305 {
5306 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5307 if (SUBREG_PROMOTED_VAR_P (op)
5308 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5309 && GET_MODE_CLASS (outermode) == MODE_INT
5310 && IN_RANGE (GET_MODE_SIZE (outermode),
5311 GET_MODE_SIZE (innermode),
5312 GET_MODE_SIZE (innermostmode))
5313 && subreg_lowpart_p (newx))
5314 {
5315 SUBREG_PROMOTED_VAR_P (newx) = 1;
5316 SUBREG_PROMOTED_UNSIGNED_SET
5317 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5318 }
5319 return newx;
5320 }
5321 return NULL_RTX;
5322 }
5323
5324 /* Merge implicit and explicit truncations. */
5325
5326 if (GET_CODE (op) == TRUNCATE
5327 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5328 && subreg_lowpart_offset (outermode, innermode) == byte)
5329 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5330 GET_MODE (XEXP (op, 0)));
5331
5332 /* SUBREG of a hard register => just change the register number
5333 and/or mode. If the hard register is not valid in that mode,
5334 suppress this simplification. If the hard register is the stack,
5335 frame, or argument pointer, leave this as a SUBREG. */
5336
5337 if (REG_P (op) && HARD_REGISTER_P (op))
5338 {
5339 unsigned int regno, final_regno;
5340
5341 regno = REGNO (op);
5342 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5343 if (HARD_REGISTER_NUM_P (final_regno))
5344 {
5345 rtx x;
5346 int final_offset = byte;
5347
5348 /* Adjust offset for paradoxical subregs. */
5349 if (byte == 0
5350 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5351 {
5352 int difference = (GET_MODE_SIZE (innermode)
5353 - GET_MODE_SIZE (outermode));
5354 if (WORDS_BIG_ENDIAN)
5355 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5356 if (BYTES_BIG_ENDIAN)
5357 final_offset += difference % UNITS_PER_WORD;
5358 }
5359
5360 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5361
5362 /* Propagate original regno. We don't have any way to specify
5363 the offset inside the original regno, so do so only for the lowpart.
5364 The information is used only by alias analysis, which cannot
5365 grok a partial register anyway. */
5366
5367 if (subreg_lowpart_offset (outermode, innermode) == byte)
5368 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5369 return x;
5370 }
5371 }
5372
5373 /* If we have a SUBREG of a register that we are replacing and we are
5374 replacing it with a MEM, make a new MEM and try replacing the
5375 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5376 or if we would be widening it. */
5377
5378 if (MEM_P (op)
5379 && ! mode_dependent_address_p (XEXP (op, 0))
5380 /* Allow splitting of volatile memory references in case we don't
5381 have an instruction to move the whole thing. */
5382 && (! MEM_VOLATILE_P (op)
5383 || ! have_insn_for (SET, innermode))
5384 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5385 return adjust_address_nv (op, outermode, byte);
5386
5387 /* Handle complex values represented as CONCAT
5388 of real and imaginary part. */
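  /* E.g. (subreg:SF (concat:SC A B) 0) is simply A, the real part.  */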
5389 if (GET_CODE (op) == CONCAT)
5390 {
5391 unsigned int part_size, final_offset;
5392 rtx part, res;
5393
5394 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5395 if (byte < part_size)
5396 {
5397 part = XEXP (op, 0);
5398 final_offset = byte;
5399 }
5400 else
5401 {
5402 part = XEXP (op, 1);
5403 final_offset = byte - part_size;
5404 }
5405
5406 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5407 return NULL_RTX;
5408
5409 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5410 if (res)
5411 return res;
5412 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5413 return gen_rtx_SUBREG (outermode, part, final_offset);
5414 return NULL_RTX;
5415 }
5416
5417 /* Optimize SUBREG truncations of zero and sign extended values. */
5418 if ((GET_CODE (op) == ZERO_EXTEND
5419 || GET_CODE (op) == SIGN_EXTEND)
5420 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5421 {
5422 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5423
5424 /* If we're requesting the lowpart of a zero or sign extension,
5425 there are three possibilities. If the outermode is the same
5426 as the origmode, we can omit both the extension and the subreg.
5427 If the outermode is not larger than the origmode, we can apply
5428 the truncation without the extension. Finally, if the outermode
5429 is larger than the origmode, but both are integer modes, we
5430 can just extend to the appropriate mode. */
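	  /* E.g., on a little-endian target,
	     (subreg:HI (zero_extend:SI (reg:HI R)) 0) is just (reg:HI R),
	     while (subreg:HI (zero_extend:DI (reg:QI R)) 0) becomes
	     (zero_extend:HI (reg:QI R)).  */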
5431 if (bitpos == 0)
5432 {
5433 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5434 if (outermode == origmode)
5435 return XEXP (op, 0);
5436 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5437 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5438 subreg_lowpart_offset (outermode,
5439 origmode));
5440 if (SCALAR_INT_MODE_P (outermode))
5441 return simplify_gen_unary (GET_CODE (op), outermode,
5442 XEXP (op, 0), origmode);
5443 }
5444
5445 /* A SUBREG resulting from a zero extension may fold to zero if
5446 it extracts higher bits than the ZERO_EXTEND's source bits. */
5447 if (GET_CODE (op) == ZERO_EXTEND
5448 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5449 return CONST0_RTX (outermode);
5450 }
5451
5452 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5453 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5454 the outer subreg is effectively a truncation to the original mode. */
5455 if ((GET_CODE (op) == LSHIFTRT
5456 || GET_CODE (op) == ASHIFTRT)
5457 && SCALAR_INT_MODE_P (outermode)
5458 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
5459 to avoid the possibility that an outer LSHIFTRT shifts by more
5460 than the sign extension's sign_bit_copies and introduces zeros
5461 into the high bits of the result. */
5462 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5463 && CONST_INT_P (XEXP (op, 1))
5464 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5465 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5466 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5467 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5468 return simplify_gen_binary (ASHIFTRT, outermode,
5469 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5470
5471 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5472 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5473 the outer subreg is effectively a truncation to the original mode. */
5474 if ((GET_CODE (op) == LSHIFTRT
5475 || GET_CODE (op) == ASHIFTRT)
5476 && SCALAR_INT_MODE_P (outermode)
5477 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5478 && CONST_INT_P (XEXP (op, 1))
5479 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5480 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5481 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5482 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5483 return simplify_gen_binary (LSHIFTRT, outermode,
5484 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5485
5486 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5487 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5488 the outer subreg is effectively a truncation to the original mode. */
5489 if (GET_CODE (op) == ASHIFT
5490 && SCALAR_INT_MODE_P (outermode)
5491 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5492 && CONST_INT_P (XEXP (op, 1))
5493 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5494 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5495 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5496 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5497 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5498 return simplify_gen_binary (ASHIFT, outermode,
5499 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5500
5501 /* Recognize a word extraction from a multi-word subreg. */
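  /* E.g., on a little-endian target with 32-bit words,
     (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32)) 0) becomes
     (subreg:SI (reg:DI R) 4).  */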
5502 if ((GET_CODE (op) == LSHIFTRT
5503 || GET_CODE (op) == ASHIFTRT)
5504 && SCALAR_INT_MODE_P (outermode)
5505 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5506 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5507 && CONST_INT_P (XEXP (op, 1))
5508 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5509 && INTVAL (XEXP (op, 1)) >= 0
5510 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5511 && byte == subreg_lowpart_offset (outermode, innermode))
5512 {
5513 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5514 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5515 (WORDS_BIG_ENDIAN
5516 ? byte - shifted_bytes
5517 : byte + shifted_bytes));
5518 }
5519
5520 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5521 and try replacing the SUBREG and shift with it. Don't do this if
5522 the MEM has a mode-dependent address or if we would be widening it. */
5523
5524 if ((GET_CODE (op) == LSHIFTRT
5525 || GET_CODE (op) == ASHIFTRT)
5526 && MEM_P (XEXP (op, 0))
5527 && CONST_INT_P (XEXP (op, 1))
5528 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5529 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5530 && INTVAL (XEXP (op, 1)) > 0
5531 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5532 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5533 && ! MEM_VOLATILE_P (XEXP (op, 0))
5534 && byte == subreg_lowpart_offset (outermode, innermode)
5535 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5536 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5537 {
5538 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5539 return adjust_address_nv (XEXP (op, 0), outermode,
5540 (WORDS_BIG_ENDIAN
5541 ? byte - shifted_bytes
5542 : byte + shifted_bytes));
5543 }
5544
5545 return NULL_RTX;
5546 }
5547
5548 /* Make a SUBREG operation or equivalent if it folds. */
5549
5550 rtx
5551 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5552 enum machine_mode innermode, unsigned int byte)
5553 {
5554 rtx newx;
5555
5556 newx = simplify_subreg (outermode, op, innermode, byte);
5557 if (newx)
5558 return newx;
5559
5560 if (GET_CODE (op) == SUBREG
5561 || GET_CODE (op) == CONCAT
5562 || GET_MODE (op) == VOIDmode)
5563 return NULL_RTX;
5564
5565 if (validate_subreg (outermode, innermode, op, byte))
5566 return gen_rtx_SUBREG (outermode, op, byte);
5567
5568 return NULL_RTX;
5569 }
5570
5571 /* Simplify X, an rtx expression.
5572
5573 Return the simplified expression or NULL if no simplifications
5574 were possible.
5575
5576 This is the preferred entry point into the simplification routines;
5577 however, we still allow passes to call the more specific routines.
5578
5579 Right now GCC has three (yes, three) major bodies of RTL simplification
5580 code that need to be unified.
5581
5582 1. fold_rtx in cse.c. This code uses various CSE specific
5583 information to aid in RTL simplification.
5584
5585 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5586 it uses combine specific information to aid in RTL
5587 simplification.
5588
5589 3. The routines in this file.
5590
5591
5592 Long term we want to only have one body of simplification code; to
5593 get to that state I recommend the following steps:
5594
5595 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5596 which do not rely on pass-dependent state into these routines.
5597
5598 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5599 use this routine whenever possible.
5600
5601 3. Allow for pass dependent state to be provided to these
5602 routines and add simplifications based on the pass dependent
5603 state. Remove code from cse.c & combine.c that becomes
5604 redundant/dead.
5605
5606 It will take time, but ultimately the compiler will be easier to
5607 maintain and improve. It's totally silly that when we add a
5608 simplification it needs to be added to 4 places (3 for RTL
5609 simplification and 1 for tree simplification). */
5610
5611 rtx
5612 simplify_rtx (const_rtx x)
5613 {
5614 const enum rtx_code code = GET_CODE (x);
5615 const enum machine_mode mode = GET_MODE (x);
5616
5617 switch (GET_RTX_CLASS (code))
5618 {
5619 case RTX_UNARY:
5620 return simplify_unary_operation (code, mode,
5621 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5622 case RTX_COMM_ARITH:
5623 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5624 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5625
5626 /* Fall through.  */
5627
5628 case RTX_BIN_ARITH:
5629 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5630
5631 case RTX_TERNARY:
5632 case RTX_BITFIELD_OPS:
5633 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5634 XEXP (x, 0), XEXP (x, 1),
5635 XEXP (x, 2));
5636
5637 case RTX_COMPARE:
5638 case RTX_COMM_COMPARE:
5639 return simplify_relational_operation (code, mode,
5640 ((GET_MODE (XEXP (x, 0))
5641 != VOIDmode)
5642 ? GET_MODE (XEXP (x, 0))
5643 : GET_MODE (XEXP (x, 1))),
5644 XEXP (x, 0),
5645 XEXP (x, 1));
5646
5647 case RTX_EXTRA:
5648 if (code == SUBREG)
5649 return simplify_subreg (mode, SUBREG_REG (x),
5650 GET_MODE (SUBREG_REG (x)),
5651 SUBREG_BYTE (x));
5652 break;
5653
5654 case RTX_OBJ:
5655 if (code == LO_SUM)
5656 {
5657 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5658 if (GET_CODE (XEXP (x, 0)) == HIGH
5659 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5660 return XEXP (x, 1);
5661 }
5662 break;
5663
5664 default:
5665 break;
5666 }
5667 return NULL;
5668 }