re PR rtl-optimization/49619 (ICE in simplify_subreg, at simplify-rtx.c:5362)
[gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
41
42 /* Simplification and canonicalization of RTL. */
43
44 /* Much code operates on (low, high) pairs; the low value is an
45 unsigned wide int, the high value a signed wide int. We
46 occasionally need to sign extend from low to high as if low were a
47 signed wide int. */
48 #define HWI_SIGN_EXTEND(low) \
49 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
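/* For instance (illustrative values only): a low part that is negative
   when viewed as a signed HOST_WIDE_INT extends to an all-ones high part,
   while a non-negative low part extends to zero:
     HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5)  ==>  (HOST_WIDE_INT) -1
     HWI_SIGN_EXTEND ((HOST_WIDE_INT)  3)  ==>  (HOST_WIDE_INT)  0  */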
50
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 rtx, rtx, rtx, rtx);
64 \f
65 /* Negate a CONST_INT rtx, truncating (because a conversion from a
66 maximally negative number can overflow). */
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
69 {
70 return gen_int_mode (- INTVAL (i), mode);
71 }
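/* E.g. in QImode, negating (const_int -128) overflows the 8-bit range;
   gen_int_mode truncates the result back to QImode, so the returned
   constant is again -128.  (Illustrative example.)  */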
72
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
75
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 {
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
81
82 if (GET_MODE_CLASS (mode) != MODE_INT)
83 return false;
84
85 width = GET_MODE_BITSIZE (mode);
86 if (width == 0)
87 return false;
88
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 && GET_CODE (x) == CONST_DOUBLE
94 && CONST_DOUBLE_LOW (x) == 0)
95 {
96 val = CONST_DOUBLE_HIGH (x);
97 width -= HOST_BITS_PER_WIDE_INT;
98 }
99 else
100 return false;
101
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
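/* For example, with MODE == SImode (32 bits) this returns true only for
   a constant whose SImode value has just the most significant bit set
   (0x80000000), and false for every other value.  (Illustrative.)  */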
106 \f
107 /* Make a binary operation by properly ordering the operands and
108 seeing if the expression folds. */
109
110 rtx
111 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
112 rtx op1)
113 {
114 rtx tem;
115
116 /* If this simplifies, do it. */
117 tem = simplify_binary_operation (code, mode, op0, op1);
118 if (tem)
119 return tem;
120
121 /* Put complex operands first and constants second if commutative. */
122 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
123 && swap_commutative_operands_p (op0, op1))
124 tem = op0, op0 = op1, op1 = tem;
125
126 return gen_rtx_fmt_ee (code, mode, op0, op1);
127 }
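/* E.g. simplify_gen_binary (PLUS, SImode, (const_int 4), (reg R)) first
   tries to simplify; failing that, the commutative-operand swap above
   puts the constant second, yielding (plus:SI (reg R) (const_int 4)).
   (Illustrative; R stands for an arbitrary pseudo register.)  */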
128 \f
129 /* If X is a MEM referencing the constant pool, return the real value.
130 Otherwise return X. */
131 rtx
132 avoid_constant_pool_reference (rtx x)
133 {
134 rtx c, tmp, addr;
135 enum machine_mode cmode;
136 HOST_WIDE_INT offset = 0;
137
138 switch (GET_CODE (x))
139 {
140 case MEM:
141 break;
142
143 case FLOAT_EXTEND:
144 /* Handle float extensions of constant pool references. */
145 tmp = XEXP (x, 0);
146 c = avoid_constant_pool_reference (tmp);
147 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
148 {
149 REAL_VALUE_TYPE d;
150
151 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
152 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
153 }
154 return x;
155
156 default:
157 return x;
158 }
159
160 if (GET_MODE (x) == BLKmode)
161 return x;
162
163 addr = XEXP (x, 0);
164
165 /* Call target hook to avoid the effects of -fpic etc.... */
166 addr = targetm.delegitimize_address (addr);
167
168 /* Split the address into a base and integer offset. */
169 if (GET_CODE (addr) == CONST
170 && GET_CODE (XEXP (addr, 0)) == PLUS
171 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
172 {
173 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
174 addr = XEXP (XEXP (addr, 0), 0);
175 }
176
177 if (GET_CODE (addr) == LO_SUM)
178 addr = XEXP (addr, 1);
179
180 /* If this is a constant pool reference, we can turn it into its
181 constant and hope that simplifications happen. */
182 if (GET_CODE (addr) == SYMBOL_REF
183 && CONSTANT_POOL_ADDRESS_P (addr))
184 {
185 c = get_pool_constant (addr);
186 cmode = get_pool_mode (addr);
187
188 /* If we're accessing the constant in a different mode than it was
189 originally stored, attempt to fix that up via subreg simplifications.
190 If that fails we have no choice but to return the original memory. */
191 if (offset != 0 || cmode != GET_MODE (x))
192 {
193 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
194 if (tem && CONSTANT_P (tem))
195 return tem;
196 }
197 else
198 return c;
199 }
200
201 return x;
202 }
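/* Sketch of the effect: given a MEM such as (mem/u (symbol_ref LC0))
   whose constant pool slot holds a CONST_DOUBLE in the same mode, that
   CONST_DOUBLE is returned directly; with a nonzero offset or a
   different mode, simplify_subreg is tried, and the original MEM is
   returned if that fails.  (Illustrative; LC0 is a made-up label.)  */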
203 \f
204 /* Simplify a MEM based on its attributes. This is the default
205 delegitimize_address target hook, and it's recommended that every
206 overrider call it. */
207
208 rtx
209 delegitimize_mem_from_attrs (rtx x)
210 {
211 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
212 use their base addresses as equivalent. */
213 if (MEM_P (x)
214 && MEM_EXPR (x)
215 && MEM_OFFSET (x))
216 {
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
220
221 switch (TREE_CODE (decl))
222 {
223 default:
224 decl = NULL;
225 break;
226
227 case VAR_DECL:
228 break;
229
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
237 {
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
241
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
249 {
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
253 }
254 break;
255 }
256 }
257
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
265 {
266 rtx newx;
267
268 offset += INTVAL (MEM_OFFSET (x));
269
270 newx = DECL_RTL (decl);
271
272 if (MEM_P (newx))
273 {
274 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
275
276 /* Avoid creating a new MEM needlessly if we already had
277 the same address. We do if there's no OFFSET and the
278 old address X is identical to NEWX, or if X is of the
279 form (plus NEWX OFFSET), or the NEWX is of the form
280 (plus Y (const_int Z)) and X is that with the offset
281 added: (plus Y (const_int Z+OFFSET)). */
282 if (!((offset == 0
283 || (GET_CODE (o) == PLUS
284 && GET_CODE (XEXP (o, 1)) == CONST_INT
285 && (offset == INTVAL (XEXP (o, 1))
286 || (GET_CODE (n) == PLUS
287 && GET_CODE (XEXP (n, 1)) == CONST_INT
288 && (INTVAL (XEXP (n, 1)) + offset
289 == INTVAL (XEXP (o, 1)))
290 && (n = XEXP (n, 0))))
291 && (o = XEXP (o, 0))))
292 && rtx_equal_p (o, n)))
293 x = adjust_address_nv (newx, mode, offset);
294 }
295 else if (GET_MODE (x) == GET_MODE (newx)
296 && offset == 0)
297 x = newx;
298 }
299 }
300
301 return x;
302 }
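/* Sketch of the effect: if X is a MEM whose MEM_EXPR is a static
   VAR_DECL v with DECL_RTL (mem (symbol_ref "v")) and whose MEM_OFFSET
   is 8, the result is the equivalent of
   (mem (plus (symbol_ref "v") (const_int 8))) via adjust_address_nv,
   exposing the symbolic address to later simplification.
   (Illustrative; the names are made up.)  */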
303 \f
304 /* Make a unary operation by first seeing if it folds and otherwise making
305 the specified operation. */
306
307 rtx
308 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
309 enum machine_mode op_mode)
310 {
311 rtx tem;
312
313 /* If this simplifies, use it. */
314 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
315 return tem;
316
317 return gen_rtx_fmt_e (code, mode, op);
318 }
319
320 /* Likewise for ternary operations. */
321
322 rtx
323 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
324 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
325 {
326 rtx tem;
327
328 /* If this simplifies, use it. */
329 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
330 op0, op1, op2)))
331 return tem;
332
333 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
334 }
335
336 /* Likewise, for relational operations.
337 CMP_MODE specifies mode comparison is done in. */
338
339 rtx
340 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
341 enum machine_mode cmp_mode, rtx op0, rtx op1)
342 {
343 rtx tem;
344
345 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
346 op0, op1)))
347 return tem;
348
349 return gen_rtx_fmt_ee (code, mode, op0, op1);
350 }
351 \f
352 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
353 and simplify the result. If FN is non-NULL, call this callback on each
354 X, if it returns non-NULL, replace X with its return value and simplify the
355 result. */
356
357 rtx
358 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
359 rtx (*fn) (rtx, const_rtx, void *), void *data)
360 {
361 enum rtx_code code = GET_CODE (x);
362 enum machine_mode mode = GET_MODE (x);
363 enum machine_mode op_mode;
364 const char *fmt;
365 rtx op0, op1, op2, newx, op;
366 rtvec vec, newvec;
367 int i, j;
368
369 if (__builtin_expect (fn != NULL, 0))
370 {
371 newx = fn (x, old_rtx, data);
372 if (newx)
373 return newx;
374 }
375 else if (rtx_equal_p (x, old_rtx))
376 return copy_rtx ((rtx) data);
377
378 switch (GET_RTX_CLASS (code))
379 {
380 case RTX_UNARY:
381 op0 = XEXP (x, 0);
382 op_mode = GET_MODE (op0);
383 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
384 if (op0 == XEXP (x, 0))
385 return x;
386 return simplify_gen_unary (code, mode, op0, op_mode);
387
388 case RTX_BIN_ARITH:
389 case RTX_COMM_ARITH:
390 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
391 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
392 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
393 return x;
394 return simplify_gen_binary (code, mode, op0, op1);
395
396 case RTX_COMPARE:
397 case RTX_COMM_COMPARE:
398 op0 = XEXP (x, 0);
399 op1 = XEXP (x, 1);
400 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
401 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
402 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
403 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
404 return x;
405 return simplify_gen_relational (code, mode, op_mode, op0, op1);
406
407 case RTX_TERNARY:
408 case RTX_BITFIELD_OPS:
409 op0 = XEXP (x, 0);
410 op_mode = GET_MODE (op0);
411 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
412 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
413 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
414 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
415 return x;
416 if (op_mode == VOIDmode)
417 op_mode = GET_MODE (op0);
418 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
419
420 case RTX_EXTRA:
421 if (code == SUBREG)
422 {
423 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
424 if (op0 == SUBREG_REG (x))
425 return x;
426 op0 = simplify_gen_subreg (GET_MODE (x), op0,
427 GET_MODE (SUBREG_REG (x)),
428 SUBREG_BYTE (x));
429 return op0 ? op0 : x;
430 }
431 break;
432
433 case RTX_OBJ:
434 if (code == MEM)
435 {
436 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
437 if (op0 == XEXP (x, 0))
438 return x;
439 return replace_equiv_address_nv (x, op0);
440 }
441 else if (code == LO_SUM)
442 {
443 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
444 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
445
446 /* (lo_sum (high x) x) -> x */
447 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
448 return op1;
449
450 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
451 return x;
452 return gen_rtx_LO_SUM (mode, op0, op1);
453 }
454 break;
455
456 default:
457 break;
458 }
459
460 newx = x;
461 fmt = GET_RTX_FORMAT (code);
462 for (i = 0; fmt[i]; i++)
463 switch (fmt[i])
464 {
465 case 'E':
466 vec = XVEC (x, i);
467 newvec = XVEC (newx, i);
468 for (j = 0; j < GET_NUM_ELEM (vec); j++)
469 {
470 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
471 old_rtx, fn, data);
472 if (op != RTVEC_ELT (vec, j))
473 {
474 if (newvec == vec)
475 {
476 newvec = shallow_copy_rtvec (vec);
477 if (x == newx)
478 newx = shallow_copy_rtx (x);
479 XVEC (newx, i) = newvec;
480 }
481 RTVEC_ELT (newvec, j) = op;
482 }
483 }
484 break;
485
486 case 'e':
487 if (XEXP (x, i))
488 {
489 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
490 if (op != XEXP (x, i))
491 {
492 if (x == newx)
493 newx = shallow_copy_rtx (x);
494 XEXP (newx, i) = op;
495 }
496 }
497 break;
498 }
499 return newx;
500 }
501
502 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
503 resulting RTX. Return a new RTX which is as simplified as possible. */
504
505 rtx
506 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
507 {
508 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
509 }
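/* A typical use (sketch): replacing (reg A) with (reg B) in
   (plus:SI (reg A) (const_int 4)) via simplify_replace_rtx (x, reg_a,
   reg_b) rebuilds the sum as (plus:SI (reg B) (const_int 4)),
   re-running the simplifiers so that, e.g., a NEG or constant operand
   introduced by the substitution can fold away.  */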
510 \f
511 /* Try to simplify a unary operation CODE whose output mode is to be
512 MODE with input operand OP whose mode was originally OP_MODE.
513 Return zero if no simplification can be made. */
514 rtx
515 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
516 rtx op, enum machine_mode op_mode)
517 {
518 rtx trueop, tem;
519
520 trueop = avoid_constant_pool_reference (op);
521
522 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
523 if (tem)
524 return tem;
525
526 return simplify_unary_operation_1 (code, mode, op);
527 }
528
529 /* Perform some simplifications we can do even if the operands
530 aren't constant. */
531 static rtx
532 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
533 {
534 enum rtx_code reversed;
535 rtx temp;
536
537 switch (code)
538 {
539 case NOT:
540 /* (not (not X)) == X. */
541 if (GET_CODE (op) == NOT)
542 return XEXP (op, 0);
543
544 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
545 comparison is all ones. */
546 if (COMPARISON_P (op)
547 && (mode == BImode || STORE_FLAG_VALUE == -1)
548 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
549 return simplify_gen_relational (reversed, mode, VOIDmode,
550 XEXP (op, 0), XEXP (op, 1));
551
552 /* (not (plus X -1)) can become (neg X). */
553 if (GET_CODE (op) == PLUS
554 && XEXP (op, 1) == constm1_rtx)
555 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
556
557 /* Similarly, (not (neg X)) is (plus X -1). */
558 if (GET_CODE (op) == NEG)
559 return plus_constant (XEXP (op, 0), -1);
560
561 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
562 if (GET_CODE (op) == XOR
563 && CONST_INT_P (XEXP (op, 1))
564 && (temp = simplify_unary_operation (NOT, mode,
565 XEXP (op, 1), mode)) != 0)
566 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
567
568 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
569 if (GET_CODE (op) == PLUS
570 && CONST_INT_P (XEXP (op, 1))
571 && mode_signbit_p (mode, XEXP (op, 1))
572 && (temp = simplify_unary_operation (NOT, mode,
573 XEXP (op, 1), mode)) != 0)
574 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
575
576
577 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
578 operands other than 1, but that is not valid. We could do a
579 similar simplification for (not (lshiftrt C X)) where C is
580 just the sign bit, but this doesn't seem common enough to
581 bother with. */
582 if (GET_CODE (op) == ASHIFT
583 && XEXP (op, 0) == const1_rtx)
584 {
585 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
586 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
587 }
588
589 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
590 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
591 so we can perform the above simplification. */
592
593 if (STORE_FLAG_VALUE == -1
594 && GET_CODE (op) == ASHIFTRT
 595 && CONST_INT_P (XEXP (op, 1))
596 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
597 return simplify_gen_relational (GE, mode, VOIDmode,
598 XEXP (op, 0), const0_rtx);
599
600
601 if (GET_CODE (op) == SUBREG
602 && subreg_lowpart_p (op)
603 && (GET_MODE_SIZE (GET_MODE (op))
604 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
605 && GET_CODE (SUBREG_REG (op)) == ASHIFT
606 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
607 {
608 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
609 rtx x;
610
611 x = gen_rtx_ROTATE (inner_mode,
612 simplify_gen_unary (NOT, inner_mode, const1_rtx,
613 inner_mode),
614 XEXP (SUBREG_REG (op), 1));
615 return rtl_hooks.gen_lowpart_no_emit (mode, x);
616 }
617
618 /* Apply De Morgan's laws to reduce number of patterns for machines
619 with negating logical insns (and-not, nand, etc.). If result has
620 only one NOT, put it first, since that is how the patterns are
621 coded. */
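/* For instance, (not (and X Y)) becomes (ior (not X) (not Y)), and
   (not (ior X Y)) becomes (and (not X) (not Y)); if only one operand
   ends up wrapped in a NOT, it is placed first.  */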
622
623 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
624 {
625 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
626 enum machine_mode op_mode;
627
628 op_mode = GET_MODE (in1);
629 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
630
631 op_mode = GET_MODE (in2);
632 if (op_mode == VOIDmode)
633 op_mode = mode;
634 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
635
636 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
637 {
638 rtx tem = in2;
639 in2 = in1; in1 = tem;
640 }
641
642 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
643 mode, in1, in2);
644 }
645 break;
646
647 case NEG:
648 /* (neg (neg X)) == X. */
649 if (GET_CODE (op) == NEG)
650 return XEXP (op, 0);
651
652 /* (neg (plus X 1)) can become (not X). */
653 if (GET_CODE (op) == PLUS
654 && XEXP (op, 1) == const1_rtx)
655 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
656
657 /* Similarly, (neg (not X)) is (plus X 1). */
658 if (GET_CODE (op) == NOT)
659 return plus_constant (XEXP (op, 0), 1);
660
661 /* (neg (minus X Y)) can become (minus Y X). This transformation
662 isn't safe for modes with signed zeros, since if X and Y are
663 both +0, (minus Y X) is the same as (minus X Y). If the
664 rounding mode is towards +infinity (or -infinity) then the two
665 expressions will be rounded differently. */
666 if (GET_CODE (op) == MINUS
667 && !HONOR_SIGNED_ZEROS (mode)
668 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
669 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
670
671 if (GET_CODE (op) == PLUS
672 && !HONOR_SIGNED_ZEROS (mode)
673 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
674 {
675 /* (neg (plus A C)) is simplified to (minus -C A). */
676 if (CONST_INT_P (XEXP (op, 1))
677 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
678 {
679 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
680 if (temp)
681 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
682 }
683
684 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
685 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
686 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
687 }
688
689 /* (neg (mult A B)) becomes (mult A (neg B)).
690 This works even for floating-point values. */
691 if (GET_CODE (op) == MULT
692 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
693 {
694 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
695 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
696 }
697
698 /* NEG commutes with ASHIFT since it is multiplication. Only do
699 this if we can then eliminate the NEG (e.g., if the operand
700 is a constant). */
701 if (GET_CODE (op) == ASHIFT)
702 {
703 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
704 if (temp)
705 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
706 }
707
708 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
709 C is equal to the width of MODE minus 1. */
710 if (GET_CODE (op) == ASHIFTRT
711 && CONST_INT_P (XEXP (op, 1))
712 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (op, 0), XEXP (op, 1));
715
716 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
717 C is equal to the width of MODE minus 1. */
718 if (GET_CODE (op) == LSHIFTRT
719 && CONST_INT_P (XEXP (op, 1))
720 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
721 return simplify_gen_binary (ASHIFTRT, mode,
722 XEXP (op, 0), XEXP (op, 1));
723
724 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
725 if (GET_CODE (op) == XOR
726 && XEXP (op, 1) == const1_rtx
727 && nonzero_bits (XEXP (op, 0), mode) == 1)
728 return plus_constant (XEXP (op, 0), -1);
729
730 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
731 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
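/* E.g. in SImode with STORE_FLAG_VALUE == 1, (neg (lt X (const_int 0)))
   becomes (ashiftrt:SI X (const_int 31)): -1 when X is negative and 0
   otherwise, which is exactly the negated comparison result.  */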
732 if (GET_CODE (op) == LT
733 && XEXP (op, 1) == const0_rtx
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
735 {
736 enum machine_mode inner = GET_MODE (XEXP (op, 0));
737 int isize = GET_MODE_BITSIZE (inner);
738 if (STORE_FLAG_VALUE == 1)
739 {
740 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
741 GEN_INT (isize - 1));
742 if (mode == inner)
743 return temp;
744 if (GET_MODE_BITSIZE (mode) > isize)
745 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
746 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
747 }
748 else if (STORE_FLAG_VALUE == -1)
749 {
750 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
751 GEN_INT (isize - 1));
752 if (mode == inner)
753 return temp;
754 if (GET_MODE_BITSIZE (mode) > isize)
755 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
756 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
757 }
758 }
759 break;
760
761 case TRUNCATE:
762 /* We can't handle truncation to a partial integer mode here
763 because we don't know the real bitsize of the partial
764 integer mode. */
765 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
766 break;
767
768 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
769 if ((GET_CODE (op) == SIGN_EXTEND
770 || GET_CODE (op) == ZERO_EXTEND)
771 && GET_MODE (XEXP (op, 0)) == mode)
772 return XEXP (op, 0);
773
774 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
775 (OP:SI foo:SI) if OP is NEG or ABS. */
776 if ((GET_CODE (op) == ABS
777 || GET_CODE (op) == NEG)
778 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
779 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
780 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
781 return simplify_gen_unary (GET_CODE (op), mode,
782 XEXP (XEXP (op, 0), 0), mode);
783
784 /* (truncate:A (subreg:B (truncate:C X) 0)) is
785 (truncate:A X). */
786 if (GET_CODE (op) == SUBREG
787 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
788 && subreg_lowpart_p (op))
789 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
790 GET_MODE (XEXP (SUBREG_REG (op), 0)));
791
792 /* If we know that the value is already truncated, we can
793 replace the TRUNCATE with a SUBREG. Note that this is also
 794 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
 795 modes; we just have to apply a different definition for
796 truncation. But don't do this for an (LSHIFTRT (MULT ...))
797 since this will cause problems with the umulXi3_highpart
798 patterns. */
799 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
800 GET_MODE_BITSIZE (GET_MODE (op)))
801 ? (num_sign_bit_copies (op, GET_MODE (op))
802 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
803 - GET_MODE_BITSIZE (mode)))
804 : truncated_to_mode (mode, op))
805 && ! (GET_CODE (op) == LSHIFTRT
806 && GET_CODE (XEXP (op, 0)) == MULT))
807 return rtl_hooks.gen_lowpart_no_emit (mode, op);
808
809 /* A truncate of a comparison can be replaced with a subreg if
810 STORE_FLAG_VALUE permits. This is like the previous test,
811 but it works even if the comparison is done in a mode larger
812 than HOST_BITS_PER_WIDE_INT. */
813 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
814 && COMPARISON_P (op)
815 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
816 return rtl_hooks.gen_lowpart_no_emit (mode, op);
817 break;
818
819 case FLOAT_TRUNCATE:
820 if (DECIMAL_FLOAT_MODE_P (mode))
821 break;
822
823 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
824 if (GET_CODE (op) == FLOAT_EXTEND
825 && GET_MODE (XEXP (op, 0)) == mode)
826 return XEXP (op, 0);
827
828 /* (float_truncate:SF (float_truncate:DF foo:XF))
829 = (float_truncate:SF foo:XF).
830 This may eliminate double rounding, so it is unsafe.
831
832 (float_truncate:SF (float_extend:XF foo:DF))
833 = (float_truncate:SF foo:DF).
834
835 (float_truncate:DF (float_extend:XF foo:SF))
 836 = (float_extend:DF foo:SF). */
837 if ((GET_CODE (op) == FLOAT_TRUNCATE
838 && flag_unsafe_math_optimizations)
839 || GET_CODE (op) == FLOAT_EXTEND)
840 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
841 0)))
842 > GET_MODE_SIZE (mode)
843 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
844 mode,
845 XEXP (op, 0), mode);
846
847 /* (float_truncate (float x)) is (float x) */
848 if (GET_CODE (op) == FLOAT
849 && (flag_unsafe_math_optimizations
850 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
851 && ((unsigned)significand_size (GET_MODE (op))
852 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
853 - num_sign_bit_copies (XEXP (op, 0),
854 GET_MODE (XEXP (op, 0))))))))
855 return simplify_gen_unary (FLOAT, mode,
856 XEXP (op, 0),
857 GET_MODE (XEXP (op, 0)));
858
859 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
860 (OP:SF foo:SF) if OP is NEG or ABS. */
861 if ((GET_CODE (op) == ABS
862 || GET_CODE (op) == NEG)
863 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
864 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
865 return simplify_gen_unary (GET_CODE (op), mode,
866 XEXP (XEXP (op, 0), 0), mode);
867
868 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
869 is (float_truncate:SF x). */
870 if (GET_CODE (op) == SUBREG
871 && subreg_lowpart_p (op)
872 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
873 return SUBREG_REG (op);
874 break;
875
876 case FLOAT_EXTEND:
877 if (DECIMAL_FLOAT_MODE_P (mode))
878 break;
879
880 /* (float_extend (float_extend x)) is (float_extend x)
881
882 (float_extend (float x)) is (float x) assuming that double
883 rounding can't happen.
884 */
885 if (GET_CODE (op) == FLOAT_EXTEND
886 || (GET_CODE (op) == FLOAT
887 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
888 && ((unsigned)significand_size (GET_MODE (op))
889 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
890 - num_sign_bit_copies (XEXP (op, 0),
891 GET_MODE (XEXP (op, 0)))))))
892 return simplify_gen_unary (GET_CODE (op), mode,
893 XEXP (op, 0),
894 GET_MODE (XEXP (op, 0)));
895
896 break;
897
898 case ABS:
899 /* (abs (neg <foo>)) -> (abs <foo>) */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
902 GET_MODE (XEXP (op, 0)));
903
904 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
905 do nothing. */
906 if (GET_MODE (op) == VOIDmode)
907 break;
908
909 /* If operand is something known to be positive, ignore the ABS. */
910 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
911 || ((GET_MODE_BITSIZE (GET_MODE (op))
912 <= HOST_BITS_PER_WIDE_INT)
913 && ((nonzero_bits (op, GET_MODE (op))
914 & ((unsigned HOST_WIDE_INT) 1
915 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
916 == 0)))
917 return op;
918
919 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
920 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
921 return gen_rtx_NEG (mode, op);
922
923 break;
924
925 case FFS:
926 /* (ffs (*_extend <X>)) = (ffs <X>) */
927 if (GET_CODE (op) == SIGN_EXTEND
928 || GET_CODE (op) == ZERO_EXTEND)
929 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
930 GET_MODE (XEXP (op, 0)));
931 break;
932
933 case POPCOUNT:
934 switch (GET_CODE (op))
935 {
936 case BSWAP:
937 case ZERO_EXTEND:
938 /* (popcount (zero_extend <X>)) = (popcount <X>) */
939 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
940 GET_MODE (XEXP (op, 0)));
941
942 case ROTATE:
943 case ROTATERT:
944 /* Rotations don't affect popcount. */
945 if (!side_effects_p (XEXP (op, 1)))
946 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
947 GET_MODE (XEXP (op, 0)));
948 break;
949
950 default:
951 break;
952 }
953 break;
954
955 case PARITY:
956 switch (GET_CODE (op))
957 {
958 case NOT:
959 case BSWAP:
960 case ZERO_EXTEND:
961 case SIGN_EXTEND:
962 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
963 GET_MODE (XEXP (op, 0)));
964
965 case ROTATE:
966 case ROTATERT:
967 /* Rotations don't affect parity. */
968 if (!side_effects_p (XEXP (op, 1)))
969 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
970 GET_MODE (XEXP (op, 0)));
971 break;
972
973 default:
974 break;
975 }
976 break;
977
978 case BSWAP:
979 /* (bswap (bswap x)) -> x. */
980 if (GET_CODE (op) == BSWAP)
981 return XEXP (op, 0);
982 break;
983
984 case FLOAT:
985 /* (float (sign_extend <X>)) = (float <X>). */
986 if (GET_CODE (op) == SIGN_EXTEND)
987 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
988 GET_MODE (XEXP (op, 0)));
989 break;
990
991 case SIGN_EXTEND:
992 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
993 becomes just the MINUS if its mode is MODE. This allows
994 folding switch statements on machines using casesi (such as
995 the VAX). */
996 if (GET_CODE (op) == TRUNCATE
997 && GET_MODE (XEXP (op, 0)) == mode
998 && GET_CODE (XEXP (op, 0)) == MINUS
999 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1000 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1001 return XEXP (op, 0);
1002
1003 /* Extending a widening multiplication should be canonicalized to
1004 a wider widening multiplication. */
1005 if (GET_CODE (op) == MULT)
1006 {
1007 rtx lhs = XEXP (op, 0);
1008 rtx rhs = XEXP (op, 1);
1009 enum rtx_code lcode = GET_CODE (lhs);
1010 enum rtx_code rcode = GET_CODE (rhs);
1011
1012 /* Widening multiplies usually extend both operands, but sometimes
1013 they use a shift to extract a portion of a register. */
1014 if ((lcode == SIGN_EXTEND
1015 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1016 && (rcode == SIGN_EXTEND
1017 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1018 {
1019 enum machine_mode lmode = GET_MODE (lhs);
1020 enum machine_mode rmode = GET_MODE (rhs);
1021 int bits;
1022
1023 if (lcode == ASHIFTRT)
1024 /* Number of bits not shifted off the end. */
1025 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1026 else /* lcode == SIGN_EXTEND */
1027 /* Size of inner mode. */
1028 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1029
1030 if (rcode == ASHIFTRT)
1031 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1032 else /* rcode == SIGN_EXTEND */
1033 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1034
 1035 /* We can only widen multiplies if the result is mathematically
 1036 equivalent, i.e. if overflow was impossible. */
1037 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1038 return simplify_gen_binary
1039 (MULT, mode,
1040 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1041 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1042 }
1043 }
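/* E.g. (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
                                 (sign_extend:SI y:HI)))
   becomes (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)), since
   an HImode by HImode product always fits in SImode and therefore
   cannot have overflowed.  (Illustrative example.)  */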
1044
1045 /* Check for a sign extension of a subreg of a promoted
1046 variable, where the promotion is sign-extended, and the
1047 target mode is the same as the variable's promotion. */
1048 if (GET_CODE (op) == SUBREG
1049 && SUBREG_PROMOTED_VAR_P (op)
1050 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1051 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1052 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1053
1054 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1055 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1056 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1057 {
1058 gcc_assert (GET_MODE_BITSIZE (mode)
1059 > GET_MODE_BITSIZE (GET_MODE (op)));
1060 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1061 GET_MODE (XEXP (op, 0)));
1062 }
1063
1064 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1065 is (sign_extend:M (subreg:O <X>)) if there is mode with
1066 GET_MODE_BITSIZE (N) - I bits.
1067 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1068 is similarly (zero_extend:M (subreg:O <X>)). */
1069 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1070 && GET_CODE (XEXP (op, 0)) == ASHIFT
1071 && CONST_INT_P (XEXP (op, 1))
1072 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1073 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1074 {
1075 enum machine_mode tmode
1076 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1077 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1078 gcc_assert (GET_MODE_BITSIZE (mode)
1079 > GET_MODE_BITSIZE (GET_MODE (op)));
1080 if (tmode != BLKmode)
1081 {
1082 rtx inner =
1083 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1084 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1085 ? SIGN_EXTEND : ZERO_EXTEND,
1086 mode, inner, tmode);
1087 }
1088 }
1089
1090 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
 1091 /* As we do not know which address space the pointer is referring to,
1092 we can do this only if the target does not support different pointer
1093 or address modes depending on the address space. */
1094 if (target_default_pointer_address_modes_p ()
1095 && ! POINTERS_EXTEND_UNSIGNED
1096 && mode == Pmode && GET_MODE (op) == ptr_mode
1097 && (CONSTANT_P (op)
1098 || (GET_CODE (op) == SUBREG
1099 && REG_P (SUBREG_REG (op))
1100 && REG_POINTER (SUBREG_REG (op))
1101 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1102 return convert_memory_address (Pmode, op);
1103 #endif
1104 break;
1105
1106 case ZERO_EXTEND:
1107 /* Check for a zero extension of a subreg of a promoted
1108 variable, where the promotion is zero-extended, and the
1109 target mode is the same as the variable's promotion. */
1110 if (GET_CODE (op) == SUBREG
1111 && SUBREG_PROMOTED_VAR_P (op)
1112 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1113 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1114 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1115
1116 /* Extending a widening multiplication should be canonicalized to
1117 a wider widening multiplication. */
1118 if (GET_CODE (op) == MULT)
1119 {
1120 rtx lhs = XEXP (op, 0);
1121 rtx rhs = XEXP (op, 1);
1122 enum rtx_code lcode = GET_CODE (lhs);
1123 enum rtx_code rcode = GET_CODE (rhs);
1124
1125 /* Widening multiplies usually extend both operands, but sometimes
1126 they use a shift to extract a portion of a register. */
1127 if ((lcode == ZERO_EXTEND
1128 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1129 && (rcode == ZERO_EXTEND
1130 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1131 {
1132 enum machine_mode lmode = GET_MODE (lhs);
1133 enum machine_mode rmode = GET_MODE (rhs);
1134 int bits;
1135
1136 if (lcode == LSHIFTRT)
1137 /* Number of bits not shifted off the end. */
1138 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1139 else /* lcode == ZERO_EXTEND */
1140 /* Size of inner mode. */
1141 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1142
1143 if (rcode == LSHIFTRT)
1144 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1145 else /* rcode == ZERO_EXTEND */
1146 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1147
 1148 /* We can only widen multiplies if the result is mathematically
 1149 equivalent, i.e. if overflow was impossible. */
1150 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1151 return simplify_gen_binary
1152 (MULT, mode,
1153 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1154 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1155 }
1156 }
1157
1158 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1159 if (GET_CODE (op) == ZERO_EXTEND)
1160 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1161 GET_MODE (XEXP (op, 0)));
1162
1163 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1164 is (zero_extend:M (subreg:O <X>)) if there is mode with
1165 GET_MODE_BITSIZE (N) - I bits. */
1166 if (GET_CODE (op) == LSHIFTRT
1167 && GET_CODE (XEXP (op, 0)) == ASHIFT
1168 && CONST_INT_P (XEXP (op, 1))
1169 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1170 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1171 {
1172 enum machine_mode tmode
1173 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1174 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1175 if (tmode != BLKmode)
1176 {
1177 rtx inner =
1178 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1179 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1180 }
1181 }
1182
1183 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
 1184 /* As we do not know which address space the pointer is referring to,
1185 we can do this only if the target does not support different pointer
1186 or address modes depending on the address space. */
1187 if (target_default_pointer_address_modes_p ()
1188 && POINTERS_EXTEND_UNSIGNED > 0
1189 && mode == Pmode && GET_MODE (op) == ptr_mode
1190 && (CONSTANT_P (op)
1191 || (GET_CODE (op) == SUBREG
1192 && REG_P (SUBREG_REG (op))
1193 && REG_POINTER (SUBREG_REG (op))
1194 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1195 return convert_memory_address (Pmode, op);
1196 #endif
1197 break;
1198
1199 default:
1200 break;
1201 }
1202
1203 return 0;
1204 }
1205
1206 /* Try to compute the value of a unary operation CODE whose output mode is to
1207 be MODE with input operand OP whose mode was originally OP_MODE.
1208 Return zero if the value cannot be computed. */
1209 rtx
1210 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1211 rtx op, enum machine_mode op_mode)
1212 {
1213 unsigned int width = GET_MODE_BITSIZE (mode);
1214 unsigned int op_width = GET_MODE_BITSIZE (op_mode);
1215
1216 if (code == VEC_DUPLICATE)
1217 {
1218 gcc_assert (VECTOR_MODE_P (mode));
1219 if (GET_MODE (op) != VOIDmode)
1220 {
1221 if (!VECTOR_MODE_P (GET_MODE (op)))
1222 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1223 else
1224 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1225 (GET_MODE (op)));
1226 }
1227 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1228 || GET_CODE (op) == CONST_VECTOR)
1229 {
1230 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1231 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1232 rtvec v = rtvec_alloc (n_elts);
1233 unsigned int i;
1234
1235 if (GET_CODE (op) != CONST_VECTOR)
1236 for (i = 0; i < n_elts; i++)
1237 RTVEC_ELT (v, i) = op;
1238 else
1239 {
1240 enum machine_mode inmode = GET_MODE (op);
1241 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1242 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1243
1244 gcc_assert (in_n_elts < n_elts);
1245 gcc_assert ((n_elts % in_n_elts) == 0);
1246 for (i = 0; i < n_elts; i++)
1247 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1248 }
1249 return gen_rtx_CONST_VECTOR (mode, v);
1250 }
1251 }
1252
1253 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1254 {
1255 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1256 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1257 enum machine_mode opmode = GET_MODE (op);
1258 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1259 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1260 rtvec v = rtvec_alloc (n_elts);
1261 unsigned int i;
1262
1263 gcc_assert (op_n_elts == n_elts);
1264 for (i = 0; i < n_elts; i++)
1265 {
1266 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1267 CONST_VECTOR_ELT (op, i),
1268 GET_MODE_INNER (opmode));
1269 if (!x)
1270 return 0;
1271 RTVEC_ELT (v, i) = x;
1272 }
1273 return gen_rtx_CONST_VECTOR (mode, v);
1274 }
1275
1276 /* The order of these tests is critical so that, for example, we don't
1277 check the wrong mode (input vs. output) for a conversion operation,
1278 such as FIX. At some point, this should be simplified. */
1279
1280 if (code == FLOAT && GET_MODE (op) == VOIDmode
1281 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1282 {
1283 HOST_WIDE_INT hv, lv;
1284 REAL_VALUE_TYPE d;
1285
1286 if (CONST_INT_P (op))
1287 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1288 else
1289 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1290
1291 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1292 d = real_value_truncate (mode, d);
1293 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1294 }
1295 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1296 && (GET_CODE (op) == CONST_DOUBLE
1297 || CONST_INT_P (op)))
1298 {
1299 HOST_WIDE_INT hv, lv;
1300 REAL_VALUE_TYPE d;
1301
1302 if (CONST_INT_P (op))
1303 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1304 else
1305 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1306
1307 if (op_mode == VOIDmode)
1308 {
1309 /* We don't know how to interpret negative-looking numbers in
1310 this case, so don't try to fold those. */
1311 if (hv < 0)
1312 return 0;
1313 }
1314 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1315 ;
1316 else
1317 hv = 0, lv &= GET_MODE_MASK (op_mode);
1318
1319 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1320 d = real_value_truncate (mode, d);
1321 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1322 }
1323
1324 if (CONST_INT_P (op)
1325 && width <= HOST_BITS_PER_WIDE_INT
1326 && op_width <= HOST_BITS_PER_WIDE_INT && op_width > 0)
1327 {
1328 HOST_WIDE_INT arg0 = INTVAL (op);
1329 HOST_WIDE_INT val;
1330
1331 switch (code)
1332 {
1333 case NOT:
1334 val = ~ arg0;
1335 break;
1336
1337 case NEG:
1338 val = - arg0;
1339 break;
1340
1341 case ABS:
1342 val = (arg0 >= 0 ? arg0 : - arg0);
1343 break;
1344
1345 case FFS:
1346 arg0 &= GET_MODE_MASK (op_mode);
1347 val = ffs_hwi (arg0);
1348 break;
1349
1350 case CLZ:
1351 arg0 &= GET_MODE_MASK (op_mode);
1352 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (op_mode, val))
1353 ;
1354 else
1355 val = GET_MODE_BITSIZE (op_mode) - floor_log2 (arg0) - 1;
1356 break;
1357
1358 case CLRSB:
1359 arg0 &= GET_MODE_MASK (op_mode);
1360 if (arg0 == 0)
1361 val = GET_MODE_BITSIZE (op_mode) - 1;
1362 else if (arg0 >= 0)
1363 val = GET_MODE_BITSIZE (op_mode) - floor_log2 (arg0) - 2;
1364 else if (arg0 < 0)
1365 val = GET_MODE_BITSIZE (op_mode) - floor_log2 (~arg0) - 2;
1366 break;
1367
1368 case CTZ:
1369 arg0 &= GET_MODE_MASK (op_mode);
1370 if (arg0 == 0)
1371 {
1372 /* Even if the value at zero is undefined, we have to come
1373 up with some replacement. Seems good enough. */
1374 if (! CTZ_DEFINED_VALUE_AT_ZERO (op_mode, val))
1375 val = GET_MODE_BITSIZE (op_mode);
1376 }
1377 else
1378 val = ctz_hwi (arg0);
1379 break;
1380
1381 case POPCOUNT:
1382 arg0 &= GET_MODE_MASK (op_mode);
1383 val = 0;
1384 while (arg0)
1385 val++, arg0 &= arg0 - 1;
1386 break;
1387
1388 case PARITY:
1389 arg0 &= GET_MODE_MASK (op_mode);
1390 val = 0;
1391 while (arg0)
1392 val++, arg0 &= arg0 - 1;
1393 val &= 1;
1394 break;
1395
1396 case BSWAP:
1397 {
1398 unsigned int s;
1399
1400 val = 0;
1401 for (s = 0; s < width; s += 8)
1402 {
1403 unsigned int d = width - s - 8;
1404 unsigned HOST_WIDE_INT byte;
1405 byte = (arg0 >> s) & 0xff;
1406 val |= byte << d;
1407 }
1408 }
1409 break;
1410
1411 case TRUNCATE:
1412 val = arg0;
1413 break;
1414
1415 case ZERO_EXTEND:
1416 /* When zero-extending a CONST_INT, we need to know its
1417 original mode. */
1418 gcc_assert (op_mode != VOIDmode);
1419 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1420 {
1421 /* If we were really extending the mode,
1422 we would have to distinguish between zero-extension
1423 and sign-extension. */
1424 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1425 val = arg0;
1426 }
1427 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1428 val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1429 << GET_MODE_BITSIZE (op_mode));
1430 else
1431 return 0;
1432 break;
1433
1434 case SIGN_EXTEND:
1435 if (op_mode == VOIDmode)
1436 op_mode = mode;
1437 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1438 {
1439 /* If we were really extending the mode,
1440 we would have to distinguish between zero-extension
1441 and sign-extension. */
1442 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1443 val = arg0;
1444 }
1445 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1446 {
1447 val
1448 = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1449 << GET_MODE_BITSIZE (op_mode));
1450 if (val & ((unsigned HOST_WIDE_INT) 1
1451 << (GET_MODE_BITSIZE (op_mode) - 1)))
1452 val
1453 -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1454 }
1455 else
1456 return 0;
1457 break;
1458
1459 case SQRT:
1460 case FLOAT_EXTEND:
1461 case FLOAT_TRUNCATE:
1462 case SS_TRUNCATE:
1463 case US_TRUNCATE:
1464 case SS_NEG:
1465 case US_NEG:
1466 case SS_ABS:
1467 return 0;
1468
1469 default:
1470 gcc_unreachable ();
1471 }
1472
1473 return gen_int_mode (val, mode);
1474 }
1475
1476 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1477 for a DImode operation on a CONST_INT. */
1478 else if (GET_MODE (op) == VOIDmode
1479 && width <= HOST_BITS_PER_WIDE_INT * 2
1480 && (GET_CODE (op) == CONST_DOUBLE
1481 || CONST_INT_P (op)))
1482 {
1483 unsigned HOST_WIDE_INT l1, lv;
1484 HOST_WIDE_INT h1, hv;
1485
1486 if (GET_CODE (op) == CONST_DOUBLE)
1487 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1488 else
1489 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1490
1491 switch (code)
1492 {
1493 case NOT:
1494 lv = ~ l1;
1495 hv = ~ h1;
1496 break;
1497
1498 case NEG:
1499 neg_double (l1, h1, &lv, &hv);
1500 break;
1501
1502 case ABS:
1503 if (h1 < 0)
1504 neg_double (l1, h1, &lv, &hv);
1505 else
1506 lv = l1, hv = h1;
1507 break;
1508
1509 case FFS:
1510 hv = 0;
1511 if (l1 != 0)
1512 lv = ffs_hwi (l1);
1513 else if (h1 != 0)
1514 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1515 else
1516 lv = 0;
1517 break;
1518
1519 case CLZ:
1520 hv = 0;
1521 if (h1 != 0)
1522 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1523 - HOST_BITS_PER_WIDE_INT;
1524 else if (l1 != 0)
1525 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1526 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1527 lv = GET_MODE_BITSIZE (mode);
1528 break;
1529
1530 case CTZ:
1531 hv = 0;
1532 if (l1 != 0)
1533 lv = ctz_hwi (l1);
1534 else if (h1 != 0)
1535 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1536 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1537 lv = GET_MODE_BITSIZE (mode);
1538 break;
1539
1540 case POPCOUNT:
1541 hv = 0;
1542 lv = 0;
1543 while (l1)
1544 lv++, l1 &= l1 - 1;
1545 while (h1)
1546 lv++, h1 &= h1 - 1;
1547 break;
1548
1549 case PARITY:
1550 hv = 0;
1551 lv = 0;
1552 while (l1)
1553 lv++, l1 &= l1 - 1;
1554 while (h1)
1555 lv++, h1 &= h1 - 1;
1556 lv &= 1;
1557 break;
1558
1559 case BSWAP:
1560 {
1561 unsigned int s;
1562
1563 hv = 0;
1564 lv = 0;
1565 for (s = 0; s < width; s += 8)
1566 {
1567 unsigned int d = width - s - 8;
1568 unsigned HOST_WIDE_INT byte;
1569
1570 if (s < HOST_BITS_PER_WIDE_INT)
1571 byte = (l1 >> s) & 0xff;
1572 else
1573 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1574
1575 if (d < HOST_BITS_PER_WIDE_INT)
1576 lv |= byte << d;
1577 else
1578 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1579 }
1580 }
1581 break;
1582
1583 case TRUNCATE:
1584 /* This is just a change-of-mode, so do nothing. */
1585 lv = l1, hv = h1;
1586 break;
1587
1588 case ZERO_EXTEND:
1589 gcc_assert (op_mode != VOIDmode);
1590
1591 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1592 return 0;
1593
1594 hv = 0;
1595 lv = l1 & GET_MODE_MASK (op_mode);
1596 break;
1597
1598 case SIGN_EXTEND:
1599 if (op_mode == VOIDmode
1600 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1601 return 0;
1602 else
1603 {
1604 lv = l1 & GET_MODE_MASK (op_mode);
1605 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1606 && (lv & ((unsigned HOST_WIDE_INT) 1
1607 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1608 lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1609
1610 hv = HWI_SIGN_EXTEND (lv);
1611 }
1612 break;
1613
1614 case SQRT:
1615 return 0;
1616
1617 default:
1618 return 0;
1619 }
1620
1621 return immed_double_const (lv, hv, mode);
1622 }
1623
1624 else if (GET_CODE (op) == CONST_DOUBLE
1625 && SCALAR_FLOAT_MODE_P (mode)
1626 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1627 {
1628 REAL_VALUE_TYPE d, t;
1629 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1630
1631 switch (code)
1632 {
1633 case SQRT:
1634 if (HONOR_SNANS (mode) && real_isnan (&d))
1635 return 0;
1636 real_sqrt (&t, mode, &d);
1637 d = t;
1638 break;
1639 case ABS:
1640 d = real_value_abs (&d);
1641 break;
1642 case NEG:
1643 d = real_value_negate (&d);
1644 break;
1645 case FLOAT_TRUNCATE:
1646 d = real_value_truncate (mode, d);
1647 break;
1648 case FLOAT_EXTEND:
1649 /* All this does is change the mode, unless changing
1650 mode class. */
1651 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1652 real_convert (&d, mode, &d);
1653 break;
1654 case FIX:
1655 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1656 break;
1657 case NOT:
1658 {
1659 long tmp[4];
1660 int i;
1661
1662 real_to_target (tmp, &d, GET_MODE (op));
1663 for (i = 0; i < 4; i++)
1664 tmp[i] = ~tmp[i];
1665 real_from_target (&d, tmp, mode);
1666 break;
1667 }
1668 default:
1669 gcc_unreachable ();
1670 }
1671 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1672 }
1673
1674 else if (GET_CODE (op) == CONST_DOUBLE
1675 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1676 && GET_MODE_CLASS (mode) == MODE_INT
1677 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1678 {
1679 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1680 operators are intentionally left unspecified (to ease implementation
1681 by target backends), for consistency, this routine implements the
1682 same semantics for constant folding as used by the middle-end. */
1683
1684 /* This was formerly used only for non-IEEE float.
1685 eggert@twinsun.com says it is safe for IEEE also. */
1686 HOST_WIDE_INT xh, xl, th, tl;
1687 REAL_VALUE_TYPE x, t;
1688 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1689 switch (code)
1690 {
1691 case FIX:
1692 if (REAL_VALUE_ISNAN (x))
1693 return const0_rtx;
1694
1695 /* Test against the signed upper bound. */
1696 if (width > HOST_BITS_PER_WIDE_INT)
1697 {
1698 th = ((unsigned HOST_WIDE_INT) 1
1699 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1700 tl = -1;
1701 }
1702 else
1703 {
1704 th = 0;
1705 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1706 }
1707 real_from_integer (&t, VOIDmode, tl, th, 0);
1708 if (REAL_VALUES_LESS (t, x))
1709 {
1710 xh = th;
1711 xl = tl;
1712 break;
1713 }
1714
1715 /* Test against the signed lower bound. */
1716 if (width > HOST_BITS_PER_WIDE_INT)
1717 {
1718 th = (unsigned HOST_WIDE_INT) (-1)
1719 << (width - HOST_BITS_PER_WIDE_INT - 1);
1720 tl = 0;
1721 }
1722 else
1723 {
1724 th = -1;
1725 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1726 }
1727 real_from_integer (&t, VOIDmode, tl, th, 0);
1728 if (REAL_VALUES_LESS (x, t))
1729 {
1730 xh = th;
1731 xl = tl;
1732 break;
1733 }
1734 REAL_VALUE_TO_INT (&xl, &xh, x);
1735 break;
1736
1737 case UNSIGNED_FIX:
1738 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1739 return const0_rtx;
1740
1741 /* Test against the unsigned upper bound. */
1742 if (width == 2*HOST_BITS_PER_WIDE_INT)
1743 {
1744 th = -1;
1745 tl = -1;
1746 }
1747 else if (width >= HOST_BITS_PER_WIDE_INT)
1748 {
1749 th = ((unsigned HOST_WIDE_INT) 1
1750 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1751 tl = -1;
1752 }
1753 else
1754 {
1755 th = 0;
1756 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1757 }
1758 real_from_integer (&t, VOIDmode, tl, th, 1);
1759 if (REAL_VALUES_LESS (t, x))
1760 {
1761 xh = th;
1762 xl = tl;
1763 break;
1764 }
1765
1766 REAL_VALUE_TO_INT (&xl, &xh, x);
1767 break;
1768
1769 default:
1770 gcc_unreachable ();
1771 }
1772 return immed_double_const (xl, xh, mode);
1773 }
1774
1775 return NULL_RTX;
1776 }
1777 \f
1778 /* Subroutine of simplify_binary_operation to simplify a commutative,
1779 associative binary operation CODE with result mode MODE, operating
1780 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1781 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1782 canonicalization is possible. */
1783
1784 static rtx
1785 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1786 rtx op0, rtx op1)
1787 {
1788 rtx tem;
1789
1790 /* Linearize the operator to the left. */
1791 if (GET_CODE (op1) == code)
1792 {
1793 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1794 if (GET_CODE (op0) == code)
1795 {
1796 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1797 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1798 }
1799
1800 /* "a op (b op c)" becomes "(b op c) op a". */
1801 if (! swap_commutative_operands_p (op1, op0))
1802 return simplify_gen_binary (code, mode, op1, op0);
1803
1804 tem = op0;
1805 op0 = op1;
1806 op1 = tem;
1807 }
1808
1809 if (GET_CODE (op0) == code)
1810 {
1811 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1812 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1813 {
1814 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1815 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1816 }
1817
1818 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1819 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1820 if (tem != 0)
1821 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1822
1823 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1824 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1825 if (tem != 0)
1826 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1827 }
1828
1829 return 0;
1830 }
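/* For example, with CODE == PLUS the canonicalization
   "(x op c) op y" -> "(x op y) op c" turns
   (plus (plus (reg R) (const_int 1)) (reg S)) into
   (plus (plus (reg R) (reg S)) (const_int 1)), keeping the constant
   outermost so later passes can combine it with other constants.
   (Illustrative; R and S stand for arbitrary pseudo registers.)  */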
1831
1832
1833 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1834 and OP1. Return 0 if no simplification is possible.
1835
1836 Don't use this for relational operations such as EQ or LT.
1837 Use simplify_relational_operation instead. */
1838 rtx
1839 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1840 rtx op0, rtx op1)
1841 {
1842 rtx trueop0, trueop1;
1843 rtx tem;
1844
1845 /* Relational operations don't work here. We must know the mode
1846 of the operands in order to do the comparison correctly.
1847 Assuming a full word can give incorrect results.
1848 Consider comparing 128 with -128 in QImode. */
1849 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1850 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1851
1852 /* Make sure the constant is second. */
1853 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1854 && swap_commutative_operands_p (op0, op1))
1855 {
1856 tem = op0, op0 = op1, op1 = tem;
1857 }
1858
1859 trueop0 = avoid_constant_pool_reference (op0);
1860 trueop1 = avoid_constant_pool_reference (op1);
1861
1862 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1863 if (tem)
1864 return tem;
1865 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1866 }
1867
1868 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1869 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1870 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1871 actual constants. */
1872
1873 static rtx
1874 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1875 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1876 {
1877 rtx tem, reversed, opleft, opright;
1878 HOST_WIDE_INT val;
1879 unsigned int width = GET_MODE_BITSIZE (mode);
1880
1881 /* Even if we can't compute a constant result,
1882 there are some cases worth simplifying. */
1883
1884 switch (code)
1885 {
1886 case PLUS:
1887 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1888 when x is NaN, infinite, or finite and nonzero. They aren't
1889 when x is -0 and the rounding mode is not towards -infinity,
1890 since (-0) + 0 is then 0. */
1891 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1892 return op0;
1893
1894 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1895 transformations are safe even for IEEE. */
1896 if (GET_CODE (op0) == NEG)
1897 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1898 else if (GET_CODE (op1) == NEG)
1899 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1900
1901 /* (~a) + 1 -> -a */
1902 if (INTEGRAL_MODE_P (mode)
1903 && GET_CODE (op0) == NOT
1904 && trueop1 == const1_rtx)
1905 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1906
1907 /* Handle both-operands-constant cases. We can only add
1908 CONST_INTs to constants since the sum of relocatable symbols
1909 can't be handled by most assemblers. Don't add CONST_INT
1910 to CONST_INT since overflow won't be computed properly if wider
1911 than HOST_BITS_PER_WIDE_INT. */
1912
1913 if ((GET_CODE (op0) == CONST
1914 || GET_CODE (op0) == SYMBOL_REF
1915 || GET_CODE (op0) == LABEL_REF)
1916 && CONST_INT_P (op1))
1917 return plus_constant (op0, INTVAL (op1));
1918 else if ((GET_CODE (op1) == CONST
1919 || GET_CODE (op1) == SYMBOL_REF
1920 || GET_CODE (op1) == LABEL_REF)
1921 && CONST_INT_P (op0))
1922 return plus_constant (op1, INTVAL (op0));
1923
1924 /* See if this is something like X * C - X or vice versa or
1925 if the multiplication is written as a shift. If so, we can
1926 distribute and make a new multiply, shift, or maybe just
1927 have X (if C is 2 in the example above). But don't make
1928 something more expensive than we had before. */
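 /* A minimal illustration of the combining below: (plus (mult x 3) x)
 has coefficients 3 and 1 and folds to (mult x 4), and
 (plus (ashift x 2) (neg x)) folds to (mult x 3), provided the
 rtx_cost check at the end finds the result no more expensive. */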
1929
1930 if (SCALAR_INT_MODE_P (mode))
1931 {
1932 double_int coeff0, coeff1;
1933 rtx lhs = op0, rhs = op1;
1934
1935 coeff0 = double_int_one;
1936 coeff1 = double_int_one;
1937
1938 if (GET_CODE (lhs) == NEG)
1939 {
1940 coeff0 = double_int_minus_one;
1941 lhs = XEXP (lhs, 0);
1942 }
1943 else if (GET_CODE (lhs) == MULT
1944 && CONST_INT_P (XEXP (lhs, 1)))
1945 {
1946 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1947 lhs = XEXP (lhs, 0);
1948 }
1949 else if (GET_CODE (lhs) == ASHIFT
1950 && CONST_INT_P (XEXP (lhs, 1))
1951 && INTVAL (XEXP (lhs, 1)) >= 0
1952 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1953 {
1954 coeff0 = double_int_setbit (double_int_zero,
1955 INTVAL (XEXP (lhs, 1)));
1956 lhs = XEXP (lhs, 0);
1957 }
1958
1959 if (GET_CODE (rhs) == NEG)
1960 {
1961 coeff1 = double_int_minus_one;
1962 rhs = XEXP (rhs, 0);
1963 }
1964 else if (GET_CODE (rhs) == MULT
1965 && CONST_INT_P (XEXP (rhs, 1)))
1966 {
1967 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
1968 rhs = XEXP (rhs, 0);
1969 }
1970 else if (GET_CODE (rhs) == ASHIFT
1971 && CONST_INT_P (XEXP (rhs, 1))
1972 && INTVAL (XEXP (rhs, 1)) >= 0
1973 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1974 {
1975 coeff1 = double_int_setbit (double_int_zero,
1976 INTVAL (XEXP (rhs, 1)));
1977 rhs = XEXP (rhs, 0);
1978 }
1979
1980 if (rtx_equal_p (lhs, rhs))
1981 {
1982 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1983 rtx coeff;
1984 double_int val;
1985 bool speed = optimize_function_for_speed_p (cfun);
1986
1987 val = double_int_add (coeff0, coeff1);
1988 coeff = immed_double_int_const (val, mode);
1989
1990 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1991 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1992 ? tem : 0;
1993 }
1994 }
1995
1996 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
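 /* For example, (plus (xor x C1) SIGNBIT) becomes (xor x (C1 ^ SIGNBIT)):
 adding the sign bit can only flip the top bit (any carry is lost),
 which is exactly what XORing it in does. */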
1997 if ((CONST_INT_P (op1)
1998 || GET_CODE (op1) == CONST_DOUBLE)
1999 && GET_CODE (op0) == XOR
2000 && (CONST_INT_P (XEXP (op0, 1))
2001 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2002 && mode_signbit_p (mode, op1))
2003 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2004 simplify_gen_binary (XOR, mode, op1,
2005 XEXP (op0, 1)));
2006
2007 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2008 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2009 && GET_CODE (op0) == MULT
2010 && GET_CODE (XEXP (op0, 0)) == NEG)
2011 {
2012 rtx in1, in2;
2013
2014 in1 = XEXP (XEXP (op0, 0), 0);
2015 in2 = XEXP (op0, 1);
2016 return simplify_gen_binary (MINUS, mode, op1,
2017 simplify_gen_binary (MULT, mode,
2018 in1, in2));
2019 }
2020
2021 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2022 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2023 is 1. */
2024 if (COMPARISON_P (op0)
2025 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2026 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2027 && (reversed = reversed_comparison (op0, mode)))
2028 return
2029 simplify_gen_unary (NEG, mode, reversed, mode);
2030
2031 /* If one of the operands is a PLUS or a MINUS, see if we can
2032 simplify this by the associative law.
2033 Don't use the associative law for floating point.
2034 The inaccuracy makes it nonassociative,
2035 and subtle programs can break if operations are associated. */
2036
2037 if (INTEGRAL_MODE_P (mode)
2038 && (plus_minus_operand_p (op0)
2039 || plus_minus_operand_p (op1))
2040 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2041 return tem;
2042
2043 /* Reassociate floating point addition only when the user
2044 specifies associative math operations. */
2045 if (FLOAT_MODE_P (mode)
2046 && flag_associative_math)
2047 {
2048 tem = simplify_associative_operation (code, mode, op0, op1);
2049 if (tem)
2050 return tem;
2051 }
2052 break;
2053
2054 case COMPARE:
2055 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2056 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2057 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2058 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2059 {
2060 rtx xop00 = XEXP (op0, 0);
2061 rtx xop10 = XEXP (op1, 0);
2062
2063 #ifdef HAVE_cc0
2064 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2065 #else
2066 if (REG_P (xop00) && REG_P (xop10)
2067 && GET_MODE (xop00) == GET_MODE (xop10)
2068 && REGNO (xop00) == REGNO (xop10)
2069 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2070 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2071 #endif
2072 return xop00;
2073 }
2074 break;
2075
2076 case MINUS:
2077 /* We can't assume x-x is 0 even with non-IEEE floating point,
2078 but since it is zero except in very strange circumstances, we
2079 will treat it as zero with -ffinite-math-only. */
2080 if (rtx_equal_p (trueop0, trueop1)
2081 && ! side_effects_p (op0)
2082 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2083 return CONST0_RTX (mode);
2084
2085 /* Change subtraction from zero into negation. (0 - x) is the
2086 same as -x when x is NaN, infinite, or finite and nonzero.
2087 But if the mode has signed zeros, and does not round towards
2088 -infinity, then 0 - 0 is 0, not -0. */
2089 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2090 return simplify_gen_unary (NEG, mode, op1, mode);
2091
2092 /* (-1 - a) is ~a. */
2093 if (trueop0 == constm1_rtx)
2094 return simplify_gen_unary (NOT, mode, op1, mode);
2095
2096 /* Subtracting 0 has no effect unless the mode has signed zeros
2097 and supports rounding towards -infinity. In such a case,
2098 0 - 0 is -0. */
2099 if (!(HONOR_SIGNED_ZEROS (mode)
2100 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2101 && trueop1 == CONST0_RTX (mode))
2102 return op0;
2103
2104 /* See if this is something like X * C - X or vice versa or
2105 if the multiplication is written as a shift. If so, we can
2106 distribute and make a new multiply, shift, or maybe just
2107 have X (if C is 2 in the example above). But don't make
2108 something more expensive than we had before. */
2109
2110 if (SCALAR_INT_MODE_P (mode))
2111 {
2112 double_int coeff0, negcoeff1;
2113 rtx lhs = op0, rhs = op1;
2114
2115 coeff0 = double_int_one;
2116 negcoeff1 = double_int_minus_one;
2117
2118 if (GET_CODE (lhs) == NEG)
2119 {
2120 coeff0 = double_int_minus_one;
2121 lhs = XEXP (lhs, 0);
2122 }
2123 else if (GET_CODE (lhs) == MULT
2124 && CONST_INT_P (XEXP (lhs, 1)))
2125 {
2126 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2127 lhs = XEXP (lhs, 0);
2128 }
2129 else if (GET_CODE (lhs) == ASHIFT
2130 && CONST_INT_P (XEXP (lhs, 1))
2131 && INTVAL (XEXP (lhs, 1)) >= 0
2132 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2133 {
2134 coeff0 = double_int_setbit (double_int_zero,
2135 INTVAL (XEXP (lhs, 1)));
2136 lhs = XEXP (lhs, 0);
2137 }
2138
2139 if (GET_CODE (rhs) == NEG)
2140 {
2141 negcoeff1 = double_int_one;
2142 rhs = XEXP (rhs, 0);
2143 }
2144 else if (GET_CODE (rhs) == MULT
2145 && CONST_INT_P (XEXP (rhs, 1)))
2146 {
2147 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2148 rhs = XEXP (rhs, 0);
2149 }
2150 else if (GET_CODE (rhs) == ASHIFT
2151 && CONST_INT_P (XEXP (rhs, 1))
2152 && INTVAL (XEXP (rhs, 1)) >= 0
2153 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2154 {
2155 negcoeff1 = double_int_setbit (double_int_zero,
2156 INTVAL (XEXP (rhs, 1)));
2157 negcoeff1 = double_int_neg (negcoeff1);
2158 rhs = XEXP (rhs, 0);
2159 }
2160
2161 if (rtx_equal_p (lhs, rhs))
2162 {
2163 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2164 rtx coeff;
2165 double_int val;
2166 bool speed = optimize_function_for_speed_p (cfun);
2167
2168 val = double_int_add (coeff0, negcoeff1);
2169 coeff = immed_double_int_const (val, mode);
2170
2171 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2172 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2173 ? tem : 0;
2174 }
2175 }
2176
2177 /* (a - (-b)) -> (a + b). True even for IEEE. */
2178 if (GET_CODE (op1) == NEG)
2179 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2180
2181 /* (-x - c) may be simplified as (-c - x). */
2182 if (GET_CODE (op0) == NEG
2183 && (CONST_INT_P (op1)
2184 || GET_CODE (op1) == CONST_DOUBLE))
2185 {
2186 tem = simplify_unary_operation (NEG, mode, op1, mode);
2187 if (tem)
2188 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2189 }
2190
2191 /* Don't let a relocatable value get a negative coeff. */
2192 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2193 return simplify_gen_binary (PLUS, mode,
2194 op0,
2195 neg_const_int (mode, op1));
2196
2197 /* (x - (x & y)) -> (x & ~y) */
2198 if (GET_CODE (op1) == AND)
2199 {
2200 if (rtx_equal_p (op0, XEXP (op1, 0)))
2201 {
2202 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2203 GET_MODE (XEXP (op1, 1)));
2204 return simplify_gen_binary (AND, mode, op0, tem);
2205 }
2206 if (rtx_equal_p (op0, XEXP (op1, 1)))
2207 {
2208 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2209 GET_MODE (XEXP (op1, 0)));
2210 return simplify_gen_binary (AND, mode, op0, tem);
2211 }
2212 }
2213
2214 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2215 by reversing the comparison code if valid. */
2216 if (STORE_FLAG_VALUE == 1
2217 && trueop0 == const1_rtx
2218 && COMPARISON_P (op1)
2219 && (reversed = reversed_comparison (op1, mode)))
2220 return reversed;
2221
2222 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2223 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2224 && GET_CODE (op1) == MULT
2225 && GET_CODE (XEXP (op1, 0)) == NEG)
2226 {
2227 rtx in1, in2;
2228
2229 in1 = XEXP (XEXP (op1, 0), 0);
2230 in2 = XEXP (op1, 1);
2231 return simplify_gen_binary (PLUS, mode,
2232 simplify_gen_binary (MULT, mode,
2233 in1, in2),
2234 op0);
2235 }
2236
2237 /* Canonicalize (minus (neg A) (mult B C)) to
2238 (minus (mult (neg B) C) A). */
2239 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2240 && GET_CODE (op1) == MULT
2241 && GET_CODE (op0) == NEG)
2242 {
2243 rtx in1, in2;
2244
2245 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2246 in2 = XEXP (op1, 1);
2247 return simplify_gen_binary (MINUS, mode,
2248 simplify_gen_binary (MULT, mode,
2249 in1, in2),
2250 XEXP (op0, 0));
2251 }
2252
2253 /* If one of the operands is a PLUS or a MINUS, see if we can
2254 simplify this by the associative law. This will, for example,
2255 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2256 Don't use the associative law for floating point.
2257 The inaccuracy makes it nonassociative,
2258 and subtle programs can break if operations are associated. */
2259
2260 if (INTEGRAL_MODE_P (mode)
2261 && (plus_minus_operand_p (op0)
2262 || plus_minus_operand_p (op1))
2263 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2264 return tem;
2265 break;
2266
2267 case MULT:
2268 if (trueop1 == constm1_rtx)
2269 return simplify_gen_unary (NEG, mode, op0, mode);
2270
2271 if (GET_CODE (op0) == NEG)
2272 {
2273 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2274 /* If op1 is a MULT as well and simplify_unary_operation
2275 just moved the NEG to the second operand, simplify_gen_binary
2276 below could, through simplify_associative_operation, move
2277 the NEG around again and recurse endlessly. */
2278 if (temp
2279 && GET_CODE (op1) == MULT
2280 && GET_CODE (temp) == MULT
2281 && XEXP (op1, 0) == XEXP (temp, 0)
2282 && GET_CODE (XEXP (temp, 1)) == NEG
2283 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2284 temp = NULL_RTX;
2285 if (temp)
2286 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2287 }
2288 if (GET_CODE (op1) == NEG)
2289 {
2290 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2291 /* If op0 is a MULT as well and simplify_unary_operation
2292 just moved the NEG to the second operand, simplify_gen_binary
2293 below could, through simplify_associative_operation, move
2294 the NEG around again and recurse endlessly. */
2295 if (temp
2296 && GET_CODE (op0) == MULT
2297 && GET_CODE (temp) == MULT
2298 && XEXP (op0, 0) == XEXP (temp, 0)
2299 && GET_CODE (XEXP (temp, 1)) == NEG
2300 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2301 temp = NULL_RTX;
2302 if (temp)
2303 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2304 }
2305
2306 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2307 x is NaN, since x * 0 is then also NaN. Nor is it valid
2308 when the mode has signed zeros, since multiplying a negative
2309 number by 0 will give -0, not 0. */
2310 if (!HONOR_NANS (mode)
2311 && !HONOR_SIGNED_ZEROS (mode)
2312 && trueop1 == CONST0_RTX (mode)
2313 && ! side_effects_p (op0))
2314 return op1;
2315
2316 /* In IEEE floating point, x*1 is not equivalent to x for
2317 signalling NaNs. */
2318 if (!HONOR_SNANS (mode)
2319 && trueop1 == CONST1_RTX (mode))
2320 return op0;
2321
2322 /* Convert multiply by constant power of two into shift unless
2323 we are still generating RTL. This test is a kludge. */
2324 if (CONST_INT_P (trueop1)
2325 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2326 /* If the mode is larger than the host word size, and the
2327 uppermost bit is set, then this isn't a power of two due
2328 to implicit sign extension. */
2329 && (width <= HOST_BITS_PER_WIDE_INT
2330 || val != HOST_BITS_PER_WIDE_INT - 1))
2331 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2332
2333 /* Likewise for multipliers wider than a word. */
2334 if (GET_CODE (trueop1) == CONST_DOUBLE
2335 && (GET_MODE (trueop1) == VOIDmode
2336 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2337 && GET_MODE (op0) == mode
2338 && CONST_DOUBLE_LOW (trueop1) == 0
2339 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2340 return simplify_gen_binary (ASHIFT, mode, op0,
2341 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2342
2343 /* x*2 is x+x and x*(-1) is -x */
2344 if (GET_CODE (trueop1) == CONST_DOUBLE
2345 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2346 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2347 && GET_MODE (op0) == mode)
2348 {
2349 REAL_VALUE_TYPE d;
2350 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2351
2352 if (REAL_VALUES_EQUAL (d, dconst2))
2353 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2354
2355 if (!HONOR_SNANS (mode)
2356 && REAL_VALUES_EQUAL (d, dconstm1))
2357 return simplify_gen_unary (NEG, mode, op0, mode);
2358 }
2359
2360 /* Optimize -x * -x as x * x. */
2361 if (FLOAT_MODE_P (mode)
2362 && GET_CODE (op0) == NEG
2363 && GET_CODE (op1) == NEG
2364 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2365 && !side_effects_p (XEXP (op0, 0)))
2366 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2367
2368 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2369 if (SCALAR_FLOAT_MODE_P (mode)
2370 && GET_CODE (op0) == ABS
2371 && GET_CODE (op1) == ABS
2372 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2373 && !side_effects_p (XEXP (op0, 0)))
2374 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2375
2376 /* Reassociate multiplication, but for floating point MULTs
2377 only when the user specifies unsafe math optimizations. */
2378 if (! FLOAT_MODE_P (mode)
2379 || flag_unsafe_math_optimizations)
2380 {
2381 tem = simplify_associative_operation (code, mode, op0, op1);
2382 if (tem)
2383 return tem;
2384 }
2385 break;
2386
2387 case IOR:
2388 if (trueop1 == CONST0_RTX (mode))
2389 return op0;
2390 if (CONST_INT_P (trueop1)
2391 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2392 == GET_MODE_MASK (mode)))
2393 return op1;
2394 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2395 return op0;
2396 /* A | (~A) -> -1 */
2397 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2398 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2399 && ! side_effects_p (op0)
2400 && SCALAR_INT_MODE_P (mode))
2401 return constm1_rtx;
2402
2403 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2404 if (CONST_INT_P (op1)
2405 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2406 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2407 return op1;
2408
2409 /* Canonicalize (X & C1) | C2. */
2410 if (GET_CODE (op0) == AND
2411 && CONST_INT_P (trueop1)
2412 && CONST_INT_P (XEXP (op0, 1)))
2413 {
2414 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2415 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2416 HOST_WIDE_INT c2 = INTVAL (trueop1);
2417
2418 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2419 if ((c1 & c2) == c1
2420 && !side_effects_p (XEXP (op0, 0)))
2421 return trueop1;
2422
2423 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2424 if (((c1|c2) & mask) == mask)
2425 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2426
2427 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
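 /* E.g. (ior (and x (const_int 0xff)) (const_int 0x0f)) becomes
 (ior (and x (const_int 0xf0)) (const_int 0x0f)): the low four bits
 are set by the IOR regardless, so keeping them in C1 is pointless. */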
2428 if (((c1 & ~c2) & mask) != (c1 & mask))
2429 {
2430 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2431 gen_int_mode (c1 & ~c2, mode));
2432 return simplify_gen_binary (IOR, mode, tem, op1);
2433 }
2434 }
2435
2436 /* Convert (A & B) | A to A. */
2437 if (GET_CODE (op0) == AND
2438 && (rtx_equal_p (XEXP (op0, 0), op1)
2439 || rtx_equal_p (XEXP (op0, 1), op1))
2440 && ! side_effects_p (XEXP (op0, 0))
2441 && ! side_effects_p (XEXP (op0, 1)))
2442 return op1;
2443
2444 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2445 mode size to (rotate A CX). */
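 /* E.g. in SImode, (ior (ashift a 8) (lshiftrt a 24)) matches below and
 becomes (rotate a 8), a rotate left by 8 bits. */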
2446
2447 if (GET_CODE (op1) == ASHIFT
2448 || GET_CODE (op1) == SUBREG)
2449 {
2450 opleft = op1;
2451 opright = op0;
2452 }
2453 else
2454 {
2455 opright = op1;
2456 opleft = op0;
2457 }
2458
2459 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2460 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2461 && CONST_INT_P (XEXP (opleft, 1))
2462 && CONST_INT_P (XEXP (opright, 1))
2463 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2464 == GET_MODE_BITSIZE (mode)))
2465 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2466
2467 /* Same, but for ashift that has been "simplified" to a wider mode
2468 by simplify_shift_const. */
2469
2470 if (GET_CODE (opleft) == SUBREG
2471 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2472 && GET_CODE (opright) == LSHIFTRT
2473 && GET_CODE (XEXP (opright, 0)) == SUBREG
2474 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2475 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2476 && (GET_MODE_SIZE (GET_MODE (opleft))
2477 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2478 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2479 SUBREG_REG (XEXP (opright, 0)))
2480 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2481 && CONST_INT_P (XEXP (opright, 1))
2482 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2483 == GET_MODE_BITSIZE (mode)))
2484 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2485 XEXP (SUBREG_REG (opleft), 1));
2486
2487 /* If we have (ior (and X C1) C2), simplify this by making
2488 C1 as small as possible if C1 actually changes. */
2489 if (CONST_INT_P (op1)
2490 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2491 || INTVAL (op1) > 0)
2492 && GET_CODE (op0) == AND
2493 && CONST_INT_P (XEXP (op0, 1))
2494 && CONST_INT_P (op1)
2495 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2496 return simplify_gen_binary (IOR, mode,
2497 simplify_gen_binary
2498 (AND, mode, XEXP (op0, 0),
2499 GEN_INT (UINTVAL (XEXP (op0, 1))
2500 & ~UINTVAL (op1))),
2501 op1);
2502
2503 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2504 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2505 the PLUS does not affect any of the bits in OP1: then we can do
2506 the IOR as a PLUS and we can associate. This is valid if OP1
2507 can be safely shifted left C bits. */
2508 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2509 && GET_CODE (XEXP (op0, 0)) == PLUS
2510 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2511 && CONST_INT_P (XEXP (op0, 1))
2512 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2513 {
2514 int count = INTVAL (XEXP (op0, 1));
2515 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2516
2517 if (mask >> count == INTVAL (trueop1)
2518 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2519 return simplify_gen_binary (ASHIFTRT, mode,
2520 plus_constant (XEXP (op0, 0), mask),
2521 XEXP (op0, 1));
2522 }
2523
2524 tem = simplify_associative_operation (code, mode, op0, op1);
2525 if (tem)
2526 return tem;
2527 break;
2528
2529 case XOR:
2530 if (trueop1 == CONST0_RTX (mode))
2531 return op0;
2532 if (CONST_INT_P (trueop1)
2533 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2534 == GET_MODE_MASK (mode)))
2535 return simplify_gen_unary (NOT, mode, op0, mode);
2536 if (rtx_equal_p (trueop0, trueop1)
2537 && ! side_effects_p (op0)
2538 && GET_MODE_CLASS (mode) != MODE_CC)
2539 return CONST0_RTX (mode);
2540
2541 /* Canonicalize XOR of the most significant bit to PLUS. */
2542 if ((CONST_INT_P (op1)
2543 || GET_CODE (op1) == CONST_DOUBLE)
2544 && mode_signbit_p (mode, op1))
2545 return simplify_gen_binary (PLUS, mode, op0, op1);
2546 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2547 if ((CONST_INT_P (op1)
2548 || GET_CODE (op1) == CONST_DOUBLE)
2549 && GET_CODE (op0) == PLUS
2550 && (CONST_INT_P (XEXP (op0, 1))
2551 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2552 && mode_signbit_p (mode, XEXP (op0, 1)))
2553 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2554 simplify_gen_binary (XOR, mode, op1,
2555 XEXP (op0, 1)));
2556
2557 /* If we are XORing two things that have no bits in common,
2558 convert them into an IOR. This helps to detect rotation encoded
2559 using those methods and possibly other simplifications. */
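 /* E.g. in SImode (xor (ashift x 8) (lshiftrt x 24)) has no overlapping
 nonzero bits, so it becomes the corresponding IOR, which the IOR case
 above then recognizes as (rotate x 8). */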
2560
2561 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2562 && (nonzero_bits (op0, mode)
2563 & nonzero_bits (op1, mode)) == 0)
2564 return (simplify_gen_binary (IOR, mode, op0, op1));
2565
2566 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2567 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2568 (NOT y). */
2569 {
2570 int num_negated = 0;
2571
2572 if (GET_CODE (op0) == NOT)
2573 num_negated++, op0 = XEXP (op0, 0);
2574 if (GET_CODE (op1) == NOT)
2575 num_negated++, op1 = XEXP (op1, 0);
2576
2577 if (num_negated == 2)
2578 return simplify_gen_binary (XOR, mode, op0, op1);
2579 else if (num_negated == 1)
2580 return simplify_gen_unary (NOT, mode,
2581 simplify_gen_binary (XOR, mode, op0, op1),
2582 mode);
2583 }
2584
2585 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2586 correspond to a machine insn or result in further simplifications
2587 if B is a constant. */
2588
2589 if (GET_CODE (op0) == AND
2590 && rtx_equal_p (XEXP (op0, 1), op1)
2591 && ! side_effects_p (op1))
2592 return simplify_gen_binary (AND, mode,
2593 simplify_gen_unary (NOT, mode,
2594 XEXP (op0, 0), mode),
2595 op1);
2596
2597 else if (GET_CODE (op0) == AND
2598 && rtx_equal_p (XEXP (op0, 0), op1)
2599 && ! side_effects_p (op1))
2600 return simplify_gen_binary (AND, mode,
2601 simplify_gen_unary (NOT, mode,
2602 XEXP (op0, 1), mode),
2603 op1);
2604
2605 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2606 we can transform like this:
2607 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2608 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2609 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2610 Attempt a few simplifications when B and C are both constants. */
2611 if (GET_CODE (op0) == AND
2612 && CONST_INT_P (op1)
2613 && CONST_INT_P (XEXP (op0, 1)))
2614 {
2615 rtx a = XEXP (op0, 0);
2616 rtx b = XEXP (op0, 1);
2617 rtx c = op1;
2618 HOST_WIDE_INT bval = INTVAL (b);
2619 HOST_WIDE_INT cval = INTVAL (c);
2620
2621 rtx na_c
2622 = simplify_binary_operation (AND, mode,
2623 simplify_gen_unary (NOT, mode, a, mode),
2624 c);
2625 if ((~cval & bval) == 0)
2626 {
2627 /* Try to simplify ~A&C | ~B&C. */
2628 if (na_c != NULL_RTX)
2629 return simplify_gen_binary (IOR, mode, na_c,
2630 GEN_INT (~bval & cval));
2631 }
2632 else
2633 {
2634 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2635 if (na_c == const0_rtx)
2636 {
2637 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2638 GEN_INT (~cval & bval));
2639 return simplify_gen_binary (IOR, mode, a_nc_b,
2640 GEN_INT (~bval & cval));
2641 }
2642 }
2643 }
2644
2645 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2646 comparison if STORE_FLAG_VALUE is 1. */
2647 if (STORE_FLAG_VALUE == 1
2648 && trueop1 == const1_rtx
2649 && COMPARISON_P (op0)
2650 && (reversed = reversed_comparison (op0, mode)))
2651 return reversed;
2652
2653 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2654 is (lt foo (const_int 0)), so we can perform the above
2655 simplification if STORE_FLAG_VALUE is 1. */
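 /* For instance, in SImode (xor (lshiftrt x 31) (const_int 1)) tests the
 reverse of (lt x (const_int 0)), so it becomes (ge x (const_int 0)). */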
2656
2657 if (STORE_FLAG_VALUE == 1
2658 && trueop1 == const1_rtx
2659 && GET_CODE (op0) == LSHIFTRT
2660 && CONST_INT_P (XEXP (op0, 1))
2661 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2662 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2663
2664 /* (xor (comparison foo bar) (const_int sign-bit))
2665 when STORE_FLAG_VALUE is the sign bit. */
2666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2667 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2668 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2669 && trueop1 == const_true_rtx
2670 && COMPARISON_P (op0)
2671 && (reversed = reversed_comparison (op0, mode)))
2672 return reversed;
2673
2674 tem = simplify_associative_operation (code, mode, op0, op1);
2675 if (tem)
2676 return tem;
2677 break;
2678
2679 case AND:
2680 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2681 return trueop1;
2682 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2683 {
2684 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2685 HOST_WIDE_INT nzop1;
2686 if (CONST_INT_P (trueop1))
2687 {
2688 HOST_WIDE_INT val1 = INTVAL (trueop1);
2689 /* If we are turning off bits already known off in OP0, we need
2690 not do an AND. */
2691 if ((nzop0 & ~val1) == 0)
2692 return op0;
2693 }
2694 nzop1 = nonzero_bits (trueop1, mode);
2695 /* If we are clearing all the nonzero bits, the result is zero. */
2696 if ((nzop1 & nzop0) == 0
2697 && !side_effects_p (op0) && !side_effects_p (op1))
2698 return CONST0_RTX (mode);
2699 }
2700 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2701 && GET_MODE_CLASS (mode) != MODE_CC)
2702 return op0;
2703 /* A & (~A) -> 0 */
2704 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2705 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2706 && ! side_effects_p (op0)
2707 && GET_MODE_CLASS (mode) != MODE_CC)
2708 return CONST0_RTX (mode);
2709
2710 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2711 there are no nonzero bits of C outside of X's mode. */
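 /* E.g. (and (sign_extend:SI x:QI) (const_int 0x7f)) becomes
 (zero_extend:SI (and:QI x (const_int 0x7f))), because 0x7f has no
 bits set outside QImode's mask. */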
2712 if ((GET_CODE (op0) == SIGN_EXTEND
2713 || GET_CODE (op0) == ZERO_EXTEND)
2714 && CONST_INT_P (trueop1)
2715 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2716 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2717 & UINTVAL (trueop1)) == 0)
2718 {
2719 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2720 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2721 gen_int_mode (INTVAL (trueop1),
2722 imode));
2723 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2724 }
2725
2726 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2727 we might be able to further simplify the AND with X and potentially
2728 remove the truncation altogether. */
2729 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2730 {
2731 rtx x = XEXP (op0, 0);
2732 enum machine_mode xmode = GET_MODE (x);
2733 tem = simplify_gen_binary (AND, xmode, x,
2734 gen_int_mode (INTVAL (trueop1), xmode));
2735 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2736 }
2737
2738 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2739 if (GET_CODE (op0) == IOR
2740 && CONST_INT_P (trueop1)
2741 && CONST_INT_P (XEXP (op0, 1)))
2742 {
2743 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2744 return simplify_gen_binary (IOR, mode,
2745 simplify_gen_binary (AND, mode,
2746 XEXP (op0, 0), op1),
2747 gen_int_mode (tmp, mode));
2748 }
2749
2750 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2751 insn (and may simplify more). */
2752 if (GET_CODE (op0) == XOR
2753 && rtx_equal_p (XEXP (op0, 0), op1)
2754 && ! side_effects_p (op1))
2755 return simplify_gen_binary (AND, mode,
2756 simplify_gen_unary (NOT, mode,
2757 XEXP (op0, 1), mode),
2758 op1);
2759
2760 if (GET_CODE (op0) == XOR
2761 && rtx_equal_p (XEXP (op0, 1), op1)
2762 && ! side_effects_p (op1))
2763 return simplify_gen_binary (AND, mode,
2764 simplify_gen_unary (NOT, mode,
2765 XEXP (op0, 0), mode),
2766 op1);
2767
2768 /* Similarly for (~(A ^ B)) & A. */
2769 if (GET_CODE (op0) == NOT
2770 && GET_CODE (XEXP (op0, 0)) == XOR
2771 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2772 && ! side_effects_p (op1))
2773 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2774
2775 if (GET_CODE (op0) == NOT
2776 && GET_CODE (XEXP (op0, 0)) == XOR
2777 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2778 && ! side_effects_p (op1))
2779 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2780
2781 /* Convert (A | B) & A to A. */
2782 if (GET_CODE (op0) == IOR
2783 && (rtx_equal_p (XEXP (op0, 0), op1)
2784 || rtx_equal_p (XEXP (op0, 1), op1))
2785 && ! side_effects_p (XEXP (op0, 0))
2786 && ! side_effects_p (XEXP (op0, 1)))
2787 return op1;
2788
2789 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2790 ((A & N) + B) & M -> (A + B) & M
2791 Similarly if (N & M) == 0,
2792 ((A | N) + B) & M -> (A + B) & M
2793 and for - instead of + and/or ^ instead of |.
2794 Also, if (N & M) == 0, then
2795 (A +- N) & M -> A & M. */
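 /* Concretely, with M == 0xff: ((a & 0xfff) + b) & 0xff becomes
 (a + b) & 0xff, since bits of a above the inner mask can only
 influence bits that M discards; and (a + 0x100) & 0xff becomes
 a & 0xff. */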
2796 if (CONST_INT_P (trueop1)
2797 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2798 && ~UINTVAL (trueop1)
2799 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2800 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2801 {
2802 rtx pmop[2];
2803 int which;
2804
2805 pmop[0] = XEXP (op0, 0);
2806 pmop[1] = XEXP (op0, 1);
2807
2808 if (CONST_INT_P (pmop[1])
2809 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2810 return simplify_gen_binary (AND, mode, pmop[0], op1);
2811
2812 for (which = 0; which < 2; which++)
2813 {
2814 tem = pmop[which];
2815 switch (GET_CODE (tem))
2816 {
2817 case AND:
2818 if (CONST_INT_P (XEXP (tem, 1))
2819 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2820 == UINTVAL (trueop1))
2821 pmop[which] = XEXP (tem, 0);
2822 break;
2823 case IOR:
2824 case XOR:
2825 if (CONST_INT_P (XEXP (tem, 1))
2826 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2827 pmop[which] = XEXP (tem, 0);
2828 break;
2829 default:
2830 break;
2831 }
2832 }
2833
2834 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2835 {
2836 tem = simplify_gen_binary (GET_CODE (op0), mode,
2837 pmop[0], pmop[1]);
2838 return simplify_gen_binary (code, mode, tem, op1);
2839 }
2840 }
2841
2842 /* (and X (ior (not X) Y)) -> (and X Y) */
2843 if (GET_CODE (op1) == IOR
2844 && GET_CODE (XEXP (op1, 0)) == NOT
2845 && op0 == XEXP (XEXP (op1, 0), 0))
2846 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2847
2848 /* (and (ior (not X) Y) X) -> (and X Y) */
2849 if (GET_CODE (op0) == IOR
2850 && GET_CODE (XEXP (op0, 0)) == NOT
2851 && op1 == XEXP (XEXP (op0, 0), 0))
2852 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2853
2854 tem = simplify_associative_operation (code, mode, op0, op1);
2855 if (tem)
2856 return tem;
2857 break;
2858
2859 case UDIV:
2860 /* 0/x is 0 (or x&0 if x has side-effects). */
2861 if (trueop0 == CONST0_RTX (mode))
2862 {
2863 if (side_effects_p (op1))
2864 return simplify_gen_binary (AND, mode, op1, trueop0);
2865 return trueop0;
2866 }
2867 /* x/1 is x. */
2868 if (trueop1 == CONST1_RTX (mode))
2869 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2870 /* Convert divide by power of two into shift. */
2871 if (CONST_INT_P (trueop1)
2872 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2873 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2874 break;
2875
2876 case DIV:
2877 /* Handle floating point and integers separately. */
2878 if (SCALAR_FLOAT_MODE_P (mode))
2879 {
2880 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2881 safe for modes with NaNs, since 0.0 / 0.0 will then be
2882 NaN rather than 0.0. Nor is it safe for modes with signed
2883 zeros, since dividing 0 by a negative number gives -0.0 */
2884 if (trueop0 == CONST0_RTX (mode)
2885 && !HONOR_NANS (mode)
2886 && !HONOR_SIGNED_ZEROS (mode)
2887 && ! side_effects_p (op1))
2888 return op0;
2889 /* x/1.0 is x. */
2890 if (trueop1 == CONST1_RTX (mode)
2891 && !HONOR_SNANS (mode))
2892 return op0;
2893
2894 if (GET_CODE (trueop1) == CONST_DOUBLE
2895 && trueop1 != CONST0_RTX (mode))
2896 {
2897 REAL_VALUE_TYPE d;
2898 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2899
2900 /* x/-1.0 is -x. */
2901 if (REAL_VALUES_EQUAL (d, dconstm1)
2902 && !HONOR_SNANS (mode))
2903 return simplify_gen_unary (NEG, mode, op0, mode);
2904
2905 /* Change FP division by a constant into multiplication.
2906 Only do this with -freciprocal-math. */
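 /* E.g. x / 4.0 becomes x * 0.25. Computing the reciprocal at compile
 time can change rounding when the divisor is not an exact power of
 two, hence the flag guard. */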
2907 if (flag_reciprocal_math
2908 && !REAL_VALUES_EQUAL (d, dconst0))
2909 {
2910 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2911 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2912 return simplify_gen_binary (MULT, mode, op0, tem);
2913 }
2914 }
2915 }
2916 else
2917 {
2918 /* 0/x is 0 (or x&0 if x has side-effects). */
2919 if (trueop0 == CONST0_RTX (mode)
2920 && !cfun->can_throw_non_call_exceptions)
2921 {
2922 if (side_effects_p (op1))
2923 return simplify_gen_binary (AND, mode, op1, trueop0);
2924 return trueop0;
2925 }
2926 /* x/1 is x. */
2927 if (trueop1 == CONST1_RTX (mode))
2928 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2929 /* x/-1 is -x. */
2930 if (trueop1 == constm1_rtx)
2931 {
2932 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2933 return simplify_gen_unary (NEG, mode, x, mode);
2934 }
2935 }
2936 break;
2937
2938 case UMOD:
2939 /* 0%x is 0 (or x&0 if x has side-effects). */
2940 if (trueop0 == CONST0_RTX (mode))
2941 {
2942 if (side_effects_p (op1))
2943 return simplify_gen_binary (AND, mode, op1, trueop0);
2944 return trueop0;
2945 }
2946 /* x%1 is 0 (or x&0 if x has side-effects). */
2947 if (trueop1 == CONST1_RTX (mode))
2948 {
2949 if (side_effects_p (op0))
2950 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2951 return CONST0_RTX (mode);
2952 }
2953 /* Implement modulus by power of two as AND. */
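 /* E.g. (umod x 16) becomes (and x 15). */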
2954 if (CONST_INT_P (trueop1)
2955 && exact_log2 (UINTVAL (trueop1)) > 0)
2956 return simplify_gen_binary (AND, mode, op0,
2957 GEN_INT (INTVAL (op1) - 1));
2958 break;
2959
2960 case MOD:
2961 /* 0%x is 0 (or x&0 if x has side-effects). */
2962 if (trueop0 == CONST0_RTX (mode))
2963 {
2964 if (side_effects_p (op1))
2965 return simplify_gen_binary (AND, mode, op1, trueop0);
2966 return trueop0;
2967 }
2968 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2969 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2970 {
2971 if (side_effects_p (op0))
2972 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2973 return CONST0_RTX (mode);
2974 }
2975 break;
2976
2977 case ROTATERT:
2978 case ROTATE:
2979 case ASHIFTRT:
2980 if (trueop1 == CONST0_RTX (mode))
2981 return op0;
2982 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2983 return op0;
2984 /* Rotating or arithmetic-right-shifting ~0 always results in ~0. */
2985 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2986 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
2987 && ! side_effects_p (op1))
2988 return op0;
2989 canonicalize_shift:
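 /* If the target truncates shift counts, mask the count with the mode
 width minus one; e.g. an SImode shift by 33 becomes a shift by 1. */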
2990 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2991 {
2992 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2993 if (val != INTVAL (op1))
2994 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2995 }
2996 break;
2997
2998 case ASHIFT:
2999 case SS_ASHIFT:
3000 case US_ASHIFT:
3001 if (trueop1 == CONST0_RTX (mode))
3002 return op0;
3003 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3004 return op0;
3005 goto canonicalize_shift;
3006
3007 case LSHIFTRT:
3008 if (trueop1 == CONST0_RTX (mode))
3009 return op0;
3010 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3011 return op0;
3012 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
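 /* E.g. for SImode x on a target where CLZ of zero is defined to return
 32, (lshiftrt (clz x) 5) is 1 exactly when x is zero, so it
 simplifies to (eq x 0) when STORE_FLAG_VALUE is 1. */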
3013 if (GET_CODE (op0) == CLZ
3014 && CONST_INT_P (trueop1)
3015 && STORE_FLAG_VALUE == 1
3016 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3017 {
3018 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3019 unsigned HOST_WIDE_INT zero_val = 0;
3020
3021 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3022 && zero_val == GET_MODE_BITSIZE (imode)
3023 && INTVAL (trueop1) == exact_log2 (zero_val))
3024 return simplify_gen_relational (EQ, mode, imode,
3025 XEXP (op0, 0), const0_rtx);
3026 }
3027 goto canonicalize_shift;
3028
3029 case SMIN:
3030 if (width <= HOST_BITS_PER_WIDE_INT
3031 && CONST_INT_P (trueop1)
3032 && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width -1)
3033 && ! side_effects_p (op0))
3034 return op1;
3035 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3036 return op0;
3037 tem = simplify_associative_operation (code, mode, op0, op1);
3038 if (tem)
3039 return tem;
3040 break;
3041
3042 case SMAX:
3043 if (width <= HOST_BITS_PER_WIDE_INT
3044 && CONST_INT_P (trueop1)
3045 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3046 && ! side_effects_p (op0))
3047 return op1;
3048 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3049 return op0;
3050 tem = simplify_associative_operation (code, mode, op0, op1);
3051 if (tem)
3052 return tem;
3053 break;
3054
3055 case UMIN:
3056 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3057 return op1;
3058 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3059 return op0;
3060 tem = simplify_associative_operation (code, mode, op0, op1);
3061 if (tem)
3062 return tem;
3063 break;
3064
3065 case UMAX:
3066 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3067 return op1;
3068 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3069 return op0;
3070 tem = simplify_associative_operation (code, mode, op0, op1);
3071 if (tem)
3072 return tem;
3073 break;
3074
3075 case SS_PLUS:
3076 case US_PLUS:
3077 case SS_MINUS:
3078 case US_MINUS:
3079 case SS_MULT:
3080 case US_MULT:
3081 case SS_DIV:
3082 case US_DIV:
3083 /* ??? There are simplifications that can be done. */
3084 return 0;
3085
3086 case VEC_SELECT:
3087 if (!VECTOR_MODE_P (mode))
3088 {
3089 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3090 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3091 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3092 gcc_assert (XVECLEN (trueop1, 0) == 1);
3093 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3094
3095 if (GET_CODE (trueop0) == CONST_VECTOR)
3096 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3097 (trueop1, 0, 0)));
3098
3099 /* Extract a scalar element from a nested VEC_SELECT expression
3100 (with an optional nested VEC_CONCAT expression). Some targets
3101 (i386) extract a scalar element from a vector using a chain of
3102 nested VEC_SELECT expressions. When the input operand is a memory
3103 operand, this operation can be simplified to a simple scalar
3104 load from an offsetted memory address. */
3105 if (GET_CODE (trueop0) == VEC_SELECT)
3106 {
3107 rtx op0 = XEXP (trueop0, 0);
3108 rtx op1 = XEXP (trueop0, 1);
3109
3110 enum machine_mode opmode = GET_MODE (op0);
3111 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3112 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3113
3114 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3115 int elem;
3116
3117 rtvec vec;
3118 rtx tmp_op, tmp;
3119
3120 gcc_assert (GET_CODE (op1) == PARALLEL);
3121 gcc_assert (i < n_elts);
3122
3123 /* Select the element pointed to by the nested selector. */
3124 elem = INTVAL (XVECEXP (op1, 0, i));
3125
3126 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3127 if (GET_CODE (op0) == VEC_CONCAT)
3128 {
3129 rtx op00 = XEXP (op0, 0);
3130 rtx op01 = XEXP (op0, 1);
3131
3132 enum machine_mode mode00, mode01;
3133 int n_elts00, n_elts01;
3134
3135 mode00 = GET_MODE (op00);
3136 mode01 = GET_MODE (op01);
3137
3138 /* Find out number of elements of each operand. */
3139 if (VECTOR_MODE_P (mode00))
3140 {
3141 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3142 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3143 }
3144 else
3145 n_elts00 = 1;
3146
3147 if (VECTOR_MODE_P (mode01))
3148 {
3149 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3150 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3151 }
3152 else
3153 n_elts01 = 1;
3154
3155 gcc_assert (n_elts == n_elts00 + n_elts01);
3156
3157 /* Select correct operand of VEC_CONCAT
3158 and adjust selector. */
3159 if (elem < n_elts00)
3160 tmp_op = op00;
3161 else
3162 {
3163 tmp_op = op01;
3164 elem -= n_elts00;
3165 }
3166 }
3167 else
3168 tmp_op = op0;
3169
3170 vec = rtvec_alloc (1);
3171 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3172
3173 tmp = gen_rtx_fmt_ee (code, mode,
3174 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3175 return tmp;
3176 }
3177 if (GET_CODE (trueop0) == VEC_DUPLICATE
3178 && GET_MODE (XEXP (trueop0, 0)) == mode)
3179 return XEXP (trueop0, 0);
3180 }
3181 else
3182 {
3183 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3184 gcc_assert (GET_MODE_INNER (mode)
3185 == GET_MODE_INNER (GET_MODE (trueop0)));
3186 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3187
3188 if (GET_CODE (trueop0) == CONST_VECTOR)
3189 {
3190 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3191 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3192 rtvec v = rtvec_alloc (n_elts);
3193 unsigned int i;
3194
3195 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3196 for (i = 0; i < n_elts; i++)
3197 {
3198 rtx x = XVECEXP (trueop1, 0, i);
3199
3200 gcc_assert (CONST_INT_P (x));
3201 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3202 INTVAL (x));
3203 }
3204
3205 return gen_rtx_CONST_VECTOR (mode, v);
3206 }
3207 }
3208
3209 if (XVECLEN (trueop1, 0) == 1
3210 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3211 && GET_CODE (trueop0) == VEC_CONCAT)
3212 {
3213 rtx vec = trueop0;
3214 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3215
3216 /* Try to find the element in the VEC_CONCAT. */
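 /* For instance, selecting element 2 of
 (vec_concat:V4SI (vec_concat:V2SI a b) (vec_concat:V2SI c d))
 in SImode descends into the second half and returns c. */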
3217 while (GET_MODE (vec) != mode
3218 && GET_CODE (vec) == VEC_CONCAT)
3219 {
3220 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3221 if (offset < vec_size)
3222 vec = XEXP (vec, 0);
3223 else
3224 {
3225 offset -= vec_size;
3226 vec = XEXP (vec, 1);
3227 }
3228 vec = avoid_constant_pool_reference (vec);
3229 }
3230
3231 if (GET_MODE (vec) == mode)
3232 return vec;
3233 }
3234
3235 return 0;
3236 case VEC_CONCAT:
3237 {
3238 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3239 ? GET_MODE (trueop0)
3240 : GET_MODE_INNER (mode));
3241 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3242 ? GET_MODE (trueop1)
3243 : GET_MODE_INNER (mode));
3244
3245 gcc_assert (VECTOR_MODE_P (mode));
3246 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3247 == GET_MODE_SIZE (mode));
3248
3249 if (VECTOR_MODE_P (op0_mode))
3250 gcc_assert (GET_MODE_INNER (mode)
3251 == GET_MODE_INNER (op0_mode));
3252 else
3253 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3254
3255 if (VECTOR_MODE_P (op1_mode))
3256 gcc_assert (GET_MODE_INNER (mode)
3257 == GET_MODE_INNER (op1_mode));
3258 else
3259 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3260
3261 if ((GET_CODE (trueop0) == CONST_VECTOR
3262 || CONST_INT_P (trueop0)
3263 || GET_CODE (trueop0) == CONST_DOUBLE)
3264 && (GET_CODE (trueop1) == CONST_VECTOR
3265 || CONST_INT_P (trueop1)
3266 || GET_CODE (trueop1) == CONST_DOUBLE))
3267 {
3268 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3269 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3270 rtvec v = rtvec_alloc (n_elts);
3271 unsigned int i;
3272 unsigned in_n_elts = 1;
3273
3274 if (VECTOR_MODE_P (op0_mode))
3275 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3276 for (i = 0; i < n_elts; i++)
3277 {
3278 if (i < in_n_elts)
3279 {
3280 if (!VECTOR_MODE_P (op0_mode))
3281 RTVEC_ELT (v, i) = trueop0;
3282 else
3283 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3284 }
3285 else
3286 {
3287 if (!VECTOR_MODE_P (op1_mode))
3288 RTVEC_ELT (v, i) = trueop1;
3289 else
3290 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3291 i - in_n_elts);
3292 }
3293 }
3294
3295 return gen_rtx_CONST_VECTOR (mode, v);
3296 }
3297 }
3298 return 0;
3299
3300 default:
3301 gcc_unreachable ();
3302 }
3303
3304 return 0;
3305 }
3306
3307 rtx
3308 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3309 rtx op0, rtx op1)
3310 {
3311 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3312 HOST_WIDE_INT val;
3313 unsigned int width = GET_MODE_BITSIZE (mode);
3314
3315 if (VECTOR_MODE_P (mode)
3316 && code != VEC_CONCAT
3317 && GET_CODE (op0) == CONST_VECTOR
3318 && GET_CODE (op1) == CONST_VECTOR)
3319 {
3320 unsigned n_elts = GET_MODE_NUNITS (mode);
3321 enum machine_mode op0mode = GET_MODE (op0);
3322 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3323 enum machine_mode op1mode = GET_MODE (op1);
3324 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3325 rtvec v = rtvec_alloc (n_elts);
3326 unsigned int i;
3327
3328 gcc_assert (op0_n_elts == n_elts);
3329 gcc_assert (op1_n_elts == n_elts);
3330 for (i = 0; i < n_elts; i++)
3331 {
3332 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3333 CONST_VECTOR_ELT (op0, i),
3334 CONST_VECTOR_ELT (op1, i));
3335 if (!x)
3336 return 0;
3337 RTVEC_ELT (v, i) = x;
3338 }
3339
3340 return gen_rtx_CONST_VECTOR (mode, v);
3341 }
3342
3343 if (VECTOR_MODE_P (mode)
3344 && code == VEC_CONCAT
3345 && (CONST_INT_P (op0)
3346 || GET_CODE (op0) == CONST_DOUBLE
3347 || GET_CODE (op0) == CONST_FIXED)
3348 && (CONST_INT_P (op1)
3349 || GET_CODE (op1) == CONST_DOUBLE
3350 || GET_CODE (op1) == CONST_FIXED))
3351 {
3352 unsigned n_elts = GET_MODE_NUNITS (mode);
3353 rtvec v = rtvec_alloc (n_elts);
3354
3355 gcc_assert (n_elts >= 2);
3356 if (n_elts == 2)
3357 {
3358 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3359 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3360
3361 RTVEC_ELT (v, 0) = op0;
3362 RTVEC_ELT (v, 1) = op1;
3363 }
3364 else
3365 {
3366 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3367 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3368 unsigned i;
3369
3370 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3371 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3372 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3373
3374 for (i = 0; i < op0_n_elts; ++i)
3375 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3376 for (i = 0; i < op1_n_elts; ++i)
3377 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3378 }
3379
3380 return gen_rtx_CONST_VECTOR (mode, v);
3381 }
3382
3383 if (SCALAR_FLOAT_MODE_P (mode)
3384 && GET_CODE (op0) == CONST_DOUBLE
3385 && GET_CODE (op1) == CONST_DOUBLE
3386 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3387 {
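 /* AND, IOR and XOR are applied to the raw target representation of
 the two constants. Such bitwise operations on floating-point values
 typically come from expanding abs, neg and copysign as mask
 operations. */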
3388 if (code == AND
3389 || code == IOR
3390 || code == XOR)
3391 {
3392 long tmp0[4];
3393 long tmp1[4];
3394 REAL_VALUE_TYPE r;
3395 int i;
3396
3397 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3398 GET_MODE (op0));
3399 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3400 GET_MODE (op1));
3401 for (i = 0; i < 4; i++)
3402 {
3403 switch (code)
3404 {
3405 case AND:
3406 tmp0[i] &= tmp1[i];
3407 break;
3408 case IOR:
3409 tmp0[i] |= tmp1[i];
3410 break;
3411 case XOR:
3412 tmp0[i] ^= tmp1[i];
3413 break;
3414 default:
3415 gcc_unreachable ();
3416 }
3417 }
3418 real_from_target (&r, tmp0, mode);
3419 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3420 }
3421 else
3422 {
3423 REAL_VALUE_TYPE f0, f1, value, result;
3424 bool inexact;
3425
3426 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3427 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3428 real_convert (&f0, mode, &f0);
3429 real_convert (&f1, mode, &f1);
3430
3431 if (HONOR_SNANS (mode)
3432 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3433 return 0;
3434
3435 if (code == DIV
3436 && REAL_VALUES_EQUAL (f1, dconst0)
3437 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3438 return 0;
3439
3440 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3441 && flag_trapping_math
3442 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3443 {
3444 int s0 = REAL_VALUE_NEGATIVE (f0);
3445 int s1 = REAL_VALUE_NEGATIVE (f1);
3446
3447 switch (code)
3448 {
3449 case PLUS:
3450 /* Inf + -Inf = NaN plus exception. */
3451 if (s0 != s1)
3452 return 0;
3453 break;
3454 case MINUS:
3455 /* Inf - Inf = NaN plus exception. */
3456 if (s0 == s1)
3457 return 0;
3458 break;
3459 case DIV:
3460 /* Inf / Inf = NaN plus exception. */
3461 return 0;
3462 default:
3463 break;
3464 }
3465 }
3466
3467 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3468 && flag_trapping_math
3469 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3470 || (REAL_VALUE_ISINF (f1)
3471 && REAL_VALUES_EQUAL (f0, dconst0))))
3472 /* Inf * 0 = NaN plus exception. */
3473 return 0;
3474
3475 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3476 &f0, &f1);
3477 real_convert (&result, mode, &value);
3478
3479 /* Don't constant fold this floating point operation if
3480 the result has overflowed and flag_trapping_math is set. */
3481
3482 if (flag_trapping_math
3483 && MODE_HAS_INFINITIES (mode)
3484 && REAL_VALUE_ISINF (result)
3485 && !REAL_VALUE_ISINF (f0)
3486 && !REAL_VALUE_ISINF (f1))
3487 /* Overflow plus exception. */
3488 return 0;
3489
3490 /* Don't constant fold this floating point operation if the
3491 result may depend upon the run-time rounding mode and
3492 flag_rounding_math is set, or if GCC's software emulation
3493 is unable to accurately represent the result. */
3494
3495 if ((flag_rounding_math
3496 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3497 && (inexact || !real_identical (&result, &value)))
3498 return NULL_RTX;
3499
3500 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3501 }
3502 }
3503
3504 /* We can fold some multi-word operations. */
3505 if (GET_MODE_CLASS (mode) == MODE_INT
3506 && width == HOST_BITS_PER_DOUBLE_INT
3507 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3508 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3509 {
3510 double_int o0, o1, res, tmp;
3511
3512 o0 = rtx_to_double_int (op0);
3513 o1 = rtx_to_double_int (op1);
3514
3515 switch (code)
3516 {
3517 case MINUS:
3518 /* A - B == A + (-B). */
3519 o1 = double_int_neg (o1);
3520
3521 /* Fall through.... */
3522
3523 case PLUS:
3524 res = double_int_add (o0, o1);
3525 break;
3526
3527 case MULT:
3528 res = double_int_mul (o0, o1);
3529 break;
3530
3531 case DIV:
3532 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3533 o0.low, o0.high, o1.low, o1.high,
3534 &res.low, &res.high,
3535 &tmp.low, &tmp.high))
3536 return 0;
3537 break;
3538
3539 case MOD:
3540 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3541 o0.low, o0.high, o1.low, o1.high,
3542 &tmp.low, &tmp.high,
3543 &res.low, &res.high))
3544 return 0;
3545 break;
3546
3547 case UDIV:
3548 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3549 o0.low, o0.high, o1.low, o1.high,
3550 &res.low, &res.high,
3551 &tmp.low, &tmp.high))
3552 return 0;
3553 break;
3554
3555 case UMOD:
3556 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3557 o0.low, o0.high, o1.low, o1.high,
3558 &tmp.low, &tmp.high,
3559 &res.low, &res.high))
3560 return 0;
3561 break;
3562
3563 case AND:
3564 res = double_int_and (o0, o1);
3565 break;
3566
3567 case IOR:
3568 res = double_int_ior (o0, o1);
3569 break;
3570
3571 case XOR:
3572 res = double_int_xor (o0, o1);
3573 break;
3574
3575 case SMIN:
3576 res = double_int_smin (o0, o1);
3577 break;
3578
3579 case SMAX:
3580 res = double_int_smax (o0, o1);
3581 break;
3582
3583 case UMIN:
3584 res = double_int_umin (o0, o1);
3585 break;
3586
3587 case UMAX:
3588 res = double_int_umax (o0, o1);
3589 break;
3590
3591 case LSHIFTRT: case ASHIFTRT:
3592 case ASHIFT:
3593 case ROTATE: case ROTATERT:
3594 {
3595 unsigned HOST_WIDE_INT cnt;
3596
3597 if (SHIFT_COUNT_TRUNCATED)
3598 o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
3599
3600 if (!double_int_fits_in_uhwi_p (o1)
3601 || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
3602 return 0;
3603
3604 cnt = double_int_to_uhwi (o1);
3605
3606 if (code == LSHIFTRT || code == ASHIFTRT)
3607 res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
3608 code == ASHIFTRT);
3609 else if (code == ASHIFT)
3610 res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
3611 true);
3612 else if (code == ROTATE)
3613 res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3614 else /* code == ROTATERT */
3615 res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3616 }
3617 break;
3618
3619 default:
3620 return 0;
3621 }
3622
3623 return immed_double_int_const (res, mode);
3624 }
3625
3626 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3627 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3628 {
3629 /* Get the integer argument values in two forms:
3630 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
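 /* E.g. for QImode (width 8) the bit pattern 0xff gives ARG0 == 255
 and ARG0S == -1. */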
3631
3632 arg0 = INTVAL (op0);
3633 arg1 = INTVAL (op1);
3634
3635 if (width < HOST_BITS_PER_WIDE_INT)
3636 {
3637 arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3638 arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3639
3640 arg0s = arg0;
3641 if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3642 arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3643
3644 arg1s = arg1;
3645 if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3646 arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3647 }
3648 else
3649 {
3650 arg0s = arg0;
3651 arg1s = arg1;
3652 }
3653
3654 /* Compute the value of the arithmetic. */
3655
3656 switch (code)
3657 {
3658 case PLUS:
3659 val = arg0s + arg1s;
3660 break;
3661
3662 case MINUS:
3663 val = arg0s - arg1s;
3664 break;
3665
3666 case MULT:
3667 val = arg0s * arg1s;
3668 break;
3669
3670 case DIV:
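 /* Punt on division by zero and on the one signed case that overflows,
 the most negative value divided by -1. */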
3671 if (arg1s == 0
3672 || ((unsigned HOST_WIDE_INT) arg0s
3673 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3674 && arg1s == -1))
3675 return 0;
3676 val = arg0s / arg1s;
3677 break;
3678
3679 case MOD:
3680 if (arg1s == 0
3681 || ((unsigned HOST_WIDE_INT) arg0s
3682 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3683 && arg1s == -1))
3684 return 0;
3685 val = arg0s % arg1s;
3686 break;
3687
3688 case UDIV:
3689 if (arg1 == 0
3690 || ((unsigned HOST_WIDE_INT) arg0s
3691 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3692 && arg1s == -1))
3693 return 0;
3694 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3695 break;
3696
3697 case UMOD:
3698 if (arg1 == 0
3699 || ((unsigned HOST_WIDE_INT) arg0s
3700 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3701 && arg1s == -1))
3702 return 0;
3703 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3704 break;
3705
3706 case AND:
3707 val = arg0 & arg1;
3708 break;
3709
3710 case IOR:
3711 val = arg0 | arg1;
3712 break;
3713
3714 case XOR:
3715 val = arg0 ^ arg1;
3716 break;
3717
3718 case LSHIFTRT:
3719 case ASHIFT:
3720 case ASHIFTRT:
3721 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3722 the value is in range. We can't return any old value for
3723 out-of-range arguments because either the middle-end (via
3724 shift_truncation_mask) or the back-end might be relying on
3725 target-specific knowledge. Nor can we rely on
3726 shift_truncation_mask, since the shift might not be part of an
3727 ashlM3, lshrM3 or ashrM3 instruction. */
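/* As an illustration: with SHIFT_COUNT_TRUNCATED set, an SImode shift
   count of 33 is reduced to 33 % 32 == 1 below; without it, the
   out-of-range count makes us return 0 and fold nothing. */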
3728 if (SHIFT_COUNT_TRUNCATED)
3729 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3730 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3731 return 0;
3732
3733 val = (code == ASHIFT
3734 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3735 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3736
3737 /* Sign-extend the result for arithmetic right shifts. */
3738 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3739 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3740 break;
3741
3742 case ROTATERT:
3743 if (arg1 < 0)
3744 return 0;
3745
3746 arg1 %= width;
3747 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3748 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3749 break;
3750
3751 case ROTATE:
3752 if (arg1 < 0)
3753 return 0;
3754
3755 arg1 %= width;
3756 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3757 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3758 break;
3759
3760 case COMPARE:
3761 /* Do nothing here. */
3762 return 0;
3763
3764 case SMIN:
3765 val = arg0s <= arg1s ? arg0s : arg1s;
3766 break;
3767
3768 case UMIN:
3769 val = ((unsigned HOST_WIDE_INT) arg0
3770 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3771 break;
3772
3773 case SMAX:
3774 val = arg0s > arg1s ? arg0s : arg1s;
3775 break;
3776
3777 case UMAX:
3778 val = ((unsigned HOST_WIDE_INT) arg0
3779 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3780 break;
3781
3782 case SS_PLUS:
3783 case US_PLUS:
3784 case SS_MINUS:
3785 case US_MINUS:
3786 case SS_MULT:
3787 case US_MULT:
3788 case SS_DIV:
3789 case US_DIV:
3790 case SS_ASHIFT:
3791 case US_ASHIFT:
3792 /* ??? There are simplifications that can be done. */
3793 return 0;
3794
3795 default:
3796 gcc_unreachable ();
3797 }
3798
3799 return gen_int_mode (val, mode);
3800 }
3801
3802 return NULL_RTX;
3803 }
3804
3805
3806 \f
3807 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3808 PLUS or MINUS.
3809
3810 Rather than testing for specific cases, we do this by a brute-force method
3811 and do all possible simplifications until no more changes occur. Then
3812 we rebuild the operation. */
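/* As a worked example: simplifying (minus (plus a b) a) flattens the
   expression into the entries {a, +}, {b, +}, {a, -}; the pairwise
   pass then cancels A against -A, and the rebuilt result is just B. */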
3813
3814 struct simplify_plus_minus_op_data
3815 {
3816 rtx op;
3817 short neg;
3818 };
3819
3820 static bool
3821 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3822 {
3823 int result;
3824
3825 result = (commutative_operand_precedence (y)
3826 - commutative_operand_precedence (x));
3827 if (result)
3828 return result > 0;
3829
3830 /* Group together equal REGs to do more simplification. */
3831 if (REG_P (x) && REG_P (y))
3832 return REGNO (x) > REGNO (y);
3833 else
3834 return false;
3835 }
3836
3837 static rtx
3838 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3839 rtx op1)
3840 {
3841 struct simplify_plus_minus_op_data ops[8];
3842 rtx result, tem;
3843 int n_ops = 2, input_ops = 2;
3844 int changed, n_constants = 0, canonicalized = 0;
3845 int i, j;
3846
3847 memset (ops, 0, sizeof ops);
3848
3849 /* Set up the two operands and then expand them until nothing has been
3850 changed. If we run out of room in our array, give up; this should
3851 almost never happen. */
3852
3853 ops[0].op = op0;
3854 ops[0].neg = 0;
3855 ops[1].op = op1;
3856 ops[1].neg = (code == MINUS);
3857
3858 do
3859 {
3860 changed = 0;
3861
3862 for (i = 0; i < n_ops; i++)
3863 {
3864 rtx this_op = ops[i].op;
3865 int this_neg = ops[i].neg;
3866 enum rtx_code this_code = GET_CODE (this_op);
3867
3868 switch (this_code)
3869 {
3870 case PLUS:
3871 case MINUS:
3872 if (n_ops == 7)
3873 return NULL_RTX;
3874
3875 ops[n_ops].op = XEXP (this_op, 1);
3876 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3877 n_ops++;
3878
3879 ops[i].op = XEXP (this_op, 0);
3880 input_ops++;
3881 changed = 1;
3882 canonicalized |= this_neg;
3883 break;
3884
3885 case NEG:
3886 ops[i].op = XEXP (this_op, 0);
3887 ops[i].neg = ! this_neg;
3888 changed = 1;
3889 canonicalized = 1;
3890 break;
3891
3892 case CONST:
3893 if (n_ops < 7
3894 && GET_CODE (XEXP (this_op, 0)) == PLUS
3895 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3896 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3897 {
3898 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3899 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3900 ops[n_ops].neg = this_neg;
3901 n_ops++;
3902 changed = 1;
3903 canonicalized = 1;
3904 }
3905 break;
3906
3907 case NOT:
3908 /* ~a -> (-a - 1) */
3909 if (n_ops != 7)
3910 {
3911 ops[n_ops].op = constm1_rtx;
3912 ops[n_ops++].neg = this_neg;
3913 ops[i].op = XEXP (this_op, 0);
3914 ops[i].neg = !this_neg;
3915 changed = 1;
3916 canonicalized = 1;
3917 }
3918 break;
3919
3920 case CONST_INT:
3921 n_constants++;
3922 if (this_neg)
3923 {
3924 ops[i].op = neg_const_int (mode, this_op);
3925 ops[i].neg = 0;
3926 changed = 1;
3927 canonicalized = 1;
3928 }
3929 break;
3930
3931 default:
3932 break;
3933 }
3934 }
3935 }
3936 while (changed);
3937
3938 if (n_constants > 1)
3939 canonicalized = 1;
3940
3941 gcc_assert (n_ops >= 2);
3942
3943 /* If we only have two operands, we can avoid the loops. */
3944 if (n_ops == 2)
3945 {
3946 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3947 rtx lhs, rhs;
3948
3949 /* Get the two operands. Be careful with the order, especially for
3950 the cases where code == MINUS. */
3951 if (ops[0].neg && ops[1].neg)
3952 {
3953 lhs = gen_rtx_NEG (mode, ops[0].op);
3954 rhs = ops[1].op;
3955 }
3956 else if (ops[0].neg)
3957 {
3958 lhs = ops[1].op;
3959 rhs = ops[0].op;
3960 }
3961 else
3962 {
3963 lhs = ops[0].op;
3964 rhs = ops[1].op;
3965 }
3966
3967 return simplify_const_binary_operation (code, mode, lhs, rhs);
3968 }
3969
3970 /* Now simplify each pair of operands until nothing changes. */
3971 do
3972 {
3973 /* Insertion sort is good enough for an eight-element array. */
3974 for (i = 1; i < n_ops; i++)
3975 {
3976 struct simplify_plus_minus_op_data save;
3977 j = i - 1;
3978 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3979 continue;
3980
3981 canonicalized = 1;
3982 save = ops[i];
3983 do
3984 ops[j + 1] = ops[j];
3985 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3986 ops[j + 1] = save;
3987 }
3988
3989 changed = 0;
3990 for (i = n_ops - 1; i > 0; i--)
3991 for (j = i - 1; j >= 0; j--)
3992 {
3993 rtx lhs = ops[j].op, rhs = ops[i].op;
3994 int lneg = ops[j].neg, rneg = ops[i].neg;
3995
3996 if (lhs != 0 && rhs != 0)
3997 {
3998 enum rtx_code ncode = PLUS;
3999
4000 if (lneg != rneg)
4001 {
4002 ncode = MINUS;
4003 if (lneg)
4004 tem = lhs, lhs = rhs, rhs = tem;
4005 }
4006 else if (swap_commutative_operands_p (lhs, rhs))
4007 tem = lhs, lhs = rhs, rhs = tem;
4008
4009 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4010 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4011 {
4012 rtx tem_lhs, tem_rhs;
4013
4014 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4015 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4016 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4017
4018 if (tem && !CONSTANT_P (tem))
4019 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4020 }
4021 else
4022 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4023
4024 /* Reject "simplifications" that just wrap the two
4025 arguments in a CONST. Failure to do so can result
4026 in infinite recursion with simplify_binary_operation
4027 when it calls us to simplify CONST operations. */
4028 if (tem
4029 && ! (GET_CODE (tem) == CONST
4030 && GET_CODE (XEXP (tem, 0)) == ncode
4031 && XEXP (XEXP (tem, 0), 0) == lhs
4032 && XEXP (XEXP (tem, 0), 1) == rhs))
4033 {
4034 lneg &= rneg;
4035 if (GET_CODE (tem) == NEG)
4036 tem = XEXP (tem, 0), lneg = !lneg;
4037 if (CONST_INT_P (tem) && lneg)
4038 tem = neg_const_int (mode, tem), lneg = 0;
4039
4040 ops[i].op = tem;
4041 ops[i].neg = lneg;
4042 ops[j].op = NULL_RTX;
4043 changed = 1;
4044 canonicalized = 1;
4045 }
4046 }
4047 }
4048
4049 /* If nothing changed, fail. */
4050 if (!canonicalized)
4051 return NULL_RTX;
4052
4053 /* Pack all the operands to the lower-numbered entries. */
4054 for (i = 0, j = 0; j < n_ops; j++)
4055 if (ops[j].op)
4056 {
4057 ops[i] = ops[j];
4058 i++;
4059 }
4060 n_ops = i;
4061 }
4062 while (changed);
4063
4064 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4065 if (n_ops == 2
4066 && CONST_INT_P (ops[1].op)
4067 && CONSTANT_P (ops[0].op)
4068 && ops[0].neg)
4069 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4070
4071 /* We suppressed creation of trivial CONST expressions in the
4072 combination loop to avoid recursion. Create one manually now.
4073 The combination loop should have ensured that there is exactly
4074 one CONST_INT, and the sort will have ensured that it is last
4075 in the array and that any other constant will be next-to-last. */
4076
4077 if (n_ops > 1
4078 && CONST_INT_P (ops[n_ops - 1].op)
4079 && CONSTANT_P (ops[n_ops - 2].op))
4080 {
4081 rtx value = ops[n_ops - 1].op;
4082 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4083 value = neg_const_int (mode, value);
4084 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
4085 n_ops--;
4086 }
4087
4088 /* Put a non-negated operand first, if possible. */
4089
4090 for (i = 0; i < n_ops && ops[i].neg; i++)
4091 continue;
4092 if (i == n_ops)
4093 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4094 else if (i != 0)
4095 {
4096 tem = ops[0].op;
4097 ops[0] = ops[i];
4098 ops[i].op = tem;
4099 ops[i].neg = 1;
4100 }
4101
4102 /* Now make the result by performing the requested operations. */
4103 result = ops[0].op;
4104 for (i = 1; i < n_ops; i++)
4105 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4106 mode, result, ops[i].op);
4107
4108 return result;
4109 }
4110
4111 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4112 static bool
4113 plus_minus_operand_p (const_rtx x)
4114 {
4115 return GET_CODE (x) == PLUS
4116 || GET_CODE (x) == MINUS
4117 || (GET_CODE (x) == CONST
4118 && GET_CODE (XEXP (x, 0)) == PLUS
4119 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4120 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4121 }
4122
4123 /* Like simplify_binary_operation except used for relational operators.
4124 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4125 not also be VOIDmode.
4126
4127 CMP_MODE specifies the mode in which the comparison is done, so it is
4128 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4129 the operands or, if both are VOIDmode, the operands are compared in
4130 "infinite precision". */
4131 rtx
4132 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4133 enum machine_mode cmp_mode, rtx op0, rtx op1)
4134 {
4135 rtx tem, trueop0, trueop1;
4136
4137 if (cmp_mode == VOIDmode)
4138 cmp_mode = GET_MODE (op0);
4139 if (cmp_mode == VOIDmode)
4140 cmp_mode = GET_MODE (op1);
4141
4142 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4143 if (tem)
4144 {
4145 if (SCALAR_FLOAT_MODE_P (mode))
4146 {
4147 if (tem == const0_rtx)
4148 return CONST0_RTX (mode);
4149 #ifdef FLOAT_STORE_FLAG_VALUE
4150 {
4151 REAL_VALUE_TYPE val;
4152 val = FLOAT_STORE_FLAG_VALUE (mode);
4153 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4154 }
4155 #else
4156 return NULL_RTX;
4157 #endif
4158 }
4159 if (VECTOR_MODE_P (mode))
4160 {
4161 if (tem == const0_rtx)
4162 return CONST0_RTX (mode);
4163 #ifdef VECTOR_STORE_FLAG_VALUE
4164 {
4165 int i, units;
4166 rtvec v;
4167
4168 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4169 if (val == NULL_RTX)
4170 return NULL_RTX;
4171 if (val == const1_rtx)
4172 return CONST1_RTX (mode);
4173
4174 units = GET_MODE_NUNITS (mode);
4175 v = rtvec_alloc (units);
4176 for (i = 0; i < units; i++)
4177 RTVEC_ELT (v, i) = val;
4178 return gen_rtx_raw_CONST_VECTOR (mode, v);
4179 }
4180 #else
4181 return NULL_RTX;
4182 #endif
4183 }
4184
4185 return tem;
4186 }
4187
4188 /* For the following tests, ensure const0_rtx is op1. */
4189 if (swap_commutative_operands_p (op0, op1)
4190 || (op0 == const0_rtx && op1 != const0_rtx))
4191 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4192
4193 /* If op0 is a compare, extract the comparison arguments from it. */
4194 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4195 return simplify_gen_relational (code, mode, VOIDmode,
4196 XEXP (op0, 0), XEXP (op0, 1));
4197
4198 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4199 || CC0_P (op0))
4200 return NULL_RTX;
4201
4202 trueop0 = avoid_constant_pool_reference (op0);
4203 trueop1 = avoid_constant_pool_reference (op1);
4204 return simplify_relational_operation_1 (code, mode, cmp_mode,
4205 trueop0, trueop1);
4206 }
4207
4208 /* This part of simplify_relational_operation is only used when CMP_MODE
4209 is not in class MODE_CC (i.e. it is a real comparison).
4210
4211 MODE is the mode of the result, while CMP_MODE specifies the mode
4212 in which the comparison is done, so it is the mode of the operands. */
4213
4214 static rtx
4215 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4216 enum machine_mode cmp_mode, rtx op0, rtx op1)
4217 {
4218 enum rtx_code op0code = GET_CODE (op0);
4219
4220 if (op1 == const0_rtx && COMPARISON_P (op0))
4221 {
4222 /* If op0 is a comparison, extract the comparison arguments
4223 from it. */
4224 if (code == NE)
4225 {
4226 if (GET_MODE (op0) == mode)
4227 return simplify_rtx (op0);
4228 else
4229 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4230 XEXP (op0, 0), XEXP (op0, 1));
4231 }
4232 else if (code == EQ)
4233 {
4234 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4235 if (new_code != UNKNOWN)
4236 return simplify_gen_relational (new_code, mode, VOIDmode,
4237 XEXP (op0, 0), XEXP (op0, 1));
4238 }
4239 }
4240
4241 /* (LTU/GEU (PLUS a C) C), where C is a constant, can be simplified to
4242 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
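/* For example, the overflow check (ltu (plus a (const_int 4)) (const_int 4))
   becomes (geu a (const_int -4)), which holds exactly when the addition
   wrapped around. */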
4243 if ((code == LTU || code == GEU)
4244 && GET_CODE (op0) == PLUS
4245 && CONST_INT_P (XEXP (op0, 1))
4246 && (rtx_equal_p (op1, XEXP (op0, 0))
4247 || rtx_equal_p (op1, XEXP (op0, 1))))
4248 {
4249 rtx new_cmp
4250 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4251 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4252 cmp_mode, XEXP (op0, 0), new_cmp);
4253 }
4254
4255 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4256 if ((code == LTU || code == GEU)
4257 && GET_CODE (op0) == PLUS
4258 && rtx_equal_p (op1, XEXP (op0, 1))
4259 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4260 && !rtx_equal_p (op1, XEXP (op0, 0)))
4261 return simplify_gen_relational (code, mode, cmp_mode, op0,
4262 copy_rtx (XEXP (op0, 0)));
4263
4264 if (op1 == const0_rtx)
4265 {
4266 /* Canonicalize (GTU x 0) as (NE x 0). */
4267 if (code == GTU)
4268 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4269 /* Canonicalize (LEU x 0) as (EQ x 0). */
4270 if (code == LEU)
4271 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4272 }
4273 else if (op1 == const1_rtx)
4274 {
4275 switch (code)
4276 {
4277 case GE:
4278 /* Canonicalize (GE x 1) as (GT x 0). */
4279 return simplify_gen_relational (GT, mode, cmp_mode,
4280 op0, const0_rtx);
4281 case GEU:
4282 /* Canonicalize (GEU x 1) as (NE x 0). */
4283 return simplify_gen_relational (NE, mode, cmp_mode,
4284 op0, const0_rtx);
4285 case LT:
4286 /* Canonicalize (LT x 1) as (LE x 0). */
4287 return simplify_gen_relational (LE, mode, cmp_mode,
4288 op0, const0_rtx);
4289 case LTU:
4290 /* Canonicalize (LTU x 1) as (EQ x 0). */
4291 return simplify_gen_relational (EQ, mode, cmp_mode,
4292 op0, const0_rtx);
4293 default:
4294 break;
4295 }
4296 }
4297 else if (op1 == constm1_rtx)
4298 {
4299 /* Canonicalize (LE x -1) as (LT x 0). */
4300 if (code == LE)
4301 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4302 /* Canonicalize (GT x -1) as (GE x 0). */
4303 if (code == GT)
4304 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4305 }
4306
4307 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4308 if ((code == EQ || code == NE)
4309 && (op0code == PLUS || op0code == MINUS)
4310 && CONSTANT_P (op1)
4311 && CONSTANT_P (XEXP (op0, 1))
4312 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4313 {
4314 rtx x = XEXP (op0, 0);
4315 rtx c = XEXP (op0, 1);
4316
4317 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4318 cmp_mode, op1, c);
4319 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4320 }
4321
4322 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4323 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4324 if (code == NE
4325 && op1 == const0_rtx
4326 && GET_MODE_CLASS (mode) == MODE_INT
4327 && cmp_mode != VOIDmode
4328 /* ??? Work-around BImode bugs in the ia64 backend. */
4329 && mode != BImode
4330 && cmp_mode != BImode
4331 && nonzero_bits (op0, cmp_mode) == 1
4332 && STORE_FLAG_VALUE == 1)
4333 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4334 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4335 : lowpart_subreg (mode, op0, cmp_mode);
4336
4337 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4338 if ((code == EQ || code == NE)
4339 && op1 == const0_rtx
4340 && op0code == XOR)
4341 return simplify_gen_relational (code, mode, cmp_mode,
4342 XEXP (op0, 0), XEXP (op0, 1));
4343
4344 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4345 if ((code == EQ || code == NE)
4346 && op0code == XOR
4347 && rtx_equal_p (XEXP (op0, 0), op1)
4348 && !side_effects_p (XEXP (op0, 0)))
4349 return simplify_gen_relational (code, mode, cmp_mode,
4350 XEXP (op0, 1), const0_rtx);
4351
4352 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4353 if ((code == EQ || code == NE)
4354 && op0code == XOR
4355 && rtx_equal_p (XEXP (op0, 1), op1)
4356 && !side_effects_p (XEXP (op0, 1)))
4357 return simplify_gen_relational (code, mode, cmp_mode,
4358 XEXP (op0, 0), const0_rtx);
4359
4360 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4361 if ((code == EQ || code == NE)
4362 && op0code == XOR
4363 && (CONST_INT_P (op1)
4364 || GET_CODE (op1) == CONST_DOUBLE)
4365 && (CONST_INT_P (XEXP (op0, 1))
4366 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4367 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4368 simplify_gen_binary (XOR, cmp_mode,
4369 XEXP (op0, 1), op1));
4370
4371 if (op0code == POPCOUNT && op1 == const0_rtx)
4372 switch (code)
4373 {
4374 case EQ:
4375 case LE:
4376 case LEU:
4377 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4378 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4379 XEXP (op0, 0), const0_rtx);
4380
4381 case NE:
4382 case GT:
4383 case GTU:
4384 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4385 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4386 XEXP (op0, 0), const0_rtx);
4387
4388 default:
4389 break;
4390 }
4391
4392 return NULL_RTX;
4393 }
4394
4395 enum
4396 {
4397 CMP_EQ = 1,
4398 CMP_LT = 2,
4399 CMP_GT = 4,
4400 CMP_LTU = 8,
4401 CMP_GTU = 16
4402 };
4403
4404
4405 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4406 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4407 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4408 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4409 For floating-point comparisons, assume that the operands were ordered. */
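/* For instance, comparing the signed value -1 against 0 gives
   KNOWN_RESULTS == (CMP_LT | CMP_GTU), so LT and GTU map to
   const_true_rtx while GE and LEU map to const0_rtx. */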
4410
4411 static rtx
4412 comparison_result (enum rtx_code code, int known_results)
4413 {
4414 switch (code)
4415 {
4416 case EQ:
4417 case UNEQ:
4418 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4419 case NE:
4420 case LTGT:
4421 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4422
4423 case LT:
4424 case UNLT:
4425 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4426 case GE:
4427 case UNGE:
4428 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4429
4430 case GT:
4431 case UNGT:
4432 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4433 case LE:
4434 case UNLE:
4435 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4436
4437 case LTU:
4438 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4439 case GEU:
4440 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4441
4442 case GTU:
4443 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4444 case LEU:
4445 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4446
4447 case ORDERED:
4448 return const_true_rtx;
4449 case UNORDERED:
4450 return const0_rtx;
4451 default:
4452 gcc_unreachable ();
4453 }
4454 }
4455
4456 /* Check if the given comparison (done in the given MODE) is actually a
4457 tautology or a contradiction.
4458 If no simplification is possible, this function returns zero.
4459 Otherwise, it returns either const_true_rtx or const0_rtx. */
4460
4461 rtx
4462 simplify_const_relational_operation (enum rtx_code code,
4463 enum machine_mode mode,
4464 rtx op0, rtx op1)
4465 {
4466 rtx tem;
4467 rtx trueop0;
4468 rtx trueop1;
4469
4470 gcc_assert (mode != VOIDmode
4471 || (GET_MODE (op0) == VOIDmode
4472 && GET_MODE (op1) == VOIDmode));
4473
4474 /* If op0 is a compare, extract the comparison arguments from it. */
4475 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4476 {
4477 op1 = XEXP (op0, 1);
4478 op0 = XEXP (op0, 0);
4479
4480 if (GET_MODE (op0) != VOIDmode)
4481 mode = GET_MODE (op0);
4482 else if (GET_MODE (op1) != VOIDmode)
4483 mode = GET_MODE (op1);
4484 else
4485 return 0;
4486 }
4487
4488 /* We can't simplify MODE_CC values since we don't know what the
4489 actual comparison is. */
4490 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4491 return 0;
4492
4493 /* Make sure the constant is second. */
4494 if (swap_commutative_operands_p (op0, op1))
4495 {
4496 tem = op0, op0 = op1, op1 = tem;
4497 code = swap_condition (code);
4498 }
4499
4500 trueop0 = avoid_constant_pool_reference (op0);
4501 trueop1 = avoid_constant_pool_reference (op1);
4502
4503 /* For integer comparisons of A and B maybe we can simplify A - B and can
4504 then simplify a comparison of that with zero. If A and B are both either
4505 a register or a CONST_INT, this can't help; testing for these cases will
4506 prevent infinite recursion here and speed things up.
4507
4508 We can only do this for EQ and NE comparisons, as otherwise we may
4509 lose or introduce overflow that we cannot disregard as undefined, since
4510 we do not know the signedness of the operation on either the left or
4511 the right-hand side of the comparison. */
4512
4513 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4514 && (code == EQ || code == NE)
4515 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4516 && (REG_P (op1) || CONST_INT_P (trueop1)))
4517 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4518 /* We cannot do this if tem is a nonzero address. */
4519 && ! nonzero_address_p (tem))
4520 return simplify_const_relational_operation (signed_condition (code),
4521 mode, tem, const0_rtx);
4522
4523 if (! HONOR_NANS (mode) && code == ORDERED)
4524 return const_true_rtx;
4525
4526 if (! HONOR_NANS (mode) && code == UNORDERED)
4527 return const0_rtx;
4528
4529 /* For modes without NaNs, if the two operands are equal, we know the
4530 result except if they have side-effects. Even with NaNs we know
4531 the result of unordered comparisons and, if signaling NaNs are
4532 irrelevant, also the result of LT/GT/LTGT. */
4533 if ((! HONOR_NANS (GET_MODE (trueop0))
4534 || code == UNEQ || code == UNLE || code == UNGE
4535 || ((code == LT || code == GT || code == LTGT)
4536 && ! HONOR_SNANS (GET_MODE (trueop0))))
4537 && rtx_equal_p (trueop0, trueop1)
4538 && ! side_effects_p (trueop0))
4539 return comparison_result (code, CMP_EQ);
4540
4541 /* If the operands are floating-point constants, see if we can fold
4542 the result. */
4543 if (GET_CODE (trueop0) == CONST_DOUBLE
4544 && GET_CODE (trueop1) == CONST_DOUBLE
4545 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4546 {
4547 REAL_VALUE_TYPE d0, d1;
4548
4549 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4550 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4551
4552 /* Comparisons are unordered iff at least one of the values is NaN. */
4553 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4554 switch (code)
4555 {
4556 case UNEQ:
4557 case UNLT:
4558 case UNGT:
4559 case UNLE:
4560 case UNGE:
4561 case NE:
4562 case UNORDERED:
4563 return const_true_rtx;
4564 case EQ:
4565 case LT:
4566 case GT:
4567 case LE:
4568 case GE:
4569 case LTGT:
4570 case ORDERED:
4571 return const0_rtx;
4572 default:
4573 return 0;
4574 }
4575
4576 return comparison_result (code,
4577 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4578 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4579 }
4580
4581 /* Otherwise, see if the operands are both integers. */
4582 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4583 && (GET_CODE (trueop0) == CONST_DOUBLE
4584 || CONST_INT_P (trueop0))
4585 && (GET_CODE (trueop1) == CONST_DOUBLE
4586 || CONST_INT_P (trueop1)))
4587 {
4588 int width = GET_MODE_BITSIZE (mode);
4589 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4590 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4591
4592 /* Get the two words comprising each integer constant. */
4593 if (GET_CODE (trueop0) == CONST_DOUBLE)
4594 {
4595 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4596 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4597 }
4598 else
4599 {
4600 l0u = l0s = INTVAL (trueop0);
4601 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4602 }
4603
4604 if (GET_CODE (trueop1) == CONST_DOUBLE)
4605 {
4606 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4607 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4608 }
4609 else
4610 {
4611 l1u = l1s = INTVAL (trueop1);
4612 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4613 }
4614
4615 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4616 we have to sign or zero-extend the values. */
4617 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4618 {
4619 l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4620 l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4621
4622 if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4623 l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4624
4625 if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4626 l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4627 }
4628 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4629 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4630
4631 if (h0u == h1u && l0u == l1u)
4632 return comparison_result (code, CMP_EQ);
4633 else
4634 {
4635 int cr;
4636 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4637 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4638 return comparison_result (code, cr);
4639 }
4640 }
4641
4642 /* Optimize comparisons with upper and lower bounds. */
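/* For instance, if nonzero_bits shows that TRUEOP0 fits in 7 bits,
   (gtu x (const_int 255)) folds to const0_rtx and
   (leu x (const_int 255)) folds to const_true_rtx. */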
4643 if (SCALAR_INT_MODE_P (mode)
4644 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4645 && CONST_INT_P (trueop1))
4646 {
4647 int sign;
4648 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4649 HOST_WIDE_INT val = INTVAL (trueop1);
4650 HOST_WIDE_INT mmin, mmax;
4651
4652 if (code == GEU
4653 || code == LEU
4654 || code == GTU
4655 || code == LTU)
4656 sign = 0;
4657 else
4658 sign = 1;
4659
4660 /* Get a reduced range if the sign bit is zero. */
4661 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4662 {
4663 mmin = 0;
4664 mmax = nonzero;
4665 }
4666 else
4667 {
4668 rtx mmin_rtx, mmax_rtx;
4669 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4670
4671 mmin = INTVAL (mmin_rtx);
4672 mmax = INTVAL (mmax_rtx);
4673 if (sign)
4674 {
4675 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4676
4677 mmin >>= (sign_copies - 1);
4678 mmax >>= (sign_copies - 1);
4679 }
4680 }
4681
4682 switch (code)
4683 {
4684 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4685 case GEU:
4686 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4687 return const_true_rtx;
4688 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4689 return const0_rtx;
4690 break;
4691 case GE:
4692 if (val <= mmin)
4693 return const_true_rtx;
4694 if (val > mmax)
4695 return const0_rtx;
4696 break;
4697
4698 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4699 case LEU:
4700 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4701 return const_true_rtx;
4702 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4703 return const0_rtx;
4704 break;
4705 case LE:
4706 if (val >= mmax)
4707 return const_true_rtx;
4708 if (val < mmin)
4709 return const0_rtx;
4710 break;
4711
4712 case EQ:
4713 /* x == y is always false for y out of range. */
4714 if (val < mmin || val > mmax)
4715 return const0_rtx;
4716 break;
4717
4718 /* x > y is always false for y >= mmax, always true for y < mmin. */
4719 case GTU:
4720 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4721 return const0_rtx;
4722 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4723 return const_true_rtx;
4724 break;
4725 case GT:
4726 if (val >= mmax)
4727 return const0_rtx;
4728 if (val < mmin)
4729 return const_true_rtx;
4730 break;
4731
4732 /* x < y is always false for y <= mmin, always true for y > mmax. */
4733 case LTU:
4734 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4735 return const0_rtx;
4736 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4737 return const_true_rtx;
4738 break;
4739 case LT:
4740 if (val <= mmin)
4741 return const0_rtx;
4742 if (val > mmax)
4743 return const_true_rtx;
4744 break;
4745
4746 case NE:
4747 /* x != y is always true for y out of range. */
4748 if (val < mmin || val > mmax)
4749 return const_true_rtx;
4750 break;
4751
4752 default:
4753 break;
4754 }
4755 }
4756
4757 /* Optimize integer comparisons with zero. */
4758 if (trueop1 == const0_rtx)
4759 {
4760 /* Some addresses are known to be nonzero. We don't know
4761 their sign, but equality comparisons are known. */
4762 if (nonzero_address_p (trueop0))
4763 {
4764 if (code == EQ || code == LEU)
4765 return const0_rtx;
4766 if (code == NE || code == GTU)
4767 return const_true_rtx;
4768 }
4769
4770 /* See if the first operand is an IOR with a constant. If so, we
4771 may be able to determine the result of this comparison. */
4772 if (GET_CODE (op0) == IOR)
4773 {
4774 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4775 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4776 {
4777 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4778 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4779 && (UINTVAL (inner_const)
4780 & ((unsigned HOST_WIDE_INT) 1
4781 << sign_bitnum)));
4782
4783 switch (code)
4784 {
4785 case EQ:
4786 case LEU:
4787 return const0_rtx;
4788 case NE:
4789 case GTU:
4790 return const_true_rtx;
4791 case LT:
4792 case LE:
4793 if (has_sign)
4794 return const_true_rtx;
4795 break;
4796 case GT:
4797 case GE:
4798 if (has_sign)
4799 return const0_rtx;
4800 break;
4801 default:
4802 break;
4803 }
4804 }
4805 }
4806 }
4807
4808 /* Optimize comparison of ABS with zero. */
4809 if (trueop1 == CONST0_RTX (mode)
4810 && (GET_CODE (trueop0) == ABS
4811 || (GET_CODE (trueop0) == FLOAT_EXTEND
4812 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4813 {
4814 switch (code)
4815 {
4816 case LT:
4817 /* Optimize abs(x) < 0.0. */
4818 if (!HONOR_SNANS (mode)
4819 && (!INTEGRAL_MODE_P (mode)
4820 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4821 {
4822 if (INTEGRAL_MODE_P (mode)
4823 && (issue_strict_overflow_warning
4824 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4825 warning (OPT_Wstrict_overflow,
4826 ("assuming signed overflow does not occur when "
4827 "assuming abs (x) < 0 is false"));
4828 return const0_rtx;
4829 }
4830 break;
4831
4832 case GE:
4833 /* Optimize abs(x) >= 0.0. */
4834 if (!HONOR_NANS (mode)
4835 && (!INTEGRAL_MODE_P (mode)
4836 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4837 {
4838 if (INTEGRAL_MODE_P (mode)
4839 && (issue_strict_overflow_warning
4840 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4841 warning (OPT_Wstrict_overflow,
4842 ("assuming signed overflow does not occur when "
4843 "assuming abs (x) >= 0 is true"));
4844 return const_true_rtx;
4845 }
4846 break;
4847
4848 case UNGE:
4849 /* Optimize ! (abs(x) < 0.0). */
4850 return const_true_rtx;
4851
4852 default:
4853 break;
4854 }
4855 }
4856
4857 return 0;
4858 }
4859 \f
4860 /* Simplify CODE, an operation with result mode MODE and three operands,
4861 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4862 a constant. Return 0 if no simplification is possible. */
4863
4864 rtx
4865 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4866 enum machine_mode op0_mode, rtx op0, rtx op1,
4867 rtx op2)
4868 {
4869 unsigned int width = GET_MODE_BITSIZE (mode);
4870 bool any_change = false;
4871 rtx tem;
4872
4873 /* VOIDmode means "infinite" precision. */
4874 if (width == 0)
4875 width = HOST_BITS_PER_WIDE_INT;
4876
4877 switch (code)
4878 {
4879 case FMA:
4880 /* Simplify negations around the multiplication. */
4881 /* -a * -b + c => a * b + c. */
4882 if (GET_CODE (op0) == NEG)
4883 {
4884 tem = simplify_unary_operation (NEG, mode, op1, mode);
4885 if (tem)
4886 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4887 }
4888 else if (GET_CODE (op1) == NEG)
4889 {
4890 tem = simplify_unary_operation (NEG, mode, op0, mode);
4891 if (tem)
4892 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4893 }
4894
4895 /* Canonicalize the two multiplication operands. */
4896 /* a * -b + c => -b * a + c. */
4897 if (swap_commutative_operands_p (op0, op1))
4898 tem = op0, op0 = op1, op1 = tem, any_change = true;
4899
4900 if (any_change)
4901 return gen_rtx_FMA (mode, op0, op1, op2);
4902 return NULL_RTX;
4903
4904 case SIGN_EXTRACT:
4905 case ZERO_EXTRACT:
4906 if (CONST_INT_P (op0)
4907 && CONST_INT_P (op1)
4908 && CONST_INT_P (op2)
4909 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4910 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4911 {
4912 /* Extracting a bit-field from a constant */
4913 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4914
4915 if (BITS_BIG_ENDIAN)
4916 val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
4917 else
4918 val >>= INTVAL (op2);
4919
4920 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4921 {
4922 /* First zero-extend. */
4923 val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4924 /* If desired, propagate sign bit. */
4925 if (code == SIGN_EXTRACT
4926 && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))
4927 != 0)
4928 val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4929 }
4930
4931 /* Clear the bits that don't belong in our mode,
4932 unless they and our sign bit are all one.
4933 So we get either a reasonable negative value or a reasonable
4934 unsigned value for this mode. */
4935 if (width < HOST_BITS_PER_WIDE_INT
4936 && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
4937 != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
4938 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4939
4940 return gen_int_mode (val, mode);
4941 }
4942 break;
4943
4944 case IF_THEN_ELSE:
4945 if (CONST_INT_P (op0))
4946 return op0 != const0_rtx ? op1 : op2;
4947
4948 /* Convert c ? a : a into "a". */
4949 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4950 return op1;
4951
4952 /* Convert a != b ? a : b into "a". */
4953 if (GET_CODE (op0) == NE
4954 && ! side_effects_p (op0)
4955 && ! HONOR_NANS (mode)
4956 && ! HONOR_SIGNED_ZEROS (mode)
4957 && ((rtx_equal_p (XEXP (op0, 0), op1)
4958 && rtx_equal_p (XEXP (op0, 1), op2))
4959 || (rtx_equal_p (XEXP (op0, 0), op2)
4960 && rtx_equal_p (XEXP (op0, 1), op1))))
4961 return op1;
4962
4963 /* Convert a == b ? a : b into "b". */
4964 if (GET_CODE (op0) == EQ
4965 && ! side_effects_p (op0)
4966 && ! HONOR_NANS (mode)
4967 && ! HONOR_SIGNED_ZEROS (mode)
4968 && ((rtx_equal_p (XEXP (op0, 0), op1)
4969 && rtx_equal_p (XEXP (op0, 1), op2))
4970 || (rtx_equal_p (XEXP (op0, 0), op2)
4971 && rtx_equal_p (XEXP (op0, 1), op1))))
4972 return op2;
4973
4974 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4975 {
4976 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4977 ? GET_MODE (XEXP (op0, 1))
4978 : GET_MODE (XEXP (op0, 0)));
4979 rtx temp;
4980
4981 /* Look for happy constants in op1 and op2. */
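/* E.g. with STORE_FLAG_VALUE == 1, (if_then_else (lt a b)
   (const_int 1) (const_int 0)) becomes just (lt a b), and with the
   arms swapped the reversed comparison (ge a b) is used instead. */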
4982 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4983 {
4984 HOST_WIDE_INT t = INTVAL (op1);
4985 HOST_WIDE_INT f = INTVAL (op2);
4986
4987 if (t == STORE_FLAG_VALUE && f == 0)
4988 code = GET_CODE (op0);
4989 else if (t == 0 && f == STORE_FLAG_VALUE)
4990 {
4991 enum rtx_code tmp;
4992 tmp = reversed_comparison_code (op0, NULL_RTX);
4993 if (tmp == UNKNOWN)
4994 break;
4995 code = tmp;
4996 }
4997 else
4998 break;
4999
5000 return simplify_gen_relational (code, mode, cmp_mode,
5001 XEXP (op0, 0), XEXP (op0, 1));
5002 }
5003
5004 if (cmp_mode == VOIDmode)
5005 cmp_mode = op0_mode;
5006 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5007 cmp_mode, XEXP (op0, 0),
5008 XEXP (op0, 1));
5009
5010 /* See if any simplifications were possible. */
5011 if (temp)
5012 {
5013 if (CONST_INT_P (temp))
5014 return temp == const0_rtx ? op2 : op1;
5015 else if (temp)
5016 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5017 }
5018 }
5019 break;
5020
5021 case VEC_MERGE:
5022 gcc_assert (GET_MODE (op0) == mode);
5023 gcc_assert (GET_MODE (op1) == mode);
5024 gcc_assert (VECTOR_MODE_P (mode));
5025 op2 = avoid_constant_pool_reference (op2);
5026 if (CONST_INT_P (op2))
5027 {
5028 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5029 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5030 int mask = (1 << n_elts) - 1;
5031
5032 if (!(INTVAL (op2) & mask))
5033 return op1;
5034 if ((INTVAL (op2) & mask) == mask)
5035 return op0;
5036
5037 op0 = avoid_constant_pool_reference (op0);
5038 op1 = avoid_constant_pool_reference (op1);
5039 if (GET_CODE (op0) == CONST_VECTOR
5040 && GET_CODE (op1) == CONST_VECTOR)
5041 {
5042 rtvec v = rtvec_alloc (n_elts);
5043 unsigned int i;
5044
5045 for (i = 0; i < n_elts; i++)
5046 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5047 ? CONST_VECTOR_ELT (op0, i)
5048 : CONST_VECTOR_ELT (op1, i));
5049 return gen_rtx_CONST_VECTOR (mode, v);
5050 }
5051 }
5052 break;
5053
5054 default:
5055 gcc_unreachable ();
5056 }
5057
5058 return 0;
5059 }
5060
5061 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5062 or CONST_VECTOR,
5063 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5064
5065 Works by unpacking OP into a collection of 8-bit values
5066 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5067 and then repacking them again for OUTERMODE. */
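/* For example, on a little-endian target a QImode subreg of the
   SImode constant 0x12345678 at byte 0 unpacks to the byte array
   {0x78, 0x56, 0x34, 0x12} and repacks to (const_int 0x78). */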
5068
5069 static rtx
5070 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5071 enum machine_mode innermode, unsigned int byte)
5072 {
5073 /* We support up to 512-bit values (for V8DFmode). */
5074 enum {
5075 max_bitsize = 512,
5076 value_bit = 8,
5077 value_mask = (1 << value_bit) - 1
5078 };
5079 unsigned char value[max_bitsize / value_bit];
5080 int value_start;
5081 int i;
5082 int elem;
5083
5084 int num_elem;
5085 rtx * elems;
5086 int elem_bitsize;
5087 rtx result_s;
5088 rtvec result_v = NULL;
5089 enum mode_class outer_class;
5090 enum machine_mode outer_submode;
5091
5092 /* Some ports misuse CCmode. */
5093 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5094 return op;
5095
5096 /* We have no way to represent a complex constant at the rtl level. */
5097 if (COMPLEX_MODE_P (outermode))
5098 return NULL_RTX;
5099
5100 /* Unpack the value. */
5101
5102 if (GET_CODE (op) == CONST_VECTOR)
5103 {
5104 num_elem = CONST_VECTOR_NUNITS (op);
5105 elems = &CONST_VECTOR_ELT (op, 0);
5106 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5107 }
5108 else
5109 {
5110 num_elem = 1;
5111 elems = &op;
5112 elem_bitsize = max_bitsize;
5113 }
5114 /* If this assertion fails, the value is too complicated; reducing value_bit may help. */
5115 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5116 /* I don't know how to handle endianness of sub-units. */
5117 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5118
5119 for (elem = 0; elem < num_elem; elem++)
5120 {
5121 unsigned char * vp;
5122 rtx el = elems[elem];
5123
5124 /* Vectors are kept in target memory order. (This is probably
5125 a mistake.) */
5126 {
5127 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5128 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5129 / BITS_PER_UNIT);
5130 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5131 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5132 unsigned bytele = (subword_byte % UNITS_PER_WORD
5133 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5134 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5135 }
5136
5137 switch (GET_CODE (el))
5138 {
5139 case CONST_INT:
5140 for (i = 0;
5141 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5142 i += value_bit)
5143 *vp++ = INTVAL (el) >> i;
5144 /* CONST_INTs are always logically sign-extended. */
5145 for (; i < elem_bitsize; i += value_bit)
5146 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5147 break;
5148
5149 case CONST_DOUBLE:
5150 if (GET_MODE (el) == VOIDmode)
5151 {
5152 /* If this triggers, someone should have generated a
5153 CONST_INT instead. */
5154 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5155
5156 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5157 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5158 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
5159 {
5160 *vp++
5161 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5162 i += value_bit;
5163 }
5164 /* It shouldn't matter what's done here, so fill it with
5165 zero. */
5166 for (; i < elem_bitsize; i += value_bit)
5167 *vp++ = 0;
5168 }
5169 else
5170 {
5171 long tmp[max_bitsize / 32];
5172 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5173
5174 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5175 gcc_assert (bitsize <= elem_bitsize);
5176 gcc_assert (bitsize % value_bit == 0);
5177
5178 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5179 GET_MODE (el));
5180
5181 /* real_to_target produces its result in words affected by
5182 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5183 and use WORDS_BIG_ENDIAN instead; see the documentation
5184 of SUBREG in rtl.texi. */
5185 for (i = 0; i < bitsize; i += value_bit)
5186 {
5187 int ibase;
5188 if (WORDS_BIG_ENDIAN)
5189 ibase = bitsize - 1 - i;
5190 else
5191 ibase = i;
5192 *vp++ = tmp[ibase / 32] >> i % 32;
5193 }
5194
5195 /* It shouldn't matter what's done here, so fill it with
5196 zero. */
5197 for (; i < elem_bitsize; i += value_bit)
5198 *vp++ = 0;
5199 }
5200 break;
5201
5202 case CONST_FIXED:
5203 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5204 {
5205 for (i = 0; i < elem_bitsize; i += value_bit)
5206 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5207 }
5208 else
5209 {
5210 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5211 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5212 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5213 i += value_bit)
5214 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5215 >> (i - HOST_BITS_PER_WIDE_INT);
5216 for (; i < elem_bitsize; i += value_bit)
5217 *vp++ = 0;
5218 }
5219 break;
5220
5221 default:
5222 gcc_unreachable ();
5223 }
5224 }
5225
5226 /* Now, pick the right byte to start with. */
5227 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5228 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5229 will already have offset 0. */
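/* E.g. on a big-endian target with 4-byte words, the SImode lowpart
   of a DImode value has BYTE == 4; the adjustment below renumbers it
   to byte 0, the position of the least-significant byte. */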
5230 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5231 {
5232 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5233 - byte);
5234 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5235 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5236 byte = (subword_byte % UNITS_PER_WORD
5237 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5238 }
5239
5240 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5241 so if it's become negative it will instead be very large.) */
5242 gcc_assert (byte < GET_MODE_SIZE (innermode));
5243
5244 /* Convert from bytes to chunks of size value_bit. */
5245 value_start = byte * (BITS_PER_UNIT / value_bit);
5246
5247 /* Re-pack the value. */
5248
5249 if (VECTOR_MODE_P (outermode))
5250 {
5251 num_elem = GET_MODE_NUNITS (outermode);
5252 result_v = rtvec_alloc (num_elem);
5253 elems = &RTVEC_ELT (result_v, 0);
5254 outer_submode = GET_MODE_INNER (outermode);
5255 }
5256 else
5257 {
5258 num_elem = 1;
5259 elems = &result_s;
5260 outer_submode = outermode;
5261 }
5262
5263 outer_class = GET_MODE_CLASS (outer_submode);
5264 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5265
5266 gcc_assert (elem_bitsize % value_bit == 0);
5267 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5268
5269 for (elem = 0; elem < num_elem; elem++)
5270 {
5271 unsigned char *vp;
5272
5273 /* Vectors are stored in target memory order. (This is probably
5274 a mistake.) */
5275 {
5276 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5277 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5278 / BITS_PER_UNIT);
5279 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5280 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5281 unsigned bytele = (subword_byte % UNITS_PER_WORD
5282 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5283 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5284 }
5285
5286 switch (outer_class)
5287 {
5288 case MODE_INT:
5289 case MODE_PARTIAL_INT:
5290 {
5291 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5292
5293 for (i = 0;
5294 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5295 i += value_bit)
5296 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5297 for (; i < elem_bitsize; i += value_bit)
5298 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5299 << (i - HOST_BITS_PER_WIDE_INT);
5300
5301 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5302 know why. */
5303 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5304 elems[elem] = gen_int_mode (lo, outer_submode);
5305 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5306 elems[elem] = immed_double_const (lo, hi, outer_submode);
5307 else
5308 return NULL_RTX;
5309 }
5310 break;
5311
5312 case MODE_FLOAT:
5313 case MODE_DECIMAL_FLOAT:
5314 {
5315 REAL_VALUE_TYPE r;
5316 long tmp[max_bitsize / 32];
5317
5318 /* real_from_target wants its input in words affected by
5319 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5320 and use WORDS_BIG_ENDIAN instead; see the documentation
5321 of SUBREG in rtl.texi. */
5322 for (i = 0; i < max_bitsize / 32; i++)
5323 tmp[i] = 0;
5324 for (i = 0; i < elem_bitsize; i += value_bit)
5325 {
5326 int ibase;
5327 if (WORDS_BIG_ENDIAN)
5328 ibase = elem_bitsize - 1 - i;
5329 else
5330 ibase = i;
5331 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5332 }
5333
5334 real_from_target (&r, tmp, outer_submode);
5335 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5336 }
5337 break;
5338
5339 case MODE_FRACT:
5340 case MODE_UFRACT:
5341 case MODE_ACCUM:
5342 case MODE_UACCUM:
5343 {
5344 FIXED_VALUE_TYPE f;
5345 f.data.low = 0;
5346 f.data.high = 0;
5347 f.mode = outer_submode;
5348
5349 for (i = 0;
5350 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5351 i += value_bit)
5352 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5353 for (; i < elem_bitsize; i += value_bit)
5354 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5355 << (i - HOST_BITS_PER_WIDE_INT));
5356
5357 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5358 }
5359 break;
5360
5361 default:
5362 gcc_unreachable ();
5363 }
5364 }
5365 if (VECTOR_MODE_P (outermode))
5366 return gen_rtx_CONST_VECTOR (outermode, result_v);
5367 else
5368 return result_s;
5369 }
5370
5371 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5372 Return 0 if no simplifications are possible. */
5373 rtx
5374 simplify_subreg (enum machine_mode outermode, rtx op,
5375 enum machine_mode innermode, unsigned int byte)
5376 {
5377 /* Little bit of sanity checking. */
5378 gcc_assert (innermode != VOIDmode);
5379 gcc_assert (outermode != VOIDmode);
5380 gcc_assert (innermode != BLKmode);
5381 gcc_assert (outermode != BLKmode);
5382
5383 gcc_assert (GET_MODE (op) == innermode
5384 || GET_MODE (op) == VOIDmode);
5385
5386 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5387 gcc_assert (byte < GET_MODE_SIZE (innermode));
5388
5389 if (outermode == innermode && !byte)
5390 return op;
5391
5392 if (CONST_INT_P (op)
5393 || GET_CODE (op) == CONST_DOUBLE
5394 || GET_CODE (op) == CONST_FIXED
5395 || GET_CODE (op) == CONST_VECTOR)
5396 return simplify_immed_subreg (outermode, op, innermode, byte);
5397
5398 /* Changing mode twice with SUBREG => just change it once,
5399 or not at all if changing back to OP's starting mode. */
5400 if (GET_CODE (op) == SUBREG)
5401 {
5402 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5403 int final_offset = byte + SUBREG_BYTE (op);
5404 rtx newx;
5405
5406 if (outermode == innermostmode
5407 && byte == 0 && SUBREG_BYTE (op) == 0)
5408 return SUBREG_REG (op);
5409
5410 /* The SUBREG_BYTE represents the offset, as if the value were stored
5411 in memory. The irritating exception is a paradoxical subreg, where
5412 we define SUBREG_BYTE to be 0. On big-endian machines, this
5413 value should be negative. For a moment, undo this exception. */
5414 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5415 {
5416 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5417 if (WORDS_BIG_ENDIAN)
5418 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5419 if (BYTES_BIG_ENDIAN)
5420 final_offset += difference % UNITS_PER_WORD;
5421 }
5422 if (SUBREG_BYTE (op) == 0
5423 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5424 {
5425 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5426 if (WORDS_BIG_ENDIAN)
5427 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5428 if (BYTES_BIG_ENDIAN)
5429 final_offset += difference % UNITS_PER_WORD;
5430 }
5431
5432 /* See whether resulting subreg will be paradoxical. */
5433 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5434 {
5435 /* In nonparadoxical subregs we can't handle negative offsets. */
5436 if (final_offset < 0)
5437 return NULL_RTX;
5438 /* Bail out in case resulting subreg would be incorrect. */
5439 if (final_offset % GET_MODE_SIZE (outermode)
5440 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5441 return NULL_RTX;
5442 }
5443 else
5444 {
5445 int offset = 0;
5446 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5447
5448 /* In a paradoxical subreg, see if we are still looking at the lower part.
5449 If so, our SUBREG_BYTE will be 0. */
5450 if (WORDS_BIG_ENDIAN)
5451 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5452 if (BYTES_BIG_ENDIAN)
5453 offset += difference % UNITS_PER_WORD;
5454 if (offset == final_offset)
5455 final_offset = 0;
5456 else
5457 return NULL_RTX;
5458 }
5459
5460 /* Recurse for further possible simplifications. */
5461 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5462 final_offset);
5463 if (newx)
5464 return newx;
5465 if (validate_subreg (outermode, innermostmode,
5466 SUBREG_REG (op), final_offset))
5467 {
5468 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5469 if (SUBREG_PROMOTED_VAR_P (op)
5470 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5471 && GET_MODE_CLASS (outermode) == MODE_INT
5472 && IN_RANGE (GET_MODE_SIZE (outermode),
5473 GET_MODE_SIZE (innermode),
5474 GET_MODE_SIZE (innermostmode))
5475 && subreg_lowpart_p (newx))
5476 {
5477 SUBREG_PROMOTED_VAR_P (newx) = 1;
5478 SUBREG_PROMOTED_UNSIGNED_SET
5479 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5480 }
5481 return newx;
5482 }
5483 return NULL_RTX;
5484 }
5485
5486 /* Merge implicit and explicit truncations. */
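/* E.g. a QImode lowpart subreg of (truncate:HI (x:SI)) becomes
   (truncate:QI (x:SI)). */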
5487
5488 if (GET_CODE (op) == TRUNCATE
5489 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5490 && subreg_lowpart_offset (outermode, innermode) == byte)
5491 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5492 GET_MODE (XEXP (op, 0)));
5493
5494 /* SUBREG of a hard register => just change the register number
5495 and/or mode. If the hard register is not valid in that mode,
5496 suppress this simplification. If the hard register is the stack,
5497 frame, or argument pointer, leave this as a SUBREG. */
5498
5499 if (REG_P (op) && HARD_REGISTER_P (op))
5500 {
5501 unsigned int regno, final_regno;
5502
5503 regno = REGNO (op);
5504 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5505 if (HARD_REGISTER_NUM_P (final_regno))
5506 {
5507 rtx x;
5508 int final_offset = byte;
5509
5510 /* Adjust offset for paradoxical subregs. */
5511 if (byte == 0
5512 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5513 {
5514 int difference = (GET_MODE_SIZE (innermode)
5515 - GET_MODE_SIZE (outermode));
5516 if (WORDS_BIG_ENDIAN)
5517 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5518 if (BYTES_BIG_ENDIAN)
5519 final_offset += difference % UNITS_PER_WORD;
5520 }
5521
5522 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5523
5524 /* Propagate the original regno. We don't have any way to specify
5525 the offset inside the original regno, so do so only for the lowpart.
5526 The information is used only by alias analysis, which cannot
5527 grok partial registers anyway. */
5528
5529 if (subreg_lowpart_offset (outermode, innermode) == byte)
5530 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5531 return x;
5532 }
5533 }
5534
5535 /* If we have a SUBREG of a register that we are replacing and we are
5536 replacing it with a MEM, make a new MEM and try replacing the
5537 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5538 or if we would be widening it. */
5539
5540 if (MEM_P (op)
5541 && ! mode_dependent_address_p (XEXP (op, 0))
5542 /* Allow splitting of volatile memory references in case we don't
5543 have an instruction to move the whole thing. */
5544 && (! MEM_VOLATILE_P (op)
5545 || ! have_insn_for (SET, innermode))
5546 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5547 return adjust_address_nv (op, outermode, byte);
5548
5549 /* Handle complex values represented as CONCAT
5550 of real and imaginary part. */
5551 if (GET_CODE (op) == CONCAT)
5552 {
5553 unsigned int part_size, final_offset;
5554 rtx part, res;
5555
5556 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5557 if (byte < part_size)
5558 {
5559 part = XEXP (op, 0);
5560 final_offset = byte;
5561 }
5562 else
5563 {
5564 part = XEXP (op, 1);
5565 final_offset = byte - part_size;
5566 }
5567
5568 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5569 return NULL_RTX;
5570
5571 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5572 if (res)
5573 return res;
5574 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5575 return gen_rtx_SUBREG (outermode, part, final_offset);
5576 return NULL_RTX;
5577 }
5578
5579 /* Optimize SUBREG truncations of zero and sign extended values. */
5580 if ((GET_CODE (op) == ZERO_EXTEND
5581 || GET_CODE (op) == SIGN_EXTEND)
5582 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5583 {
5584 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5585
5586 /* If we're requesting the lowpart of a zero or sign extension,
5587 there are three possibilities. If the outermode is the same
5588 as the origmode, we can omit both the extension and the subreg.
5589 If the outermode is not larger than the origmode, we can apply
5590 the truncation without the extension. Finally, if the outermode
5591 is larger than the origmode, but both are integer modes, we
5592 can just extend to the appropriate mode. */
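/* E.g. the HImode lowpart of (zero_extend:SI (x:QI)) becomes
   (zero_extend:HI (x:QI)), while the QImode lowpart is just X itself. */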
5593 if (bitpos == 0)
5594 {
5595 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5596 if (outermode == origmode)
5597 return XEXP (op, 0);
5598 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5599 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5600 subreg_lowpart_offset (outermode,
5601 origmode));
5602 if (SCALAR_INT_MODE_P (outermode))
5603 return simplify_gen_unary (GET_CODE (op), outermode,
5604 XEXP (op, 0), origmode);
5605 }
5606
5607 /* A SUBREG resulting from a zero extension may fold to zero if
5608 it extracts only bits that lie above the ZERO_EXTEND's source bits. */
5609 if (GET_CODE (op) == ZERO_EXTEND
5610 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5611 return CONST0_RTX (outermode);
5612 }
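/* For example, on a little-endian target:
   (subreg:QI (zero_extend:SI (reg:QI x)) 0) folds to (reg:QI x);
   (subreg:HI (zero_extend:SI (reg:QI x)) 0) folds to
   (zero_extend:HI (reg:QI x)); and
   (subreg:HI (zero_extend:SI (reg:QI x)) 2) reads only bits above the
   QImode source, so it folds to (const_int 0).  */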
5613
5614 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5615 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5616 the outer subreg is effectively a truncation to the original mode. */
5617 if ((GET_CODE (op) == LSHIFTRT
5618 || GET_CODE (op) == ASHIFTRT)
5619 && SCALAR_INT_MODE_P (outermode)
5620 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
5621 to avoid the possibility that an outer LSHIFTRT shifts by more
5622 than the sign extension's sign_bit_copies and introduces zeros
5623 into the high bits of the result. */
5624 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5625 && CONST_INT_P (XEXP (op, 1))
5626 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5627 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5628 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5629 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5630 return simplify_gen_binary (ASHIFTRT, outermode,
5631 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
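/* E.g. on a little-endian target,
   (subreg:QI (ashiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 3)) 0)
   becomes (ashiftrt:QI (reg:QI x) (const_int 3)).  */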
5632
5633 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5634 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5635 the outer subreg is effectively a truncation to the original mode. */
5636 if ((GET_CODE (op) == LSHIFTRT
5637 || GET_CODE (op) == ASHIFTRT)
5638 && SCALAR_INT_MODE_P (outermode)
5639 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5640 && CONST_INT_P (XEXP (op, 1))
5641 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5642 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5643 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5644 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5645 return simplify_gen_binary (LSHIFTRT, outermode,
5646 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
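/* E.g. on a little-endian target,
   (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI x)) (const_int 2)) 0)
   becomes (lshiftrt:QI (reg:QI x) (const_int 2)).  */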
5647
5648 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5649 (ashift:QI (x:QI) C), where C is a suitable small constant and
5650 the outer subreg is effectively a truncation to the original mode. */
5651 if (GET_CODE (op) == ASHIFT
5652 && SCALAR_INT_MODE_P (outermode)
5653 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5654 && CONST_INT_P (XEXP (op, 1))
5655 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5656 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5657 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5658 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5659 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5660 return simplify_gen_binary (ASHIFT, outermode,
5661 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
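/* E.g. on a little-endian target,
   (subreg:QI (ashift:SI (zero_extend:SI (reg:QI x)) (const_int 2)) 0)
   becomes (ashift:QI (reg:QI x) (const_int 2)).  */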
5662
5663 /* Recognize a word extraction from a multi-word subreg. */
5664 if ((GET_CODE (op) == LSHIFTRT
5665 || GET_CODE (op) == ASHIFTRT)
5666 && SCALAR_INT_MODE_P (outermode)
5667 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5668 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5669 && CONST_INT_P (XEXP (op, 1))
5670 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5671 && INTVAL (XEXP (op, 1)) >= 0
5672 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5673 && byte == subreg_lowpart_offset (outermode, innermode))
5674 {
5675 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5676 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5677 (WORDS_BIG_ENDIAN
5678 ? byte - shifted_bytes
5679 : byte + shifted_bytes));
5680 }
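/* For illustration, assuming a 32-bit little-endian target
   (BITS_PER_WORD == 32):
   (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) extracts the
   high word, i.e. (subreg:SI (reg:DI x) 4).  */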
5681
5682 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5683 and try replacing the SUBREG and shift with it. Don't do this if
5684 the MEM has a mode-dependent address or if we would be widening it. */
5685
5686 if ((GET_CODE (op) == LSHIFTRT
5687 || GET_CODE (op) == ASHIFTRT)
5688 && MEM_P (XEXP (op, 0))
5689 && CONST_INT_P (XEXP (op, 1))
5690 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5691 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5692 && INTVAL (XEXP (op, 1)) > 0
5693 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5694 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5695 && ! MEM_VOLATILE_P (XEXP (op, 0))
5696 && byte == subreg_lowpart_offset (outermode, innermode)
5697 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5698 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5699 {
5700 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5701 return adjust_address_nv (XEXP (op, 0), outermode,
5702 (WORDS_BIG_ENDIAN
5703 ? byte - shifted_bytes
5704 : byte + shifted_bytes));
5705 }
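/* Under the same assumptions (32-bit little-endian target, non-volatile
   MEM whose address is not mode-dependent),
   (subreg:SI (lshiftrt:DI (mem:DI (reg:SI p)) (const_int 32)) 0)
   becomes (mem:SI (plus:SI (reg:SI p) (const_int 4))).  */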
5706
5707 return NULL_RTX;
5708 }
5709
5710 /* Make a SUBREG operation or equivalent if it folds. */
5711
5712 rtx
5713 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5714 enum machine_mode innermode, unsigned int byte)
5715 {
5716 rtx newx;
5717
5718 newx = simplify_subreg (outermode, op, innermode, byte);
5719 if (newx)
5720 return newx;
5721
5722 if (GET_CODE (op) == SUBREG
5723 || GET_CODE (op) == CONCAT
5724 || GET_MODE (op) == VOIDmode)
5725 return NULL_RTX;
5726
5727 if (validate_subreg (outermode, innermode, op, byte))
5728 return gen_rtx_SUBREG (outermode, op, byte);
5729
5730 return NULL_RTX;
5731 }
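/* A hypothetical use, assuming a little-endian target: callers pass the
   operand's mode separately because constants carry no mode, so
   simplify_gen_subreg (QImode, GEN_INT (0x1234), HImode, 0) folds through
   simplify_subreg to (const_int 0x34); an unfoldable but valid request
   yields a fresh SUBREG, and anything else NULL_RTX.  */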
5732
5733 /* Simplify X, an rtx expression.
5734
5735 Return the simplified expression or NULL if no simplifications
5736 were possible.
5737
5738 This is the preferred entry point into the simplification routines;
5739 however, we still allow passes to call the more specific routines.
5740
5741 Right now GCC has three (yes, three) major bodies of RTL simplification
5742 code that need to be unified.
5743
5744 1. fold_rtx in cse.c. This code uses various CSE specific
5745 information to aid in RTL simplification.
5746
5747 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5748 it uses combine specific information to aid in RTL
5749 simplification.
5750
5751 3. The routines in this file.
5752
5753
5754 Long term we want to only have one body of simplification code; to
5755 get to that state I recommend the following steps:
5756
5757 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5758 that do not depend on pass-specific state into these routines.
5759
5760 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5761 use this routine whenever possible.
5762
5763 3. Allow for pass dependent state to be provided to these
5764 routines and add simplifications based on the pass dependent
5765 state. Remove code from cse.c & combine.c that becomes
5766 redundant/dead.
5767
5768 It will take time, but ultimately the compiler will be easier to
5769 maintain and improve. It's totally silly that when we add a
5770 simplification it needs to be added to 4 places (3 for RTL
5771 simplification and 1 for tree simplification). */
5772
5773 rtx
5774 simplify_rtx (const_rtx x)
5775 {
5776 const enum rtx_code code = GET_CODE (x);
5777 const enum machine_mode mode = GET_MODE (x);
5778
5779 switch (GET_RTX_CLASS (code))
5780 {
5781 case RTX_UNARY:
5782 return simplify_unary_operation (code, mode,
5783 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5784 case RTX_COMM_ARITH:
5785 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5786 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5787
5788 /* Fall through.... */
5789
5790 case RTX_BIN_ARITH:
5791 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5792
5793 case RTX_TERNARY:
5794 case RTX_BITFIELD_OPS:
5795 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5796 XEXP (x, 0), XEXP (x, 1),
5797 XEXP (x, 2));
5798
5799 case RTX_COMPARE:
5800 case RTX_COMM_COMPARE:
5801 return simplify_relational_operation (code, mode,
5802 ((GET_MODE (XEXP (x, 0))
5803 != VOIDmode)
5804 ? GET_MODE (XEXP (x, 0))
5805 : GET_MODE (XEXP (x, 1))),
5806 XEXP (x, 0),
5807 XEXP (x, 1));
5808
5809 case RTX_EXTRA:
5810 if (code == SUBREG)
5811 return simplify_subreg (mode, SUBREG_REG (x),
5812 GET_MODE (SUBREG_REG (x)),
5813 SUBREG_BYTE (x));
5814 break;
5815
5816 case RTX_OBJ:
5817 if (code == LO_SUM)
5818 {
5819 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5820 if (GET_CODE (XEXP (x, 0)) == HIGH
5821 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5822 return XEXP (x, 1);
5823 }
5824 break;
5825
5826 default:
5827 break;
5828 }
5829 return NULL;
5830 }
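/* Illustrative behaviour: given (lo_sum (high FOO) FOO) for some address
   FOO, simplify_rtx returns FOO via the RTX_OBJ case above; when no
   simplification applies, it returns NULL.  */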