1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
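/* Illustrative note (hypothetical values, assuming a 64-bit
   HOST_WIDE_INT): if LOW reads as a negative signed value, e.g. all
   bits set, the macro yields -1 and the (low, high) pair represents
   -1; for LOW = 1 it yields 0 and the pair represents 1.  */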
49
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
63 \f
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
68 {
69 return gen_int_mode (- INTVAL (i), mode);
70 }
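/* Worked example of why truncation matters (illustrative): in QImode,
   negating the maximally negative value -128 gives +128, which does
   not fit in 8 bits; gen_int_mode truncates it back to -128, i.e.
   two's-complement wrap-around.  */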
71
72 /* Test whether the expression X is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && GET_CODE (x) == CONST_DOUBLE
93 && CONST_DOUBLE_LOW (x) == 0)
94 {
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
97 }
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
101
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
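/* Example (illustrative): for 32-bit SImode the only value accepted
   here is the sign bit 0x80000000, i.e.
   (unsigned HOST_WIDE_INT) 1 << 31.  */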
106
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
110
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 {
114 unsigned int width;
115
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
118
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
122
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 }
126
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
136
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
140
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
143 }
144
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 {
150 unsigned int width;
151
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
154
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
158
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
161 }
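/* Example contrasting the three predicates above (illustrative), in
   QImode with 8-bit precision: val_signbit_p accepts exactly 0x80,
   val_signbit_known_set_p accepts any value with bit 7 set (0x81,
   0xff, ...), and val_signbit_known_clear_p any value with bit 7
   clear (0x00, 0x7f, ...).  */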
162 \f
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
165
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
169 {
170 rtx tem;
171
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
176
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
181
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
183 }
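/* Usage sketch (hypothetical operands): simplify_gen_binary (PLUS,
   SImode, const1_rtx, reg) first tries constant folding and, failing
   that, swaps the commutative operands so the constant comes second,
   yielding (plus:SI reg (const_int 1)).  */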
184 \f
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
187 rtx
188 avoid_constant_pool_reference (rtx x)
189 {
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
193
194 switch (GET_CODE (x))
195 {
196 case MEM:
197 break;
198
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
204 {
205 REAL_VALUE_TYPE d;
206
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 }
210 return x;
211
212 default:
213 return x;
214 }
215
216 if (GET_MODE (x) == BLKmode)
217 return x;
218
219 addr = XEXP (x, 0);
220
221 /* Call the target hook to avoid the effects of -fpic etc.  */
222 addr = targetm.delegitimize_address (addr);
223
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228 {
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
231 }
232
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
235
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
240 {
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
243
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
248 {
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
252 }
253 else
254 return c;
255 }
256
257 return x;
258 }
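/* Example (illustrative): if X is (mem:DF addr) where ADDR is a
   constant-pool SYMBOL_REF whose entry holds the CONST_DOUBLE 1.0 in
   DFmode, that CONST_DOUBLE is returned directly; with a nonzero
   offset or a different access mode, a constant subreg is attempted
   instead, and X is returned unchanged if that fails.  */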
259 \f
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
263
264 rtx
265 delegitimize_mem_from_attrs (rtx x)
266 {
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
272 {
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
276
277 switch (TREE_CODE (decl))
278 {
279 default:
280 decl = NULL;
281 break;
282
283 case VAR_DECL:
284 break;
285
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
293 {
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
297
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
305 {
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
309 }
310 break;
311 }
312 }
313
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
321 {
322 rtx newx;
323
324 offset += MEM_OFFSET (x);
325
326 newx = DECL_RTL (decl);
327
328 if (MEM_P (newx))
329 {
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do so if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
350 }
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
354 }
355 }
356
357 return x;
358 }
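/* Sketch of the effect (hypothetical names): a MEM whose MEM_EXPR is
   a static VAR_DECL v and whose MEM_OFFSET is 4 is rewritten to
   address DECL_RTL (v) at offset 4, undoing -fpic-style address
   legitimization.  */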
359 \f
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
362
363 rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
366 {
367 rtx tem;
368
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
372
373 return gen_rtx_fmt_e (code, mode, op);
374 }
375
376 /* Likewise for ternary operations. */
377
378 rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 {
382 rtx tem;
383
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
388
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
390 }
391
392 /* Likewise, for relational operations.
393 CMP_MODE specifies the mode in which the comparison is done. */
394
395 rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 {
399 rtx tem;
400
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
404
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
406 }
407 \f
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X; if it returns non-NULL, replace X with its return value and simplify the
411 result. */
412
413 rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 {
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
424
425 if (__builtin_expect (fn != NULL, 0))
426 {
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
430 }
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
433
434 switch (GET_RTX_CLASS (code))
435 {
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
443
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
451
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475
476 case RTX_EXTRA:
477 if (code == SUBREG)
478 {
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
486 }
487 break;
488
489 case RTX_OBJ:
490 if (code == MEM)
491 {
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
496 }
497 else if (code == LO_SUM)
498 {
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
505
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
509 }
510 break;
511
512 default:
513 break;
514 }
515
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
520 {
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 {
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
529 {
530 if (newvec == vec)
531 {
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
536 }
537 RTVEC_ELT (newvec, j) = op;
538 }
539 }
540 break;
541
542 case 'e':
543 if (XEXP (x, i))
544 {
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
547 {
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
551 }
552 }
553 break;
554 }
555 return newx;
556 }
557
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
560
561 rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 {
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
565 }
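/* Worked example (hypothetical rtxes): replacing (reg:SI r) by
   (const_int 2) in (plus:SI (reg:SI r) (const_int 1)) does not merely
   substitute; the rebuild goes through simplify_gen_binary and folds
   the result to (const_int 3).  */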
566 \f
567 /* Try to simplify a unary operation CODE whose output mode is to be
568 MODE with input operand OP whose mode was originally OP_MODE.
569 Return zero if no simplification can be made. */
570 rtx
571 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
572 rtx op, enum machine_mode op_mode)
573 {
574 rtx trueop, tem;
575
576 trueop = avoid_constant_pool_reference (op);
577
578 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
579 if (tem)
580 return tem;
581
582 return simplify_unary_operation_1 (code, mode, op);
583 }
584
585 /* Perform some simplifications we can do even if the operands
586 aren't constant. */
587 static rtx
588 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
589 {
590 enum rtx_code reversed;
591 rtx temp;
592
593 switch (code)
594 {
595 case NOT:
596 /* (not (not X)) == X. */
597 if (GET_CODE (op) == NOT)
598 return XEXP (op, 0);
599
600 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
601 comparison is all ones. */
602 if (COMPARISON_P (op)
603 && (mode == BImode || STORE_FLAG_VALUE == -1)
604 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
605 return simplify_gen_relational (reversed, mode, VOIDmode,
606 XEXP (op, 0), XEXP (op, 1));
607
608 /* (not (plus X -1)) can become (neg X). */
609 if (GET_CODE (op) == PLUS
610 && XEXP (op, 1) == constm1_rtx)
611 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
612
613 /* Similarly, (not (neg X)) is (plus X -1). */
614 if (GET_CODE (op) == NEG)
615 return plus_constant (mode, XEXP (op, 0), -1);
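/* Both rewrites above follow from the two's-complement identity
   ~Y = -Y - 1: with Y = X - 1 it gives ~(X - 1) = -X, and with
   Y = -X it gives ~(-X) = X - 1.  */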
616
617 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
618 if (GET_CODE (op) == XOR
619 && CONST_INT_P (XEXP (op, 1))
620 && (temp = simplify_unary_operation (NOT, mode,
621 XEXP (op, 1), mode)) != 0)
622 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
623
624 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
625 if (GET_CODE (op) == PLUS
626 && CONST_INT_P (XEXP (op, 1))
627 && mode_signbit_p (mode, XEXP (op, 1))
628 && (temp = simplify_unary_operation (NOT, mode,
629 XEXP (op, 1), mode)) != 0)
630 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
631
632
633 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
634 operands other than 1, but that is not valid. We could do a
635 similar simplification for (not (lshiftrt C X)) where C is
636 just the sign bit, but this doesn't seem common enough to
637 bother with. */
638 if (GET_CODE (op) == ASHIFT
639 && XEXP (op, 0) == const1_rtx)
640 {
641 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
642 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
643 }
644
645 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
646 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
647 so we can perform the above simplification. */
648
649 if (STORE_FLAG_VALUE == -1
650 && GET_CODE (op) == ASHIFTRT
651 && CONST_INT_P (XEXP (op, 1))
652 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
653 return simplify_gen_relational (GE, mode, VOIDmode,
654 XEXP (op, 0), const0_rtx);
655
656
657 if (GET_CODE (op) == SUBREG
658 && subreg_lowpart_p (op)
659 && (GET_MODE_SIZE (GET_MODE (op))
660 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
661 && GET_CODE (SUBREG_REG (op)) == ASHIFT
662 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
663 {
664 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
665 rtx x;
666
667 x = gen_rtx_ROTATE (inner_mode,
668 simplify_gen_unary (NOT, inner_mode, const1_rtx,
669 inner_mode),
670 XEXP (SUBREG_REG (op), 1));
671 return rtl_hooks.gen_lowpart_no_emit (mode, x);
672 }
673
674 /* Apply De Morgan's laws to reduce number of patterns for machines
675 with negating logical insns (and-not, nand, etc.). If result has
676 only one NOT, put it first, since that is how the patterns are
677 coded. */
678
679 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
680 {
681 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
682 enum machine_mode op_mode;
683
684 op_mode = GET_MODE (in1);
685 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
686
687 op_mode = GET_MODE (in2);
688 if (op_mode == VOIDmode)
689 op_mode = mode;
690 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
691
692 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
693 {
694 rtx tem = in2;
695 in2 = in1; in1 = tem;
696 }
697
698 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
699 mode, in1, in2);
700 }
701 break;
702
703 case NEG:
704 /* (neg (neg X)) == X. */
705 if (GET_CODE (op) == NEG)
706 return XEXP (op, 0);
707
708 /* (neg (plus X 1)) can become (not X). */
709 if (GET_CODE (op) == PLUS
710 && XEXP (op, 1) == const1_rtx)
711 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
712
713 /* Similarly, (neg (not X)) is (plus X 1). */
714 if (GET_CODE (op) == NOT)
715 return plus_constant (mode, XEXP (op, 0), 1);
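/* These two rewrites mirror the NOT case above: by ~Y = -Y - 1,
   -(X + 1) = ~X and -(~X) = X + 1.  */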
716
717 /* (neg (minus X Y)) can become (minus Y X). This transformation
718 isn't safe for modes with signed zeros, since if X and Y are
719 both +0, (minus Y X) is the same as (minus X Y). If the
720 rounding mode is towards +infinity (or -infinity) then the two
721 expressions will be rounded differently. */
722 if (GET_CODE (op) == MINUS
723 && !HONOR_SIGNED_ZEROS (mode)
724 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
725 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
726
727 if (GET_CODE (op) == PLUS
728 && !HONOR_SIGNED_ZEROS (mode)
729 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
730 {
731 /* (neg (plus A C)) is simplified to (minus -C A). */
732 if (CONST_INT_P (XEXP (op, 1))
733 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
734 {
735 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
736 if (temp)
737 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
738 }
739
740 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
741 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
742 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
743 }
744
745 /* (neg (mult A B)) becomes (mult A (neg B)).
746 This works even for floating-point values. */
747 if (GET_CODE (op) == MULT
748 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
749 {
750 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
751 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
752 }
753
754 /* NEG commutes with ASHIFT since a left shift is a multiplication. Only do
755 this if we can then eliminate the NEG (e.g., if the operand
756 is a constant). */
757 if (GET_CODE (op) == ASHIFT)
758 {
759 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
760 if (temp)
761 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
762 }
763
764 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
765 C is equal to the width of MODE minus 1. */
766 if (GET_CODE (op) == ASHIFTRT
767 && CONST_INT_P (XEXP (op, 1))
768 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
769 return simplify_gen_binary (LSHIFTRT, mode,
770 XEXP (op, 0), XEXP (op, 1));
771
772 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
773 C is equal to the width of MODE minus 1. */
774 if (GET_CODE (op) == LSHIFTRT
775 && CONST_INT_P (XEXP (op, 1))
776 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
777 return simplify_gen_binary (ASHIFTRT, mode,
778 XEXP (op, 0), XEXP (op, 1));
779
780 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
781 if (GET_CODE (op) == XOR
782 && XEXP (op, 1) == const1_rtx
783 && nonzero_bits (XEXP (op, 0), mode) == 1)
784 return plus_constant (mode, XEXP (op, 0), -1);
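/* Quick check of the two possible values: A = 0 gives
   -(0 ^ 1) = -1 = 0 - 1, and A = 1 gives -(1 ^ 1) = 0 = 1 - 1.  */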
785
786 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
787 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
788 if (GET_CODE (op) == LT
789 && XEXP (op, 1) == const0_rtx
790 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
791 {
792 enum machine_mode inner = GET_MODE (XEXP (op, 0));
793 int isize = GET_MODE_PRECISION (inner);
794 if (STORE_FLAG_VALUE == 1)
795 {
796 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
797 GEN_INT (isize - 1));
798 if (mode == inner)
799 return temp;
800 if (GET_MODE_PRECISION (mode) > isize)
801 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
802 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
803 }
804 else if (STORE_FLAG_VALUE == -1)
805 {
806 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
807 GEN_INT (isize - 1));
808 if (mode == inner)
809 return temp;
810 if (GET_MODE_PRECISION (mode) > isize)
811 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
812 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
813 }
814 }
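/* Example (illustrative): with STORE_FLAG_VALUE == 1 and X in SImode,
   (neg (lt X 0)) is -1 exactly when the sign bit of X is set, which
   is what (ashiftrt X 31) computes.  */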
815 break;
816
817 case TRUNCATE:
818 /* We can't handle truncation to a partial integer mode here
819 because we don't know the real bitsize of the partial
820 integer mode. */
821 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
822 break;
823
824 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
825 if ((GET_CODE (op) == SIGN_EXTEND
826 || GET_CODE (op) == ZERO_EXTEND)
827 && GET_MODE (XEXP (op, 0)) == mode)
828 return XEXP (op, 0);
829
830 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
831 (OP:SI foo:SI) if OP is NEG or ABS. */
832 if ((GET_CODE (op) == ABS
833 || GET_CODE (op) == NEG)
834 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
835 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
836 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
837 return simplify_gen_unary (GET_CODE (op), mode,
838 XEXP (XEXP (op, 0), 0), mode);
839
840 /* (truncate:A (subreg:B (truncate:C X) 0)) is
841 (truncate:A X). */
842 if (GET_CODE (op) == SUBREG
843 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
844 && subreg_lowpart_p (op))
845 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
846 GET_MODE (XEXP (SUBREG_REG (op), 0)));
847
848 /* If we know that the value is already truncated, we can
849 replace the TRUNCATE with a SUBREG. Note that this is also
850 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
851 modes; we just have to apply a different definition for
852 truncation. But don't do this for an (LSHIFTRT (MULT ...))
853 since this will cause problems with the umulXi3_highpart
854 patterns. */
855 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
856 ? (num_sign_bit_copies (op, GET_MODE (op))
857 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
858 - GET_MODE_PRECISION (mode)))
859 : truncated_to_mode (mode, op))
860 && ! (GET_CODE (op) == LSHIFTRT
861 && GET_CODE (XEXP (op, 0)) == MULT))
862 return rtl_hooks.gen_lowpart_no_emit (mode, op);
863
864 /* A truncate of a comparison can be replaced with a subreg if
865 STORE_FLAG_VALUE permits. This is like the previous test,
866 but it works even if the comparison is done in a mode larger
867 than HOST_BITS_PER_WIDE_INT. */
868 if (HWI_COMPUTABLE_MODE_P (mode)
869 && COMPARISON_P (op)
870 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
871 return rtl_hooks.gen_lowpart_no_emit (mode, op);
872 break;
873
874 case FLOAT_TRUNCATE:
875 if (DECIMAL_FLOAT_MODE_P (mode))
876 break;
877
878 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
879 if (GET_CODE (op) == FLOAT_EXTEND
880 && GET_MODE (XEXP (op, 0)) == mode)
881 return XEXP (op, 0);
882
883 /* (float_truncate:SF (float_truncate:DF foo:XF))
884 = (float_truncate:SF foo:XF).
885 This may eliminate double rounding, so it is unsafe and is only
done under -funsafe-math-optimizations.
886
887 (float_truncate:SF (float_extend:XF foo:DF))
888 = (float_truncate:SF foo:DF).
889
890 (float_truncate:DF (float_extend:XF foo:SF))
891 = (float_extend:DF foo:SF). */
892 if ((GET_CODE (op) == FLOAT_TRUNCATE
893 && flag_unsafe_math_optimizations)
894 || GET_CODE (op) == FLOAT_EXTEND)
895 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
896 0)))
897 > GET_MODE_SIZE (mode)
898 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
899 mode,
900 XEXP (op, 0), mode);
901
902 /* (float_truncate (float x)) is (float x).  */
903 if (GET_CODE (op) == FLOAT
904 && (flag_unsafe_math_optimizations
905 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
906 && ((unsigned)significand_size (GET_MODE (op))
907 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
908 - num_sign_bit_copies (XEXP (op, 0),
909 GET_MODE (XEXP (op, 0))))))))
910 return simplify_gen_unary (FLOAT, mode,
911 XEXP (op, 0),
912 GET_MODE (XEXP (op, 0)));
913
914 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
915 (OP:SF foo:SF) if OP is NEG or ABS. */
916 if ((GET_CODE (op) == ABS
917 || GET_CODE (op) == NEG)
918 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
919 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
920 return simplify_gen_unary (GET_CODE (op), mode,
921 XEXP (XEXP (op, 0), 0), mode);
922
923 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
924 is (float_truncate:SF x). */
925 if (GET_CODE (op) == SUBREG
926 && subreg_lowpart_p (op)
927 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
928 return SUBREG_REG (op);
929 break;
930
931 case FLOAT_EXTEND:
932 if (DECIMAL_FLOAT_MODE_P (mode))
933 break;
934
935 /* (float_extend (float_extend x)) is (float_extend x)
936
937 (float_extend (float x)) is (float x) assuming that double
938 rounding can't happen.
939 */
940 if (GET_CODE (op) == FLOAT_EXTEND
941 || (GET_CODE (op) == FLOAT
942 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
943 && ((unsigned)significand_size (GET_MODE (op))
944 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
945 - num_sign_bit_copies (XEXP (op, 0),
946 GET_MODE (XEXP (op, 0)))))))
947 return simplify_gen_unary (GET_CODE (op), mode,
948 XEXP (op, 0),
949 GET_MODE (XEXP (op, 0)));
950
951 break;
952
953 case ABS:
954 /* (abs (neg <foo>)) -> (abs <foo>) */
955 if (GET_CODE (op) == NEG)
956 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
957 GET_MODE (XEXP (op, 0)));
958
959 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
960 do nothing. */
961 if (GET_MODE (op) == VOIDmode)
962 break;
963
964 /* If operand is something known to be positive, ignore the ABS. */
965 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
966 || val_signbit_known_clear_p (GET_MODE (op),
967 nonzero_bits (op, GET_MODE (op))))
968 return op;
969
970 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
971 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
972 return gen_rtx_NEG (mode, op);
973
974 break;
975
976 case FFS:
977 /* (ffs (*_extend <X>)) = (ffs <X>) */
978 if (GET_CODE (op) == SIGN_EXTEND
979 || GET_CODE (op) == ZERO_EXTEND)
980 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
981 GET_MODE (XEXP (op, 0)));
982 break;
983
984 case POPCOUNT:
985 switch (GET_CODE (op))
986 {
987 case BSWAP:
988 case ZERO_EXTEND:
989 /* (popcount (zero_extend <X>)) = (popcount <X>) */
990 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
991 GET_MODE (XEXP (op, 0)));
992
993 case ROTATE:
994 case ROTATERT:
995 /* Rotations don't affect popcount. */
996 if (!side_effects_p (XEXP (op, 1)))
997 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
998 GET_MODE (XEXP (op, 0)));
999 break;
1000
1001 default:
1002 break;
1003 }
1004 break;
1005
1006 case PARITY:
1007 switch (GET_CODE (op))
1008 {
1009 case NOT:
1010 case BSWAP:
1011 case ZERO_EXTEND:
1012 case SIGN_EXTEND:
1013 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1014 GET_MODE (XEXP (op, 0)));
1015
1016 case ROTATE:
1017 case ROTATERT:
1018 /* Rotations don't affect parity. */
1019 if (!side_effects_p (XEXP (op, 1)))
1020 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1021 GET_MODE (XEXP (op, 0)));
1022 break;
1023
1024 default:
1025 break;
1026 }
1027 break;
1028
1029 case BSWAP:
1030 /* (bswap (bswap x)) -> x. */
1031 if (GET_CODE (op) == BSWAP)
1032 return XEXP (op, 0);
1033 break;
1034
1035 case FLOAT:
1036 /* (float (sign_extend <X>)) = (float <X>). */
1037 if (GET_CODE (op) == SIGN_EXTEND)
1038 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1039 GET_MODE (XEXP (op, 0)));
1040 break;
1041
1042 case SIGN_EXTEND:
1043 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1044 becomes just the MINUS if its mode is MODE. This allows
1045 folding switch statements on machines using casesi (such as
1046 the VAX). */
1047 if (GET_CODE (op) == TRUNCATE
1048 && GET_MODE (XEXP (op, 0)) == mode
1049 && GET_CODE (XEXP (op, 0)) == MINUS
1050 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1051 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1052 return XEXP (op, 0);
1053
1054 /* Extending a widening multiplication should be canonicalized to
1055 a wider widening multiplication. */
1056 if (GET_CODE (op) == MULT)
1057 {
1058 rtx lhs = XEXP (op, 0);
1059 rtx rhs = XEXP (op, 1);
1060 enum rtx_code lcode = GET_CODE (lhs);
1061 enum rtx_code rcode = GET_CODE (rhs);
1062
1063 /* Widening multiplies usually extend both operands, but sometimes
1064 they use a shift to extract a portion of a register. */
1065 if ((lcode == SIGN_EXTEND
1066 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1067 && (rcode == SIGN_EXTEND
1068 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1069 {
1070 enum machine_mode lmode = GET_MODE (lhs);
1071 enum machine_mode rmode = GET_MODE (rhs);
1072 int bits;
1073
1074 if (lcode == ASHIFTRT)
1075 /* Number of bits not shifted off the end. */
1076 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1077 else /* lcode == SIGN_EXTEND */
1078 /* Size of inner mode. */
1079 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1080
1081 if (rcode == ASHIFTRT)
1082 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1083 else /* rcode == SIGN_EXTEND */
1084 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1085
1086 /* We can only widen multiplies if the result is mathematically
1087 equivalent, i.e. if overflow was impossible. */
1088 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1089 return simplify_gen_binary
1090 (MULT, mode,
1091 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1092 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1093 }
1094 }
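/* Worked example (hypothetical modes): sign-extending
   (mult:SI (sign_extend:SI X:HI) (sign_extend:SI Y:HI)) to DImode
   needs at most 16 + 16 = 32 bits, which fits SImode, so overflow is
   impossible and the result becomes
   (mult:DI (sign_extend:DI X:HI) (sign_extend:DI Y:HI)).  */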
1095
1096 /* Check for a sign extension of a subreg of a promoted
1097 variable, where the promotion is sign-extended, and the
1098 target mode is the same as the variable's promotion. */
1099 if (GET_CODE (op) == SUBREG
1100 && SUBREG_PROMOTED_VAR_P (op)
1101 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1102 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1103 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1104
1105 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1106 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1107 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1108 {
1109 gcc_assert (GET_MODE_BITSIZE (mode)
1110 > GET_MODE_BITSIZE (GET_MODE (op)));
1111 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1112 GET_MODE (XEXP (op, 0)));
1113 }
1114
1115 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1116 is (sign_extend:M (subreg:O <X>)) if there is mode with
1117 GET_MODE_BITSIZE (N) - I bits.
1118 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1119 is similarly (zero_extend:M (subreg:O <X>)). */
1120 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1121 && GET_CODE (XEXP (op, 0)) == ASHIFT
1122 && CONST_INT_P (XEXP (op, 1))
1123 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1124 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1125 {
1126 enum machine_mode tmode
1127 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1128 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1129 gcc_assert (GET_MODE_BITSIZE (mode)
1130 > GET_MODE_BITSIZE (GET_MODE (op)));
1131 if (tmode != BLKmode)
1132 {
1133 rtx inner =
1134 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1135 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1136 ? SIGN_EXTEND : ZERO_EXTEND,
1137 mode, inner, tmode);
1138 }
1139 }
1140
1141 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1142 /* As we do not know which address space the pointer is referring to,
1143 we can do this only if the target does not support different pointer
1144 or address modes depending on the address space. */
1145 if (target_default_pointer_address_modes_p ()
1146 && ! POINTERS_EXTEND_UNSIGNED
1147 && mode == Pmode && GET_MODE (op) == ptr_mode
1148 && (CONSTANT_P (op)
1149 || (GET_CODE (op) == SUBREG
1150 && REG_P (SUBREG_REG (op))
1151 && REG_POINTER (SUBREG_REG (op))
1152 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1153 return convert_memory_address (Pmode, op);
1154 #endif
1155 break;
1156
1157 case ZERO_EXTEND:
1158 /* Check for a zero extension of a subreg of a promoted
1159 variable, where the promotion is zero-extended, and the
1160 target mode is the same as the variable's promotion. */
1161 if (GET_CODE (op) == SUBREG
1162 && SUBREG_PROMOTED_VAR_P (op)
1163 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1164 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1165 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1166
1167 /* Extending a widening multiplication should be canonicalized to
1168 a wider widening multiplication. */
1169 if (GET_CODE (op) == MULT)
1170 {
1171 rtx lhs = XEXP (op, 0);
1172 rtx rhs = XEXP (op, 1);
1173 enum rtx_code lcode = GET_CODE (lhs);
1174 enum rtx_code rcode = GET_CODE (rhs);
1175
1176 /* Widening multiplies usually extend both operands, but sometimes
1177 they use a shift to extract a portion of a register. */
1178 if ((lcode == ZERO_EXTEND
1179 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1180 && (rcode == ZERO_EXTEND
1181 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1182 {
1183 enum machine_mode lmode = GET_MODE (lhs);
1184 enum machine_mode rmode = GET_MODE (rhs);
1185 int bits;
1186
1187 if (lcode == LSHIFTRT)
1188 /* Number of bits not shifted off the end. */
1189 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1190 else /* lcode == ZERO_EXTEND */
1191 /* Size of inner mode. */
1192 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1193
1194 if (rcode == LSHIFTRT)
1195 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1196 else /* rcode == ZERO_EXTEND */
1197 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1198
1199 /* We can only widen multiplies if the result is mathematically
1200 equivalent, i.e. if overflow was impossible. */
1201 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1202 return simplify_gen_binary
1203 (MULT, mode,
1204 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1205 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1206 }
1207 }
1208
1209 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1210 if (GET_CODE (op) == ZERO_EXTEND)
1211 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1212 GET_MODE (XEXP (op, 0)));
1213
1214 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1215 is (zero_extend:M (subreg:O <X>)) if there is mode with
1216 GET_MODE_BITSIZE (N) - I bits. */
1217 if (GET_CODE (op) == LSHIFTRT
1218 && GET_CODE (XEXP (op, 0)) == ASHIFT
1219 && CONST_INT_P (XEXP (op, 1))
1220 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1221 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1222 {
1223 enum machine_mode tmode
1224 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1225 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1226 if (tmode != BLKmode)
1227 {
1228 rtx inner =
1229 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1230 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1231 }
1232 }
1233
1234 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1235 /* As we do not know which address space the pointer is referring to,
1236 we can do this only if the target does not support different pointer
1237 or address modes depending on the address space. */
1238 if (target_default_pointer_address_modes_p ()
1239 && POINTERS_EXTEND_UNSIGNED > 0
1240 && mode == Pmode && GET_MODE (op) == ptr_mode
1241 && (CONSTANT_P (op)
1242 || (GET_CODE (op) == SUBREG
1243 && REG_P (SUBREG_REG (op))
1244 && REG_POINTER (SUBREG_REG (op))
1245 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1246 return convert_memory_address (Pmode, op);
1247 #endif
1248 break;
1249
1250 default:
1251 break;
1252 }
1253
1254 return 0;
1255 }
1256
1257 /* Try to compute the value of a unary operation CODE whose output mode is to
1258 be MODE with input operand OP whose mode was originally OP_MODE.
1259 Return zero if the value cannot be computed. */
1260 rtx
1261 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1262 rtx op, enum machine_mode op_mode)
1263 {
1264 unsigned int width = GET_MODE_PRECISION (mode);
1265 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1266
1267 if (code == VEC_DUPLICATE)
1268 {
1269 gcc_assert (VECTOR_MODE_P (mode));
1270 if (GET_MODE (op) != VOIDmode)
1271 {
1272 if (!VECTOR_MODE_P (GET_MODE (op)))
1273 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1274 else
1275 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1276 (GET_MODE (op)));
1277 }
1278 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1279 || GET_CODE (op) == CONST_VECTOR)
1280 {
1281 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1282 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1283 rtvec v = rtvec_alloc (n_elts);
1284 unsigned int i;
1285
1286 if (GET_CODE (op) != CONST_VECTOR)
1287 for (i = 0; i < n_elts; i++)
1288 RTVEC_ELT (v, i) = op;
1289 else
1290 {
1291 enum machine_mode inmode = GET_MODE (op);
1292 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1293 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1294
1295 gcc_assert (in_n_elts < n_elts);
1296 gcc_assert ((n_elts % in_n_elts) == 0);
1297 for (i = 0; i < n_elts; i++)
1298 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1299 }
1300 return gen_rtx_CONST_VECTOR (mode, v);
1301 }
1302 }
1303
1304 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1305 {
1306 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1307 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1308 enum machine_mode opmode = GET_MODE (op);
1309 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1310 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1311 rtvec v = rtvec_alloc (n_elts);
1312 unsigned int i;
1313
1314 gcc_assert (op_n_elts == n_elts);
1315 for (i = 0; i < n_elts; i++)
1316 {
1317 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1318 CONST_VECTOR_ELT (op, i),
1319 GET_MODE_INNER (opmode));
1320 if (!x)
1321 return 0;
1322 RTVEC_ELT (v, i) = x;
1323 }
1324 return gen_rtx_CONST_VECTOR (mode, v);
1325 }
1326
1327 /* The order of these tests is critical so that, for example, we don't
1328 check the wrong mode (input vs. output) for a conversion operation,
1329 such as FIX. At some point, this should be simplified. */
1330
1331 if (code == FLOAT && GET_MODE (op) == VOIDmode
1332 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1333 {
1334 HOST_WIDE_INT hv, lv;
1335 REAL_VALUE_TYPE d;
1336
1337 if (CONST_INT_P (op))
1338 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1339 else
1340 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1341
1342 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1343 d = real_value_truncate (mode, d);
1344 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1345 }
1346 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1347 && (GET_CODE (op) == CONST_DOUBLE
1348 || CONST_INT_P (op)))
1349 {
1350 HOST_WIDE_INT hv, lv;
1351 REAL_VALUE_TYPE d;
1352
1353 if (CONST_INT_P (op))
1354 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1355 else
1356 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1357
1358 if (op_mode == VOIDmode
1359 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1360 /* We should never get a negative number. */
1361 gcc_assert (hv >= 0);
1362 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1363 hv = 0, lv &= GET_MODE_MASK (op_mode);
1364
1365 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1366 d = real_value_truncate (mode, d);
1367 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1368 }
1369
1370 if (CONST_INT_P (op)
1371 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1372 {
1373 HOST_WIDE_INT arg0 = INTVAL (op);
1374 HOST_WIDE_INT val;
1375
1376 switch (code)
1377 {
1378 case NOT:
1379 val = ~ arg0;
1380 break;
1381
1382 case NEG:
1383 val = - arg0;
1384 break;
1385
1386 case ABS:
1387 val = (arg0 >= 0 ? arg0 : - arg0);
1388 break;
1389
1390 case FFS:
1391 arg0 &= GET_MODE_MASK (mode);
1392 val = ffs_hwi (arg0);
1393 break;
1394
1395 case CLZ:
1396 arg0 &= GET_MODE_MASK (mode);
1397 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1398 ;
1399 else
1400 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1401 break;
1402
1403 case CLRSB:
1404 arg0 &= GET_MODE_MASK (mode);
1405 if (arg0 == 0)
1406 val = GET_MODE_PRECISION (mode) - 1;
1407 else if (arg0 >= 0)
1408 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1409 else if (arg0 < 0)
1410 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1411 break;
1412
1413 case CTZ:
1414 arg0 &= GET_MODE_MASK (mode);
1415 if (arg0 == 0)
1416 {
1417 /* Even if the value at zero is undefined, we have to come
1418 up with some replacement. Seems good enough. */
1419 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1420 val = GET_MODE_PRECISION (mode);
1421 }
1422 else
1423 val = ctz_hwi (arg0);
1424 break;
1425
1426 case POPCOUNT:
1427 arg0 &= GET_MODE_MASK (mode);
1428 val = 0;
1429 while (arg0)
1430 val++, arg0 &= arg0 - 1;
1431 break;
1432
1433 case PARITY:
1434 arg0 &= GET_MODE_MASK (mode);
1435 val = 0;
1436 while (arg0)
1437 val++, arg0 &= arg0 - 1;
1438 val &= 1;
1439 break;
1440
1441 case BSWAP:
1442 {
1443 unsigned int s;
1444
1445 val = 0;
1446 for (s = 0; s < width; s += 8)
1447 {
1448 unsigned int d = width - s - 8;
1449 unsigned HOST_WIDE_INT byte;
1450 byte = (arg0 >> s) & 0xff;
1451 val |= byte << d;
1452 }
1453 }
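/* Example (illustrative): for a 32-bit width, 0x12345678 byte-swaps
   to 0x78563412.  */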
1454 break;
1455
1456 case TRUNCATE:
1457 val = arg0;
1458 break;
1459
1460 case ZERO_EXTEND:
1461 /* When zero-extending a CONST_INT, we need to know its
1462 original mode. */
1463 gcc_assert (op_mode != VOIDmode);
1464 if (op_width == HOST_BITS_PER_WIDE_INT)
1465 {
1466 /* If we were really extending the mode,
1467 we would have to distinguish between zero-extension
1468 and sign-extension. */
1469 gcc_assert (width == op_width);
1470 val = arg0;
1471 }
1472 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1473 val = arg0 & GET_MODE_MASK (op_mode);
1474 else
1475 return 0;
1476 break;
1477
1478 case SIGN_EXTEND:
1479 if (op_mode == VOIDmode)
1480 op_mode = mode;
1481 op_width = GET_MODE_PRECISION (op_mode);
1482 if (op_width == HOST_BITS_PER_WIDE_INT)
1483 {
1484 /* If we were really extending the mode,
1485 we would have to distinguish between zero-extension
1486 and sign-extension. */
1487 gcc_assert (width == op_width);
1488 val = arg0;
1489 }
1490 else if (op_width < HOST_BITS_PER_WIDE_INT)
1491 {
1492 val = arg0 & GET_MODE_MASK (op_mode);
1493 if (val_signbit_known_set_p (op_mode, val))
1494 val |= ~GET_MODE_MASK (op_mode);
1495 }
1496 else
1497 return 0;
1498 break;
1499
1500 case SQRT:
1501 case FLOAT_EXTEND:
1502 case FLOAT_TRUNCATE:
1503 case SS_TRUNCATE:
1504 case US_TRUNCATE:
1505 case SS_NEG:
1506 case US_NEG:
1507 case SS_ABS:
1508 return 0;
1509
1510 default:
1511 gcc_unreachable ();
1512 }
1513
1514 return gen_int_mode (val, mode);
1515 }
1516
1517 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1518 for a DImode operation on a CONST_INT. */
1519 else if (GET_MODE (op) == VOIDmode
1520 && width <= HOST_BITS_PER_DOUBLE_INT
1521 && (GET_CODE (op) == CONST_DOUBLE
1522 || CONST_INT_P (op)))
1523 {
1524 unsigned HOST_WIDE_INT l1, lv;
1525 HOST_WIDE_INT h1, hv;
1526
1527 if (GET_CODE (op) == CONST_DOUBLE)
1528 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1529 else
1530 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1531
1532 switch (code)
1533 {
1534 case NOT:
1535 lv = ~ l1;
1536 hv = ~ h1;
1537 break;
1538
1539 case NEG:
1540 neg_double (l1, h1, &lv, &hv);
1541 break;
1542
1543 case ABS:
1544 if (h1 < 0)
1545 neg_double (l1, h1, &lv, &hv);
1546 else
1547 lv = l1, hv = h1;
1548 break;
1549
1550 case FFS:
1551 hv = 0;
1552 if (l1 != 0)
1553 lv = ffs_hwi (l1);
1554 else if (h1 != 0)
1555 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1556 else
1557 lv = 0;
1558 break;
1559
1560 case CLZ:
1561 hv = 0;
1562 if (h1 != 0)
1563 lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
1564 - HOST_BITS_PER_WIDE_INT;
1565 else if (l1 != 0)
1566 lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
1567 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1568 lv = GET_MODE_PRECISION (mode);
1569 break;
1570
1571 case CTZ:
1572 hv = 0;
1573 if (l1 != 0)
1574 lv = ctz_hwi (l1);
1575 else if (h1 != 0)
1576 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1577 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1578 lv = GET_MODE_PRECISION (mode);
1579 break;
1580
1581 case POPCOUNT:
1582 hv = 0;
1583 lv = 0;
1584 while (l1)
1585 lv++, l1 &= l1 - 1;
1586 while (h1)
1587 lv++, h1 &= h1 - 1;
1588 break;
1589
1590 case PARITY:
1591 hv = 0;
1592 lv = 0;
1593 while (l1)
1594 lv++, l1 &= l1 - 1;
1595 while (h1)
1596 lv++, h1 &= h1 - 1;
1597 lv &= 1;
1598 break;
1599
1600 case BSWAP:
1601 {
1602 unsigned int s;
1603
1604 hv = 0;
1605 lv = 0;
1606 for (s = 0; s < width; s += 8)
1607 {
1608 unsigned int d = width - s - 8;
1609 unsigned HOST_WIDE_INT byte;
1610
1611 if (s < HOST_BITS_PER_WIDE_INT)
1612 byte = (l1 >> s) & 0xff;
1613 else
1614 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1615
1616 if (d < HOST_BITS_PER_WIDE_INT)
1617 lv |= byte << d;
1618 else
1619 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1620 }
1621 }
1622 break;
1623
1624 case TRUNCATE:
1625 /* This is just a change-of-mode, so do nothing. */
1626 lv = l1, hv = h1;
1627 break;
1628
1629 case ZERO_EXTEND:
1630 gcc_assert (op_mode != VOIDmode);
1631
1632 if (op_width > HOST_BITS_PER_WIDE_INT)
1633 return 0;
1634
1635 hv = 0;
1636 lv = l1 & GET_MODE_MASK (op_mode);
1637 break;
1638
1639 case SIGN_EXTEND:
1640 if (op_mode == VOIDmode
1641 || op_width > HOST_BITS_PER_WIDE_INT)
1642 return 0;
1643 else
1644 {
1645 lv = l1 & GET_MODE_MASK (op_mode);
1646 if (val_signbit_known_set_p (op_mode, lv))
1647 lv |= ~GET_MODE_MASK (op_mode);
1648
1649 hv = HWI_SIGN_EXTEND (lv);
1650 }
1651 break;
1652
1653 case SQRT:
1654 return 0;
1655
1656 default:
1657 return 0;
1658 }
1659
1660 return immed_double_const (lv, hv, mode);
1661 }
1662
1663 else if (GET_CODE (op) == CONST_DOUBLE
1664 && SCALAR_FLOAT_MODE_P (mode)
1665 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1666 {
1667 REAL_VALUE_TYPE d, t;
1668 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1669
1670 switch (code)
1671 {
1672 case SQRT:
1673 if (HONOR_SNANS (mode) && real_isnan (&d))
1674 return 0;
1675 real_sqrt (&t, mode, &d);
1676 d = t;
1677 break;
1678 case ABS:
1679 d = real_value_abs (&d);
1680 break;
1681 case NEG:
1682 d = real_value_negate (&d);
1683 break;
1684 case FLOAT_TRUNCATE:
1685 d = real_value_truncate (mode, d);
1686 break;
1687 case FLOAT_EXTEND:
1688 /* All this does is change the mode, unless changing
1689 mode class. */
1690 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1691 real_convert (&d, mode, &d);
1692 break;
1693 case FIX:
1694 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1695 break;
1696 case NOT:
1697 {
1698 long tmp[4];
1699 int i;
1700
1701 real_to_target (tmp, &d, GET_MODE (op));
1702 for (i = 0; i < 4; i++)
1703 tmp[i] = ~tmp[i];
1704 real_from_target (&d, tmp, mode);
1705 break;
1706 }
1707 default:
1708 gcc_unreachable ();
1709 }
1710 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1711 }
1712
1713 else if (GET_CODE (op) == CONST_DOUBLE
1714 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1715 && GET_MODE_CLASS (mode) == MODE_INT
1716 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1717 {
1718 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1719 operators are intentionally left unspecified (to ease implementation
1720 by target backends), for consistency, this routine implements the
1721 same semantics for constant folding as used by the middle-end. */
1722
1723 /* This was formerly used only for non-IEEE float.
1724 eggert@twinsun.com says it is safe for IEEE also. */
1725 HOST_WIDE_INT xh, xl, th, tl;
1726 REAL_VALUE_TYPE x, t;
1727 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1728 switch (code)
1729 {
1730 case FIX:
1731 if (REAL_VALUE_ISNAN (x))
1732 return const0_rtx;
1733
1734 /* Test against the signed upper bound. */
1735 if (width > HOST_BITS_PER_WIDE_INT)
1736 {
1737 th = ((unsigned HOST_WIDE_INT) 1
1738 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1739 tl = -1;
1740 }
1741 else
1742 {
1743 th = 0;
1744 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1745 }
1746 real_from_integer (&t, VOIDmode, tl, th, 0);
1747 if (REAL_VALUES_LESS (t, x))
1748 {
1749 xh = th;
1750 xl = tl;
1751 break;
1752 }
1753
1754 /* Test against the signed lower bound. */
1755 if (width > HOST_BITS_PER_WIDE_INT)
1756 {
1757 th = (unsigned HOST_WIDE_INT) (-1)
1758 << (width - HOST_BITS_PER_WIDE_INT - 1);
1759 tl = 0;
1760 }
1761 else
1762 {
1763 th = -1;
1764 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1765 }
1766 real_from_integer (&t, VOIDmode, tl, th, 0);
1767 if (REAL_VALUES_LESS (x, t))
1768 {
1769 xh = th;
1770 xl = tl;
1771 break;
1772 }
1773 REAL_VALUE_TO_INT (&xl, &xh, x);
1774 break;
1775
1776 case UNSIGNED_FIX:
1777 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1778 return const0_rtx;
1779
1780 /* Test against the unsigned upper bound. */
1781 if (width == HOST_BITS_PER_DOUBLE_INT)
1782 {
1783 th = -1;
1784 tl = -1;
1785 }
1786 else if (width >= HOST_BITS_PER_WIDE_INT)
1787 {
1788 th = ((unsigned HOST_WIDE_INT) 1
1789 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1790 tl = -1;
1791 }
1792 else
1793 {
1794 th = 0;
1795 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1796 }
1797 real_from_integer (&t, VOIDmode, tl, th, 1);
1798 if (REAL_VALUES_LESS (t, x))
1799 {
1800 xh = th;
1801 xl = tl;
1802 break;
1803 }
1804
1805 REAL_VALUE_TO_INT (&xl, &xh, x);
1806 break;
1807
1808 default:
1809 gcc_unreachable ();
1810 }
1811 return immed_double_const (xl, xh, mode);
1812 }
1813
1814 return NULL_RTX;
1815 }
1816 \f
1817 /* Subroutine of simplify_binary_operation to simplify a commutative,
1818 associative binary operation CODE with result mode MODE, operating
1819 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1820 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1821 canonicalization is possible. */
1822
1823 static rtx
1824 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1825 rtx op0, rtx op1)
1826 {
1827 rtx tem;
1828
1829 /* Linearize the operator to the left. */
1830 if (GET_CODE (op1) == code)
1831 {
1832 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1833 if (GET_CODE (op0) == code)
1834 {
1835 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1836 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1837 }
1838
1839 /* "a op (b op c)" becomes "(b op c) op a". */
1840 if (! swap_commutative_operands_p (op1, op0))
1841 return simplify_gen_binary (code, mode, op1, op0);
1842
1843 tem = op0;
1844 op0 = op1;
1845 op1 = tem;
1846 }
1847
1848 if (GET_CODE (op0) == code)
1849 {
1850 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1851 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1852 {
1853 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1854 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1855 }
1856
1857 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1858 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1859 if (tem != 0)
1860 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1861
1862 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1863 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1864 if (tem != 0)
1865 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1866 }
1867
1868 return 0;
1869 }
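/* Illustration (hypothetical operands): for
   (plus (plus X (const_int 3)) (const_int 4)), the
   "(a op b) op c" -> "a op (b op c)" step folds the two constants
   and returns (plus X (const_int 7)).  */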
1870
1871
1872 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1873 and OP1. Return 0 if no simplification is possible.
1874
1875 Don't use this for relational operations such as EQ or LT.
1876 Use simplify_relational_operation instead. */
1877 rtx
1878 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1879 rtx op0, rtx op1)
1880 {
1881 rtx trueop0, trueop1;
1882 rtx tem;
1883
1884 /* Relational operations don't work here. We must know the mode
1885 of the operands in order to do the comparison correctly.
1886 Assuming a full word can give incorrect results.
1887 Consider comparing 128 with -128 in QImode. */
1888 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1889 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1890
1891 /* Make sure the constant is second. */
1892 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1893 && swap_commutative_operands_p (op0, op1))
1894 {
1895 tem = op0, op0 = op1, op1 = tem;
1896 }
1897
1898 trueop0 = avoid_constant_pool_reference (op0);
1899 trueop1 = avoid_constant_pool_reference (op1);
1900
1901 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1902 if (tem)
1903 return tem;
1904 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1905 }
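/* A minimal usage sketch, not part of the original file: folding
   X + 0 through the entry point above.  The wrapper name and the
   pseudo register number 100 are hypothetical. */
#if 0
static rtx
example_fold_plus_zero (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  /* Returns REG itself: x + 0 == x holds unconditionally in an
     integral mode such as SImode. */
  return simplify_binary_operation (PLUS, SImode, reg, const0_rtx);
}
#endif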
1906
1907 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1908 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1909 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1910 actual constants. */
1911
1912 static rtx
1913 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1914 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1915 {
1916 rtx tem, reversed, opleft, opright;
1917 HOST_WIDE_INT val;
1918 unsigned int width = GET_MODE_PRECISION (mode);
1919
1920 /* Even if we can't compute a constant result,
1921 there are some cases worth simplifying. */
1922
1923 switch (code)
1924 {
1925 case PLUS:
1926 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1927 when x is NaN, infinite, or finite and nonzero. They aren't
1928 when x is -0 and the rounding mode is not towards -infinity,
1929 since (-0) + 0 is then 0. */
1930 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1931 return op0;
1932
1933 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1934 transformations are safe even for IEEE. */
1935 if (GET_CODE (op0) == NEG)
1936 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1937 else if (GET_CODE (op1) == NEG)
1938 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1939
1940 /* (~a) + 1 -> -a */
1941 if (INTEGRAL_MODE_P (mode)
1942 && GET_CODE (op0) == NOT
1943 && trueop1 == const1_rtx)
1944 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
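/* Numeric check of the identity above, assuming two's complement
   (illustrative): for a == 6, ~a == -7 and -7 + 1 == -6 == -a. */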
1945
1946 /* Handle both-operands-constant cases. We can only add
1947 CONST_INTs to constants since the sum of relocatable symbols
1948 can't be handled by most assemblers. Don't add CONST_INT
1949 to CONST_INT since overflow won't be computed properly if wider
1950 than HOST_BITS_PER_WIDE_INT. */
1951
1952 if ((GET_CODE (op0) == CONST
1953 || GET_CODE (op0) == SYMBOL_REF
1954 || GET_CODE (op0) == LABEL_REF)
1955 && CONST_INT_P (op1))
1956 return plus_constant (mode, op0, INTVAL (op1));
1957 else if ((GET_CODE (op1) == CONST
1958 || GET_CODE (op1) == SYMBOL_REF
1959 || GET_CODE (op1) == LABEL_REF)
1960 && CONST_INT_P (op0))
1961 return plus_constant (mode, op1, INTVAL (op0));
1962
1963 /* See if this is something like X * C + X or vice versa or
1964 if the multiplication is written as a shift. If so, we can
1965 distribute and make a new multiply, shift, or maybe just
1966 have X (if C is 2 in the example above). But don't make
1967 something more expensive than we had before. */
1968
1969 if (SCALAR_INT_MODE_P (mode))
1970 {
1971 double_int coeff0, coeff1;
1972 rtx lhs = op0, rhs = op1;
1973
1974 coeff0 = double_int_one;
1975 coeff1 = double_int_one;
1976
1977 if (GET_CODE (lhs) == NEG)
1978 {
1979 coeff0 = double_int_minus_one;
1980 lhs = XEXP (lhs, 0);
1981 }
1982 else if (GET_CODE (lhs) == MULT
1983 && CONST_INT_P (XEXP (lhs, 1)))
1984 {
1985 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1986 lhs = XEXP (lhs, 0);
1987 }
1988 else if (GET_CODE (lhs) == ASHIFT
1989 && CONST_INT_P (XEXP (lhs, 1))
1990 && INTVAL (XEXP (lhs, 1)) >= 0
1991 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1992 {
1993 coeff0 = double_int_setbit (double_int_zero,
1994 INTVAL (XEXP (lhs, 1)));
1995 lhs = XEXP (lhs, 0);
1996 }
1997
1998 if (GET_CODE (rhs) == NEG)
1999 {
2000 coeff1 = double_int_minus_one;
2001 rhs = XEXP (rhs, 0);
2002 }
2003 else if (GET_CODE (rhs) == MULT
2004 && CONST_INT_P (XEXP (rhs, 1)))
2005 {
2006 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
2007 rhs = XEXP (rhs, 0);
2008 }
2009 else if (GET_CODE (rhs) == ASHIFT
2010 && CONST_INT_P (XEXP (rhs, 1))
2011 && INTVAL (XEXP (rhs, 1)) >= 0
2012 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2013 {
2014 coeff1 = double_int_setbit (double_int_zero,
2015 INTVAL (XEXP (rhs, 1)));
2016 rhs = XEXP (rhs, 0);
2017 }
2018
2019 if (rtx_equal_p (lhs, rhs))
2020 {
2021 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2022 rtx coeff;
2023 double_int val;
2024 bool speed = optimize_function_for_speed_p (cfun);
2025
2026 val = double_int_add (coeff0, coeff1);
2027 coeff = immed_double_int_const (val, mode);
2028
2029 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2030 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2031 ? tem : 0;
2032 }
2033 }
2034
2035 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2036 if ((CONST_INT_P (op1)
2037 || GET_CODE (op1) == CONST_DOUBLE)
2038 && GET_CODE (op0) == XOR
2039 && (CONST_INT_P (XEXP (op0, 1))
2040 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2041 && mode_signbit_p (mode, op1))
2042 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2043 simplify_gen_binary (XOR, mode, op1,
2044 XEXP (op0, 1)));
2045
2046 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2047 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2048 && GET_CODE (op0) == MULT
2049 && GET_CODE (XEXP (op0, 0)) == NEG)
2050 {
2051 rtx in1, in2;
2052
2053 in1 = XEXP (XEXP (op0, 0), 0);
2054 in2 = XEXP (op0, 1);
2055 return simplify_gen_binary (MINUS, mode, op1,
2056 simplify_gen_binary (MULT, mode,
2057 in1, in2));
2058 }
2059
2060 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2061 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2062 is 1. */
2063 if (COMPARISON_P (op0)
2064 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2065 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2066 && (reversed = reversed_comparison (op0, mode)))
2067 return
2068 simplify_gen_unary (NEG, mode, reversed, mode);
2069
2070 /* If one of the operands is a PLUS or a MINUS, see if we can
2071 simplify this by the associative law.
2072 Don't use the associative law for floating point.
2073 The inaccuracy makes it nonassociative,
2074 and subtle programs can break if operations are associated. */
2075
2076 if (INTEGRAL_MODE_P (mode)
2077 && (plus_minus_operand_p (op0)
2078 || plus_minus_operand_p (op1))
2079 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2080 return tem;
2081
2082 /* Reassociate floating point addition only when the user
2083 specifies associative math operations. */
2084 if (FLOAT_MODE_P (mode)
2085 && flag_associative_math)
2086 {
2087 tem = simplify_associative_operation (code, mode, op0, op1);
2088 if (tem)
2089 return tem;
2090 }
2091 break;
2092
2093 case COMPARE:
2094 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2095 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2096 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2097 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2098 {
2099 rtx xop00 = XEXP (op0, 0);
2100 rtx xop10 = XEXP (op1, 0);
2101
2102 #ifdef HAVE_cc0
2103 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2104 #else
2105 if (REG_P (xop00) && REG_P (xop10)
2106 && GET_MODE (xop00) == GET_MODE (xop10)
2107 && REGNO (xop00) == REGNO (xop10)
2108 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2109 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2110 #endif
2111 return xop00;
2112 }
2113 break;
2114
2115 case MINUS:
2116 /* We can't assume x-x is 0 even with non-IEEE floating point,
2117 but since it is zero except in very strange circumstances, we
2118 will treat it as zero with -ffinite-math-only. */
2119 if (rtx_equal_p (trueop0, trueop1)
2120 && ! side_effects_p (op0)
2121 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2122 return CONST0_RTX (mode);
2123
2124 /* Change subtraction from zero into negation. (0 - x) is the
2125 same as -x when x is NaN, infinite, or finite and nonzero.
2126 But if the mode has signed zeros, and does not round towards
2127 -infinity, then 0 - 0 is 0, not -0. */
2128 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2129 return simplify_gen_unary (NEG, mode, op1, mode);
2130
2131 /* (-1 - a) is ~a. */
2132 if (trueop0 == constm1_rtx)
2133 return simplify_gen_unary (NOT, mode, op1, mode);
2134
2135 /* Subtracting 0 has no effect unless the mode has signed zeros
2136 and supports rounding towards -infinity. In such a case,
2137 0 - 0 is -0. */
2138 if (!(HONOR_SIGNED_ZEROS (mode)
2139 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2140 && trueop1 == CONST0_RTX (mode))
2141 return op0;
2142
2143 /* See if this is something like X * C - X or vice versa or
2144 if the multiplication is written as a shift. If so, we can
2145 distribute and make a new multiply, shift, or maybe just
2146 have X (if C is 2 in the example above). But don't make
2147 something more expensive than we had before. */
2148
2149 if (SCALAR_INT_MODE_P (mode))
2150 {
2151 double_int coeff0, negcoeff1;
2152 rtx lhs = op0, rhs = op1;
2153
2154 coeff0 = double_int_one;
2155 negcoeff1 = double_int_minus_one;
2156
2157 if (GET_CODE (lhs) == NEG)
2158 {
2159 coeff0 = double_int_minus_one;
2160 lhs = XEXP (lhs, 0);
2161 }
2162 else if (GET_CODE (lhs) == MULT
2163 && CONST_INT_P (XEXP (lhs, 1)))
2164 {
2165 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2166 lhs = XEXP (lhs, 0);
2167 }
2168 else if (GET_CODE (lhs) == ASHIFT
2169 && CONST_INT_P (XEXP (lhs, 1))
2170 && INTVAL (XEXP (lhs, 1)) >= 0
2171 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2172 {
2173 coeff0 = double_int_setbit (double_int_zero,
2174 INTVAL (XEXP (lhs, 1)));
2175 lhs = XEXP (lhs, 0);
2176 }
2177
2178 if (GET_CODE (rhs) == NEG)
2179 {
2180 negcoeff1 = double_int_one;
2181 rhs = XEXP (rhs, 0);
2182 }
2183 else if (GET_CODE (rhs) == MULT
2184 && CONST_INT_P (XEXP (rhs, 1)))
2185 {
2186 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2187 rhs = XEXP (rhs, 0);
2188 }
2189 else if (GET_CODE (rhs) == ASHIFT
2190 && CONST_INT_P (XEXP (rhs, 1))
2191 && INTVAL (XEXP (rhs, 1)) >= 0
2192 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2193 {
2194 negcoeff1 = double_int_setbit (double_int_zero,
2195 INTVAL (XEXP (rhs, 1)));
2196 negcoeff1 = double_int_neg (negcoeff1);
2197 rhs = XEXP (rhs, 0);
2198 }
2199
2200 if (rtx_equal_p (lhs, rhs))
2201 {
2202 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2203 rtx coeff;
2204 double_int val;
2205 bool speed = optimize_function_for_speed_p (cfun);
2206
2207 val = double_int_add (coeff0, negcoeff1);
2208 coeff = immed_double_int_const (val, mode);
2209
2210 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2211 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2212 ? tem : 0;
2213 }
2214 }
2215
2216 /* (a - (-b)) -> (a + b). True even for IEEE. */
2217 if (GET_CODE (op1) == NEG)
2218 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2219
2220 /* (-x - c) may be simplified as (-c - x). */
2221 if (GET_CODE (op0) == NEG
2222 && (CONST_INT_P (op1)
2223 || GET_CODE (op1) == CONST_DOUBLE))
2224 {
2225 tem = simplify_unary_operation (NEG, mode, op1, mode);
2226 if (tem)
2227 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2228 }
2229
2230 /* Don't let a relocatable value get a negative coeff. */
2231 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2232 return simplify_gen_binary (PLUS, mode,
2233 op0,
2234 neg_const_int (mode, op1));
2235
2236 /* (x - (x & y)) -> (x & ~y) */
2237 if (GET_CODE (op1) == AND)
2238 {
2239 if (rtx_equal_p (op0, XEXP (op1, 0)))
2240 {
2241 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2242 GET_MODE (XEXP (op1, 1)));
2243 return simplify_gen_binary (AND, mode, op0, tem);
2244 }
2245 if (rtx_equal_p (op0, XEXP (op1, 1)))
2246 {
2247 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2248 GET_MODE (XEXP (op1, 0)));
2249 return simplify_gen_binary (AND, mode, op0, tem);
2250 }
2251 }
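/* Numeric check of (x - (x & y)) -> (x & ~y), illustrative only:
   x = 12 (1100), y = 10 (1010) gives x & y = 8 and 12 - 8 = 4
   (0100); likewise x & ~y = 1100 & 0101 = 0100 = 4. */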
2252
2253 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2254 by reversing the comparison code if valid. */
2255 if (STORE_FLAG_VALUE == 1
2256 && trueop0 == const1_rtx
2257 && COMPARISON_P (op1)
2258 && (reversed = reversed_comparison (op1, mode)))
2259 return reversed;
2260
2261 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2262 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2263 && GET_CODE (op1) == MULT
2264 && GET_CODE (XEXP (op1, 0)) == NEG)
2265 {
2266 rtx in1, in2;
2267
2268 in1 = XEXP (XEXP (op1, 0), 0);
2269 in2 = XEXP (op1, 1);
2270 return simplify_gen_binary (PLUS, mode,
2271 simplify_gen_binary (MULT, mode,
2272 in1, in2),
2273 op0);
2274 }
2275
2276 /* Canonicalize (minus (neg A) (mult B C)) to
2277 (minus (mult (neg B) C) A). */
2278 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2279 && GET_CODE (op1) == MULT
2280 && GET_CODE (op0) == NEG)
2281 {
2282 rtx in1, in2;
2283
2284 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2285 in2 = XEXP (op1, 1);
2286 return simplify_gen_binary (MINUS, mode,
2287 simplify_gen_binary (MULT, mode,
2288 in1, in2),
2289 XEXP (op0, 0));
2290 }
2291
2292 /* If one of the operands is a PLUS or a MINUS, see if we can
2293 simplify this by the associative law. This will, for example,
2294 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2295 Don't use the associative law for floating point.
2296 The inaccuracy makes it nonassociative,
2297 and subtle programs can break if operations are associated. */
2298
2299 if (INTEGRAL_MODE_P (mode)
2300 && (plus_minus_operand_p (op0)
2301 || plus_minus_operand_p (op1))
2302 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2303 return tem;
2304 break;
2305
2306 case MULT:
2307 if (trueop1 == constm1_rtx)
2308 return simplify_gen_unary (NEG, mode, op0, mode);
2309
2310 if (GET_CODE (op0) == NEG)
2311 {
2312 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2313 /* If op1 is a MULT as well and simplify_unary_operation
2314 just moved the NEG to the second operand, then simplify_gen_binary
2315 below could, through simplify_associative_operation, move
2316 the NEG around again and recurse endlessly. */
2317 if (temp
2318 && GET_CODE (op1) == MULT
2319 && GET_CODE (temp) == MULT
2320 && XEXP (op1, 0) == XEXP (temp, 0)
2321 && GET_CODE (XEXP (temp, 1)) == NEG
2322 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2323 temp = NULL_RTX;
2324 if (temp)
2325 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2326 }
2327 if (GET_CODE (op1) == NEG)
2328 {
2329 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2330 /* If op0 is a MULT as well and simplify_unary_operation
2331 just moved the NEG to the second operand, then simplify_gen_binary
2332 below could, through simplify_associative_operation, move
2333 the NEG around again and recurse endlessly. */
2334 if (temp
2335 && GET_CODE (op0) == MULT
2336 && GET_CODE (temp) == MULT
2337 && XEXP (op0, 0) == XEXP (temp, 0)
2338 && GET_CODE (XEXP (temp, 1)) == NEG
2339 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2340 temp = NULL_RTX;
2341 if (temp)
2342 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2343 }
2344
2345 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2346 x is NaN, since x * 0 is then also NaN. Nor is it valid
2347 when the mode has signed zeros, since multiplying a negative
2348 number by 0 will give -0, not 0. */
2349 if (!HONOR_NANS (mode)
2350 && !HONOR_SIGNED_ZEROS (mode)
2351 && trueop1 == CONST0_RTX (mode)
2352 && ! side_effects_p (op0))
2353 return op1;
2354
2355 /* In IEEE floating point, x*1 is not equivalent to x for
2356 signalling NaNs. */
2357 if (!HONOR_SNANS (mode)
2358 && trueop1 == CONST1_RTX (mode))
2359 return op0;
2360
2361 /* Convert multiply by constant power of two into shift unless
2362 we are still generating RTL. This test is a kludge. */
2363 if (CONST_INT_P (trueop1)
2364 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2365 /* If the mode is larger than the host word size, and the
2366 uppermost bit is set, then this isn't a power of two due
2367 to implicit sign extension. */
2368 && (width <= HOST_BITS_PER_WIDE_INT
2369 || val != HOST_BITS_PER_WIDE_INT - 1))
2370 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
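/* E.g. (illustrative) x * 8 has exact_log2 (8) == 3 and becomes
   (ashift x 3).  The width test rejects a CONST_INT whose only set
   bit is the host sign bit, since that value is negative under the
   implicit sign extension rather than a power of two. */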
2371
2372 /* Likewise for multipliers wider than a word. */
2373 if (GET_CODE (trueop1) == CONST_DOUBLE
2374 && (GET_MODE (trueop1) == VOIDmode
2375 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2376 && GET_MODE (op0) == mode
2377 && CONST_DOUBLE_LOW (trueop1) == 0
2378 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2379 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2380 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2381 return simplify_gen_binary (ASHIFT, mode, op0,
2382 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2383
2384 /* x*2 is x+x and x*(-1) is -x */
2385 if (GET_CODE (trueop1) == CONST_DOUBLE
2386 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2387 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2388 && GET_MODE (op0) == mode)
2389 {
2390 REAL_VALUE_TYPE d;
2391 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2392
2393 if (REAL_VALUES_EQUAL (d, dconst2))
2394 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2395
2396 if (!HONOR_SNANS (mode)
2397 && REAL_VALUES_EQUAL (d, dconstm1))
2398 return simplify_gen_unary (NEG, mode, op0, mode);
2399 }
2400
2401 /* Optimize -x * -x as x * x. */
2402 if (FLOAT_MODE_P (mode)
2403 && GET_CODE (op0) == NEG
2404 && GET_CODE (op1) == NEG
2405 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2406 && !side_effects_p (XEXP (op0, 0)))
2407 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2408
2409 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2410 if (SCALAR_FLOAT_MODE_P (mode)
2411 && GET_CODE (op0) == ABS
2412 && GET_CODE (op1) == ABS
2413 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2414 && !side_effects_p (XEXP (op0, 0)))
2415 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2416
2417 /* Reassociate multiplication, but for floating point MULTs
2418 only when the user specifies unsafe math optimizations. */
2419 if (! FLOAT_MODE_P (mode)
2420 || flag_unsafe_math_optimizations)
2421 {
2422 tem = simplify_associative_operation (code, mode, op0, op1);
2423 if (tem)
2424 return tem;
2425 }
2426 break;
2427
2428 case IOR:
2429 if (trueop1 == CONST0_RTX (mode))
2430 return op0;
2431 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2432 return op1;
2433 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2434 return op0;
2435 /* A | (~A) -> -1 */
2436 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2437 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2438 && ! side_effects_p (op0)
2439 && SCALAR_INT_MODE_P (mode))
2440 return constm1_rtx;
2441
2442 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2443 if (CONST_INT_P (op1)
2444 && HWI_COMPUTABLE_MODE_P (mode)
2445 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2446 return op1;
2447
2448 /* Canonicalize (X & C1) | C2. */
2449 if (GET_CODE (op0) == AND
2450 && CONST_INT_P (trueop1)
2451 && CONST_INT_P (XEXP (op0, 1)))
2452 {
2453 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2454 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2455 HOST_WIDE_INT c2 = INTVAL (trueop1);
2456
2457 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2458 if ((c1 & c2) == c1
2459 && !side_effects_p (XEXP (op0, 0)))
2460 return trueop1;
2461
2462 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2463 if (((c1|c2) & mask) == mask)
2464 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2465
2466 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2467 if (((c1 & ~c2) & mask) != (c1 & mask))
2468 {
2469 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2470 gen_int_mode (c1 & ~c2, mode));
2471 return simplify_gen_binary (IOR, mode, tem, op1);
2472 }
2473 }
2474
2475 /* Convert (A & B) | A to A. */
2476 if (GET_CODE (op0) == AND
2477 && (rtx_equal_p (XEXP (op0, 0), op1)
2478 || rtx_equal_p (XEXP (op0, 1), op1))
2479 && ! side_effects_p (XEXP (op0, 0))
2480 && ! side_effects_p (XEXP (op0, 1)))
2481 return op1;
2482
2483 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2484 mode size to (rotate A CX). */
2485
2486 if (GET_CODE (op1) == ASHIFT
2487 || GET_CODE (op1) == SUBREG)
2488 {
2489 opleft = op1;
2490 opright = op0;
2491 }
2492 else
2493 {
2494 opright = op1;
2495 opleft = op0;
2496 }
2497
2498 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2499 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2500 && CONST_INT_P (XEXP (opleft, 1))
2501 && CONST_INT_P (XEXP (opright, 1))
2502 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2503 == GET_MODE_PRECISION (mode)))
2504 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
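/* Illustrative 8-bit instance: with CX = 3 and CY = 5, CX + CY == 8,
   and for x = 10010110 the value (x << 3) | (x >> 5)
   = 10110000 | 00000100 = 10110100, which is x rotated left by 3. */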
2505
2506 /* Same, but for ashift that has been "simplified" to a wider mode
2507 by simplify_shift_const. */
2508
2509 if (GET_CODE (opleft) == SUBREG
2510 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2511 && GET_CODE (opright) == LSHIFTRT
2512 && GET_CODE (XEXP (opright, 0)) == SUBREG
2513 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2514 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2515 && (GET_MODE_SIZE (GET_MODE (opleft))
2516 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2517 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2518 SUBREG_REG (XEXP (opright, 0)))
2519 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2520 && CONST_INT_P (XEXP (opright, 1))
2521 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2522 == GET_MODE_PRECISION (mode)))
2523 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2524 XEXP (SUBREG_REG (opleft), 1));
2525
2526 /* If we have (ior (and X C1) C2), simplify this by making
2527 C1 as small as possible if C1 actually changes. */
2528 if (CONST_INT_P (op1)
2529 && (HWI_COMPUTABLE_MODE_P (mode)
2530 || INTVAL (op1) > 0)
2531 && GET_CODE (op0) == AND
2532 && CONST_INT_P (XEXP (op0, 1))
2533 && CONST_INT_P (op1)
2534 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2535 return simplify_gen_binary (IOR, mode,
2536 simplify_gen_binary
2537 (AND, mode, XEXP (op0, 0),
2538 GEN_INT (UINTVAL (XEXP (op0, 1))
2539 & ~UINTVAL (op1))),
2540 op1);
2541
2542 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2543 a (sign_extend (plus ...)). Then, if OP1 is a CONST_INT and
2544 the PLUS does not affect any of the bits in OP1, we can do
2545 the IOR as a PLUS and we can associate. This is valid if OP1
2546 can be safely shifted left C bits. */
2547 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2548 && GET_CODE (XEXP (op0, 0)) == PLUS
2549 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2550 && CONST_INT_P (XEXP (op0, 1))
2551 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2552 {
2553 int count = INTVAL (XEXP (op0, 1));
2554 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2555
2556 if (mask >> count == INTVAL (trueop1)
2557 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2558 return simplify_gen_binary (ASHIFTRT, mode,
2559 plus_constant (mode, XEXP (op0, 0),
2560 mask),
2561 XEXP (op0, 1));
2562 }
2563
2564 tem = simplify_associative_operation (code, mode, op0, op1);
2565 if (tem)
2566 return tem;
2567 break;
2568
2569 case XOR:
2570 if (trueop1 == CONST0_RTX (mode))
2571 return op0;
2572 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2573 return simplify_gen_unary (NOT, mode, op0, mode);
2574 if (rtx_equal_p (trueop0, trueop1)
2575 && ! side_effects_p (op0)
2576 && GET_MODE_CLASS (mode) != MODE_CC)
2577 return CONST0_RTX (mode);
2578
2579 /* Canonicalize XOR of the most significant bit to PLUS. */
2580 if ((CONST_INT_P (op1)
2581 || GET_CODE (op1) == CONST_DOUBLE)
2582 && mode_signbit_p (mode, op1))
2583 return simplify_gen_binary (PLUS, mode, op0, op1);
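/* The canonicalization above is exact because adding the sign bit
   equals XOR-ing it in: in 8 bits (illustrative), x + 0x80 and
   x ^ 0x80 both flip bit 7 and leave the low bits untouched, the
   carry out of bit 7 being discarded. */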
2584 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2585 if ((CONST_INT_P (op1)
2586 || GET_CODE (op1) == CONST_DOUBLE)
2587 && GET_CODE (op0) == PLUS
2588 && (CONST_INT_P (XEXP (op0, 1))
2589 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2590 && mode_signbit_p (mode, XEXP (op0, 1)))
2591 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2592 simplify_gen_binary (XOR, mode, op1,
2593 XEXP (op0, 1)));
2594
2595 /* If we are XORing two things that have no bits in common,
2596 convert them into an IOR. This helps to detect rotations
2597 encoded as shift pairs and enables further simplifications. */
2598
2599 if (HWI_COMPUTABLE_MODE_P (mode)
2600 && (nonzero_bits (op0, mode)
2601 & nonzero_bits (op1, mode)) == 0)
2602 return (simplify_gen_binary (IOR, mode, op0, op1));
2603
2604 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2605 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2606 (NOT y). */
2607 {
2608 int num_negated = 0;
2609
2610 if (GET_CODE (op0) == NOT)
2611 num_negated++, op0 = XEXP (op0, 0);
2612 if (GET_CODE (op1) == NOT)
2613 num_negated++, op1 = XEXP (op1, 0);
2614
2615 if (num_negated == 2)
2616 return simplify_gen_binary (XOR, mode, op0, op1);
2617 else if (num_negated == 1)
2618 return simplify_gen_unary (NOT, mode,
2619 simplify_gen_binary (XOR, mode, op0, op1),
2620 mode);
2621 }
2622
2623 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2624 correspond to a machine insn or result in further simplifications
2625 if B is a constant. */
2626
2627 if (GET_CODE (op0) == AND
2628 && rtx_equal_p (XEXP (op0, 1), op1)
2629 && ! side_effects_p (op1))
2630 return simplify_gen_binary (AND, mode,
2631 simplify_gen_unary (NOT, mode,
2632 XEXP (op0, 0), mode),
2633 op1);
2634
2635 else if (GET_CODE (op0) == AND
2636 && rtx_equal_p (XEXP (op0, 0), op1)
2637 && ! side_effects_p (op1))
2638 return simplify_gen_binary (AND, mode,
2639 simplify_gen_unary (NOT, mode,
2640 XEXP (op0, 1), mode),
2641 op1);
2642
2643 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2644 we can transform like this:
2645 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2646 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2647 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2648 Attempt a few simplifications when B and C are both constants. */
2649 if (GET_CODE (op0) == AND
2650 && CONST_INT_P (op1)
2651 && CONST_INT_P (XEXP (op0, 1)))
2652 {
2653 rtx a = XEXP (op0, 0);
2654 rtx b = XEXP (op0, 1);
2655 rtx c = op1;
2656 HOST_WIDE_INT bval = INTVAL (b);
2657 HOST_WIDE_INT cval = INTVAL (c);
2658
2659 rtx na_c
2660 = simplify_binary_operation (AND, mode,
2661 simplify_gen_unary (NOT, mode, a, mode),
2662 c);
2663 if ((~cval & bval) == 0)
2664 {
2665 /* Try to simplify ~A&C | ~B&C. */
2666 if (na_c != NULL_RTX)
2667 return simplify_gen_binary (IOR, mode, na_c,
2668 GEN_INT (~bval & cval));
2669 }
2670 else
2671 {
2672 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2673 if (na_c == const0_rtx)
2674 {
2675 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2676 GEN_INT (~cval & bval));
2677 return simplify_gen_binary (IOR, mode, a_nc_b,
2678 GEN_INT (~bval & cval));
2679 }
2680 }
2681 }
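/* Concrete instance of the rules above (illustrative): with
   B = 0011 and C = 0111, every bit of B is set in C, so
   ~C & B == 0; for A = 0101, (A&B)^C = 0001 ^ 0111 = 0110, and
   ~A&C | ~B&C = 0010 | 0100 = 0110 as well. */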
2682
2683 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2684 comparison if STORE_FLAG_VALUE is 1. */
2685 if (STORE_FLAG_VALUE == 1
2686 && trueop1 == const1_rtx
2687 && COMPARISON_P (op0)
2688 && (reversed = reversed_comparison (op0, mode)))
2689 return reversed;
2690
2691 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2692 is (lt foo (const_int 0)), so we can perform the above
2693 simplification if STORE_FLAG_VALUE is 1. */
2694
2695 if (STORE_FLAG_VALUE == 1
2696 && trueop1 == const1_rtx
2697 && GET_CODE (op0) == LSHIFTRT
2698 && CONST_INT_P (XEXP (op0, 1))
2699 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2700 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2701
2702 /* (xor (comparison foo bar) (const_int sign-bit))
2703 when STORE_FLAG_VALUE is the sign bit. */
2704 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2705 && trueop1 == const_true_rtx
2706 && COMPARISON_P (op0)
2707 && (reversed = reversed_comparison (op0, mode)))
2708 return reversed;
2709
2710 tem = simplify_associative_operation (code, mode, op0, op1);
2711 if (tem)
2712 return tem;
2713 break;
2714
2715 case AND:
2716 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2717 return trueop1;
2718 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2719 return op0;
2720 if (HWI_COMPUTABLE_MODE_P (mode))
2721 {
2722 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2723 HOST_WIDE_INT nzop1;
2724 if (CONST_INT_P (trueop1))
2725 {
2726 HOST_WIDE_INT val1 = INTVAL (trueop1);
2727 /* If we are turning off bits already known off in OP0, we need
2728 not do an AND. */
2729 if ((nzop0 & ~val1) == 0)
2730 return op0;
2731 }
2732 nzop1 = nonzero_bits (trueop1, mode);
2733 /* If we are clearing all the nonzero bits, the result is zero. */
2734 if ((nzop1 & nzop0) == 0
2735 && !side_effects_p (op0) && !side_effects_p (op1))
2736 return CONST0_RTX (mode);
2737 }
2738 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2739 && GET_MODE_CLASS (mode) != MODE_CC)
2740 return op0;
2741 /* A & (~A) -> 0 */
2742 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2743 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2744 && ! side_effects_p (op0)
2745 && GET_MODE_CLASS (mode) != MODE_CC)
2746 return CONST0_RTX (mode);
2747
2748 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2749 there are no nonzero bits of C outside of X's mode. */
2750 if ((GET_CODE (op0) == SIGN_EXTEND
2751 || GET_CODE (op0) == ZERO_EXTEND)
2752 && CONST_INT_P (trueop1)
2753 && HWI_COMPUTABLE_MODE_P (mode)
2754 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2755 & UINTVAL (trueop1)) == 0)
2756 {
2757 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2758 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2759 gen_int_mode (INTVAL (trueop1),
2760 imode));
2761 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2762 }
2763
2764 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2765 we might be able to further simplify the AND with X and potentially
2766 remove the truncation altogether. */
2767 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2768 {
2769 rtx x = XEXP (op0, 0);
2770 enum machine_mode xmode = GET_MODE (x);
2771 tem = simplify_gen_binary (AND, xmode, x,
2772 gen_int_mode (INTVAL (trueop1), xmode));
2773 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2774 }
2775
2776 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2777 if (GET_CODE (op0) == IOR
2778 && CONST_INT_P (trueop1)
2779 && CONST_INT_P (XEXP (op0, 1)))
2780 {
2781 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2782 return simplify_gen_binary (IOR, mode,
2783 simplify_gen_binary (AND, mode,
2784 XEXP (op0, 0), op1),
2785 gen_int_mode (tmp, mode));
2786 }
2787
2788 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2789 insn (and may simplify more). */
2790 if (GET_CODE (op0) == XOR
2791 && rtx_equal_p (XEXP (op0, 0), op1)
2792 && ! side_effects_p (op1))
2793 return simplify_gen_binary (AND, mode,
2794 simplify_gen_unary (NOT, mode,
2795 XEXP (op0, 1), mode),
2796 op1);
2797
2798 if (GET_CODE (op0) == XOR
2799 && rtx_equal_p (XEXP (op0, 1), op1)
2800 && ! side_effects_p (op1))
2801 return simplify_gen_binary (AND, mode,
2802 simplify_gen_unary (NOT, mode,
2803 XEXP (op0, 0), mode),
2804 op1);
2805
2806 /* Similarly for (~(A ^ B)) & A. */
2807 if (GET_CODE (op0) == NOT
2808 && GET_CODE (XEXP (op0, 0)) == XOR
2809 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2810 && ! side_effects_p (op1))
2811 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2812
2813 if (GET_CODE (op0) == NOT
2814 && GET_CODE (XEXP (op0, 0)) == XOR
2815 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2816 && ! side_effects_p (op1))
2817 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2818
2819 /* Convert (A | B) & A to A. */
2820 if (GET_CODE (op0) == IOR
2821 && (rtx_equal_p (XEXP (op0, 0), op1)
2822 || rtx_equal_p (XEXP (op0, 1), op1))
2823 && ! side_effects_p (XEXP (op0, 0))
2824 && ! side_effects_p (XEXP (op0, 1)))
2825 return op1;
2826
2827 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2828 ((A & N) + B) & M -> (A + B) & M
2829 Similarly if (N & M) == 0,
2830 ((A | N) + B) & M -> (A + B) & M
2831 and for - instead of + and/or ^ instead of |.
2832 Also, if (N & M) == 0, then
2833 (A +- N) & M -> A & M. */
2834 if (CONST_INT_P (trueop1)
2835 && HWI_COMPUTABLE_MODE_P (mode)
2836 && ~UINTVAL (trueop1)
2837 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2838 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2839 {
2840 rtx pmop[2];
2841 int which;
2842
2843 pmop[0] = XEXP (op0, 0);
2844 pmop[1] = XEXP (op0, 1);
2845
2846 if (CONST_INT_P (pmop[1])
2847 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2848 return simplify_gen_binary (AND, mode, pmop[0], op1);
2849
2850 for (which = 0; which < 2; which++)
2851 {
2852 tem = pmop[which];
2853 switch (GET_CODE (tem))
2854 {
2855 case AND:
2856 if (CONST_INT_P (XEXP (tem, 1))
2857 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2858 == UINTVAL (trueop1))
2859 pmop[which] = XEXP (tem, 0);
2860 break;
2861 case IOR:
2862 case XOR:
2863 if (CONST_INT_P (XEXP (tem, 1))
2864 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2865 pmop[which] = XEXP (tem, 0);
2866 break;
2867 default:
2868 break;
2869 }
2870 }
2871
2872 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2873 {
2874 tem = simplify_gen_binary (GET_CODE (op0), mode,
2875 pmop[0], pmop[1]);
2876 return simplify_gen_binary (code, mode, tem, op1);
2877 }
2878 }
2879
2880 /* (and X (ior (not X) Y)) -> (and X Y) */
2881 if (GET_CODE (op1) == IOR
2882 && GET_CODE (XEXP (op1, 0)) == NOT
2883 && op0 == XEXP (XEXP (op1, 0), 0))
2884 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2885
2886 /* (and (ior (not X) Y) X) -> (and X Y) */
2887 if (GET_CODE (op0) == IOR
2888 && GET_CODE (XEXP (op0, 0)) == NOT
2889 && op1 == XEXP (XEXP (op0, 0), 0))
2890 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2891
2892 tem = simplify_associative_operation (code, mode, op0, op1);
2893 if (tem)
2894 return tem;
2895 break;
2896
2897 case UDIV:
2898 /* 0/x is 0 (or x&0 if x has side-effects). */
2899 if (trueop0 == CONST0_RTX (mode))
2900 {
2901 if (side_effects_p (op1))
2902 return simplify_gen_binary (AND, mode, op1, trueop0);
2903 return trueop0;
2904 }
2905 /* x/1 is x. */
2906 if (trueop1 == CONST1_RTX (mode))
2907 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2908 /* Convert divide by power of two into shift. */
2909 if (CONST_INT_P (trueop1)
2910 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2911 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
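/* E.g. the rule just above turns (udiv x 8) into (lshiftrt x 3),
   illustratively; exact_log2 must be strictly positive here, since
   division by 1 was already handled. */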
2912 break;
2913
2914 case DIV:
2915 /* Handle floating point and integers separately. */
2916 if (SCALAR_FLOAT_MODE_P (mode))
2917 {
2918 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2919 safe for modes with NaNs, since 0.0 / 0.0 will then be
2920 NaN rather than 0.0. Nor is it safe for modes with signed
2921 zeros, since dividing 0 by a negative number gives -0.0. */
2922 if (trueop0 == CONST0_RTX (mode)
2923 && !HONOR_NANS (mode)
2924 && !HONOR_SIGNED_ZEROS (mode)
2925 && ! side_effects_p (op1))
2926 return op0;
2927 /* x/1.0 is x. */
2928 if (trueop1 == CONST1_RTX (mode)
2929 && !HONOR_SNANS (mode))
2930 return op0;
2931
2932 if (GET_CODE (trueop1) == CONST_DOUBLE
2933 && trueop1 != CONST0_RTX (mode))
2934 {
2935 REAL_VALUE_TYPE d;
2936 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2937
2938 /* x/-1.0 is -x. */
2939 if (REAL_VALUES_EQUAL (d, dconstm1)
2940 && !HONOR_SNANS (mode))
2941 return simplify_gen_unary (NEG, mode, op0, mode);
2942
2943 /* Change FP division by a constant into multiplication.
2944 Only do this with -freciprocal-math. */
2945 if (flag_reciprocal_math
2946 && !REAL_VALUES_EQUAL (d, dconst0))
2947 {
2948 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2949 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2950 return simplify_gen_binary (MULT, mode, op0, tem);
2951 }
2952 }
2953 }
2954 else if (SCALAR_INT_MODE_P (mode))
2955 {
2956 /* 0/x is 0 (or x&0 if x has side-effects). */
2957 if (trueop0 == CONST0_RTX (mode)
2958 && !cfun->can_throw_non_call_exceptions)
2959 {
2960 if (side_effects_p (op1))
2961 return simplify_gen_binary (AND, mode, op1, trueop0);
2962 return trueop0;
2963 }
2964 /* x/1 is x. */
2965 if (trueop1 == CONST1_RTX (mode))
2966 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2967 /* x/-1 is -x. */
2968 if (trueop1 == constm1_rtx)
2969 {
2970 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2971 return simplify_gen_unary (NEG, mode, x, mode);
2972 }
2973 }
2974 break;
2975
2976 case UMOD:
2977 /* 0%x is 0 (or x&0 if x has side-effects). */
2978 if (trueop0 == CONST0_RTX (mode))
2979 {
2980 if (side_effects_p (op1))
2981 return simplify_gen_binary (AND, mode, op1, trueop0);
2982 return trueop0;
2983 }
2984 /* x%1 is 0 (or x&0 if x has side-effects). */
2985 if (trueop1 == CONST1_RTX (mode))
2986 {
2987 if (side_effects_p (op0))
2988 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2989 return CONST0_RTX (mode);
2990 }
2991 /* Implement modulus by power of two as AND. */
2992 if (CONST_INT_P (trueop1)
2993 && exact_log2 (UINTVAL (trueop1)) > 0)
2994 return simplify_gen_binary (AND, mode, op0,
2995 GEN_INT (INTVAL (op1) - 1));
2996 break;
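/* Unsigned example for the rule above (illustrative): x % 8 == x & 7,
   the low three bits being exactly the remainder modulo 2^3. */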
2997
2998 case MOD:
2999 /* 0%x is 0 (or x&0 if x has side-effects). */
3000 if (trueop0 == CONST0_RTX (mode))
3001 {
3002 if (side_effects_p (op1))
3003 return simplify_gen_binary (AND, mode, op1, trueop0);
3004 return trueop0;
3005 }
3006 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3007 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3008 {
3009 if (side_effects_p (op0))
3010 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3011 return CONST0_RTX (mode);
3012 }
3013 break;
3014
3015 case ROTATERT:
3016 case ROTATE:
3017 case ASHIFTRT:
3018 if (trueop1 == CONST0_RTX (mode))
3019 return op0;
3020 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3021 return op0;
3022 /* Rotating ~0 always results in ~0. */
3023 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3024 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3025 && ! side_effects_p (op1))
3026 return op0;
3027 canonicalize_shift:
3028 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3029 {
3030 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3031 if (val != INTVAL (op1))
3032 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3033 }
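/* Illustrative: with SHIFT_COUNT_TRUNCATED and a 32-bit mode, a
   count of 33 is masked to 33 & 31 == 1, so (ashiftrt x 33)
   canonicalizes to (ashiftrt x 1). */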
3034 break;
3035
3036 case ASHIFT:
3037 case SS_ASHIFT:
3038 case US_ASHIFT:
3039 if (trueop1 == CONST0_RTX (mode))
3040 return op0;
3041 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3042 return op0;
3043 goto canonicalize_shift;
3044
3045 case LSHIFTRT:
3046 if (trueop1 == CONST0_RTX (mode))
3047 return op0;
3048 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3049 return op0;
3050 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3051 if (GET_CODE (op0) == CLZ
3052 && CONST_INT_P (trueop1)
3053 && STORE_FLAG_VALUE == 1
3054 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3055 {
3056 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3057 unsigned HOST_WIDE_INT zero_val = 0;
3058
3059 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3060 && zero_val == GET_MODE_PRECISION (imode)
3061 && INTVAL (trueop1) == exact_log2 (zero_val))
3062 return simplify_gen_relational (EQ, mode, imode,
3063 XEXP (op0, 0), const0_rtx);
3064 }
3065 goto canonicalize_shift;
3066
3067 case SMIN:
3068 if (width <= HOST_BITS_PER_WIDE_INT
3069 && mode_signbit_p (mode, trueop1)
3070 && ! side_effects_p (op0))
3071 return op1;
3072 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3073 return op0;
3074 tem = simplify_associative_operation (code, mode, op0, op1);
3075 if (tem)
3076 return tem;
3077 break;
3078
3079 case SMAX:
3080 if (width <= HOST_BITS_PER_WIDE_INT
3081 && CONST_INT_P (trueop1)
3082 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3083 && ! side_effects_p (op0))
3084 return op1;
3085 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3086 return op0;
3087 tem = simplify_associative_operation (code, mode, op0, op1);
3088 if (tem)
3089 return tem;
3090 break;
3091
3092 case UMIN:
3093 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3094 return op1;
3095 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3096 return op0;
3097 tem = simplify_associative_operation (code, mode, op0, op1);
3098 if (tem)
3099 return tem;
3100 break;
3101
3102 case UMAX:
3103 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3104 return op1;
3105 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3106 return op0;
3107 tem = simplify_associative_operation (code, mode, op0, op1);
3108 if (tem)
3109 return tem;
3110 break;
3111
3112 case SS_PLUS:
3113 case US_PLUS:
3114 case SS_MINUS:
3115 case US_MINUS:
3116 case SS_MULT:
3117 case US_MULT:
3118 case SS_DIV:
3119 case US_DIV:
3120 /* ??? There are simplifications that can be done. */
3121 return 0;
3122
3123 case VEC_SELECT:
3124 if (!VECTOR_MODE_P (mode))
3125 {
3126 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3127 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3128 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3129 gcc_assert (XVECLEN (trueop1, 0) == 1);
3130 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3131
3132 if (GET_CODE (trueop0) == CONST_VECTOR)
3133 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3134 (trueop1, 0, 0)));
3135
3136 /* Extract a scalar element from a nested VEC_SELECT expression
3137 (with an optional nested VEC_CONCAT expression). Some targets
3138 (i386) extract a scalar element from a vector using a chain of
3139 nested VEC_SELECT expressions. When the input operand is a
3140 memory operand, this operation can be simplified to a simple
3141 scalar load from an offset memory address. */
3142 if (GET_CODE (trueop0) == VEC_SELECT)
3143 {
3144 rtx op0 = XEXP (trueop0, 0);
3145 rtx op1 = XEXP (trueop0, 1);
3146
3147 enum machine_mode opmode = GET_MODE (op0);
3148 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3149 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3150
3151 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3152 int elem;
3153
3154 rtvec vec;
3155 rtx tmp_op, tmp;
3156
3157 gcc_assert (GET_CODE (op1) == PARALLEL);
3158 gcc_assert (i < n_elts);
3159
3160 /* Select the element pointed to by the nested selector. */
3161 elem = INTVAL (XVECEXP (op1, 0, i));
3162
3163 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3164 if (GET_CODE (op0) == VEC_CONCAT)
3165 {
3166 rtx op00 = XEXP (op0, 0);
3167 rtx op01 = XEXP (op0, 1);
3168
3169 enum machine_mode mode00, mode01;
3170 int n_elts00, n_elts01;
3171
3172 mode00 = GET_MODE (op00);
3173 mode01 = GET_MODE (op01);
3174
3175 /* Find out number of elements of each operand. */
3176 if (VECTOR_MODE_P (mode00))
3177 {
3178 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3179 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3180 }
3181 else
3182 n_elts00 = 1;
3183
3184 if (VECTOR_MODE_P (mode01))
3185 {
3186 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3187 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3188 }
3189 else
3190 n_elts01 = 1;
3191
3192 gcc_assert (n_elts == n_elts00 + n_elts01);
3193
3194 /* Select correct operand of VEC_CONCAT
3195 and adjust selector. */
3196 if (elem < n_elts01)
3197 tmp_op = op00;
3198 else
3199 {
3200 tmp_op = op01;
3201 elem -= n_elts00;
3202 }
3203 }
3204 else
3205 tmp_op = op0;
3206
3207 vec = rtvec_alloc (1);
3208 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3209
3210 tmp = gen_rtx_fmt_ee (code, mode,
3211 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3212 return tmp;
3213 }
3214 if (GET_CODE (trueop0) == VEC_DUPLICATE
3215 && GET_MODE (XEXP (trueop0, 0)) == mode)
3216 return XEXP (trueop0, 0);
3217 }
3218 else
3219 {
3220 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3221 gcc_assert (GET_MODE_INNER (mode)
3222 == GET_MODE_INNER (GET_MODE (trueop0)));
3223 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3224
3225 if (GET_CODE (trueop0) == CONST_VECTOR)
3226 {
3227 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3228 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3229 rtvec v = rtvec_alloc (n_elts);
3230 unsigned int i;
3231
3232 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3233 for (i = 0; i < n_elts; i++)
3234 {
3235 rtx x = XVECEXP (trueop1, 0, i);
3236
3237 gcc_assert (CONST_INT_P (x));
3238 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3239 INTVAL (x));
3240 }
3241
3242 return gen_rtx_CONST_VECTOR (mode, v);
3243 }
3244
3245 /* If we build {a,b} then permute it, build the result directly. */
3246 if (XVECLEN (trueop1, 0) == 2
3247 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3248 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3249 && GET_CODE (trueop0) == VEC_CONCAT
3250 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3251 && GET_MODE (XEXP (trueop0, 0)) == mode
3252 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3253 && GET_MODE (XEXP (trueop0, 1)) == mode)
3254 {
3255 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3256 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3257 rtx subop0, subop1;
3258
3259 gcc_assert (i0 < 4 && i1 < 4);
3260 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3261 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3262
3263 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3264 }
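/* Illustrative: for the selector {3, 0} applied to
   (vec_concat (vec_concat a b) (vec_concat c d)), i0 == 3 picks d
   and i1 == 0 picks a, so the result is built directly as
   (vec_concat d a). */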
3265 }
3266
3267 if (XVECLEN (trueop1, 0) == 1
3268 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3269 && GET_CODE (trueop0) == VEC_CONCAT)
3270 {
3271 rtx vec = trueop0;
3272 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3273
3274 /* Try to find the element in the VEC_CONCAT. */
3275 while (GET_MODE (vec) != mode
3276 && GET_CODE (vec) == VEC_CONCAT)
3277 {
3278 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3279 if (offset < vec_size)
3280 vec = XEXP (vec, 0);
3281 else
3282 {
3283 offset -= vec_size;
3284 vec = XEXP (vec, 1);
3285 }
3286 vec = avoid_constant_pool_reference (vec);
3287 }
3288
3289 if (GET_MODE (vec) == mode)
3290 return vec;
3291 }
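/* Illustrative walk: selecting element 2 (SImode, byte offset 8)
   from (vec_concat:V4SI A:V2SI B:V2SI) skips past A's 8 bytes and
   descends into B with offset 0; if B is itself
   (vec_concat:V2SI x y), the loop terminates at x. */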
3292
3293 return 0;
3294 case VEC_CONCAT:
3295 {
3296 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3297 ? GET_MODE (trueop0)
3298 : GET_MODE_INNER (mode));
3299 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3300 ? GET_MODE (trueop1)
3301 : GET_MODE_INNER (mode));
3302
3303 gcc_assert (VECTOR_MODE_P (mode));
3304 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3305 == GET_MODE_SIZE (mode));
3306
3307 if (VECTOR_MODE_P (op0_mode))
3308 gcc_assert (GET_MODE_INNER (mode)
3309 == GET_MODE_INNER (op0_mode));
3310 else
3311 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3312
3313 if (VECTOR_MODE_P (op1_mode))
3314 gcc_assert (GET_MODE_INNER (mode)
3315 == GET_MODE_INNER (op1_mode));
3316 else
3317 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3318
3319 if ((GET_CODE (trueop0) == CONST_VECTOR
3320 || CONST_INT_P (trueop0)
3321 || GET_CODE (trueop0) == CONST_DOUBLE)
3322 && (GET_CODE (trueop1) == CONST_VECTOR
3323 || CONST_INT_P (trueop1)
3324 || GET_CODE (trueop1) == CONST_DOUBLE))
3325 {
3326 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3327 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3328 rtvec v = rtvec_alloc (n_elts);
3329 unsigned int i;
3330 unsigned in_n_elts = 1;
3331
3332 if (VECTOR_MODE_P (op0_mode))
3333 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3334 for (i = 0; i < n_elts; i++)
3335 {
3336 if (i < in_n_elts)
3337 {
3338 if (!VECTOR_MODE_P (op0_mode))
3339 RTVEC_ELT (v, i) = trueop0;
3340 else
3341 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3342 }
3343 else
3344 {
3345 if (!VECTOR_MODE_P (op1_mode))
3346 RTVEC_ELT (v, i) = trueop1;
3347 else
3348 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3349 i - in_n_elts);
3350 }
3351 }
3352
3353 return gen_rtx_CONST_VECTOR (mode, v);
3354 }
3355 }
3356 return 0;
3357
3358 default:
3359 gcc_unreachable ();
3360 }
3361
3362 return 0;
3363 }
3364
3365 rtx
3366 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3367 rtx op0, rtx op1)
3368 {
3369 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3370 HOST_WIDE_INT val;
3371 unsigned int width = GET_MODE_PRECISION (mode);
3372
3373 if (VECTOR_MODE_P (mode)
3374 && code != VEC_CONCAT
3375 && GET_CODE (op0) == CONST_VECTOR
3376 && GET_CODE (op1) == CONST_VECTOR)
3377 {
3378 unsigned n_elts = GET_MODE_NUNITS (mode);
3379 enum machine_mode op0mode = GET_MODE (op0);
3380 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3381 enum machine_mode op1mode = GET_MODE (op1);
3382 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3383 rtvec v = rtvec_alloc (n_elts);
3384 unsigned int i;
3385
3386 gcc_assert (op0_n_elts == n_elts);
3387 gcc_assert (op1_n_elts == n_elts);
3388 for (i = 0; i < n_elts; i++)
3389 {
3390 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3391 CONST_VECTOR_ELT (op0, i),
3392 CONST_VECTOR_ELT (op1, i));
3393 if (!x)
3394 return 0;
3395 RTVEC_ELT (v, i) = x;
3396 }
3397
3398 return gen_rtx_CONST_VECTOR (mode, v);
3399 }
3400
3401 if (VECTOR_MODE_P (mode)
3402 && code == VEC_CONCAT
3403 && (CONST_INT_P (op0)
3404 || GET_CODE (op0) == CONST_DOUBLE
3405 || GET_CODE (op0) == CONST_FIXED)
3406 && (CONST_INT_P (op1)
3407 || GET_CODE (op1) == CONST_DOUBLE
3408 || GET_CODE (op1) == CONST_FIXED))
3409 {
3410 unsigned n_elts = GET_MODE_NUNITS (mode);
3411 rtvec v = rtvec_alloc (n_elts);
3412
3413 gcc_assert (n_elts >= 2);
3414 if (n_elts == 2)
3415 {
3416 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3417 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3418
3419 RTVEC_ELT (v, 0) = op0;
3420 RTVEC_ELT (v, 1) = op1;
3421 }
3422 else
3423 {
3424 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3425 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3426 unsigned i;
3427
3428 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3429 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3430 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3431
3432 for (i = 0; i < op0_n_elts; ++i)
3433 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3434 for (i = 0; i < op1_n_elts; ++i)
3435 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3436 }
3437
3438 return gen_rtx_CONST_VECTOR (mode, v);
3439 }
3440
3441 if (SCALAR_FLOAT_MODE_P (mode)
3442 && GET_CODE (op0) == CONST_DOUBLE
3443 && GET_CODE (op1) == CONST_DOUBLE
3444 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3445 {
3446 if (code == AND
3447 || code == IOR
3448 || code == XOR)
3449 {
3450 long tmp0[4];
3451 long tmp1[4];
3452 REAL_VALUE_TYPE r;
3453 int i;
3454
3455 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3456 GET_MODE (op0));
3457 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3458 GET_MODE (op1));
3459 for (i = 0; i < 4; i++)
3460 {
3461 switch (code)
3462 {
3463 case AND:
3464 tmp0[i] &= tmp1[i];
3465 break;
3466 case IOR:
3467 tmp0[i] |= tmp1[i];
3468 break;
3469 case XOR:
3470 tmp0[i] ^= tmp1[i];
3471 break;
3472 default:
3473 gcc_unreachable ();
3474 }
3475 }
3476 real_from_target (&r, tmp0, mode);
3477 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3478 }
3479 else
3480 {
3481 REAL_VALUE_TYPE f0, f1, value, result;
3482 bool inexact;
3483
3484 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3485 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3486 real_convert (&f0, mode, &f0);
3487 real_convert (&f1, mode, &f1);
3488
3489 if (HONOR_SNANS (mode)
3490 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3491 return 0;
3492
3493 if (code == DIV
3494 && REAL_VALUES_EQUAL (f1, dconst0)
3495 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3496 return 0;
3497
3498 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3499 && flag_trapping_math
3500 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3501 {
3502 int s0 = REAL_VALUE_NEGATIVE (f0);
3503 int s1 = REAL_VALUE_NEGATIVE (f1);
3504
3505 switch (code)
3506 {
3507 case PLUS:
3508 /* Inf + -Inf = NaN plus exception. */
3509 if (s0 != s1)
3510 return 0;
3511 break;
3512 case MINUS:
3513 /* Inf - Inf = NaN plus exception. */
3514 if (s0 == s1)
3515 return 0;
3516 break;
3517 case DIV:
3518 /* Inf / Inf = NaN plus exception. */
3519 return 0;
3520 default:
3521 break;
3522 }
3523 }
3524
3525 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3526 && flag_trapping_math
3527 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3528 || (REAL_VALUE_ISINF (f1)
3529 && REAL_VALUES_EQUAL (f0, dconst0))))
3530 /* Inf * 0 = NaN plus exception. */
3531 return 0;
3532
3533 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3534 &f0, &f1);
3535 real_convert (&result, mode, &value);
3536
3537 /* Don't constant fold this floating point operation if
3538 the result has overflowed and flag_trapping_math is set. */
3539
3540 if (flag_trapping_math
3541 && MODE_HAS_INFINITIES (mode)
3542 && REAL_VALUE_ISINF (result)
3543 && !REAL_VALUE_ISINF (f0)
3544 && !REAL_VALUE_ISINF (f1))
3545 /* Overflow plus exception. */
3546 return 0;
3547
3548 /* Don't constant fold this floating point operation if the
3549 result may depend upon the run-time rounding mode and
3550 flag_rounding_math is set, or if GCC's software emulation
3551 is unable to accurately represent the result. */
3552
3553 if ((flag_rounding_math
3554 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3555 && (inexact || !real_identical (&result, &value)))
3556 return NULL_RTX;
3557
3558 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3559 }
3560 }
3561
3562 /* We can fold some multi-word operations. */
3563 if (GET_MODE_CLASS (mode) == MODE_INT
3564 && width == HOST_BITS_PER_DOUBLE_INT
3565 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3566 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3567 {
3568 double_int o0, o1, res, tmp;
3569
3570 o0 = rtx_to_double_int (op0);
3571 o1 = rtx_to_double_int (op1);
3572
3573 switch (code)
3574 {
3575 case MINUS:
3576 /* A - B == A + (-B). */
3577 o1 = double_int_neg (o1);
3578
3579 /* Fall through.... */
3580
3581 case PLUS:
3582 res = double_int_add (o0, o1);
3583 break;
3584
3585 case MULT:
3586 res = double_int_mul (o0, o1);
3587 break;
3588
3589 case DIV:
3590 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3591 o0.low, o0.high, o1.low, o1.high,
3592 &res.low, &res.high,
3593 &tmp.low, &tmp.high))
3594 return 0;
3595 break;
3596
3597 case MOD:
3598 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3599 o0.low, o0.high, o1.low, o1.high,
3600 &tmp.low, &tmp.high,
3601 &res.low, &res.high))
3602 return 0;
3603 break;
3604
3605 case UDIV:
3606 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3607 o0.low, o0.high, o1.low, o1.high,
3608 &res.low, &res.high,
3609 &tmp.low, &tmp.high))
3610 return 0;
3611 break;
3612
3613 case UMOD:
3614 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3615 o0.low, o0.high, o1.low, o1.high,
3616 &tmp.low, &tmp.high,
3617 &res.low, &res.high))
3618 return 0;
3619 break;
3620
3621 case AND:
3622 res = double_int_and (o0, o1);
3623 break;
3624
3625 case IOR:
3626 res = double_int_ior (o0, o1);
3627 break;
3628
3629 case XOR:
3630 res = double_int_xor (o0, o1);
3631 break;
3632
3633 case SMIN:
3634 res = double_int_smin (o0, o1);
3635 break;
3636
3637 case SMAX:
3638 res = double_int_smax (o0, o1);
3639 break;
3640
3641 case UMIN:
3642 res = double_int_umin (o0, o1);
3643 break;
3644
3645 case UMAX:
3646 res = double_int_umax (o0, o1);
3647 break;
3648
3649 case LSHIFTRT: case ASHIFTRT:
3650 case ASHIFT:
3651 case ROTATE: case ROTATERT:
3652 {
3653 unsigned HOST_WIDE_INT cnt;
3654
3655 if (SHIFT_COUNT_TRUNCATED)
3656 {
3657 o1.high = 0;
3658 o1.low &= GET_MODE_PRECISION (mode) - 1;
3659 }
3660
3661 if (!double_int_fits_in_uhwi_p (o1)
3662 || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
3663 return 0;
3664
3665 cnt = double_int_to_uhwi (o1);
3666
3667 if (code == LSHIFTRT || code == ASHIFTRT)
3668 res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
3669 code == ASHIFTRT);
3670 else if (code == ASHIFT)
3671 res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
3672 true);
3673 else if (code == ROTATE)
3674 res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
3675 else /* code == ROTATERT */
3676 res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
3677 }
3678 break;
3679
3680 default:
3681 return 0;
3682 }
3683
3684 return immed_double_int_const (res, mode);
3685 }
3686
3687 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3688 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3689 {
3690 /* Get the integer argument values in two forms:
3691 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3692
3693 arg0 = INTVAL (op0);
3694 arg1 = INTVAL (op1);
3695
3696 if (width < HOST_BITS_PER_WIDE_INT)
3697 {
3698 arg0 &= GET_MODE_MASK (mode);
3699 arg1 &= GET_MODE_MASK (mode);
3700
3701 arg0s = arg0;
3702 if (val_signbit_known_set_p (mode, arg0s))
3703 arg0s |= ~GET_MODE_MASK (mode);
3704
3705 arg1s = arg1;
3706 if (val_signbit_known_set_p (mode, arg1s))
3707 arg1s |= ~GET_MODE_MASK (mode);
3708 }
3709 else
3710 {
3711 arg0s = arg0;
3712 arg1s = arg1;
3713 }
3714
3715 /* Compute the value of the arithmetic. */
3716
3717 switch (code)
3718 {
3719 case PLUS:
3720 val = arg0s + arg1s;
3721 break;
3722
3723 case MINUS:
3724 val = arg0s - arg1s;
3725 break;
3726
3727 case MULT:
3728 val = arg0s * arg1s;
3729 break;
3730
3731 case DIV:
3732 if (arg1s == 0
3733 || ((unsigned HOST_WIDE_INT) arg0s
3734 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3735 && arg1s == -1))
3736 return 0;
3737 val = arg0s / arg1s;
3738 break;
3739
3740 case MOD:
3741 if (arg1s == 0
3742 || ((unsigned HOST_WIDE_INT) arg0s
3743 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3744 && arg1s == -1))
3745 return 0;
3746 val = arg0s % arg1s;
3747 break;
3748
3749 case UDIV:
3750 if (arg1 == 0
3751 || ((unsigned HOST_WIDE_INT) arg0s
3752 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3753 && arg1s == -1))
3754 return 0;
3755 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3756 break;
3757
3758 case UMOD:
3759 if (arg1 == 0
3760 || ((unsigned HOST_WIDE_INT) arg0s
3761 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3762 && arg1s == -1))
3763 return 0;
3764 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3765 break;
3766
3767 case AND:
3768 val = arg0 & arg1;
3769 break;
3770
3771 case IOR:
3772 val = arg0 | arg1;
3773 break;
3774
3775 case XOR:
3776 val = arg0 ^ arg1;
3777 break;
3778
3779 case LSHIFTRT:
3780 case ASHIFT:
3781 case ASHIFTRT:
3782 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3783 the value is in range. We can't return any old value for
3784 out-of-range arguments because either the middle-end (via
3785 shift_truncation_mask) or the back-end might be relying on
3786 target-specific knowledge. Nor can we rely on
3787 shift_truncation_mask, since the shift might not be part of an
3788 ashlM3, lshrM3 or ashrM3 instruction. */
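	  /* For example, with SHIFT_COUNT_TRUNCATED set and a 32-bit mode,
	     (ashift:SI X (const_int 33)) is folded as a shift by
	     33 % 32 == 1; without it, the out-of-range count makes us
	     return 0 and leave the expression alone.  */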
3789 if (SHIFT_COUNT_TRUNCATED)
3790 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3791 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3792 return 0;
3793
3794 val = (code == ASHIFT
3795 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3796 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3797
3798 /* Sign-extend the result for arithmetic right shifts. */
3799 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3800 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3801 break;
3802
3803 case ROTATERT:
3804 if (arg1 < 0)
3805 return 0;
3806
3807 arg1 %= width;
3808 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3809 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3810 break;
3811
3812 case ROTATE:
3813 if (arg1 < 0)
3814 return 0;
3815
3816 arg1 %= width;
3817 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3818 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3819 break;
3820
3821 case COMPARE:
3822 /* Do nothing here. */
3823 return 0;
3824
3825 case SMIN:
3826 val = arg0s <= arg1s ? arg0s : arg1s;
3827 break;
3828
3829 case UMIN:
3830 val = ((unsigned HOST_WIDE_INT) arg0
3831 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3832 break;
3833
3834 case SMAX:
3835 val = arg0s > arg1s ? arg0s : arg1s;
3836 break;
3837
3838 case UMAX:
3839 val = ((unsigned HOST_WIDE_INT) arg0
3840 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3841 break;
3842
3843 case SS_PLUS:
3844 case US_PLUS:
3845 case SS_MINUS:
3846 case US_MINUS:
3847 case SS_MULT:
3848 case US_MULT:
3849 case SS_DIV:
3850 case US_DIV:
3851 case SS_ASHIFT:
3852 case US_ASHIFT:
3853 /* ??? There are simplifications that can be done. */
3854 return 0;
3855
3856 default:
3857 gcc_unreachable ();
3858 }
3859
3860 return gen_int_mode (val, mode);
3861 }
3862
3863 return NULL_RTX;
3864 }
3865
3866
3867 \f
3868 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3869 PLUS or MINUS.
3870
3871    Rather than test for specific cases, we do this by a brute-force method
3872 and do all possible simplifications until no more changes occur. Then
3873 we rebuild the operation. */
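/* For example, given (minus (plus a b) (neg c)), the expansion loop below
   flattens the expression into the operand list {a, b, c}, all with
   positive sign, and the result is rebuilt as (plus (plus a b) c),
   up to the canonical ordering of the operands.  */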
3874
3875 struct simplify_plus_minus_op_data
3876 {
3877 rtx op;
3878 short neg;
3879 };
3880
3881 static bool
3882 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3883 {
3884 int result;
3885
3886 result = (commutative_operand_precedence (y)
3887 - commutative_operand_precedence (x));
3888 if (result)
3889 return result > 0;
3890
3891 /* Group together equal REGs to do more simplification. */
3892 if (REG_P (x) && REG_P (y))
3893 return REGNO (x) > REGNO (y);
3894 else
3895 return false;
3896 }
3897
3898 static rtx
3899 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3900 rtx op1)
3901 {
3902 struct simplify_plus_minus_op_data ops[8];
3903 rtx result, tem;
3904 int n_ops = 2, input_ops = 2;
3905 int changed, n_constants = 0, canonicalized = 0;
3906 int i, j;
3907
3908 memset (ops, 0, sizeof ops);
3909
3910 /* Set up the two operands and then expand them until nothing has been
3911 changed. If we run out of room in our array, give up; this should
3912 almost never happen. */
3913
3914 ops[0].op = op0;
3915 ops[0].neg = 0;
3916 ops[1].op = op1;
3917 ops[1].neg = (code == MINUS);
3918
3919 do
3920 {
3921 changed = 0;
3922
3923 for (i = 0; i < n_ops; i++)
3924 {
3925 rtx this_op = ops[i].op;
3926 int this_neg = ops[i].neg;
3927 enum rtx_code this_code = GET_CODE (this_op);
3928
3929 switch (this_code)
3930 {
3931 case PLUS:
3932 case MINUS:
3933 if (n_ops == 7)
3934 return NULL_RTX;
3935
3936 ops[n_ops].op = XEXP (this_op, 1);
3937 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3938 n_ops++;
3939
3940 ops[i].op = XEXP (this_op, 0);
3941 input_ops++;
3942 changed = 1;
3943 canonicalized |= this_neg;
3944 break;
3945
3946 case NEG:
3947 ops[i].op = XEXP (this_op, 0);
3948 ops[i].neg = ! this_neg;
3949 changed = 1;
3950 canonicalized = 1;
3951 break;
3952
3953 case CONST:
3954 if (n_ops < 7
3955 && GET_CODE (XEXP (this_op, 0)) == PLUS
3956 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3957 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3958 {
3959 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3960 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3961 ops[n_ops].neg = this_neg;
3962 n_ops++;
3963 changed = 1;
3964 canonicalized = 1;
3965 }
3966 break;
3967
3968 case NOT:
3969 /* ~a -> (-a - 1) */
3970 if (n_ops != 7)
3971 {
3972 ops[n_ops].op = CONSTM1_RTX (mode);
3973 ops[n_ops++].neg = this_neg;
3974 ops[i].op = XEXP (this_op, 0);
3975 ops[i].neg = !this_neg;
3976 changed = 1;
3977 canonicalized = 1;
3978 }
3979 break;
3980
3981 case CONST_INT:
3982 n_constants++;
3983 if (this_neg)
3984 {
3985 ops[i].op = neg_const_int (mode, this_op);
3986 ops[i].neg = 0;
3987 changed = 1;
3988 canonicalized = 1;
3989 }
3990 break;
3991
3992 default:
3993 break;
3994 }
3995 }
3996 }
3997 while (changed);
3998
3999 if (n_constants > 1)
4000 canonicalized = 1;
4001
4002 gcc_assert (n_ops >= 2);
4003
4004 /* If we only have two operands, we can avoid the loops. */
4005 if (n_ops == 2)
4006 {
4007 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4008 rtx lhs, rhs;
4009
4010 /* Get the two operands. Be careful with the order, especially for
4011 the cases where code == MINUS. */
4012 if (ops[0].neg && ops[1].neg)
4013 {
4014 lhs = gen_rtx_NEG (mode, ops[0].op);
4015 rhs = ops[1].op;
4016 }
4017 else if (ops[0].neg)
4018 {
4019 lhs = ops[1].op;
4020 rhs = ops[0].op;
4021 }
4022 else
4023 {
4024 lhs = ops[0].op;
4025 rhs = ops[1].op;
4026 }
4027
4028 return simplify_const_binary_operation (code, mode, lhs, rhs);
4029 }
4030
4031 /* Now simplify each pair of operands until nothing changes. */
4032 do
4033 {
4034 /* Insertion sort is good enough for an eight-element array. */
4035 for (i = 1; i < n_ops; i++)
4036 {
4037 struct simplify_plus_minus_op_data save;
4038 j = i - 1;
4039 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4040 continue;
4041
4042 canonicalized = 1;
4043 save = ops[i];
4044 do
4045 ops[j + 1] = ops[j];
4046 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4047 ops[j + 1] = save;
4048 }
4049
4050 changed = 0;
4051 for (i = n_ops - 1; i > 0; i--)
4052 for (j = i - 1; j >= 0; j--)
4053 {
4054 rtx lhs = ops[j].op, rhs = ops[i].op;
4055 int lneg = ops[j].neg, rneg = ops[i].neg;
4056
4057 if (lhs != 0 && rhs != 0)
4058 {
4059 enum rtx_code ncode = PLUS;
4060
4061 if (lneg != rneg)
4062 {
4063 ncode = MINUS;
4064 if (lneg)
4065 tem = lhs, lhs = rhs, rhs = tem;
4066 }
4067 else if (swap_commutative_operands_p (lhs, rhs))
4068 tem = lhs, lhs = rhs, rhs = tem;
4069
4070 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4071 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4072 {
4073 rtx tem_lhs, tem_rhs;
4074
4075 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4076 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4077 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4078
4079 if (tem && !CONSTANT_P (tem))
4080 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4081 }
4082 else
4083 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4084
4085 /* Reject "simplifications" that just wrap the two
4086 arguments in a CONST. Failure to do so can result
4087 in infinite recursion with simplify_binary_operation
4088 when it calls us to simplify CONST operations. */
4089 if (tem
4090 && ! (GET_CODE (tem) == CONST
4091 && GET_CODE (XEXP (tem, 0)) == ncode
4092 && XEXP (XEXP (tem, 0), 0) == lhs
4093 && XEXP (XEXP (tem, 0), 1) == rhs))
4094 {
4095 lneg &= rneg;
4096 if (GET_CODE (tem) == NEG)
4097 tem = XEXP (tem, 0), lneg = !lneg;
4098 if (CONST_INT_P (tem) && lneg)
4099 tem = neg_const_int (mode, tem), lneg = 0;
4100
4101 ops[i].op = tem;
4102 ops[i].neg = lneg;
4103 ops[j].op = NULL_RTX;
4104 changed = 1;
4105 canonicalized = 1;
4106 }
4107 }
4108 }
4109
4110 /* If nothing changed, fail. */
4111 if (!canonicalized)
4112 return NULL_RTX;
4113
4114 /* Pack all the operands to the lower-numbered entries. */
4115 for (i = 0, j = 0; j < n_ops; j++)
4116 if (ops[j].op)
4117 {
4118 ops[i] = ops[j];
4119 i++;
4120 }
4121 n_ops = i;
4122 }
4123 while (changed);
4124
4125 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4126 if (n_ops == 2
4127 && CONST_INT_P (ops[1].op)
4128 && CONSTANT_P (ops[0].op)
4129 && ops[0].neg)
4130 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4131
4132 /* We suppressed creation of trivial CONST expressions in the
4133 combination loop to avoid recursion. Create one manually now.
4134 The combination loop should have ensured that there is exactly
4135 one CONST_INT, and the sort will have ensured that it is last
4136 in the array and that any other constant will be next-to-last. */
4137
4138 if (n_ops > 1
4139 && CONST_INT_P (ops[n_ops - 1].op)
4140 && CONSTANT_P (ops[n_ops - 2].op))
4141 {
4142 rtx value = ops[n_ops - 1].op;
4143 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4144 value = neg_const_int (mode, value);
4145 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4146 INTVAL (value));
4147 n_ops--;
4148 }
4149
4150 /* Put a non-negated operand first, if possible. */
4151
4152 for (i = 0; i < n_ops && ops[i].neg; i++)
4153 continue;
4154 if (i == n_ops)
4155 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4156 else if (i != 0)
4157 {
4158 tem = ops[0].op;
4159 ops[0] = ops[i];
4160 ops[i].op = tem;
4161 ops[i].neg = 1;
4162 }
4163
4164 /* Now make the result by performing the requested operations. */
4165 result = ops[0].op;
4166 for (i = 1; i < n_ops; i++)
4167 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4168 mode, result, ops[i].op);
4169
4170 return result;
4171 }
4172
4173 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4174 static bool
4175 plus_minus_operand_p (const_rtx x)
4176 {
4177 return GET_CODE (x) == PLUS
4178 || GET_CODE (x) == MINUS
4179 || (GET_CODE (x) == CONST
4180 && GET_CODE (XEXP (x, 0)) == PLUS
4181 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4182 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4183 }
4184
4185 /* Like simplify_binary_operation except used for relational operators.
4186    MODE is the mode of the result.  If MODE is VOIDmode, the operands
4187    must not both be VOIDmode.
4188
4189    CMP_MODE specifies the mode in which the comparison is done, so it is
4190 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4191 the operands or, if both are VOIDmode, the operands are compared in
4192 "infinite precision". */
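/* For example, (eq:SI (reg:SI x) (reg:SI x)) folds to const_true_rtx via
   simplify_const_relational_operation below when the operand has no side
   effects, whereas a comparison whose operands are in MODE_CC is passed
   back unsimplified.  */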
4193 rtx
4194 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4195 enum machine_mode cmp_mode, rtx op0, rtx op1)
4196 {
4197 rtx tem, trueop0, trueop1;
4198
4199 if (cmp_mode == VOIDmode)
4200 cmp_mode = GET_MODE (op0);
4201 if (cmp_mode == VOIDmode)
4202 cmp_mode = GET_MODE (op1);
4203
4204 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4205 if (tem)
4206 {
4207 if (SCALAR_FLOAT_MODE_P (mode))
4208 {
4209 if (tem == const0_rtx)
4210 return CONST0_RTX (mode);
4211 #ifdef FLOAT_STORE_FLAG_VALUE
4212 {
4213 REAL_VALUE_TYPE val;
4214 val = FLOAT_STORE_FLAG_VALUE (mode);
4215 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4216 }
4217 #else
4218 return NULL_RTX;
4219 #endif
4220 }
4221 if (VECTOR_MODE_P (mode))
4222 {
4223 if (tem == const0_rtx)
4224 return CONST0_RTX (mode);
4225 #ifdef VECTOR_STORE_FLAG_VALUE
4226 {
4227 int i, units;
4228 rtvec v;
4229
4230 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4231 if (val == NULL_RTX)
4232 return NULL_RTX;
4233 if (val == const1_rtx)
4234 return CONST1_RTX (mode);
4235
4236 units = GET_MODE_NUNITS (mode);
4237 v = rtvec_alloc (units);
4238 for (i = 0; i < units; i++)
4239 RTVEC_ELT (v, i) = val;
4240 return gen_rtx_raw_CONST_VECTOR (mode, v);
4241 }
4242 #else
4243 return NULL_RTX;
4244 #endif
4245 }
4246
4247 return tem;
4248 }
4249
4250 /* For the following tests, ensure const0_rtx is op1. */
4251 if (swap_commutative_operands_p (op0, op1)
4252 || (op0 == const0_rtx && op1 != const0_rtx))
4253 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4254
4255 /* If op0 is a compare, extract the comparison arguments from it. */
4256 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4257 return simplify_gen_relational (code, mode, VOIDmode,
4258 XEXP (op0, 0), XEXP (op0, 1));
4259
4260 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4261 || CC0_P (op0))
4262 return NULL_RTX;
4263
4264 trueop0 = avoid_constant_pool_reference (op0);
4265 trueop1 = avoid_constant_pool_reference (op1);
4266 return simplify_relational_operation_1 (code, mode, cmp_mode,
4267 trueop0, trueop1);
4268 }
4269
4270 /* This part of simplify_relational_operation is only used when CMP_MODE
4271 is not in class MODE_CC (i.e. it is a real comparison).
4272
4273    MODE is the mode of the result, while CMP_MODE specifies the mode
4274    in which the comparison is done, so it is the mode of the operands.  */
4275
4276 static rtx
4277 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4278 enum machine_mode cmp_mode, rtx op0, rtx op1)
4279 {
4280 enum rtx_code op0code = GET_CODE (op0);
4281
4282 if (op1 == const0_rtx && COMPARISON_P (op0))
4283 {
4284 /* If op0 is a comparison, extract the comparison arguments
4285 from it. */
4286 if (code == NE)
4287 {
4288 if (GET_MODE (op0) == mode)
4289 return simplify_rtx (op0);
4290 else
4291 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4292 XEXP (op0, 0), XEXP (op0, 1));
4293 }
4294 else if (code == EQ)
4295 {
4296 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4297 if (new_code != UNKNOWN)
4298 return simplify_gen_relational (new_code, mode, VOIDmode,
4299 XEXP (op0, 0), XEXP (op0, 1));
4300 }
4301 }
4302
4303 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4304 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
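  /* For example, the overflow check (ltu:SI (plus:SI x (const_int 4))
     (const_int 4)) becomes (geu:SI x (const_int -4)).  */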
4305 if ((code == LTU || code == GEU)
4306 && GET_CODE (op0) == PLUS
4307 && CONST_INT_P (XEXP (op0, 1))
4308 && (rtx_equal_p (op1, XEXP (op0, 0))
4309 || rtx_equal_p (op1, XEXP (op0, 1))))
4310 {
4311 rtx new_cmp
4312 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4313 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4314 cmp_mode, XEXP (op0, 0), new_cmp);
4315 }
4316
4317 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4318 if ((code == LTU || code == GEU)
4319 && GET_CODE (op0) == PLUS
4320 && rtx_equal_p (op1, XEXP (op0, 1))
4321 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4322 && !rtx_equal_p (op1, XEXP (op0, 0)))
4323 return simplify_gen_relational (code, mode, cmp_mode, op0,
4324 copy_rtx (XEXP (op0, 0)));
4325
4326 if (op1 == const0_rtx)
4327 {
4328 /* Canonicalize (GTU x 0) as (NE x 0). */
4329 if (code == GTU)
4330 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4331 /* Canonicalize (LEU x 0) as (EQ x 0). */
4332 if (code == LEU)
4333 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4334 }
4335 else if (op1 == const1_rtx)
4336 {
4337 switch (code)
4338 {
4339 case GE:
4340 /* Canonicalize (GE x 1) as (GT x 0). */
4341 return simplify_gen_relational (GT, mode, cmp_mode,
4342 op0, const0_rtx);
4343 case GEU:
4344 /* Canonicalize (GEU x 1) as (NE x 0). */
4345 return simplify_gen_relational (NE, mode, cmp_mode,
4346 op0, const0_rtx);
4347 case LT:
4348 /* Canonicalize (LT x 1) as (LE x 0). */
4349 return simplify_gen_relational (LE, mode, cmp_mode,
4350 op0, const0_rtx);
4351 case LTU:
4352 /* Canonicalize (LTU x 1) as (EQ x 0). */
4353 return simplify_gen_relational (EQ, mode, cmp_mode,
4354 op0, const0_rtx);
4355 default:
4356 break;
4357 }
4358 }
4359 else if (op1 == constm1_rtx)
4360 {
4361 /* Canonicalize (LE x -1) as (LT x 0). */
4362 if (code == LE)
4363 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4364 /* Canonicalize (GT x -1) as (GE x 0). */
4365 if (code == GT)
4366 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4367 }
4368
4369 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
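  /* For example, (eq:SI (plus:SI x (const_int 3)) (const_int 10))
     becomes (eq:SI x (const_int 7)).  */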
4370 if ((code == EQ || code == NE)
4371 && (op0code == PLUS || op0code == MINUS)
4372 && CONSTANT_P (op1)
4373 && CONSTANT_P (XEXP (op0, 1))
4374 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4375 {
4376 rtx x = XEXP (op0, 0);
4377 rtx c = XEXP (op0, 1);
4378 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4379 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4380
4381      /* Detect an infinitely recursive condition, where we oscillate in
4382	 this simplification between:
4383	 A + B == C  <--->  C - B == A,
4384	 where A, B, and C are all non-simplifiable constant expressions,
4385	 usually SYMBOL_REFs.  */
4386 if (GET_CODE (tem) == invcode
4387 && CONSTANT_P (x)
4388 && rtx_equal_p (c, XEXP (tem, 1)))
4389 return NULL_RTX;
4390
4391 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4392 }
4393
4394   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4395 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4396 if (code == NE
4397 && op1 == const0_rtx
4398 && GET_MODE_CLASS (mode) == MODE_INT
4399 && cmp_mode != VOIDmode
4400 /* ??? Work-around BImode bugs in the ia64 backend. */
4401 && mode != BImode
4402 && cmp_mode != BImode
4403 && nonzero_bits (op0, cmp_mode) == 1
4404 && STORE_FLAG_VALUE == 1)
4405 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4406 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4407 : lowpart_subreg (mode, op0, cmp_mode);
4408
4409 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4410 if ((code == EQ || code == NE)
4411 && op1 == const0_rtx
4412 && op0code == XOR)
4413 return simplify_gen_relational (code, mode, cmp_mode,
4414 XEXP (op0, 0), XEXP (op0, 1));
4415
4416 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4417 if ((code == EQ || code == NE)
4418 && op0code == XOR
4419 && rtx_equal_p (XEXP (op0, 0), op1)
4420 && !side_effects_p (XEXP (op0, 0)))
4421 return simplify_gen_relational (code, mode, cmp_mode,
4422 XEXP (op0, 1), const0_rtx);
4423
4424 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4425 if ((code == EQ || code == NE)
4426 && op0code == XOR
4427 && rtx_equal_p (XEXP (op0, 1), op1)
4428 && !side_effects_p (XEXP (op0, 1)))
4429 return simplify_gen_relational (code, mode, cmp_mode,
4430 XEXP (op0, 0), const0_rtx);
4431
4432 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4433 if ((code == EQ || code == NE)
4434 && op0code == XOR
4435 && (CONST_INT_P (op1)
4436 || GET_CODE (op1) == CONST_DOUBLE)
4437 && (CONST_INT_P (XEXP (op0, 1))
4438 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4439 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4440 simplify_gen_binary (XOR, cmp_mode,
4441 XEXP (op0, 1), op1));
4442
4443 if (op0code == POPCOUNT && op1 == const0_rtx)
4444 switch (code)
4445 {
4446 case EQ:
4447 case LE:
4448 case LEU:
4449 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4450 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4451 XEXP (op0, 0), const0_rtx);
4452
4453 case NE:
4454 case GT:
4455 case GTU:
4456 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4457 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4458 XEXP (op0, 0), const0_rtx);
4459
4460 default:
4461 break;
4462 }
4463
4464 return NULL_RTX;
4465 }
4466
4467 enum
4468 {
4469 CMP_EQ = 1,
4470 CMP_LT = 2,
4471 CMP_GT = 4,
4472 CMP_LTU = 8,
4473 CMP_GTU = 16
4474 };
4475
4476
4477 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4478    KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4479    For KNOWN_RESULTS to make sense, it should be either CMP_EQ, or the
4480 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4481 For floating-point comparisons, assume that the operands were ordered. */
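/* For example, if the operands are known to compare 3 < 5 both signed and
   unsigned, KNOWN_RESULTS is CMP_LT | CMP_LTU; a request for GEU then
   yields const0_rtx, while a request for NE yields const_true_rtx.  */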
4482
4483 static rtx
4484 comparison_result (enum rtx_code code, int known_results)
4485 {
4486 switch (code)
4487 {
4488 case EQ:
4489 case UNEQ:
4490 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4491 case NE:
4492 case LTGT:
4493 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4494
4495 case LT:
4496 case UNLT:
4497 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4498 case GE:
4499 case UNGE:
4500 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4501
4502 case GT:
4503 case UNGT:
4504 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4505 case LE:
4506 case UNLE:
4507 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4508
4509 case LTU:
4510 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4511 case GEU:
4512 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4513
4514 case GTU:
4515 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4516 case LEU:
4517 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4518
4519 case ORDERED:
4520 return const_true_rtx;
4521 case UNORDERED:
4522 return const0_rtx;
4523 default:
4524 gcc_unreachable ();
4525 }
4526 }
4527
4528 /* Check if the given comparison (done in the given MODE) is actually a
4529 tautology or a contradiction.
4530 If no simplification is possible, this function returns zero.
4531 Otherwise, it returns either const_true_rtx or const0_rtx. */
4532
4533 rtx
4534 simplify_const_relational_operation (enum rtx_code code,
4535 enum machine_mode mode,
4536 rtx op0, rtx op1)
4537 {
4538 rtx tem;
4539 rtx trueop0;
4540 rtx trueop1;
4541
4542 gcc_assert (mode != VOIDmode
4543 || (GET_MODE (op0) == VOIDmode
4544 && GET_MODE (op1) == VOIDmode));
4545
4546 /* If op0 is a compare, extract the comparison arguments from it. */
4547 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4548 {
4549 op1 = XEXP (op0, 1);
4550 op0 = XEXP (op0, 0);
4551
4552 if (GET_MODE (op0) != VOIDmode)
4553 mode = GET_MODE (op0);
4554 else if (GET_MODE (op1) != VOIDmode)
4555 mode = GET_MODE (op1);
4556 else
4557 return 0;
4558 }
4559
4560 /* We can't simplify MODE_CC values since we don't know what the
4561 actual comparison is. */
4562 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4563 return 0;
4564
4565 /* Make sure the constant is second. */
4566 if (swap_commutative_operands_p (op0, op1))
4567 {
4568 tem = op0, op0 = op1, op1 = tem;
4569 code = swap_condition (code);
4570 }
4571
4572 trueop0 = avoid_constant_pool_reference (op0);
4573 trueop1 = avoid_constant_pool_reference (op1);
4574
4575 /* For integer comparisons of A and B maybe we can simplify A - B and can
4576 then simplify a comparison of that with zero. If A and B are both either
4577 a register or a CONST_INT, this can't help; testing for these cases will
4578 prevent infinite recursion here and speed things up.
4579
4580 We can only do this for EQ and NE comparisons as otherwise we may
4581    lose or introduce overflow, which we cannot disregard as undefined
4582    because we do not know the signedness of the operation on either the
4583    left- or right-hand side of the comparison.  */
4584
4585 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4586 && (code == EQ || code == NE)
4587 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4588 && (REG_P (op1) || CONST_INT_P (trueop1)))
4589 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4590 /* We cannot do this if tem is a nonzero address. */
4591 && ! nonzero_address_p (tem))
4592 return simplify_const_relational_operation (signed_condition (code),
4593 mode, tem, const0_rtx);
4594
4595 if (! HONOR_NANS (mode) && code == ORDERED)
4596 return const_true_rtx;
4597
4598 if (! HONOR_NANS (mode) && code == UNORDERED)
4599 return const0_rtx;
4600
4601 /* For modes without NaNs, if the two operands are equal, we know the
4602 result except if they have side-effects. Even with NaNs we know
4603 the result of unordered comparisons and, if signaling NaNs are
4604 irrelevant, also the result of LT/GT/LTGT. */
4605 if ((! HONOR_NANS (GET_MODE (trueop0))
4606 || code == UNEQ || code == UNLE || code == UNGE
4607 || ((code == LT || code == GT || code == LTGT)
4608 && ! HONOR_SNANS (GET_MODE (trueop0))))
4609 && rtx_equal_p (trueop0, trueop1)
4610 && ! side_effects_p (trueop0))
4611 return comparison_result (code, CMP_EQ);
4612
4613 /* If the operands are floating-point constants, see if we can fold
4614 the result. */
4615 if (GET_CODE (trueop0) == CONST_DOUBLE
4616 && GET_CODE (trueop1) == CONST_DOUBLE
4617 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4618 {
4619 REAL_VALUE_TYPE d0, d1;
4620
4621 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4622 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4623
4624 /* Comparisons are unordered iff at least one of the values is NaN. */
4625 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4626 switch (code)
4627 {
4628 case UNEQ:
4629 case UNLT:
4630 case UNGT:
4631 case UNLE:
4632 case UNGE:
4633 case NE:
4634 case UNORDERED:
4635 return const_true_rtx;
4636 case EQ:
4637 case LT:
4638 case GT:
4639 case LE:
4640 case GE:
4641 case LTGT:
4642 case ORDERED:
4643 return const0_rtx;
4644 default:
4645 return 0;
4646 }
4647
4648 return comparison_result (code,
4649 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4650 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4651 }
4652
4653 /* Otherwise, see if the operands are both integers. */
4654 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4655 && (GET_CODE (trueop0) == CONST_DOUBLE
4656 || CONST_INT_P (trueop0))
4657 && (GET_CODE (trueop1) == CONST_DOUBLE
4658 || CONST_INT_P (trueop1)))
4659 {
4660 int width = GET_MODE_PRECISION (mode);
4661 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4662 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4663
4664 /* Get the two words comprising each integer constant. */
4665 if (GET_CODE (trueop0) == CONST_DOUBLE)
4666 {
4667 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4668 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4669 }
4670 else
4671 {
4672 l0u = l0s = INTVAL (trueop0);
4673 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4674 }
4675
4676 if (GET_CODE (trueop1) == CONST_DOUBLE)
4677 {
4678 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4679 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4680 }
4681 else
4682 {
4683 l1u = l1s = INTVAL (trueop1);
4684 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4685 }
4686
4687 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4688 we have to sign or zero-extend the values. */
4689 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4690 {
4691 l0u &= GET_MODE_MASK (mode);
4692 l1u &= GET_MODE_MASK (mode);
4693
4694 if (val_signbit_known_set_p (mode, l0s))
4695 l0s |= ~GET_MODE_MASK (mode);
4696
4697 if (val_signbit_known_set_p (mode, l1s))
4698 l1s |= ~GET_MODE_MASK (mode);
4699 }
4700 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4701 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4702
4703 if (h0u == h1u && l0u == l1u)
4704 return comparison_result (code, CMP_EQ);
4705 else
4706 {
4707 int cr;
4708 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4709 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4710 return comparison_result (code, cr);
4711 }
4712 }
4713
4714 /* Optimize comparisons with upper and lower bounds. */
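  /* For example, if nonzero_bits shows that only the low four bits of
     TRUEOP0 can be set, so that 0 <= TRUEOP0 <= 15, then (gtu TRUEOP0 15)
     folds to const0_rtx and (leu TRUEOP0 15) to const_true_rtx.  */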
4715 if (HWI_COMPUTABLE_MODE_P (mode)
4716 && CONST_INT_P (trueop1))
4717 {
4718 int sign;
4719 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4720 HOST_WIDE_INT val = INTVAL (trueop1);
4721 HOST_WIDE_INT mmin, mmax;
4722
4723 if (code == GEU
4724 || code == LEU
4725 || code == GTU
4726 || code == LTU)
4727 sign = 0;
4728 else
4729 sign = 1;
4730
4731 /* Get a reduced range if the sign bit is zero. */
4732 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4733 {
4734 mmin = 0;
4735 mmax = nonzero;
4736 }
4737 else
4738 {
4739 rtx mmin_rtx, mmax_rtx;
4740 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4741
4742 mmin = INTVAL (mmin_rtx);
4743 mmax = INTVAL (mmax_rtx);
4744 if (sign)
4745 {
4746 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4747
4748 mmin >>= (sign_copies - 1);
4749 mmax >>= (sign_copies - 1);
4750 }
4751 }
4752
4753 switch (code)
4754 {
4755 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4756 case GEU:
4757 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4758 return const_true_rtx;
4759 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4760 return const0_rtx;
4761 break;
4762 case GE:
4763 if (val <= mmin)
4764 return const_true_rtx;
4765 if (val > mmax)
4766 return const0_rtx;
4767 break;
4768
4769 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4770 case LEU:
4771 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4772 return const_true_rtx;
4773 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4774 return const0_rtx;
4775 break;
4776 case LE:
4777 if (val >= mmax)
4778 return const_true_rtx;
4779 if (val < mmin)
4780 return const0_rtx;
4781 break;
4782
4783 case EQ:
4784 /* x == y is always false for y out of range. */
4785 if (val < mmin || val > mmax)
4786 return const0_rtx;
4787 break;
4788
4789 /* x > y is always false for y >= mmax, always true for y < mmin. */
4790 case GTU:
4791 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4792 return const0_rtx;
4793 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4794 return const_true_rtx;
4795 break;
4796 case GT:
4797 if (val >= mmax)
4798 return const0_rtx;
4799 if (val < mmin)
4800 return const_true_rtx;
4801 break;
4802
4803 /* x < y is always false for y <= mmin, always true for y > mmax. */
4804 case LTU:
4805 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4806 return const0_rtx;
4807 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4808 return const_true_rtx;
4809 break;
4810 case LT:
4811 if (val <= mmin)
4812 return const0_rtx;
4813 if (val > mmax)
4814 return const_true_rtx;
4815 break;
4816
4817 case NE:
4818 /* x != y is always true for y out of range. */
4819 if (val < mmin || val > mmax)
4820 return const_true_rtx;
4821 break;
4822
4823 default:
4824 break;
4825 }
4826 }
4827
4828 /* Optimize integer comparisons with zero. */
4829 if (trueop1 == const0_rtx)
4830 {
4831 /* Some addresses are known to be nonzero. We don't know
4832 their sign, but equality comparisons are known. */
4833 if (nonzero_address_p (trueop0))
4834 {
4835 if (code == EQ || code == LEU)
4836 return const0_rtx;
4837 if (code == NE || code == GTU)
4838 return const_true_rtx;
4839 }
4840
4841 /* See if the first operand is an IOR with a constant. If so, we
4842 may be able to determine the result of this comparison. */
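	  /* For example, (ne (ior x (const_int 4)) (const_int 0)) is
	     always true, since bit 2 of the IOR result is known to be
	     set.  */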
4843 if (GET_CODE (op0) == IOR)
4844 {
4845 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4846 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4847 {
4848 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4849 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4850 && (UINTVAL (inner_const)
4851 & ((unsigned HOST_WIDE_INT) 1
4852 << sign_bitnum)));
4853
4854 switch (code)
4855 {
4856 case EQ:
4857 case LEU:
4858 return const0_rtx;
4859 case NE:
4860 case GTU:
4861 return const_true_rtx;
4862 case LT:
4863 case LE:
4864 if (has_sign)
4865 return const_true_rtx;
4866 break;
4867 case GT:
4868 case GE:
4869 if (has_sign)
4870 return const0_rtx;
4871 break;
4872 default:
4873 break;
4874 }
4875 }
4876 }
4877 }
4878
4879 /* Optimize comparison of ABS with zero. */
4880 if (trueop1 == CONST0_RTX (mode)
4881 && (GET_CODE (trueop0) == ABS
4882 || (GET_CODE (trueop0) == FLOAT_EXTEND
4883 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4884 {
4885 switch (code)
4886 {
4887 case LT:
4888 /* Optimize abs(x) < 0.0. */
4889 if (!HONOR_SNANS (mode)
4890 && (!INTEGRAL_MODE_P (mode)
4891 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4892 {
4893 if (INTEGRAL_MODE_P (mode)
4894 && (issue_strict_overflow_warning
4895 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4896 warning (OPT_Wstrict_overflow,
4897 ("assuming signed overflow does not occur when "
4898 "assuming abs (x) < 0 is false"));
4899 return const0_rtx;
4900 }
4901 break;
4902
4903 case GE:
4904 /* Optimize abs(x) >= 0.0. */
4905 if (!HONOR_NANS (mode)
4906 && (!INTEGRAL_MODE_P (mode)
4907 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4908 {
4909 if (INTEGRAL_MODE_P (mode)
4910 && (issue_strict_overflow_warning
4911 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4912 warning (OPT_Wstrict_overflow,
4913 ("assuming signed overflow does not occur when "
4914 "assuming abs (x) >= 0 is true"));
4915 return const_true_rtx;
4916 }
4917 break;
4918
4919 case UNGE:
4920 /* Optimize ! (abs(x) < 0.0). */
4921 return const_true_rtx;
4922
4923 default:
4924 break;
4925 }
4926 }
4927
4928 return 0;
4929 }
4930 \f
4931 /* Simplify CODE, an operation with result mode MODE and three operands,
4932 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4933    a constant.  Return 0 if no simplification is possible.  */
4934
4935 rtx
4936 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4937 enum machine_mode op0_mode, rtx op0, rtx op1,
4938 rtx op2)
4939 {
4940 unsigned int width = GET_MODE_PRECISION (mode);
4941 bool any_change = false;
4942 rtx tem;
4943
4944 /* VOIDmode means "infinite" precision. */
4945 if (width == 0)
4946 width = HOST_BITS_PER_WIDE_INT;
4947
4948 switch (code)
4949 {
4950 case FMA:
4951 /* Simplify negations around the multiplication. */
4952 /* -a * -b + c => a * b + c. */
4953 if (GET_CODE (op0) == NEG)
4954 {
4955 tem = simplify_unary_operation (NEG, mode, op1, mode);
4956 if (tem)
4957 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4958 }
4959 else if (GET_CODE (op1) == NEG)
4960 {
4961 tem = simplify_unary_operation (NEG, mode, op0, mode);
4962 if (tem)
4963 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4964 }
4965
4966 /* Canonicalize the two multiplication operands. */
4967 /* a * -b + c => -b * a + c. */
4968 if (swap_commutative_operands_p (op0, op1))
4969 tem = op0, op0 = op1, op1 = tem, any_change = true;
4970
4971 if (any_change)
4972 return gen_rtx_FMA (mode, op0, op1, op2);
4973 return NULL_RTX;
4974
4975 case SIGN_EXTRACT:
4976 case ZERO_EXTRACT:
4977 if (CONST_INT_P (op0)
4978 && CONST_INT_P (op1)
4979 && CONST_INT_P (op2)
4980 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4981 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4982 {
4983 /* Extracting a bit-field from a constant */
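	  /* For example, with little-endian bit numbering,
	     (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
	     yields (const_int 10), while the corresponding SIGN_EXTRACT
	     yields (const_int -6) once the extracted sign bit has been
	     propagated.  */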
4984 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4985 HOST_WIDE_INT op1val = INTVAL (op1);
4986 HOST_WIDE_INT op2val = INTVAL (op2);
4987 if (BITS_BIG_ENDIAN)
4988 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4989 else
4990 val >>= op2val;
4991
4992 if (HOST_BITS_PER_WIDE_INT != op1val)
4993 {
4994 /* First zero-extend. */
4995 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4996 /* If desired, propagate sign bit. */
4997 if (code == SIGN_EXTRACT
4998 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4999 != 0)
5000 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5001 }
5002
5003 return gen_int_mode (val, mode);
5004 }
5005 break;
5006
5007 case IF_THEN_ELSE:
5008 if (CONST_INT_P (op0))
5009 return op0 != const0_rtx ? op1 : op2;
5010
5011 /* Convert c ? a : a into "a". */
5012 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5013 return op1;
5014
5015 /* Convert a != b ? a : b into "a". */
5016 if (GET_CODE (op0) == NE
5017 && ! side_effects_p (op0)
5018 && ! HONOR_NANS (mode)
5019 && ! HONOR_SIGNED_ZEROS (mode)
5020 && ((rtx_equal_p (XEXP (op0, 0), op1)
5021 && rtx_equal_p (XEXP (op0, 1), op2))
5022 || (rtx_equal_p (XEXP (op0, 0), op2)
5023 && rtx_equal_p (XEXP (op0, 1), op1))))
5024 return op1;
5025
5026 /* Convert a == b ? a : b into "b". */
5027 if (GET_CODE (op0) == EQ
5028 && ! side_effects_p (op0)
5029 && ! HONOR_NANS (mode)
5030 && ! HONOR_SIGNED_ZEROS (mode)
5031 && ((rtx_equal_p (XEXP (op0, 0), op1)
5032 && rtx_equal_p (XEXP (op0, 1), op2))
5033 || (rtx_equal_p (XEXP (op0, 0), op2)
5034 && rtx_equal_p (XEXP (op0, 1), op1))))
5035 return op2;
5036
5037 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5038 {
5039 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5040 ? GET_MODE (XEXP (op0, 1))
5041 : GET_MODE (XEXP (op0, 0)));
5042 rtx temp;
5043
5044 /* Look for happy constants in op1 and op2. */
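	  /* For example, with STORE_FLAG_VALUE == 1,
	     (if_then_else (lt a b) (const_int 1) (const_int 0)) is just
	     the comparison (lt a b) regenerated in MODE.  */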
5045 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5046 {
5047 HOST_WIDE_INT t = INTVAL (op1);
5048 HOST_WIDE_INT f = INTVAL (op2);
5049
5050 if (t == STORE_FLAG_VALUE && f == 0)
5051 code = GET_CODE (op0);
5052 else if (t == 0 && f == STORE_FLAG_VALUE)
5053 {
5054 enum rtx_code tmp;
5055 tmp = reversed_comparison_code (op0, NULL_RTX);
5056 if (tmp == UNKNOWN)
5057 break;
5058 code = tmp;
5059 }
5060 else
5061 break;
5062
5063 return simplify_gen_relational (code, mode, cmp_mode,
5064 XEXP (op0, 0), XEXP (op0, 1));
5065 }
5066
5067 if (cmp_mode == VOIDmode)
5068 cmp_mode = op0_mode;
5069 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5070 cmp_mode, XEXP (op0, 0),
5071 XEXP (op0, 1));
5072
5073 /* See if any simplifications were possible. */
5074 if (temp)
5075 {
5076 if (CONST_INT_P (temp))
5077 return temp == const0_rtx ? op2 : op1;
5078 else if (temp)
5079 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5080 }
5081 }
5082 break;
5083
5084 case VEC_MERGE:
5085 gcc_assert (GET_MODE (op0) == mode);
5086 gcc_assert (GET_MODE (op1) == mode);
5087 gcc_assert (VECTOR_MODE_P (mode));
5088 op2 = avoid_constant_pool_reference (op2);
5089 if (CONST_INT_P (op2))
5090 {
5091 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5092 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5093 int mask = (1 << n_elts) - 1;
5094
5095 if (!(INTVAL (op2) & mask))
5096 return op1;
5097 if ((INTVAL (op2) & mask) == mask)
5098 return op0;
5099
5100 op0 = avoid_constant_pool_reference (op0);
5101 op1 = avoid_constant_pool_reference (op1);
5102 if (GET_CODE (op0) == CONST_VECTOR
5103 && GET_CODE (op1) == CONST_VECTOR)
5104 {
5105 rtvec v = rtvec_alloc (n_elts);
5106 unsigned int i;
5107
5108 for (i = 0; i < n_elts; i++)
5109 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5110 ? CONST_VECTOR_ELT (op0, i)
5111 : CONST_VECTOR_ELT (op1, i));
5112 return gen_rtx_CONST_VECTOR (mode, v);
5113 }
5114 }
5115 break;
5116
5117 default:
5118 gcc_unreachable ();
5119 }
5120
5121 return 0;
5122 }
5123
5124 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5125 or CONST_VECTOR,
5126 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5127
5128 Works by unpacking OP into a collection of 8-bit values
5129 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5130 and then repacking them again for OUTERMODE. */
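/* For example, on a little-endian target, taking the lowpart
   (subreg:HI (const_int 0x12345678) 0) with SImode inner mode unpacks the
   constant into the byte array {0x78, 0x56, 0x34, 0x12} and repacks bytes
   0 and 1 into (const_int 0x5678).  */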
5131
5132 static rtx
5133 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5134 enum machine_mode innermode, unsigned int byte)
5135 {
5136 /* We support up to 512-bit values (for V8DFmode). */
5137 enum {
5138 max_bitsize = 512,
5139 value_bit = 8,
5140 value_mask = (1 << value_bit) - 1
5141 };
5142 unsigned char value[max_bitsize / value_bit];
5143 int value_start;
5144 int i;
5145 int elem;
5146
5147 int num_elem;
5148 rtx * elems;
5149 int elem_bitsize;
5150 rtx result_s;
5151 rtvec result_v = NULL;
5152 enum mode_class outer_class;
5153 enum machine_mode outer_submode;
5154
5155 /* Some ports misuse CCmode. */
5156 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5157 return op;
5158
5159 /* We have no way to represent a complex constant at the rtl level. */
5160 if (COMPLEX_MODE_P (outermode))
5161 return NULL_RTX;
5162
5163 /* Unpack the value. */
5164
5165 if (GET_CODE (op) == CONST_VECTOR)
5166 {
5167 num_elem = CONST_VECTOR_NUNITS (op);
5168 elems = &CONST_VECTOR_ELT (op, 0);
5169 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5170 }
5171 else
5172 {
5173 num_elem = 1;
5174 elems = &op;
5175 elem_bitsize = max_bitsize;
5176 }
5177 /* If this asserts, it is too complicated; reducing value_bit may help. */
5178 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5179 /* I don't know how to handle endianness of sub-units. */
5180 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5181
5182 for (elem = 0; elem < num_elem; elem++)
5183 {
5184 unsigned char * vp;
5185 rtx el = elems[elem];
5186
5187 /* Vectors are kept in target memory order. (This is probably
5188 a mistake.) */
5189 {
5190 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5191 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5192 / BITS_PER_UNIT);
5193 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5194 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5195 unsigned bytele = (subword_byte % UNITS_PER_WORD
5196 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5197 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5198 }
5199
5200 switch (GET_CODE (el))
5201 {
5202 case CONST_INT:
5203 for (i = 0;
5204 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5205 i += value_bit)
5206 *vp++ = INTVAL (el) >> i;
5207 /* CONST_INTs are always logically sign-extended. */
5208 for (; i < elem_bitsize; i += value_bit)
5209 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5210 break;
5211
5212 case CONST_DOUBLE:
5213 if (GET_MODE (el) == VOIDmode)
5214 {
5215 unsigned char extend = 0;
5216 /* If this triggers, someone should have generated a
5217 CONST_INT instead. */
5218 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5219
5220 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5221 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5222 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5223 {
5224 *vp++
5225 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5226 i += value_bit;
5227 }
5228
5229 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5230 extend = -1;
5231 for (; i < elem_bitsize; i += value_bit)
5232 *vp++ = extend;
5233 }
5234 else
5235 {
5236 long tmp[max_bitsize / 32];
5237 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5238
5239 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5240 gcc_assert (bitsize <= elem_bitsize);
5241 gcc_assert (bitsize % value_bit == 0);
5242
5243 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5244 GET_MODE (el));
5245
5246 /* real_to_target produces its result in words affected by
5247 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5248 and use WORDS_BIG_ENDIAN instead; see the documentation
5249 of SUBREG in rtl.texi. */
5250 for (i = 0; i < bitsize; i += value_bit)
5251 {
5252 int ibase;
5253 if (WORDS_BIG_ENDIAN)
5254 ibase = bitsize - 1 - i;
5255 else
5256 ibase = i;
5257 *vp++ = tmp[ibase / 32] >> i % 32;
5258 }
5259
5260 /* It shouldn't matter what's done here, so fill it with
5261 zero. */
5262 for (; i < elem_bitsize; i += value_bit)
5263 *vp++ = 0;
5264 }
5265 break;
5266
5267 case CONST_FIXED:
5268 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5269 {
5270 for (i = 0; i < elem_bitsize; i += value_bit)
5271 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5272 }
5273 else
5274 {
5275 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5276 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5277 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5278 i += value_bit)
5279 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5280 >> (i - HOST_BITS_PER_WIDE_INT);
5281 for (; i < elem_bitsize; i += value_bit)
5282 *vp++ = 0;
5283 }
5284 break;
5285
5286 default:
5287 gcc_unreachable ();
5288 }
5289 }
5290
5291 /* Now, pick the right byte to start with. */
5292 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5293 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5294 will already have offset 0. */
5295 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5296 {
5297 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5298 - byte);
5299 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5300 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5301 byte = (subword_byte % UNITS_PER_WORD
5302 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5303 }
5304
5305 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5306 so if it's become negative it will instead be very large.) */
5307 gcc_assert (byte < GET_MODE_SIZE (innermode));
5308
5309 /* Convert from bytes to chunks of size value_bit. */
5310 value_start = byte * (BITS_PER_UNIT / value_bit);
5311
5312 /* Re-pack the value. */
5313
5314 if (VECTOR_MODE_P (outermode))
5315 {
5316 num_elem = GET_MODE_NUNITS (outermode);
5317 result_v = rtvec_alloc (num_elem);
5318 elems = &RTVEC_ELT (result_v, 0);
5319 outer_submode = GET_MODE_INNER (outermode);
5320 }
5321 else
5322 {
5323 num_elem = 1;
5324 elems = &result_s;
5325 outer_submode = outermode;
5326 }
5327
5328 outer_class = GET_MODE_CLASS (outer_submode);
5329 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5330
5331 gcc_assert (elem_bitsize % value_bit == 0);
5332 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5333
5334 for (elem = 0; elem < num_elem; elem++)
5335 {
5336 unsigned char *vp;
5337
5338 /* Vectors are stored in target memory order. (This is probably
5339 a mistake.) */
5340 {
5341 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5342 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5343 / BITS_PER_UNIT);
5344 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5345 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5346 unsigned bytele = (subword_byte % UNITS_PER_WORD
5347 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5348 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5349 }
5350
5351 switch (outer_class)
5352 {
5353 case MODE_INT:
5354 case MODE_PARTIAL_INT:
5355 {
5356 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5357
5358 for (i = 0;
5359 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5360 i += value_bit)
5361 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5362 for (; i < elem_bitsize; i += value_bit)
5363 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5364 << (i - HOST_BITS_PER_WIDE_INT);
5365
5366 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5367 know why. */
5368 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5369 elems[elem] = gen_int_mode (lo, outer_submode);
5370 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5371 elems[elem] = immed_double_const (lo, hi, outer_submode);
5372 else
5373 return NULL_RTX;
5374 }
5375 break;
5376
5377 case MODE_FLOAT:
5378 case MODE_DECIMAL_FLOAT:
5379 {
5380 REAL_VALUE_TYPE r;
5381 long tmp[max_bitsize / 32];
5382
5383 /* real_from_target wants its input in words affected by
5384 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5385 and use WORDS_BIG_ENDIAN instead; see the documentation
5386 of SUBREG in rtl.texi. */
5387 for (i = 0; i < max_bitsize / 32; i++)
5388 tmp[i] = 0;
5389 for (i = 0; i < elem_bitsize; i += value_bit)
5390 {
5391 int ibase;
5392 if (WORDS_BIG_ENDIAN)
5393 ibase = elem_bitsize - 1 - i;
5394 else
5395 ibase = i;
5396 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5397 }
5398
5399 real_from_target (&r, tmp, outer_submode);
5400 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5401 }
5402 break;
5403
5404 case MODE_FRACT:
5405 case MODE_UFRACT:
5406 case MODE_ACCUM:
5407 case MODE_UACCUM:
5408 {
5409 FIXED_VALUE_TYPE f;
5410 f.data.low = 0;
5411 f.data.high = 0;
5412 f.mode = outer_submode;
5413
5414 for (i = 0;
5415 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5416 i += value_bit)
5417 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5418 for (; i < elem_bitsize; i += value_bit)
5419 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5420 << (i - HOST_BITS_PER_WIDE_INT));
5421
5422 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5423 }
5424 break;
5425
5426 default:
5427 gcc_unreachable ();
5428 }
5429 }
5430 if (VECTOR_MODE_P (outermode))
5431 return gen_rtx_CONST_VECTOR (outermode, result_v);
5432 else
5433 return result_s;
5434 }
5435
5436 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5437 Return 0 if no simplifications are possible. */
5438 rtx
5439 simplify_subreg (enum machine_mode outermode, rtx op,
5440 enum machine_mode innermode, unsigned int byte)
5441 {
5442 /* Little bit of sanity checking. */
5443 gcc_assert (innermode != VOIDmode);
5444 gcc_assert (outermode != VOIDmode);
5445 gcc_assert (innermode != BLKmode);
5446 gcc_assert (outermode != BLKmode);
5447
5448 gcc_assert (GET_MODE (op) == innermode
5449 || GET_MODE (op) == VOIDmode);
5450
5451 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5452 gcc_assert (byte < GET_MODE_SIZE (innermode));
5453
5454 if (outermode == innermode && !byte)
5455 return op;
5456
5457 if (CONST_INT_P (op)
5458 || GET_CODE (op) == CONST_DOUBLE
5459 || GET_CODE (op) == CONST_FIXED
5460 || GET_CODE (op) == CONST_VECTOR)
5461 return simplify_immed_subreg (outermode, op, innermode, byte);
5462
5463 /* Changing mode twice with SUBREG => just change it once,
5464    or not at all if changing back to the starting mode.  */
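  /* For example, (subreg:QI (subreg:HI (reg:SI x) 0) 0) collapses to
     (subreg:QI (reg:SI x) 0) on a little-endian target.  */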
5465 if (GET_CODE (op) == SUBREG)
5466 {
5467 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5468 int final_offset = byte + SUBREG_BYTE (op);
5469 rtx newx;
5470
5471 if (outermode == innermostmode
5472 && byte == 0 && SUBREG_BYTE (op) == 0)
5473 return SUBREG_REG (op);
5474
5475      /* The SUBREG_BYTE represents the offset, as if the value were stored
5476	 in memory.  An irritating exception is the paradoxical subreg,
5477	 where we define SUBREG_BYTE to be 0.  On big-endian machines, this
5478	 value should be negative.  For a moment, undo this exception.  */
5479 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5480 {
5481 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5482 if (WORDS_BIG_ENDIAN)
5483 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5484 if (BYTES_BIG_ENDIAN)
5485 final_offset += difference % UNITS_PER_WORD;
5486 }
5487 if (SUBREG_BYTE (op) == 0
5488 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5489 {
5490 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5491 if (WORDS_BIG_ENDIAN)
5492 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5493 if (BYTES_BIG_ENDIAN)
5494 final_offset += difference % UNITS_PER_WORD;
5495 }
5496
5497 /* See whether resulting subreg will be paradoxical. */
5498 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5499 {
5500 /* In nonparadoxical subregs we can't handle negative offsets. */
5501 if (final_offset < 0)
5502 return NULL_RTX;
5503 /* Bail out in case resulting subreg would be incorrect. */
5504 if (final_offset % GET_MODE_SIZE (outermode)
5505 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5506 return NULL_RTX;
5507 }
5508 else
5509 {
5510 int offset = 0;
5511 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5512
5513	  /* For a paradoxical subreg, see if we are still looking at the
5514	     lower part.  If so, our SUBREG_BYTE will be 0.  */
5515 if (WORDS_BIG_ENDIAN)
5516 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5517 if (BYTES_BIG_ENDIAN)
5518 offset += difference % UNITS_PER_WORD;
5519 if (offset == final_offset)
5520 final_offset = 0;
5521 else
5522 return NULL_RTX;
5523 }
5524
5525 /* Recurse for further possible simplifications. */
5526 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5527 final_offset);
5528 if (newx)
5529 return newx;
5530 if (validate_subreg (outermode, innermostmode,
5531 SUBREG_REG (op), final_offset))
5532 {
5533 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5534 if (SUBREG_PROMOTED_VAR_P (op)
5535 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5536 && GET_MODE_CLASS (outermode) == MODE_INT
5537 && IN_RANGE (GET_MODE_SIZE (outermode),
5538 GET_MODE_SIZE (innermode),
5539 GET_MODE_SIZE (innermostmode))
5540 && subreg_lowpart_p (newx))
5541 {
5542 SUBREG_PROMOTED_VAR_P (newx) = 1;
5543 SUBREG_PROMOTED_UNSIGNED_SET
5544 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5545 }
5546 return newx;
5547 }
5548 return NULL_RTX;
5549 }
5550
5551 /* Merge implicit and explicit truncations. */
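  /* For example, (subreg:QI (truncate:HI (reg:SI x)) 0) becomes
     (truncate:QI (reg:SI x)) on a little-endian target.  */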
5552
5553 if (GET_CODE (op) == TRUNCATE
5554 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5555 && subreg_lowpart_offset (outermode, innermode) == byte)
5556 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5557 GET_MODE (XEXP (op, 0)));
5558
5559 /* SUBREG of a hard register => just change the register number
5560 and/or mode. If the hard register is not valid in that mode,
5561 suppress this simplification. If the hard register is the stack,
5562 frame, or argument pointer, leave this as a SUBREG. */
5563
5564 if (REG_P (op) && HARD_REGISTER_P (op))
5565 {
5566 unsigned int regno, final_regno;
5567
5568 regno = REGNO (op);
5569 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5570 if (HARD_REGISTER_NUM_P (final_regno))
5571 {
5572 rtx x;
5573 int final_offset = byte;
5574
5575 /* Adjust offset for paradoxical subregs. */
5576 if (byte == 0
5577 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5578 {
5579 int difference = (GET_MODE_SIZE (innermode)
5580 - GET_MODE_SIZE (outermode));
5581 if (WORDS_BIG_ENDIAN)
5582 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5583 if (BYTES_BIG_ENDIAN)
5584 final_offset += difference % UNITS_PER_WORD;
5585 }
5586
5587 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5588
5589      /* Propagate the original regno.  We don't have any way to specify
5590	 the offset inside the original regno, so do so only for the lowpart.
5591	 The information is used only by alias analysis, which cannot
5592	 grok partial registers anyway.  */
5593
5594 if (subreg_lowpart_offset (outermode, innermode) == byte)
5595 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5596 return x;
5597 }
5598 }
5599
5600 /* If we have a SUBREG of a register that we are replacing and we are
5601 replacing it with a MEM, make a new MEM and try replacing the
5602 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5603 or if we would be widening it. */
5604
5605 if (MEM_P (op)
5606 && ! mode_dependent_address_p (XEXP (op, 0))
5607 /* Allow splitting of volatile memory references in case we don't
5608 have instruction to move the whole thing. */
5609 && (! MEM_VOLATILE_P (op)
5610 || ! have_insn_for (SET, innermode))
5611 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5612 return adjust_address_nv (op, outermode, byte);
5613
5614 /* Handle complex values represented as CONCAT
5615 of real and imaginary part. */
5616 if (GET_CODE (op) == CONCAT)
5617 {
5618 unsigned int part_size, final_offset;
5619 rtx part, res;
5620
5621 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5622 if (byte < part_size)
5623 {
5624 part = XEXP (op, 0);
5625 final_offset = byte;
5626 }
5627 else
5628 {
5629 part = XEXP (op, 1);
5630 final_offset = byte - part_size;
5631 }
5632
5633 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5634 return NULL_RTX;
5635
5636 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5637 if (res)
5638 return res;
5639 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5640 return gen_rtx_SUBREG (outermode, part, final_offset);
5641 return NULL_RTX;
5642 }
5643
5644 /* Optimize SUBREG truncations of zero- and sign-extended values. */
5645 if ((GET_CODE (op) == ZERO_EXTEND
5646 || GET_CODE (op) == SIGN_EXTEND)
5647 && SCALAR_INT_MODE_P (innermode)
5648 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5649 {
5650 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5651
5652 /* If we're requesting the lowpart of a zero or sign extension,
5653 there are three possibilities. If the outermode is the same
5654 as the origmode, we can omit both the extension and the subreg.
5655 If the outermode is not larger than the origmode, we can apply
5656 the truncation without the extension. Finally, if the outermode
5657 is larger than the origmode, but both are integer modes, we
5658 can just extend to the appropriate mode. */
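 /* E.g., on a little-endian target,
 (subreg:QI (zero_extend:SI (reg:QI)) 0) is just (reg:QI), while
 (subreg:HI (zero_extend:SI (reg:QI)) 0) becomes
 (zero_extend:HI (reg:QI)). */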
5659 if (bitpos == 0)
5660 {
5661 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5662 if (outermode == origmode)
5663 return XEXP (op, 0);
5664 if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5665 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5666 subreg_lowpart_offset (outermode,
5667 origmode));
5668 if (SCALAR_INT_MODE_P (outermode))
5669 return simplify_gen_unary (GET_CODE (op), outermode,
5670 XEXP (op, 0), origmode);
5671 }
5672
5673 /* A SUBREG resulting from a zero extension may fold to zero if
5674 it extracts higher bits than the ZERO_EXTEND's source provides. */
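 /* E.g., every bit of (zero_extend:SI (reg:QI)) above bit 7 is known
 to be zero, so a subreg reading only those bits folds to
 (const_int 0). */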
5675 if (GET_CODE (op) == ZERO_EXTEND
5676 && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5677 return CONST0_RTX (outermode);
5678 }
5679
5680 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5681 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5682 the outer subreg is effectively a truncation to the original mode. */
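 /* The sign extension leaves more than OUTERMODE's worth of sign-bit
 copies, so shifting right by C < OUTERMODE bits and truncating
 back behaves exactly like an OUTERMODE arithmetic shift. */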
5683 if ((GET_CODE (op) == LSHIFTRT
5684 || GET_CODE (op) == ASHIFTRT)
5685 && SCALAR_INT_MODE_P (outermode)
5686 && SCALAR_INT_MODE_P (innermode)
5687 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
5688 to avoid the possibility that an outer LSHIFTRT shifts by more
5689 than the sign extension's sign_bit_copies and introduces zeros
5690 into the high bits of the result. */
5691 && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5692 && CONST_INT_P (XEXP (op, 1))
5693 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5694 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5695 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5696 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5697 return simplify_gen_binary (ASHIFTRT, outermode,
5698 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5699
5700 /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5701 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5702 the outer subreg is effectively a truncation to the original mode. */
5703 if ((GET_CODE (op) == LSHIFTRT
5704 || GET_CODE (op) == ASHIFTRT)
5705 && SCALAR_INT_MODE_P (outermode)
5706 && SCALAR_INT_MODE_P (innermode)
5707 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5708 && CONST_INT_P (XEXP (op, 1))
5709 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5710 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5711 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5712 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5713 return simplify_gen_binary (LSHIFTRT, outermode,
5714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5715
5716 /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5717 into (ashift:QI (x:QI) C), where C is a suitable small constant and
5718 the outer subreg is effectively a truncation to the original mode. */
5719 if (GET_CODE (op) == ASHIFT
5720 && SCALAR_INT_MODE_P (outermode)
5721 && SCALAR_INT_MODE_P (innermode)
5722 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5723 && CONST_INT_P (XEXP (op, 1))
5724 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5725 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5726 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5727 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5728 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5729 return simplify_gen_binary (ASHIFT, outermode,
5730 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5731
5732 /* Recognize a word extraction from a multi-word subreg. */
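 /* E.g., on a 32-bit little-endian target,
 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 32)) 0) extracts the
 high word of the DImode value: (subreg:SI (reg:DI) 4). */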
5733 if ((GET_CODE (op) == LSHIFTRT
5734 || GET_CODE (op) == ASHIFTRT)
5735 && SCALAR_INT_MODE_P (innermode)
5736 && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5737 && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5738 && CONST_INT_P (XEXP (op, 1))
5739 && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5740 && INTVAL (XEXP (op, 1)) >= 0
5741 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5742 && byte == subreg_lowpart_offset (outermode, innermode))
5743 {
5744 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5745 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5746 (WORDS_BIG_ENDIAN
5747 ? byte - shifted_bytes
5748 : byte + shifted_bytes));
5749 }
5750
5751 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5752 and try replacing the SUBREG and shift with it. Don't do this if
5753 the MEM has a mode-dependent address or if we would be widening it. */
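 /* E.g., on a 32-bit little-endian target,
 (subreg:SI (lshiftrt:DI (mem:DI A) (const_int 32)) 0) can instead
 load SImode directly from a 4-byte offset within A. */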
5754
5755 if ((GET_CODE (op) == LSHIFTRT
5756 || GET_CODE (op) == ASHIFTRT)
5757 && SCALAR_INT_MODE_P (innermode)
5758 && MEM_P (XEXP (op, 0))
5759 && CONST_INT_P (XEXP (op, 1))
5760 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5761 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5762 && INTVAL (XEXP (op, 1)) > 0
5763 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5764 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5765 && ! MEM_VOLATILE_P (XEXP (op, 0))
5766 && byte == subreg_lowpart_offset (outermode, innermode)
5767 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5768 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5769 {
5770 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5771 return adjust_address_nv (XEXP (op, 0), outermode,
5772 (WORDS_BIG_ENDIAN
5773 ? byte - shifted_bytes
5774 : byte + shifted_bytes));
5775 }
5776
5777 return NULL_RTX;
5778 }
5779
5780 /* Make a SUBREG operation or equivalent if it folds. */
5781
5782 rtx
5783 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5784 enum machine_mode innermode, unsigned int byte)
5785 {
5786 rtx newx;
5787
5788 newx = simplify_subreg (outermode, op, innermode, byte);
5789 if (newx)
5790 return newx;
5791
5792 if (GET_CODE (op) == SUBREG
5793 || GET_CODE (op) == CONCAT
5794 || GET_MODE (op) == VOIDmode)
5795 return NULL_RTX;
5796
5797 if (validate_subreg (outermode, innermode, op, byte))
5798 return gen_rtx_SUBREG (outermode, op, byte);
5799
5800 return NULL_RTX;
5801 }
5802
5803 /* Simplify X, an rtx expression.
5804
5805 Return the simplified expression or NULL if no simplifications
5806 were possible.
5807
5808 This is the preferred entry point into the simplification routines;
5809 however, we still allow passes to call the more specific routines.
5810
5811 Right now GCC has three (yes, three) major bodies of RTL simplification
5812 code that need to be unified.
5813
5814 1. fold_rtx in cse.c. This code uses various CSE-specific
5815 information to aid in RTL simplification.
5816
5817 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5818 it uses combine-specific information to aid in RTL
5819 simplification.
5820
5821 3. The routines in this file.
5822
5823
5824 Long term we want to only have one body of simplification code; to
5825 get to that state I recommend the following steps:
5826
5827 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5828 that do not depend on pass-specific state into these routines.
5829
5830 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5831 use this routine whenever possible.
5832
5833 3. Allow for pass-dependent state to be provided to these
5834 routines and add simplifications based on the pass dependent
5835 state. Remove code from cse.c & combine.c that becomes
5836 redundant/dead.
5837
5838 It will take time, but ultimately the compiler will be easier to
5839 maintain and improve. It's totally silly that when we add a
5840 simplification it needs to be added to 4 places (3 for RTL
5841 simplification and 1 for tree simplification). */
5842
5843 rtx
5844 simplify_rtx (const_rtx x)
5845 {
5846 const enum rtx_code code = GET_CODE (x);
5847 const enum machine_mode mode = GET_MODE (x);
5848
5849 switch (GET_RTX_CLASS (code))
5850 {
5851 case RTX_UNARY:
5852 return simplify_unary_operation (code, mode,
5853 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5854 case RTX_COMM_ARITH:
5855 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5856 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5857
5858 /* Fall through. */
5859
5860 case RTX_BIN_ARITH:
5861 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5862
5863 case RTX_TERNARY:
5864 case RTX_BITFIELD_OPS:
5865 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5866 XEXP (x, 0), XEXP (x, 1),
5867 XEXP (x, 2));
5868
5869 case RTX_COMPARE:
5870 case RTX_COMM_COMPARE:
5871 return simplify_relational_operation (code, mode,
5872 ((GET_MODE (XEXP (x, 0))
5873 != VOIDmode)
5874 ? GET_MODE (XEXP (x, 0))
5875 : GET_MODE (XEXP (x, 1))),
5876 XEXP (x, 0),
5877 XEXP (x, 1));
5878
5879 case RTX_EXTRA:
5880 if (code == SUBREG)
5881 return simplify_subreg (mode, SUBREG_REG (x),
5882 GET_MODE (SUBREG_REG (x)),
5883 SUBREG_BYTE (x));
5884 break;
5885
5886 case RTX_OBJ:
5887 if (code == LO_SUM)
5888 {
5889 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5890 if (GET_CODE (XEXP (x, 0)) == HIGH
5891 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5892 return XEXP (x, 1);
5893 }
5894 break;
5895
5896 default:
5897 break;
5898 }
5899 return NULL;
5900 }