1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
49
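/* Illustrative sketch (not part of the original file): HWI_SIGN_EXTEND
   computes the high word that sign-extending LOW would produce, so for

     unsigned HOST_WIDE_INT lo = (unsigned HOST_WIDE_INT) -5;
     HOST_WIDE_INT hi = HWI_SIGN_EXTEND (lo);

   hi ends up as -1, whereas HWI_SIGN_EXTEND (5) yields 0.  */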
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
63 \f
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
68 {
69 return gen_int_mode (- INTVAL (i), mode);
70 }
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x)
93 && CONST_DOUBLE_LOW (x) == 0)
94 {
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
97 }
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
101
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
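/* A minimal usage sketch, not part of the original file; QImode and the
   constants are chosen purely for illustration:

     mode_signbit_p (QImode, gen_int_mode (0x80, QImode))

   is true, since 0x80 is exactly the QImode sign bit, while
   mode_signbit_p (QImode, GEN_INT (0x40)) is false.  */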
106
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
110
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 {
114 unsigned int width;
115
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
118
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
122
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 }
126
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
136
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
140
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
143 }
144
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 {
150 unsigned int width;
151
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
154
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
158
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
161 }
162 \f
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
165
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
169 {
170 rtx tem;
171
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
176
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
181
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
183 }
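/* Usage sketch (illustrative only; the mode and pseudo-register number are
   assumptions made for the example):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (1), reg);

   Nothing folds here, so the result should simply be the canonicalized
   (plus (reg:SI 100) (const_int 1)) with the constant placed second.  */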
184 \f
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
187 rtx
188 avoid_constant_pool_reference (rtx x)
189 {
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
193
194 switch (GET_CODE (x))
195 {
196 case MEM:
197 break;
198
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
204 {
205 REAL_VALUE_TYPE d;
206
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 }
210 return x;
211
212 default:
213 return x;
214 }
215
216 if (GET_MODE (x) == BLKmode)
217 return x;
218
219 addr = XEXP (x, 0);
220
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
223
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228 {
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
231 }
232
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
235
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
240 {
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
243
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
248 {
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
252 }
253 else
254 return c;
255 }
256
257 return x;
258 }
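/* Illustrative sketch, not part of the original file: a constant forced
   into the pool can be recovered again, assuming the mode and offset still
   match what was stored.

     rtx cst = CONST_DOUBLE_FROM_REAL_VALUE (dconst1, DFmode);
     rtx mem = force_const_mem (DFmode, cst);
     rtx back = avoid_constant_pool_reference (mem);

   Here BACK should compare rtx_equal_p to CST, while a MEM that does not
   reference the constant pool is returned unchanged.  */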
259 \f
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
263
264 rtx
265 delegitimize_mem_from_attrs (rtx x)
266 {
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
272 {
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
276
277 switch (TREE_CODE (decl))
278 {
279 default:
280 decl = NULL;
281 break;
282
283 case VAR_DECL:
284 break;
285
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
293 {
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
297
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
305 {
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
309 }
310 break;
311 }
312 }
313
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
321 {
322 rtx newx;
323
324 offset += MEM_OFFSET (x);
325
326 newx = DECL_RTL (decl);
327
328 if (MEM_P (newx))
329 {
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
350 }
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
354 }
355 }
356
357 return x;
358 }
359 \f
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
362
363 rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
366 {
367 rtx tem;
368
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
372
373 return gen_rtx_fmt_e (code, mode, op);
374 }
375
376 /* Likewise for ternary operations. */
377
378 rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 {
382 rtx tem;
383
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
388
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
390 }
391
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
394
395 rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 {
399 rtx tem;
400
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
404
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
406 }
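/* Usage sketch (illustrative only):

     rtx t = simplify_gen_relational (EQ, SImode, SImode,
                                      GEN_INT (4), GEN_INT (4));

   A comparison of identical constants should fold to const_true_rtx; when
   no simplification applies, an (eq:SI ...) rtx is generated instead.  */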
407 \f
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
 410 X; if it returns non-NULL, replace X with its return value and simplify the
411 result. */
412
413 rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 {
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
424
425 if (__builtin_expect (fn != NULL, 0))
426 {
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
430 }
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
433
434 switch (GET_RTX_CLASS (code))
435 {
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
443
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
451
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475
476 case RTX_EXTRA:
477 if (code == SUBREG)
478 {
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
486 }
487 break;
488
489 case RTX_OBJ:
490 if (code == MEM)
491 {
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
496 }
497 else if (code == LO_SUM)
498 {
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
505
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
509 }
510 break;
511
512 default:
513 break;
514 }
515
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
520 {
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 {
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
529 {
530 if (newvec == vec)
531 {
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
536 }
537 RTVEC_ELT (newvec, j) = op;
538 }
539 }
540 break;
541
542 case 'e':
543 if (XEXP (x, i))
544 {
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
547 {
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
551 }
552 }
553 break;
554 }
555 return newx;
556 }
557
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
560
561 rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 {
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
565 }
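/* Illustrative sketch (the pseudo-register number and mode are assumptions
   made for the example):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx x = gen_rtx_PLUS (SImode, reg, GEN_INT (4));
     rtx y = simplify_replace_rtx (x, reg, GEN_INT (6));

   After substituting (const_int 6) for the register, the PLUS is
   re-simplified, so Y should be (const_int 10).  */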
566 \f
567 /* Try to simplify a unary operation CODE whose output mode is to be
568 MODE with input operand OP whose mode was originally OP_MODE.
569 Return zero if no simplification can be made. */
570 rtx
571 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
572 rtx op, enum machine_mode op_mode)
573 {
574 rtx trueop, tem;
575
576 trueop = avoid_constant_pool_reference (op);
577
578 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
579 if (tem)
580 return tem;
581
582 return simplify_unary_operation_1 (code, mode, op);
583 }
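/* Usage sketch (illustrative only):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx inner = gen_rtx_NOT (SImode, reg);
     rtx res = simplify_unary_operation (NOT, SImode, inner, SImode);

   RES should be REG itself, via the (not (not X)) rule below; the function
   returns NULL_RTX when no simplification is known.  */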
584
585 /* Perform some simplifications we can do even if the operands
586 aren't constant. */
587 static rtx
588 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
589 {
590 enum rtx_code reversed;
591 rtx temp;
592
593 switch (code)
594 {
595 case NOT:
596 /* (not (not X)) == X. */
597 if (GET_CODE (op) == NOT)
598 return XEXP (op, 0);
599
600 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
601 comparison is all ones. */
602 if (COMPARISON_P (op)
603 && (mode == BImode || STORE_FLAG_VALUE == -1)
604 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
605 return simplify_gen_relational (reversed, mode, VOIDmode,
606 XEXP (op, 0), XEXP (op, 1));
607
608 /* (not (plus X -1)) can become (neg X). */
609 if (GET_CODE (op) == PLUS
610 && XEXP (op, 1) == constm1_rtx)
611 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
612
613 /* Similarly, (not (neg X)) is (plus X -1). */
614 if (GET_CODE (op) == NEG)
615 return plus_constant (mode, XEXP (op, 0), -1);
616
617 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
618 if (GET_CODE (op) == XOR
619 && CONST_INT_P (XEXP (op, 1))
620 && (temp = simplify_unary_operation (NOT, mode,
621 XEXP (op, 1), mode)) != 0)
622 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
623
624 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
625 if (GET_CODE (op) == PLUS
626 && CONST_INT_P (XEXP (op, 1))
627 && mode_signbit_p (mode, XEXP (op, 1))
628 && (temp = simplify_unary_operation (NOT, mode,
629 XEXP (op, 1), mode)) != 0)
630 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
631
632
633 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
634 operands other than 1, but that is not valid. We could do a
635 similar simplification for (not (lshiftrt C X)) where C is
636 just the sign bit, but this doesn't seem common enough to
637 bother with. */
638 if (GET_CODE (op) == ASHIFT
639 && XEXP (op, 0) == const1_rtx)
640 {
641 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
642 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
643 }
644
645 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
646 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
647 so we can perform the above simplification. */
648
649 if (STORE_FLAG_VALUE == -1
650 && GET_CODE (op) == ASHIFTRT
 651 && CONST_INT_P (XEXP (op, 1))
652 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
653 return simplify_gen_relational (GE, mode, VOIDmode,
654 XEXP (op, 0), const0_rtx);
655
656
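      /* Similarly, (not (subreg (ashift 1 X) 0)) where the SUBREG is a
         lowpart of a wider shift becomes (subreg (rotate ~1 X) 0).  */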
657 if (GET_CODE (op) == SUBREG
658 && subreg_lowpart_p (op)
659 && (GET_MODE_SIZE (GET_MODE (op))
660 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
661 && GET_CODE (SUBREG_REG (op)) == ASHIFT
662 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
663 {
664 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
665 rtx x;
666
667 x = gen_rtx_ROTATE (inner_mode,
668 simplify_gen_unary (NOT, inner_mode, const1_rtx,
669 inner_mode),
670 XEXP (SUBREG_REG (op), 1));
671 return rtl_hooks.gen_lowpart_no_emit (mode, x);
672 }
673
674 /* Apply De Morgan's laws to reduce number of patterns for machines
675 with negating logical insns (and-not, nand, etc.). If result has
676 only one NOT, put it first, since that is how the patterns are
677 coded. */
678
679 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
680 {
681 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
682 enum machine_mode op_mode;
683
684 op_mode = GET_MODE (in1);
685 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
686
687 op_mode = GET_MODE (in2);
688 if (op_mode == VOIDmode)
689 op_mode = mode;
690 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
691
692 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
693 {
694 rtx tem = in2;
695 in2 = in1; in1 = tem;
696 }
697
698 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
699 mode, in1, in2);
700 }
701 break;
702
703 case NEG:
704 /* (neg (neg X)) == X. */
705 if (GET_CODE (op) == NEG)
706 return XEXP (op, 0);
707
708 /* (neg (plus X 1)) can become (not X). */
709 if (GET_CODE (op) == PLUS
710 && XEXP (op, 1) == const1_rtx)
711 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
712
713 /* Similarly, (neg (not X)) is (plus X 1). */
714 if (GET_CODE (op) == NOT)
715 return plus_constant (mode, XEXP (op, 0), 1);
716
717 /* (neg (minus X Y)) can become (minus Y X). This transformation
718 isn't safe for modes with signed zeros, since if X and Y are
719 both +0, (minus Y X) is the same as (minus X Y). If the
720 rounding mode is towards +infinity (or -infinity) then the two
721 expressions will be rounded differently. */
722 if (GET_CODE (op) == MINUS
723 && !HONOR_SIGNED_ZEROS (mode)
724 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
725 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
726
727 if (GET_CODE (op) == PLUS
728 && !HONOR_SIGNED_ZEROS (mode)
729 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
730 {
731 /* (neg (plus A C)) is simplified to (minus -C A). */
732 if (CONST_INT_P (XEXP (op, 1))
733 || CONST_DOUBLE_P (XEXP (op, 1)))
734 {
735 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
736 if (temp)
737 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
738 }
739
740 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
741 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
742 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
743 }
744
745 /* (neg (mult A B)) becomes (mult A (neg B)).
746 This works even for floating-point values. */
747 if (GET_CODE (op) == MULT
748 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
749 {
750 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
751 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
752 }
753
754 /* NEG commutes with ASHIFT since it is multiplication. Only do
755 this if we can then eliminate the NEG (e.g., if the operand
756 is a constant). */
757 if (GET_CODE (op) == ASHIFT)
758 {
759 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
760 if (temp)
761 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
762 }
763
764 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
765 C is equal to the width of MODE minus 1. */
766 if (GET_CODE (op) == ASHIFTRT
767 && CONST_INT_P (XEXP (op, 1))
768 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
769 return simplify_gen_binary (LSHIFTRT, mode,
770 XEXP (op, 0), XEXP (op, 1));
771
772 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
773 C is equal to the width of MODE minus 1. */
774 if (GET_CODE (op) == LSHIFTRT
775 && CONST_INT_P (XEXP (op, 1))
776 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
777 return simplify_gen_binary (ASHIFTRT, mode,
778 XEXP (op, 0), XEXP (op, 1));
779
780 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
781 if (GET_CODE (op) == XOR
782 && XEXP (op, 1) == const1_rtx
783 && nonzero_bits (XEXP (op, 0), mode) == 1)
784 return plus_constant (mode, XEXP (op, 0), -1);
785
786 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
787 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
788 if (GET_CODE (op) == LT
789 && XEXP (op, 1) == const0_rtx
790 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
791 {
792 enum machine_mode inner = GET_MODE (XEXP (op, 0));
793 int isize = GET_MODE_PRECISION (inner);
794 if (STORE_FLAG_VALUE == 1)
795 {
796 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
797 GEN_INT (isize - 1));
798 if (mode == inner)
799 return temp;
800 if (GET_MODE_PRECISION (mode) > isize)
801 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
802 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
803 }
804 else if (STORE_FLAG_VALUE == -1)
805 {
806 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
807 GEN_INT (isize - 1));
808 if (mode == inner)
809 return temp;
810 if (GET_MODE_PRECISION (mode) > isize)
811 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
812 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
813 }
814 }
815 break;
816
817 case TRUNCATE:
818 /* We can't handle truncation to a partial integer mode here
819 because we don't know the real bitsize of the partial
820 integer mode. */
821 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
822 break;
823
824 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
825 if ((GET_CODE (op) == SIGN_EXTEND
826 || GET_CODE (op) == ZERO_EXTEND)
827 && GET_MODE (XEXP (op, 0)) == mode)
828 return XEXP (op, 0);
829
830 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
831 (OP:SI foo:SI) if OP is NEG or ABS. */
832 if ((GET_CODE (op) == ABS
833 || GET_CODE (op) == NEG)
834 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
835 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
836 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
837 return simplify_gen_unary (GET_CODE (op), mode,
838 XEXP (XEXP (op, 0), 0), mode);
839
840 /* (truncate:A (subreg:B (truncate:C X) 0)) is
841 (truncate:A X). */
842 if (GET_CODE (op) == SUBREG
843 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
844 && subreg_lowpart_p (op))
845 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
846 GET_MODE (XEXP (SUBREG_REG (op), 0)));
847
848 /* If we know that the value is already truncated, we can
849 replace the TRUNCATE with a SUBREG. Note that this is also
 850 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
 851 modes; we just have to apply a different definition for
852 truncation. But don't do this for an (LSHIFTRT (MULT ...))
853 since this will cause problems with the umulXi3_highpart
854 patterns. */
855 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
856 ? (num_sign_bit_copies (op, GET_MODE (op))
857 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
858 - GET_MODE_PRECISION (mode)))
859 : truncated_to_mode (mode, op))
860 && ! (GET_CODE (op) == LSHIFTRT
861 && GET_CODE (XEXP (op, 0)) == MULT))
862 return rtl_hooks.gen_lowpart_no_emit (mode, op);
863
864 /* A truncate of a comparison can be replaced with a subreg if
865 STORE_FLAG_VALUE permits. This is like the previous test,
866 but it works even if the comparison is done in a mode larger
867 than HOST_BITS_PER_WIDE_INT. */
868 if (HWI_COMPUTABLE_MODE_P (mode)
869 && COMPARISON_P (op)
870 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
871 return rtl_hooks.gen_lowpart_no_emit (mode, op);
872 break;
873
874 case FLOAT_TRUNCATE:
875 if (DECIMAL_FLOAT_MODE_P (mode))
876 break;
877
878 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
879 if (GET_CODE (op) == FLOAT_EXTEND
880 && GET_MODE (XEXP (op, 0)) == mode)
881 return XEXP (op, 0);
882
883 /* (float_truncate:SF (float_truncate:DF foo:XF))
884 = (float_truncate:SF foo:XF).
885 This may eliminate double rounding, so it is unsafe.
886
887 (float_truncate:SF (float_extend:XF foo:DF))
888 = (float_truncate:SF foo:DF).
889
890 (float_truncate:DF (float_extend:XF foo:SF))
 891 = (float_extend:DF foo:SF). */
892 if ((GET_CODE (op) == FLOAT_TRUNCATE
893 && flag_unsafe_math_optimizations)
894 || GET_CODE (op) == FLOAT_EXTEND)
895 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
896 0)))
897 > GET_MODE_SIZE (mode)
898 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
899 mode,
900 XEXP (op, 0), mode);
901
902 /* (float_truncate (float x)) is (float x) */
903 if (GET_CODE (op) == FLOAT
904 && (flag_unsafe_math_optimizations
905 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
906 && ((unsigned)significand_size (GET_MODE (op))
907 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
908 - num_sign_bit_copies (XEXP (op, 0),
909 GET_MODE (XEXP (op, 0))))))))
910 return simplify_gen_unary (FLOAT, mode,
911 XEXP (op, 0),
912 GET_MODE (XEXP (op, 0)));
913
914 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
915 (OP:SF foo:SF) if OP is NEG or ABS. */
916 if ((GET_CODE (op) == ABS
917 || GET_CODE (op) == NEG)
918 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
919 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
920 return simplify_gen_unary (GET_CODE (op), mode,
921 XEXP (XEXP (op, 0), 0), mode);
922
923 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
924 is (float_truncate:SF x). */
925 if (GET_CODE (op) == SUBREG
926 && subreg_lowpart_p (op)
927 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
928 return SUBREG_REG (op);
929 break;
930
931 case FLOAT_EXTEND:
932 if (DECIMAL_FLOAT_MODE_P (mode))
933 break;
934
935 /* (float_extend (float_extend x)) is (float_extend x)
936
937 (float_extend (float x)) is (float x) assuming that double
938 rounding can't happen.
939 */
940 if (GET_CODE (op) == FLOAT_EXTEND
941 || (GET_CODE (op) == FLOAT
942 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
943 && ((unsigned)significand_size (GET_MODE (op))
944 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
945 - num_sign_bit_copies (XEXP (op, 0),
946 GET_MODE (XEXP (op, 0)))))))
947 return simplify_gen_unary (GET_CODE (op), mode,
948 XEXP (op, 0),
949 GET_MODE (XEXP (op, 0)));
950
951 break;
952
953 case ABS:
954 /* (abs (neg <foo>)) -> (abs <foo>) */
955 if (GET_CODE (op) == NEG)
956 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
957 GET_MODE (XEXP (op, 0)));
958
959 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
960 do nothing. */
961 if (GET_MODE (op) == VOIDmode)
962 break;
963
964 /* If operand is something known to be positive, ignore the ABS. */
965 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
966 || val_signbit_known_clear_p (GET_MODE (op),
967 nonzero_bits (op, GET_MODE (op))))
968 return op;
969
970 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
971 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
972 return gen_rtx_NEG (mode, op);
973
974 break;
975
976 case FFS:
977 /* (ffs (*_extend <X>)) = (ffs <X>) */
978 if (GET_CODE (op) == SIGN_EXTEND
979 || GET_CODE (op) == ZERO_EXTEND)
980 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
981 GET_MODE (XEXP (op, 0)));
982 break;
983
984 case POPCOUNT:
985 switch (GET_CODE (op))
986 {
987 case BSWAP:
988 case ZERO_EXTEND:
989 /* (popcount (zero_extend <X>)) = (popcount <X>) */
990 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
991 GET_MODE (XEXP (op, 0)));
992
993 case ROTATE:
994 case ROTATERT:
995 /* Rotations don't affect popcount. */
996 if (!side_effects_p (XEXP (op, 1)))
997 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
998 GET_MODE (XEXP (op, 0)));
999 break;
1000
1001 default:
1002 break;
1003 }
1004 break;
1005
1006 case PARITY:
1007 switch (GET_CODE (op))
1008 {
1009 case NOT:
1010 case BSWAP:
1011 case ZERO_EXTEND:
1012 case SIGN_EXTEND:
1013 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1014 GET_MODE (XEXP (op, 0)));
1015
1016 case ROTATE:
1017 case ROTATERT:
1018 /* Rotations don't affect parity. */
1019 if (!side_effects_p (XEXP (op, 1)))
1020 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1021 GET_MODE (XEXP (op, 0)));
1022 break;
1023
1024 default:
1025 break;
1026 }
1027 break;
1028
1029 case BSWAP:
1030 /* (bswap (bswap x)) -> x. */
1031 if (GET_CODE (op) == BSWAP)
1032 return XEXP (op, 0);
1033 break;
1034
1035 case FLOAT:
1036 /* (float (sign_extend <X>)) = (float <X>). */
1037 if (GET_CODE (op) == SIGN_EXTEND)
1038 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1039 GET_MODE (XEXP (op, 0)));
1040 break;
1041
1042 case SIGN_EXTEND:
1043 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1044 becomes just the MINUS if its mode is MODE. This allows
1045 folding switch statements on machines using casesi (such as
1046 the VAX). */
1047 if (GET_CODE (op) == TRUNCATE
1048 && GET_MODE (XEXP (op, 0)) == mode
1049 && GET_CODE (XEXP (op, 0)) == MINUS
1050 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1051 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1052 return XEXP (op, 0);
1053
1054 /* Extending a widening multiplication should be canonicalized to
1055 a wider widening multiplication. */
1056 if (GET_CODE (op) == MULT)
1057 {
1058 rtx lhs = XEXP (op, 0);
1059 rtx rhs = XEXP (op, 1);
1060 enum rtx_code lcode = GET_CODE (lhs);
1061 enum rtx_code rcode = GET_CODE (rhs);
1062
1063 /* Widening multiplies usually extend both operands, but sometimes
1064 they use a shift to extract a portion of a register. */
1065 if ((lcode == SIGN_EXTEND
1066 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1067 && (rcode == SIGN_EXTEND
1068 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1069 {
1070 enum machine_mode lmode = GET_MODE (lhs);
1071 enum machine_mode rmode = GET_MODE (rhs);
1072 int bits;
1073
1074 if (lcode == ASHIFTRT)
1075 /* Number of bits not shifted off the end. */
1076 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1077 else /* lcode == SIGN_EXTEND */
1078 /* Size of inner mode. */
1079 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1080
1081 if (rcode == ASHIFTRT)
1082 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1083 else /* rcode == SIGN_EXTEND */
1084 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1085
 1086 /* We can only widen multiplies if the result is mathematically
 1087 equivalent, i.e. if overflow was impossible. */
1088 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1089 return simplify_gen_binary
1090 (MULT, mode,
1091 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1092 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1093 }
1094 }
1095
1096 /* Check for a sign extension of a subreg of a promoted
1097 variable, where the promotion is sign-extended, and the
1098 target mode is the same as the variable's promotion. */
1099 if (GET_CODE (op) == SUBREG
1100 && SUBREG_PROMOTED_VAR_P (op)
1101 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1102 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1103 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1104
1105 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1106 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1107 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1108 {
1109 gcc_assert (GET_MODE_BITSIZE (mode)
1110 > GET_MODE_BITSIZE (GET_MODE (op)));
1111 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1112 GET_MODE (XEXP (op, 0)));
1113 }
1114
1115 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1116 is (sign_extend:M (subreg:O <X>)) if there is mode with
1117 GET_MODE_BITSIZE (N) - I bits.
1118 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1119 is similarly (zero_extend:M (subreg:O <X>)). */
1120 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1121 && GET_CODE (XEXP (op, 0)) == ASHIFT
1122 && CONST_INT_P (XEXP (op, 1))
1123 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1124 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1125 {
1126 enum machine_mode tmode
1127 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1128 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1129 gcc_assert (GET_MODE_BITSIZE (mode)
1130 > GET_MODE_BITSIZE (GET_MODE (op)));
1131 if (tmode != BLKmode)
1132 {
1133 rtx inner =
1134 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1135 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1136 ? SIGN_EXTEND : ZERO_EXTEND,
1137 mode, inner, tmode);
1138 }
1139 }
1140
1141 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1142 /* As we do not know which address space the pointer is referring to,
1143 we can do this only if the target does not support different pointer
1144 or address modes depending on the address space. */
1145 if (target_default_pointer_address_modes_p ()
1146 && ! POINTERS_EXTEND_UNSIGNED
1147 && mode == Pmode && GET_MODE (op) == ptr_mode
1148 && (CONSTANT_P (op)
1149 || (GET_CODE (op) == SUBREG
1150 && REG_P (SUBREG_REG (op))
1151 && REG_POINTER (SUBREG_REG (op))
1152 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1153 return convert_memory_address (Pmode, op);
1154 #endif
1155 break;
1156
1157 case ZERO_EXTEND:
1158 /* Check for a zero extension of a subreg of a promoted
1159 variable, where the promotion is zero-extended, and the
1160 target mode is the same as the variable's promotion. */
1161 if (GET_CODE (op) == SUBREG
1162 && SUBREG_PROMOTED_VAR_P (op)
1163 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1164 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1165 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1166
1167 /* Extending a widening multiplication should be canonicalized to
1168 a wider widening multiplication. */
1169 if (GET_CODE (op) == MULT)
1170 {
1171 rtx lhs = XEXP (op, 0);
1172 rtx rhs = XEXP (op, 1);
1173 enum rtx_code lcode = GET_CODE (lhs);
1174 enum rtx_code rcode = GET_CODE (rhs);
1175
1176 /* Widening multiplies usually extend both operands, but sometimes
1177 they use a shift to extract a portion of a register. */
1178 if ((lcode == ZERO_EXTEND
1179 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1180 && (rcode == ZERO_EXTEND
1181 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1182 {
1183 enum machine_mode lmode = GET_MODE (lhs);
1184 enum machine_mode rmode = GET_MODE (rhs);
1185 int bits;
1186
1187 if (lcode == LSHIFTRT)
1188 /* Number of bits not shifted off the end. */
1189 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1190 else /* lcode == ZERO_EXTEND */
1191 /* Size of inner mode. */
1192 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1193
1194 if (rcode == LSHIFTRT)
1195 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1196 else /* rcode == ZERO_EXTEND */
1197 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1198
 1199 /* We can only widen multiplies if the result is mathematically
 1200 equivalent, i.e. if overflow was impossible. */
1201 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1202 return simplify_gen_binary
1203 (MULT, mode,
1204 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1205 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1206 }
1207 }
1208
1209 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1210 if (GET_CODE (op) == ZERO_EXTEND)
1211 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1212 GET_MODE (XEXP (op, 0)));
1213
1214 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1215 is (zero_extend:M (subreg:O <X>)) if there is mode with
1216 GET_MODE_BITSIZE (N) - I bits. */
1217 if (GET_CODE (op) == LSHIFTRT
1218 && GET_CODE (XEXP (op, 0)) == ASHIFT
1219 && CONST_INT_P (XEXP (op, 1))
1220 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1221 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1222 {
1223 enum machine_mode tmode
1224 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1225 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1226 if (tmode != BLKmode)
1227 {
1228 rtx inner =
1229 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1230 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1231 }
1232 }
1233
1234 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1235 /* As we do not know which address space the pointer is referring to,
1236 we can do this only if the target does not support different pointer
1237 or address modes depending on the address space. */
1238 if (target_default_pointer_address_modes_p ()
1239 && POINTERS_EXTEND_UNSIGNED > 0
1240 && mode == Pmode && GET_MODE (op) == ptr_mode
1241 && (CONSTANT_P (op)
1242 || (GET_CODE (op) == SUBREG
1243 && REG_P (SUBREG_REG (op))
1244 && REG_POINTER (SUBREG_REG (op))
1245 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1246 return convert_memory_address (Pmode, op);
1247 #endif
1248 break;
1249
1250 default:
1251 break;
1252 }
1253
1254 return 0;
1255 }
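/* Illustrative sketch of a non-constant simplification handled above
   (the registers and mode are assumptions made for the example):

     rtx a = gen_rtx_REG (SImode, 100);
     rtx b = gen_rtx_REG (SImode, 101);
     rtx diff = gen_rtx_MINUS (SImode, a, b);
     rtx neg = simplify_gen_unary (NEG, SImode, diff, SImode);

   For an integer mode the signed-zero and rounding caveats do not apply,
   so NEG should come back as (minus (reg:SI 101) (reg:SI 100)).  */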
1256
1257 /* Try to compute the value of a unary operation CODE whose output mode is to
1258 be MODE with input operand OP whose mode was originally OP_MODE.
1259 Return zero if the value cannot be computed. */
1260 rtx
1261 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1262 rtx op, enum machine_mode op_mode)
1263 {
1264 unsigned int width = GET_MODE_PRECISION (mode);
1265 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1266
1267 if (code == VEC_DUPLICATE)
1268 {
1269 gcc_assert (VECTOR_MODE_P (mode));
1270 if (GET_MODE (op) != VOIDmode)
1271 {
1272 if (!VECTOR_MODE_P (GET_MODE (op)))
1273 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1274 else
1275 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1276 (GET_MODE (op)));
1277 }
1278 if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
1279 || GET_CODE (op) == CONST_VECTOR)
1280 {
1281 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1282 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1283 rtvec v = rtvec_alloc (n_elts);
1284 unsigned int i;
1285
1286 if (GET_CODE (op) != CONST_VECTOR)
1287 for (i = 0; i < n_elts; i++)
1288 RTVEC_ELT (v, i) = op;
1289 else
1290 {
1291 enum machine_mode inmode = GET_MODE (op);
1292 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1293 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1294
1295 gcc_assert (in_n_elts < n_elts);
1296 gcc_assert ((n_elts % in_n_elts) == 0);
1297 for (i = 0; i < n_elts; i++)
1298 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1299 }
1300 return gen_rtx_CONST_VECTOR (mode, v);
1301 }
1302 }
1303
1304 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1305 {
1306 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1307 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1308 enum machine_mode opmode = GET_MODE (op);
1309 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1310 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1311 rtvec v = rtvec_alloc (n_elts);
1312 unsigned int i;
1313
1314 gcc_assert (op_n_elts == n_elts);
1315 for (i = 0; i < n_elts; i++)
1316 {
1317 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1318 CONST_VECTOR_ELT (op, i),
1319 GET_MODE_INNER (opmode));
1320 if (!x)
1321 return 0;
1322 RTVEC_ELT (v, i) = x;
1323 }
1324 return gen_rtx_CONST_VECTOR (mode, v);
1325 }
1326
1327 /* The order of these tests is critical so that, for example, we don't
1328 check the wrong mode (input vs. output) for a conversion operation,
1329 such as FIX. At some point, this should be simplified. */
1330
1331 if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1332 {
1333 HOST_WIDE_INT hv, lv;
1334 REAL_VALUE_TYPE d;
1335
1336 if (CONST_INT_P (op))
1337 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1338 else
1339 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1340
1341 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1342 d = real_value_truncate (mode, d);
1343 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1344 }
1345 else if (code == UNSIGNED_FLOAT
1346 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1347 {
1348 HOST_WIDE_INT hv, lv;
1349 REAL_VALUE_TYPE d;
1350
1351 if (CONST_INT_P (op))
1352 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1353 else
1354 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1355
1356 if (op_mode == VOIDmode
1357 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1358 /* We should never get a negative number. */
1359 gcc_assert (hv >= 0);
1360 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1361 hv = 0, lv &= GET_MODE_MASK (op_mode);
1362
1363 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1364 d = real_value_truncate (mode, d);
1365 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1366 }
1367
1368 if (CONST_INT_P (op)
1369 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1370 {
1371 HOST_WIDE_INT arg0 = INTVAL (op);
1372 HOST_WIDE_INT val;
1373
1374 switch (code)
1375 {
1376 case NOT:
1377 val = ~ arg0;
1378 break;
1379
1380 case NEG:
1381 val = - arg0;
1382 break;
1383
1384 case ABS:
1385 val = (arg0 >= 0 ? arg0 : - arg0);
1386 break;
1387
1388 case FFS:
1389 arg0 &= GET_MODE_MASK (mode);
1390 val = ffs_hwi (arg0);
1391 break;
1392
1393 case CLZ:
1394 arg0 &= GET_MODE_MASK (mode);
1395 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1396 ;
1397 else
1398 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1399 break;
1400
1401 case CLRSB:
1402 arg0 &= GET_MODE_MASK (mode);
1403 if (arg0 == 0)
1404 val = GET_MODE_PRECISION (mode) - 1;
1405 else if (arg0 >= 0)
1406 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1407 else if (arg0 < 0)
1408 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1409 break;
1410
1411 case CTZ:
1412 arg0 &= GET_MODE_MASK (mode);
1413 if (arg0 == 0)
1414 {
1415 /* Even if the value at zero is undefined, we have to come
1416 up with some replacement. Seems good enough. */
1417 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1418 val = GET_MODE_PRECISION (mode);
1419 }
1420 else
1421 val = ctz_hwi (arg0);
1422 break;
1423
1424 case POPCOUNT:
1425 arg0 &= GET_MODE_MASK (mode);
1426 val = 0;
1427 while (arg0)
1428 val++, arg0 &= arg0 - 1;
1429 break;
1430
1431 case PARITY:
1432 arg0 &= GET_MODE_MASK (mode);
1433 val = 0;
1434 while (arg0)
1435 val++, arg0 &= arg0 - 1;
1436 val &= 1;
1437 break;
1438
1439 case BSWAP:
1440 {
1441 unsigned int s;
1442
1443 val = 0;
1444 for (s = 0; s < width; s += 8)
1445 {
1446 unsigned int d = width - s - 8;
1447 unsigned HOST_WIDE_INT byte;
1448 byte = (arg0 >> s) & 0xff;
1449 val |= byte << d;
1450 }
1451 }
1452 break;
1453
1454 case TRUNCATE:
1455 val = arg0;
1456 break;
1457
1458 case ZERO_EXTEND:
1459 /* When zero-extending a CONST_INT, we need to know its
1460 original mode. */
1461 gcc_assert (op_mode != VOIDmode);
1462 if (op_width == HOST_BITS_PER_WIDE_INT)
1463 {
1464 /* If we were really extending the mode,
1465 we would have to distinguish between zero-extension
1466 and sign-extension. */
1467 gcc_assert (width == op_width);
1468 val = arg0;
1469 }
1470 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1471 val = arg0 & GET_MODE_MASK (op_mode);
1472 else
1473 return 0;
1474 break;
1475
1476 case SIGN_EXTEND:
1477 if (op_mode == VOIDmode)
1478 op_mode = mode;
1479 op_width = GET_MODE_PRECISION (op_mode);
1480 if (op_width == HOST_BITS_PER_WIDE_INT)
1481 {
1482 /* If we were really extending the mode,
1483 we would have to distinguish between zero-extension
1484 and sign-extension. */
1485 gcc_assert (width == op_width);
1486 val = arg0;
1487 }
1488 else if (op_width < HOST_BITS_PER_WIDE_INT)
1489 {
1490 val = arg0 & GET_MODE_MASK (op_mode);
1491 if (val_signbit_known_set_p (op_mode, val))
1492 val |= ~GET_MODE_MASK (op_mode);
1493 }
1494 else
1495 return 0;
1496 break;
1497
1498 case SQRT:
1499 case FLOAT_EXTEND:
1500 case FLOAT_TRUNCATE:
1501 case SS_TRUNCATE:
1502 case US_TRUNCATE:
1503 case SS_NEG:
1504 case US_NEG:
1505 case SS_ABS:
1506 return 0;
1507
1508 default:
1509 gcc_unreachable ();
1510 }
1511
1512 return gen_int_mode (val, mode);
1513 }
1514
1515 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1516 for a DImode operation on a CONST_INT. */
1517 else if (width <= HOST_BITS_PER_DOUBLE_INT
1518 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1519 {
1520 unsigned HOST_WIDE_INT l1, lv;
1521 HOST_WIDE_INT h1, hv;
1522
1523 if (CONST_DOUBLE_AS_INT_P (op))
1524 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1525 else
1526 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1527
1528 switch (code)
1529 {
1530 case NOT:
1531 lv = ~ l1;
1532 hv = ~ h1;
1533 break;
1534
1535 case NEG:
1536 neg_double (l1, h1, &lv, &hv);
1537 break;
1538
1539 case ABS:
1540 if (h1 < 0)
1541 neg_double (l1, h1, &lv, &hv);
1542 else
1543 lv = l1, hv = h1;
1544 break;
1545
1546 case FFS:
1547 hv = 0;
1548 if (l1 != 0)
1549 lv = ffs_hwi (l1);
1550 else if (h1 != 0)
1551 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1552 else
1553 lv = 0;
1554 break;
1555
1556 case CLZ:
1557 hv = 0;
1558 if (h1 != 0)
1559 lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
1560 - HOST_BITS_PER_WIDE_INT;
1561 else if (l1 != 0)
1562 lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
1563 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1564 lv = GET_MODE_PRECISION (mode);
1565 break;
1566
1567 case CTZ:
1568 hv = 0;
1569 if (l1 != 0)
1570 lv = ctz_hwi (l1);
1571 else if (h1 != 0)
1572 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1573 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1574 lv = GET_MODE_PRECISION (mode);
1575 break;
1576
1577 case POPCOUNT:
1578 hv = 0;
1579 lv = 0;
1580 while (l1)
1581 lv++, l1 &= l1 - 1;
1582 while (h1)
1583 lv++, h1 &= h1 - 1;
1584 break;
1585
1586 case PARITY:
1587 hv = 0;
1588 lv = 0;
1589 while (l1)
1590 lv++, l1 &= l1 - 1;
1591 while (h1)
1592 lv++, h1 &= h1 - 1;
1593 lv &= 1;
1594 break;
1595
1596 case BSWAP:
1597 {
1598 unsigned int s;
1599
1600 hv = 0;
1601 lv = 0;
1602 for (s = 0; s < width; s += 8)
1603 {
1604 unsigned int d = width - s - 8;
1605 unsigned HOST_WIDE_INT byte;
1606
1607 if (s < HOST_BITS_PER_WIDE_INT)
1608 byte = (l1 >> s) & 0xff;
1609 else
1610 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1611
1612 if (d < HOST_BITS_PER_WIDE_INT)
1613 lv |= byte << d;
1614 else
1615 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1616 }
1617 }
1618 break;
1619
1620 case TRUNCATE:
1621 /* This is just a change-of-mode, so do nothing. */
1622 lv = l1, hv = h1;
1623 break;
1624
1625 case ZERO_EXTEND:
1626 gcc_assert (op_mode != VOIDmode);
1627
1628 if (op_width > HOST_BITS_PER_WIDE_INT)
1629 return 0;
1630
1631 hv = 0;
1632 lv = l1 & GET_MODE_MASK (op_mode);
1633 break;
1634
1635 case SIGN_EXTEND:
1636 if (op_mode == VOIDmode
1637 || op_width > HOST_BITS_PER_WIDE_INT)
1638 return 0;
1639 else
1640 {
1641 lv = l1 & GET_MODE_MASK (op_mode);
1642 if (val_signbit_known_set_p (op_mode, lv))
1643 lv |= ~GET_MODE_MASK (op_mode);
1644
1645 hv = HWI_SIGN_EXTEND (lv);
1646 }
1647 break;
1648
1649 case SQRT:
1650 return 0;
1651
1652 default:
1653 return 0;
1654 }
1655
1656 return immed_double_const (lv, hv, mode);
1657 }
1658
1659 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1660 && SCALAR_FLOAT_MODE_P (mode)
1661 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1662 {
1663 REAL_VALUE_TYPE d, t;
1664 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1665
1666 switch (code)
1667 {
1668 case SQRT:
1669 if (HONOR_SNANS (mode) && real_isnan (&d))
1670 return 0;
1671 real_sqrt (&t, mode, &d);
1672 d = t;
1673 break;
1674 case ABS:
1675 d = real_value_abs (&d);
1676 break;
1677 case NEG:
1678 d = real_value_negate (&d);
1679 break;
1680 case FLOAT_TRUNCATE:
1681 d = real_value_truncate (mode, d);
1682 break;
1683 case FLOAT_EXTEND:
1684 /* All this does is change the mode, unless changing
1685 mode class. */
1686 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1687 real_convert (&d, mode, &d);
1688 break;
1689 case FIX:
1690 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1691 break;
1692 case NOT:
1693 {
1694 long tmp[4];
1695 int i;
1696
1697 real_to_target (tmp, &d, GET_MODE (op));
1698 for (i = 0; i < 4; i++)
1699 tmp[i] = ~tmp[i];
1700 real_from_target (&d, tmp, mode);
1701 break;
1702 }
1703 default:
1704 gcc_unreachable ();
1705 }
1706 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1707 }
1708
1709 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1710 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1711 && GET_MODE_CLASS (mode) == MODE_INT
1712 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1713 {
1714 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1715 operators are intentionally left unspecified (to ease implementation
1716 by target backends), for consistency, this routine implements the
1717 same semantics for constant folding as used by the middle-end. */
1718
1719 /* This was formerly used only for non-IEEE float.
1720 eggert@twinsun.com says it is safe for IEEE also. */
1721 HOST_WIDE_INT xh, xl, th, tl;
1722 REAL_VALUE_TYPE x, t;
1723 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1724 switch (code)
1725 {
1726 case FIX:
1727 if (REAL_VALUE_ISNAN (x))
1728 return const0_rtx;
1729
1730 /* Test against the signed upper bound. */
1731 if (width > HOST_BITS_PER_WIDE_INT)
1732 {
1733 th = ((unsigned HOST_WIDE_INT) 1
1734 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1735 tl = -1;
1736 }
1737 else
1738 {
1739 th = 0;
1740 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1741 }
1742 real_from_integer (&t, VOIDmode, tl, th, 0);
1743 if (REAL_VALUES_LESS (t, x))
1744 {
1745 xh = th;
1746 xl = tl;
1747 break;
1748 }
1749
1750 /* Test against the signed lower bound. */
1751 if (width > HOST_BITS_PER_WIDE_INT)
1752 {
1753 th = (unsigned HOST_WIDE_INT) (-1)
1754 << (width - HOST_BITS_PER_WIDE_INT - 1);
1755 tl = 0;
1756 }
1757 else
1758 {
1759 th = -1;
1760 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1761 }
1762 real_from_integer (&t, VOIDmode, tl, th, 0);
1763 if (REAL_VALUES_LESS (x, t))
1764 {
1765 xh = th;
1766 xl = tl;
1767 break;
1768 }
1769 REAL_VALUE_TO_INT (&xl, &xh, x);
1770 break;
1771
1772 case UNSIGNED_FIX:
1773 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1774 return const0_rtx;
1775
1776 /* Test against the unsigned upper bound. */
1777 if (width == HOST_BITS_PER_DOUBLE_INT)
1778 {
1779 th = -1;
1780 tl = -1;
1781 }
1782 else if (width >= HOST_BITS_PER_WIDE_INT)
1783 {
1784 th = ((unsigned HOST_WIDE_INT) 1
1785 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1786 tl = -1;
1787 }
1788 else
1789 {
1790 th = 0;
1791 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1792 }
1793 real_from_integer (&t, VOIDmode, tl, th, 1);
1794 if (REAL_VALUES_LESS (t, x))
1795 {
1796 xh = th;
1797 xl = tl;
1798 break;
1799 }
1800
1801 REAL_VALUE_TO_INT (&xl, &xh, x);
1802 break;
1803
1804 default:
1805 gcc_unreachable ();
1806 }
1807 return immed_double_const (xl, xh, mode);
1808 }
1809
1810 return NULL_RTX;
1811 }
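/* Usage sketch (illustrative only):

     rtx res = simplify_const_unary_operation (ZERO_EXTEND, SImode,
                                               gen_int_mode (-1, QImode),
                                               QImode);

   RES should be (const_int 255): the QImode value is masked to its mode
   before being widened.  NULL_RTX is returned if the value cannot be
   computed at compile time.  */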
1812 \f
1813 /* Subroutine of simplify_binary_operation to simplify a commutative,
1814 associative binary operation CODE with result mode MODE, operating
1815 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1816 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1817 canonicalization is possible. */
1818
1819 static rtx
1820 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1821 rtx op0, rtx op1)
1822 {
1823 rtx tem;
1824
1825 /* Linearize the operator to the left. */
1826 if (GET_CODE (op1) == code)
1827 {
1828 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1829 if (GET_CODE (op0) == code)
1830 {
1831 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1832 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1833 }
1834
1835 /* "a op (b op c)" becomes "(b op c) op a". */
1836 if (! swap_commutative_operands_p (op1, op0))
1837 return simplify_gen_binary (code, mode, op1, op0);
1838
1839 tem = op0;
1840 op0 = op1;
1841 op1 = tem;
1842 }
1843
1844 if (GET_CODE (op0) == code)
1845 {
1846 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1847 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1848 {
1849 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1850 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1851 }
1852
1853 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1854 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1855 if (tem != 0)
1856 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1857
1858 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1859 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1860 if (tem != 0)
1861 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1862 }
1863
1864 return 0;
1865 }
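/* This routine is static, but its effect can be seen through
   simplify_gen_binary.  Illustrative sketch (the register number and
   constants are assumptions made for the example):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx inner = gen_rtx_AND (SImode, reg, GEN_INT (0xff));
     rtx res = simplify_gen_binary (AND, SImode, inner, GEN_INT (0x0f));

   Reassociation folds the two constants, so RES should be
   (and (reg:SI 100) (const_int 15)).  */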
1866
1867
1868 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1869 and OP1. Return 0 if no simplification is possible.
1870
1871 Don't use this for relational operations such as EQ or LT.
1872 Use simplify_relational_operation instead. */
1873 rtx
1874 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1875 rtx op0, rtx op1)
1876 {
1877 rtx trueop0, trueop1;
1878 rtx tem;
1879
1880 /* Relational operations don't work here. We must know the mode
1881 of the operands in order to do the comparison correctly.
1882 Assuming a full word can give incorrect results.
1883 Consider comparing 128 with -128 in QImode. */
1884 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1885 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1886
1887 /* Make sure the constant is second. */
1888 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1889 && swap_commutative_operands_p (op0, op1))
1890 {
1891 tem = op0, op0 = op1, op1 = tem;
1892 }
1893
1894 trueop0 = avoid_constant_pool_reference (op0);
1895 trueop1 = avoid_constant_pool_reference (op1);
1896
1897 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1898 if (tem)
1899 return tem;
1900 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1901 }
1902
1903 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1904 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1905 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1906 actual constants. */
1907
1908 static rtx
1909 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1910 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1911 {
1912 rtx tem, reversed, opleft, opright;
1913 HOST_WIDE_INT val;
1914 unsigned int width = GET_MODE_PRECISION (mode);
1915
1916 /* Even if we can't compute a constant result,
1917 there are some cases worth simplifying. */
1918
1919 switch (code)
1920 {
1921 case PLUS:
1922 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1923 when x is NaN, infinite, or finite and nonzero. They aren't
1924 when x is -0 and the rounding mode is not towards -infinity,
1925 since (-0) + 0 is then 0. */
1926 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1927 return op0;
1928
1929 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1930 transformations are safe even for IEEE. */
1931 if (GET_CODE (op0) == NEG)
1932 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1933 else if (GET_CODE (op1) == NEG)
1934 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1935
1936 /* (~a) + 1 -> -a */
1937 if (INTEGRAL_MODE_P (mode)
1938 && GET_CODE (op0) == NOT
1939 && trueop1 == const1_rtx)
1940 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1941
1942 /* Handle both-operands-constant cases. We can only add
1943 CONST_INTs to constants since the sum of relocatable symbols
1944 can't be handled by most assemblers. Don't add CONST_INT
1945 to CONST_INT since overflow won't be computed properly if wider
1946 than HOST_BITS_PER_WIDE_INT. */
1947
1948 if ((GET_CODE (op0) == CONST
1949 || GET_CODE (op0) == SYMBOL_REF
1950 || GET_CODE (op0) == LABEL_REF)
1951 && CONST_INT_P (op1))
1952 return plus_constant (mode, op0, INTVAL (op1));
1953 else if ((GET_CODE (op1) == CONST
1954 || GET_CODE (op1) == SYMBOL_REF
1955 || GET_CODE (op1) == LABEL_REF)
1956 && CONST_INT_P (op0))
1957 return plus_constant (mode, op1, INTVAL (op0));
1958
1959 /* See if this is something like X * C + X or vice versa or
1960 if the multiplication is written as a shift. If so, we can
1961 distribute and make a new multiply or shift that may be
1962 cheaper (e.g. X * 2 + X becomes X * 3). But don't make
1963 something more expensive than we had before. */
1964
1965 if (SCALAR_INT_MODE_P (mode))
1966 {
1967 double_int coeff0, coeff1;
1968 rtx lhs = op0, rhs = op1;
1969
1970 coeff0 = double_int_one;
1971 coeff1 = double_int_one;
1972
1973 if (GET_CODE (lhs) == NEG)
1974 {
1975 coeff0 = double_int_minus_one;
1976 lhs = XEXP (lhs, 0);
1977 }
1978 else if (GET_CODE (lhs) == MULT
1979 && CONST_INT_P (XEXP (lhs, 1)))
1980 {
1981 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1982 lhs = XEXP (lhs, 0);
1983 }
1984 else if (GET_CODE (lhs) == ASHIFT
1985 && CONST_INT_P (XEXP (lhs, 1))
1986 && INTVAL (XEXP (lhs, 1)) >= 0
1987 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1988 {
1989 coeff0 = double_int_setbit (double_int_zero,
1990 INTVAL (XEXP (lhs, 1)));
1991 lhs = XEXP (lhs, 0);
1992 }
1993
1994 if (GET_CODE (rhs) == NEG)
1995 {
1996 coeff1 = double_int_minus_one;
1997 rhs = XEXP (rhs, 0);
1998 }
1999 else if (GET_CODE (rhs) == MULT
2000 && CONST_INT_P (XEXP (rhs, 1)))
2001 {
2002 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
2003 rhs = XEXP (rhs, 0);
2004 }
2005 else if (GET_CODE (rhs) == ASHIFT
2006 && CONST_INT_P (XEXP (rhs, 1))
2007 && INTVAL (XEXP (rhs, 1)) >= 0
2008 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2009 {
2010 coeff1 = double_int_setbit (double_int_zero,
2011 INTVAL (XEXP (rhs, 1)));
2012 rhs = XEXP (rhs, 0);
2013 }
2014
2015 if (rtx_equal_p (lhs, rhs))
2016 {
2017 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2018 rtx coeff;
2019 double_int val;
2020 bool speed = optimize_function_for_speed_p (cfun);
2021
2022 val = double_int_add (coeff0, coeff1);
2023 coeff = immed_double_int_const (val, mode);
2024
2025 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2026 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2027 ? tem : 0;
2028 }
2029 }
2030
2031 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
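/* This works because adding the sign bit is the same as XORing it in:
   any carry out of the most significant bit is discarded, so the PLUS
   can be folded into the inner XOR's constant.  */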
2032 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2033 && GET_CODE (op0) == XOR
2034 && (CONST_INT_P (XEXP (op0, 1))
2035 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2036 && mode_signbit_p (mode, op1))
2037 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2038 simplify_gen_binary (XOR, mode, op1,
2039 XEXP (op0, 1)));
2040
2041 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2042 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2043 && GET_CODE (op0) == MULT
2044 && GET_CODE (XEXP (op0, 0)) == NEG)
2045 {
2046 rtx in1, in2;
2047
2048 in1 = XEXP (XEXP (op0, 0), 0);
2049 in2 = XEXP (op0, 1);
2050 return simplify_gen_binary (MINUS, mode, op1,
2051 simplify_gen_binary (MULT, mode,
2052 in1, in2));
2053 }
2054
2055 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2056 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2057 is 1. */
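/* For example, with STORE_FLAG_VALUE == 1,
   (plus (eq A B) (const_int -1)) becomes (neg (ne A B));
   both yield 0 when A == B and -1 otherwise.  */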
2058 if (COMPARISON_P (op0)
2059 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2060 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2061 && (reversed = reversed_comparison (op0, mode)))
2062 return
2063 simplify_gen_unary (NEG, mode, reversed, mode);
2064
2065 /* If one of the operands is a PLUS or a MINUS, see if we can
2066 simplify this by the associative law.
2067 Don't use the associative law for floating point.
2068 The inaccuracy makes it nonassociative,
2069 and subtle programs can break if operations are associated. */
2070
2071 if (INTEGRAL_MODE_P (mode)
2072 && (plus_minus_operand_p (op0)
2073 || plus_minus_operand_p (op1))
2074 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2075 return tem;
2076
2077 /* Reassociate floating point addition only when the user
2078 specifies associative math operations. */
2079 if (FLOAT_MODE_P (mode)
2080 && flag_associative_math)
2081 {
2082 tem = simplify_associative_operation (code, mode, op0, op1);
2083 if (tem)
2084 return tem;
2085 }
2086 break;
2087
2088 case COMPARE:
2089 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2090 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2091 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2092 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2093 {
2094 rtx xop00 = XEXP (op0, 0);
2095 rtx xop10 = XEXP (op1, 0);
2096
2097 #ifdef HAVE_cc0
2098 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2099 #else
2100 if (REG_P (xop00) && REG_P (xop10)
2101 && GET_MODE (xop00) == GET_MODE (xop10)
2102 && REGNO (xop00) == REGNO (xop10)
2103 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2104 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2105 #endif
2106 return xop00;
2107 }
2108 break;
2109
2110 case MINUS:
2111 /* We can't assume x-x is 0 even with non-IEEE floating point,
2112 but since it is zero except in very strange circumstances, we
2113 will treat it as zero with -ffinite-math-only. */
2114 if (rtx_equal_p (trueop0, trueop1)
2115 && ! side_effects_p (op0)
2116 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2117 return CONST0_RTX (mode);
2118
2119 /* Change subtraction from zero into negation. (0 - x) is the
2120 same as -x when x is NaN, infinite, or finite and nonzero.
2121 But if the mode has signed zeros, and does not round towards
2122 -infinity, then 0 - 0 is 0, not -0. */
2123 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2124 return simplify_gen_unary (NEG, mode, op1, mode);
2125
2126 /* (-1 - a) is ~a. */
2127 if (trueop0 == constm1_rtx)
2128 return simplify_gen_unary (NOT, mode, op1, mode);
2129
2130 /* Subtracting 0 has no effect unless the mode has signed zeros
2131 and supports rounding towards -infinity. In such a case,
2132 0 - 0 is -0. */
2133 if (!(HONOR_SIGNED_ZEROS (mode)
2134 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2135 && trueop1 == CONST0_RTX (mode))
2136 return op0;
2137
2138 /* See if this is something like X * C - X or vice versa or
2139 if the multiplication is written as a shift. If so, we can
2140 distribute and make a new multiply, shift, or maybe just
2141 have X (if C is 2 in the example above). But don't make
2142 something more expensive than we had before. */
2143
2144 if (SCALAR_INT_MODE_P (mode))
2145 {
2146 double_int coeff0, negcoeff1;
2147 rtx lhs = op0, rhs = op1;
2148
2149 coeff0 = double_int_one;
2150 negcoeff1 = double_int_minus_one;
2151
2152 if (GET_CODE (lhs) == NEG)
2153 {
2154 coeff0 = double_int_minus_one;
2155 lhs = XEXP (lhs, 0);
2156 }
2157 else if (GET_CODE (lhs) == MULT
2158 && CONST_INT_P (XEXP (lhs, 1)))
2159 {
2160 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2161 lhs = XEXP (lhs, 0);
2162 }
2163 else if (GET_CODE (lhs) == ASHIFT
2164 && CONST_INT_P (XEXP (lhs, 1))
2165 && INTVAL (XEXP (lhs, 1)) >= 0
2166 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2167 {
2168 coeff0 = double_int_setbit (double_int_zero,
2169 INTVAL (XEXP (lhs, 1)));
2170 lhs = XEXP (lhs, 0);
2171 }
2172
2173 if (GET_CODE (rhs) == NEG)
2174 {
2175 negcoeff1 = double_int_one;
2176 rhs = XEXP (rhs, 0);
2177 }
2178 else if (GET_CODE (rhs) == MULT
2179 && CONST_INT_P (XEXP (rhs, 1)))
2180 {
2181 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2182 rhs = XEXP (rhs, 0);
2183 }
2184 else if (GET_CODE (rhs) == ASHIFT
2185 && CONST_INT_P (XEXP (rhs, 1))
2186 && INTVAL (XEXP (rhs, 1)) >= 0
2187 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2188 {
2189 negcoeff1 = double_int_setbit (double_int_zero,
2190 INTVAL (XEXP (rhs, 1)));
2191 negcoeff1 = double_int_neg (negcoeff1);
2192 rhs = XEXP (rhs, 0);
2193 }
2194
2195 if (rtx_equal_p (lhs, rhs))
2196 {
2197 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2198 rtx coeff;
2199 double_int val;
2200 bool speed = optimize_function_for_speed_p (cfun);
2201
2202 val = double_int_add (coeff0, negcoeff1);
2203 coeff = immed_double_int_const (val, mode);
2204
2205 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2206 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2207 ? tem : 0;
2208 }
2209 }
2210
2211 /* (a - (-b)) -> (a + b). True even for IEEE. */
2212 if (GET_CODE (op1) == NEG)
2213 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2214
2215 /* (-x - c) may be simplified as (-c - x). */
2216 if (GET_CODE (op0) == NEG
2217 && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
2218 {
2219 tem = simplify_unary_operation (NEG, mode, op1, mode);
2220 if (tem)
2221 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2222 }
2223
2224 /* Don't let a relocatable value get a negative coeff. */
2225 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2226 return simplify_gen_binary (PLUS, mode,
2227 op0,
2228 neg_const_int (mode, op1));
2229
2230 /* (x - (x & y)) -> (x & ~y) */
2231 if (GET_CODE (op1) == AND)
2232 {
2233 if (rtx_equal_p (op0, XEXP (op1, 0)))
2234 {
2235 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2236 GET_MODE (XEXP (op1, 1)));
2237 return simplify_gen_binary (AND, mode, op0, tem);
2238 }
2239 if (rtx_equal_p (op0, XEXP (op1, 1)))
2240 {
2241 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2242 GET_MODE (XEXP (op1, 0)));
2243 return simplify_gen_binary (AND, mode, op0, tem);
2244 }
2245 }
2246
2247 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2248 by reversing the comparison code if valid. */
2249 if (STORE_FLAG_VALUE == 1
2250 && trueop0 == const1_rtx
2251 && COMPARISON_P (op1)
2252 && (reversed = reversed_comparison (op1, mode)))
2253 return reversed;
2254
2255 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2256 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2257 && GET_CODE (op1) == MULT
2258 && GET_CODE (XEXP (op1, 0)) == NEG)
2259 {
2260 rtx in1, in2;
2261
2262 in1 = XEXP (XEXP (op1, 0), 0);
2263 in2 = XEXP (op1, 1);
2264 return simplify_gen_binary (PLUS, mode,
2265 simplify_gen_binary (MULT, mode,
2266 in1, in2),
2267 op0);
2268 }
2269
2270 /* Canonicalize (minus (neg A) (mult B C)) to
2271 (minus (mult (neg B) C) A). */
2272 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2273 && GET_CODE (op1) == MULT
2274 && GET_CODE (op0) == NEG)
2275 {
2276 rtx in1, in2;
2277
2278 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2279 in2 = XEXP (op1, 1);
2280 return simplify_gen_binary (MINUS, mode,
2281 simplify_gen_binary (MULT, mode,
2282 in1, in2),
2283 XEXP (op0, 0));
2284 }
2285
2286 /* If one of the operands is a PLUS or a MINUS, see if we can
2287 simplify this by the associative law. This will, for example,
2288 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2289 Don't use the associative law for floating point.
2290 The inaccuracy makes it nonassociative,
2291 and subtle programs can break if operations are associated. */
2292
2293 if (INTEGRAL_MODE_P (mode)
2294 && (plus_minus_operand_p (op0)
2295 || plus_minus_operand_p (op1))
2296 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2297 return tem;
2298 break;
2299
2300 case MULT:
2301 if (trueop1 == constm1_rtx)
2302 return simplify_gen_unary (NEG, mode, op0, mode);
2303
2304 if (GET_CODE (op0) == NEG)
2305 {
2306 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2307 /* If op1 is a MULT as well and simplify_unary_operation
2308 just moved the NEG to the second operand, simplify_gen_binary
2309 below could, through simplify_associative_operation, move
2310 the NEG around again and recurse endlessly. */
2311 if (temp
2312 && GET_CODE (op1) == MULT
2313 && GET_CODE (temp) == MULT
2314 && XEXP (op1, 0) == XEXP (temp, 0)
2315 && GET_CODE (XEXP (temp, 1)) == NEG
2316 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2317 temp = NULL_RTX;
2318 if (temp)
2319 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2320 }
2321 if (GET_CODE (op1) == NEG)
2322 {
2323 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2324 /* If op0 is a MULT as well and simplify_unary_operation
2325 just moved the NEG to the second operand, simplify_gen_binary
2326 below could, through simplify_associative_operation, move
2327 the NEG around again and recurse endlessly. */
2328 if (temp
2329 && GET_CODE (op0) == MULT
2330 && GET_CODE (temp) == MULT
2331 && XEXP (op0, 0) == XEXP (temp, 0)
2332 && GET_CODE (XEXP (temp, 1)) == NEG
2333 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2334 temp = NULL_RTX;
2335 if (temp)
2336 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2337 }
2338
2339 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2340 x is NaN, since x * 0 is then also NaN. Nor is it valid
2341 when the mode has signed zeros, since multiplying a negative
2342 number by 0 will give -0, not 0. */
2343 if (!HONOR_NANS (mode)
2344 && !HONOR_SIGNED_ZEROS (mode)
2345 && trueop1 == CONST0_RTX (mode)
2346 && ! side_effects_p (op0))
2347 return op1;
2348
2349 /* In IEEE floating point, x*1 is not equivalent to x for
2350 signalling NaNs. */
2351 if (!HONOR_SNANS (mode)
2352 && trueop1 == CONST1_RTX (mode))
2353 return op0;
2354
2355 /* Convert multiply by constant power of two into shift unless
2356 we are still generating RTL. This test is a kludge. */
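/* For example, (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */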
2357 if (CONST_INT_P (trueop1)
2358 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2359 /* If the mode is larger than the host word size, and the
2360 uppermost bit is set, then this isn't a power of two due
2361 to implicit sign extension. */
2362 && (width <= HOST_BITS_PER_WIDE_INT
2363 || val != HOST_BITS_PER_WIDE_INT - 1))
2364 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2365
2366 /* Likewise for multipliers wider than a word. */
2367 if (CONST_DOUBLE_AS_INT_P (trueop1)
2368 && GET_MODE (op0) == mode
2369 && CONST_DOUBLE_LOW (trueop1) == 0
2370 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2371 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2372 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2373 return simplify_gen_binary (ASHIFT, mode, op0,
2374 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2375
2376 /* x*2 is x+x and x*(-1) is -x */
2377 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2378 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2379 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2380 && GET_MODE (op0) == mode)
2381 {
2382 REAL_VALUE_TYPE d;
2383 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2384
2385 if (REAL_VALUES_EQUAL (d, dconst2))
2386 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2387
2388 if (!HONOR_SNANS (mode)
2389 && REAL_VALUES_EQUAL (d, dconstm1))
2390 return simplify_gen_unary (NEG, mode, op0, mode);
2391 }
2392
2393 /* Optimize -x * -x as x * x. */
2394 if (FLOAT_MODE_P (mode)
2395 && GET_CODE (op0) == NEG
2396 && GET_CODE (op1) == NEG
2397 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2398 && !side_effects_p (XEXP (op0, 0)))
2399 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2400
2401 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2402 if (SCALAR_FLOAT_MODE_P (mode)
2403 && GET_CODE (op0) == ABS
2404 && GET_CODE (op1) == ABS
2405 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2406 && !side_effects_p (XEXP (op0, 0)))
2407 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2408
2409 /* Reassociate multiplication, but for floating point MULTs
2410 only when the user specifies unsafe math optimizations. */
2411 if (! FLOAT_MODE_P (mode)
2412 || flag_unsafe_math_optimizations)
2413 {
2414 tem = simplify_associative_operation (code, mode, op0, op1);
2415 if (tem)
2416 return tem;
2417 }
2418 break;
2419
2420 case IOR:
2421 if (trueop1 == CONST0_RTX (mode))
2422 return op0;
2423 if (INTEGRAL_MODE_P (mode)
2424 && trueop1 == CONSTM1_RTX (mode)
2425 && !side_effects_p (op0))
2426 return op1;
2427 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2428 return op0;
2429 /* A | (~A) -> -1 */
2430 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2431 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2432 && ! side_effects_p (op0)
2433 && SCALAR_INT_MODE_P (mode))
2434 return constm1_rtx;
2435
2436 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2437 if (CONST_INT_P (op1)
2438 && HWI_COMPUTABLE_MODE_P (mode)
2439 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2440 && !side_effects_p (op0))
2441 return op1;
2442
2443 /* Canonicalize (X & C1) | C2. */
2444 if (GET_CODE (op0) == AND
2445 && CONST_INT_P (trueop1)
2446 && CONST_INT_P (XEXP (op0, 1)))
2447 {
2448 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2449 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2450 HOST_WIDE_INT c2 = INTVAL (trueop1);
2451
2452 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2453 if ((c1 & c2) == c1
2454 && !side_effects_p (XEXP (op0, 0)))
2455 return trueop1;
2456
2457 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2458 if (((c1|c2) & mask) == mask)
2459 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2460
2461 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2462 if (((c1 & ~c2) & mask) != (c1 & mask))
2463 {
2464 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2465 gen_int_mode (c1 & ~c2, mode));
2466 return simplify_gen_binary (IOR, mode, tem, op1);
2467 }
2468 }
2469
2470 /* Convert (A & B) | A to A. */
2471 if (GET_CODE (op0) == AND
2472 && (rtx_equal_p (XEXP (op0, 0), op1)
2473 || rtx_equal_p (XEXP (op0, 1), op1))
2474 && ! side_effects_p (XEXP (op0, 0))
2475 && ! side_effects_p (XEXP (op0, 1)))
2476 return op1;
2477
2478 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2479 mode size to (rotate A CX). */
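/* For example, in SImode (ior (ashift A (const_int 8))
   (lshiftrt A (const_int 24))) becomes (rotate A (const_int 8)).  */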
2480
2481 if (GET_CODE (op1) == ASHIFT
2482 || GET_CODE (op1) == SUBREG)
2483 {
2484 opleft = op1;
2485 opright = op0;
2486 }
2487 else
2488 {
2489 opright = op1;
2490 opleft = op0;
2491 }
2492
2493 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2494 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2495 && CONST_INT_P (XEXP (opleft, 1))
2496 && CONST_INT_P (XEXP (opright, 1))
2497 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2498 == GET_MODE_PRECISION (mode)))
2499 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2500
2501 /* Same, but for ashift that has been "simplified" to a wider mode
2502 by simplify_shift_const. */
2503
2504 if (GET_CODE (opleft) == SUBREG
2505 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2506 && GET_CODE (opright) == LSHIFTRT
2507 && GET_CODE (XEXP (opright, 0)) == SUBREG
2508 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2509 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2510 && (GET_MODE_SIZE (GET_MODE (opleft))
2511 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2512 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2513 SUBREG_REG (XEXP (opright, 0)))
2514 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2515 && CONST_INT_P (XEXP (opright, 1))
2516 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2517 == GET_MODE_PRECISION (mode)))
2518 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2519 XEXP (SUBREG_REG (opleft), 1));
2520
2521 /* If we have (ior (and X C1) C2), simplify this by making
2522 C1 as small as possible if C1 actually changes. */
2523 if (CONST_INT_P (op1)
2524 && (HWI_COMPUTABLE_MODE_P (mode)
2525 || INTVAL (op1) > 0)
2526 && GET_CODE (op0) == AND
2527 && CONST_INT_P (XEXP (op0, 1))
2528 && CONST_INT_P (op1)
2529 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2530 return simplify_gen_binary (IOR, mode,
2531 simplify_gen_binary
2532 (AND, mode, XEXP (op0, 0),
2533 GEN_INT (UINTVAL (XEXP (op0, 1))
2534 & ~UINTVAL (op1))),
2535 op1);
2536
2537 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2538 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2539 the PLUS does not affect any of the bits in OP1: then we can do
2540 the IOR as a PLUS and we can associate. This is valid if OP1
2541 can be safely shifted left C bits. */
2542 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2543 && GET_CODE (XEXP (op0, 0)) == PLUS
2544 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2545 && CONST_INT_P (XEXP (op0, 1))
2546 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2547 {
2548 int count = INTVAL (XEXP (op0, 1));
2549 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2550
2551 if (mask >> count == INTVAL (trueop1)
2552 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2553 return simplify_gen_binary (ASHIFTRT, mode,
2554 plus_constant (mode, XEXP (op0, 0),
2555 mask),
2556 XEXP (op0, 1));
2557 }
2558
2559 tem = simplify_associative_operation (code, mode, op0, op1);
2560 if (tem)
2561 return tem;
2562 break;
2563
2564 case XOR:
2565 if (trueop1 == CONST0_RTX (mode))
2566 return op0;
2567 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2568 return simplify_gen_unary (NOT, mode, op0, mode);
2569 if (rtx_equal_p (trueop0, trueop1)
2570 && ! side_effects_p (op0)
2571 && GET_MODE_CLASS (mode) != MODE_CC)
2572 return CONST0_RTX (mode);
2573
2574 /* Canonicalize XOR of the most significant bit to PLUS. */
2575 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2576 && mode_signbit_p (mode, op1))
2577 return simplify_gen_binary (PLUS, mode, op0, op1);
2578 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2579 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2580 && GET_CODE (op0) == PLUS
2581 && (CONST_INT_P (XEXP (op0, 1))
2582 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2583 && mode_signbit_p (mode, XEXP (op0, 1)))
2584 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2585 simplify_gen_binary (XOR, mode, op1,
2586 XEXP (op0, 1)));
2587
2588 /* If we are XORing two things that have no bits in common,
2589 convert them into an IOR. This helps to detect a rotate encoded
2590 with XOR instead of IOR, and may enable other simplifications. */
2591
2592 if (HWI_COMPUTABLE_MODE_P (mode)
2593 && (nonzero_bits (op0, mode)
2594 & nonzero_bits (op1, mode)) == 0)
2595 return (simplify_gen_binary (IOR, mode, op0, op1));
2596
2597 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2598 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2599 (NOT y). */
2600 {
2601 int num_negated = 0;
2602
2603 if (GET_CODE (op0) == NOT)
2604 num_negated++, op0 = XEXP (op0, 0);
2605 if (GET_CODE (op1) == NOT)
2606 num_negated++, op1 = XEXP (op1, 0);
2607
2608 if (num_negated == 2)
2609 return simplify_gen_binary (XOR, mode, op0, op1);
2610 else if (num_negated == 1)
2611 return simplify_gen_unary (NOT, mode,
2612 simplify_gen_binary (XOR, mode, op0, op1),
2613 mode);
2614 }
2615
2616 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2617 correspond to a machine insn or result in further simplifications
2618 if B is a constant. */
2619
2620 if (GET_CODE (op0) == AND
2621 && rtx_equal_p (XEXP (op0, 1), op1)
2622 && ! side_effects_p (op1))
2623 return simplify_gen_binary (AND, mode,
2624 simplify_gen_unary (NOT, mode,
2625 XEXP (op0, 0), mode),
2626 op1);
2627
2628 else if (GET_CODE (op0) == AND
2629 && rtx_equal_p (XEXP (op0, 0), op1)
2630 && ! side_effects_p (op1))
2631 return simplify_gen_binary (AND, mode,
2632 simplify_gen_unary (NOT, mode,
2633 XEXP (op0, 1), mode),
2634 op1);
2635
2636 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2637 we can transform like this:
2638 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2639 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2640 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2641 Attempt a few simplifications when B and C are both constants. */
2642 if (GET_CODE (op0) == AND
2643 && CONST_INT_P (op1)
2644 && CONST_INT_P (XEXP (op0, 1)))
2645 {
2646 rtx a = XEXP (op0, 0);
2647 rtx b = XEXP (op0, 1);
2648 rtx c = op1;
2649 HOST_WIDE_INT bval = INTVAL (b);
2650 HOST_WIDE_INT cval = INTVAL (c);
2651
2652 rtx na_c
2653 = simplify_binary_operation (AND, mode,
2654 simplify_gen_unary (NOT, mode, a, mode),
2655 c);
2656 if ((~cval & bval) == 0)
2657 {
2658 /* Try to simplify ~A&C | ~B&C. */
2659 if (na_c != NULL_RTX)
2660 return simplify_gen_binary (IOR, mode, na_c,
2661 GEN_INT (~bval & cval));
2662 }
2663 else
2664 {
2665 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2666 if (na_c == const0_rtx)
2667 {
2668 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2669 GEN_INT (~cval & bval));
2670 return simplify_gen_binary (IOR, mode, a_nc_b,
2671 GEN_INT (~bval & cval));
2672 }
2673 }
2674 }
2675
2676 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2677 comparison if STORE_FLAG_VALUE is 1. */
2678 if (STORE_FLAG_VALUE == 1
2679 && trueop1 == const1_rtx
2680 && COMPARISON_P (op0)
2681 && (reversed = reversed_comparison (op0, mode)))
2682 return reversed;
2683
2684 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2685 is (lt foo (const_int 0)), so we can perform the above
2686 simplification if STORE_FLAG_VALUE is 1. */
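/* For example, in SImode (xor (lshiftrt X (const_int 31)) (const_int 1))
   becomes (ge X (const_int 0)).  */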
2687
2688 if (STORE_FLAG_VALUE == 1
2689 && trueop1 == const1_rtx
2690 && GET_CODE (op0) == LSHIFTRT
2691 && CONST_INT_P (XEXP (op0, 1))
2692 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2693 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2694
2695 /* (xor (comparison foo bar) (const_int sign-bit))
2696 when STORE_FLAG_VALUE is the sign bit. */
2697 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2698 && trueop1 == const_true_rtx
2699 && COMPARISON_P (op0)
2700 && (reversed = reversed_comparison (op0, mode)))
2701 return reversed;
2702
2703 tem = simplify_associative_operation (code, mode, op0, op1);
2704 if (tem)
2705 return tem;
2706 break;
2707
2708 case AND:
2709 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2710 return trueop1;
2711 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2712 return op0;
2713 if (HWI_COMPUTABLE_MODE_P (mode))
2714 {
2715 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2716 HOST_WIDE_INT nzop1;
2717 if (CONST_INT_P (trueop1))
2718 {
2719 HOST_WIDE_INT val1 = INTVAL (trueop1);
2720 /* If we are turning off bits already known off in OP0, we need
2721 not do an AND. */
2722 if ((nzop0 & ~val1) == 0)
2723 return op0;
2724 }
2725 nzop1 = nonzero_bits (trueop1, mode);
2726 /* If we are clearing all the nonzero bits, the result is zero. */
2727 if ((nzop1 & nzop0) == 0
2728 && !side_effects_p (op0) && !side_effects_p (op1))
2729 return CONST0_RTX (mode);
2730 }
2731 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2732 && GET_MODE_CLASS (mode) != MODE_CC)
2733 return op0;
2734 /* A & (~A) -> 0 */
2735 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2736 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2737 && ! side_effects_p (op0)
2738 && GET_MODE_CLASS (mode) != MODE_CC)
2739 return CONST0_RTX (mode);
2740
2741 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2742 there are no nonzero bits of C outside of X's mode. */
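/* For example, (and (sign_extend:SI X:QI) (const_int 0x7f)) becomes
   (zero_extend:SI (and:QI X (const_int 0x7f))).  */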
2743 if ((GET_CODE (op0) == SIGN_EXTEND
2744 || GET_CODE (op0) == ZERO_EXTEND)
2745 && CONST_INT_P (trueop1)
2746 && HWI_COMPUTABLE_MODE_P (mode)
2747 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2748 & UINTVAL (trueop1)) == 0)
2749 {
2750 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2751 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2752 gen_int_mode (INTVAL (trueop1),
2753 imode));
2754 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2755 }
2756
2757 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2758 we might be able to further simplify the AND with X and potentially
2759 remove the truncation altogether. */
2760 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2761 {
2762 rtx x = XEXP (op0, 0);
2763 enum machine_mode xmode = GET_MODE (x);
2764 tem = simplify_gen_binary (AND, xmode, x,
2765 gen_int_mode (INTVAL (trueop1), xmode));
2766 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2767 }
2768
2769 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2770 if (GET_CODE (op0) == IOR
2771 && CONST_INT_P (trueop1)
2772 && CONST_INT_P (XEXP (op0, 1)))
2773 {
2774 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2775 return simplify_gen_binary (IOR, mode,
2776 simplify_gen_binary (AND, mode,
2777 XEXP (op0, 0), op1),
2778 gen_int_mode (tmp, mode));
2779 }
2780
2781 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2782 insn (and may simplify more). */
2783 if (GET_CODE (op0) == XOR
2784 && rtx_equal_p (XEXP (op0, 0), op1)
2785 && ! side_effects_p (op1))
2786 return simplify_gen_binary (AND, mode,
2787 simplify_gen_unary (NOT, mode,
2788 XEXP (op0, 1), mode),
2789 op1);
2790
2791 if (GET_CODE (op0) == XOR
2792 && rtx_equal_p (XEXP (op0, 1), op1)
2793 && ! side_effects_p (op1))
2794 return simplify_gen_binary (AND, mode,
2795 simplify_gen_unary (NOT, mode,
2796 XEXP (op0, 0), mode),
2797 op1);
2798
2799 /* Similarly for (~(A ^ B)) & A. */
2800 if (GET_CODE (op0) == NOT
2801 && GET_CODE (XEXP (op0, 0)) == XOR
2802 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2803 && ! side_effects_p (op1))
2804 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2805
2806 if (GET_CODE (op0) == NOT
2807 && GET_CODE (XEXP (op0, 0)) == XOR
2808 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2809 && ! side_effects_p (op1))
2810 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2811
2812 /* Convert (A | B) & A to A. */
2813 if (GET_CODE (op0) == IOR
2814 && (rtx_equal_p (XEXP (op0, 0), op1)
2815 || rtx_equal_p (XEXP (op0, 1), op1))
2816 && ! side_effects_p (XEXP (op0, 0))
2817 && ! side_effects_p (XEXP (op0, 1)))
2818 return op1;
2819
2820 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2821 ((A & N) + B) & M -> (A + B) & M
2822 Similarly if (N & M) == 0,
2823 ((A | N) + B) & M -> (A + B) & M
2824 and for - instead of + and/or ^ instead of |.
2825 Also, if (N & M) == 0, then
2826 (A +- N) & M -> A & M. */
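/* For example, with M == 0xff and N == 0x100,
   (and (plus (ior A (const_int 0x100)) B) (const_int 0xff)) becomes
   (and (plus A B) (const_int 0xff)): bits of N outside M cannot
   influence the masked low bits of the sum.  */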
2827 if (CONST_INT_P (trueop1)
2828 && HWI_COMPUTABLE_MODE_P (mode)
2829 && ~UINTVAL (trueop1)
2830 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2831 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2832 {
2833 rtx pmop[2];
2834 int which;
2835
2836 pmop[0] = XEXP (op0, 0);
2837 pmop[1] = XEXP (op0, 1);
2838
2839 if (CONST_INT_P (pmop[1])
2840 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2841 return simplify_gen_binary (AND, mode, pmop[0], op1);
2842
2843 for (which = 0; which < 2; which++)
2844 {
2845 tem = pmop[which];
2846 switch (GET_CODE (tem))
2847 {
2848 case AND:
2849 if (CONST_INT_P (XEXP (tem, 1))
2850 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2851 == UINTVAL (trueop1))
2852 pmop[which] = XEXP (tem, 0);
2853 break;
2854 case IOR:
2855 case XOR:
2856 if (CONST_INT_P (XEXP (tem, 1))
2857 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2858 pmop[which] = XEXP (tem, 0);
2859 break;
2860 default:
2861 break;
2862 }
2863 }
2864
2865 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2866 {
2867 tem = simplify_gen_binary (GET_CODE (op0), mode,
2868 pmop[0], pmop[1]);
2869 return simplify_gen_binary (code, mode, tem, op1);
2870 }
2871 }
2872
2873 /* (and X (ior (not X) Y)) -> (and X Y) */
2874 if (GET_CODE (op1) == IOR
2875 && GET_CODE (XEXP (op1, 0)) == NOT
2876 && op0 == XEXP (XEXP (op1, 0), 0))
2877 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2878
2879 /* (and (ior (not X) Y) X) -> (and X Y) */
2880 if (GET_CODE (op0) == IOR
2881 && GET_CODE (XEXP (op0, 0)) == NOT
2882 && op1 == XEXP (XEXP (op0, 0), 0))
2883 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2884
2885 tem = simplify_associative_operation (code, mode, op0, op1);
2886 if (tem)
2887 return tem;
2888 break;
2889
2890 case UDIV:
2891 /* 0/x is 0 (or x&0 if x has side-effects). */
2892 if (trueop0 == CONST0_RTX (mode))
2893 {
2894 if (side_effects_p (op1))
2895 return simplify_gen_binary (AND, mode, op1, trueop0);
2896 return trueop0;
2897 }
2898 /* x/1 is x. */
2899 if (trueop1 == CONST1_RTX (mode))
2900 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2901 /* Convert divide by power of two into shift. */
2902 if (CONST_INT_P (trueop1)
2903 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2904 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2905 break;
2906
2907 case DIV:
2908 /* Handle floating point and integers separately. */
2909 if (SCALAR_FLOAT_MODE_P (mode))
2910 {
2911 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2912 safe for modes with NaNs, since 0.0 / 0.0 will then be
2913 NaN rather than 0.0. Nor is it safe for modes with signed
2914 zeros, since dividing 0 by a negative number gives -0.0. */
2915 if (trueop0 == CONST0_RTX (mode)
2916 && !HONOR_NANS (mode)
2917 && !HONOR_SIGNED_ZEROS (mode)
2918 && ! side_effects_p (op1))
2919 return op0;
2920 /* x/1.0 is x. */
2921 if (trueop1 == CONST1_RTX (mode)
2922 && !HONOR_SNANS (mode))
2923 return op0;
2924
2925 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2926 && trueop1 != CONST0_RTX (mode))
2927 {
2928 REAL_VALUE_TYPE d;
2929 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2930
2931 /* x/-1.0 is -x. */
2932 if (REAL_VALUES_EQUAL (d, dconstm1)
2933 && !HONOR_SNANS (mode))
2934 return simplify_gen_unary (NEG, mode, op0, mode);
2935
2936 /* Change FP division by a constant into multiplication.
2937 Only do this with -freciprocal-math. */
2938 if (flag_reciprocal_math
2939 && !REAL_VALUES_EQUAL (d, dconst0))
2940 {
2941 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2942 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2943 return simplify_gen_binary (MULT, mode, op0, tem);
2944 }
2945 }
2946 }
2947 else if (SCALAR_INT_MODE_P (mode))
2948 {
2949 /* 0/x is 0 (or x&0 if x has side-effects). */
2950 if (trueop0 == CONST0_RTX (mode)
2951 && !cfun->can_throw_non_call_exceptions)
2952 {
2953 if (side_effects_p (op1))
2954 return simplify_gen_binary (AND, mode, op1, trueop0);
2955 return trueop0;
2956 }
2957 /* x/1 is x. */
2958 if (trueop1 == CONST1_RTX (mode))
2959 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2960 /* x/-1 is -x. */
2961 if (trueop1 == constm1_rtx)
2962 {
2963 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2964 return simplify_gen_unary (NEG, mode, x, mode);
2965 }
2966 }
2967 break;
2968
2969 case UMOD:
2970 /* 0%x is 0 (or x&0 if x has side-effects). */
2971 if (trueop0 == CONST0_RTX (mode))
2972 {
2973 if (side_effects_p (op1))
2974 return simplify_gen_binary (AND, mode, op1, trueop0);
2975 return trueop0;
2976 }
2977 /* x%1 is 0 (or x&0 if x has side-effects). */
2978 if (trueop1 == CONST1_RTX (mode))
2979 {
2980 if (side_effects_p (op0))
2981 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2982 return CONST0_RTX (mode);
2983 }
2984 /* Implement modulus by power of two as AND. */
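/* For example, (umod X (const_int 16)) becomes (and X (const_int 15)).  */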
2985 if (CONST_INT_P (trueop1)
2986 && exact_log2 (UINTVAL (trueop1)) > 0)
2987 return simplify_gen_binary (AND, mode, op0,
2988 GEN_INT (INTVAL (op1) - 1));
2989 break;
2990
2991 case MOD:
2992 /* 0%x is 0 (or x&0 if x has side-effects). */
2993 if (trueop0 == CONST0_RTX (mode))
2994 {
2995 if (side_effects_p (op1))
2996 return simplify_gen_binary (AND, mode, op1, trueop0);
2997 return trueop0;
2998 }
2999 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3000 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3001 {
3002 if (side_effects_p (op0))
3003 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3004 return CONST0_RTX (mode);
3005 }
3006 break;
3007
3008 case ROTATERT:
3009 case ROTATE:
3010 case ASHIFTRT:
3011 if (trueop1 == CONST0_RTX (mode))
3012 return op0;
3013 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3014 return op0;
3015 /* Rotating ~0 always results in ~0. */
3016 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3017 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3018 && ! side_effects_p (op1))
3019 return op0;
3020 canonicalize_shift:
3021 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3022 {
3023 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3024 if (val != INTVAL (op1))
3025 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3026 }
3027 break;
3028
3029 case ASHIFT:
3030 case SS_ASHIFT:
3031 case US_ASHIFT:
3032 if (trueop1 == CONST0_RTX (mode))
3033 return op0;
3034 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3035 return op0;
3036 goto canonicalize_shift;
3037
3038 case LSHIFTRT:
3039 if (trueop1 == CONST0_RTX (mode))
3040 return op0;
3041 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3042 return op0;
3043 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
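/* For example, when CLZ of an SImode zero is defined to be 32,
   (lshiftrt (clz:SI X) (const_int 5)) is 1 exactly when X is zero,
   so it becomes (eq X (const_int 0)).  */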
3044 if (GET_CODE (op0) == CLZ
3045 && CONST_INT_P (trueop1)
3046 && STORE_FLAG_VALUE == 1
3047 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3048 {
3049 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3050 unsigned HOST_WIDE_INT zero_val = 0;
3051
3052 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3053 && zero_val == GET_MODE_PRECISION (imode)
3054 && INTVAL (trueop1) == exact_log2 (zero_val))
3055 return simplify_gen_relational (EQ, mode, imode,
3056 XEXP (op0, 0), const0_rtx);
3057 }
3058 goto canonicalize_shift;
3059
3060 case SMIN:
3061 if (width <= HOST_BITS_PER_WIDE_INT
3062 && mode_signbit_p (mode, trueop1)
3063 && ! side_effects_p (op0))
3064 return op1;
3065 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3066 return op0;
3067 tem = simplify_associative_operation (code, mode, op0, op1);
3068 if (tem)
3069 return tem;
3070 break;
3071
3072 case SMAX:
3073 if (width <= HOST_BITS_PER_WIDE_INT
3074 && CONST_INT_P (trueop1)
3075 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3076 && ! side_effects_p (op0))
3077 return op1;
3078 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3079 return op0;
3080 tem = simplify_associative_operation (code, mode, op0, op1);
3081 if (tem)
3082 return tem;
3083 break;
3084
3085 case UMIN:
3086 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3087 return op1;
3088 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3089 return op0;
3090 tem = simplify_associative_operation (code, mode, op0, op1);
3091 if (tem)
3092 return tem;
3093 break;
3094
3095 case UMAX:
3096 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3097 return op1;
3098 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3099 return op0;
3100 tem = simplify_associative_operation (code, mode, op0, op1);
3101 if (tem)
3102 return tem;
3103 break;
3104
3105 case SS_PLUS:
3106 case US_PLUS:
3107 case SS_MINUS:
3108 case US_MINUS:
3109 case SS_MULT:
3110 case US_MULT:
3111 case SS_DIV:
3112 case US_DIV:
3113 /* ??? There are simplifications that can be done. */
3114 return 0;
3115
3116 case VEC_SELECT:
3117 if (!VECTOR_MODE_P (mode))
3118 {
3119 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3120 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3121 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3122 gcc_assert (XVECLEN (trueop1, 0) == 1);
3123 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3124
3125 if (GET_CODE (trueop0) == CONST_VECTOR)
3126 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3127 (trueop1, 0, 0)));
3128
3129 /* Extract a scalar element from a nested VEC_SELECT expression
3130 (with optional nested VEC_CONCAT expression). Some targets
3131 (i386) extract scalar element from a vector using chain of
3132 nested VEC_SELECT expressions. When the input operand is a memory
3133 operand, this operation can be simplified to a simple scalar
3134 load from a suitably offset memory address. */
3135 if (GET_CODE (trueop0) == VEC_SELECT)
3136 {
3137 rtx op0 = XEXP (trueop0, 0);
3138 rtx op1 = XEXP (trueop0, 1);
3139
3140 enum machine_mode opmode = GET_MODE (op0);
3141 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3142 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3143
3144 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3145 int elem;
3146
3147 rtvec vec;
3148 rtx tmp_op, tmp;
3149
3150 gcc_assert (GET_CODE (op1) == PARALLEL);
3151 gcc_assert (i < n_elts);
3152
3153 /* Select the element pointed to by the nested selector. */
3154 elem = INTVAL (XVECEXP (op1, 0, i));
3155
3156 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3157 if (GET_CODE (op0) == VEC_CONCAT)
3158 {
3159 rtx op00 = XEXP (op0, 0);
3160 rtx op01 = XEXP (op0, 1);
3161
3162 enum machine_mode mode00, mode01;
3163 int n_elts00, n_elts01;
3164
3165 mode00 = GET_MODE (op00);
3166 mode01 = GET_MODE (op01);
3167
3168 /* Find out number of elements of each operand. */
3169 if (VECTOR_MODE_P (mode00))
3170 {
3171 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3172 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3173 }
3174 else
3175 n_elts00 = 1;
3176
3177 if (VECTOR_MODE_P (mode01))
3178 {
3179 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3180 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3181 }
3182 else
3183 n_elts01 = 1;
3184
3185 gcc_assert (n_elts == n_elts00 + n_elts01);
3186
3187 /* Select the correct operand of VEC_CONCAT
3188 and adjust the selector. */
3189 if (elem < n_elts01)
3190 tmp_op = op00;
3191 else
3192 {
3193 tmp_op = op01;
3194 elem -= n_elts00;
3195 }
3196 }
3197 else
3198 tmp_op = op0;
3199
3200 vec = rtvec_alloc (1);
3201 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3202
3203 tmp = gen_rtx_fmt_ee (code, mode,
3204 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3205 return tmp;
3206 }
3207 if (GET_CODE (trueop0) == VEC_DUPLICATE
3208 && GET_MODE (XEXP (trueop0, 0)) == mode)
3209 return XEXP (trueop0, 0);
3210 }
3211 else
3212 {
3213 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3214 gcc_assert (GET_MODE_INNER (mode)
3215 == GET_MODE_INNER (GET_MODE (trueop0)));
3216 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3217
3218 if (GET_CODE (trueop0) == CONST_VECTOR)
3219 {
3220 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3221 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3222 rtvec v = rtvec_alloc (n_elts);
3223 unsigned int i;
3224
3225 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3226 for (i = 0; i < n_elts; i++)
3227 {
3228 rtx x = XVECEXP (trueop1, 0, i);
3229
3230 gcc_assert (CONST_INT_P (x));
3231 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3232 INTVAL (x));
3233 }
3234
3235 return gen_rtx_CONST_VECTOR (mode, v);
3236 }
3237
3238 /* If we build {a,b} then permute it, build the result directly. */
3239 if (XVECLEN (trueop1, 0) == 2
3240 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3241 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3242 && GET_CODE (trueop0) == VEC_CONCAT
3243 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3244 && GET_MODE (XEXP (trueop0, 0)) == mode
3245 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3246 && GET_MODE (XEXP (trueop0, 1)) == mode)
3247 {
3248 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3249 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3250 rtx subop0, subop1;
3251
3252 gcc_assert (i0 < 4 && i1 < 4);
3253 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3254 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3255
3256 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3257 }
3258 }
3259
3260 if (XVECLEN (trueop1, 0) == 1
3261 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3262 && GET_CODE (trueop0) == VEC_CONCAT)
3263 {
3264 rtx vec = trueop0;
3265 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3266
3267 /* Try to find the element in the VEC_CONCAT. */
3268 while (GET_MODE (vec) != mode
3269 && GET_CODE (vec) == VEC_CONCAT)
3270 {
3271 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3272 if (offset < vec_size)
3273 vec = XEXP (vec, 0);
3274 else
3275 {
3276 offset -= vec_size;
3277 vec = XEXP (vec, 1);
3278 }
3279 vec = avoid_constant_pool_reference (vec);
3280 }
3281
3282 if (GET_MODE (vec) == mode)
3283 return vec;
3284 }
3285
3286 return 0;
3287 case VEC_CONCAT:
3288 {
3289 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3290 ? GET_MODE (trueop0)
3291 : GET_MODE_INNER (mode));
3292 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3293 ? GET_MODE (trueop1)
3294 : GET_MODE_INNER (mode));
3295
3296 gcc_assert (VECTOR_MODE_P (mode));
3297 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3298 == GET_MODE_SIZE (mode));
3299
3300 if (VECTOR_MODE_P (op0_mode))
3301 gcc_assert (GET_MODE_INNER (mode)
3302 == GET_MODE_INNER (op0_mode));
3303 else
3304 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3305
3306 if (VECTOR_MODE_P (op1_mode))
3307 gcc_assert (GET_MODE_INNER (mode)
3308 == GET_MODE_INNER (op1_mode));
3309 else
3310 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3311
3312 if ((GET_CODE (trueop0) == CONST_VECTOR
3313 || CONST_INT_P (trueop0) || CONST_DOUBLE_P (trueop0))
3314 && (GET_CODE (trueop1) == CONST_VECTOR
3315 || CONST_INT_P (trueop1) || CONST_DOUBLE_P (trueop1)))
3316 {
3317 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3318 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3319 rtvec v = rtvec_alloc (n_elts);
3320 unsigned int i;
3321 unsigned in_n_elts = 1;
3322
3323 if (VECTOR_MODE_P (op0_mode))
3324 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3325 for (i = 0; i < n_elts; i++)
3326 {
3327 if (i < in_n_elts)
3328 {
3329 if (!VECTOR_MODE_P (op0_mode))
3330 RTVEC_ELT (v, i) = trueop0;
3331 else
3332 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3333 }
3334 else
3335 {
3336 if (!VECTOR_MODE_P (op1_mode))
3337 RTVEC_ELT (v, i) = trueop1;
3338 else
3339 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3340 i - in_n_elts);
3341 }
3342 }
3343
3344 return gen_rtx_CONST_VECTOR (mode, v);
3345 }
3346 }
3347 return 0;
3348
3349 default:
3350 gcc_unreachable ();
3351 }
3352
3353 return 0;
3354 }
3355
3356 rtx
3357 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3358 rtx op0, rtx op1)
3359 {
3360 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3361 HOST_WIDE_INT val;
3362 unsigned int width = GET_MODE_PRECISION (mode);
3363
3364 if (VECTOR_MODE_P (mode)
3365 && code != VEC_CONCAT
3366 && GET_CODE (op0) == CONST_VECTOR
3367 && GET_CODE (op1) == CONST_VECTOR)
3368 {
3369 unsigned n_elts = GET_MODE_NUNITS (mode);
3370 enum machine_mode op0mode = GET_MODE (op0);
3371 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3372 enum machine_mode op1mode = GET_MODE (op1);
3373 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3374 rtvec v = rtvec_alloc (n_elts);
3375 unsigned int i;
3376
3377 gcc_assert (op0_n_elts == n_elts);
3378 gcc_assert (op1_n_elts == n_elts);
3379 for (i = 0; i < n_elts; i++)
3380 {
3381 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3382 CONST_VECTOR_ELT (op0, i),
3383 CONST_VECTOR_ELT (op1, i));
3384 if (!x)
3385 return 0;
3386 RTVEC_ELT (v, i) = x;
3387 }
3388
3389 return gen_rtx_CONST_VECTOR (mode, v);
3390 }
3391
3392 if (VECTOR_MODE_P (mode)
3393 && code == VEC_CONCAT
3394 && (CONST_INT_P (op0)
3395 || GET_CODE (op0) == CONST_FIXED
3396 || CONST_DOUBLE_P (op0))
3397 && (CONST_INT_P (op1)
3398 || CONST_DOUBLE_P (op1)
3399 || GET_CODE (op1) == CONST_FIXED))
3400 {
3401 unsigned n_elts = GET_MODE_NUNITS (mode);
3402 rtvec v = rtvec_alloc (n_elts);
3403
3404 gcc_assert (n_elts >= 2);
3405 if (n_elts == 2)
3406 {
3407 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3408 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3409
3410 RTVEC_ELT (v, 0) = op0;
3411 RTVEC_ELT (v, 1) = op1;
3412 }
3413 else
3414 {
3415 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3416 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3417 unsigned i;
3418
3419 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3420 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3421 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3422
3423 for (i = 0; i < op0_n_elts; ++i)
3424 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3425 for (i = 0; i < op1_n_elts; ++i)
3426 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3427 }
3428
3429 return gen_rtx_CONST_VECTOR (mode, v);
3430 }
3431
3432 if (SCALAR_FLOAT_MODE_P (mode)
3433 && CONST_DOUBLE_AS_FLOAT_P (op0)
3434 && CONST_DOUBLE_AS_FLOAT_P (op1)
3435 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3436 {
3437 if (code == AND
3438 || code == IOR
3439 || code == XOR)
3440 {
3441 long tmp0[4];
3442 long tmp1[4];
3443 REAL_VALUE_TYPE r;
3444 int i;
3445
3446 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3447 GET_MODE (op0));
3448 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3449 GET_MODE (op1));
3450 for (i = 0; i < 4; i++)
3451 {
3452 switch (code)
3453 {
3454 case AND:
3455 tmp0[i] &= tmp1[i];
3456 break;
3457 case IOR:
3458 tmp0[i] |= tmp1[i];
3459 break;
3460 case XOR:
3461 tmp0[i] ^= tmp1[i];
3462 break;
3463 default:
3464 gcc_unreachable ();
3465 }
3466 }
3467 real_from_target (&r, tmp0, mode);
3468 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3469 }
3470 else
3471 {
3472 REAL_VALUE_TYPE f0, f1, value, result;
3473 bool inexact;
3474
3475 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3476 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3477 real_convert (&f0, mode, &f0);
3478 real_convert (&f1, mode, &f1);
3479
3480 if (HONOR_SNANS (mode)
3481 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3482 return 0;
3483
3484 if (code == DIV
3485 && REAL_VALUES_EQUAL (f1, dconst0)
3486 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3487 return 0;
3488
3489 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3490 && flag_trapping_math
3491 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3492 {
3493 int s0 = REAL_VALUE_NEGATIVE (f0);
3494 int s1 = REAL_VALUE_NEGATIVE (f1);
3495
3496 switch (code)
3497 {
3498 case PLUS:
3499 /* Inf + -Inf = NaN plus exception. */
3500 if (s0 != s1)
3501 return 0;
3502 break;
3503 case MINUS:
3504 /* Inf - Inf = NaN plus exception. */
3505 if (s0 == s1)
3506 return 0;
3507 break;
3508 case DIV:
3509 /* Inf / Inf = NaN plus exception. */
3510 return 0;
3511 default:
3512 break;
3513 }
3514 }
3515
3516 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3517 && flag_trapping_math
3518 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3519 || (REAL_VALUE_ISINF (f1)
3520 && REAL_VALUES_EQUAL (f0, dconst0))))
3521 /* Inf * 0 = NaN plus exception. */
3522 return 0;
3523
3524 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3525 &f0, &f1);
3526 real_convert (&result, mode, &value);
3527
3528 /* Don't constant fold this floating point operation if
3529 the result has overflowed and flag_trapping_math is set. */
3530
3531 if (flag_trapping_math
3532 && MODE_HAS_INFINITIES (mode)
3533 && REAL_VALUE_ISINF (result)
3534 && !REAL_VALUE_ISINF (f0)
3535 && !REAL_VALUE_ISINF (f1))
3536 /* Overflow plus exception. */
3537 return 0;
3538
3539 /* Don't constant fold this floating point operation if the
3540 result may depend upon the run-time rounding mode and
3541 flag_rounding_math is set, or if GCC's software emulation
3542 is unable to accurately represent the result. */
3543
3544 if ((flag_rounding_math
3545 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3546 && (inexact || !real_identical (&result, &value)))
3547 return NULL_RTX;
3548
3549 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3550 }
3551 }
3552
3553 /* We can fold some multi-word operations. */
3554 if (GET_MODE_CLASS (mode) == MODE_INT
3555 && width == HOST_BITS_PER_DOUBLE_INT
3556 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3557 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3558 {
3559 double_int o0, o1, res, tmp;
3560
3561 o0 = rtx_to_double_int (op0);
3562 o1 = rtx_to_double_int (op1);
3563
3564 switch (code)
3565 {
3566 case MINUS:
3567 /* A - B == A + (-B). */
3568 o1 = double_int_neg (o1);
3569
3570 /* Fall through.... */
3571
3572 case PLUS:
3573 res = double_int_add (o0, o1);
3574 break;
3575
3576 case MULT:
3577 res = double_int_mul (o0, o1);
3578 break;
3579
3580 case DIV:
3581 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3582 o0.low, o0.high, o1.low, o1.high,
3583 &res.low, &res.high,
3584 &tmp.low, &tmp.high))
3585 return 0;
3586 break;
3587
3588 case MOD:
3589 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3590 o0.low, o0.high, o1.low, o1.high,
3591 &tmp.low, &tmp.high,
3592 &res.low, &res.high))
3593 return 0;
3594 break;
3595
3596 case UDIV:
3597 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3598 o0.low, o0.high, o1.low, o1.high,
3599 &res.low, &res.high,
3600 &tmp.low, &tmp.high))
3601 return 0;
3602 break;
3603
3604 case UMOD:
3605 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3606 o0.low, o0.high, o1.low, o1.high,
3607 &tmp.low, &tmp.high,
3608 &res.low, &res.high))
3609 return 0;
3610 break;
3611
3612 case AND:
3613 res = double_int_and (o0, o1);
3614 break;
3615
3616 case IOR:
3617 res = double_int_ior (o0, o1);
3618 break;
3619
3620 case XOR:
3621 res = double_int_xor (o0, o1);
3622 break;
3623
3624 case SMIN:
3625 res = double_int_smin (o0, o1);
3626 break;
3627
3628 case SMAX:
3629 res = double_int_smax (o0, o1);
3630 break;
3631
3632 case UMIN:
3633 res = double_int_umin (o0, o1);
3634 break;
3635
3636 case UMAX:
3637 res = double_int_umax (o0, o1);
3638 break;
3639
3640 case LSHIFTRT: case ASHIFTRT:
3641 case ASHIFT:
3642 case ROTATE: case ROTATERT:
3643 {
3644 unsigned HOST_WIDE_INT cnt;
3645
3646 if (SHIFT_COUNT_TRUNCATED)
3647 {
3648 o1.high = 0;
3649 o1.low &= GET_MODE_PRECISION (mode) - 1;
3650 }
3651
3652 if (!double_int_fits_in_uhwi_p (o1)
3653 || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
3654 return 0;
3655
3656 cnt = double_int_to_uhwi (o1);
3657
3658 if (code == LSHIFTRT || code == ASHIFTRT)
3659 res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
3660 code == ASHIFTRT);
3661 else if (code == ASHIFT)
3662 res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
3663 true);
3664 else if (code == ROTATE)
3665 res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
3666 else /* code == ROTATERT */
3667 res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
3668 }
3669 break;
3670
3671 default:
3672 return 0;
3673 }
3674
3675 return immed_double_int_const (res, mode);
3676 }
3677
3678 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3679 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3680 {
3681 /* Get the integer argument values in two forms:
3682 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3683
3684 arg0 = INTVAL (op0);
3685 arg1 = INTVAL (op1);
3686
3687 if (width < HOST_BITS_PER_WIDE_INT)
3688 {
3689 arg0 &= GET_MODE_MASK (mode);
3690 arg1 &= GET_MODE_MASK (mode);
3691
3692 arg0s = arg0;
3693 if (val_signbit_known_set_p (mode, arg0s))
3694 arg0s |= ~GET_MODE_MASK (mode);
3695
3696 arg1s = arg1;
3697 if (val_signbit_known_set_p (mode, arg1s))
3698 arg1s |= ~GET_MODE_MASK (mode);
3699 }
3700 else
3701 {
3702 arg0s = arg0;
3703 arg1s = arg1;
3704 }
3705
3706 /* Compute the value of the arithmetic. */
3707
3708 switch (code)
3709 {
3710 case PLUS:
3711 val = arg0s + arg1s;
3712 break;
3713
3714 case MINUS:
3715 val = arg0s - arg1s;
3716 break;
3717
3718 case MULT:
3719 val = arg0s * arg1s;
3720 break;
3721
3722 case DIV:
3723 if (arg1s == 0
3724 || ((unsigned HOST_WIDE_INT) arg0s
3725 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3726 && arg1s == -1))
3727 return 0;
3728 val = arg0s / arg1s;
3729 break;
3730
3731 case MOD:
3732 if (arg1s == 0
3733 || ((unsigned HOST_WIDE_INT) arg0s
3734 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3735 && arg1s == -1))
3736 return 0;
3737 val = arg0s % arg1s;
3738 break;
3739
3740 case UDIV:
3741 if (arg1 == 0
3742 || ((unsigned HOST_WIDE_INT) arg0s
3743 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3744 && arg1s == -1))
3745 return 0;
3746 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3747 break;
3748
3749 case UMOD:
3750 if (arg1 == 0
3751 || ((unsigned HOST_WIDE_INT) arg0s
3752 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3753 && arg1s == -1))
3754 return 0;
3755 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3756 break;
3757
3758 case AND:
3759 val = arg0 & arg1;
3760 break;
3761
3762 case IOR:
3763 val = arg0 | arg1;
3764 break;
3765
3766 case XOR:
3767 val = arg0 ^ arg1;
3768 break;
3769
3770 case LSHIFTRT:
3771 case ASHIFT:
3772 case ASHIFTRT:
3773 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3774 the value is in range. We can't return any old value for
3775 out-of-range arguments because either the middle-end (via
3776 shift_truncation_mask) or the back-end might be relying on
3777 target-specific knowledge. Nor can we rely on
3778 shift_truncation_mask, since the shift might not be part of an
3779 ashlM3, lshrM3 or ashrM3 instruction. */
3780 if (SHIFT_COUNT_TRUNCATED)
3781 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3782 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3783 return 0;
3784
3785 val = (code == ASHIFT
3786 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3787 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3788
3789 /* Sign-extend the result for arithmetic right shifts. */
3790 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3791 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3792 break;
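	  /* Added worked example (commentary only): in QImode, for
	     arg0 = 0xf0 (arg0s = -16) and arg1 = 2 the logical shift
	     above yields 0x3c; the sign-extension step then ORs in
	     ~0 << 6, giving 0xfc, which is -4 in 8 bits and matches
	     the arithmetic result of -16 >> 2.  */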
3793
3794 case ROTATERT:
3795 if (arg1 < 0)
3796 return 0;
3797
3798 arg1 %= width;
3799 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3800 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3801 break;
3802
3803 case ROTATE:
3804 if (arg1 < 0)
3805 return 0;
3806
3807 arg1 %= width;
3808 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3809 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3810 break;
3811
3812 case COMPARE:
3813 /* Do nothing here. */
3814 return 0;
3815
3816 case SMIN:
3817 val = arg0s <= arg1s ? arg0s : arg1s;
3818 break;
3819
3820 case UMIN:
3821 val = ((unsigned HOST_WIDE_INT) arg0
3822 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3823 break;
3824
3825 case SMAX:
3826 val = arg0s > arg1s ? arg0s : arg1s;
3827 break;
3828
3829 case UMAX:
3830 val = ((unsigned HOST_WIDE_INT) arg0
3831 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3832 break;
3833
3834 case SS_PLUS:
3835 case US_PLUS:
3836 case SS_MINUS:
3837 case US_MINUS:
3838 case SS_MULT:
3839 case US_MULT:
3840 case SS_DIV:
3841 case US_DIV:
3842 case SS_ASHIFT:
3843 case US_ASHIFT:
3844 /* ??? There are simplifications that can be done. */
3845 return 0;
3846
3847 default:
3848 gcc_unreachable ();
3849 }
3850
3851 return gen_int_mode (val, mode);
3852 }
3853
3854 return NULL_RTX;
3855 }
3856
3857
3858 \f
3859 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3860 PLUS or MINUS.
3861
3862 Rather than test for specific cases, we do this by a brute-force method
3863 and do all possible simplifications until no more changes occur. Then
3864 we rebuild the operation. */
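/* Added illustration (commentary only): given (minus (plus a b) c), the
   expansion loop below first records ops[0] = {(plus a b), +} and
   ops[1] = {c, -}, then splits the PLUS so the array holds
   {a, +}, {c, -}, {b, +}.  The pairwise combination loop then tries
   simplify_binary_operation on each pair before the whole expression is
   rebuilt as a chain of PLUS/MINUS.  */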
3865
3866 struct simplify_plus_minus_op_data
3867 {
3868 rtx op;
3869 short neg;
3870 };
3871
3872 static bool
3873 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3874 {
3875 int result;
3876
3877 result = (commutative_operand_precedence (y)
3878 - commutative_operand_precedence (x));
3879 if (result)
3880 return result > 0;
3881
3882 /* Group together equal REGs to do more simplification. */
3883 if (REG_P (x) && REG_P (y))
3884 return REGNO (x) > REGNO (y);
3885 else
3886 return false;
3887 }
3888
3889 static rtx
3890 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3891 rtx op1)
3892 {
3893 struct simplify_plus_minus_op_data ops[8];
3894 rtx result, tem;
3895 int n_ops = 2, input_ops = 2;
3896 int changed, n_constants = 0, canonicalized = 0;
3897 int i, j;
3898
3899 memset (ops, 0, sizeof ops);
3900
3901 /* Set up the two operands and then expand them until nothing has been
3902 changed. If we run out of room in our array, give up; this should
3903 almost never happen. */
3904
3905 ops[0].op = op0;
3906 ops[0].neg = 0;
3907 ops[1].op = op1;
3908 ops[1].neg = (code == MINUS);
3909
3910 do
3911 {
3912 changed = 0;
3913
3914 for (i = 0; i < n_ops; i++)
3915 {
3916 rtx this_op = ops[i].op;
3917 int this_neg = ops[i].neg;
3918 enum rtx_code this_code = GET_CODE (this_op);
3919
3920 switch (this_code)
3921 {
3922 case PLUS:
3923 case MINUS:
3924 if (n_ops == 7)
3925 return NULL_RTX;
3926
3927 ops[n_ops].op = XEXP (this_op, 1);
3928 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3929 n_ops++;
3930
3931 ops[i].op = XEXP (this_op, 0);
3932 input_ops++;
3933 changed = 1;
3934 canonicalized |= this_neg;
3935 break;
3936
3937 case NEG:
3938 ops[i].op = XEXP (this_op, 0);
3939 ops[i].neg = ! this_neg;
3940 changed = 1;
3941 canonicalized = 1;
3942 break;
3943
3944 case CONST:
3945 if (n_ops < 7
3946 && GET_CODE (XEXP (this_op, 0)) == PLUS
3947 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3948 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3949 {
3950 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3951 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3952 ops[n_ops].neg = this_neg;
3953 n_ops++;
3954 changed = 1;
3955 canonicalized = 1;
3956 }
3957 break;
3958
3959 case NOT:
3960 /* ~a -> (-a - 1) */
3961 if (n_ops != 7)
3962 {
3963 ops[n_ops].op = CONSTM1_RTX (mode);
3964 ops[n_ops++].neg = this_neg;
3965 ops[i].op = XEXP (this_op, 0);
3966 ops[i].neg = !this_neg;
3967 changed = 1;
3968 canonicalized = 1;
3969 }
3970 break;
3971
3972 case CONST_INT:
3973 n_constants++;
3974 if (this_neg)
3975 {
3976 ops[i].op = neg_const_int (mode, this_op);
3977 ops[i].neg = 0;
3978 changed = 1;
3979 canonicalized = 1;
3980 }
3981 break;
3982
3983 default:
3984 break;
3985 }
3986 }
3987 }
3988 while (changed);
3989
3990 if (n_constants > 1)
3991 canonicalized = 1;
3992
3993 gcc_assert (n_ops >= 2);
3994
3995 /* If we only have two operands, we can avoid the loops. */
3996 if (n_ops == 2)
3997 {
3998 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3999 rtx lhs, rhs;
4000
4001 /* Get the two operands. Be careful with the order, especially for
4002 the cases where code == MINUS. */
4003 if (ops[0].neg && ops[1].neg)
4004 {
4005 lhs = gen_rtx_NEG (mode, ops[0].op);
4006 rhs = ops[1].op;
4007 }
4008 else if (ops[0].neg)
4009 {
4010 lhs = ops[1].op;
4011 rhs = ops[0].op;
4012 }
4013 else
4014 {
4015 lhs = ops[0].op;
4016 rhs = ops[1].op;
4017 }
4018
4019 return simplify_const_binary_operation (code, mode, lhs, rhs);
4020 }
4021
4022 /* Now simplify each pair of operands until nothing changes. */
4023 do
4024 {
4025 /* Insertion sort is good enough for an eight-element array. */
4026 for (i = 1; i < n_ops; i++)
4027 {
4028 struct simplify_plus_minus_op_data save;
4029 j = i - 1;
4030 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4031 continue;
4032
4033 canonicalized = 1;
4034 save = ops[i];
4035 do
4036 ops[j + 1] = ops[j];
4037 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4038 ops[j + 1] = save;
4039 }
4040
4041 changed = 0;
4042 for (i = n_ops - 1; i > 0; i--)
4043 for (j = i - 1; j >= 0; j--)
4044 {
4045 rtx lhs = ops[j].op, rhs = ops[i].op;
4046 int lneg = ops[j].neg, rneg = ops[i].neg;
4047
4048 if (lhs != 0 && rhs != 0)
4049 {
4050 enum rtx_code ncode = PLUS;
4051
4052 if (lneg != rneg)
4053 {
4054 ncode = MINUS;
4055 if (lneg)
4056 tem = lhs, lhs = rhs, rhs = tem;
4057 }
4058 else if (swap_commutative_operands_p (lhs, rhs))
4059 tem = lhs, lhs = rhs, rhs = tem;
4060
4061 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4062 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4063 {
4064 rtx tem_lhs, tem_rhs;
4065
4066 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4067 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4068 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4069
4070 if (tem && !CONSTANT_P (tem))
4071 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4072 }
4073 else
4074 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4075
4076 /* Reject "simplifications" that just wrap the two
4077 arguments in a CONST. Failure to do so can result
4078 in infinite recursion with simplify_binary_operation
4079 when it calls us to simplify CONST operations. */
4080 if (tem
4081 && ! (GET_CODE (tem) == CONST
4082 && GET_CODE (XEXP (tem, 0)) == ncode
4083 && XEXP (XEXP (tem, 0), 0) == lhs
4084 && XEXP (XEXP (tem, 0), 1) == rhs))
4085 {
4086 lneg &= rneg;
4087 if (GET_CODE (tem) == NEG)
4088 tem = XEXP (tem, 0), lneg = !lneg;
4089 if (CONST_INT_P (tem) && lneg)
4090 tem = neg_const_int (mode, tem), lneg = 0;
4091
4092 ops[i].op = tem;
4093 ops[i].neg = lneg;
4094 ops[j].op = NULL_RTX;
4095 changed = 1;
4096 canonicalized = 1;
4097 }
4098 }
4099 }
4100
4101 /* If nothing changed, fail. */
4102 if (!canonicalized)
4103 return NULL_RTX;
4104
4105 /* Pack all the operands to the lower-numbered entries. */
4106 for (i = 0, j = 0; j < n_ops; j++)
4107 if (ops[j].op)
4108 {
4109 ops[i] = ops[j];
4110 i++;
4111 }
4112 n_ops = i;
4113 }
4114 while (changed);
4115
4116 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4117 if (n_ops == 2
4118 && CONST_INT_P (ops[1].op)
4119 && CONSTANT_P (ops[0].op)
4120 && ops[0].neg)
4121 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4122
4123 /* We suppressed creation of trivial CONST expressions in the
4124 combination loop to avoid recursion. Create one manually now.
4125 The combination loop should have ensured that there is exactly
4126 one CONST_INT, and the sort will have ensured that it is last
4127 in the array and that any other constant will be next-to-last. */
4128
4129 if (n_ops > 1
4130 && CONST_INT_P (ops[n_ops - 1].op)
4131 && CONSTANT_P (ops[n_ops - 2].op))
4132 {
4133 rtx value = ops[n_ops - 1].op;
4134 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4135 value = neg_const_int (mode, value);
4136 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4137 INTVAL (value));
4138 n_ops--;
4139 }
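  /* Added illustration (commentary only): if the two trailing operands
     are {sym, +} and {(const_int 12), -}, the differing neg flags make
     us negate the constant first, and plus_constant then folds the pair
     into (const (plus sym -12)), leaving one fewer operand for the
     rebuild below.  */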
4140
4141 /* Put a non-negated operand first, if possible. */
4142
4143 for (i = 0; i < n_ops && ops[i].neg; i++)
4144 continue;
4145 if (i == n_ops)
4146 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4147 else if (i != 0)
4148 {
4149 tem = ops[0].op;
4150 ops[0] = ops[i];
4151 ops[i].op = tem;
4152 ops[i].neg = 1;
4153 }
4154
4155 /* Now make the result by performing the requested operations. */
4156 result = ops[0].op;
4157 for (i = 1; i < n_ops; i++)
4158 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4159 mode, result, ops[i].op);
4160
4161 return result;
4162 }
4163
4164 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4165 static bool
4166 plus_minus_operand_p (const_rtx x)
4167 {
4168 return GET_CODE (x) == PLUS
4169 || GET_CODE (x) == MINUS
4170 || (GET_CODE (x) == CONST
4171 && GET_CODE (XEXP (x, 0)) == PLUS
4172 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4173 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4174 }
4175
4176 /* Like simplify_binary_operation except used for relational operators.
4177 MODE is the mode of the result. If MODE is VOIDmode, the operands
4178 must not both be VOIDmode as well.
4179
4180 CMP_MODE specifies the mode in which the comparison is done, so it is
4181 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4182 the operands or, if both are VOIDmode, the operands are compared in
4183 "infinite precision". */
4184 rtx
4185 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4186 enum machine_mode cmp_mode, rtx op0, rtx op1)
4187 {
4188 rtx tem, trueop0, trueop1;
4189
4190 if (cmp_mode == VOIDmode)
4191 cmp_mode = GET_MODE (op0);
4192 if (cmp_mode == VOIDmode)
4193 cmp_mode = GET_MODE (op1);
4194
4195 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4196 if (tem)
4197 {
4198 if (SCALAR_FLOAT_MODE_P (mode))
4199 {
4200 if (tem == const0_rtx)
4201 return CONST0_RTX (mode);
4202 #ifdef FLOAT_STORE_FLAG_VALUE
4203 {
4204 REAL_VALUE_TYPE val;
4205 val = FLOAT_STORE_FLAG_VALUE (mode);
4206 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4207 }
4208 #else
4209 return NULL_RTX;
4210 #endif
4211 }
4212 if (VECTOR_MODE_P (mode))
4213 {
4214 if (tem == const0_rtx)
4215 return CONST0_RTX (mode);
4216 #ifdef VECTOR_STORE_FLAG_VALUE
4217 {
4218 int i, units;
4219 rtvec v;
4220
4221 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4222 if (val == NULL_RTX)
4223 return NULL_RTX;
4224 if (val == const1_rtx)
4225 return CONST1_RTX (mode);
4226
4227 units = GET_MODE_NUNITS (mode);
4228 v = rtvec_alloc (units);
4229 for (i = 0; i < units; i++)
4230 RTVEC_ELT (v, i) = val;
4231 return gen_rtx_raw_CONST_VECTOR (mode, v);
4232 }
4233 #else
4234 return NULL_RTX;
4235 #endif
4236 }
4237
4238 return tem;
4239 }
4240
4241 /* For the following tests, ensure const0_rtx is op1. */
4242 if (swap_commutative_operands_p (op0, op1)
4243 || (op0 == const0_rtx && op1 != const0_rtx))
4244 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4245
4246 /* If op0 is a compare, extract the comparison arguments from it. */
4247 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4248 return simplify_gen_relational (code, mode, VOIDmode,
4249 XEXP (op0, 0), XEXP (op0, 1));
4250
4251 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4252 || CC0_P (op0))
4253 return NULL_RTX;
4254
4255 trueop0 = avoid_constant_pool_reference (op0);
4256 trueop1 = avoid_constant_pool_reference (op1);
4257 return simplify_relational_operation_1 (code, mode, cmp_mode,
4258 trueop0, trueop1);
4259 }
4260
4261 /* This part of simplify_relational_operation is only used when CMP_MODE
4262 is not in class MODE_CC (i.e. it is a real comparison).
4263
4264 MODE is the mode of the result, while CMP_MODE specifies the mode
4265 in which the comparison is done, so it is the mode of the operands. */
4266
4267 static rtx
4268 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4269 enum machine_mode cmp_mode, rtx op0, rtx op1)
4270 {
4271 enum rtx_code op0code = GET_CODE (op0);
4272
4273 if (op1 == const0_rtx && COMPARISON_P (op0))
4274 {
4275 /* If op0 is a comparison, extract the comparison arguments
4276 from it. */
4277 if (code == NE)
4278 {
4279 if (GET_MODE (op0) == mode)
4280 return simplify_rtx (op0);
4281 else
4282 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4283 XEXP (op0, 0), XEXP (op0, 1));
4284 }
4285 else if (code == EQ)
4286 {
4287 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4288 if (new_code != UNKNOWN)
4289 return simplify_gen_relational (new_code, mode, VOIDmode,
4290 XEXP (op0, 0), XEXP (op0, 1));
4291 }
4292 }
4293
4294 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4295 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4296 if ((code == LTU || code == GEU)
4297 && GET_CODE (op0) == PLUS
4298 && CONST_INT_P (XEXP (op0, 1))
4299 && (rtx_equal_p (op1, XEXP (op0, 0))
4300 || rtx_equal_p (op1, XEXP (op0, 1))))
4301 {
4302 rtx new_cmp
4303 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4304 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4305 cmp_mode, XEXP (op0, 0), new_cmp);
4306 }
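  /* Added worked example (commentary only): in QImode,
     (ltu (plus x 4) 4) asks whether x + 4 wrapped around, which is true
     exactly when x is at least 252 unsigned, i.e. (geu x -4) with -4
     interpreted modulo 256; the transformation above produces that
     form.  */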
4307
4308 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4309 if ((code == LTU || code == GEU)
4310 && GET_CODE (op0) == PLUS
4311 && rtx_equal_p (op1, XEXP (op0, 1))
4312 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4313 && !rtx_equal_p (op1, XEXP (op0, 0)))
4314 return simplify_gen_relational (code, mode, cmp_mode, op0,
4315 copy_rtx (XEXP (op0, 0)));
4316
4317 if (op1 == const0_rtx)
4318 {
4319 /* Canonicalize (GTU x 0) as (NE x 0). */
4320 if (code == GTU)
4321 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4322 /* Canonicalize (LEU x 0) as (EQ x 0). */
4323 if (code == LEU)
4324 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4325 }
4326 else if (op1 == const1_rtx)
4327 {
4328 switch (code)
4329 {
4330 case GE:
4331 /* Canonicalize (GE x 1) as (GT x 0). */
4332 return simplify_gen_relational (GT, mode, cmp_mode,
4333 op0, const0_rtx);
4334 case GEU:
4335 /* Canonicalize (GEU x 1) as (NE x 0). */
4336 return simplify_gen_relational (NE, mode, cmp_mode,
4337 op0, const0_rtx);
4338 case LT:
4339 /* Canonicalize (LT x 1) as (LE x 0). */
4340 return simplify_gen_relational (LE, mode, cmp_mode,
4341 op0, const0_rtx);
4342 case LTU:
4343 /* Canonicalize (LTU x 1) as (EQ x 0). */
4344 return simplify_gen_relational (EQ, mode, cmp_mode,
4345 op0, const0_rtx);
4346 default:
4347 break;
4348 }
4349 }
4350 else if (op1 == constm1_rtx)
4351 {
4352 /* Canonicalize (LE x -1) as (LT x 0). */
4353 if (code == LE)
4354 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4355 /* Canonicalize (GT x -1) as (GE x 0). */
4356 if (code == GT)
4357 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4358 }
4359
4360 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4361 if ((code == EQ || code == NE)
4362 && (op0code == PLUS || op0code == MINUS)
4363 && CONSTANT_P (op1)
4364 && CONSTANT_P (XEXP (op0, 1))
4365 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4366 {
4367 rtx x = XEXP (op0, 0);
4368 rtx c = XEXP (op0, 1);
4369 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4370 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4371
4372 /* Detect an infinite recursive condition, where we oscillate at this
4373 simplification case between:
4374 A + B == C <---> C - B == A,
4375 where A, B, and C are all constants with non-simplifiable expressions,
4376 usually SYMBOL_REFs. */
4377 if (GET_CODE (tem) == invcode
4378 && CONSTANT_P (x)
4379 && rtx_equal_p (c, XEXP (tem, 1)))
4380 return NULL_RTX;
4381
4382 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4383 }
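  /* Added worked example (commentary only): (eq (plus x 3) 10) becomes
     (eq x 7) through the block above, since 10 - 3 folds to a constant.
     The recursion guard matters when all three values are
     non-simplifiable constants such as SYMBOL_REFs:
     (eq (plus symA symB) symC) would otherwise oscillate forever with
     (eq symA (minus symC symB)).  */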
4384
4385 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4386 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4387 if (code == NE
4388 && op1 == const0_rtx
4389 && GET_MODE_CLASS (mode) == MODE_INT
4390 && cmp_mode != VOIDmode
4391 /* ??? Work-around BImode bugs in the ia64 backend. */
4392 && mode != BImode
4393 && cmp_mode != BImode
4394 && nonzero_bits (op0, cmp_mode) == 1
4395 && STORE_FLAG_VALUE == 1)
4396 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4397 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4398 : lowpart_subreg (mode, op0, cmp_mode);
4399
4400 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4401 if ((code == EQ || code == NE)
4402 && op1 == const0_rtx
4403 && op0code == XOR)
4404 return simplify_gen_relational (code, mode, cmp_mode,
4405 XEXP (op0, 0), XEXP (op0, 1));
4406
4407 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4408 if ((code == EQ || code == NE)
4409 && op0code == XOR
4410 && rtx_equal_p (XEXP (op0, 0), op1)
4411 && !side_effects_p (XEXP (op0, 0)))
4412 return simplify_gen_relational (code, mode, cmp_mode,
4413 XEXP (op0, 1), const0_rtx);
4414
4415 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4416 if ((code == EQ || code == NE)
4417 && op0code == XOR
4418 && rtx_equal_p (XEXP (op0, 1), op1)
4419 && !side_effects_p (XEXP (op0, 1)))
4420 return simplify_gen_relational (code, mode, cmp_mode,
4421 XEXP (op0, 0), const0_rtx);
4422
4423 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4424 if ((code == EQ || code == NE)
4425 && op0code == XOR
4426 && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
4427 && (CONST_INT_P (XEXP (op0, 1))
4428 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
4429 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4430 simplify_gen_binary (XOR, cmp_mode,
4431 XEXP (op0, 1), op1));
4432
4433 if (op0code == POPCOUNT && op1 == const0_rtx)
4434 switch (code)
4435 {
4436 case EQ:
4437 case LE:
4438 case LEU:
4439 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4440 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4441 XEXP (op0, 0), const0_rtx);
4442
4443 case NE:
4444 case GT:
4445 case GTU:
4446 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4447 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4448 XEXP (op0, 0), const0_rtx);
4449
4450 default:
4451 break;
4452 }
4453
4454 return NULL_RTX;
4455 }
4456
4457 enum
4458 {
4459 CMP_EQ = 1,
4460 CMP_LT = 2,
4461 CMP_GT = 4,
4462 CMP_LTU = 8,
4463 CMP_GTU = 16
4464 };
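/* Added illustration (commentary only): comparing the constants -1 and 1
   gives CMP_LT when the operands are viewed as signed values but CMP_GTU
   when they are viewed as unsigned ones, so the code below passes
   CMP_LT | CMP_GTU and comparison_result picks whichever bit the
   requested CODE cares about: LT folds to const_true_rtx while LTU folds
   to const0_rtx.  */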
4465
4466
4467 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4468 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4469 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4470 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4471 For floating-point comparisons, assume that the operands were ordered. */
4472
4473 static rtx
4474 comparison_result (enum rtx_code code, int known_results)
4475 {
4476 switch (code)
4477 {
4478 case EQ:
4479 case UNEQ:
4480 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4481 case NE:
4482 case LTGT:
4483 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4484
4485 case LT:
4486 case UNLT:
4487 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4488 case GE:
4489 case UNGE:
4490 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4491
4492 case GT:
4493 case UNGT:
4494 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4495 case LE:
4496 case UNLE:
4497 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4498
4499 case LTU:
4500 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4501 case GEU:
4502 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4503
4504 case GTU:
4505 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4506 case LEU:
4507 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4508
4509 case ORDERED:
4510 return const_true_rtx;
4511 case UNORDERED:
4512 return const0_rtx;
4513 default:
4514 gcc_unreachable ();
4515 }
4516 }
4517
4518 /* Check if the given comparison (done in the given MODE) is actually a
4519 tautology or a contradiction.
4520 If no simplification is possible, this function returns zero.
4521 Otherwise, it returns either const_true_rtx or const0_rtx. */
4522
4523 rtx
4524 simplify_const_relational_operation (enum rtx_code code,
4525 enum machine_mode mode,
4526 rtx op0, rtx op1)
4527 {
4528 rtx tem;
4529 rtx trueop0;
4530 rtx trueop1;
4531
4532 gcc_assert (mode != VOIDmode
4533 || (GET_MODE (op0) == VOIDmode
4534 && GET_MODE (op1) == VOIDmode));
4535
4536 /* If op0 is a compare, extract the comparison arguments from it. */
4537 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4538 {
4539 op1 = XEXP (op0, 1);
4540 op0 = XEXP (op0, 0);
4541
4542 if (GET_MODE (op0) != VOIDmode)
4543 mode = GET_MODE (op0);
4544 else if (GET_MODE (op1) != VOIDmode)
4545 mode = GET_MODE (op1);
4546 else
4547 return 0;
4548 }
4549
4550 /* We can't simplify MODE_CC values since we don't know what the
4551 actual comparison is. */
4552 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4553 return 0;
4554
4555 /* Make sure the constant is second. */
4556 if (swap_commutative_operands_p (op0, op1))
4557 {
4558 tem = op0, op0 = op1, op1 = tem;
4559 code = swap_condition (code);
4560 }
4561
4562 trueop0 = avoid_constant_pool_reference (op0);
4563 trueop1 = avoid_constant_pool_reference (op1);
4564
4565 /* For integer comparisons of A and B maybe we can simplify A - B and can
4566 then simplify a comparison of that with zero. If A and B are both either
4567 a register or a CONST_INT, this can't help; testing for these cases will
4568 prevent infinite recursion here and speed things up.
4569
4570 We can only do this for EQ and NE comparisons as otherwise we may
4571 lose or introduce overflow, which we cannot disregard as undefined since
4572 we do not know the signedness of the operation on either the left or
4573 the right hand side of the comparison. */
4574
4575 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4576 && (code == EQ || code == NE)
4577 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4578 && (REG_P (op1) || CONST_INT_P (trueop1)))
4579 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4580 /* We cannot do this if tem is a nonzero address. */
4581 && ! nonzero_address_p (tem))
4582 return simplify_const_relational_operation (signed_condition (code),
4583 mode, tem, const0_rtx);
4584
4585 if (! HONOR_NANS (mode) && code == ORDERED)
4586 return const_true_rtx;
4587
4588 if (! HONOR_NANS (mode) && code == UNORDERED)
4589 return const0_rtx;
4590
4591 /* For modes without NaNs, if the two operands are equal, we know the
4592 result except if they have side-effects. Even with NaNs we know
4593 the result of unordered comparisons and, if signaling NaNs are
4594 irrelevant, also the result of LT/GT/LTGT. */
4595 if ((! HONOR_NANS (GET_MODE (trueop0))
4596 || code == UNEQ || code == UNLE || code == UNGE
4597 || ((code == LT || code == GT || code == LTGT)
4598 && ! HONOR_SNANS (GET_MODE (trueop0))))
4599 && rtx_equal_p (trueop0, trueop1)
4600 && ! side_effects_p (trueop0))
4601 return comparison_result (code, CMP_EQ);
4602
4603 /* If the operands are floating-point constants, see if we can fold
4604 the result. */
4605 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4606 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4607 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4608 {
4609 REAL_VALUE_TYPE d0, d1;
4610
4611 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4612 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4613
4614 /* Comparisons are unordered iff at least one of the values is NaN. */
4615 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4616 switch (code)
4617 {
4618 case UNEQ:
4619 case UNLT:
4620 case UNGT:
4621 case UNLE:
4622 case UNGE:
4623 case NE:
4624 case UNORDERED:
4625 return const_true_rtx;
4626 case EQ:
4627 case LT:
4628 case GT:
4629 case LE:
4630 case GE:
4631 case LTGT:
4632 case ORDERED:
4633 return const0_rtx;
4634 default:
4635 return 0;
4636 }
4637
4638 return comparison_result (code,
4639 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4640 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4641 }
4642
4643 /* Otherwise, see if the operands are both integers. */
4644 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4645 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4646 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4647 {
4648 int width = GET_MODE_PRECISION (mode);
4649 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4650 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4651
4652 /* Get the two words comprising each integer constant. */
4653 if (CONST_DOUBLE_AS_INT_P (trueop0))
4654 {
4655 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4656 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4657 }
4658 else
4659 {
4660 l0u = l0s = INTVAL (trueop0);
4661 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4662 }
4663
4664 if (CONST_DOUBLE_AS_INT_P (trueop1))
4665 {
4666 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4667 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4668 }
4669 else
4670 {
4671 l1u = l1s = INTVAL (trueop1);
4672 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4673 }
4674
4675 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4676 we have to sign or zero-extend the values. */
4677 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4678 {
4679 l0u &= GET_MODE_MASK (mode);
4680 l1u &= GET_MODE_MASK (mode);
4681
4682 if (val_signbit_known_set_p (mode, l0s))
4683 l0s |= ~GET_MODE_MASK (mode);
4684
4685 if (val_signbit_known_set_p (mode, l1s))
4686 l1s |= ~GET_MODE_MASK (mode);
4687 }
4688 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4689 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4690
4691 if (h0u == h1u && l0u == l1u)
4692 return comparison_result (code, CMP_EQ);
4693 else
4694 {
4695 int cr;
4696 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4697 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4698 return comparison_result (code, cr);
4699 }
4700 }
4701
4702 /* Optimize comparisons with upper and lower bounds. */
4703 if (HWI_COMPUTABLE_MODE_P (mode)
4704 && CONST_INT_P (trueop1))
4705 {
4706 int sign;
4707 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4708 HOST_WIDE_INT val = INTVAL (trueop1);
4709 HOST_WIDE_INT mmin, mmax;
4710
4711 if (code == GEU
4712 || code == LEU
4713 || code == GTU
4714 || code == LTU)
4715 sign = 0;
4716 else
4717 sign = 1;
4718
4719 /* Get a reduced range if the sign bit is zero. */
4720 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4721 {
4722 mmin = 0;
4723 mmax = nonzero;
4724 }
4725 else
4726 {
4727 rtx mmin_rtx, mmax_rtx;
4728 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4729
4730 mmin = INTVAL (mmin_rtx);
4731 mmax = INTVAL (mmax_rtx);
4732 if (sign)
4733 {
4734 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4735
4736 mmin >>= (sign_copies - 1);
4737 mmax >>= (sign_copies - 1);
4738 }
4739 }
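      /* Added illustration (commentary only): for a signed comparison in
	 QImode the raw bounds are mmin = -128 and mmax = 127.  If
	 num_sign_bit_copies reports three identical sign bits, the value
	 really fits in six bits, so the shifts above narrow the bounds to
	 mmin = -32 and mmax = 31 before the range tests below.  */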
4740
4741 switch (code)
4742 {
4743 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4744 case GEU:
4745 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4746 return const_true_rtx;
4747 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4748 return const0_rtx;
4749 break;
4750 case GE:
4751 if (val <= mmin)
4752 return const_true_rtx;
4753 if (val > mmax)
4754 return const0_rtx;
4755 break;
4756
4757 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4758 case LEU:
4759 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4760 return const_true_rtx;
4761 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4762 return const0_rtx;
4763 break;
4764 case LE:
4765 if (val >= mmax)
4766 return const_true_rtx;
4767 if (val < mmin)
4768 return const0_rtx;
4769 break;
4770
4771 case EQ:
4772 /* x == y is always false for y out of range. */
4773 if (val < mmin || val > mmax)
4774 return const0_rtx;
4775 break;
4776
4777 /* x > y is always false for y >= mmax, always true for y < mmin. */
4778 case GTU:
4779 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4780 return const0_rtx;
4781 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4782 return const_true_rtx;
4783 break;
4784 case GT:
4785 if (val >= mmax)
4786 return const0_rtx;
4787 if (val < mmin)
4788 return const_true_rtx;
4789 break;
4790
4791 /* x < y is always false for y <= mmin, always true for y > mmax. */
4792 case LTU:
4793 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4794 return const0_rtx;
4795 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4796 return const_true_rtx;
4797 break;
4798 case LT:
4799 if (val <= mmin)
4800 return const0_rtx;
4801 if (val > mmax)
4802 return const_true_rtx;
4803 break;
4804
4805 case NE:
4806 /* x != y is always true for y out of range. */
4807 if (val < mmin || val > mmax)
4808 return const_true_rtx;
4809 break;
4810
4811 default:
4812 break;
4813 }
4814 }
4815
4816 /* Optimize integer comparisons with zero. */
4817 if (trueop1 == const0_rtx)
4818 {
4819 /* Some addresses are known to be nonzero. We don't know
4820 their sign, but equality comparisons are known. */
4821 if (nonzero_address_p (trueop0))
4822 {
4823 if (code == EQ || code == LEU)
4824 return const0_rtx;
4825 if (code == NE || code == GTU)
4826 return const_true_rtx;
4827 }
4828
4829 /* See if the first operand is an IOR with a constant. If so, we
4830 may be able to determine the result of this comparison. */
4831 if (GET_CODE (op0) == IOR)
4832 {
4833 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4834 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4835 {
4836 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4837 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4838 && (UINTVAL (inner_const)
4839 & ((unsigned HOST_WIDE_INT) 1
4840 << sign_bitnum)));
4841
4842 switch (code)
4843 {
4844 case EQ:
4845 case LEU:
4846 return const0_rtx;
4847 case NE:
4848 case GTU:
4849 return const_true_rtx;
4850 case LT:
4851 case LE:
4852 if (has_sign)
4853 return const_true_rtx;
4854 break;
4855 case GT:
4856 case GE:
4857 if (has_sign)
4858 return const0_rtx;
4859 break;
4860 default:
4861 break;
4862 }
4863 }
4864 }
4865 }
4866
4867 /* Optimize comparison of ABS with zero. */
4868 if (trueop1 == CONST0_RTX (mode)
4869 && (GET_CODE (trueop0) == ABS
4870 || (GET_CODE (trueop0) == FLOAT_EXTEND
4871 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4872 {
4873 switch (code)
4874 {
4875 case LT:
4876 /* Optimize abs(x) < 0.0. */
4877 if (!HONOR_SNANS (mode)
4878 && (!INTEGRAL_MODE_P (mode)
4879 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4880 {
4881 if (INTEGRAL_MODE_P (mode)
4882 && (issue_strict_overflow_warning
4883 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4884 warning (OPT_Wstrict_overflow,
4885 ("assuming signed overflow does not occur when "
4886 "assuming abs (x) < 0 is false"));
4887 return const0_rtx;
4888 }
4889 break;
4890
4891 case GE:
4892 /* Optimize abs(x) >= 0.0. */
4893 if (!HONOR_NANS (mode)
4894 && (!INTEGRAL_MODE_P (mode)
4895 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4896 {
4897 if (INTEGRAL_MODE_P (mode)
4898 && (issue_strict_overflow_warning
4899 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4900 warning (OPT_Wstrict_overflow,
4901 ("assuming signed overflow does not occur when "
4902 "assuming abs (x) >= 0 is true"));
4903 return const_true_rtx;
4904 }
4905 break;
4906
4907 case UNGE:
4908 /* Optimize ! (abs(x) < 0.0). */
4909 return const_true_rtx;
4910
4911 default:
4912 break;
4913 }
4914 }
4915
4916 return 0;
4917 }
4918 \f
4919 /* Simplify CODE, an operation with result mode MODE and three operands,
4920 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4921 a constant. Return 0 if no simplification is possible. */
4922
4923 rtx
4924 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4925 enum machine_mode op0_mode, rtx op0, rtx op1,
4926 rtx op2)
4927 {
4928 unsigned int width = GET_MODE_PRECISION (mode);
4929 bool any_change = false;
4930 rtx tem;
4931
4932 /* VOIDmode means "infinite" precision. */
4933 if (width == 0)
4934 width = HOST_BITS_PER_WIDE_INT;
4935
4936 switch (code)
4937 {
4938 case FMA:
4939 /* Simplify negations around the multiplication. */
4940 /* -a * -b + c => a * b + c. */
4941 if (GET_CODE (op0) == NEG)
4942 {
4943 tem = simplify_unary_operation (NEG, mode, op1, mode);
4944 if (tem)
4945 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4946 }
4947 else if (GET_CODE (op1) == NEG)
4948 {
4949 tem = simplify_unary_operation (NEG, mode, op0, mode);
4950 if (tem)
4951 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4952 }
4953
4954 /* Canonicalize the two multiplication operands. */
4955 /* a * -b + c => -b * a + c. */
4956 if (swap_commutative_operands_p (op0, op1))
4957 tem = op0, op0 = op1, op1 = tem, any_change = true;
4958
4959 if (any_change)
4960 return gen_rtx_FMA (mode, op0, op1, op2);
4961 return NULL_RTX;
4962
4963 case SIGN_EXTRACT:
4964 case ZERO_EXTRACT:
4965 if (CONST_INT_P (op0)
4966 && CONST_INT_P (op1)
4967 && CONST_INT_P (op2)
4968 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4969 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4970 {
4971 /* Extracting a bit-field from a constant. */
4972 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4973 HOST_WIDE_INT op1val = INTVAL (op1);
4974 HOST_WIDE_INT op2val = INTVAL (op2);
4975 if (BITS_BIG_ENDIAN)
4976 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4977 else
4978 val >>= op2val;
4979
4980 if (HOST_BITS_PER_WIDE_INT != op1val)
4981 {
4982 /* First zero-extend. */
4983 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4984 /* If desired, propagate sign bit. */
4985 if (code == SIGN_EXTRACT
4986 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4987 != 0)
4988 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
4989 }
4990
4991 return gen_int_mode (val, mode);
4992 }
4993 break;
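      /* Added worked example (commentary only): with BITS_BIG_ENDIAN
	 clear, (zero_extract (const_int 0x1234) (const_int 4)
	 (const_int 4)) shifts the value right by four and masks with
	 0xf, yielding 3; a sign_extract of the same field also gives 3
	 because bit 3 of the extracted field is clear.  */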
4994
4995 case IF_THEN_ELSE:
4996 if (CONST_INT_P (op0))
4997 return op0 != const0_rtx ? op1 : op2;
4998
4999 /* Convert c ? a : a into "a". */
5000 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5001 return op1;
5002
5003 /* Convert a != b ? a : b into "a". */
5004 if (GET_CODE (op0) == NE
5005 && ! side_effects_p (op0)
5006 && ! HONOR_NANS (mode)
5007 && ! HONOR_SIGNED_ZEROS (mode)
5008 && ((rtx_equal_p (XEXP (op0, 0), op1)
5009 && rtx_equal_p (XEXP (op0, 1), op2))
5010 || (rtx_equal_p (XEXP (op0, 0), op2)
5011 && rtx_equal_p (XEXP (op0, 1), op1))))
5012 return op1;
5013
5014 /* Convert a == b ? a : b into "b". */
5015 if (GET_CODE (op0) == EQ
5016 && ! side_effects_p (op0)
5017 && ! HONOR_NANS (mode)
5018 && ! HONOR_SIGNED_ZEROS (mode)
5019 && ((rtx_equal_p (XEXP (op0, 0), op1)
5020 && rtx_equal_p (XEXP (op0, 1), op2))
5021 || (rtx_equal_p (XEXP (op0, 0), op2)
5022 && rtx_equal_p (XEXP (op0, 1), op1))))
5023 return op2;
5024
5025 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5026 {
5027 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5028 ? GET_MODE (XEXP (op0, 1))
5029 : GET_MODE (XEXP (op0, 0)));
5030 rtx temp;
5031
5032 /* Look for the constants STORE_FLAG_VALUE and zero in op1 and op2. */
5033 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5034 {
5035 HOST_WIDE_INT t = INTVAL (op1);
5036 HOST_WIDE_INT f = INTVAL (op2);
5037
5038 if (t == STORE_FLAG_VALUE && f == 0)
5039 code = GET_CODE (op0);
5040 else if (t == 0 && f == STORE_FLAG_VALUE)
5041 {
5042 enum rtx_code tmp;
5043 tmp = reversed_comparison_code (op0, NULL_RTX);
5044 if (tmp == UNKNOWN)
5045 break;
5046 code = tmp;
5047 }
5048 else
5049 break;
5050
5051 return simplify_gen_relational (code, mode, cmp_mode,
5052 XEXP (op0, 0), XEXP (op0, 1));
5053 }
5054
5055 if (cmp_mode == VOIDmode)
5056 cmp_mode = op0_mode;
5057 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5058 cmp_mode, XEXP (op0, 0),
5059 XEXP (op0, 1));
5060
5061 /* See if any simplifications were possible. */
5062 if (temp)
5063 {
5064 if (CONST_INT_P (temp))
5065 return temp == const0_rtx ? op2 : op1;
5066 else if (temp)
5067 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5068 }
5069 }
5070 break;
5071
5072 case VEC_MERGE:
5073 gcc_assert (GET_MODE (op0) == mode);
5074 gcc_assert (GET_MODE (op1) == mode);
5075 gcc_assert (VECTOR_MODE_P (mode));
5076 op2 = avoid_constant_pool_reference (op2);
5077 if (CONST_INT_P (op2))
5078 {
5079 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5080 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5081 int mask = (1 << n_elts) - 1;
5082
5083 if (!(INTVAL (op2) & mask))
5084 return op1;
5085 if ((INTVAL (op2) & mask) == mask)
5086 return op0;
5087
5088 op0 = avoid_constant_pool_reference (op0);
5089 op1 = avoid_constant_pool_reference (op1);
5090 if (GET_CODE (op0) == CONST_VECTOR
5091 && GET_CODE (op1) == CONST_VECTOR)
5092 {
5093 rtvec v = rtvec_alloc (n_elts);
5094 unsigned int i;
5095
5096 for (i = 0; i < n_elts; i++)
5097 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5098 ? CONST_VECTOR_ELT (op0, i)
5099 : CONST_VECTOR_ELT (op1, i));
5100 return gen_rtx_CONST_VECTOR (mode, v);
5101 }
5102 }
5103 break;
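      /* Added illustration (commentary only): for a V4SI merge with
	 op2 = (const_int 5), bits 0 and 2 are set, so elements 0 and 2
	 of the result are taken from op0 and elements 1 and 3 from op1;
	 the all-zeros and all-ones masks short-circuit to op1 and op0
	 above.  */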
5104
5105 default:
5106 gcc_unreachable ();
5107 }
5108
5109 return 0;
5110 }
5111
5112 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5113 or CONST_VECTOR,
5114 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5115
5116 Works by unpacking OP into a collection of 8-bit values
5117 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5118 and then repacking them again for OUTERMODE. */
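/* Added illustration (commentary only): on a little-endian target,
   simplifying (subreg:HI (const_int 0x12345678) 0) with SImode as the
   inner mode unpacks the constant into the byte array 78 56 34 12,
   selects the two bytes starting at BYTE 0, and repacks them as the
   HImode constant 0x5678; BYTE 2 would instead give 0x1234.  */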
5119
5120 static rtx
5121 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5122 enum machine_mode innermode, unsigned int byte)
5123 {
5124 /* We support up to 512-bit values (for V8DFmode). */
5125 enum {
5126 max_bitsize = 512,
5127 value_bit = 8,
5128 value_mask = (1 << value_bit) - 1
5129 };
5130 unsigned char value[max_bitsize / value_bit];
5131 int value_start;
5132 int i;
5133 int elem;
5134
5135 int num_elem;
5136 rtx * elems;
5137 int elem_bitsize;
5138 rtx result_s;
5139 rtvec result_v = NULL;
5140 enum mode_class outer_class;
5141 enum machine_mode outer_submode;
5142
5143 /* Some ports misuse CCmode. */
5144 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5145 return op;
5146
5147 /* We have no way to represent a complex constant at the rtl level. */
5148 if (COMPLEX_MODE_P (outermode))
5149 return NULL_RTX;
5150
5151 /* Unpack the value. */
5152
5153 if (GET_CODE (op) == CONST_VECTOR)
5154 {
5155 num_elem = CONST_VECTOR_NUNITS (op);
5156 elems = &CONST_VECTOR_ELT (op, 0);
5157 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5158 }
5159 else
5160 {
5161 num_elem = 1;
5162 elems = &op;
5163 elem_bitsize = max_bitsize;
5164 }
5165 /* If this asserts, it is too complicated; reducing value_bit may help. */
5166 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5167 /* I don't know how to handle endianness of sub-units. */
5168 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5169
5170 for (elem = 0; elem < num_elem; elem++)
5171 {
5172 unsigned char * vp;
5173 rtx el = elems[elem];
5174
5175 /* Vectors are kept in target memory order. (This is probably
5176 a mistake.) */
5177 {
5178 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5179 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5180 / BITS_PER_UNIT);
5181 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5182 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5183 unsigned bytele = (subword_byte % UNITS_PER_WORD
5184 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5185 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5186 }
5187
5188 switch (GET_CODE (el))
5189 {
5190 case CONST_INT:
5191 for (i = 0;
5192 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5193 i += value_bit)
5194 *vp++ = INTVAL (el) >> i;
5195 /* CONST_INTs are always logically sign-extended. */
5196 for (; i < elem_bitsize; i += value_bit)
5197 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5198 break;
5199
5200 case CONST_DOUBLE:
5201 if (GET_MODE (el) == VOIDmode)
5202 {
5203 unsigned char extend = 0;
5204 /* If this triggers, someone should have generated a
5205 CONST_INT instead. */
5206 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5207
5208 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5209 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5210 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5211 {
5212 *vp++
5213 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5214 i += value_bit;
5215 }
5216
5217 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5218 extend = -1;
5219 for (; i < elem_bitsize; i += value_bit)
5220 *vp++ = extend;
5221 }
5222 else
5223 {
5224 long tmp[max_bitsize / 32];
5225 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5226
5227 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5228 gcc_assert (bitsize <= elem_bitsize);
5229 gcc_assert (bitsize % value_bit == 0);
5230
5231 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5232 GET_MODE (el));
5233
5234 /* real_to_target produces its result in words affected by
5235 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5236 and use WORDS_BIG_ENDIAN instead; see the documentation
5237 of SUBREG in rtl.texi. */
5238 for (i = 0; i < bitsize; i += value_bit)
5239 {
5240 int ibase;
5241 if (WORDS_BIG_ENDIAN)
5242 ibase = bitsize - 1 - i;
5243 else
5244 ibase = i;
5245 *vp++ = tmp[ibase / 32] >> i % 32;
5246 }
5247
5248 /* It shouldn't matter what's done here, so fill it with
5249 zero. */
5250 for (; i < elem_bitsize; i += value_bit)
5251 *vp++ = 0;
5252 }
5253 break;
5254
5255 case CONST_FIXED:
5256 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5257 {
5258 for (i = 0; i < elem_bitsize; i += value_bit)
5259 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5260 }
5261 else
5262 {
5263 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5264 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5265 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5266 i += value_bit)
5267 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5268 >> (i - HOST_BITS_PER_WIDE_INT);
5269 for (; i < elem_bitsize; i += value_bit)
5270 *vp++ = 0;
5271 }
5272 break;
5273
5274 default:
5275 gcc_unreachable ();
5276 }
5277 }
5278
5279 /* Now, pick the right byte to start with. */
5280 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5281 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5282 will already have offset 0. */
5283 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5284 {
5285 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5286 - byte);
5287 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5288 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5289 byte = (subword_byte % UNITS_PER_WORD
5290 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5291 }
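  /* Added illustration (commentary only): on a big-endian target,
     (subreg:HI x:SI 2) names the least-significant half, so the
     adjustment above maps BYTE = 2 to 0 (inner size 4 minus outer
     size 2 minus 2), which is where the little-endian value array
     keeps the low-order bytes.  */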
5292
5293 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5294 so if it's become negative it will instead be very large.) */
5295 gcc_assert (byte < GET_MODE_SIZE (innermode));
5296
5297 /* Convert from bytes to chunks of size value_bit. */
5298 value_start = byte * (BITS_PER_UNIT / value_bit);
5299
5300 /* Re-pack the value. */
5301
5302 if (VECTOR_MODE_P (outermode))
5303 {
5304 num_elem = GET_MODE_NUNITS (outermode);
5305 result_v = rtvec_alloc (num_elem);
5306 elems = &RTVEC_ELT (result_v, 0);
5307 outer_submode = GET_MODE_INNER (outermode);
5308 }
5309 else
5310 {
5311 num_elem = 1;
5312 elems = &result_s;
5313 outer_submode = outermode;
5314 }
5315
5316 outer_class = GET_MODE_CLASS (outer_submode);
5317 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5318
5319 gcc_assert (elem_bitsize % value_bit == 0);
5320 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5321
5322 for (elem = 0; elem < num_elem; elem++)
5323 {
5324 unsigned char *vp;
5325
5326 /* Vectors are stored in target memory order. (This is probably
5327 a mistake.) */
5328 {
5329 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5330 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5331 / BITS_PER_UNIT);
5332 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5333 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5334 unsigned bytele = (subword_byte % UNITS_PER_WORD
5335 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5336 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5337 }
5338
5339 switch (outer_class)
5340 {
5341 case MODE_INT:
5342 case MODE_PARTIAL_INT:
5343 {
5344 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5345
5346 for (i = 0;
5347 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5348 i += value_bit)
5349 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5350 for (; i < elem_bitsize; i += value_bit)
5351 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5352 << (i - HOST_BITS_PER_WIDE_INT);
5353
5354 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5355 know why. */
5356 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5357 elems[elem] = gen_int_mode (lo, outer_submode);
5358 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5359 elems[elem] = immed_double_const (lo, hi, outer_submode);
5360 else
5361 return NULL_RTX;
5362 }
5363 break;
5364
5365 case MODE_FLOAT:
5366 case MODE_DECIMAL_FLOAT:
5367 {
5368 REAL_VALUE_TYPE r;
5369 long tmp[max_bitsize / 32];
5370
5371 /* real_from_target wants its input in words affected by
5372 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5373 and use WORDS_BIG_ENDIAN instead; see the documentation
5374 of SUBREG in rtl.texi. */
5375 for (i = 0; i < max_bitsize / 32; i++)
5376 tmp[i] = 0;
5377 for (i = 0; i < elem_bitsize; i += value_bit)
5378 {
5379 int ibase;
5380 if (WORDS_BIG_ENDIAN)
5381 ibase = elem_bitsize - 1 - i;
5382 else
5383 ibase = i;
5384 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5385 }
5386
5387 real_from_target (&r, tmp, outer_submode);
5388 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5389 }
5390 break;
5391
5392 case MODE_FRACT:
5393 case MODE_UFRACT:
5394 case MODE_ACCUM:
5395 case MODE_UACCUM:
5396 {
5397 FIXED_VALUE_TYPE f;
5398 f.data.low = 0;
5399 f.data.high = 0;
5400 f.mode = outer_submode;
5401
5402 for (i = 0;
5403 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5404 i += value_bit)
5405 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5406 for (; i < elem_bitsize; i += value_bit)
5407 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5408 << (i - HOST_BITS_PER_WIDE_INT));
5409
5410 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5411 }
5412 break;
5413
5414 default:
5415 gcc_unreachable ();
5416 }
5417 }
5418 if (VECTOR_MODE_P (outermode))
5419 return gen_rtx_CONST_VECTOR (outermode, result_v);
5420 else
5421 return result_s;
5422 }
5423
5424 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5425 Return 0 if no simplifications are possible. */
5426 rtx
5427 simplify_subreg (enum machine_mode outermode, rtx op,
5428 enum machine_mode innermode, unsigned int byte)
5429 {
5430 /* Little bit of sanity checking. */
5431 gcc_assert (innermode != VOIDmode);
5432 gcc_assert (outermode != VOIDmode);
5433 gcc_assert (innermode != BLKmode);
5434 gcc_assert (outermode != BLKmode);
5435
5436 gcc_assert (GET_MODE (op) == innermode
5437 || GET_MODE (op) == VOIDmode);
5438
5439 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5440 gcc_assert (byte < GET_MODE_SIZE (innermode));
5441
5442 if (outermode == innermode && !byte)
5443 return op;
5444
5445 if (CONST_INT_P (op)
5446 || CONST_DOUBLE_P (op)
5447 || GET_CODE (op) == CONST_FIXED
5448 || GET_CODE (op) == CONST_VECTOR)
5449 return simplify_immed_subreg (outermode, op, innermode, byte);
5450
5451 /* Changing mode twice with SUBREG => just change it once,
5452 or not at all if changing back to OP's starting mode. */
5453 if (GET_CODE (op) == SUBREG)
5454 {
5455 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5456 int final_offset = byte + SUBREG_BYTE (op);
5457 rtx newx;
5458
5459 if (outermode == innermostmode
5460 && byte == 0 && SUBREG_BYTE (op) == 0)
5461 return SUBREG_REG (op);
5462
5463 /* The SUBREG_BYTE represents the offset, as if the value were stored
5464 in memory. The irritating exception is a paradoxical subreg, where
5465 we define SUBREG_BYTE to be 0. On big-endian machines, this
5466 value should be negative. For a moment, undo this exception. */
5467 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5468 {
5469 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5470 if (WORDS_BIG_ENDIAN)
5471 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5472 if (BYTES_BIG_ENDIAN)
5473 final_offset += difference % UNITS_PER_WORD;
5474 }
5475 if (SUBREG_BYTE (op) == 0
5476 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5477 {
5478 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5479 if (WORDS_BIG_ENDIAN)
5480 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5481 if (BYTES_BIG_ENDIAN)
5482 final_offset += difference % UNITS_PER_WORD;
5483 }
5484
5485 /* See whether resulting subreg will be paradoxical. */
5486 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5487 {
5488 /* In nonparadoxical subregs we can't handle negative offsets. */
5489 if (final_offset < 0)
5490 return NULL_RTX;
5491 /* Bail out in case resulting subreg would be incorrect. */
5492 if (final_offset % GET_MODE_SIZE (outermode)
5493 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5494 return NULL_RTX;
5495 }
5496 else
5497 {
5498 int offset = 0;
5499 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5500
5501 /* In a paradoxical subreg, see if we are still looking at the lower part.
5502 If so, our SUBREG_BYTE will be 0. */
5503 if (WORDS_BIG_ENDIAN)
5504 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5505 if (BYTES_BIG_ENDIAN)
5506 offset += difference % UNITS_PER_WORD;
5507 if (offset == final_offset)
5508 final_offset = 0;
5509 else
5510 return NULL_RTX;
5511 }
5512
5513 /* Recurse for further possible simplifications. */
5514 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5515 final_offset);
5516 if (newx)
5517 return newx;
5518 if (validate_subreg (outermode, innermostmode,
5519 SUBREG_REG (op), final_offset))
5520 {
5521 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5522 if (SUBREG_PROMOTED_VAR_P (op)
5523 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5524 && GET_MODE_CLASS (outermode) == MODE_INT
5525 && IN_RANGE (GET_MODE_SIZE (outermode),
5526 GET_MODE_SIZE (innermode),
5527 GET_MODE_SIZE (innermostmode))
5528 && subreg_lowpart_p (newx))
5529 {
5530 SUBREG_PROMOTED_VAR_P (newx) = 1;
5531 SUBREG_PROMOTED_UNSIGNED_SET
5532 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5533 }
5534 return newx;
5535 }
5536 return NULL_RTX;
5537 }
5538
5539 /* Merge implicit and explicit truncations. */
5540
5541 if (GET_CODE (op) == TRUNCATE
5542 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5543 && subreg_lowpart_offset (outermode, innermode) == byte)
5544 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5545 GET_MODE (XEXP (op, 0)));
5546
5547 /* SUBREG of a hard register => just change the register number
5548 and/or mode. If the hard register is not valid in that mode,
5549 suppress this simplification. If the hard register is the stack,
5550 frame, or argument pointer, leave this as a SUBREG. */
5551
5552 if (REG_P (op) && HARD_REGISTER_P (op))
5553 {
5554 unsigned int regno, final_regno;
5555
5556 regno = REGNO (op);
5557 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5558 if (HARD_REGISTER_NUM_P (final_regno))
5559 {
5560 rtx x;
5561 int final_offset = byte;
5562
5563 /* Adjust offset for paradoxical subregs. */
5564 if (byte == 0
5565 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5566 {
5567 int difference = (GET_MODE_SIZE (innermode)
5568 - GET_MODE_SIZE (outermode));
5569 if (WORDS_BIG_ENDIAN)
5570 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5571 if (BYTES_BIG_ENDIAN)
5572 final_offset += difference % UNITS_PER_WORD;
5573 }
5574
5575 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5576
5577 /* Propagate the original regno. We don't have any way to specify
5578 the offset inside the original regno, so do so only for the lowpart.
5579 The information is used only by alias analysis, which cannot
5580 grok partial registers anyway. */
5581
5582 if (subreg_lowpart_offset (outermode, innermode) == byte)
5583 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5584 return x;
5585 }
5586 }
5587
5588 /* If we have a SUBREG of a register that we are replacing and we are
5589 replacing it with a MEM, make a new MEM and try replacing the
5590 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5591 or if we would be widening it. */
5592
5593 if (MEM_P (op)
5594 && ! mode_dependent_address_p (XEXP (op, 0))
5595 /* Allow splitting of volatile memory references in case we don't
5596 have an instruction to move the whole thing. */
5597 && (! MEM_VOLATILE_P (op)
5598 || ! have_insn_for (SET, innermode))
5599 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5600 return adjust_address_nv (op, outermode, byte);
5601
5602 /* Handle complex values represented as CONCAT
5603 of real and imaginary part. */
5604 if (GET_CODE (op) == CONCAT)
5605 {
5606 unsigned int part_size, final_offset;
5607 rtx part, res;
5608
5609 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5610 if (byte < part_size)
5611 {
5612 part = XEXP (op, 0);
5613 final_offset = byte;
5614 }
5615 else
5616 {
5617 part = XEXP (op, 1);
5618 final_offset = byte - part_size;
5619 }
5620
5621 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5622 return NULL_RTX;
5623
5624 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5625 if (res)
5626 return res;
5627 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5628 return gen_rtx_SUBREG (outermode, part, final_offset);
5629 return NULL_RTX;
5630 }
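  /* An illustrative sketch, assuming 4-byte SFmode: for
     (concat:SC (reg:SF A) (reg:SF B)), a subreg at byte 0 selects
     (reg:SF A), the real part, and a subreg at byte 4 selects
     (reg:SF B), the imaginary part; a subreg that would straddle the
     two parts is not simplified.  A and B are hypothetical registers.  */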
5631
5632 /* Optimize SUBREG truncations of zero and sign extended values. */
5633 if ((GET_CODE (op) == ZERO_EXTEND
5634 || GET_CODE (op) == SIGN_EXTEND)
5635 && SCALAR_INT_MODE_P (innermode)
5636 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5637 {
5638 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5639
5640 /* If we're requesting the lowpart of a zero or sign extension,
5641 there are three possibilities. If the outermode is the same
5642 as the origmode, we can omit both the extension and the subreg.
5643 If the outermode is not larger than the origmode, we can apply
5644 the truncation without the extension. Finally, if the outermode
5645 is larger than the origmode, but both are integer modes, we
5646 can just extend to the appropriate mode. */
5647 if (bitpos == 0)
5648 {
5649 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5650 if (outermode == origmode)
5651 return XEXP (op, 0);
5652 if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5653 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5654 subreg_lowpart_offset (outermode,
5655 origmode));
5656 if (SCALAR_INT_MODE_P (outermode))
5657 return simplify_gen_unary (GET_CODE (op), outermode,
5658 XEXP (op, 0), origmode);
5659 }
5660
5661 /* A SUBREG resulting from a zero extension may fold to zero if
5662 it extracts higher bits than the ZERO_EXTEND's source bits. */
5663 if (GET_CODE (op) == ZERO_EXTEND
5664 && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5665 return CONST0_RTX (outermode);
5666 }
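  /* Illustrative sketches of the cases above, assuming a little-endian
     target and a hypothetical register X:
         (subreg:QI (zero_extend:SI (reg:QI X)) 0) -> (reg:QI X)
         (subreg:HI (zero_extend:SI (reg:QI X)) 0)
           -> (zero_extend:HI (reg:QI X))
         (subreg:HI (zero_extend:SI (reg:QI X)) 2) -> (const_int 0)
     since in the last case the requested bits lie entirely above the
     QImode source.  */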
5667
5668 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5669 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5670 the outer subreg is effectively a truncation to the original mode. */
5671 if ((GET_CODE (op) == LSHIFTRT
5672 || GET_CODE (op) == ASHIFTRT)
5673 && SCALAR_INT_MODE_P (outermode)
5674 && SCALAR_INT_MODE_P (innermode)
5675 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
5676 to avoid the possibility that an outer LSHIFTRT shifts by more
5677 than the sign extension's sign_bit_copies and introduces zeros
5678 into the high bits of the result. */
5679 && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5680 && CONST_INT_P (XEXP (op, 1))
5681 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5682 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5683 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5684 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5685 return simplify_gen_binary (ASHIFTRT, outermode,
5686 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
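  /* An illustrative sketch (byte 0 on a little-endian target assumed):
         (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI X)) (const_int 3)) 0)
     becomes
         (ashiftrt:QI (reg:QI X) (const_int 3))
     because every bit the SImode logical shift pulls down into the low
     QImode part is a copy of X's sign bit, which is exactly what a QImode
     arithmetic shift produces.  The ZERO_EXTEND and ASHIFT cases below
     follow the same pattern.  */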
5687
5688 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5689 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5690 the outer subreg is effectively a truncation to the original mode. */
5691 if ((GET_CODE (op) == LSHIFTRT
5692 || GET_CODE (op) == ASHIFTRT)
5693 && SCALAR_INT_MODE_P (outermode)
5694 && SCALAR_INT_MODE_P (innermode)
5695 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5696 && CONST_INT_P (XEXP (op, 1))
5697 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5698 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5699 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5700 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5701 return simplify_gen_binary (LSHIFTRT, outermode,
5702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5703
5704 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5705 (ashift:QI (x:QI) C), where C is a suitable small constant and
5706 the outer subreg is effectively a truncation to the original mode. */
5707 if (GET_CODE (op) == ASHIFT
5708 && SCALAR_INT_MODE_P (outermode)
5709 && SCALAR_INT_MODE_P (innermode)
5710 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5711 && CONST_INT_P (XEXP (op, 1))
5712 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5713 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5714 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5715 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5716 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5717 return simplify_gen_binary (ASHIFT, outermode,
5718 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5719
5720 /* Recognize a word extraction from a multi-word subreg. */
5721 if ((GET_CODE (op) == LSHIFTRT
5722 || GET_CODE (op) == ASHIFTRT)
5723 && SCALAR_INT_MODE_P (innermode)
5724 && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5725 && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5726 && CONST_INT_P (XEXP (op, 1))
5727 && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5728 && INTVAL (XEXP (op, 1)) >= 0
5729 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5730 && byte == subreg_lowpart_offset (outermode, innermode))
5731 {
5732 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5733 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5734 (WORDS_BIG_ENDIAN
5735 ? byte - shifted_bytes
5736 : byte + shifted_bytes));
5737 }
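  /* An illustrative sketch, assuming 32-bit words and a little-endian
     target:
         (subreg:SI (lshiftrt:DI (reg:DI X) (const_int 32)) 0)
     becomes
         (subreg:SI (reg:DI X) 4)
     i.e. the shift-and-truncate is recognized as a direct extraction of
     the high word; X is a hypothetical register.  */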
5738
5739 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5740 and try replacing the SUBREG and shift with it. Don't do this if
5741 the MEM has a mode-dependent address or if we would be widening it. */
5742
5743 if ((GET_CODE (op) == LSHIFTRT
5744 || GET_CODE (op) == ASHIFTRT)
5745 && SCALAR_INT_MODE_P (innermode)
5746 && MEM_P (XEXP (op, 0))
5747 && CONST_INT_P (XEXP (op, 1))
5748 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5749 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5750 && INTVAL (XEXP (op, 1)) > 0
5751 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5752 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5753 && ! MEM_VOLATILE_P (XEXP (op, 0))
5754 && byte == subreg_lowpart_offset (outermode, innermode)
5755 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5756 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5757 {
5758 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5759 return adjust_address_nv (XEXP (op, 0), outermode,
5760 (WORDS_BIG_ENDIAN
5761 ? byte - shifted_bytes
5762 : byte + shifted_bytes));
5763 }
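  /* Under the same illustrative assumptions (32-bit words, little-endian,
     P a hypothetical pointer register):
         (subreg:SI (lshiftrt:DI (mem:DI (reg:SI P)) (const_int 32)) 0)
     becomes roughly
         (mem:SI (plus:SI (reg:SI P) (const_int 4)))
     reading the high word directly from memory, provided the address is
     not mode-dependent and the MEM is not volatile.  */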
5764
5765 return NULL_RTX;
5766 }
5767
5768 /* Make a SUBREG operation or equivalent if it folds. */
5769
5770 rtx
5771 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5772 enum machine_mode innermode, unsigned int byte)
5773 {
5774 rtx newx;
5775
5776 newx = simplify_subreg (outermode, op, innermode, byte);
5777 if (newx)
5778 return newx;
5779
5780 if (GET_CODE (op) == SUBREG
5781 || GET_CODE (op) == CONCAT
5782 || GET_MODE (op) == VOIDmode)
5783 return NULL_RTX;
5784
5785 if (validate_subreg (outermode, innermode, op, byte))
5786 return gen_rtx_SUBREG (outermode, op, byte);
5787
5788 return NULL_RTX;
5789 }
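/* An illustrative sketch of a typical use (X below is a hypothetical
   DImode rtx): to take the low SImode part of X, a caller can write

       lowpart = simplify_gen_subreg (SImode, x, DImode,
                                      subreg_lowpart_offset (SImode,
                                                             DImode));

   If X can be folded (a constant, a CONCAT, a suitable hard register, ...)
   the folded rtx is returned; otherwise a fresh (subreg:SI ...) is
   generated, or NULL_RTX if such a subreg would not be valid.  */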
5790
5791 /* Simplify X, an rtx expression.
5792
5793 Return the simplified expression or NULL if no simplifications
5794 were possible.
5795
5796 This is the preferred entry point into the simplification routines;
5797 however, we still allow passes to call the more specific routines.
5798
5799 Right now GCC has three (yes, three) major bodies of RTL simplification
5800 code that need to be unified.
5801
5802 1. fold_rtx in cse.c. This code uses various CSE specific
5803 information to aid in RTL simplification.
5804
5805 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5806 it uses combine specific information to aid in RTL
5807 simplification.
5808
5809 3. The routines in this file.
5810
5811
5812 Long term we want to only have one body of simplification code; to
5813 get to that state I recommend the following steps:
5814
5815 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5816 which do not depend on pass-specific state into these routines.
5817
5818 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5819 use this routine whenever possible.
5820
5821 3. Allow for pass dependent state to be provided to these
5822 routines and add simplifications based on the pass dependent
5823 state. Remove code from cse.c & combine.c that becomes
5824 redundant/dead.
5825
5826 It will take time, but ultimately the compiler will be easier to
5827 maintain and improve. It's totally silly that when we add a
5828 simplification it needs to be added to 4 places (3 for RTL
5829 simplification and 1 for tree simplification). */
5830
5831 rtx
5832 simplify_rtx (const_rtx x)
5833 {
5834 const enum rtx_code code = GET_CODE (x);
5835 const enum machine_mode mode = GET_MODE (x);
5836
5837 switch (GET_RTX_CLASS (code))
5838 {
5839 case RTX_UNARY:
5840 return simplify_unary_operation (code, mode,
5841 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5842 case RTX_COMM_ARITH:
5843 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5844 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5845
5846 /* Fall through.... */
5847
5848 case RTX_BIN_ARITH:
5849 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5850
5851 case RTX_TERNARY:
5852 case RTX_BITFIELD_OPS:
5853 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5854 XEXP (x, 0), XEXP (x, 1),
5855 XEXP (x, 2));
5856
5857 case RTX_COMPARE:
5858 case RTX_COMM_COMPARE:
5859 return simplify_relational_operation (code, mode,
5860 ((GET_MODE (XEXP (x, 0))
5861 != VOIDmode)
5862 ? GET_MODE (XEXP (x, 0))
5863 : GET_MODE (XEXP (x, 1))),
5864 XEXP (x, 0),
5865 XEXP (x, 1));
5866
5867 case RTX_EXTRA:
5868 if (code == SUBREG)
5869 return simplify_subreg (mode, SUBREG_REG (x),
5870 GET_MODE (SUBREG_REG (x)),
5871 SUBREG_BYTE (x));
5872 break;
5873
5874 case RTX_OBJ:
5875 if (code == LO_SUM)
5876 {
5877 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5878 if (GET_CODE (XEXP (x, 0)) == HIGH
5879 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5880 return XEXP (x, 1);
5881 }
5882 break;
5883
5884 default:
5885 break;
5886 }
5887 return NULL;
5888 }
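/* An illustrative sketch of how a caller might use simplify_rtx (the
   names X and Y below are hypothetical):

       rtx x = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));
       rtx y = simplify_rtx (x);

   Here Y is (const_int 5).  Similarly (lo_sum (high FOO) FOO) folds to
   FOO as shown above, while anything that cannot be folded yields NULL
   and the caller keeps the original rtx.  */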