1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
41
42 /* Simplification and canonicalization of RTL. */
43
44 /* Much code operates on (low, high) pairs; the low value is an
45 unsigned wide int, the high value a signed wide int. We
46 occasionally need to sign extend from low to high as if low were a
47 signed wide int. */
48 #define HWI_SIGN_EXTEND(low) \
49 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
50
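
/* Illustrative sketch, not part of the original file: how HWI_SIGN_EXTEND
   behaves when a single HOST_WIDE_INT is widened into a (low, high) pair.
   The helper name is made up; the block is kept under #if 0 so it never
   affects the build.  */
#if 0
static void
hwi_sign_extend_sketch (void)
{
  unsigned HOST_WIDE_INT low_neg = (unsigned HOST_WIDE_INT) -5;
  unsigned HOST_WIDE_INT low_pos = 42;

  /* The high word becomes all ones when the low word's sign bit is set,
     and zero otherwise.  */
  gcc_assert (HWI_SIGN_EXTEND (low_neg) == (HOST_WIDE_INT) -1);
  gcc_assert (HWI_SIGN_EXTEND (low_pos) == (HOST_WIDE_INT) 0);
}
#endif
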
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 rtx, rtx, rtx, rtx);
64 \f
65 /* Negate a CONST_INT rtx, truncating (because a conversion from a
66 maximally negative number can overflow). */
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
69 {
70 return gen_int_mode (- INTVAL (i), mode);
71 }
72
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
75
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 {
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
81
82 if (GET_MODE_CLASS (mode) != MODE_INT)
83 return false;
84
85 width = GET_MODE_PRECISION (mode);
86 if (width == 0)
87 return false;
88
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 && GET_CODE (x) == CONST_DOUBLE
94 && CONST_DOUBLE_LOW (x) == 0)
95 {
96 val = CONST_DOUBLE_HIGH (x);
97 width -= HOST_BITS_PER_WIDE_INT;
98 }
99 else
100 /* FIXME: We don't yet have a representation for wider modes. */
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
107
108 /* Test whether VAL is equal to the most significant bit of mode MODE
109 (after masking with the mode mask of MODE). Returns false if the
110 precision of MODE is too large to handle. */
111
112 bool
113 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
114 {
115 unsigned int width;
116
117 if (GET_MODE_CLASS (mode) != MODE_INT)
118 return false;
119
120 width = GET_MODE_PRECISION (mode);
121 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
122 return false;
123
124 val &= GET_MODE_MASK (mode);
125 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
126 }
127
128 /* Test whether the most significant bit of mode MODE is set in VAL.
129 Returns false if the precision of MODE is too large to handle. */
130 bool
131 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
132 {
133 unsigned int width;
134
135 if (GET_MODE_CLASS (mode) != MODE_INT)
136 return false;
137
138 width = GET_MODE_PRECISION (mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
141
142 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
143 return val != 0;
144 }
145
146 /* Test whether the most significant bit of mode MODE is clear in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
150 {
151 unsigned int width;
152
153 if (GET_MODE_CLASS (mode) != MODE_INT)
154 return false;
155
156 width = GET_MODE_PRECISION (mode);
157 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
158 return false;
159
160 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
161 return val == 0;
162 }
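
/* Illustrative sketch, not part of the original file: what the sign-bit
   predicates above return for SImode, whose precision is 32 bits.  The
   helper name is made up; the block is kept under #if 0 so it never
   affects the build.  */
#if 0
static void
signbit_predicates_sketch (void)
{
  unsigned HOST_WIDE_INT sbit = (unsigned HOST_WIDE_INT) 1 << 31;

  /* An rtx holding exactly the SImode sign bit.  */
  gcc_assert (mode_signbit_p (SImode, gen_int_mode (sbit, SImode)));

  /* VAL equal to the sign bit, VAL with the sign bit set, and VAL with
     the sign bit clear, respectively.  */
  gcc_assert (val_signbit_p (SImode, sbit));
  gcc_assert (val_signbit_known_set_p (SImode, GET_MODE_MASK (SImode)));
  gcc_assert (val_signbit_known_clear_p (SImode, 42));
}
#endif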
163 \f
164 /* Make a binary operation by properly ordering the operands and
165 seeing if the expression folds. */
166
167 rtx
168 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
169 rtx op1)
170 {
171 rtx tem;
172
173 /* If this simplifies, do it. */
174 tem = simplify_binary_operation (code, mode, op0, op1);
175 if (tem)
176 return tem;
177
178 /* Put complex operands first and constants second if commutative. */
179 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
180 && swap_commutative_operands_p (op0, op1))
181 tem = op0, op0 = op1, op1 = tem;
182
183 return gen_rtx_fmt_ee (code, mode, op0, op1);
184 }
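
/* Illustrative sketch, not part of the original file: typical behaviour of
   simplify_gen_binary.  It either folds outright or returns a fresh rtx
   with any constant operand placed second.  The helper name is made up and
   FIRST_PSEUDO_REGISTER is used only to get a pseudo number; the block is
   kept under #if 0 so it never affects the build.  */
#if 0
static void
simplify_gen_binary_sketch (void)
{
  rtx x = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx sum;

  /* Integer x + 0 folds back to x.  */
  gcc_assert (simplify_gen_binary (PLUS, SImode, x, const0_rtx) == x);

  /* No folding here, but the constant is canonicalized into operand 1.  */
  sum = simplify_gen_binary (PLUS, SImode, const1_rtx, x);
  gcc_assert (CONST_INT_P (XEXP (sum, 1)));
}
#endif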
185 \f
186 /* If X is a MEM referencing the constant pool, return the real value.
187 Otherwise return X. */
188 rtx
189 avoid_constant_pool_reference (rtx x)
190 {
191 rtx c, tmp, addr;
192 enum machine_mode cmode;
193 HOST_WIDE_INT offset = 0;
194
195 switch (GET_CODE (x))
196 {
197 case MEM:
198 break;
199
200 case FLOAT_EXTEND:
201 /* Handle float extensions of constant pool references. */
202 tmp = XEXP (x, 0);
203 c = avoid_constant_pool_reference (tmp);
204 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
205 {
206 REAL_VALUE_TYPE d;
207
208 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
209 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
210 }
211 return x;
212
213 default:
214 return x;
215 }
216
217 if (GET_MODE (x) == BLKmode)
218 return x;
219
220 addr = XEXP (x, 0);
221
222 /* Call target hook to avoid the effects of -fpic etc.... */
223 addr = targetm.delegitimize_address (addr);
224
225 /* Split the address into a base and integer offset. */
226 if (GET_CODE (addr) == CONST
227 && GET_CODE (XEXP (addr, 0)) == PLUS
228 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
229 {
230 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
231 addr = XEXP (XEXP (addr, 0), 0);
232 }
233
234 if (GET_CODE (addr) == LO_SUM)
235 addr = XEXP (addr, 1);
236
237 /* If this is a constant pool reference, we can turn it into its
238 constant and hope that simplifications happen. */
239 if (GET_CODE (addr) == SYMBOL_REF
240 && CONSTANT_POOL_ADDRESS_P (addr))
241 {
242 c = get_pool_constant (addr);
243 cmode = get_pool_mode (addr);
244
245 /* If we're accessing the constant in a different mode than it was
246 originally stored, attempt to fix that up via subreg simplifications.
247 If that fails we have no choice but to return the original memory. */
248 if (offset != 0 || cmode != GET_MODE (x))
249 {
250 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
251 if (tem && CONSTANT_P (tem))
252 return tem;
253 }
254 else
255 return c;
256 }
257
258 return x;
259 }
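
/* Illustrative sketch, not part of the original file: recovering a value
   that was forced into the constant pool.  On a target that places SFmode
   constants in memory, force_const_mem yields a MEM addressed by a pool
   SYMBOL_REF and the routine above hands back the original CONST_DOUBLE;
   otherwise the MEM is simply returned unchanged.  The helper name is made
   up; the block is kept under #if 0 so it never affects the build.  */
#if 0
static void
avoid_constant_pool_reference_sketch (void)
{
  rtx one = CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode);
  rtx mem = force_const_mem (SFmode, one);
  rtx val = avoid_constant_pool_reference (mem);

  /* Either the constant was recovered or nothing was lost.  */
  gcc_assert (rtx_equal_p (val, one) || val == mem);
}
#endif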
260 \f
261 /* Simplify a MEM based on its attributes. This is the default
262 delegitimize_address target hook, and it's recommended that every
263 overrider call it. */
264
265 rtx
266 delegitimize_mem_from_attrs (rtx x)
267 {
268 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
269 use their base addresses as equivalent. */
270 if (MEM_P (x)
271 && MEM_EXPR (x)
272 && MEM_OFFSET_KNOWN_P (x))
273 {
274 tree decl = MEM_EXPR (x);
275 enum machine_mode mode = GET_MODE (x);
276 HOST_WIDE_INT offset = 0;
277
278 switch (TREE_CODE (decl))
279 {
280 default:
281 decl = NULL;
282 break;
283
284 case VAR_DECL:
285 break;
286
287 case ARRAY_REF:
288 case ARRAY_RANGE_REF:
289 case COMPONENT_REF:
290 case BIT_FIELD_REF:
291 case REALPART_EXPR:
292 case IMAGPART_EXPR:
293 case VIEW_CONVERT_EXPR:
294 {
295 HOST_WIDE_INT bitsize, bitpos;
296 tree toffset;
297 int unsignedp, volatilep = 0;
298
299 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
300 &mode, &unsignedp, &volatilep, false);
301 if (bitsize != GET_MODE_BITSIZE (mode)
302 || (bitpos % BITS_PER_UNIT)
303 || (toffset && !host_integerp (toffset, 0)))
304 decl = NULL;
305 else
306 {
307 offset += bitpos / BITS_PER_UNIT;
308 if (toffset)
309 offset += TREE_INT_CST_LOW (toffset);
310 }
311 break;
312 }
313 }
314
315 if (decl
316 && mode == GET_MODE (x)
317 && TREE_CODE (decl) == VAR_DECL
318 && (TREE_STATIC (decl)
319 || DECL_THREAD_LOCAL_P (decl))
320 && DECL_RTL_SET_P (decl)
321 && MEM_P (DECL_RTL (decl)))
322 {
323 rtx newx;
324
325 offset += MEM_OFFSET (x);
326
327 newx = DECL_RTL (decl);
328
329 if (MEM_P (newx))
330 {
331 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
332
333 /* Avoid creating a new MEM needlessly if we already had
334 the same address. We do if there's no OFFSET and the
335 old address X is identical to NEWX, or if X is of the
336 form (plus NEWX OFFSET), or the NEWX is of the form
337 (plus Y (const_int Z)) and X is that with the offset
338 added: (plus Y (const_int Z+OFFSET)). */
339 if (!((offset == 0
340 || (GET_CODE (o) == PLUS
341 && GET_CODE (XEXP (o, 1)) == CONST_INT
342 && (offset == INTVAL (XEXP (o, 1))
343 || (GET_CODE (n) == PLUS
344 && GET_CODE (XEXP (n, 1)) == CONST_INT
345 && (INTVAL (XEXP (n, 1)) + offset
346 == INTVAL (XEXP (o, 1)))
347 && (n = XEXP (n, 0))))
348 && (o = XEXP (o, 0))))
349 && rtx_equal_p (o, n)))
350 x = adjust_address_nv (newx, mode, offset);
351 }
352 else if (GET_MODE (x) == GET_MODE (newx)
353 && offset == 0)
354 x = newx;
355 }
356 }
357
358 return x;
359 }
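
/* Illustrative sketch, not part of the original file: how a hypothetical
   target's TARGET_DELEGITIMIZE_ADDRESS hook would chain to the default
   implementation above before doing its own unwrapping, as the comment
   recommends.  Both the function and the UNSPEC handling are invented for
   the example; the block is kept under #if 0 so it never affects the
   build.  */
#if 0
static rtx
example_delegitimize_address (rtx x)
{
  /* Let the generic code use the MEM_EXPR/MEM_OFFSET attributes first.  */
  x = delegitimize_mem_from_attrs (x);

  /* Then strip target-specific PIC decoration such as (unspec [sym] N).  */
  if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
    x = XVECEXP (x, 0, 0);

  return x;
}
#endif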
360 \f
361 /* Make a unary operation by first seeing if it folds and otherwise making
362 the specified operation. */
363
364 rtx
365 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
366 enum machine_mode op_mode)
367 {
368 rtx tem;
369
370 /* If this simplifies, use it. */
371 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
372 return tem;
373
374 return gen_rtx_fmt_e (code, mode, op);
375 }
376
377 /* Likewise for ternary operations. */
378
379 rtx
380 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
381 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
382 {
383 rtx tem;
384
385 /* If this simplifies, use it. */
386 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
387 op0, op1, op2)))
388 return tem;
389
390 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
391 }
392
393 /* Likewise, for relational operations.
394 CMP_MODE specifies mode comparison is done in. */
395
396 rtx
397 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
398 enum machine_mode cmp_mode, rtx op0, rtx op1)
399 {
400 rtx tem;
401
402 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
403 op0, op1)))
404 return tem;
405
406 return gen_rtx_fmt_ee (code, mode, op0, op1);
407 }
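
/* Illustrative sketch, not part of the original file: the relational
   generator folds comparisons of constants, yielding const_true_rtx
   (which is (const_int 1) under the usual STORE_FLAG_VALUE of 1) or
   const0_rtx.  The helper name is made up; the block is kept under #if 0
   so it never affects the build.  */
#if 0
static void
simplify_gen_relational_sketch (void)
{
  rtx t = simplify_gen_relational (EQ, SImode, SImode,
                                   const0_rtx, const0_rtx);
  rtx f = simplify_gen_relational (LT, SImode, SImode,
                                   const1_rtx, const0_rtx);

  gcc_assert (t == const_true_rtx && f == const0_rtx);
}
#endif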
408 \f
409 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
410 and simplify the result. If FN is non-NULL, call this callback on each
411 X; if it returns non-NULL, replace X with its return value and simplify the
412 result. */
413
414 rtx
415 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
416 rtx (*fn) (rtx, const_rtx, void *), void *data)
417 {
418 enum rtx_code code = GET_CODE (x);
419 enum machine_mode mode = GET_MODE (x);
420 enum machine_mode op_mode;
421 const char *fmt;
422 rtx op0, op1, op2, newx, op;
423 rtvec vec, newvec;
424 int i, j;
425
426 if (__builtin_expect (fn != NULL, 0))
427 {
428 newx = fn (x, old_rtx, data);
429 if (newx)
430 return newx;
431 }
432 else if (rtx_equal_p (x, old_rtx))
433 return copy_rtx ((rtx) data);
434
435 switch (GET_RTX_CLASS (code))
436 {
437 case RTX_UNARY:
438 op0 = XEXP (x, 0);
439 op_mode = GET_MODE (op0);
440 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
441 if (op0 == XEXP (x, 0))
442 return x;
443 return simplify_gen_unary (code, mode, op0, op_mode);
444
445 case RTX_BIN_ARITH:
446 case RTX_COMM_ARITH:
447 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
448 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
449 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
450 return x;
451 return simplify_gen_binary (code, mode, op0, op1);
452
453 case RTX_COMPARE:
454 case RTX_COMM_COMPARE:
455 op0 = XEXP (x, 0);
456 op1 = XEXP (x, 1);
457 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
458 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
459 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
460 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
461 return x;
462 return simplify_gen_relational (code, mode, op_mode, op0, op1);
463
464 case RTX_TERNARY:
465 case RTX_BITFIELD_OPS:
466 op0 = XEXP (x, 0);
467 op_mode = GET_MODE (op0);
468 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
469 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
470 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
471 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
472 return x;
473 if (op_mode == VOIDmode)
474 op_mode = GET_MODE (op0);
475 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
476
477 case RTX_EXTRA:
478 if (code == SUBREG)
479 {
480 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
481 if (op0 == SUBREG_REG (x))
482 return x;
483 op0 = simplify_gen_subreg (GET_MODE (x), op0,
484 GET_MODE (SUBREG_REG (x)),
485 SUBREG_BYTE (x));
486 return op0 ? op0 : x;
487 }
488 break;
489
490 case RTX_OBJ:
491 if (code == MEM)
492 {
493 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
494 if (op0 == XEXP (x, 0))
495 return x;
496 return replace_equiv_address_nv (x, op0);
497 }
498 else if (code == LO_SUM)
499 {
500 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
501 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
502
503 /* (lo_sum (high x) x) -> x */
504 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
505 return op1;
506
507 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
508 return x;
509 return gen_rtx_LO_SUM (mode, op0, op1);
510 }
511 break;
512
513 default:
514 break;
515 }
516
517 newx = x;
518 fmt = GET_RTX_FORMAT (code);
519 for (i = 0; fmt[i]; i++)
520 switch (fmt[i])
521 {
522 case 'E':
523 vec = XVEC (x, i);
524 newvec = XVEC (newx, i);
525 for (j = 0; j < GET_NUM_ELEM (vec); j++)
526 {
527 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
528 old_rtx, fn, data);
529 if (op != RTVEC_ELT (vec, j))
530 {
531 if (newvec == vec)
532 {
533 newvec = shallow_copy_rtvec (vec);
534 if (x == newx)
535 newx = shallow_copy_rtx (x);
536 XVEC (newx, i) = newvec;
537 }
538 RTVEC_ELT (newvec, j) = op;
539 }
540 }
541 break;
542
543 case 'e':
544 if (XEXP (x, i))
545 {
546 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
547 if (op != XEXP (x, i))
548 {
549 if (x == newx)
550 newx = shallow_copy_rtx (x);
551 XEXP (newx, i) = op;
552 }
553 }
554 break;
555 }
556 return newx;
557 }
558
559 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
560 resulting RTX. Return a new RTX which is as simplified as possible. */
561
562 rtx
563 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
564 {
565 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
566 }
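
/* Illustrative sketch, not part of the original file: substituting a known
   constant for a register and letting the result fold.  The helper name is
   made up; the block is kept under #if 0 so it never affects the build.  */
#if 0
static void
simplify_replace_rtx_sketch (void)
{
  rtx reg = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx expr = gen_rtx_PLUS (SImode, reg, GEN_INT (4));
  rtx folded = simplify_replace_rtx (expr, reg, const0_rtx);

  /* (plus (reg) (const_int 4)) with reg := 0 folds to (const_int 4).  */
  gcc_assert (CONST_INT_P (folded) && INTVAL (folded) == 4);
}
#endif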
567 \f
568 /* Try to simplify a unary operation CODE whose output mode is to be
569 MODE with input operand OP whose mode was originally OP_MODE.
570 Return zero if no simplification can be made. */
571 rtx
572 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
573 rtx op, enum machine_mode op_mode)
574 {
575 rtx trueop, tem;
576
577 trueop = avoid_constant_pool_reference (op);
578
579 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
580 if (tem)
581 return tem;
582
583 return simplify_unary_operation_1 (code, mode, op);
584 }
585
586 /* Perform some simplifications we can do even if the operands
587 aren't constant. */
588 static rtx
589 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
590 {
591 enum rtx_code reversed;
592 rtx temp;
593
594 switch (code)
595 {
596 case NOT:
597 /* (not (not X)) == X. */
598 if (GET_CODE (op) == NOT)
599 return XEXP (op, 0);
600
601 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
602 comparison is all ones. */
603 if (COMPARISON_P (op)
604 && (mode == BImode || STORE_FLAG_VALUE == -1)
605 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
606 return simplify_gen_relational (reversed, mode, VOIDmode,
607 XEXP (op, 0), XEXP (op, 1));
608
609 /* (not (plus X -1)) can become (neg X). */
610 if (GET_CODE (op) == PLUS
611 && XEXP (op, 1) == constm1_rtx)
612 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
613
614 /* Similarly, (not (neg X)) is (plus X -1). */
615 if (GET_CODE (op) == NEG)
616 return plus_constant (mode, XEXP (op, 0), -1);
617
618 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
619 if (GET_CODE (op) == XOR
620 && CONST_INT_P (XEXP (op, 1))
621 && (temp = simplify_unary_operation (NOT, mode,
622 XEXP (op, 1), mode)) != 0)
623 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
624
625 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
626 if (GET_CODE (op) == PLUS
627 && CONST_INT_P (XEXP (op, 1))
628 && mode_signbit_p (mode, XEXP (op, 1))
629 && (temp = simplify_unary_operation (NOT, mode,
630 XEXP (op, 1), mode)) != 0)
631 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
632
633
634 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
635 operands other than 1, but that is not valid. We could do a
636 similar simplification for (not (lshiftrt C X)) where C is
637 just the sign bit, but this doesn't seem common enough to
638 bother with. */
639 if (GET_CODE (op) == ASHIFT
640 && XEXP (op, 0) == const1_rtx)
641 {
642 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
643 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
644 }
645
646 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
647 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
648 so we can perform the above simplification. */
649
650 if (STORE_FLAG_VALUE == -1
651 && GET_CODE (op) == ASHIFTRT
652 && CONST_INT_P (XEXP (op, 1))
653 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
654 return simplify_gen_relational (GE, mode, VOIDmode,
655 XEXP (op, 0), const0_rtx);
656
657
658 if (GET_CODE (op) == SUBREG
659 && subreg_lowpart_p (op)
660 && (GET_MODE_SIZE (GET_MODE (op))
661 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
662 && GET_CODE (SUBREG_REG (op)) == ASHIFT
663 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
664 {
665 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
666 rtx x;
667
668 x = gen_rtx_ROTATE (inner_mode,
669 simplify_gen_unary (NOT, inner_mode, const1_rtx,
670 inner_mode),
671 XEXP (SUBREG_REG (op), 1));
672 return rtl_hooks.gen_lowpart_no_emit (mode, x);
673 }
674
675 /* Apply De Morgan's laws to reduce the number of patterns for machines
676 with negating logical insns (and-not, nand, etc.). If result has
677 only one NOT, put it first, since that is how the patterns are
678 coded. */
679
680 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
681 {
682 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
683 enum machine_mode op_mode;
684
685 op_mode = GET_MODE (in1);
686 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
687
688 op_mode = GET_MODE (in2);
689 if (op_mode == VOIDmode)
690 op_mode = mode;
691 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
692
693 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
694 {
695 rtx tem = in2;
696 in2 = in1; in1 = tem;
697 }
698
699 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
700 mode, in1, in2);
701 }
702 break;
703
704 case NEG:
705 /* (neg (neg X)) == X. */
706 if (GET_CODE (op) == NEG)
707 return XEXP (op, 0);
708
709 /* (neg (plus X 1)) can become (not X). */
710 if (GET_CODE (op) == PLUS
711 && XEXP (op, 1) == const1_rtx)
712 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
713
714 /* Similarly, (neg (not X)) is (plus X 1). */
715 if (GET_CODE (op) == NOT)
716 return plus_constant (mode, XEXP (op, 0), 1);
717
718 /* (neg (minus X Y)) can become (minus Y X). This transformation
719 isn't safe for modes with signed zeros, since if X and Y are
720 both +0, (minus Y X) is the same as (minus X Y). If the
721 rounding mode is towards +infinity (or -infinity) then the two
722 expressions will be rounded differently. */
723 if (GET_CODE (op) == MINUS
724 && !HONOR_SIGNED_ZEROS (mode)
725 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
726 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
727
728 if (GET_CODE (op) == PLUS
729 && !HONOR_SIGNED_ZEROS (mode)
730 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
731 {
732 /* (neg (plus A C)) is simplified to (minus -C A). */
733 if (CONST_INT_P (XEXP (op, 1))
734 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
735 {
736 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
737 if (temp)
738 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
739 }
740
741 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
742 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
743 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
744 }
745
746 /* (neg (mult A B)) becomes (mult A (neg B)).
747 This works even for floating-point values. */
748 if (GET_CODE (op) == MULT
749 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
750 {
751 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
752 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
753 }
754
755 /* NEG commutes with ASHIFT since it is multiplication. Only do
756 this if we can then eliminate the NEG (e.g., if the operand
757 is a constant). */
758 if (GET_CODE (op) == ASHIFT)
759 {
760 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
761 if (temp)
762 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
763 }
764
765 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
766 C is equal to the width of MODE minus 1. */
767 if (GET_CODE (op) == ASHIFTRT
768 && CONST_INT_P (XEXP (op, 1))
769 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
770 return simplify_gen_binary (LSHIFTRT, mode,
771 XEXP (op, 0), XEXP (op, 1));
772
773 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
774 C is equal to the width of MODE minus 1. */
775 if (GET_CODE (op) == LSHIFTRT
776 && CONST_INT_P (XEXP (op, 1))
777 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
778 return simplify_gen_binary (ASHIFTRT, mode,
779 XEXP (op, 0), XEXP (op, 1));
780
781 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
782 if (GET_CODE (op) == XOR
783 && XEXP (op, 1) == const1_rtx
784 && nonzero_bits (XEXP (op, 0), mode) == 1)
785 return plus_constant (mode, XEXP (op, 0), -1);
786
787 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
788 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
789 if (GET_CODE (op) == LT
790 && XEXP (op, 1) == const0_rtx
791 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
792 {
793 enum machine_mode inner = GET_MODE (XEXP (op, 0));
794 int isize = GET_MODE_PRECISION (inner);
795 if (STORE_FLAG_VALUE == 1)
796 {
797 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
798 GEN_INT (isize - 1));
799 if (mode == inner)
800 return temp;
801 if (GET_MODE_PRECISION (mode) > isize)
802 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
803 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
804 }
805 else if (STORE_FLAG_VALUE == -1)
806 {
807 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
808 GEN_INT (isize - 1));
809 if (mode == inner)
810 return temp;
811 if (GET_MODE_PRECISION (mode) > isize)
812 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
813 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
814 }
815 }
816 break;
817
818 case TRUNCATE:
819 /* We can't handle truncation to a partial integer mode here
820 because we don't know the real bitsize of the partial
821 integer mode. */
822 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
823 break;
824
825 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
826 if ((GET_CODE (op) == SIGN_EXTEND
827 || GET_CODE (op) == ZERO_EXTEND)
828 && GET_MODE (XEXP (op, 0)) == mode)
829 return XEXP (op, 0);
830
831 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
832 (OP:SI foo:SI) if OP is NEG or ABS. */
833 if ((GET_CODE (op) == ABS
834 || GET_CODE (op) == NEG)
835 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
836 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
837 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
838 return simplify_gen_unary (GET_CODE (op), mode,
839 XEXP (XEXP (op, 0), 0), mode);
840
841 /* (truncate:A (subreg:B (truncate:C X) 0)) is
842 (truncate:A X). */
843 if (GET_CODE (op) == SUBREG
844 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
845 && subreg_lowpart_p (op))
846 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
847 GET_MODE (XEXP (SUBREG_REG (op), 0)));
848
849 /* If we know that the value is already truncated, we can
850 replace the TRUNCATE with a SUBREG. Note that this is also
851 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
852 modes we just have to apply a different definition for
853 truncation. But don't do this for an (LSHIFTRT (MULT ...))
854 since this will cause problems with the umulXi3_highpart
855 patterns. */
856 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
857 ? (num_sign_bit_copies (op, GET_MODE (op))
858 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
859 - GET_MODE_PRECISION (mode)))
860 : truncated_to_mode (mode, op))
861 && ! (GET_CODE (op) == LSHIFTRT
862 && GET_CODE (XEXP (op, 0)) == MULT))
863 return rtl_hooks.gen_lowpart_no_emit (mode, op);
864
865 /* A truncate of a comparison can be replaced with a subreg if
866 STORE_FLAG_VALUE permits. This is like the previous test,
867 but it works even if the comparison is done in a mode larger
868 than HOST_BITS_PER_WIDE_INT. */
869 if (HWI_COMPUTABLE_MODE_P (mode)
870 && COMPARISON_P (op)
871 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
872 return rtl_hooks.gen_lowpart_no_emit (mode, op);
873 break;
874
875 case FLOAT_TRUNCATE:
876 if (DECIMAL_FLOAT_MODE_P (mode))
877 break;
878
879 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
880 if (GET_CODE (op) == FLOAT_EXTEND
881 && GET_MODE (XEXP (op, 0)) == mode)
882 return XEXP (op, 0);
883
884 /* (float_truncate:SF (float_truncate:DF foo:XF))
885 = (float_truncate:SF foo:XF).
886 This may eliminate double rounding, so it is unsafe.
887
888 (float_truncate:SF (float_extend:XF foo:DF))
889 = (float_truncate:SF foo:DF).
890
891 (float_truncate:DF (float_extend:XF foo:SF))
892 = (float_extend:DF foo:SF). */
893 if ((GET_CODE (op) == FLOAT_TRUNCATE
894 && flag_unsafe_math_optimizations)
895 || GET_CODE (op) == FLOAT_EXTEND)
896 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
897 0)))
898 > GET_MODE_SIZE (mode)
899 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
900 mode,
901 XEXP (op, 0), mode);
902
903 /* (float_truncate (float x)) is (float x) */
904 if (GET_CODE (op) == FLOAT
905 && (flag_unsafe_math_optimizations
906 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
907 && ((unsigned)significand_size (GET_MODE (op))
908 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
909 - num_sign_bit_copies (XEXP (op, 0),
910 GET_MODE (XEXP (op, 0))))))))
911 return simplify_gen_unary (FLOAT, mode,
912 XEXP (op, 0),
913 GET_MODE (XEXP (op, 0)));
914
915 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
916 (OP:SF foo:SF) if OP is NEG or ABS. */
917 if ((GET_CODE (op) == ABS
918 || GET_CODE (op) == NEG)
919 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
920 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
921 return simplify_gen_unary (GET_CODE (op), mode,
922 XEXP (XEXP (op, 0), 0), mode);
923
924 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
925 is (float_truncate:SF x). */
926 if (GET_CODE (op) == SUBREG
927 && subreg_lowpart_p (op)
928 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
929 return SUBREG_REG (op);
930 break;
931
932 case FLOAT_EXTEND:
933 if (DECIMAL_FLOAT_MODE_P (mode))
934 break;
935
936 /* (float_extend (float_extend x)) is (float_extend x)
937
938 (float_extend (float x)) is (float x) assuming that double
939 rounding can't happen.
940 */
941 if (GET_CODE (op) == FLOAT_EXTEND
942 || (GET_CODE (op) == FLOAT
943 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
944 && ((unsigned)significand_size (GET_MODE (op))
945 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
946 - num_sign_bit_copies (XEXP (op, 0),
947 GET_MODE (XEXP (op, 0)))))))
948 return simplify_gen_unary (GET_CODE (op), mode,
949 XEXP (op, 0),
950 GET_MODE (XEXP (op, 0)));
951
952 break;
953
954 case ABS:
955 /* (abs (neg <foo>)) -> (abs <foo>) */
956 if (GET_CODE (op) == NEG)
957 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
958 GET_MODE (XEXP (op, 0)));
959
960 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
961 do nothing. */
962 if (GET_MODE (op) == VOIDmode)
963 break;
964
965 /* If operand is something known to be positive, ignore the ABS. */
966 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
967 || val_signbit_known_clear_p (GET_MODE (op),
968 nonzero_bits (op, GET_MODE (op))))
969 return op;
970
971 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
972 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
973 return gen_rtx_NEG (mode, op);
974
975 break;
976
977 case FFS:
978 /* (ffs (*_extend <X>)) = (ffs <X>) */
979 if (GET_CODE (op) == SIGN_EXTEND
980 || GET_CODE (op) == ZERO_EXTEND)
981 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
982 GET_MODE (XEXP (op, 0)));
983 break;
984
985 case POPCOUNT:
986 switch (GET_CODE (op))
987 {
988 case BSWAP:
989 case ZERO_EXTEND:
990 /* (popcount (zero_extend <X>)) = (popcount <X>) */
991 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
992 GET_MODE (XEXP (op, 0)));
993
994 case ROTATE:
995 case ROTATERT:
996 /* Rotations don't affect popcount. */
997 if (!side_effects_p (XEXP (op, 1)))
998 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
999 GET_MODE (XEXP (op, 0)));
1000 break;
1001
1002 default:
1003 break;
1004 }
1005 break;
1006
1007 case PARITY:
1008 switch (GET_CODE (op))
1009 {
1010 case NOT:
1011 case BSWAP:
1012 case ZERO_EXTEND:
1013 case SIGN_EXTEND:
1014 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1015 GET_MODE (XEXP (op, 0)));
1016
1017 case ROTATE:
1018 case ROTATERT:
1019 /* Rotations don't affect parity. */
1020 if (!side_effects_p (XEXP (op, 1)))
1021 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1022 GET_MODE (XEXP (op, 0)));
1023 break;
1024
1025 default:
1026 break;
1027 }
1028 break;
1029
1030 case BSWAP:
1031 /* (bswap (bswap x)) -> x. */
1032 if (GET_CODE (op) == BSWAP)
1033 return XEXP (op, 0);
1034 break;
1035
1036 case FLOAT:
1037 /* (float (sign_extend <X>)) = (float <X>). */
1038 if (GET_CODE (op) == SIGN_EXTEND)
1039 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1040 GET_MODE (XEXP (op, 0)));
1041 break;
1042
1043 case SIGN_EXTEND:
1044 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1045 becomes just the MINUS if its mode is MODE. This allows
1046 folding switch statements on machines using casesi (such as
1047 the VAX). */
1048 if (GET_CODE (op) == TRUNCATE
1049 && GET_MODE (XEXP (op, 0)) == mode
1050 && GET_CODE (XEXP (op, 0)) == MINUS
1051 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1052 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1053 return XEXP (op, 0);
1054
1055 /* Extending a widening multiplication should be canonicalized to
1056 a wider widening multiplication. */
1057 if (GET_CODE (op) == MULT)
1058 {
1059 rtx lhs = XEXP (op, 0);
1060 rtx rhs = XEXP (op, 1);
1061 enum rtx_code lcode = GET_CODE (lhs);
1062 enum rtx_code rcode = GET_CODE (rhs);
1063
1064 /* Widening multiplies usually extend both operands, but sometimes
1065 they use a shift to extract a portion of a register. */
1066 if ((lcode == SIGN_EXTEND
1067 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1068 && (rcode == SIGN_EXTEND
1069 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1070 {
1071 enum machine_mode lmode = GET_MODE (lhs);
1072 enum machine_mode rmode = GET_MODE (rhs);
1073 int bits;
1074
1075 if (lcode == ASHIFTRT)
1076 /* Number of bits not shifted off the end. */
1077 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1078 else /* lcode == SIGN_EXTEND */
1079 /* Size of inner mode. */
1080 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1081
1082 if (rcode == ASHIFTRT)
1083 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1084 else /* rcode == SIGN_EXTEND */
1085 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1086
1087 /* We can only widen multiplies if the result is mathematically
1088 equivalent. I.e. if overflow was impossible. */
1089 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1090 return simplify_gen_binary
1091 (MULT, mode,
1092 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1093 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1094 }
1095 }
1096
1097 /* Check for a sign extension of a subreg of a promoted
1098 variable, where the promotion is sign-extended, and the
1099 target mode is the same as the variable's promotion. */
1100 if (GET_CODE (op) == SUBREG
1101 && SUBREG_PROMOTED_VAR_P (op)
1102 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1103 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1104 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1105
1106 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1107 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1108 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1109 {
1110 gcc_assert (GET_MODE_BITSIZE (mode)
1111 > GET_MODE_BITSIZE (GET_MODE (op)));
1112 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1113 GET_MODE (XEXP (op, 0)));
1114 }
1115
1116 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1117 is (sign_extend:M (subreg:O <X>)) if there is mode with
1118 GET_MODE_BITSIZE (N) - I bits.
1119 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1120 is similarly (zero_extend:M (subreg:O <X>)). */
1121 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1122 && GET_CODE (XEXP (op, 0)) == ASHIFT
1123 && CONST_INT_P (XEXP (op, 1))
1124 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1125 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1126 {
1127 enum machine_mode tmode
1128 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1129 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1130 gcc_assert (GET_MODE_BITSIZE (mode)
1131 > GET_MODE_BITSIZE (GET_MODE (op)));
1132 if (tmode != BLKmode)
1133 {
1134 rtx inner =
1135 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1136 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1137 ? SIGN_EXTEND : ZERO_EXTEND,
1138 mode, inner, tmode);
1139 }
1140 }
1141
1142 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1143 /* As we do not know which address space the pointer is referring to,
1144 we can do this only if the target does not support different pointer
1145 or address modes depending on the address space. */
1146 if (target_default_pointer_address_modes_p ()
1147 && ! POINTERS_EXTEND_UNSIGNED
1148 && mode == Pmode && GET_MODE (op) == ptr_mode
1149 && (CONSTANT_P (op)
1150 || (GET_CODE (op) == SUBREG
1151 && REG_P (SUBREG_REG (op))
1152 && REG_POINTER (SUBREG_REG (op))
1153 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1154 return convert_memory_address (Pmode, op);
1155 #endif
1156 break;
1157
1158 case ZERO_EXTEND:
1159 /* Check for a zero extension of a subreg of a promoted
1160 variable, where the promotion is zero-extended, and the
1161 target mode is the same as the variable's promotion. */
1162 if (GET_CODE (op) == SUBREG
1163 && SUBREG_PROMOTED_VAR_P (op)
1164 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1165 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1166 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1167
1168 /* Extending a widening multiplication should be canonicalized to
1169 a wider widening multiplication. */
1170 if (GET_CODE (op) == MULT)
1171 {
1172 rtx lhs = XEXP (op, 0);
1173 rtx rhs = XEXP (op, 1);
1174 enum rtx_code lcode = GET_CODE (lhs);
1175 enum rtx_code rcode = GET_CODE (rhs);
1176
1177 /* Widening multiplies usually extend both operands, but sometimes
1178 they use a shift to extract a portion of a register. */
1179 if ((lcode == ZERO_EXTEND
1180 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1181 && (rcode == ZERO_EXTEND
1182 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1183 {
1184 enum machine_mode lmode = GET_MODE (lhs);
1185 enum machine_mode rmode = GET_MODE (rhs);
1186 int bits;
1187
1188 if (lcode == LSHIFTRT)
1189 /* Number of bits not shifted off the end. */
1190 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1191 else /* lcode == ZERO_EXTEND */
1192 /* Size of inner mode. */
1193 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1194
1195 if (rcode == LSHIFTRT)
1196 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1197 else /* rcode == ZERO_EXTEND */
1198 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1199
1200 /* We can only widen multiplies if the result is mathematically
1201 equivalent. I.e. if overflow was impossible. */
1202 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1203 return simplify_gen_binary
1204 (MULT, mode,
1205 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1206 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1207 }
1208 }
1209
1210 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1211 if (GET_CODE (op) == ZERO_EXTEND)
1212 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1213 GET_MODE (XEXP (op, 0)));
1214
1215 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1216 is (zero_extend:M (subreg:O <X>)) if there is mode with
1217 GET_MODE_BITSIZE (N) - I bits. */
1218 if (GET_CODE (op) == LSHIFTRT
1219 && GET_CODE (XEXP (op, 0)) == ASHIFT
1220 && CONST_INT_P (XEXP (op, 1))
1221 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1222 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1223 {
1224 enum machine_mode tmode
1225 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1226 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1227 if (tmode != BLKmode)
1228 {
1229 rtx inner =
1230 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1231 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1232 }
1233 }
1234
1235 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1236 /* As we do not know which address space the pointer is referring to,
1237 we can do this only if the target does not support different pointer
1238 or address modes depending on the address space. */
1239 if (target_default_pointer_address_modes_p ()
1240 && POINTERS_EXTEND_UNSIGNED > 0
1241 && mode == Pmode && GET_MODE (op) == ptr_mode
1242 && (CONSTANT_P (op)
1243 || (GET_CODE (op) == SUBREG
1244 && REG_P (SUBREG_REG (op))
1245 && REG_POINTER (SUBREG_REG (op))
1246 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1247 return convert_memory_address (Pmode, op);
1248 #endif
1249 break;
1250
1251 default:
1252 break;
1253 }
1254
1255 return 0;
1256 }
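
/* Illustrative sketch, not part of the original file: two of the
   non-constant simplifications above, driven through the public entry
   point.  The helper name is made up; the block is kept under #if 0 so it
   never affects the build.  */
#if 0
static void
simplify_unary_operation_sketch (void)
{
  rtx reg = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER);

  /* (not (not X)) is X.  */
  gcc_assert (simplify_unary_operation (NOT, SImode,
                                        gen_rtx_NOT (SImode, reg),
                                        SImode) == reg);

  /* (neg (neg X)) is X.  */
  gcc_assert (simplify_unary_operation (NEG, SImode,
                                        gen_rtx_NEG (SImode, reg),
                                        SImode) == reg);
}
#endif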
1257
1258 /* Try to compute the value of a unary operation CODE whose output mode is to
1259 be MODE with input operand OP whose mode was originally OP_MODE.
1260 Return zero if the value cannot be computed. */
1261 rtx
1262 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1263 rtx op, enum machine_mode op_mode)
1264 {
1265 unsigned int width = GET_MODE_PRECISION (mode);
1266 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1267
1268 if (code == VEC_DUPLICATE)
1269 {
1270 gcc_assert (VECTOR_MODE_P (mode));
1271 if (GET_MODE (op) != VOIDmode)
1272 {
1273 if (!VECTOR_MODE_P (GET_MODE (op)))
1274 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1275 else
1276 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1277 (GET_MODE (op)));
1278 }
1279 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1280 || GET_CODE (op) == CONST_VECTOR)
1281 {
1282 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1283 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1284 rtvec v = rtvec_alloc (n_elts);
1285 unsigned int i;
1286
1287 if (GET_CODE (op) != CONST_VECTOR)
1288 for (i = 0; i < n_elts; i++)
1289 RTVEC_ELT (v, i) = op;
1290 else
1291 {
1292 enum machine_mode inmode = GET_MODE (op);
1293 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1294 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1295
1296 gcc_assert (in_n_elts < n_elts);
1297 gcc_assert ((n_elts % in_n_elts) == 0);
1298 for (i = 0; i < n_elts; i++)
1299 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1300 }
1301 return gen_rtx_CONST_VECTOR (mode, v);
1302 }
1303 }
1304
1305 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1306 {
1307 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1308 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1309 enum machine_mode opmode = GET_MODE (op);
1310 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1311 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1312 rtvec v = rtvec_alloc (n_elts);
1313 unsigned int i;
1314
1315 gcc_assert (op_n_elts == n_elts);
1316 for (i = 0; i < n_elts; i++)
1317 {
1318 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1319 CONST_VECTOR_ELT (op, i),
1320 GET_MODE_INNER (opmode));
1321 if (!x)
1322 return 0;
1323 RTVEC_ELT (v, i) = x;
1324 }
1325 return gen_rtx_CONST_VECTOR (mode, v);
1326 }
1327
1328 /* The order of these tests is critical so that, for example, we don't
1329 check the wrong mode (input vs. output) for a conversion operation,
1330 such as FIX. At some point, this should be simplified. */
1331
1332 if (code == FLOAT && GET_MODE (op) == VOIDmode
1333 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1334 {
1335 HOST_WIDE_INT hv, lv;
1336 REAL_VALUE_TYPE d;
1337
1338 if (CONST_INT_P (op))
1339 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1340 else
1341 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1342
1343 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1344 d = real_value_truncate (mode, d);
1345 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1346 }
1347 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1348 && (GET_CODE (op) == CONST_DOUBLE
1349 || CONST_INT_P (op)))
1350 {
1351 HOST_WIDE_INT hv, lv;
1352 REAL_VALUE_TYPE d;
1353
1354 if (CONST_INT_P (op))
1355 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1356 else
1357 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1358
1359 if (op_mode == VOIDmode
1360 || GET_MODE_PRECISION (op_mode) > 2 * HOST_BITS_PER_WIDE_INT)
1361 /* We should never get a negative number. */
1362 gcc_assert (hv >= 0);
1363 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1364 hv = 0, lv &= GET_MODE_MASK (op_mode);
1365
1366 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1367 d = real_value_truncate (mode, d);
1368 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1369 }
1370
1371 if (CONST_INT_P (op)
1372 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1373 {
1374 HOST_WIDE_INT arg0 = INTVAL (op);
1375 HOST_WIDE_INT val;
1376
1377 switch (code)
1378 {
1379 case NOT:
1380 val = ~ arg0;
1381 break;
1382
1383 case NEG:
1384 val = - arg0;
1385 break;
1386
1387 case ABS:
1388 val = (arg0 >= 0 ? arg0 : - arg0);
1389 break;
1390
1391 case FFS:
1392 arg0 &= GET_MODE_MASK (mode);
1393 val = ffs_hwi (arg0);
1394 break;
1395
1396 case CLZ:
1397 arg0 &= GET_MODE_MASK (mode);
1398 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1399 ;
1400 else
1401 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1402 break;
1403
1404 case CLRSB:
1405 arg0 &= GET_MODE_MASK (mode);
1406 if (arg0 == 0)
1407 val = GET_MODE_PRECISION (mode) - 1;
1408 else if (arg0 >= 0)
1409 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1410 else if (arg0 < 0)
1411 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1412 break;
1413
1414 case CTZ:
1415 arg0 &= GET_MODE_MASK (mode);
1416 if (arg0 == 0)
1417 {
1418 /* Even if the value at zero is undefined, we have to come
1419 up with some replacement. Seems good enough. */
1420 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1421 val = GET_MODE_PRECISION (mode);
1422 }
1423 else
1424 val = ctz_hwi (arg0);
1425 break;
1426
1427 case POPCOUNT:
1428 arg0 &= GET_MODE_MASK (mode);
1429 val = 0;
1430 while (arg0)
1431 val++, arg0 &= arg0 - 1;
1432 break;
1433
1434 case PARITY:
1435 arg0 &= GET_MODE_MASK (mode);
1436 val = 0;
1437 while (arg0)
1438 val++, arg0 &= arg0 - 1;
1439 val &= 1;
1440 break;
1441
1442 case BSWAP:
1443 {
1444 unsigned int s;
1445
1446 val = 0;
1447 for (s = 0; s < width; s += 8)
1448 {
1449 unsigned int d = width - s - 8;
1450 unsigned HOST_WIDE_INT byte;
1451 byte = (arg0 >> s) & 0xff;
1452 val |= byte << d;
1453 }
1454 }
1455 break;
1456
1457 case TRUNCATE:
1458 val = arg0;
1459 break;
1460
1461 case ZERO_EXTEND:
1462 /* When zero-extending a CONST_INT, we need to know its
1463 original mode. */
1464 gcc_assert (op_mode != VOIDmode);
1465 if (op_width == HOST_BITS_PER_WIDE_INT)
1466 {
1467 /* If we were really extending the mode,
1468 we would have to distinguish between zero-extension
1469 and sign-extension. */
1470 gcc_assert (width == op_width);
1471 val = arg0;
1472 }
1473 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1474 val = arg0 & GET_MODE_MASK (op_mode);
1475 else
1476 return 0;
1477 break;
1478
1479 case SIGN_EXTEND:
1480 if (op_mode == VOIDmode)
1481 op_mode = mode;
1482 op_width = GET_MODE_PRECISION (op_mode);
1483 if (op_width == HOST_BITS_PER_WIDE_INT)
1484 {
1485 /* If we were really extending the mode,
1486 we would have to distinguish between zero-extension
1487 and sign-extension. */
1488 gcc_assert (width == op_width);
1489 val = arg0;
1490 }
1491 else if (op_width < HOST_BITS_PER_WIDE_INT)
1492 {
1493 val = arg0 & GET_MODE_MASK (op_mode);
1494 if (val_signbit_known_set_p (op_mode, val))
1495 val |= ~GET_MODE_MASK (op_mode);
1496 }
1497 else
1498 return 0;
1499 break;
1500
1501 case SQRT:
1502 case FLOAT_EXTEND:
1503 case FLOAT_TRUNCATE:
1504 case SS_TRUNCATE:
1505 case US_TRUNCATE:
1506 case SS_NEG:
1507 case US_NEG:
1508 case SS_ABS:
1509 return 0;
1510
1511 default:
1512 gcc_unreachable ();
1513 }
1514
1515 return gen_int_mode (val, mode);
1516 }
1517
1518 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1519 for a DImode operation on a CONST_INT. */
1520 else if (GET_MODE (op) == VOIDmode
1521 && width <= HOST_BITS_PER_WIDE_INT * 2
1522 && (GET_CODE (op) == CONST_DOUBLE
1523 || CONST_INT_P (op)))
1524 {
1525 unsigned HOST_WIDE_INT l1, lv;
1526 HOST_WIDE_INT h1, hv;
1527
1528 if (GET_CODE (op) == CONST_DOUBLE)
1529 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1530 else
1531 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1532
1533 switch (code)
1534 {
1535 case NOT:
1536 lv = ~ l1;
1537 hv = ~ h1;
1538 break;
1539
1540 case NEG:
1541 neg_double (l1, h1, &lv, &hv);
1542 break;
1543
1544 case ABS:
1545 if (h1 < 0)
1546 neg_double (l1, h1, &lv, &hv);
1547 else
1548 lv = l1, hv = h1;
1549 break;
1550
1551 case FFS:
1552 hv = 0;
1553 if (l1 != 0)
1554 lv = ffs_hwi (l1);
1555 else if (h1 != 0)
1556 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1557 else
1558 lv = 0;
1559 break;
1560
1561 case CLZ:
1562 hv = 0;
1563 if (h1 != 0)
1564 lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
1565 - HOST_BITS_PER_WIDE_INT;
1566 else if (l1 != 0)
1567 lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
1568 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1569 lv = GET_MODE_PRECISION (mode);
1570 break;
1571
1572 case CTZ:
1573 hv = 0;
1574 if (l1 != 0)
1575 lv = ctz_hwi (l1);
1576 else if (h1 != 0)
1577 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1578 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1579 lv = GET_MODE_PRECISION (mode);
1580 break;
1581
1582 case POPCOUNT:
1583 hv = 0;
1584 lv = 0;
1585 while (l1)
1586 lv++, l1 &= l1 - 1;
1587 while (h1)
1588 lv++, h1 &= h1 - 1;
1589 break;
1590
1591 case PARITY:
1592 hv = 0;
1593 lv = 0;
1594 while (l1)
1595 lv++, l1 &= l1 - 1;
1596 while (h1)
1597 lv++, h1 &= h1 - 1;
1598 lv &= 1;
1599 break;
1600
1601 case BSWAP:
1602 {
1603 unsigned int s;
1604
1605 hv = 0;
1606 lv = 0;
1607 for (s = 0; s < width; s += 8)
1608 {
1609 unsigned int d = width - s - 8;
1610 unsigned HOST_WIDE_INT byte;
1611
1612 if (s < HOST_BITS_PER_WIDE_INT)
1613 byte = (l1 >> s) & 0xff;
1614 else
1615 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1616
1617 if (d < HOST_BITS_PER_WIDE_INT)
1618 lv |= byte << d;
1619 else
1620 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1621 }
1622 }
1623 break;
1624
1625 case TRUNCATE:
1626 /* This is just a change-of-mode, so do nothing. */
1627 lv = l1, hv = h1;
1628 break;
1629
1630 case ZERO_EXTEND:
1631 gcc_assert (op_mode != VOIDmode);
1632
1633 if (op_width > HOST_BITS_PER_WIDE_INT)
1634 return 0;
1635
1636 hv = 0;
1637 lv = l1 & GET_MODE_MASK (op_mode);
1638 break;
1639
1640 case SIGN_EXTEND:
1641 if (op_mode == VOIDmode
1642 || op_width > HOST_BITS_PER_WIDE_INT)
1643 return 0;
1644 else
1645 {
1646 lv = l1 & GET_MODE_MASK (op_mode);
1647 if (val_signbit_known_set_p (op_mode, lv))
1648 lv |= ~GET_MODE_MASK (op_mode);
1649
1650 hv = HWI_SIGN_EXTEND (lv);
1651 }
1652 break;
1653
1654 case SQRT:
1655 return 0;
1656
1657 default:
1658 return 0;
1659 }
1660
1661 return immed_double_const (lv, hv, mode);
1662 }
1663
1664 else if (GET_CODE (op) == CONST_DOUBLE
1665 && SCALAR_FLOAT_MODE_P (mode)
1666 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1667 {
1668 REAL_VALUE_TYPE d, t;
1669 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1670
1671 switch (code)
1672 {
1673 case SQRT:
1674 if (HONOR_SNANS (mode) && real_isnan (&d))
1675 return 0;
1676 real_sqrt (&t, mode, &d);
1677 d = t;
1678 break;
1679 case ABS:
1680 d = real_value_abs (&d);
1681 break;
1682 case NEG:
1683 d = real_value_negate (&d);
1684 break;
1685 case FLOAT_TRUNCATE:
1686 d = real_value_truncate (mode, d);
1687 break;
1688 case FLOAT_EXTEND:
1689 /* All this does is change the mode, unless changing
1690 mode class. */
1691 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1692 real_convert (&d, mode, &d);
1693 break;
1694 case FIX:
1695 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1696 break;
1697 case NOT:
1698 {
1699 long tmp[4];
1700 int i;
1701
1702 real_to_target (tmp, &d, GET_MODE (op));
1703 for (i = 0; i < 4; i++)
1704 tmp[i] = ~tmp[i];
1705 real_from_target (&d, tmp, mode);
1706 break;
1707 }
1708 default:
1709 gcc_unreachable ();
1710 }
1711 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1712 }
1713
1714 else if (GET_CODE (op) == CONST_DOUBLE
1715 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1716 && GET_MODE_CLASS (mode) == MODE_INT
1717 && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
1718 {
1719 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1720 operators are intentionally left unspecified (to ease implementation
1721 by target backends), for consistency, this routine implements the
1722 same semantics for constant folding as used by the middle-end. */
1723
1724 /* This was formerly used only for non-IEEE float.
1725 eggert@twinsun.com says it is safe for IEEE also. */
1726 HOST_WIDE_INT xh, xl, th, tl;
1727 REAL_VALUE_TYPE x, t;
1728 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1729 switch (code)
1730 {
1731 case FIX:
1732 if (REAL_VALUE_ISNAN (x))
1733 return const0_rtx;
1734
1735 /* Test against the signed upper bound. */
1736 if (width > HOST_BITS_PER_WIDE_INT)
1737 {
1738 th = ((unsigned HOST_WIDE_INT) 1
1739 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1740 tl = -1;
1741 }
1742 else
1743 {
1744 th = 0;
1745 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1746 }
1747 real_from_integer (&t, VOIDmode, tl, th, 0);
1748 if (REAL_VALUES_LESS (t, x))
1749 {
1750 xh = th;
1751 xl = tl;
1752 break;
1753 }
1754
1755 /* Test against the signed lower bound. */
1756 if (width > HOST_BITS_PER_WIDE_INT)
1757 {
1758 th = (unsigned HOST_WIDE_INT) (-1)
1759 << (width - HOST_BITS_PER_WIDE_INT - 1);
1760 tl = 0;
1761 }
1762 else
1763 {
1764 th = -1;
1765 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1766 }
1767 real_from_integer (&t, VOIDmode, tl, th, 0);
1768 if (REAL_VALUES_LESS (x, t))
1769 {
1770 xh = th;
1771 xl = tl;
1772 break;
1773 }
1774 REAL_VALUE_TO_INT (&xl, &xh, x);
1775 break;
1776
1777 case UNSIGNED_FIX:
1778 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1779 return const0_rtx;
1780
1781 /* Test against the unsigned upper bound. */
1782 if (width == 2 * HOST_BITS_PER_WIDE_INT)
1783 {
1784 th = -1;
1785 tl = -1;
1786 }
1787 else if (width >= HOST_BITS_PER_WIDE_INT)
1788 {
1789 th = ((unsigned HOST_WIDE_INT) 1
1790 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1791 tl = -1;
1792 }
1793 else
1794 {
1795 th = 0;
1796 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1797 }
1798 real_from_integer (&t, VOIDmode, tl, th, 1);
1799 if (REAL_VALUES_LESS (t, x))
1800 {
1801 xh = th;
1802 xl = tl;
1803 break;
1804 }
1805
1806 REAL_VALUE_TO_INT (&xl, &xh, x);
1807 break;
1808
1809 default:
1810 gcc_unreachable ();
1811 }
1812 return immed_double_const (xl, xh, mode);
1813 }
1814
1815 return NULL_RTX;
1816 }
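
/* Illustrative sketch, not part of the original file: constant folding
   through the routine above, using the 32-bit SImode and 8-bit QImode
   precisions.  The helper name is made up; the block is kept under #if 0
   so it never affects the build.  */
#if 0
static void
simplify_const_unary_operation_sketch (void)
{
  rtx neg = simplify_const_unary_operation (NEG, SImode,
                                            GEN_INT (5), SImode);
  rtx ext = simplify_const_unary_operation (ZERO_EXTEND, SImode,
                                            gen_int_mode (-1, QImode),
                                            QImode);

  gcc_assert (CONST_INT_P (neg) && INTVAL (neg) == -5);
  /* Zero extending QImode -1 (0xff) gives 255.  */
  gcc_assert (CONST_INT_P (ext) && INTVAL (ext) == 255);
}
#endif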
1817 \f
1818 /* Subroutine of simplify_binary_operation to simplify a commutative,
1819 associative binary operation CODE with result mode MODE, operating
1820 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1821 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1822 canonicalization is possible. */
1823
1824 static rtx
1825 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1826 rtx op0, rtx op1)
1827 {
1828 rtx tem;
1829
1830 /* Linearize the operator to the left. */
1831 if (GET_CODE (op1) == code)
1832 {
1833 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
1834 if (GET_CODE (op0) == code)
1835 {
1836 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1837 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1838 }
1839
1840 /* "a op (b op c)" becomes "(b op c) op a". */
1841 if (! swap_commutative_operands_p (op1, op0))
1842 return simplify_gen_binary (code, mode, op1, op0);
1843
1844 tem = op0;
1845 op0 = op1;
1846 op1 = tem;
1847 }
1848
1849 if (GET_CODE (op0) == code)
1850 {
1851 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1852 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1853 {
1854 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1855 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1856 }
1857
1858 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1859 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1860 if (tem != 0)
1861 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1862
1863 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1864 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1865 if (tem != 0)
1866 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1867 }
1868
1869 return 0;
1870 }
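
/* Illustrative sketch, not part of the original file: the left
   linearization performed above, observed through simplify_gen_binary on
   an AND of four fresh pseudos.  The helper name and register numbers are
   made up; the block is kept under #if 0 so it never affects the build.  */
#if 0
static void
simplify_associative_operation_sketch (void)
{
  unsigned int r = FIRST_PSEUDO_REGISTER;
  rtx a = gen_rtx_REG (SImode, r);
  rtx b = gen_rtx_REG (SImode, r + 1);
  rtx c = gen_rtx_REG (SImode, r + 2);
  rtx d = gen_rtx_REG (SImode, r + 3);
  rtx ab = simplify_gen_binary (AND, SImode, a, b);
  rtx cd = simplify_gen_binary (AND, SImode, c, d);
  rtx all = simplify_gen_binary (AND, SImode, ab, cd);

  /* (and (and a b) (and c d)) is rebuilt as (and (and (and a b) c) d).  */
  gcc_assert (GET_CODE (all) == AND && XEXP (all, 1) == d);
}
#endif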
1871
1872
1873 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1874 and OP1. Return 0 if no simplification is possible.
1875
1876 Don't use this for relational operations such as EQ or LT.
1877 Use simplify_relational_operation instead. */
1878 rtx
1879 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1880 rtx op0, rtx op1)
1881 {
1882 rtx trueop0, trueop1;
1883 rtx tem;
1884
1885 /* Relational operations don't work here. We must know the mode
1886 of the operands in order to do the comparison correctly.
1887 Assuming a full word can give incorrect results.
1888 Consider comparing 128 with -128 in QImode. */
1889 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1890 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1891
1892 /* Make sure the constant is second. */
1893 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1894 && swap_commutative_operands_p (op0, op1))
1895 {
1896 tem = op0, op0 = op1, op1 = tem;
1897 }
1898
1899 trueop0 = avoid_constant_pool_reference (op0);
1900 trueop1 = avoid_constant_pool_reference (op1);
1901
1902 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1903 if (tem)
1904 return tem;
1905 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1906 }
1907
1908 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1909 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1910 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1911 actual constants. */
1912
1913 static rtx
1914 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1915 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1916 {
1917 rtx tem, reversed, opleft, opright;
1918 HOST_WIDE_INT val;
1919 unsigned int width = GET_MODE_PRECISION (mode);
1920
1921 /* Even if we can't compute a constant result,
1922 there are some cases worth simplifying. */
1923
1924 switch (code)
1925 {
1926 case PLUS:
1927 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1928 when x is NaN, infinite, or finite and nonzero. They aren't
1929 when x is -0 and the rounding mode is not towards -infinity,
1930 since (-0) + 0 is then 0. */
1931 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1932 return op0;
1933
1934 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1935 transformations are safe even for IEEE. */
1936 if (GET_CODE (op0) == NEG)
1937 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1938 else if (GET_CODE (op1) == NEG)
1939 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1940
1941 /* (~a) + 1 -> -a */
1942 if (INTEGRAL_MODE_P (mode)
1943 && GET_CODE (op0) == NOT
1944 && trueop1 == const1_rtx)
1945 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1946
1947 /* Handle both-operands-constant cases. We can only add
1948 CONST_INTs to constants since the sum of relocatable symbols
1949 can't be handled by most assemblers. Don't add CONST_INT
1950 to CONST_INT since overflow won't be computed properly if wider
1951 than HOST_BITS_PER_WIDE_INT. */
1952
1953 if ((GET_CODE (op0) == CONST
1954 || GET_CODE (op0) == SYMBOL_REF
1955 || GET_CODE (op0) == LABEL_REF)
1956 && CONST_INT_P (op1))
1957 return plus_constant (mode, op0, INTVAL (op1));
1958 else if ((GET_CODE (op1) == CONST
1959 || GET_CODE (op1) == SYMBOL_REF
1960 || GET_CODE (op1) == LABEL_REF)
1961 && CONST_INT_P (op0))
1962 return plus_constant (mode, op1, INTVAL (op0));
1963
1964 /* See if this is something like X * C - X or vice versa or
1965 if the multiplication is written as a shift. If so, we can
1966 distribute and make a new multiply, shift, or maybe just
1967 have X (if C is 2 in the example above). But don't make
1968 something more expensive than we had before. */
1969
1970 if (SCALAR_INT_MODE_P (mode))
1971 {
1972 double_int coeff0, coeff1;
1973 rtx lhs = op0, rhs = op1;
1974
1975 coeff0 = double_int_one;
1976 coeff1 = double_int_one;
1977
1978 if (GET_CODE (lhs) == NEG)
1979 {
1980 coeff0 = double_int_minus_one;
1981 lhs = XEXP (lhs, 0);
1982 }
1983 else if (GET_CODE (lhs) == MULT
1984 && CONST_INT_P (XEXP (lhs, 1)))
1985 {
1986 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1987 lhs = XEXP (lhs, 0);
1988 }
1989 else if (GET_CODE (lhs) == ASHIFT
1990 && CONST_INT_P (XEXP (lhs, 1))
1991 && INTVAL (XEXP (lhs, 1)) >= 0
1992 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1993 {
1994 coeff0 = double_int_setbit (double_int_zero,
1995 INTVAL (XEXP (lhs, 1)));
1996 lhs = XEXP (lhs, 0);
1997 }
1998
1999 if (GET_CODE (rhs) == NEG)
2000 {
2001 coeff1 = double_int_minus_one;
2002 rhs = XEXP (rhs, 0);
2003 }
2004 else if (GET_CODE (rhs) == MULT
2005 && CONST_INT_P (XEXP (rhs, 1)))
2006 {
2007 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
2008 rhs = XEXP (rhs, 0);
2009 }
2010 else if (GET_CODE (rhs) == ASHIFT
2011 && CONST_INT_P (XEXP (rhs, 1))
2012 && INTVAL (XEXP (rhs, 1)) >= 0
2013 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2014 {
2015 coeff1 = double_int_setbit (double_int_zero,
2016 INTVAL (XEXP (rhs, 1)));
2017 rhs = XEXP (rhs, 0);
2018 }
2019
2020 if (rtx_equal_p (lhs, rhs))
2021 {
2022 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2023 rtx coeff;
2024 double_int val;
2025 bool speed = optimize_function_for_speed_p (cfun);
2026
2027 val = double_int_add (coeff0, coeff1);
2028 coeff = immed_double_int_const (val, mode);
2029
2030 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2031 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2032 ? tem : 0;
2033 }
2034 }
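/* A worked example of the distribution above (values chosen here for
   exposition): (plus (mult x (const_int 6)) (ashift x (const_int 2)))
   has coefficients 6 and 4 for the same operand x, so it becomes
   (mult x (const_int 10)), and the result is kept only if it costs no
   more than the original expression.  */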
2035
2036 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2037 if ((CONST_INT_P (op1)
2038 || GET_CODE (op1) == CONST_DOUBLE)
2039 && GET_CODE (op0) == XOR
2040 && (CONST_INT_P (XEXP (op0, 1))
2041 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2042 && mode_signbit_p (mode, op1))
2043 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2044 simplify_gen_binary (XOR, mode, op1,
2045 XEXP (op0, 1)));
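/* For instance (constants chosen for exposition): in QImode the sign
   bit is 0x80, and adding it modulo 256 is the same as flipping it, so
   (plus (xor x C1) C2) with C2 the sign-bit constant becomes
   (xor x (C1 ^ 0x80)).  */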
2046
2047 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2048 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2049 && GET_CODE (op0) == MULT
2050 && GET_CODE (XEXP (op0, 0)) == NEG)
2051 {
2052 rtx in1, in2;
2053
2054 in1 = XEXP (XEXP (op0, 0), 0);
2055 in2 = XEXP (op0, 1);
2056 return simplify_gen_binary (MINUS, mode, op1,
2057 simplify_gen_binary (MULT, mode,
2058 in1, in2));
2059 }
2060
2061 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2062 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2063 is 1. */
2064 if (COMPARISON_P (op0)
2065 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2066 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2067 && (reversed = reversed_comparison (op0, mode)))
2068 return
2069 simplify_gen_unary (NEG, mode, reversed, mode);
2070
2071 /* If one of the operands is a PLUS or a MINUS, see if we can
2072 simplify this by the associative law.
2073 Don't use the associative law for floating point.
2074 The inaccuracy makes it nonassociative,
2075 and subtle programs can break if operations are associated. */
2076
2077 if (INTEGRAL_MODE_P (mode)
2078 && (plus_minus_operand_p (op0)
2079 || plus_minus_operand_p (op1))
2080 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2081 return tem;
2082
2083 /* Reassociate floating point addition only when the user
2084 specifies associative math operations. */
2085 if (FLOAT_MODE_P (mode)
2086 && flag_associative_math)
2087 {
2088 tem = simplify_associative_operation (code, mode, op0, op1);
2089 if (tem)
2090 return tem;
2091 }
2092 break;
2093
2094 case COMPARE:
2095 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2096 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2097 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2098 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2099 {
2100 rtx xop00 = XEXP (op0, 0);
2101 rtx xop10 = XEXP (op1, 0);
2102
2103 #ifdef HAVE_cc0
2104 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2105 #else
2106 if (REG_P (xop00) && REG_P (xop10)
2107 && GET_MODE (xop00) == GET_MODE (xop10)
2108 && REGNO (xop00) == REGNO (xop10)
2109 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2110 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2111 #endif
2112 return xop00;
2113 }
2114 break;
2115
2116 case MINUS:
2117 /* We can't assume x-x is 0 even with non-IEEE floating point,
2118 but since it is zero except in very strange circumstances, we
2119 will treat it as zero with -ffinite-math-only. */
2120 if (rtx_equal_p (trueop0, trueop1)
2121 && ! side_effects_p (op0)
2122 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2123 return CONST0_RTX (mode);
2124
2125 /* Change subtraction from zero into negation. (0 - x) is the
2126 same as -x when x is NaN, infinite, or finite and nonzero.
2127 But if the mode has signed zeros, and does not round towards
2128 -infinity, then 0 - 0 is 0, not -0. */
2129 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2130 return simplify_gen_unary (NEG, mode, op1, mode);
2131
2132 /* (-1 - a) is ~a. */
2133 if (trueop0 == constm1_rtx)
2134 return simplify_gen_unary (NOT, mode, op1, mode);
2135
2136 /* Subtracting 0 has no effect unless the mode has signed zeros
2137 and supports rounding towards -infinity. In such a case,
2138 0 - 0 is -0. */
2139 if (!(HONOR_SIGNED_ZEROS (mode)
2140 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2141 && trueop1 == CONST0_RTX (mode))
2142 return op0;
2143
2144 /* See if this is something like X * C - X or vice versa or
2145 if the multiplication is written as a shift. If so, we can
2146 distribute and make a new multiply, shift, or maybe just
2147 have X (if C is 2 in the example above). But don't make
2148 something more expensive than we had before. */
2149
2150 if (SCALAR_INT_MODE_P (mode))
2151 {
2152 double_int coeff0, negcoeff1;
2153 rtx lhs = op0, rhs = op1;
2154
2155 coeff0 = double_int_one;
2156 negcoeff1 = double_int_minus_one;
2157
2158 if (GET_CODE (lhs) == NEG)
2159 {
2160 coeff0 = double_int_minus_one;
2161 lhs = XEXP (lhs, 0);
2162 }
2163 else if (GET_CODE (lhs) == MULT
2164 && CONST_INT_P (XEXP (lhs, 1)))
2165 {
2166 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2167 lhs = XEXP (lhs, 0);
2168 }
2169 else if (GET_CODE (lhs) == ASHIFT
2170 && CONST_INT_P (XEXP (lhs, 1))
2171 && INTVAL (XEXP (lhs, 1)) >= 0
2172 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2173 {
2174 coeff0 = double_int_setbit (double_int_zero,
2175 INTVAL (XEXP (lhs, 1)));
2176 lhs = XEXP (lhs, 0);
2177 }
2178
2179 if (GET_CODE (rhs) == NEG)
2180 {
2181 negcoeff1 = double_int_one;
2182 rhs = XEXP (rhs, 0);
2183 }
2184 else if (GET_CODE (rhs) == MULT
2185 && CONST_INT_P (XEXP (rhs, 1)))
2186 {
2187 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2188 rhs = XEXP (rhs, 0);
2189 }
2190 else if (GET_CODE (rhs) == ASHIFT
2191 && CONST_INT_P (XEXP (rhs, 1))
2192 && INTVAL (XEXP (rhs, 1)) >= 0
2193 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2194 {
2195 negcoeff1 = double_int_setbit (double_int_zero,
2196 INTVAL (XEXP (rhs, 1)));
2197 negcoeff1 = double_int_neg (negcoeff1);
2198 rhs = XEXP (rhs, 0);
2199 }
2200
2201 if (rtx_equal_p (lhs, rhs))
2202 {
2203 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2204 rtx coeff;
2205 double_int val;
2206 bool speed = optimize_function_for_speed_p (cfun);
2207
2208 val = double_int_add (coeff0, negcoeff1);
2209 coeff = immed_double_int_const (val, mode);
2210
2211 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2212 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2213 ? tem : 0;
2214 }
2215 }
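/* For example (values chosen here for exposition):
   (minus (mult x (const_int 6)) x) combines the coefficients 6 and -1
   into (mult x (const_int 5)), again subject to the cost check.  */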
2216
2217 /* (a - (-b)) -> (a + b). True even for IEEE. */
2218 if (GET_CODE (op1) == NEG)
2219 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2220
2221 /* (-x - c) may be simplified as (-c - x). */
2222 if (GET_CODE (op0) == NEG
2223 && (CONST_INT_P (op1)
2224 || GET_CODE (op1) == CONST_DOUBLE))
2225 {
2226 tem = simplify_unary_operation (NEG, mode, op1, mode);
2227 if (tem)
2228 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2229 }
2230
2231 /* Don't let a relocatable value get a negative coeff. */
2232 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2233 return simplify_gen_binary (PLUS, mode,
2234 op0,
2235 neg_const_int (mode, op1));
2236
2237 /* (x - (x & y)) -> (x & ~y) */
2238 if (GET_CODE (op1) == AND)
2239 {
2240 if (rtx_equal_p (op0, XEXP (op1, 0)))
2241 {
2242 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2243 GET_MODE (XEXP (op1, 1)));
2244 return simplify_gen_binary (AND, mode, op0, tem);
2245 }
2246 if (rtx_equal_p (op0, XEXP (op1, 1)))
2247 {
2248 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2249 GET_MODE (XEXP (op1, 0)));
2250 return simplify_gen_binary (AND, mode, op0, tem);
2251 }
2252 }
2253
2254 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2255 by reversing the comparison code if valid. */
2256 if (STORE_FLAG_VALUE == 1
2257 && trueop0 == const1_rtx
2258 && COMPARISON_P (op1)
2259 && (reversed = reversed_comparison (op1, mode)))
2260 return reversed;
2261
2262 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2263 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2264 && GET_CODE (op1) == MULT
2265 && GET_CODE (XEXP (op1, 0)) == NEG)
2266 {
2267 rtx in1, in2;
2268
2269 in1 = XEXP (XEXP (op1, 0), 0);
2270 in2 = XEXP (op1, 1);
2271 return simplify_gen_binary (PLUS, mode,
2272 simplify_gen_binary (MULT, mode,
2273 in1, in2),
2274 op0);
2275 }
2276
2277 /* Canonicalize (minus (neg A) (mult B C)) to
2278 (minus (mult (neg B) C) A). */
2279 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2280 && GET_CODE (op1) == MULT
2281 && GET_CODE (op0) == NEG)
2282 {
2283 rtx in1, in2;
2284
2285 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2286 in2 = XEXP (op1, 1);
2287 return simplify_gen_binary (MINUS, mode,
2288 simplify_gen_binary (MULT, mode,
2289 in1, in2),
2290 XEXP (op0, 0));
2291 }
2292
2293 /* If one of the operands is a PLUS or a MINUS, see if we can
2294 simplify this by the associative law. This will, for example,
2295 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2296 Don't use the associative law for floating point.
2297 The inaccuracy makes it nonassociative,
2298 and subtle programs can break if operations are associated. */
2299
2300 if (INTEGRAL_MODE_P (mode)
2301 && (plus_minus_operand_p (op0)
2302 || plus_minus_operand_p (op1))
2303 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2304 return tem;
2305 break;
2306
2307 case MULT:
2308 if (trueop1 == constm1_rtx)
2309 return simplify_gen_unary (NEG, mode, op0, mode);
2310
2311 if (GET_CODE (op0) == NEG)
2312 {
2313 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2314 /* If op1 is a MULT as well and simplify_unary_operation
2315 just moved the NEG to the second operand, simplify_gen_binary
2316 below could, through simplify_associative_operation, move
2317 the NEG around again and recurse endlessly. */
2318 if (temp
2319 && GET_CODE (op1) == MULT
2320 && GET_CODE (temp) == MULT
2321 && XEXP (op1, 0) == XEXP (temp, 0)
2322 && GET_CODE (XEXP (temp, 1)) == NEG
2323 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2324 temp = NULL_RTX;
2325 if (temp)
2326 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2327 }
2328 if (GET_CODE (op1) == NEG)
2329 {
2330 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2331 /* If op0 is a MULT as well and simplify_unary_operation
2332 just moved the NEG to the second operand, simplify_gen_binary
2333 below could, through simplify_associative_operation, move
2334 the NEG around again and recurse endlessly. */
2335 if (temp
2336 && GET_CODE (op0) == MULT
2337 && GET_CODE (temp) == MULT
2338 && XEXP (op0, 0) == XEXP (temp, 0)
2339 && GET_CODE (XEXP (temp, 1)) == NEG
2340 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2341 temp = NULL_RTX;
2342 if (temp)
2343 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2344 }
2345
2346 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2347 x is NaN, since x * 0 is then also NaN. Nor is it valid
2348 when the mode has signed zeros, since multiplying a negative
2349 number by 0 will give -0, not 0. */
2350 if (!HONOR_NANS (mode)
2351 && !HONOR_SIGNED_ZEROS (mode)
2352 && trueop1 == CONST0_RTX (mode)
2353 && ! side_effects_p (op0))
2354 return op1;
2355
2356 /* In IEEE floating point, x*1 is not equivalent to x for
2357 signalling NaNs. */
2358 if (!HONOR_SNANS (mode)
2359 && trueop1 == CONST1_RTX (mode))
2360 return op0;
2361
2362 /* Convert multiply by constant power of two into shift unless
2363 we are still generating RTL. This test is a kludge. */
2364 if (CONST_INT_P (trueop1)
2365 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2366 /* If the mode is larger than the host word size, and the
2367 uppermost bit is set, then this isn't a power of two due
2368 to implicit sign extension. */
2369 && (width <= HOST_BITS_PER_WIDE_INT
2370 || val != HOST_BITS_PER_WIDE_INT - 1))
2371 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
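/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */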
2372
2373 /* Likewise for multipliers wider than a word. */
2374 if (GET_CODE (trueop1) == CONST_DOUBLE
2375 && (GET_MODE (trueop1) == VOIDmode
2376 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2377 && GET_MODE (op0) == mode
2378 && CONST_DOUBLE_LOW (trueop1) == 0
2379 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2380 && (val < 2 * HOST_BITS_PER_WIDE_INT - 1
2381 || GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT))
2382 return simplify_gen_binary (ASHIFT, mode, op0,
2383 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2384
2385 /* x*2 is x+x and x*(-1) is -x */
2386 if (GET_CODE (trueop1) == CONST_DOUBLE
2387 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2388 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2389 && GET_MODE (op0) == mode)
2390 {
2391 REAL_VALUE_TYPE d;
2392 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2393
2394 if (REAL_VALUES_EQUAL (d, dconst2))
2395 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2396
2397 if (!HONOR_SNANS (mode)
2398 && REAL_VALUES_EQUAL (d, dconstm1))
2399 return simplify_gen_unary (NEG, mode, op0, mode);
2400 }
2401
2402 /* Optimize -x * -x as x * x. */
2403 if (FLOAT_MODE_P (mode)
2404 && GET_CODE (op0) == NEG
2405 && GET_CODE (op1) == NEG
2406 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2407 && !side_effects_p (XEXP (op0, 0)))
2408 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2409
2410 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2411 if (SCALAR_FLOAT_MODE_P (mode)
2412 && GET_CODE (op0) == ABS
2413 && GET_CODE (op1) == ABS
2414 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2415 && !side_effects_p (XEXP (op0, 0)))
2416 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2417
2418 /* Reassociate multiplication, but for floating point MULTs
2419 only when the user specifies unsafe math optimizations. */
2420 if (! FLOAT_MODE_P (mode)
2421 || flag_unsafe_math_optimizations)
2422 {
2423 tem = simplify_associative_operation (code, mode, op0, op1);
2424 if (tem)
2425 return tem;
2426 }
2427 break;
2428
2429 case IOR:
2430 if (trueop1 == CONST0_RTX (mode))
2431 return op0;
2432 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2433 return op1;
2434 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2435 return op0;
2436 /* A | (~A) -> -1 */
2437 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2438 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2439 && ! side_effects_p (op0)
2440 && SCALAR_INT_MODE_P (mode))
2441 return constm1_rtx;
2442
2443 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2444 if (CONST_INT_P (op1)
2445 && HWI_COMPUTABLE_MODE_P (mode)
2446 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2447 return op1;
2448
2449 /* Canonicalize (X & C1) | C2. */
2450 if (GET_CODE (op0) == AND
2451 && CONST_INT_P (trueop1)
2452 && CONST_INT_P (XEXP (op0, 1)))
2453 {
2454 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2455 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2456 HOST_WIDE_INT c2 = INTVAL (trueop1);
2457
2458 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
2459 if ((c1 & c2) == c1
2460 && !side_effects_p (XEXP (op0, 0)))
2461 return trueop1;
2462
2463 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2464 if (((c1|c2) & mask) == mask)
2465 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2466
2467 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2468 if (((c1 & ~c2) & mask) != (c1 & mask))
2469 {
2470 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2471 gen_int_mode (c1 & ~c2, mode));
2472 return simplify_gen_binary (IOR, mode, tem, op1);
2473 }
2474 }
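/* Worked examples of the three cases above (constants chosen for
   exposition, QImode assumed):
   (ior (and x (const_int 3)) (const_int 15)) is just (const_int 15);
   (ior (and x (const_int 0xf0)) (const_int 0x0f)) becomes
   (ior x (const_int 0x0f)); and (ior (and x (const_int 6)) (const_int 3))
   becomes (ior (and x (const_int 4)) (const_int 3)).  */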
2475
2476 /* Convert (A & B) | A to A. */
2477 if (GET_CODE (op0) == AND
2478 && (rtx_equal_p (XEXP (op0, 0), op1)
2479 || rtx_equal_p (XEXP (op0, 1), op1))
2480 && ! side_effects_p (XEXP (op0, 0))
2481 && ! side_effects_p (XEXP (op0, 1)))
2482 return op1;
2483
2484 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2485 mode size to (rotate A CX). */
2486
2487 if (GET_CODE (op1) == ASHIFT
2488 || GET_CODE (op1) == SUBREG)
2489 {
2490 opleft = op1;
2491 opright = op0;
2492 }
2493 else
2494 {
2495 opright = op1;
2496 opleft = op0;
2497 }
2498
2499 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2500 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2501 && CONST_INT_P (XEXP (opleft, 1))
2502 && CONST_INT_P (XEXP (opright, 1))
2503 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2504 == GET_MODE_PRECISION (mode)))
2505 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
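/* For example, in SImode (ior (ashift x (const_int 3))
   (lshiftrt x (const_int 29))) matches because 3 + 29 == 32, and it
   becomes (rotate x (const_int 3)).  */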
2506
2507 /* Same, but for ashift that has been "simplified" to a wider mode
2508 by simplify_shift_const. */
2509
2510 if (GET_CODE (opleft) == SUBREG
2511 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2512 && GET_CODE (opright) == LSHIFTRT
2513 && GET_CODE (XEXP (opright, 0)) == SUBREG
2514 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2515 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2516 && (GET_MODE_SIZE (GET_MODE (opleft))
2517 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2518 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2519 SUBREG_REG (XEXP (opright, 0)))
2520 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2521 && CONST_INT_P (XEXP (opright, 1))
2522 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2523 == GET_MODE_PRECISION (mode)))
2524 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2525 XEXP (SUBREG_REG (opleft), 1));
2526
2527 /* If we have (ior (and X C1) C2), simplify this by making
2528 C1 as small as possible if C1 actually changes. */
2529 if (CONST_INT_P (op1)
2530 && (HWI_COMPUTABLE_MODE_P (mode)
2531 || INTVAL (op1) > 0)
2532 && GET_CODE (op0) == AND
2533 && CONST_INT_P (XEXP (op0, 1))
2534 && CONST_INT_P (op1)
2535 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2536 return simplify_gen_binary (IOR, mode,
2537 simplify_gen_binary
2538 (AND, mode, XEXP (op0, 0),
2539 GEN_INT (UINTVAL (XEXP (op0, 1))
2540 & ~UINTVAL (op1))),
2541 op1);
2542
2543 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2544 a (sign_extend (plus ...)).  Then check whether OP1 is a CONST_INT and
2545 the PLUS does not affect any of the bits in OP1; if so, we can do
2546 the IOR as a PLUS and associate.  This is valid if OP1
2547 can be safely shifted left C bits. */
2548 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2549 && GET_CODE (XEXP (op0, 0)) == PLUS
2550 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2551 && CONST_INT_P (XEXP (op0, 1))
2552 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2553 {
2554 int count = INTVAL (XEXP (op0, 1));
2555 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2556
2557 if (mask >> count == INTVAL (trueop1)
2558 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2559 return simplify_gen_binary (ASHIFTRT, mode,
2560 plus_constant (mode, XEXP (op0, 0),
2561 mask),
2562 XEXP (op0, 1));
2563 }
2564
2565 tem = simplify_associative_operation (code, mode, op0, op1);
2566 if (tem)
2567 return tem;
2568 break;
2569
2570 case XOR:
2571 if (trueop1 == CONST0_RTX (mode))
2572 return op0;
2573 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2574 return simplify_gen_unary (NOT, mode, op0, mode);
2575 if (rtx_equal_p (trueop0, trueop1)
2576 && ! side_effects_p (op0)
2577 && GET_MODE_CLASS (mode) != MODE_CC)
2578 return CONST0_RTX (mode);
2579
2580 /* Canonicalize XOR of the most significant bit to PLUS. */
2581 if ((CONST_INT_P (op1)
2582 || GET_CODE (op1) == CONST_DOUBLE)
2583 && mode_signbit_p (mode, op1))
2584 return simplify_gen_binary (PLUS, mode, op0, op1);
2585 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2586 if ((CONST_INT_P (op1)
2587 || GET_CODE (op1) == CONST_DOUBLE)
2588 && GET_CODE (op0) == PLUS
2589 && (CONST_INT_P (XEXP (op0, 1))
2590 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2591 && mode_signbit_p (mode, XEXP (op0, 1)))
2592 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2593 simplify_gen_binary (XOR, mode, op1,
2594 XEXP (op0, 1)));
2595
2596 /* If we are XORing two things that have no bits in common,
2597 convert them into an IOR.  This helps to detect a rotate encoded
2598 as an XOR of shifts and may enable other simplifications.  */
2599
2600 if (HWI_COMPUTABLE_MODE_P (mode)
2601 && (nonzero_bits (op0, mode)
2602 & nonzero_bits (op1, mode)) == 0)
2603 return (simplify_gen_binary (IOR, mode, op0, op1));
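/* For instance, in HImode (xor (ashift a (const_int 8))
   (lshiftrt a (const_int 8))) has disjoint nonzero bits, so it is
   rewritten as the corresponding IOR, which the rule above then
   recognizes as (rotate a (const_int 8)).  */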
2604
2605 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2606 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2607 (NOT y). */
2608 {
2609 int num_negated = 0;
2610
2611 if (GET_CODE (op0) == NOT)
2612 num_negated++, op0 = XEXP (op0, 0);
2613 if (GET_CODE (op1) == NOT)
2614 num_negated++, op1 = XEXP (op1, 0);
2615
2616 if (num_negated == 2)
2617 return simplify_gen_binary (XOR, mode, op0, op1);
2618 else if (num_negated == 1)
2619 return simplify_gen_unary (NOT, mode,
2620 simplify_gen_binary (XOR, mode, op0, op1),
2621 mode);
2622 }
2623
2624 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2625 correspond to a machine insn or result in further simplifications
2626 if B is a constant. */
2627
2628 if (GET_CODE (op0) == AND
2629 && rtx_equal_p (XEXP (op0, 1), op1)
2630 && ! side_effects_p (op1))
2631 return simplify_gen_binary (AND, mode,
2632 simplify_gen_unary (NOT, mode,
2633 XEXP (op0, 0), mode),
2634 op1);
2635
2636 else if (GET_CODE (op0) == AND
2637 && rtx_equal_p (XEXP (op0, 0), op1)
2638 && ! side_effects_p (op1))
2639 return simplify_gen_binary (AND, mode,
2640 simplify_gen_unary (NOT, mode,
2641 XEXP (op0, 1), mode),
2642 op1);
2643
2644 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2645 we can transform like this:
2646 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2647 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2648 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2649 Attempt a few simplifications when B and C are both constants. */
2650 if (GET_CODE (op0) == AND
2651 && CONST_INT_P (op1)
2652 && CONST_INT_P (XEXP (op0, 1)))
2653 {
2654 rtx a = XEXP (op0, 0);
2655 rtx b = XEXP (op0, 1);
2656 rtx c = op1;
2657 HOST_WIDE_INT bval = INTVAL (b);
2658 HOST_WIDE_INT cval = INTVAL (c);
2659
2660 rtx na_c
2661 = simplify_binary_operation (AND, mode,
2662 simplify_gen_unary (NOT, mode, a, mode),
2663 c);
2664 if ((~cval & bval) == 0)
2665 {
2666 /* Try to simplify ~A&C | ~B&C. */
2667 if (na_c != NULL_RTX)
2668 return simplify_gen_binary (IOR, mode, na_c,
2669 GEN_INT (~bval & cval));
2670 }
2671 else
2672 {
2673 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2674 if (na_c == const0_rtx)
2675 {
2676 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2677 GEN_INT (~cval & bval));
2678 return simplify_gen_binary (IOR, mode, a_nc_b,
2679 GEN_INT (~bval & cval));
2680 }
2681 }
2682 }
2683
2684 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2685 comparison if STORE_FLAG_VALUE is 1. */
2686 if (STORE_FLAG_VALUE == 1
2687 && trueop1 == const1_rtx
2688 && COMPARISON_P (op0)
2689 && (reversed = reversed_comparison (op0, mode)))
2690 return reversed;
2691
2692 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2693 is (lt foo (const_int 0)), so we can perform the above
2694 simplification if STORE_FLAG_VALUE is 1. */
2695
2696 if (STORE_FLAG_VALUE == 1
2697 && trueop1 == const1_rtx
2698 && GET_CODE (op0) == LSHIFTRT
2699 && CONST_INT_P (XEXP (op0, 1))
2700 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2701 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2702
2703 /* (xor (comparison foo bar) (const_int sign-bit))
2704 when STORE_FLAG_VALUE is the sign bit. */
2705 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2706 && trueop1 == const_true_rtx
2707 && COMPARISON_P (op0)
2708 && (reversed = reversed_comparison (op0, mode)))
2709 return reversed;
2710
2711 tem = simplify_associative_operation (code, mode, op0, op1);
2712 if (tem)
2713 return tem;
2714 break;
2715
2716 case AND:
2717 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2718 return trueop1;
2719 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2720 return op0;
2721 if (HWI_COMPUTABLE_MODE_P (mode))
2722 {
2723 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2724 HOST_WIDE_INT nzop1;
2725 if (CONST_INT_P (trueop1))
2726 {
2727 HOST_WIDE_INT val1 = INTVAL (trueop1);
2728 /* If we are turning off bits already known off in OP0, we need
2729 not do an AND. */
2730 if ((nzop0 & ~val1) == 0)
2731 return op0;
2732 }
2733 nzop1 = nonzero_bits (trueop1, mode);
2734 /* If we are clearing all the nonzero bits, the result is zero. */
2735 if ((nzop1 & nzop0) == 0
2736 && !side_effects_p (op0) && !side_effects_p (op1))
2737 return CONST0_RTX (mode);
2738 }
2739 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2740 && GET_MODE_CLASS (mode) != MODE_CC)
2741 return op0;
2742 /* A & (~A) -> 0 */
2743 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2744 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2745 && ! side_effects_p (op0)
2746 && GET_MODE_CLASS (mode) != MODE_CC)
2747 return CONST0_RTX (mode);
2748
2749 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2750 there are no nonzero bits of C outside of X's mode. */
2751 if ((GET_CODE (op0) == SIGN_EXTEND
2752 || GET_CODE (op0) == ZERO_EXTEND)
2753 && CONST_INT_P (trueop1)
2754 && HWI_COMPUTABLE_MODE_P (mode)
2755 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2756 & UINTVAL (trueop1)) == 0)
2757 {
2758 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2759 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2760 gen_int_mode (INTVAL (trueop1),
2761 imode));
2762 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2763 }
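/* For example (modes chosen here for exposition):
   (and:SI (sign_extend:SI (reg:QI x)) (const_int 0x7c)) becomes
   (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7c))), since all
   nonzero bits of the constant lie within QImode.  */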
2764
2765 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2766 we might be able to further simplify the AND with X and potentially
2767 remove the truncation altogether. */
2768 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2769 {
2770 rtx x = XEXP (op0, 0);
2771 enum machine_mode xmode = GET_MODE (x);
2772 tem = simplify_gen_binary (AND, xmode, x,
2773 gen_int_mode (INTVAL (trueop1), xmode));
2774 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2775 }
2776
2777 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2778 if (GET_CODE (op0) == IOR
2779 && CONST_INT_P (trueop1)
2780 && CONST_INT_P (XEXP (op0, 1)))
2781 {
2782 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2783 return simplify_gen_binary (IOR, mode,
2784 simplify_gen_binary (AND, mode,
2785 XEXP (op0, 0), op1),
2786 gen_int_mode (tmp, mode));
2787 }
2788
2789 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2790 insn (and may simplify more). */
2791 if (GET_CODE (op0) == XOR
2792 && rtx_equal_p (XEXP (op0, 0), op1)
2793 && ! side_effects_p (op1))
2794 return simplify_gen_binary (AND, mode,
2795 simplify_gen_unary (NOT, mode,
2796 XEXP (op0, 1), mode),
2797 op1);
2798
2799 if (GET_CODE (op0) == XOR
2800 && rtx_equal_p (XEXP (op0, 1), op1)
2801 && ! side_effects_p (op1))
2802 return simplify_gen_binary (AND, mode,
2803 simplify_gen_unary (NOT, mode,
2804 XEXP (op0, 0), mode),
2805 op1);
2806
2807 /* Similarly for (~(A ^ B)) & A. */
2808 if (GET_CODE (op0) == NOT
2809 && GET_CODE (XEXP (op0, 0)) == XOR
2810 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2811 && ! side_effects_p (op1))
2812 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2813
2814 if (GET_CODE (op0) == NOT
2815 && GET_CODE (XEXP (op0, 0)) == XOR
2816 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2817 && ! side_effects_p (op1))
2818 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2819
2820 /* Convert (A | B) & A to A. */
2821 if (GET_CODE (op0) == IOR
2822 && (rtx_equal_p (XEXP (op0, 0), op1)
2823 || rtx_equal_p (XEXP (op0, 1), op1))
2824 && ! side_effects_p (XEXP (op0, 0))
2825 && ! side_effects_p (XEXP (op0, 1)))
2826 return op1;
2827
2828 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2829 ((A & N) + B) & M -> (A + B) & M
2830 Similarly if (N & M) == 0,
2831 ((A | N) + B) & M -> (A + B) & M
2832 and for - instead of + and/or ^ instead of |.
2833 Also, if (N & M) == 0, then
2834 (A +- N) & M -> A & M. */
2835 if (CONST_INT_P (trueop1)
2836 && HWI_COMPUTABLE_MODE_P (mode)
2837 && ~UINTVAL (trueop1)
2838 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2839 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2840 {
2841 rtx pmop[2];
2842 int which;
2843
2844 pmop[0] = XEXP (op0, 0);
2845 pmop[1] = XEXP (op0, 1);
2846
2847 if (CONST_INT_P (pmop[1])
2848 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2849 return simplify_gen_binary (AND, mode, pmop[0], op1);
2850
2851 for (which = 0; which < 2; which++)
2852 {
2853 tem = pmop[which];
2854 switch (GET_CODE (tem))
2855 {
2856 case AND:
2857 if (CONST_INT_P (XEXP (tem, 1))
2858 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2859 == UINTVAL (trueop1))
2860 pmop[which] = XEXP (tem, 0);
2861 break;
2862 case IOR:
2863 case XOR:
2864 if (CONST_INT_P (XEXP (tem, 1))
2865 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2866 pmop[which] = XEXP (tem, 0);
2867 break;
2868 default:
2869 break;
2870 }
2871 }
2872
2873 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2874 {
2875 tem = simplify_gen_binary (GET_CODE (op0), mode,
2876 pmop[0], pmop[1]);
2877 return simplify_gen_binary (code, mode, tem, op1);
2878 }
2879 }
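/* Two worked examples (constants chosen for exposition), with M = 255:
   (and (plus a (const_int 256)) (const_int 255)) becomes
   (and a (const_int 255)) because 256 & 255 == 0, and
   (and (plus (ior a (const_int 256)) b) (const_int 255)) becomes
   (and (plus a b) (const_int 255)) for the same reason.  */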
2880
2881 /* (and X (ior (not X) Y)) -> (and X Y) */
2882 if (GET_CODE (op1) == IOR
2883 && GET_CODE (XEXP (op1, 0)) == NOT
2884 && op0 == XEXP (XEXP (op1, 0), 0))
2885 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2886
2887 /* (and (ior (not X) Y) X) -> (and X Y) */
2888 if (GET_CODE (op0) == IOR
2889 && GET_CODE (XEXP (op0, 0)) == NOT
2890 && op1 == XEXP (XEXP (op0, 0), 0))
2891 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2892
2893 tem = simplify_associative_operation (code, mode, op0, op1);
2894 if (tem)
2895 return tem;
2896 break;
2897
2898 case UDIV:
2899 /* 0/x is 0 (or x&0 if x has side-effects). */
2900 if (trueop0 == CONST0_RTX (mode))
2901 {
2902 if (side_effects_p (op1))
2903 return simplify_gen_binary (AND, mode, op1, trueop0);
2904 return trueop0;
2905 }
2906 /* x/1 is x. */
2907 if (trueop1 == CONST1_RTX (mode))
2908 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2909 /* Convert divide by power of two into shift. */
2910 if (CONST_INT_P (trueop1)
2911 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2912 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
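/* E.g. (udiv x (const_int 16)) becomes (lshiftrt x (const_int 4)).  */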
2913 break;
2914
2915 case DIV:
2916 /* Handle floating point and integers separately. */
2917 if (SCALAR_FLOAT_MODE_P (mode))
2918 {
2919 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2920 safe for modes with NaNs, since 0.0 / 0.0 will then be
2921 NaN rather than 0.0. Nor is it safe for modes with signed
2922 zeros, since dividing 0 by a negative number gives -0.0.  */
2923 if (trueop0 == CONST0_RTX (mode)
2924 && !HONOR_NANS (mode)
2925 && !HONOR_SIGNED_ZEROS (mode)
2926 && ! side_effects_p (op1))
2927 return op0;
2928 /* x/1.0 is x. */
2929 if (trueop1 == CONST1_RTX (mode)
2930 && !HONOR_SNANS (mode))
2931 return op0;
2932
2933 if (GET_CODE (trueop1) == CONST_DOUBLE
2934 && trueop1 != CONST0_RTX (mode))
2935 {
2936 REAL_VALUE_TYPE d;
2937 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2938
2939 /* x/-1.0 is -x. */
2940 if (REAL_VALUES_EQUAL (d, dconstm1)
2941 && !HONOR_SNANS (mode))
2942 return simplify_gen_unary (NEG, mode, op0, mode);
2943
2944 /* Change FP division by a constant into multiplication.
2945 Only do this with -freciprocal-math. */
2946 if (flag_reciprocal_math
2947 && !REAL_VALUES_EQUAL (d, dconst0))
2948 {
2949 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2950 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2951 return simplify_gen_binary (MULT, mode, op0, tem);
2952 }
2953 }
2954 }
2955 else if (SCALAR_INT_MODE_P (mode))
2956 {
2957 /* 0/x is 0 (or x&0 if x has side-effects). */
2958 if (trueop0 == CONST0_RTX (mode)
2959 && !cfun->can_throw_non_call_exceptions)
2960 {
2961 if (side_effects_p (op1))
2962 return simplify_gen_binary (AND, mode, op1, trueop0);
2963 return trueop0;
2964 }
2965 /* x/1 is x. */
2966 if (trueop1 == CONST1_RTX (mode))
2967 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2968 /* x/-1 is -x. */
2969 if (trueop1 == constm1_rtx)
2970 {
2971 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2972 return simplify_gen_unary (NEG, mode, x, mode);
2973 }
2974 }
2975 break;
2976
2977 case UMOD:
2978 /* 0%x is 0 (or x&0 if x has side-effects). */
2979 if (trueop0 == CONST0_RTX (mode))
2980 {
2981 if (side_effects_p (op1))
2982 return simplify_gen_binary (AND, mode, op1, trueop0);
2983 return trueop0;
2984 }
2985 /* x%1 is 0 (or x&0 if x has side-effects). */
2986 if (trueop1 == CONST1_RTX (mode))
2987 {
2988 if (side_effects_p (op0))
2989 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2990 return CONST0_RTX (mode);
2991 }
2992 /* Implement modulus by power of two as AND. */
2993 if (CONST_INT_P (trueop1)
2994 && exact_log2 (UINTVAL (trueop1)) > 0)
2995 return simplify_gen_binary (AND, mode, op0,
2996 GEN_INT (INTVAL (op1) - 1));
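/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)).  */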
2997 break;
2998
2999 case MOD:
3000 /* 0%x is 0 (or x&0 if x has side-effects). */
3001 if (trueop0 == CONST0_RTX (mode))
3002 {
3003 if (side_effects_p (op1))
3004 return simplify_gen_binary (AND, mode, op1, trueop0);
3005 return trueop0;
3006 }
3007 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3008 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3009 {
3010 if (side_effects_p (op0))
3011 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3012 return CONST0_RTX (mode);
3013 }
3014 break;
3015
3016 case ROTATERT:
3017 case ROTATE:
3018 case ASHIFTRT:
3019 if (trueop1 == CONST0_RTX (mode))
3020 return op0;
3021 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3022 return op0;
3023 /* Rotating or arithmetically right-shifting ~0 always results in ~0. */
3024 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3025 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3026 && ! side_effects_p (op1))
3027 return op0;
3028 canonicalize_shift:
3029 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3030 {
3031 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3032 if (val != INTVAL (op1))
3033 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3034 }
3035 break;
3036
3037 case ASHIFT:
3038 case SS_ASHIFT:
3039 case US_ASHIFT:
3040 if (trueop1 == CONST0_RTX (mode))
3041 return op0;
3042 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3043 return op0;
3044 goto canonicalize_shift;
3045
3046 case LSHIFTRT:
3047 if (trueop1 == CONST0_RTX (mode))
3048 return op0;
3049 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3050 return op0;
3051 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3052 if (GET_CODE (op0) == CLZ
3053 && CONST_INT_P (trueop1)
3054 && STORE_FLAG_VALUE == 1
3055 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3056 {
3057 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3058 unsigned HOST_WIDE_INT zero_val = 0;
3059
3060 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3061 && zero_val == GET_MODE_PRECISION (imode)
3062 && INTVAL (trueop1) == exact_log2 (zero_val))
3063 return simplify_gen_relational (EQ, mode, imode,
3064 XEXP (op0, 0), const0_rtx);
3065 }
3066 goto canonicalize_shift;
3067
3068 case SMIN:
3069 if (width <= HOST_BITS_PER_WIDE_INT
3070 && mode_signbit_p (mode, trueop1)
3071 && ! side_effects_p (op0))
3072 return op1;
3073 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3074 return op0;
3075 tem = simplify_associative_operation (code, mode, op0, op1);
3076 if (tem)
3077 return tem;
3078 break;
3079
3080 case SMAX:
3081 if (width <= HOST_BITS_PER_WIDE_INT
3082 && CONST_INT_P (trueop1)
3083 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3084 && ! side_effects_p (op0))
3085 return op1;
3086 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3087 return op0;
3088 tem = simplify_associative_operation (code, mode, op0, op1);
3089 if (tem)
3090 return tem;
3091 break;
3092
3093 case UMIN:
3094 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3095 return op1;
3096 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3097 return op0;
3098 tem = simplify_associative_operation (code, mode, op0, op1);
3099 if (tem)
3100 return tem;
3101 break;
3102
3103 case UMAX:
3104 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3105 return op1;
3106 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3107 return op0;
3108 tem = simplify_associative_operation (code, mode, op0, op1);
3109 if (tem)
3110 return tem;
3111 break;
3112
3113 case SS_PLUS:
3114 case US_PLUS:
3115 case SS_MINUS:
3116 case US_MINUS:
3117 case SS_MULT:
3118 case US_MULT:
3119 case SS_DIV:
3120 case US_DIV:
3121 /* ??? There are simplifications that can be done. */
3122 return 0;
3123
3124 case VEC_SELECT:
3125 if (!VECTOR_MODE_P (mode))
3126 {
3127 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3128 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3129 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3130 gcc_assert (XVECLEN (trueop1, 0) == 1);
3131 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3132
3133 if (GET_CODE (trueop0) == CONST_VECTOR)
3134 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3135 (trueop1, 0, 0)));
3136
3137 /* Extract a scalar element from a nested VEC_SELECT expression
3138 (with an optional nested VEC_CONCAT expression).  Some targets
3139 (i386) extract a scalar element from a vector using a chain of
3140 nested VEC_SELECT expressions.  When the input operand is a memory
3141 operand, this operation can be simplified to a simple scalar
3142 load from an offset memory address. */
3143 if (GET_CODE (trueop0) == VEC_SELECT)
3144 {
3145 rtx op0 = XEXP (trueop0, 0);
3146 rtx op1 = XEXP (trueop0, 1);
3147
3148 enum machine_mode opmode = GET_MODE (op0);
3149 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3150 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3151
3152 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3153 int elem;
3154
3155 rtvec vec;
3156 rtx tmp_op, tmp;
3157
3158 gcc_assert (GET_CODE (op1) == PARALLEL);
3159 gcc_assert (i < n_elts);
3160
3161 /* Select the element pointed to by the nested selector. */
3162 elem = INTVAL (XVECEXP (op1, 0, i));
3163
3164 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3165 if (GET_CODE (op0) == VEC_CONCAT)
3166 {
3167 rtx op00 = XEXP (op0, 0);
3168 rtx op01 = XEXP (op0, 1);
3169
3170 enum machine_mode mode00, mode01;
3171 int n_elts00, n_elts01;
3172
3173 mode00 = GET_MODE (op00);
3174 mode01 = GET_MODE (op01);
3175
3176 /* Find out number of elements of each operand. */
3177 if (VECTOR_MODE_P (mode00))
3178 {
3179 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3180 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3181 }
3182 else
3183 n_elts00 = 1;
3184
3185 if (VECTOR_MODE_P (mode01))
3186 {
3187 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3188 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3189 }
3190 else
3191 n_elts01 = 1;
3192
3193 gcc_assert (n_elts == n_elts00 + n_elts01);
3194
3195 /* Select correct operand of VEC_CONCAT
3196 and adjust selector. */
3197 if (elem < n_elts01)
3198 tmp_op = op00;
3199 else
3200 {
3201 tmp_op = op01;
3202 elem -= n_elts00;
3203 }
3204 }
3205 else
3206 tmp_op = op0;
3207
3208 vec = rtvec_alloc (1);
3209 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3210
3211 tmp = gen_rtx_fmt_ee (code, mode,
3212 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3213 return tmp;
3214 }
3215 if (GET_CODE (trueop0) == VEC_DUPLICATE
3216 && GET_MODE (XEXP (trueop0, 0)) == mode)
3217 return XEXP (trueop0, 0);
3218 }
3219 else
3220 {
3221 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3222 gcc_assert (GET_MODE_INNER (mode)
3223 == GET_MODE_INNER (GET_MODE (trueop0)));
3224 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3225
3226 if (GET_CODE (trueop0) == CONST_VECTOR)
3227 {
3228 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3229 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3230 rtvec v = rtvec_alloc (n_elts);
3231 unsigned int i;
3232
3233 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3234 for (i = 0; i < n_elts; i++)
3235 {
3236 rtx x = XVECEXP (trueop1, 0, i);
3237
3238 gcc_assert (CONST_INT_P (x));
3239 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3240 INTVAL (x));
3241 }
3242
3243 return gen_rtx_CONST_VECTOR (mode, v);
3244 }
3245 }
3246
3247 if (XVECLEN (trueop1, 0) == 1
3248 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3249 && GET_CODE (trueop0) == VEC_CONCAT)
3250 {
3251 rtx vec = trueop0;
3252 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3253
3254 /* Try to find the element in the VEC_CONCAT. */
3255 while (GET_MODE (vec) != mode
3256 && GET_CODE (vec) == VEC_CONCAT)
3257 {
3258 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3259 if (offset < vec_size)
3260 vec = XEXP (vec, 0);
3261 else
3262 {
3263 offset -= vec_size;
3264 vec = XEXP (vec, 1);
3265 }
3266 vec = avoid_constant_pool_reference (vec);
3267 }
3268
3269 if (GET_MODE (vec) == mode)
3270 return vec;
3271 }
3272
3273 return 0;
3274 case VEC_CONCAT:
3275 {
3276 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3277 ? GET_MODE (trueop0)
3278 : GET_MODE_INNER (mode));
3279 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3280 ? GET_MODE (trueop1)
3281 : GET_MODE_INNER (mode));
3282
3283 gcc_assert (VECTOR_MODE_P (mode));
3284 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3285 == GET_MODE_SIZE (mode));
3286
3287 if (VECTOR_MODE_P (op0_mode))
3288 gcc_assert (GET_MODE_INNER (mode)
3289 == GET_MODE_INNER (op0_mode));
3290 else
3291 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3292
3293 if (VECTOR_MODE_P (op1_mode))
3294 gcc_assert (GET_MODE_INNER (mode)
3295 == GET_MODE_INNER (op1_mode));
3296 else
3297 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3298
3299 if ((GET_CODE (trueop0) == CONST_VECTOR
3300 || CONST_INT_P (trueop0)
3301 || GET_CODE (trueop0) == CONST_DOUBLE)
3302 && (GET_CODE (trueop1) == CONST_VECTOR
3303 || CONST_INT_P (trueop1)
3304 || GET_CODE (trueop1) == CONST_DOUBLE))
3305 {
3306 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3307 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3308 rtvec v = rtvec_alloc (n_elts);
3309 unsigned int i;
3310 unsigned in_n_elts = 1;
3311
3312 if (VECTOR_MODE_P (op0_mode))
3313 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3314 for (i = 0; i < n_elts; i++)
3315 {
3316 if (i < in_n_elts)
3317 {
3318 if (!VECTOR_MODE_P (op0_mode))
3319 RTVEC_ELT (v, i) = trueop0;
3320 else
3321 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3322 }
3323 else
3324 {
3325 if (!VECTOR_MODE_P (op1_mode))
3326 RTVEC_ELT (v, i) = trueop1;
3327 else
3328 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3329 i - in_n_elts);
3330 }
3331 }
3332
3333 return gen_rtx_CONST_VECTOR (mode, v);
3334 }
3335 }
3336 return 0;
3337
3338 default:
3339 gcc_unreachable ();
3340 }
3341
3342 return 0;
3343 }
3344
3345 rtx
3346 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3347 rtx op0, rtx op1)
3348 {
3349 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3350 HOST_WIDE_INT val;
3351 unsigned int width = GET_MODE_PRECISION (mode);
3352
3353 if (VECTOR_MODE_P (mode)
3354 && code != VEC_CONCAT
3355 && GET_CODE (op0) == CONST_VECTOR
3356 && GET_CODE (op1) == CONST_VECTOR)
3357 {
3358 unsigned n_elts = GET_MODE_NUNITS (mode);
3359 enum machine_mode op0mode = GET_MODE (op0);
3360 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3361 enum machine_mode op1mode = GET_MODE (op1);
3362 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3363 rtvec v = rtvec_alloc (n_elts);
3364 unsigned int i;
3365
3366 gcc_assert (op0_n_elts == n_elts);
3367 gcc_assert (op1_n_elts == n_elts);
3368 for (i = 0; i < n_elts; i++)
3369 {
3370 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3371 CONST_VECTOR_ELT (op0, i),
3372 CONST_VECTOR_ELT (op1, i));
3373 if (!x)
3374 return 0;
3375 RTVEC_ELT (v, i) = x;
3376 }
3377
3378 return gen_rtx_CONST_VECTOR (mode, v);
3379 }
3380
3381 if (VECTOR_MODE_P (mode)
3382 && code == VEC_CONCAT
3383 && (CONST_INT_P (op0)
3384 || GET_CODE (op0) == CONST_DOUBLE
3385 || GET_CODE (op0) == CONST_FIXED)
3386 && (CONST_INT_P (op1)
3387 || GET_CODE (op1) == CONST_DOUBLE
3388 || GET_CODE (op1) == CONST_FIXED))
3389 {
3390 unsigned n_elts = GET_MODE_NUNITS (mode);
3391 rtvec v = rtvec_alloc (n_elts);
3392
3393 gcc_assert (n_elts >= 2);
3394 if (n_elts == 2)
3395 {
3396 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3397 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3398
3399 RTVEC_ELT (v, 0) = op0;
3400 RTVEC_ELT (v, 1) = op1;
3401 }
3402 else
3403 {
3404 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3405 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3406 unsigned i;
3407
3408 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3409 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3410 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3411
3412 for (i = 0; i < op0_n_elts; ++i)
3413 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3414 for (i = 0; i < op1_n_elts; ++i)
3415 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3416 }
3417
3418 return gen_rtx_CONST_VECTOR (mode, v);
3419 }
3420
3421 if (SCALAR_FLOAT_MODE_P (mode)
3422 && GET_CODE (op0) == CONST_DOUBLE
3423 && GET_CODE (op1) == CONST_DOUBLE
3424 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3425 {
3426 if (code == AND
3427 || code == IOR
3428 || code == XOR)
3429 {
3430 long tmp0[4];
3431 long tmp1[4];
3432 REAL_VALUE_TYPE r;
3433 int i;
3434
3435 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3436 GET_MODE (op0));
3437 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3438 GET_MODE (op1));
3439 for (i = 0; i < 4; i++)
3440 {
3441 switch (code)
3442 {
3443 case AND:
3444 tmp0[i] &= tmp1[i];
3445 break;
3446 case IOR:
3447 tmp0[i] |= tmp1[i];
3448 break;
3449 case XOR:
3450 tmp0[i] ^= tmp1[i];
3451 break;
3452 default:
3453 gcc_unreachable ();
3454 }
3455 }
3456 real_from_target (&r, tmp0, mode);
3457 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3458 }
3459 else
3460 {
3461 REAL_VALUE_TYPE f0, f1, value, result;
3462 bool inexact;
3463
3464 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3465 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3466 real_convert (&f0, mode, &f0);
3467 real_convert (&f1, mode, &f1);
3468
3469 if (HONOR_SNANS (mode)
3470 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3471 return 0;
3472
3473 if (code == DIV
3474 && REAL_VALUES_EQUAL (f1, dconst0)
3475 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3476 return 0;
3477
3478 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3479 && flag_trapping_math
3480 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3481 {
3482 int s0 = REAL_VALUE_NEGATIVE (f0);
3483 int s1 = REAL_VALUE_NEGATIVE (f1);
3484
3485 switch (code)
3486 {
3487 case PLUS:
3488 /* Inf + -Inf = NaN plus exception. */
3489 if (s0 != s1)
3490 return 0;
3491 break;
3492 case MINUS:
3493 /* Inf - Inf = NaN plus exception. */
3494 if (s0 == s1)
3495 return 0;
3496 break;
3497 case DIV:
3498 /* Inf / Inf = NaN plus exception. */
3499 return 0;
3500 default:
3501 break;
3502 }
3503 }
3504
3505 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3506 && flag_trapping_math
3507 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3508 || (REAL_VALUE_ISINF (f1)
3509 && REAL_VALUES_EQUAL (f0, dconst0))))
3510 /* Inf * 0 = NaN plus exception. */
3511 return 0;
3512
3513 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3514 &f0, &f1);
3515 real_convert (&result, mode, &value);
3516
3517 /* Don't constant fold this floating point operation if
3518 the result has overflowed and flag_trapping_math is set. */
3519
3520 if (flag_trapping_math
3521 && MODE_HAS_INFINITIES (mode)
3522 && REAL_VALUE_ISINF (result)
3523 && !REAL_VALUE_ISINF (f0)
3524 && !REAL_VALUE_ISINF (f1))
3525 /* Overflow plus exception. */
3526 return 0;
3527
3528 /* Don't constant fold this floating point operation if the
3529 result may depend upon the run-time rounding mode and
3530 flag_rounding_math is set, or if GCC's software emulation
3531 is unable to accurately represent the result. */
3532
3533 if ((flag_rounding_math
3534 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3535 && (inexact || !real_identical (&result, &value)))
3536 return NULL_RTX;
3537
3538 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3539 }
3540 }
3541
3542 /* We can fold some multi-word operations. */
3543 if (GET_MODE_CLASS (mode) == MODE_INT
3544 && width == HOST_BITS_PER_DOUBLE_INT
3545 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3546 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3547 {
3548 double_int o0, o1, res, tmp;
3549
3550 o0 = rtx_to_double_int (op0);
3551 o1 = rtx_to_double_int (op1);
3552
3553 switch (code)
3554 {
3555 case MINUS:
3556 /* A - B == A + (-B). */
3557 o1 = double_int_neg (o1);
3558
3559 /* Fall through.... */
3560
3561 case PLUS:
3562 res = double_int_add (o0, o1);
3563 break;
3564
3565 case MULT:
3566 res = double_int_mul (o0, o1);
3567 break;
3568
3569 case DIV:
3570 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3571 o0.low, o0.high, o1.low, o1.high,
3572 &res.low, &res.high,
3573 &tmp.low, &tmp.high))
3574 return 0;
3575 break;
3576
3577 case MOD:
3578 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3579 o0.low, o0.high, o1.low, o1.high,
3580 &tmp.low, &tmp.high,
3581 &res.low, &res.high))
3582 return 0;
3583 break;
3584
3585 case UDIV:
3586 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3587 o0.low, o0.high, o1.low, o1.high,
3588 &res.low, &res.high,
3589 &tmp.low, &tmp.high))
3590 return 0;
3591 break;
3592
3593 case UMOD:
3594 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3595 o0.low, o0.high, o1.low, o1.high,
3596 &tmp.low, &tmp.high,
3597 &res.low, &res.high))
3598 return 0;
3599 break;
3600
3601 case AND:
3602 res = double_int_and (o0, o1);
3603 break;
3604
3605 case IOR:
3606 res = double_int_ior (o0, o1);
3607 break;
3608
3609 case XOR:
3610 res = double_int_xor (o0, o1);
3611 break;
3612
3613 case SMIN:
3614 res = double_int_smin (o0, o1);
3615 break;
3616
3617 case SMAX:
3618 res = double_int_smax (o0, o1);
3619 break;
3620
3621 case UMIN:
3622 res = double_int_umin (o0, o1);
3623 break;
3624
3625 case UMAX:
3626 res = double_int_umax (o0, o1);
3627 break;
3628
3629 case LSHIFTRT: case ASHIFTRT:
3630 case ASHIFT:
3631 case ROTATE: case ROTATERT:
3632 {
3633 unsigned HOST_WIDE_INT cnt;
3634
3635 if (SHIFT_COUNT_TRUNCATED)
3636 o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));
3637
3638 if (!double_int_fits_in_uhwi_p (o1)
3639 || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
3640 return 0;
3641
3642 cnt = double_int_to_uhwi (o1);
3643
3644 if (code == LSHIFTRT || code == ASHIFTRT)
3645 res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
3646 code == ASHIFTRT);
3647 else if (code == ASHIFT)
3648 res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
3649 true);
3650 else if (code == ROTATE)
3651 res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
3652 else /* code == ROTATERT */
3653 res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
3654 }
3655 break;
3656
3657 default:
3658 return 0;
3659 }
3660
3661 return immed_double_int_const (res, mode);
3662 }
3663
3664 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3665 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3666 {
3667 /* Get the integer argument values in two forms:
3668 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3669
3670 arg0 = INTVAL (op0);
3671 arg1 = INTVAL (op1);
3672
3673 if (width < HOST_BITS_PER_WIDE_INT)
3674 {
3675 arg0 &= GET_MODE_MASK (mode);
3676 arg1 &= GET_MODE_MASK (mode);
3677
3678 arg0s = arg0;
3679 if (val_signbit_known_set_p (mode, arg0s))
3680 arg0s |= ~GET_MODE_MASK (mode);
3681
3682 arg1s = arg1;
3683 if (val_signbit_known_set_p (mode, arg1s))
3684 arg1s |= ~GET_MODE_MASK (mode);
3685 }
3686 else
3687 {
3688 arg0s = arg0;
3689 arg1s = arg1;
3690 }
3691
3692 /* Compute the value of the arithmetic. */
3693
3694 switch (code)
3695 {
3696 case PLUS:
3697 val = arg0s + arg1s;
3698 break;
3699
3700 case MINUS:
3701 val = arg0s - arg1s;
3702 break;
3703
3704 case MULT:
3705 val = arg0s * arg1s;
3706 break;
3707
3708 case DIV:
3709 if (arg1s == 0
3710 || ((unsigned HOST_WIDE_INT) arg0s
3711 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3712 && arg1s == -1))
3713 return 0;
3714 val = arg0s / arg1s;
3715 break;
3716
3717 case MOD:
3718 if (arg1s == 0
3719 || ((unsigned HOST_WIDE_INT) arg0s
3720 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3721 && arg1s == -1))
3722 return 0;
3723 val = arg0s % arg1s;
3724 break;
3725
3726 case UDIV:
3727 if (arg1 == 0
3728 || ((unsigned HOST_WIDE_INT) arg0s
3729 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3730 && arg1s == -1))
3731 return 0;
3732 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3733 break;
3734
3735 case UMOD:
3736 if (arg1 == 0
3737 || ((unsigned HOST_WIDE_INT) arg0s
3738 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3739 && arg1s == -1))
3740 return 0;
3741 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3742 break;
3743
3744 case AND:
3745 val = arg0 & arg1;
3746 break;
3747
3748 case IOR:
3749 val = arg0 | arg1;
3750 break;
3751
3752 case XOR:
3753 val = arg0 ^ arg1;
3754 break;
3755
3756 case LSHIFTRT:
3757 case ASHIFT:
3758 case ASHIFTRT:
3759 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3760 the value is in range. We can't return any old value for
3761 out-of-range arguments because either the middle-end (via
3762 shift_truncation_mask) or the back-end might be relying on
3763 target-specific knowledge. Nor can we rely on
3764 shift_truncation_mask, since the shift might not be part of an
3765 ashlM3, lshrM3 or ashrM3 instruction. */
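	  /* For example (a sketch, assuming QImode, i.e. WIDTH == 8): a shift
	     count of 9 becomes 1 when SHIFT_COUNT_TRUNCATED is nonzero, but
	     when it is zero we must return 0 below and leave the out-of-range
	     shift alone rather than guess what the target would do.  */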
3766 if (SHIFT_COUNT_TRUNCATED)
3767 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3768 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3769 return 0;
3770
3771 val = (code == ASHIFT
3772 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3773 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3774
3775 /* Sign-extend the result for arithmetic right shifts. */
3776 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3777 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3778 break;
3779
3780 case ROTATERT:
3781 if (arg1 < 0)
3782 return 0;
3783
3784 arg1 %= width;
3785 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3786 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3787 break;
3788
3789 case ROTATE:
3790 if (arg1 < 0)
3791 return 0;
3792
3793 arg1 %= width;
3794 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3795 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3796 break;
3797
3798 case COMPARE:
3799 /* Do nothing here. */
3800 return 0;
3801
3802 case SMIN:
3803 val = arg0s <= arg1s ? arg0s : arg1s;
3804 break;
3805
3806 case UMIN:
3807 val = ((unsigned HOST_WIDE_INT) arg0
3808 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3809 break;
3810
3811 case SMAX:
3812 val = arg0s > arg1s ? arg0s : arg1s;
3813 break;
3814
3815 case UMAX:
3816 val = ((unsigned HOST_WIDE_INT) arg0
3817 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3818 break;
3819
3820 case SS_PLUS:
3821 case US_PLUS:
3822 case SS_MINUS:
3823 case US_MINUS:
3824 case SS_MULT:
3825 case US_MULT:
3826 case SS_DIV:
3827 case US_DIV:
3828 case SS_ASHIFT:
3829 case US_ASHIFT:
3830 /* ??? There are simplifications that can be done. */
3831 return 0;
3832
3833 default:
3834 gcc_unreachable ();
3835 }
3836
3837 return gen_int_mode (val, mode);
3838 }
3839
3840 return NULL_RTX;
3841 }
3842
3843
3844 \f
3845 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3846 PLUS or MINUS.
3847
3848    Rather than testing for specific cases, we do this by a brute-force method:
3849    apply all possible simplifications until no more changes occur.  Then
3850 we rebuild the operation. */
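   As an informal example of the expansion performed below:
   (minus (plus A B) C) is flattened into the operand list
   { A, + }, { B, + }, { C, - }; a NEG contributes its operand with the
   sign flipped, and (not X) is rewritten as { X, - }, { -1, + } using
   ~X == -X - 1.  The list is then simplified pairwise and rebuilt.  */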
3851
3852 struct simplify_plus_minus_op_data
3853 {
3854 rtx op;
3855 short neg;
3856 };
3857
3858 static bool
3859 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3860 {
3861 int result;
3862
3863 result = (commutative_operand_precedence (y)
3864 - commutative_operand_precedence (x));
3865 if (result)
3866 return result > 0;
3867
3868 /* Group together equal REGs to do more simplification. */
3869 if (REG_P (x) && REG_P (y))
3870 return REGNO (x) > REGNO (y);
3871 else
3872 return false;
3873 }
3874
3875 static rtx
3876 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3877 rtx op1)
3878 {
3879 struct simplify_plus_minus_op_data ops[8];
3880 rtx result, tem;
3881 int n_ops = 2, input_ops = 2;
3882 int changed, n_constants = 0, canonicalized = 0;
3883 int i, j;
3884
3885 memset (ops, 0, sizeof ops);
3886
3887 /* Set up the two operands and then expand them until nothing has been
3888 changed. If we run out of room in our array, give up; this should
3889 almost never happen. */
3890
3891 ops[0].op = op0;
3892 ops[0].neg = 0;
3893 ops[1].op = op1;
3894 ops[1].neg = (code == MINUS);
3895
3896 do
3897 {
3898 changed = 0;
3899
3900 for (i = 0; i < n_ops; i++)
3901 {
3902 rtx this_op = ops[i].op;
3903 int this_neg = ops[i].neg;
3904 enum rtx_code this_code = GET_CODE (this_op);
3905
3906 switch (this_code)
3907 {
3908 case PLUS:
3909 case MINUS:
3910 if (n_ops == 7)
3911 return NULL_RTX;
3912
3913 ops[n_ops].op = XEXP (this_op, 1);
3914 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3915 n_ops++;
3916
3917 ops[i].op = XEXP (this_op, 0);
3918 input_ops++;
3919 changed = 1;
3920 canonicalized |= this_neg;
3921 break;
3922
3923 case NEG:
3924 ops[i].op = XEXP (this_op, 0);
3925 ops[i].neg = ! this_neg;
3926 changed = 1;
3927 canonicalized = 1;
3928 break;
3929
3930 case CONST:
3931 if (n_ops < 7
3932 && GET_CODE (XEXP (this_op, 0)) == PLUS
3933 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3934 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3935 {
3936 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3937 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3938 ops[n_ops].neg = this_neg;
3939 n_ops++;
3940 changed = 1;
3941 canonicalized = 1;
3942 }
3943 break;
3944
3945 case NOT:
3946 /* ~a -> (-a - 1) */
3947 if (n_ops != 7)
3948 {
3949 ops[n_ops].op = CONSTM1_RTX (mode);
3950 ops[n_ops++].neg = this_neg;
3951 ops[i].op = XEXP (this_op, 0);
3952 ops[i].neg = !this_neg;
3953 changed = 1;
3954 canonicalized = 1;
3955 }
3956 break;
3957
3958 case CONST_INT:
3959 n_constants++;
3960 if (this_neg)
3961 {
3962 ops[i].op = neg_const_int (mode, this_op);
3963 ops[i].neg = 0;
3964 changed = 1;
3965 canonicalized = 1;
3966 }
3967 break;
3968
3969 default:
3970 break;
3971 }
3972 }
3973 }
3974 while (changed);
3975
3976 if (n_constants > 1)
3977 canonicalized = 1;
3978
3979 gcc_assert (n_ops >= 2);
3980
3981 /* If we only have two operands, we can avoid the loops. */
3982 if (n_ops == 2)
3983 {
3984 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3985 rtx lhs, rhs;
3986
3987 /* Get the two operands. Be careful with the order, especially for
3988 the cases where code == MINUS. */
3989 if (ops[0].neg && ops[1].neg)
3990 {
3991 lhs = gen_rtx_NEG (mode, ops[0].op);
3992 rhs = ops[1].op;
3993 }
3994 else if (ops[0].neg)
3995 {
3996 lhs = ops[1].op;
3997 rhs = ops[0].op;
3998 }
3999 else
4000 {
4001 lhs = ops[0].op;
4002 rhs = ops[1].op;
4003 }
4004
4005 return simplify_const_binary_operation (code, mode, lhs, rhs);
4006 }
4007
4008 /* Now simplify each pair of operands until nothing changes. */
4009 do
4010 {
4011 /* Insertion sort is good enough for an eight-element array. */
4012 for (i = 1; i < n_ops; i++)
4013 {
4014 struct simplify_plus_minus_op_data save;
4015 j = i - 1;
4016 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4017 continue;
4018
4019 canonicalized = 1;
4020 save = ops[i];
4021 do
4022 ops[j + 1] = ops[j];
4023 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4024 ops[j + 1] = save;
4025 }
4026
4027 changed = 0;
4028 for (i = n_ops - 1; i > 0; i--)
4029 for (j = i - 1; j >= 0; j--)
4030 {
4031 rtx lhs = ops[j].op, rhs = ops[i].op;
4032 int lneg = ops[j].neg, rneg = ops[i].neg;
4033
4034 if (lhs != 0 && rhs != 0)
4035 {
4036 enum rtx_code ncode = PLUS;
4037
4038 if (lneg != rneg)
4039 {
4040 ncode = MINUS;
4041 if (lneg)
4042 tem = lhs, lhs = rhs, rhs = tem;
4043 }
4044 else if (swap_commutative_operands_p (lhs, rhs))
4045 tem = lhs, lhs = rhs, rhs = tem;
4046
4047 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4048 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4049 {
4050 rtx tem_lhs, tem_rhs;
4051
4052 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4053 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4054 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4055
4056 if (tem && !CONSTANT_P (tem))
4057 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4058 }
4059 else
4060 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4061
4062 /* Reject "simplifications" that just wrap the two
4063 arguments in a CONST. Failure to do so can result
4064 in infinite recursion with simplify_binary_operation
4065 when it calls us to simplify CONST operations. */
4066 if (tem
4067 && ! (GET_CODE (tem) == CONST
4068 && GET_CODE (XEXP (tem, 0)) == ncode
4069 && XEXP (XEXP (tem, 0), 0) == lhs
4070 && XEXP (XEXP (tem, 0), 1) == rhs))
4071 {
4072 lneg &= rneg;
4073 if (GET_CODE (tem) == NEG)
4074 tem = XEXP (tem, 0), lneg = !lneg;
4075 if (CONST_INT_P (tem) && lneg)
4076 tem = neg_const_int (mode, tem), lneg = 0;
4077
4078 ops[i].op = tem;
4079 ops[i].neg = lneg;
4080 ops[j].op = NULL_RTX;
4081 changed = 1;
4082 canonicalized = 1;
4083 }
4084 }
4085 }
4086
4087 /* If nothing changed, fail. */
4088 if (!canonicalized)
4089 return NULL_RTX;
4090
4091 /* Pack all the operands to the lower-numbered entries. */
4092 for (i = 0, j = 0; j < n_ops; j++)
4093 if (ops[j].op)
4094 {
4095 ops[i] = ops[j];
4096 i++;
4097 }
4098 n_ops = i;
4099 }
4100 while (changed);
4101
4102 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4103 if (n_ops == 2
4104 && CONST_INT_P (ops[1].op)
4105 && CONSTANT_P (ops[0].op)
4106 && ops[0].neg)
4107 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4108
4109 /* We suppressed creation of trivial CONST expressions in the
4110 combination loop to avoid recursion. Create one manually now.
4111 The combination loop should have ensured that there is exactly
4112 one CONST_INT, and the sort will have ensured that it is last
4113 in the array and that any other constant will be next-to-last. */
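      /* For instance, if the surviving operands are a REG, a SYMBOL_REF and
	 (const_int 12), the two constants are folded here by plus_constant
	 into (const (plus (symbol_ref ...) (const_int 12))), leaving the REG
	 as the other operand.  */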
4114
4115 if (n_ops > 1
4116 && CONST_INT_P (ops[n_ops - 1].op)
4117 && CONSTANT_P (ops[n_ops - 2].op))
4118 {
4119 rtx value = ops[n_ops - 1].op;
4120 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4121 value = neg_const_int (mode, value);
4122 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4123 INTVAL (value));
4124 n_ops--;
4125 }
4126
4127 /* Put a non-negated operand first, if possible. */
4128
4129 for (i = 0; i < n_ops && ops[i].neg; i++)
4130 continue;
4131 if (i == n_ops)
4132 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4133 else if (i != 0)
4134 {
4135 tem = ops[0].op;
4136 ops[0] = ops[i];
4137 ops[i].op = tem;
4138 ops[i].neg = 1;
4139 }
4140
4141 /* Now make the result by performing the requested operations. */
4142 result = ops[0].op;
4143 for (i = 1; i < n_ops; i++)
4144 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4145 mode, result, ops[i].op);
4146
4147 return result;
4148 }
4149
4150 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4151 static bool
4152 plus_minus_operand_p (const_rtx x)
4153 {
4154 return GET_CODE (x) == PLUS
4155 || GET_CODE (x) == MINUS
4156 || (GET_CODE (x) == CONST
4157 && GET_CODE (XEXP (x, 0)) == PLUS
4158 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4159 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4160 }
4161
4162 /* Like simplify_binary_operation except used for relational operators.
4163 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4164 not both be VOIDmode.
4165
4166 CMP_MODE specifies the mode in which the comparison is done, so it is
4167 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4168 the operands or, if both are VOIDmode, the operands are compared in
4169 "infinite precision". */
4170 rtx
4171 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4172 enum machine_mode cmp_mode, rtx op0, rtx op1)
4173 {
4174 rtx tem, trueop0, trueop1;
4175
4176 if (cmp_mode == VOIDmode)
4177 cmp_mode = GET_MODE (op0);
4178 if (cmp_mode == VOIDmode)
4179 cmp_mode = GET_MODE (op1);
4180
4181 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4182 if (tem)
4183 {
4184 if (SCALAR_FLOAT_MODE_P (mode))
4185 {
4186 if (tem == const0_rtx)
4187 return CONST0_RTX (mode);
4188 #ifdef FLOAT_STORE_FLAG_VALUE
4189 {
4190 REAL_VALUE_TYPE val;
4191 val = FLOAT_STORE_FLAG_VALUE (mode);
4192 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4193 }
4194 #else
4195 return NULL_RTX;
4196 #endif
4197 }
4198 if (VECTOR_MODE_P (mode))
4199 {
4200 if (tem == const0_rtx)
4201 return CONST0_RTX (mode);
4202 #ifdef VECTOR_STORE_FLAG_VALUE
4203 {
4204 int i, units;
4205 rtvec v;
4206
4207 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4208 if (val == NULL_RTX)
4209 return NULL_RTX;
4210 if (val == const1_rtx)
4211 return CONST1_RTX (mode);
4212
4213 units = GET_MODE_NUNITS (mode);
4214 v = rtvec_alloc (units);
4215 for (i = 0; i < units; i++)
4216 RTVEC_ELT (v, i) = val;
4217 return gen_rtx_raw_CONST_VECTOR (mode, v);
4218 }
4219 #else
4220 return NULL_RTX;
4221 #endif
4222 }
4223
4224 return tem;
4225 }
4226
4227 /* For the following tests, ensure const0_rtx is op1. */
4228 if (swap_commutative_operands_p (op0, op1)
4229 || (op0 == const0_rtx && op1 != const0_rtx))
4230 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4231
4232 /* If op0 is a compare, extract the comparison arguments from it. */
4233 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4234 return simplify_gen_relational (code, mode, VOIDmode,
4235 XEXP (op0, 0), XEXP (op0, 1));
4236
4237 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4238 || CC0_P (op0))
4239 return NULL_RTX;
4240
4241 trueop0 = avoid_constant_pool_reference (op0);
4242 trueop1 = avoid_constant_pool_reference (op1);
4243 return simplify_relational_operation_1 (code, mode, cmp_mode,
4244 trueop0, trueop1);
4245 }
4246
4247 /* This part of simplify_relational_operation is only used when CMP_MODE
4248 is not in class MODE_CC (i.e. it is a real comparison).
4249
4250 MODE is the mode of the result, while CMP_MODE specifies the mode
4251 in which the comparison is done, so it is the mode of the operands. */
4252
4253 static rtx
4254 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4255 enum machine_mode cmp_mode, rtx op0, rtx op1)
4256 {
4257 enum rtx_code op0code = GET_CODE (op0);
4258
4259 if (op1 == const0_rtx && COMPARISON_P (op0))
4260 {
4261 /* If op0 is a comparison, extract the comparison arguments
4262 from it. */
4263 if (code == NE)
4264 {
4265 if (GET_MODE (op0) == mode)
4266 return simplify_rtx (op0);
4267 else
4268 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4269 XEXP (op0, 0), XEXP (op0, 1));
4270 }
4271 else if (code == EQ)
4272 {
4273 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4274 if (new_code != UNKNOWN)
4275 return simplify_gen_relational (new_code, mode, VOIDmode,
4276 XEXP (op0, 0), XEXP (op0, 1));
4277 }
4278 }
4279
4280 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4281 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
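  /* E.g. the unsigned overflow test (ltu:SI (plus:SI x (const_int 4))
     (const_int 4)) becomes (geu:SI x (const_int -4)): the sum wraps
     around exactly when x is one of the last four values of the mode.  */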
4282 if ((code == LTU || code == GEU)
4283 && GET_CODE (op0) == PLUS
4284 && CONST_INT_P (XEXP (op0, 1))
4285 && (rtx_equal_p (op1, XEXP (op0, 0))
4286 || rtx_equal_p (op1, XEXP (op0, 1))))
4287 {
4288 rtx new_cmp
4289 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4290 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4291 cmp_mode, XEXP (op0, 0), new_cmp);
4292 }
4293
4294 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4295 if ((code == LTU || code == GEU)
4296 && GET_CODE (op0) == PLUS
4297 && rtx_equal_p (op1, XEXP (op0, 1))
4298 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4299 && !rtx_equal_p (op1, XEXP (op0, 0)))
4300 return simplify_gen_relational (code, mode, cmp_mode, op0,
4301 copy_rtx (XEXP (op0, 0)));
4302
4303 if (op1 == const0_rtx)
4304 {
4305 /* Canonicalize (GTU x 0) as (NE x 0). */
4306 if (code == GTU)
4307 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4308 /* Canonicalize (LEU x 0) as (EQ x 0). */
4309 if (code == LEU)
4310 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4311 }
4312 else if (op1 == const1_rtx)
4313 {
4314 switch (code)
4315 {
4316 case GE:
4317 /* Canonicalize (GE x 1) as (GT x 0). */
4318 return simplify_gen_relational (GT, mode, cmp_mode,
4319 op0, const0_rtx);
4320 case GEU:
4321 /* Canonicalize (GEU x 1) as (NE x 0). */
4322 return simplify_gen_relational (NE, mode, cmp_mode,
4323 op0, const0_rtx);
4324 case LT:
4325 /* Canonicalize (LT x 1) as (LE x 0). */
4326 return simplify_gen_relational (LE, mode, cmp_mode,
4327 op0, const0_rtx);
4328 case LTU:
4329 /* Canonicalize (LTU x 1) as (EQ x 0). */
4330 return simplify_gen_relational (EQ, mode, cmp_mode,
4331 op0, const0_rtx);
4332 default:
4333 break;
4334 }
4335 }
4336 else if (op1 == constm1_rtx)
4337 {
4338 /* Canonicalize (LE x -1) as (LT x 0). */
4339 if (code == LE)
4340 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4341 /* Canonicalize (GT x -1) as (GE x 0). */
4342 if (code == GT)
4343 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4344 }
4345
4346 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
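  /* For example, (eq (plus x (const_int 3)) (const_int 10)) becomes
     (eq x (const_int 7)); for a MINUS in op0 the constant is added back
     instead.  */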
4347 if ((code == EQ || code == NE)
4348 && (op0code == PLUS || op0code == MINUS)
4349 && CONSTANT_P (op1)
4350 && CONSTANT_P (XEXP (op0, 1))
4351 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4352 {
4353 rtx x = XEXP (op0, 0);
4354 rtx c = XEXP (op0, 1);
4355 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4356 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4357
4358 /* Detect an infinite recursion in which this simplification would
4359 keep oscillating between:
4360 A + B == C <---> C - B == A,
4361 where A, B, and C are all constants with non-simplifiable expressions,
4362 usually SYMBOL_REFs. */
4363 if (GET_CODE (tem) == invcode
4364 && CONSTANT_P (x)
4365 && rtx_equal_p (c, XEXP (tem, 1)))
4366 return NULL_RTX;
4367
4368 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4369 }
4370
4371 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4372 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4373 if (code == NE
4374 && op1 == const0_rtx
4375 && GET_MODE_CLASS (mode) == MODE_INT
4376 && cmp_mode != VOIDmode
4377 /* ??? Work-around BImode bugs in the ia64 backend. */
4378 && mode != BImode
4379 && cmp_mode != BImode
4380 && nonzero_bits (op0, cmp_mode) == 1
4381 && STORE_FLAG_VALUE == 1)
4382 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4383 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4384 : lowpart_subreg (mode, op0, cmp_mode);
4385
4386 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4387 if ((code == EQ || code == NE)
4388 && op1 == const0_rtx
4389 && op0code == XOR)
4390 return simplify_gen_relational (code, mode, cmp_mode,
4391 XEXP (op0, 0), XEXP (op0, 1));
4392
4393 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4394 if ((code == EQ || code == NE)
4395 && op0code == XOR
4396 && rtx_equal_p (XEXP (op0, 0), op1)
4397 && !side_effects_p (XEXP (op0, 0)))
4398 return simplify_gen_relational (code, mode, cmp_mode,
4399 XEXP (op0, 1), const0_rtx);
4400
4401 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4402 if ((code == EQ || code == NE)
4403 && op0code == XOR
4404 && rtx_equal_p (XEXP (op0, 1), op1)
4405 && !side_effects_p (XEXP (op0, 1)))
4406 return simplify_gen_relational (code, mode, cmp_mode,
4407 XEXP (op0, 0), const0_rtx);
4408
4409 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4410 if ((code == EQ || code == NE)
4411 && op0code == XOR
4412 && (CONST_INT_P (op1)
4413 || GET_CODE (op1) == CONST_DOUBLE)
4414 && (CONST_INT_P (XEXP (op0, 1))
4415 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4416 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4417 simplify_gen_binary (XOR, cmp_mode,
4418 XEXP (op0, 1), op1));
4419
4420 if (op0code == POPCOUNT && op1 == const0_rtx)
4421 switch (code)
4422 {
4423 case EQ:
4424 case LE:
4425 case LEU:
4426 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4427 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4428 XEXP (op0, 0), const0_rtx);
4429
4430 case NE:
4431 case GT:
4432 case GTU:
4433 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4434 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4435 XEXP (op0, 0), const0_rtx);
4436
4437 default:
4438 break;
4439 }
4440
4441 return NULL_RTX;
4442 }
4443
4444 enum
4445 {
4446 CMP_EQ = 1,
4447 CMP_LT = 2,
4448 CMP_GT = 4,
4449 CMP_LTU = 8,
4450 CMP_GTU = 16
4451 };
4452
4453
4454 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4455 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4456 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4457 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4458 For floating-point comparisons, assume that the operands were ordered. */
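   For instance, when comparing -1 with 1 the caller passes
   CMP_LT | CMP_GTU (less than when signed, greater than when unsigned),
   so comparison_result returns const_true_rtx for LT and const0_rtx
   for LTU.  */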
4459
4460 static rtx
4461 comparison_result (enum rtx_code code, int known_results)
4462 {
4463 switch (code)
4464 {
4465 case EQ:
4466 case UNEQ:
4467 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4468 case NE:
4469 case LTGT:
4470 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4471
4472 case LT:
4473 case UNLT:
4474 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4475 case GE:
4476 case UNGE:
4477 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4478
4479 case GT:
4480 case UNGT:
4481 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4482 case LE:
4483 case UNLE:
4484 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4485
4486 case LTU:
4487 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4488 case GEU:
4489 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4490
4491 case GTU:
4492 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4493 case LEU:
4494 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4495
4496 case ORDERED:
4497 return const_true_rtx;
4498 case UNORDERED:
4499 return const0_rtx;
4500 default:
4501 gcc_unreachable ();
4502 }
4503 }
4504
4505 /* Check if the given comparison (done in the given MODE) is actually a
4506 tautology or a contradiction.
4507 If no simplification is possible, this function returns zero.
4508 Otherwise, it returns either const_true_rtx or const0_rtx. */
4509
4510 rtx
4511 simplify_const_relational_operation (enum rtx_code code,
4512 enum machine_mode mode,
4513 rtx op0, rtx op1)
4514 {
4515 rtx tem;
4516 rtx trueop0;
4517 rtx trueop1;
4518
4519 gcc_assert (mode != VOIDmode
4520 || (GET_MODE (op0) == VOIDmode
4521 && GET_MODE (op1) == VOIDmode));
4522
4523 /* If op0 is a compare, extract the comparison arguments from it. */
4524 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4525 {
4526 op1 = XEXP (op0, 1);
4527 op0 = XEXP (op0, 0);
4528
4529 if (GET_MODE (op0) != VOIDmode)
4530 mode = GET_MODE (op0);
4531 else if (GET_MODE (op1) != VOIDmode)
4532 mode = GET_MODE (op1);
4533 else
4534 return 0;
4535 }
4536
4537 /* We can't simplify MODE_CC values since we don't know what the
4538 actual comparison is. */
4539 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4540 return 0;
4541
4542 /* Make sure the constant is second. */
4543 if (swap_commutative_operands_p (op0, op1))
4544 {
4545 tem = op0, op0 = op1, op1 = tem;
4546 code = swap_condition (code);
4547 }
4548
4549 trueop0 = avoid_constant_pool_reference (op0);
4550 trueop1 = avoid_constant_pool_reference (op1);
4551
4552 /* For integer comparisons of A and B we may be able to simplify A - B and
4553 then simplify a comparison of that with zero. If A and B are both either
4554 a register or a CONST_INT, this can't help; testing for these cases will
4555 prevent infinite recursion here and speed things up.
4556
4557 We can only do this for EQ and NE comparisons as otherwise we may
4558 lose or introduce overflow which we cannot disregard as undefined, since
4559 we do not know the signedness of the operation on either the left or
4560 the right hand side of the comparison. */
4561
4562 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4563 && (code == EQ || code == NE)
4564 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4565 && (REG_P (op1) || CONST_INT_P (trueop1)))
4566 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4567 /* We cannot do this if tem is a nonzero address. */
4568 && ! nonzero_address_p (tem))
4569 return simplify_const_relational_operation (signed_condition (code),
4570 mode, tem, const0_rtx);
4571
4572 if (! HONOR_NANS (mode) && code == ORDERED)
4573 return const_true_rtx;
4574
4575 if (! HONOR_NANS (mode) && code == UNORDERED)
4576 return const0_rtx;
4577
4578 /* For modes without NaNs, if the two operands are equal, we know the
4579 result except if they have side-effects. Even with NaNs we know
4580 the result of unordered comparisons and, if signaling NaNs are
4581 irrelevant, also the result of LT/GT/LTGT. */
4582 if ((! HONOR_NANS (GET_MODE (trueop0))
4583 || code == UNEQ || code == UNLE || code == UNGE
4584 || ((code == LT || code == GT || code == LTGT)
4585 && ! HONOR_SNANS (GET_MODE (trueop0))))
4586 && rtx_equal_p (trueop0, trueop1)
4587 && ! side_effects_p (trueop0))
4588 return comparison_result (code, CMP_EQ);
4589
4590 /* If the operands are floating-point constants, see if we can fold
4591 the result. */
4592 if (GET_CODE (trueop0) == CONST_DOUBLE
4593 && GET_CODE (trueop1) == CONST_DOUBLE
4594 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4595 {
4596 REAL_VALUE_TYPE d0, d1;
4597
4598 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4599 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4600
4601 /* Comparisons are unordered iff at least one of the values is NaN. */
4602 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4603 switch (code)
4604 {
4605 case UNEQ:
4606 case UNLT:
4607 case UNGT:
4608 case UNLE:
4609 case UNGE:
4610 case NE:
4611 case UNORDERED:
4612 return const_true_rtx;
4613 case EQ:
4614 case LT:
4615 case GT:
4616 case LE:
4617 case GE:
4618 case LTGT:
4619 case ORDERED:
4620 return const0_rtx;
4621 default:
4622 return 0;
4623 }
4624
4625 return comparison_result (code,
4626 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4627 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4628 }
4629
4630 /* Otherwise, see if the operands are both integers. */
4631 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4632 && (GET_CODE (trueop0) == CONST_DOUBLE
4633 || CONST_INT_P (trueop0))
4634 && (GET_CODE (trueop1) == CONST_DOUBLE
4635 || CONST_INT_P (trueop1)))
4636 {
4637 int width = GET_MODE_PRECISION (mode);
4638 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4639 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4640
4641 /* Get the two words comprising each integer constant. */
4642 if (GET_CODE (trueop0) == CONST_DOUBLE)
4643 {
4644 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4645 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4646 }
4647 else
4648 {
4649 l0u = l0s = INTVAL (trueop0);
4650 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4651 }
4652
4653 if (GET_CODE (trueop1) == CONST_DOUBLE)
4654 {
4655 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4656 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4657 }
4658 else
4659 {
4660 l1u = l1s = INTVAL (trueop1);
4661 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4662 }
4663
4664 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4665 we have to sign or zero-extend the values. */
4666 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4667 {
4668 l0u &= GET_MODE_MASK (mode);
4669 l1u &= GET_MODE_MASK (mode);
4670
4671 if (val_signbit_known_set_p (mode, l0s))
4672 l0s |= ~GET_MODE_MASK (mode);
4673
4674 if (val_signbit_known_set_p (mode, l1s))
4675 l1s |= ~GET_MODE_MASK (mode);
4676 }
4677 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4678 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4679
4680 if (h0u == h1u && l0u == l1u)
4681 return comparison_result (code, CMP_EQ);
4682 else
4683 {
4684 int cr;
4685 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4686 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4687 return comparison_result (code, cr);
4688 }
4689 }
4690
4691 /* Optimize comparisons with upper and lower bounds. */
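  /* A sketch of the idea: if nonzero_bits shows that op0 can only be
     0..15 (say op0 is (and:SI x (const_int 15))), then
     (gtu op0 (const_int 15)) is known false and (leu op0 (const_int 15))
     is known true, without knowing anything else about x.  */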
4692 if (HWI_COMPUTABLE_MODE_P (mode)
4693 && CONST_INT_P (trueop1))
4694 {
4695 int sign;
4696 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4697 HOST_WIDE_INT val = INTVAL (trueop1);
4698 HOST_WIDE_INT mmin, mmax;
4699
4700 if (code == GEU
4701 || code == LEU
4702 || code == GTU
4703 || code == LTU)
4704 sign = 0;
4705 else
4706 sign = 1;
4707
4708 /* Get a reduced range if the sign bit is zero. */
4709 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4710 {
4711 mmin = 0;
4712 mmax = nonzero;
4713 }
4714 else
4715 {
4716 rtx mmin_rtx, mmax_rtx;
4717 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4718
4719 mmin = INTVAL (mmin_rtx);
4720 mmax = INTVAL (mmax_rtx);
4721 if (sign)
4722 {
4723 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4724
4725 mmin >>= (sign_copies - 1);
4726 mmax >>= (sign_copies - 1);
4727 }
4728 }
4729
4730 switch (code)
4731 {
4732 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4733 case GEU:
4734 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4735 return const_true_rtx;
4736 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4737 return const0_rtx;
4738 break;
4739 case GE:
4740 if (val <= mmin)
4741 return const_true_rtx;
4742 if (val > mmax)
4743 return const0_rtx;
4744 break;
4745
4746 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4747 case LEU:
4748 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4749 return const_true_rtx;
4750 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4751 return const0_rtx;
4752 break;
4753 case LE:
4754 if (val >= mmax)
4755 return const_true_rtx;
4756 if (val < mmin)
4757 return const0_rtx;
4758 break;
4759
4760 case EQ:
4761 /* x == y is always false for y out of range. */
4762 if (val < mmin || val > mmax)
4763 return const0_rtx;
4764 break;
4765
4766 /* x > y is always false for y >= mmax, always true for y < mmin. */
4767 case GTU:
4768 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4769 return const0_rtx;
4770 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4771 return const_true_rtx;
4772 break;
4773 case GT:
4774 if (val >= mmax)
4775 return const0_rtx;
4776 if (val < mmin)
4777 return const_true_rtx;
4778 break;
4779
4780 /* x < y is always false for y <= mmin, always true for y > mmax. */
4781 case LTU:
4782 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4783 return const0_rtx;
4784 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4785 return const_true_rtx;
4786 break;
4787 case LT:
4788 if (val <= mmin)
4789 return const0_rtx;
4790 if (val > mmax)
4791 return const_true_rtx;
4792 break;
4793
4794 case NE:
4795 /* x != y is always true for y out of range. */
4796 if (val < mmin || val > mmax)
4797 return const_true_rtx;
4798 break;
4799
4800 default:
4801 break;
4802 }
4803 }
4804
4805 /* Optimize integer comparisons with zero. */
4806 if (trueop1 == const0_rtx)
4807 {
4808 /* Some addresses are known to be nonzero. We don't know
4809 their sign, but equality comparisons are known. */
4810 if (nonzero_address_p (trueop0))
4811 {
4812 if (code == EQ || code == LEU)
4813 return const0_rtx;
4814 if (code == NE || code == GTU)
4815 return const_true_rtx;
4816 }
4817
4818 /* See if the first operand is an IOR with a constant. If so, we
4819 may be able to determine the result of this comparison. */
4820 if (GET_CODE (op0) == IOR)
4821 {
4822 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4823 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4824 {
4825 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4826 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4827 && (UINTVAL (inner_const)
4828 & ((unsigned HOST_WIDE_INT) 1
4829 << sign_bitnum)));
4830
4831 switch (code)
4832 {
4833 case EQ:
4834 case LEU:
4835 return const0_rtx;
4836 case NE:
4837 case GTU:
4838 return const_true_rtx;
4839 case LT:
4840 case LE:
4841 if (has_sign)
4842 return const_true_rtx;
4843 break;
4844 case GT:
4845 case GE:
4846 if (has_sign)
4847 return const0_rtx;
4848 break;
4849 default:
4850 break;
4851 }
4852 }
4853 }
4854 }
4855
4856 /* Optimize comparison of ABS with zero. */
4857 if (trueop1 == CONST0_RTX (mode)
4858 && (GET_CODE (trueop0) == ABS
4859 || (GET_CODE (trueop0) == FLOAT_EXTEND
4860 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4861 {
4862 switch (code)
4863 {
4864 case LT:
4865 /* Optimize abs(x) < 0.0. */
4866 if (!HONOR_SNANS (mode)
4867 && (!INTEGRAL_MODE_P (mode)
4868 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4869 {
4870 if (INTEGRAL_MODE_P (mode)
4871 && (issue_strict_overflow_warning
4872 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4873 warning (OPT_Wstrict_overflow,
4874 ("assuming signed overflow does not occur when "
4875 "assuming abs (x) < 0 is false"));
4876 return const0_rtx;
4877 }
4878 break;
4879
4880 case GE:
4881 /* Optimize abs(x) >= 0.0. */
4882 if (!HONOR_NANS (mode)
4883 && (!INTEGRAL_MODE_P (mode)
4884 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4885 {
4886 if (INTEGRAL_MODE_P (mode)
4887 && (issue_strict_overflow_warning
4888 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4889 warning (OPT_Wstrict_overflow,
4890 ("assuming signed overflow does not occur when "
4891 "assuming abs (x) >= 0 is true"));
4892 return const_true_rtx;
4893 }
4894 break;
4895
4896 case UNGE:
4897 /* Optimize ! (abs(x) < 0.0). */
4898 return const_true_rtx;
4899
4900 default:
4901 break;
4902 }
4903 }
4904
4905 return 0;
4906 }
4907 \f
4908 /* Simplify CODE, an operation with result mode MODE and three operands,
4909 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4910 a constant. Return 0 if no simplification is possible. */
4911
4912 rtx
4913 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4914 enum machine_mode op0_mode, rtx op0, rtx op1,
4915 rtx op2)
4916 {
4917 unsigned int width = GET_MODE_PRECISION (mode);
4918 bool any_change = false;
4919 rtx tem;
4920
4921 /* VOIDmode means "infinite" precision. */
4922 if (width == 0)
4923 width = HOST_BITS_PER_WIDE_INT;
4924
4925 switch (code)
4926 {
4927 case FMA:
4928 /* Simplify negations around the multiplication. */
4929 /* -a * -b + c => a * b + c. */
4930 if (GET_CODE (op0) == NEG)
4931 {
4932 tem = simplify_unary_operation (NEG, mode, op1, mode);
4933 if (tem)
4934 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4935 }
4936 else if (GET_CODE (op1) == NEG)
4937 {
4938 tem = simplify_unary_operation (NEG, mode, op0, mode);
4939 if (tem)
4940 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4941 }
4942
4943 /* Canonicalize the two multiplication operands. */
4944 /* a * -b + c => -b * a + c. */
4945 if (swap_commutative_operands_p (op0, op1))
4946 tem = op0, op0 = op1, op1 = tem, any_change = true;
4947
4948 if (any_change)
4949 return gen_rtx_FMA (mode, op0, op1, op2);
4950 return NULL_RTX;
4951
4952 case SIGN_EXTRACT:
4953 case ZERO_EXTRACT:
4954 if (CONST_INT_P (op0)
4955 && CONST_INT_P (op1)
4956 && CONST_INT_P (op2)
4957 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4958 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4959 {
4960 /* Extracting a bit-field from a constant. */
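	  /* Sketch, assuming BITS_BIG_ENDIAN is 0: extracting the four bits
	     at position 4 from (const_int 0xa5) yields 0xa for ZERO_EXTRACT
	     and -6 for SIGN_EXTRACT, since the field's top bit is set.  */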
4961 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4962 HOST_WIDE_INT op1val = INTVAL (op1);
4963 HOST_WIDE_INT op2val = INTVAL (op2);
4964 if (BITS_BIG_ENDIAN)
4965 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4966 else
4967 val >>= op2val;
4968
4969 if (HOST_BITS_PER_WIDE_INT != op1val)
4970 {
4971 /* First zero-extend. */
4972 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4973 /* If desired, propagate sign bit. */
4974 if (code == SIGN_EXTRACT
4975 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4976 != 0)
4977 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
4978 }
4979
4980 return gen_int_mode (val, mode);
4981 }
4982 break;
4983
4984 case IF_THEN_ELSE:
4985 if (CONST_INT_P (op0))
4986 return op0 != const0_rtx ? op1 : op2;
4987
4988 /* Convert c ? a : a into "a". */
4989 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4990 return op1;
4991
4992 /* Convert a != b ? a : b into "a". */
4993 if (GET_CODE (op0) == NE
4994 && ! side_effects_p (op0)
4995 && ! HONOR_NANS (mode)
4996 && ! HONOR_SIGNED_ZEROS (mode)
4997 && ((rtx_equal_p (XEXP (op0, 0), op1)
4998 && rtx_equal_p (XEXP (op0, 1), op2))
4999 || (rtx_equal_p (XEXP (op0, 0), op2)
5000 && rtx_equal_p (XEXP (op0, 1), op1))))
5001 return op1;
5002
5003 /* Convert a == b ? a : b into "b". */
5004 if (GET_CODE (op0) == EQ
5005 && ! side_effects_p (op0)
5006 && ! HONOR_NANS (mode)
5007 && ! HONOR_SIGNED_ZEROS (mode)
5008 && ((rtx_equal_p (XEXP (op0, 0), op1)
5009 && rtx_equal_p (XEXP (op0, 1), op2))
5010 || (rtx_equal_p (XEXP (op0, 0), op2)
5011 && rtx_equal_p (XEXP (op0, 1), op1))))
5012 return op2;
5013
5014 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5015 {
5016 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5017 ? GET_MODE (XEXP (op0, 1))
5018 : GET_MODE (XEXP (op0, 0)));
5019 rtx temp;
5020
5021 /* Look for constants in OP1 and OP2 that let us reduce this to a bare comparison. */
5022 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5023 {
5024 HOST_WIDE_INT t = INTVAL (op1);
5025 HOST_WIDE_INT f = INTVAL (op2);
5026
5027 if (t == STORE_FLAG_VALUE && f == 0)
5028 code = GET_CODE (op0);
5029 else if (t == 0 && f == STORE_FLAG_VALUE)
5030 {
5031 enum rtx_code tmp;
5032 tmp = reversed_comparison_code (op0, NULL_RTX);
5033 if (tmp == UNKNOWN)
5034 break;
5035 code = tmp;
5036 }
5037 else
5038 break;
5039
5040 return simplify_gen_relational (code, mode, cmp_mode,
5041 XEXP (op0, 0), XEXP (op0, 1));
5042 }
5043
5044 if (cmp_mode == VOIDmode)
5045 cmp_mode = op0_mode;
5046 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5047 cmp_mode, XEXP (op0, 0),
5048 XEXP (op0, 1));
5049
5050 /* See if any simplifications were possible. */
5051 if (temp)
5052 {
5053 if (CONST_INT_P (temp))
5054 return temp == const0_rtx ? op2 : op1;
5055 else if (temp)
5056 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5057 }
5058 }
5059 break;
5060
5061 case VEC_MERGE:
5062 gcc_assert (GET_MODE (op0) == mode);
5063 gcc_assert (GET_MODE (op1) == mode);
5064 gcc_assert (VECTOR_MODE_P (mode));
5065 op2 = avoid_constant_pool_reference (op2);
5066 if (CONST_INT_P (op2))
5067 {
5068 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5069 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5070 int mask = (1 << n_elts) - 1;
5071
5072 if (!(INTVAL (op2) & mask))
5073 return op1;
5074 if ((INTVAL (op2) & mask) == mask)
5075 return op0;
5076
5077 op0 = avoid_constant_pool_reference (op0);
5078 op1 = avoid_constant_pool_reference (op1);
5079 if (GET_CODE (op0) == CONST_VECTOR
5080 && GET_CODE (op1) == CONST_VECTOR)
5081 {
5082 rtvec v = rtvec_alloc (n_elts);
5083 unsigned int i;
5084
5085 for (i = 0; i < n_elts; i++)
5086 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5087 ? CONST_VECTOR_ELT (op0, i)
5088 : CONST_VECTOR_ELT (op1, i));
5089 return gen_rtx_CONST_VECTOR (mode, v);
5090 }
5091 }
5092 break;
5093
5094 default:
5095 gcc_unreachable ();
5096 }
5097
5098 return 0;
5099 }
5100
5101 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5102 or CONST_VECTOR,
5103 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5104
5105 Works by unpacking OP into a collection of 8-bit values
5106 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5107 and then repacking them again for OUTERMODE. */
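   As a rough example, assuming 32-bit SImode on a little-endian,
   32-bit-word target: a SUBREG:HI of (const_int 0x12345678) at byte 2
   selects the two most significant bytes and yields (const_int 0x1234),
   while byte 0 yields (const_int 0x5678).  */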
5108
5109 static rtx
5110 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5111 enum machine_mode innermode, unsigned int byte)
5112 {
5113 /* We support up to 512-bit values (for V8DFmode). */
5114 enum {
5115 max_bitsize = 512,
5116 value_bit = 8,
5117 value_mask = (1 << value_bit) - 1
5118 };
5119 unsigned char value[max_bitsize / value_bit];
5120 int value_start;
5121 int i;
5122 int elem;
5123
5124 int num_elem;
5125 rtx * elems;
5126 int elem_bitsize;
5127 rtx result_s;
5128 rtvec result_v = NULL;
5129 enum mode_class outer_class;
5130 enum machine_mode outer_submode;
5131
5132 /* Some ports misuse CCmode. */
5133 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5134 return op;
5135
5136 /* We have no way to represent a complex constant at the rtl level. */
5137 if (COMPLEX_MODE_P (outermode))
5138 return NULL_RTX;
5139
5140 /* Unpack the value. */
5141
5142 if (GET_CODE (op) == CONST_VECTOR)
5143 {
5144 num_elem = CONST_VECTOR_NUNITS (op);
5145 elems = &CONST_VECTOR_ELT (op, 0);
5146 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5147 }
5148 else
5149 {
5150 num_elem = 1;
5151 elems = &op;
5152 elem_bitsize = max_bitsize;
5153 }
5154 /* If this asserts, it is too complicated; reducing value_bit may help. */
5155 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5156 /* I don't know how to handle endianness of sub-units. */
5157 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5158
5159 for (elem = 0; elem < num_elem; elem++)
5160 {
5161 unsigned char * vp;
5162 rtx el = elems[elem];
5163
5164 /* Vectors are kept in target memory order. (This is probably
5165 a mistake.) */
5166 {
5167 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5168 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5169 / BITS_PER_UNIT);
5170 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5171 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5172 unsigned bytele = (subword_byte % UNITS_PER_WORD
5173 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5174 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5175 }
5176
5177 switch (GET_CODE (el))
5178 {
5179 case CONST_INT:
5180 for (i = 0;
5181 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5182 i += value_bit)
5183 *vp++ = INTVAL (el) >> i;
5184 /* CONST_INTs are always logically sign-extended. */
5185 for (; i < elem_bitsize; i += value_bit)
5186 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5187 break;
5188
5189 case CONST_DOUBLE:
5190 if (GET_MODE (el) == VOIDmode)
5191 {
5192 unsigned char extend = 0;
5193 /* If this triggers, someone should have generated a
5194 CONST_INT instead. */
5195 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5196
5197 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5198 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5199 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
5200 {
5201 *vp++
5202 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5203 i += value_bit;
5204 }
5205
5206 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5207 extend = -1;
5208 for (; i < elem_bitsize; i += value_bit)
5209 *vp++ = extend;
5210 }
5211 else
5212 {
5213 long tmp[max_bitsize / 32];
5214 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5215
5216 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5217 gcc_assert (bitsize <= elem_bitsize);
5218 gcc_assert (bitsize % value_bit == 0);
5219
5220 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5221 GET_MODE (el));
5222
5223 /* real_to_target produces its result in words affected by
5224 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5225 and use WORDS_BIG_ENDIAN instead; see the documentation
5226 of SUBREG in rtl.texi. */
5227 for (i = 0; i < bitsize; i += value_bit)
5228 {
5229 int ibase;
5230 if (WORDS_BIG_ENDIAN)
5231 ibase = bitsize - 1 - i;
5232 else
5233 ibase = i;
5234 *vp++ = tmp[ibase / 32] >> i % 32;
5235 }
5236
5237 /* It shouldn't matter what's done here, so fill it with
5238 zero. */
5239 for (; i < elem_bitsize; i += value_bit)
5240 *vp++ = 0;
5241 }
5242 break;
5243
5244 case CONST_FIXED:
5245 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5246 {
5247 for (i = 0; i < elem_bitsize; i += value_bit)
5248 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5249 }
5250 else
5251 {
5252 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5253 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5254 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5255 i += value_bit)
5256 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5257 >> (i - HOST_BITS_PER_WIDE_INT);
5258 for (; i < elem_bitsize; i += value_bit)
5259 *vp++ = 0;
5260 }
5261 break;
5262
5263 default:
5264 gcc_unreachable ();
5265 }
5266 }
5267
5268 /* Now, pick the right byte to start with. */
5269 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5270 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5271 will already have offset 0. */
5272 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5273 {
5274 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5275 - byte);
5276 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5277 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5278 byte = (subword_byte % UNITS_PER_WORD
5279 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5280 }
5281
5282 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5283 so if it's become negative it will instead be very large.) */
5284 gcc_assert (byte < GET_MODE_SIZE (innermode));
5285
5286 /* Convert from bytes to chunks of size value_bit. */
5287 value_start = byte * (BITS_PER_UNIT / value_bit);
5288
5289 /* Re-pack the value. */
5290
5291 if (VECTOR_MODE_P (outermode))
5292 {
5293 num_elem = GET_MODE_NUNITS (outermode);
5294 result_v = rtvec_alloc (num_elem);
5295 elems = &RTVEC_ELT (result_v, 0);
5296 outer_submode = GET_MODE_INNER (outermode);
5297 }
5298 else
5299 {
5300 num_elem = 1;
5301 elems = &result_s;
5302 outer_submode = outermode;
5303 }
5304
5305 outer_class = GET_MODE_CLASS (outer_submode);
5306 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5307
5308 gcc_assert (elem_bitsize % value_bit == 0);
5309 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5310
5311 for (elem = 0; elem < num_elem; elem++)
5312 {
5313 unsigned char *vp;
5314
5315 /* Vectors are stored in target memory order. (This is probably
5316 a mistake.) */
5317 {
5318 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5319 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5320 / BITS_PER_UNIT);
5321 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5322 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5323 unsigned bytele = (subword_byte % UNITS_PER_WORD
5324 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5325 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5326 }
5327
5328 switch (outer_class)
5329 {
5330 case MODE_INT:
5331 case MODE_PARTIAL_INT:
5332 {
5333 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5334
5335 for (i = 0;
5336 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5337 i += value_bit)
5338 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5339 for (; i < elem_bitsize; i += value_bit)
5340 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5341 << (i - HOST_BITS_PER_WIDE_INT);
5342
5343 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5344 know why. */
5345 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5346 elems[elem] = gen_int_mode (lo, outer_submode);
5347 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5348 elems[elem] = immed_double_const (lo, hi, outer_submode);
5349 else
5350 return NULL_RTX;
5351 }
5352 break;
5353
5354 case MODE_FLOAT:
5355 case MODE_DECIMAL_FLOAT:
5356 {
5357 REAL_VALUE_TYPE r;
5358 long tmp[max_bitsize / 32];
5359
5360 /* real_from_target wants its input in words affected by
5361 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5362 and use WORDS_BIG_ENDIAN instead; see the documentation
5363 of SUBREG in rtl.texi. */
5364 for (i = 0; i < max_bitsize / 32; i++)
5365 tmp[i] = 0;
5366 for (i = 0; i < elem_bitsize; i += value_bit)
5367 {
5368 int ibase;
5369 if (WORDS_BIG_ENDIAN)
5370 ibase = elem_bitsize - 1 - i;
5371 else
5372 ibase = i;
5373 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5374 }
5375
5376 real_from_target (&r, tmp, outer_submode);
5377 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5378 }
5379 break;
5380
5381 case MODE_FRACT:
5382 case MODE_UFRACT:
5383 case MODE_ACCUM:
5384 case MODE_UACCUM:
5385 {
5386 FIXED_VALUE_TYPE f;
5387 f.data.low = 0;
5388 f.data.high = 0;
5389 f.mode = outer_submode;
5390
5391 for (i = 0;
5392 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5393 i += value_bit)
5394 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5395 for (; i < elem_bitsize; i += value_bit)
5396 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5397 << (i - HOST_BITS_PER_WIDE_INT));
5398
5399 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5400 }
5401 break;
5402
5403 default:
5404 gcc_unreachable ();
5405 }
5406 }
5407 if (VECTOR_MODE_P (outermode))
5408 return gen_rtx_CONST_VECTOR (outermode, result_v);
5409 else
5410 return result_s;
5411 }
5412
5413 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5414 Return 0 if no simplifications are possible. */
5415 rtx
5416 simplify_subreg (enum machine_mode outermode, rtx op,
5417 enum machine_mode innermode, unsigned int byte)
5418 {
5419 /* Little bit of sanity checking. */
5420 gcc_assert (innermode != VOIDmode);
5421 gcc_assert (outermode != VOIDmode);
5422 gcc_assert (innermode != BLKmode);
5423 gcc_assert (outermode != BLKmode);
5424
5425 gcc_assert (GET_MODE (op) == innermode
5426 || GET_MODE (op) == VOIDmode);
5427
5428 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5429 gcc_assert (byte < GET_MODE_SIZE (innermode));
5430
5431 if (outermode == innermode && !byte)
5432 return op;
5433
5434 if (CONST_INT_P (op)
5435 || GET_CODE (op) == CONST_DOUBLE
5436 || GET_CODE (op) == CONST_FIXED
5437 || GET_CODE (op) == CONST_VECTOR)
5438 return simplify_immed_subreg (outermode, op, innermode, byte);
5439
5440 /* Changing mode twice with SUBREG => just change it once,
5441 or not at all if changing back to the starting mode of op. */
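  /* For example, (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses to
     (subreg:QI (reg:SI R) 0): the two byte offsets are simply added,
     after first undoing the paradoxical-subreg adjustments below.  */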
5442 if (GET_CODE (op) == SUBREG)
5443 {
5444 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5445 int final_offset = byte + SUBREG_BYTE (op);
5446 rtx newx;
5447
5448 if (outermode == innermostmode
5449 && byte == 0 && SUBREG_BYTE (op) == 0)
5450 return SUBREG_REG (op);
5451
5452 /* The SUBREG_BYTE represents the offset, as if the value were stored
5453 in memory. The irritating exception is a paradoxical subreg, where
5454 we define SUBREG_BYTE to be 0; on big-endian machines this
5455 value would otherwise be negative. For the moment, undo this exception. */
5456 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5457 {
5458 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5459 if (WORDS_BIG_ENDIAN)
5460 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5461 if (BYTES_BIG_ENDIAN)
5462 final_offset += difference % UNITS_PER_WORD;
5463 }
5464 if (SUBREG_BYTE (op) == 0
5465 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5466 {
5467 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5468 if (WORDS_BIG_ENDIAN)
5469 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5470 if (BYTES_BIG_ENDIAN)
5471 final_offset += difference % UNITS_PER_WORD;
5472 }
5473
5474 /* See whether resulting subreg will be paradoxical. */
5475 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5476 {
5477 /* In nonparadoxical subregs we can't handle negative offsets. */
5478 if (final_offset < 0)
5479 return NULL_RTX;
5480 /* Bail out in case the resulting subreg would be incorrect. */
5481 if (final_offset % GET_MODE_SIZE (outermode)
5482 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5483 return NULL_RTX;
5484 }
5485 else
5486 {
5487 int offset = 0;
5488 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5489
5490 /* In a paradoxical subreg, see if we are still looking at the lower part.
5491 If so, our SUBREG_BYTE will be 0. */
5492 if (WORDS_BIG_ENDIAN)
5493 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5494 if (BYTES_BIG_ENDIAN)
5495 offset += difference % UNITS_PER_WORD;
5496 if (offset == final_offset)
5497 final_offset = 0;
5498 else
5499 return NULL_RTX;
5500 }
5501
5502 /* Recurse for further possible simplifications. */
5503 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5504 final_offset);
5505 if (newx)
5506 return newx;
5507 if (validate_subreg (outermode, innermostmode,
5508 SUBREG_REG (op), final_offset))
5509 {
5510 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5511 if (SUBREG_PROMOTED_VAR_P (op)
5512 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5513 && GET_MODE_CLASS (outermode) == MODE_INT
5514 && IN_RANGE (GET_MODE_SIZE (outermode),
5515 GET_MODE_SIZE (innermode),
5516 GET_MODE_SIZE (innermostmode))
5517 && subreg_lowpart_p (newx))
5518 {
5519 SUBREG_PROMOTED_VAR_P (newx) = 1;
5520 SUBREG_PROMOTED_UNSIGNED_SET
5521 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5522 }
5523 return newx;
5524 }
5525 return NULL_RTX;
5526 }
5527
5528 /* Merge implicit and explicit truncations. */
5529
5530 if (GET_CODE (op) == TRUNCATE
5531 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5532 && subreg_lowpart_offset (outermode, innermode) == byte)
5533 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5534 GET_MODE (XEXP (op, 0)));
5535
5536 /* SUBREG of a hard register => just change the register number
5537 and/or mode. If the hard register is not valid in that mode,
5538 suppress this simplification. If the hard register is the stack,
5539 frame, or argument pointer, leave this as a SUBREG. */
5540
5541 if (REG_P (op) && HARD_REGISTER_P (op))
5542 {
5543 unsigned int regno, final_regno;
5544
5545 regno = REGNO (op);
5546 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5547 if (HARD_REGISTER_NUM_P (final_regno))
5548 {
5549 rtx x;
5550 int final_offset = byte;
5551
5552 /* Adjust offset for paradoxical subregs. */
5553 if (byte == 0
5554 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5555 {
5556 int difference = (GET_MODE_SIZE (innermode)
5557 - GET_MODE_SIZE (outermode));
5558 if (WORDS_BIG_ENDIAN)
5559 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5560 if (BYTES_BIG_ENDIAN)
5561 final_offset += difference % UNITS_PER_WORD;
5562 }
5563
5564 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5565
5566 /* Propagate the original regno. We don't have any way to specify
5567 an offset inside the original regno, so do so only for the lowpart.
5568 The information is used only by alias analysis, which cannot
5569 grok partial registers anyway. */
5570
5571 if (subreg_lowpart_offset (outermode, innermode) == byte)
5572 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5573 return x;
5574 }
5575 }
5576
5577 /* If we have a SUBREG of a register that we are replacing and we are
5578 replacing it with a MEM, make a new MEM and try replacing the
5579 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5580 or if we would be widening it. */
5581
5582 if (MEM_P (op)
5583 && ! mode_dependent_address_p (XEXP (op, 0))
5584 /* Allow splitting of volatile memory references in case we don't
5585 have an instruction to move the whole thing. */
5586 && (! MEM_VOLATILE_P (op)
5587 || ! have_insn_for (SET, innermode))
5588 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5589 return adjust_address_nv (op, outermode, byte);
5590
5591 /* Handle complex values represented as CONCAT
5592 of real and imaginary part. */
5593 if (GET_CODE (op) == CONCAT)
5594 {
5595 unsigned int part_size, final_offset;
5596 rtx part, res;
5597
5598 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5599 if (byte < part_size)
5600 {
5601 part = XEXP (op, 0);
5602 final_offset = byte;
5603 }
5604 else
5605 {
5606 part = XEXP (op, 1);
5607 final_offset = byte - part_size;
5608 }
5609
5610 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5611 return NULL_RTX;
5612
5613 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5614 if (res)
5615 return res;
5616 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5617 return gen_rtx_SUBREG (outermode, part, final_offset);
5618 return NULL_RTX;
5619 }
5620
5621 /* Optimize SUBREG truncations of zero and sign extended values. */
5622 if ((GET_CODE (op) == ZERO_EXTEND
5623 || GET_CODE (op) == SIGN_EXTEND)
5624 && SCALAR_INT_MODE_P (innermode)
5625 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5626 {
5627 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5628
5629 /* If we're requesting the lowpart of a zero or sign extension,
5630 there are three possibilities. If the outermode is the same
5631 as the origmode, we can omit both the extension and the subreg.
5632 If the outermode is not larger than the origmode, we can apply
5633 the truncation without the extension. Finally, if the outermode
5634 is larger than the origmode, but both are integer modes, we
5635 can just extend to the appropriate mode. */
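/* For example, with the subreg selecting the lowpart:
(subreg:HI (zero_extend:SI (reg:HI X)) 0) is just (reg:HI X),
(subreg:QI (zero_extend:SI (reg:HI X)) 0) becomes the QImode
lowpart of X, and (subreg:SI (zero_extend:DI (reg:HI X)) 0)
becomes (zero_extend:SI (reg:HI X)). */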
5636 if (bitpos == 0)
5637 {
5638 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5639 if (outermode == origmode)
5640 return XEXP (op, 0);
5641 if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5642 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5643 subreg_lowpart_offset (outermode,
5644 origmode));
5645 if (SCALAR_INT_MODE_P (outermode))
5646 return simplify_gen_unary (GET_CODE (op), outermode,
5647 XEXP (op, 0), origmode);
5648 }
5649
5650 /* A SUBREG resulting from a zero extension may fold to zero if
5651 it extracts higher bits than the ZERO_EXTEND's source provides. */
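/* For example, on a little-endian target,
(subreg:SI (zero_extend:DI (reg:QI X)) 4) reads only bits that the
zero extension guarantees to be zero, so it folds to (const_int 0). */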
5652 if (GET_CODE (op) == ZERO_EXTEND
5653 && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5654 return CONST0_RTX (outermode);
5655 }
5656
5657 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5658 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5659 the outer subreg is effectively a truncation to the original mode. */
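/* For example, (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI X))
(const_int 3)) 0) becomes (ashiftrt:QI (reg:QI X) (const_int 3)),
assuming the subreg selects the lowpart. */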
5660 if ((GET_CODE (op) == LSHIFTRT
5661 || GET_CODE (op) == ASHIFTRT)
5662 && SCALAR_INT_MODE_P (outermode)
5663 && SCALAR_INT_MODE_P (innermode)
5664 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
5665 to avoid the possibility that an outer LSHIFTRT shifts by more
5666 than the sign extension's sign_bit_copies and introduces zeros
5667 into the high bits of the result. */
5668 && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5669 && CONST_INT_P (XEXP (op, 1))
5670 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5671 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5672 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5673 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5674 return simplify_gen_binary (ASHIFTRT, outermode,
5675 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5676
5677 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5678 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5679 the outer subreg is effectively a truncation to the original mode. */
5680 if ((GET_CODE (op) == LSHIFTRT
5681 || GET_CODE (op) == ASHIFTRT)
5682 && SCALAR_INT_MODE_P (outermode)
5683 && SCALAR_INT_MODE_P (innermode)
5684 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5685 && CONST_INT_P (XEXP (op, 1))
5686 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5687 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5688 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5689 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5690 return simplify_gen_binary (LSHIFTRT, outermode,
5691 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5692
5693 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5694 (ashift:QI (x:QI) C), where C is a suitable small constant and
5695 the outer subreg is effectively a truncation to the original mode. */
5696 if (GET_CODE (op) == ASHIFT
5697 && SCALAR_INT_MODE_P (outermode)
5698 && SCALAR_INT_MODE_P (innermode)
5699 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5700 && CONST_INT_P (XEXP (op, 1))
5701 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5702 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5703 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5704 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5705 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5706 return simplify_gen_binary (ASHIFT, outermode,
5707 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5708
5709 /* Recognize a word extraction from a multi-word subreg. */
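/* For example, on a 32-bit little-endian target,
(subreg:SI (lshiftrt:DI (reg:DI X) (const_int 32)) 0) extracts the
high word of X and becomes (subreg:SI (reg:DI X) 4). */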
5710 if ((GET_CODE (op) == LSHIFTRT
5711 || GET_CODE (op) == ASHIFTRT)
5712 && SCALAR_INT_MODE_P (innermode)
5713 && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5714 && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5715 && CONST_INT_P (XEXP (op, 1))
5716 && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5717 && INTVAL (XEXP (op, 1)) >= 0
5718 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5719 && byte == subreg_lowpart_offset (outermode, innermode))
5720 {
5721 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5722 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5723 (WORDS_BIG_ENDIAN
5724 ? byte - shifted_bytes
5725 : byte + shifted_bytes));
5726 }
5727
5728 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5729 and try replacing the SUBREG and shift with it. Don't do this if
5730 the MEM has a mode-dependent address or if we would be widening it. */
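/* For example, on a 32-bit little-endian target,
(subreg:SI (lshiftrt:DI (mem:DI ADDR) (const_int 32)) 0) can be
rewritten as an SImode MEM four bytes past ADDR, provided the
address is not mode-dependent and the MEM is not volatile. */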
5731
5732 if ((GET_CODE (op) == LSHIFTRT
5733 || GET_CODE (op) == ASHIFTRT)
5734 && SCALAR_INT_MODE_P (innermode)
5735 && MEM_P (XEXP (op, 0))
5736 && CONST_INT_P (XEXP (op, 1))
5737 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5738 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5739 && INTVAL (XEXP (op, 1)) > 0
5740 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5741 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5742 && ! MEM_VOLATILE_P (XEXP (op, 0))
5743 && byte == subreg_lowpart_offset (outermode, innermode)
5744 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5745 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5746 {
5747 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5748 return adjust_address_nv (XEXP (op, 0), outermode,
5749 (WORDS_BIG_ENDIAN
5750 ? byte - shifted_bytes
5751 : byte + shifted_bytes));
5752 }
5753
5754 return NULL_RTX;
5755 }
5756
5757 /* Make a SUBREG operation or equivalent if it folds. */
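/* Try simplify_subreg first; if that fails and OP is neither a SUBREG,
a CONCAT, nor a VOIDmode rtx, fall back to an explicit SUBREG when
validate_subreg accepts the combination. */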
5758
5759 rtx
5760 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5761 enum machine_mode innermode, unsigned int byte)
5762 {
5763 rtx newx;
5764
5765 newx = simplify_subreg (outermode, op, innermode, byte);
5766 if (newx)
5767 return newx;
5768
5769 if (GET_CODE (op) == SUBREG
5770 || GET_CODE (op) == CONCAT
5771 || GET_MODE (op) == VOIDmode)
5772 return NULL_RTX;
5773
5774 if (validate_subreg (outermode, innermode, op, byte))
5775 return gen_rtx_SUBREG (outermode, op, byte);
5776
5777 return NULL_RTX;
5778 }
5779
5780 /* Simplify X, an rtx expression.
5781
5782 Return the simplified expression or NULL if no simplifications
5783 were possible.
5784
5785 This is the preferred entry point into the simplification routines;
5786 however, we still allow passes to call the more specific routines.
5787
5788 Right now GCC has three (yes, three) major bodies of RTL simplification
5789 code that need to be unified.
5790
5791 1. fold_rtx in cse.c. This code uses various CSE specific
5792 information to aid in RTL simplification.
5793
5794 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5795 it uses combine specific information to aid in RTL
5796 simplification.
5797
5798 3. The routines in this file.
5799
5800
5801 Long term we want to only have one body of simplification code; to
5802 get to that state I recommend the following steps:
5803
5804 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5805 which do not depend on pass-specific state into these routines.
5806
5807 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5808 use this routine whenever possible.
5809
5810 3. Allow for pass dependent state to be provided to these
5811 routines and add simplifications based on the pass dependent
5812 state. Remove code from cse.c & combine.c that becomes
5813 redundant/dead.
5814
5815 It will take time, but ultimately the compiler will be easier to
5816 maintain and improve. It's totally silly that when we add a
5817 simplification, it needs to be added to 4 places (3 for RTL
5818 simplification and 1 for tree simplification). */
5819
5820 rtx
5821 simplify_rtx (const_rtx x)
5822 {
5823 const enum rtx_code code = GET_CODE (x);
5824 const enum machine_mode mode = GET_MODE (x);
5825
5826 switch (GET_RTX_CLASS (code))
5827 {
5828 case RTX_UNARY:
5829 return simplify_unary_operation (code, mode,
5830 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5831 case RTX_COMM_ARITH:
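/* Put the operands of a commutative operation into canonical order
first; simplify_gen_binary then retries the simplification. */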
5832 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5833 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5834
5835 /* Fall through.... */
5836
5837 case RTX_BIN_ARITH:
5838 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5839
5840 case RTX_TERNARY:
5841 case RTX_BITFIELD_OPS:
5842 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5843 XEXP (x, 0), XEXP (x, 1),
5844 XEXP (x, 2));
5845
5846 case RTX_COMPARE:
5847 case RTX_COMM_COMPARE:
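/* Use the mode of the first operand for the comparison, falling back
to the second operand's mode when the first is VOIDmode (e.g. a
constant). */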
5848 return simplify_relational_operation (code, mode,
5849 ((GET_MODE (XEXP (x, 0))
5850 != VOIDmode)
5851 ? GET_MODE (XEXP (x, 0))
5852 : GET_MODE (XEXP (x, 1))),
5853 XEXP (x, 0),
5854 XEXP (x, 1));
5855
5856 case RTX_EXTRA:
5857 if (code == SUBREG)
5858 return simplify_subreg (mode, SUBREG_REG (x),
5859 GET_MODE (SUBREG_REG (x)),
5860 SUBREG_BYTE (x));
5861 break;
5862
5863 case RTX_OBJ:
5864 if (code == LO_SUM)
5865 {
5866 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5867 if (GET_CODE (XEXP (x, 0)) == HIGH
5868 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5869 return XEXP (x, 1);
5870 }
5871 break;
5872
5873 default:
5874 break;
5875 }
5876 return NULL;
5877 }