1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
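/* For example, if LOW is (unsigned HOST_WIDE_INT) -5, casting it to a
   signed HOST_WIDE_INT is negative, so HWI_SIGN_EXTEND (low) is -1;
   for LOW == 5 it is 0.  The pair (low, HWI_SIGN_EXTEND (low)) thus
   represents LOW sign-extended to a double-width value.  */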
49
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
63 \f
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
68 {
69 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
70 }
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
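/* For example, in SImode this holds for (const_int -2147483648), whose
   masked value is 0x80000000, the SImode sign bit.  */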
74
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x)
93 && CONST_DOUBLE_LOW (x) == 0)
94 {
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
97 }
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
101
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
106
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
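/* For example, val_signbit_p (QImode, 0x80) and val_signbit_p (QImode,
   0xff80) both return true, since only the bits covered by QImode's
   mode mask are considered.  */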
110
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 {
114 unsigned int width;
115
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
118
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
122
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 }
126
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
136
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
140
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
143 }
144
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 {
150 unsigned int width;
151
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
154
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
158
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
161 }
162 \f
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
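/* For example, simplify_gen_binary (PLUS, SImode, X, const0_rtx) simply
   returns X, while simplify_gen_binary (PLUS, SImode, const1_rtx, X) for
   a non-constant X yields (plus:SI X (const_int 1)) with the constant
   placed second.  */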
165
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
169 {
170 rtx tem;
171
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
176
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
181
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
183 }
184 \f
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
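/* For example, a (mem:DF (symbol_ref ...)) whose address points at a
   constant-pool entry holding 1.0 is replaced by the corresponding
   CONST_DOUBLE, which later simplifications can fold further.  */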
187 rtx
188 avoid_constant_pool_reference (rtx x)
189 {
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
193
194 switch (GET_CODE (x))
195 {
196 case MEM:
197 break;
198
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
204 {
205 REAL_VALUE_TYPE d;
206
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 }
210 return x;
211
212 default:
213 return x;
214 }
215
216 if (GET_MODE (x) == BLKmode)
217 return x;
218
219 addr = XEXP (x, 0);
220
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
223
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228 {
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
231 }
232
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
235
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
240 {
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
243
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
248 {
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
252 }
253 else
254 return c;
255 }
256
257 return x;
258 }
259 \f
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
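/* For example, if X is a MEM whose MEM_EXPR is a static VAR_DECL with a
   known MEM_OFFSET, the (possibly legitimized) address is replaced by an
   address based directly on the variable's DECL_RTL.  */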
263
264 rtx
265 delegitimize_mem_from_attrs (rtx x)
266 {
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
272 {
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
276
277 switch (TREE_CODE (decl))
278 {
279 default:
280 decl = NULL;
281 break;
282
283 case VAR_DECL:
284 break;
285
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
293 {
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
297
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
305 {
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
309 }
310 break;
311 }
312 }
313
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
321 {
322 rtx newx;
323
324 offset += MEM_OFFSET (x);
325
326 newx = DECL_RTL (decl);
327
328 if (MEM_P (newx))
329 {
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
350 }
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
354 }
355 }
356
357 return x;
358 }
359 \f
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
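/* For example, simplify_gen_unary (NOT, SImode, (not:SI X), SImode)
   folds to X, whereas an operand that does not fold produces a fresh
   (not:SI ...) rtx.  */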
362
363 rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
366 {
367 rtx tem;
368
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
372
373 return gen_rtx_fmt_e (code, mode, op);
374 }
375
376 /* Likewise for ternary operations. */
377
378 rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 {
382 rtx tem;
383
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
388
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
390 }
391
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
394
395 rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 {
399 rtx tem;
400
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
404
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
406 }
407 \f
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
411 result. */
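/* For example, with FN null, replacing (reg:SI 1) by (const_int 8) in
   (plus:SI (reg:SI 1) (const_int 4)) yields (const_int 12), since the
   rebuilt PLUS is folded by simplify_gen_binary.  */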
412
413 rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 {
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
424
425 if (__builtin_expect (fn != NULL, 0))
426 {
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
430 }
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
433
434 switch (GET_RTX_CLASS (code))
435 {
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
443
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
451
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475
476 case RTX_EXTRA:
477 if (code == SUBREG)
478 {
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
486 }
487 break;
488
489 case RTX_OBJ:
490 if (code == MEM)
491 {
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
496 }
497 else if (code == LO_SUM)
498 {
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
505
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
509 }
510 break;
511
512 default:
513 break;
514 }
515
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
520 {
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 {
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
529 {
530 if (newvec == vec)
531 {
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
536 }
537 RTVEC_ELT (newvec, j) = op;
538 }
539 }
540 break;
541
542 case 'e':
543 if (XEXP (x, i))
544 {
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
547 {
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
551 }
552 }
553 break;
554 }
555 return newx;
556 }
557
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
560
561 rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 {
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
565 }
566 \f
567 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
568 Only handle cases where the truncated value is inherently an rvalue.
569
570 RTL provides two ways of truncating a value:
571
572 1. a lowpart subreg. This form is only a truncation when both
573 the outer and inner modes (here MODE and OP_MODE respectively)
574 are scalar integers, and only then when the subreg is used as
575 an rvalue.
576
577 It is only valid to form such truncating subregs if the
578 truncation requires no action by the target. The onus for
579 proving this is on the creator of the subreg -- e.g. the
580 caller to simplify_subreg or simplify_gen_subreg -- and typically
581 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
582
583 2. a TRUNCATE. This form handles both scalar and compound integers.
584
585 The first form is preferred where valid. However, the TRUNCATE
586 handling in simplify_unary_operation turns the second form into the
587 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
588 so it is generally safe to form rvalue truncations using:
589
590 simplify_gen_unary (TRUNCATE, ...)
591
592 and leave simplify_unary_operation to work out which representation
593 should be used.
594
595 Because of the proof requirements on (1), simplify_truncation must
596 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
597 regardless of whether the outer truncation came from a SUBREG or a
598 TRUNCATE. For example, if the caller has proven that an SImode
599 truncation of:
600
601 (and:DI X Y)
602
603 is a no-op and can be represented as a subreg, it does not follow
604 that SImode truncations of X and Y are also no-ops. On a target
605 like 64-bit MIPS that requires SImode values to be stored in
606 sign-extended form, an SImode truncation of:
607
608 (and:DI (reg:DI X) (const_int 63))
609
610 is trivially a no-op because only the lower 6 bits can be set.
611 However, X is still an arbitrary 64-bit number and so we cannot
612 assume that truncating it too is a no-op. */
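/* For example, simplify_truncation folds (truncate:SI (sign_extend:DI X:SI))
   to X, and (truncate:QI (lshiftrt:SI (sign_extend:SI X:QI) (const_int 2)))
   to (ashiftrt:QI X:QI (const_int 2)).  */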
613
614 static rtx
615 simplify_truncation (enum machine_mode mode, rtx op,
616 enum machine_mode op_mode)
617 {
618 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
619 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
620 gcc_assert (precision <= op_precision);
621
622 /* Optimize truncations of zero and sign extended values. */
623 if (GET_CODE (op) == ZERO_EXTEND
624 || GET_CODE (op) == SIGN_EXTEND)
625 {
626 /* There are three possibilities. If MODE is the same as the
627 origmode, we can omit both the extension and the subreg.
628 If MODE is not larger than the origmode, we can apply the
629 truncation without the extension. Finally, if the outermode
630 is larger than the origmode, we can just extend to the appropriate
631 mode. */
632 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
633 if (mode == origmode)
634 return XEXP (op, 0);
635 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
636 return simplify_gen_unary (TRUNCATE, mode,
637 XEXP (op, 0), origmode);
638 else
639 return simplify_gen_unary (GET_CODE (op), mode,
640 XEXP (op, 0), origmode);
641 }
642
643 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
 644 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
645 if (GET_CODE (op) == PLUS
646 || GET_CODE (op) == MINUS
647 || GET_CODE (op) == MULT)
648 {
649 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
650 if (op0)
651 {
652 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
653 if (op1)
654 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
655 }
656 }
657
658 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 659 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
660 the outer subreg is effectively a truncation to the original mode. */
661 if ((GET_CODE (op) == LSHIFTRT
662 || GET_CODE (op) == ASHIFTRT)
663 /* Ensure that OP_MODE is at least twice as wide as MODE
664 to avoid the possibility that an outer LSHIFTRT shifts by more
665 than the sign extension's sign_bit_copies and introduces zeros
666 into the high bits of the result. */
667 && 2 * precision <= op_precision
668 && CONST_INT_P (XEXP (op, 1))
669 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
670 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
671 && UINTVAL (XEXP (op, 1)) < precision)
672 return simplify_gen_binary (ASHIFTRT, mode,
673 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
674
675 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 676 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
677 the outer subreg is effectively a truncation to the original mode. */
678 if ((GET_CODE (op) == LSHIFTRT
679 || GET_CODE (op) == ASHIFTRT)
680 && CONST_INT_P (XEXP (op, 1))
681 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
682 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
683 && UINTVAL (XEXP (op, 1)) < precision)
684 return simplify_gen_binary (LSHIFTRT, mode,
685 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
686
687 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 688 (ashift:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if (GET_CODE (op) == ASHIFT
691 && CONST_INT_P (XEXP (op, 1))
692 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
693 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
694 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
695 && UINTVAL (XEXP (op, 1)) < precision)
696 return simplify_gen_binary (ASHIFT, mode,
697 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698
699 /* Recognize a word extraction from a multi-word subreg. */
700 if ((GET_CODE (op) == LSHIFTRT
701 || GET_CODE (op) == ASHIFTRT)
702 && SCALAR_INT_MODE_P (mode)
703 && SCALAR_INT_MODE_P (op_mode)
704 && precision >= BITS_PER_WORD
705 && 2 * precision <= op_precision
706 && CONST_INT_P (XEXP (op, 1))
707 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
708 && UINTVAL (XEXP (op, 1)) < op_precision)
709 {
710 int byte = subreg_lowpart_offset (mode, op_mode);
711 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
712 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
713 (WORDS_BIG_ENDIAN
714 ? byte - shifted_bytes
715 : byte + shifted_bytes));
716 }
717
718 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
719 and try replacing the TRUNCATE and shift with it. Don't do this
720 if the MEM has a mode-dependent address. */
721 if ((GET_CODE (op) == LSHIFTRT
722 || GET_CODE (op) == ASHIFTRT)
723 && SCALAR_INT_MODE_P (op_mode)
724 && MEM_P (XEXP (op, 0))
725 && CONST_INT_P (XEXP (op, 1))
726 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
727 && INTVAL (XEXP (op, 1)) > 0
728 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
729 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
730 MEM_ADDR_SPACE (XEXP (op, 0)))
731 && ! MEM_VOLATILE_P (XEXP (op, 0))
732 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
733 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
734 {
735 int byte = subreg_lowpart_offset (mode, op_mode);
736 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
737 return adjust_address_nv (XEXP (op, 0), mode,
738 (WORDS_BIG_ENDIAN
739 ? byte - shifted_bytes
740 : byte + shifted_bytes));
741 }
742
743 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
744 (OP:SI foo:SI) if OP is NEG or ABS. */
745 if ((GET_CODE (op) == ABS
746 || GET_CODE (op) == NEG)
747 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
748 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
749 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
750 return simplify_gen_unary (GET_CODE (op), mode,
751 XEXP (XEXP (op, 0), 0), mode);
752
753 /* (truncate:A (subreg:B (truncate:C X) 0)) is
754 (truncate:A X). */
755 if (GET_CODE (op) == SUBREG
756 && SCALAR_INT_MODE_P (mode)
757 && SCALAR_INT_MODE_P (op_mode)
758 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
759 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
760 && subreg_lowpart_p (op))
761 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
762 GET_MODE (XEXP (SUBREG_REG (op), 0)));
763
764 /* (truncate:A (truncate:B X)) is (truncate:A X). */
765 if (GET_CODE (op) == TRUNCATE)
766 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
767 GET_MODE (XEXP (op, 0)));
768
769 return NULL_RTX;
770 }
771 \f
772 /* Try to simplify a unary operation CODE whose output mode is to be
773 MODE with input operand OP whose mode was originally OP_MODE.
774 Return zero if no simplification can be made. */
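/* For example, simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode)
   returns (const_int -5), and a NOT of (not:SI X) simplifies to X.  */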
775 rtx
776 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
777 rtx op, enum machine_mode op_mode)
778 {
779 rtx trueop, tem;
780
781 trueop = avoid_constant_pool_reference (op);
782
783 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
784 if (tem)
785 return tem;
786
787 return simplify_unary_operation_1 (code, mode, op);
788 }
789
790 /* Perform some simplifications we can do even if the operands
791 aren't constant. */
792 static rtx
793 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
794 {
795 enum rtx_code reversed;
796 rtx temp;
797
798 switch (code)
799 {
800 case NOT:
801 /* (not (not X)) == X. */
802 if (GET_CODE (op) == NOT)
803 return XEXP (op, 0);
804
805 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
806 comparison is all ones. */
807 if (COMPARISON_P (op)
808 && (mode == BImode || STORE_FLAG_VALUE == -1)
809 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
810 return simplify_gen_relational (reversed, mode, VOIDmode,
811 XEXP (op, 0), XEXP (op, 1));
812
813 /* (not (plus X -1)) can become (neg X). */
814 if (GET_CODE (op) == PLUS
815 && XEXP (op, 1) == constm1_rtx)
816 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
817
818 /* Similarly, (not (neg X)) is (plus X -1). */
819 if (GET_CODE (op) == NEG)
820 return plus_constant (mode, XEXP (op, 0), -1);
821
822 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
823 if (GET_CODE (op) == XOR
824 && CONST_INT_P (XEXP (op, 1))
825 && (temp = simplify_unary_operation (NOT, mode,
826 XEXP (op, 1), mode)) != 0)
827 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
828
829 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
830 if (GET_CODE (op) == PLUS
831 && CONST_INT_P (XEXP (op, 1))
832 && mode_signbit_p (mode, XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
836
837
838 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
839 operands other than 1, but that is not valid. We could do a
840 similar simplification for (not (lshiftrt C X)) where C is
841 just the sign bit, but this doesn't seem common enough to
842 bother with. */
843 if (GET_CODE (op) == ASHIFT
844 && XEXP (op, 0) == const1_rtx)
845 {
846 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
847 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
848 }
849
850 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
851 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
852 so we can perform the above simplification. */
853
854 if (STORE_FLAG_VALUE == -1
855 && GET_CODE (op) == ASHIFTRT
 856 && CONST_INT_P (XEXP (op, 1))
857 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
858 return simplify_gen_relational (GE, mode, VOIDmode,
859 XEXP (op, 0), const0_rtx);
860
861
862 if (GET_CODE (op) == SUBREG
863 && subreg_lowpart_p (op)
864 && (GET_MODE_SIZE (GET_MODE (op))
865 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
866 && GET_CODE (SUBREG_REG (op)) == ASHIFT
867 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
868 {
869 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
870 rtx x;
871
872 x = gen_rtx_ROTATE (inner_mode,
873 simplify_gen_unary (NOT, inner_mode, const1_rtx,
874 inner_mode),
875 XEXP (SUBREG_REG (op), 1));
876 return rtl_hooks.gen_lowpart_no_emit (mode, x);
877 }
878
879 /* Apply De Morgan's laws to reduce number of patterns for machines
880 with negating logical insns (and-not, nand, etc.). If result has
881 only one NOT, put it first, since that is how the patterns are
882 coded. */
883
884 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
885 {
886 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
887 enum machine_mode op_mode;
888
889 op_mode = GET_MODE (in1);
890 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
891
892 op_mode = GET_MODE (in2);
893 if (op_mode == VOIDmode)
894 op_mode = mode;
895 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
896
897 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
898 {
899 rtx tem = in2;
900 in2 = in1; in1 = tem;
901 }
902
903 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
904 mode, in1, in2);
905 }
906 break;
907
908 case NEG:
909 /* (neg (neg X)) == X. */
910 if (GET_CODE (op) == NEG)
911 return XEXP (op, 0);
912
913 /* (neg (plus X 1)) can become (not X). */
914 if (GET_CODE (op) == PLUS
915 && XEXP (op, 1) == const1_rtx)
916 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
917
918 /* Similarly, (neg (not X)) is (plus X 1). */
919 if (GET_CODE (op) == NOT)
920 return plus_constant (mode, XEXP (op, 0), 1);
921
922 /* (neg (minus X Y)) can become (minus Y X). This transformation
923 isn't safe for modes with signed zeros, since if X and Y are
924 both +0, (minus Y X) is the same as (minus X Y). If the
925 rounding mode is towards +infinity (or -infinity) then the two
926 expressions will be rounded differently. */
927 if (GET_CODE (op) == MINUS
928 && !HONOR_SIGNED_ZEROS (mode)
929 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
930 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
931
932 if (GET_CODE (op) == PLUS
933 && !HONOR_SIGNED_ZEROS (mode)
934 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
935 {
936 /* (neg (plus A C)) is simplified to (minus -C A). */
937 if (CONST_INT_P (XEXP (op, 1))
938 || CONST_DOUBLE_P (XEXP (op, 1)))
939 {
940 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
941 if (temp)
942 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
943 }
944
945 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
946 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
947 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
948 }
949
950 /* (neg (mult A B)) becomes (mult A (neg B)).
951 This works even for floating-point values. */
952 if (GET_CODE (op) == MULT
953 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
954 {
955 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
956 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
957 }
958
959 /* NEG commutes with ASHIFT since it is multiplication. Only do
960 this if we can then eliminate the NEG (e.g., if the operand
961 is a constant). */
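/* For example, (neg (ashift (const_int 1) X)) becomes
   (ashift (const_int -1) X).  */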
962 if (GET_CODE (op) == ASHIFT)
963 {
964 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
965 if (temp)
966 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
967 }
968
969 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
970 C is equal to the width of MODE minus 1. */
971 if (GET_CODE (op) == ASHIFTRT
972 && CONST_INT_P (XEXP (op, 1))
973 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
974 return simplify_gen_binary (LSHIFTRT, mode,
975 XEXP (op, 0), XEXP (op, 1));
976
977 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
978 C is equal to the width of MODE minus 1. */
979 if (GET_CODE (op) == LSHIFTRT
980 && CONST_INT_P (XEXP (op, 1))
981 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
982 return simplify_gen_binary (ASHIFTRT, mode,
983 XEXP (op, 0), XEXP (op, 1));
984
985 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
986 if (GET_CODE (op) == XOR
987 && XEXP (op, 1) == const1_rtx
988 && nonzero_bits (XEXP (op, 0), mode) == 1)
989 return plus_constant (mode, XEXP (op, 0), -1);
990
991 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
992 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
993 if (GET_CODE (op) == LT
994 && XEXP (op, 1) == const0_rtx
995 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
996 {
997 enum machine_mode inner = GET_MODE (XEXP (op, 0));
998 int isize = GET_MODE_PRECISION (inner);
999 if (STORE_FLAG_VALUE == 1)
1000 {
1001 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1002 GEN_INT (isize - 1));
1003 if (mode == inner)
1004 return temp;
1005 if (GET_MODE_PRECISION (mode) > isize)
1006 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1007 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1008 }
1009 else if (STORE_FLAG_VALUE == -1)
1010 {
1011 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1012 GEN_INT (isize - 1));
1013 if (mode == inner)
1014 return temp;
1015 if (GET_MODE_PRECISION (mode) > isize)
1016 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1017 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1018 }
1019 }
1020 break;
1021
1022 case TRUNCATE:
1023 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1024 with the umulXi3_highpart patterns. */
1025 if (GET_CODE (op) == LSHIFTRT
1026 && GET_CODE (XEXP (op, 0)) == MULT)
1027 break;
1028
1029 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1030 {
1031 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1032 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1033 /* We can't handle truncation to a partial integer mode here
1034 because we don't know the real bitsize of the partial
1035 integer mode. */
1036 break;
1037 }
1038
1039 if (GET_MODE (op) != VOIDmode)
1040 {
1041 temp = simplify_truncation (mode, op, GET_MODE (op));
1042 if (temp)
1043 return temp;
1044 }
1045
1046 /* If we know that the value is already truncated, we can
1047 replace the TRUNCATE with a SUBREG. */
1048 if (GET_MODE_NUNITS (mode) == 1
1049 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1050 || truncated_to_mode (mode, op)))
1051 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1052
1053 /* A truncate of a comparison can be replaced with a subreg if
1054 STORE_FLAG_VALUE permits. This is like the previous test,
1055 but it works even if the comparison is done in a mode larger
1056 than HOST_BITS_PER_WIDE_INT. */
1057 if (HWI_COMPUTABLE_MODE_P (mode)
1058 && COMPARISON_P (op)
1059 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1060 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1061
1062 /* A truncate of a memory is just loading the low part of the memory
1063 if we are not changing the meaning of the address. */
1064 if (GET_CODE (op) == MEM
1065 && !VECTOR_MODE_P (mode)
1066 && !MEM_VOLATILE_P (op)
1067 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1068 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1069
1070 break;
1071
1072 case FLOAT_TRUNCATE:
1073 if (DECIMAL_FLOAT_MODE_P (mode))
1074 break;
1075
1076 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1077 if (GET_CODE (op) == FLOAT_EXTEND
1078 && GET_MODE (XEXP (op, 0)) == mode)
1079 return XEXP (op, 0);
1080
1081 /* (float_truncate:SF (float_truncate:DF foo:XF))
1082 = (float_truncate:SF foo:XF).
 1083 This may eliminate double rounding, so it is unsafe and only done
 1084 when -funsafe-math-optimizations is in effect.
1085 (float_truncate:SF (float_extend:XF foo:DF))
1086 = (float_truncate:SF foo:DF).
1087
1088 (float_truncate:DF (float_extend:XF foo:SF))
 1089 = (float_extend:DF foo:SF). */
1090 if ((GET_CODE (op) == FLOAT_TRUNCATE
1091 && flag_unsafe_math_optimizations)
1092 || GET_CODE (op) == FLOAT_EXTEND)
1093 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1094 0)))
1095 > GET_MODE_SIZE (mode)
1096 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1097 mode,
1098 XEXP (op, 0), mode);
1099
1100 /* (float_truncate (float x)) is (float x) */
1101 if (GET_CODE (op) == FLOAT
1102 && (flag_unsafe_math_optimizations
1103 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1104 && ((unsigned)significand_size (GET_MODE (op))
1105 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1106 - num_sign_bit_copies (XEXP (op, 0),
1107 GET_MODE (XEXP (op, 0))))))))
1108 return simplify_gen_unary (FLOAT, mode,
1109 XEXP (op, 0),
1110 GET_MODE (XEXP (op, 0)));
1111
1112 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1113 (OP:SF foo:SF) if OP is NEG or ABS. */
1114 if ((GET_CODE (op) == ABS
1115 || GET_CODE (op) == NEG)
1116 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1117 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1118 return simplify_gen_unary (GET_CODE (op), mode,
1119 XEXP (XEXP (op, 0), 0), mode);
1120
1121 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1122 is (float_truncate:SF x). */
1123 if (GET_CODE (op) == SUBREG
1124 && subreg_lowpart_p (op)
1125 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1126 return SUBREG_REG (op);
1127 break;
1128
1129 case FLOAT_EXTEND:
1130 if (DECIMAL_FLOAT_MODE_P (mode))
1131 break;
1132
1133 /* (float_extend (float_extend x)) is (float_extend x)
1134
1135 (float_extend (float x)) is (float x) assuming that double
1136 rounding can't happen.
1137 */
1138 if (GET_CODE (op) == FLOAT_EXTEND
1139 || (GET_CODE (op) == FLOAT
1140 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1141 && ((unsigned)significand_size (GET_MODE (op))
1142 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1143 - num_sign_bit_copies (XEXP (op, 0),
1144 GET_MODE (XEXP (op, 0)))))))
1145 return simplify_gen_unary (GET_CODE (op), mode,
1146 XEXP (op, 0),
1147 GET_MODE (XEXP (op, 0)));
1148
1149 break;
1150
1151 case ABS:
1152 /* (abs (neg <foo>)) -> (abs <foo>) */
1153 if (GET_CODE (op) == NEG)
1154 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1155 GET_MODE (XEXP (op, 0)));
1156
1157 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1158 do nothing. */
1159 if (GET_MODE (op) == VOIDmode)
1160 break;
1161
1162 /* If operand is something known to be positive, ignore the ABS. */
1163 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1164 || val_signbit_known_clear_p (GET_MODE (op),
1165 nonzero_bits (op, GET_MODE (op))))
1166 return op;
1167
1168 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1169 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1170 return gen_rtx_NEG (mode, op);
1171
1172 break;
1173
1174 case FFS:
1175 /* (ffs (*_extend <X>)) = (ffs <X>) */
1176 if (GET_CODE (op) == SIGN_EXTEND
1177 || GET_CODE (op) == ZERO_EXTEND)
1178 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1179 GET_MODE (XEXP (op, 0)));
1180 break;
1181
1182 case POPCOUNT:
1183 switch (GET_CODE (op))
1184 {
1185 case BSWAP:
1186 case ZERO_EXTEND:
1187 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1188 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1189 GET_MODE (XEXP (op, 0)));
1190
1191 case ROTATE:
1192 case ROTATERT:
1193 /* Rotations don't affect popcount. */
1194 if (!side_effects_p (XEXP (op, 1)))
1195 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1196 GET_MODE (XEXP (op, 0)));
1197 break;
1198
1199 default:
1200 break;
1201 }
1202 break;
1203
1204 case PARITY:
1205 switch (GET_CODE (op))
1206 {
1207 case NOT:
1208 case BSWAP:
1209 case ZERO_EXTEND:
1210 case SIGN_EXTEND:
1211 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1212 GET_MODE (XEXP (op, 0)));
1213
1214 case ROTATE:
1215 case ROTATERT:
1216 /* Rotations don't affect parity. */
1217 if (!side_effects_p (XEXP (op, 1)))
1218 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1219 GET_MODE (XEXP (op, 0)));
1220 break;
1221
1222 default:
1223 break;
1224 }
1225 break;
1226
1227 case BSWAP:
1228 /* (bswap (bswap x)) -> x. */
1229 if (GET_CODE (op) == BSWAP)
1230 return XEXP (op, 0);
1231 break;
1232
1233 case FLOAT:
1234 /* (float (sign_extend <X>)) = (float <X>). */
1235 if (GET_CODE (op) == SIGN_EXTEND)
1236 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1237 GET_MODE (XEXP (op, 0)));
1238 break;
1239
1240 case SIGN_EXTEND:
1241 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1242 becomes just the MINUS if its mode is MODE. This allows
1243 folding switch statements on machines using casesi (such as
1244 the VAX). */
1245 if (GET_CODE (op) == TRUNCATE
1246 && GET_MODE (XEXP (op, 0)) == mode
1247 && GET_CODE (XEXP (op, 0)) == MINUS
1248 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1249 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1250 return XEXP (op, 0);
1251
1252 /* Extending a widening multiplication should be canonicalized to
1253 a wider widening multiplication. */
1254 if (GET_CODE (op) == MULT)
1255 {
1256 rtx lhs = XEXP (op, 0);
1257 rtx rhs = XEXP (op, 1);
1258 enum rtx_code lcode = GET_CODE (lhs);
1259 enum rtx_code rcode = GET_CODE (rhs);
1260
1261 /* Widening multiplies usually extend both operands, but sometimes
1262 they use a shift to extract a portion of a register. */
1263 if ((lcode == SIGN_EXTEND
1264 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1265 && (rcode == SIGN_EXTEND
1266 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1267 {
1268 enum machine_mode lmode = GET_MODE (lhs);
1269 enum machine_mode rmode = GET_MODE (rhs);
1270 int bits;
1271
1272 if (lcode == ASHIFTRT)
1273 /* Number of bits not shifted off the end. */
1274 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1275 else /* lcode == SIGN_EXTEND */
1276 /* Size of inner mode. */
1277 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1278
1279 if (rcode == ASHIFTRT)
1280 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1281 else /* rcode == SIGN_EXTEND */
1282 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1283
 1284 /* We can only widen multiplies if the result is mathematically
1285 equivalent. I.e. if overflow was impossible. */
1286 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1287 return simplify_gen_binary
1288 (MULT, mode,
1289 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1290 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1291 }
1292 }
1293
1294 /* Check for a sign extension of a subreg of a promoted
1295 variable, where the promotion is sign-extended, and the
1296 target mode is the same as the variable's promotion. */
1297 if (GET_CODE (op) == SUBREG
1298 && SUBREG_PROMOTED_VAR_P (op)
1299 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1300 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1301 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1302
1303 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1304 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1305 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1306 {
1307 gcc_assert (GET_MODE_BITSIZE (mode)
1308 > GET_MODE_BITSIZE (GET_MODE (op)));
1309 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1310 GET_MODE (XEXP (op, 0)));
1311 }
1312
1313 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1314 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1315 GET_MODE_BITSIZE (N) - I bits.
1316 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1317 is similarly (zero_extend:M (subreg:O <X>)). */
1318 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1319 && GET_CODE (XEXP (op, 0)) == ASHIFT
1320 && CONST_INT_P (XEXP (op, 1))
1321 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1322 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1323 {
1324 enum machine_mode tmode
1325 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1326 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1327 gcc_assert (GET_MODE_BITSIZE (mode)
1328 > GET_MODE_BITSIZE (GET_MODE (op)));
1329 if (tmode != BLKmode)
1330 {
1331 rtx inner =
1332 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1333 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1334 ? SIGN_EXTEND : ZERO_EXTEND,
1335 mode, inner, tmode);
1336 }
1337 }
1338
1339 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1340 /* As we do not know which address space the pointer is referring to,
1341 we can do this only if the target does not support different pointer
1342 or address modes depending on the address space. */
1343 if (target_default_pointer_address_modes_p ()
1344 && ! POINTERS_EXTEND_UNSIGNED
1345 && mode == Pmode && GET_MODE (op) == ptr_mode
1346 && (CONSTANT_P (op)
1347 || (GET_CODE (op) == SUBREG
1348 && REG_P (SUBREG_REG (op))
1349 && REG_POINTER (SUBREG_REG (op))
1350 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1351 return convert_memory_address (Pmode, op);
1352 #endif
1353 break;
1354
1355 case ZERO_EXTEND:
1356 /* Check for a zero extension of a subreg of a promoted
1357 variable, where the promotion is zero-extended, and the
1358 target mode is the same as the variable's promotion. */
1359 if (GET_CODE (op) == SUBREG
1360 && SUBREG_PROMOTED_VAR_P (op)
1361 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1362 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1363 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1364
1365 /* Extending a widening multiplication should be canonicalized to
1366 a wider widening multiplication. */
1367 if (GET_CODE (op) == MULT)
1368 {
1369 rtx lhs = XEXP (op, 0);
1370 rtx rhs = XEXP (op, 1);
1371 enum rtx_code lcode = GET_CODE (lhs);
1372 enum rtx_code rcode = GET_CODE (rhs);
1373
1374 /* Widening multiplies usually extend both operands, but sometimes
1375 they use a shift to extract a portion of a register. */
1376 if ((lcode == ZERO_EXTEND
1377 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1378 && (rcode == ZERO_EXTEND
1379 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1380 {
1381 enum machine_mode lmode = GET_MODE (lhs);
1382 enum machine_mode rmode = GET_MODE (rhs);
1383 int bits;
1384
1385 if (lcode == LSHIFTRT)
1386 /* Number of bits not shifted off the end. */
1387 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1388 else /* lcode == ZERO_EXTEND */
1389 /* Size of inner mode. */
1390 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1391
1392 if (rcode == LSHIFTRT)
1393 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1394 else /* rcode == ZERO_EXTEND */
1395 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1396
 1397 /* We can only widen multiplies if the result is mathematically
1398 equivalent. I.e. if overflow was impossible. */
1399 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1400 return simplify_gen_binary
1401 (MULT, mode,
1402 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1403 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1404 }
1405 }
1406
1407 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1408 if (GET_CODE (op) == ZERO_EXTEND)
1409 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1410 GET_MODE (XEXP (op, 0)));
1411
1412 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1413 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1414 GET_MODE_BITSIZE (N) - I bits. */
1415 if (GET_CODE (op) == LSHIFTRT
1416 && GET_CODE (XEXP (op, 0)) == ASHIFT
1417 && CONST_INT_P (XEXP (op, 1))
1418 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1419 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1420 {
1421 enum machine_mode tmode
1422 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1423 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1424 if (tmode != BLKmode)
1425 {
1426 rtx inner =
1427 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1428 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1429 }
1430 }
1431
1432 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1433 /* As we do not know which address space the pointer is referring to,
1434 we can do this only if the target does not support different pointer
1435 or address modes depending on the address space. */
1436 if (target_default_pointer_address_modes_p ()
1437 && POINTERS_EXTEND_UNSIGNED > 0
1438 && mode == Pmode && GET_MODE (op) == ptr_mode
1439 && (CONSTANT_P (op)
1440 || (GET_CODE (op) == SUBREG
1441 && REG_P (SUBREG_REG (op))
1442 && REG_POINTER (SUBREG_REG (op))
1443 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1444 return convert_memory_address (Pmode, op);
1445 #endif
1446 break;
1447
1448 default:
1449 break;
1450 }
1451
1452 return 0;
1453 }
1454
1455 /* Try to compute the value of a unary operation CODE whose output mode is to
1456 be MODE with input operand OP whose mode was originally OP_MODE.
1457 Return zero if the value cannot be computed. */
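/* For example, a ZERO_EXTEND to SImode of (const_int -1) in QImode is
   computed here as (const_int 255), and NEG of (const_int 5) as
   (const_int -5).  */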
1458 rtx
1459 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1460 rtx op, enum machine_mode op_mode)
1461 {
1462 unsigned int width = GET_MODE_PRECISION (mode);
1463 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1464
1465 if (code == VEC_DUPLICATE)
1466 {
1467 gcc_assert (VECTOR_MODE_P (mode));
1468 if (GET_MODE (op) != VOIDmode)
1469 {
1470 if (!VECTOR_MODE_P (GET_MODE (op)))
1471 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1472 else
1473 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1474 (GET_MODE (op)));
1475 }
1476 if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
1477 || GET_CODE (op) == CONST_VECTOR)
1478 {
1479 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1480 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1481 rtvec v = rtvec_alloc (n_elts);
1482 unsigned int i;
1483
1484 if (GET_CODE (op) != CONST_VECTOR)
1485 for (i = 0; i < n_elts; i++)
1486 RTVEC_ELT (v, i) = op;
1487 else
1488 {
1489 enum machine_mode inmode = GET_MODE (op);
1490 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1491 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1492
1493 gcc_assert (in_n_elts < n_elts);
1494 gcc_assert ((n_elts % in_n_elts) == 0);
1495 for (i = 0; i < n_elts; i++)
1496 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1497 }
1498 return gen_rtx_CONST_VECTOR (mode, v);
1499 }
1500 }
1501
1502 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1503 {
1504 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1505 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1506 enum machine_mode opmode = GET_MODE (op);
1507 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1508 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1509 rtvec v = rtvec_alloc (n_elts);
1510 unsigned int i;
1511
1512 gcc_assert (op_n_elts == n_elts);
1513 for (i = 0; i < n_elts; i++)
1514 {
1515 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1516 CONST_VECTOR_ELT (op, i),
1517 GET_MODE_INNER (opmode));
1518 if (!x)
1519 return 0;
1520 RTVEC_ELT (v, i) = x;
1521 }
1522 return gen_rtx_CONST_VECTOR (mode, v);
1523 }
1524
1525 /* The order of these tests is critical so that, for example, we don't
1526 check the wrong mode (input vs. output) for a conversion operation,
1527 such as FIX. At some point, this should be simplified. */
1528
1529 if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1530 {
1531 HOST_WIDE_INT hv, lv;
1532 REAL_VALUE_TYPE d;
1533
1534 if (CONST_INT_P (op))
1535 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1536 else
1537 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1538
1539 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1540 d = real_value_truncate (mode, d);
1541 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1542 }
1543 else if (code == UNSIGNED_FLOAT
1544 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1545 {
1546 HOST_WIDE_INT hv, lv;
1547 REAL_VALUE_TYPE d;
1548
1549 if (CONST_INT_P (op))
1550 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1551 else
1552 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1553
1554 if (op_mode == VOIDmode
1555 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1556 /* We should never get a negative number. */
1557 gcc_assert (hv >= 0);
1558 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1559 hv = 0, lv &= GET_MODE_MASK (op_mode);
1560
1561 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1562 d = real_value_truncate (mode, d);
1563 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1564 }
1565
1566 if (CONST_INT_P (op)
1567 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1568 {
1569 HOST_WIDE_INT arg0 = INTVAL (op);
1570 HOST_WIDE_INT val;
1571
1572 switch (code)
1573 {
1574 case NOT:
1575 val = ~ arg0;
1576 break;
1577
1578 case NEG:
1579 val = - arg0;
1580 break;
1581
1582 case ABS:
1583 val = (arg0 >= 0 ? arg0 : - arg0);
1584 break;
1585
1586 case FFS:
1587 arg0 &= GET_MODE_MASK (mode);
1588 val = ffs_hwi (arg0);
1589 break;
1590
1591 case CLZ:
1592 arg0 &= GET_MODE_MASK (mode);
1593 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1594 ;
1595 else
1596 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1597 break;
1598
1599 case CLRSB:
1600 arg0 &= GET_MODE_MASK (mode);
1601 if (arg0 == 0)
1602 val = GET_MODE_PRECISION (mode) - 1;
1603 else if (arg0 >= 0)
1604 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1605 else if (arg0 < 0)
1606 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1607 break;
1608
1609 case CTZ:
1610 arg0 &= GET_MODE_MASK (mode);
1611 if (arg0 == 0)
1612 {
1613 /* Even if the value at zero is undefined, we have to come
1614 up with some replacement. Seems good enough. */
1615 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1616 val = GET_MODE_PRECISION (mode);
1617 }
1618 else
1619 val = ctz_hwi (arg0);
1620 break;
1621
1622 case POPCOUNT:
1623 arg0 &= GET_MODE_MASK (mode);
1624 val = 0;
1625 while (arg0)
1626 val++, arg0 &= arg0 - 1;
1627 break;
1628
1629 case PARITY:
1630 arg0 &= GET_MODE_MASK (mode);
1631 val = 0;
1632 while (arg0)
1633 val++, arg0 &= arg0 - 1;
1634 val &= 1;
1635 break;
1636
1637 case BSWAP:
1638 {
1639 unsigned int s;
1640
1641 val = 0;
1642 for (s = 0; s < width; s += 8)
1643 {
1644 unsigned int d = width - s - 8;
1645 unsigned HOST_WIDE_INT byte;
1646 byte = (arg0 >> s) & 0xff;
1647 val |= byte << d;
1648 }
1649 }
1650 break;
1651
1652 case TRUNCATE:
1653 val = arg0;
1654 break;
1655
1656 case ZERO_EXTEND:
1657 /* When zero-extending a CONST_INT, we need to know its
1658 original mode. */
1659 gcc_assert (op_mode != VOIDmode);
1660 if (op_width == HOST_BITS_PER_WIDE_INT)
1661 {
1662 /* If we were really extending the mode,
1663 we would have to distinguish between zero-extension
1664 and sign-extension. */
1665 gcc_assert (width == op_width);
1666 val = arg0;
1667 }
1668 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1669 val = arg0 & GET_MODE_MASK (op_mode);
1670 else
1671 return 0;
1672 break;
1673
1674 case SIGN_EXTEND:
1675 if (op_mode == VOIDmode)
1676 op_mode = mode;
1677 op_width = GET_MODE_PRECISION (op_mode);
1678 if (op_width == HOST_BITS_PER_WIDE_INT)
1679 {
1680 /* If we were really extending the mode,
1681 we would have to distinguish between zero-extension
1682 and sign-extension. */
1683 gcc_assert (width == op_width);
1684 val = arg0;
1685 }
1686 else if (op_width < HOST_BITS_PER_WIDE_INT)
1687 {
1688 val = arg0 & GET_MODE_MASK (op_mode);
1689 if (val_signbit_known_set_p (op_mode, val))
1690 val |= ~GET_MODE_MASK (op_mode);
1691 }
1692 else
1693 return 0;
1694 break;
1695
1696 case SQRT:
1697 case FLOAT_EXTEND:
1698 case FLOAT_TRUNCATE:
1699 case SS_TRUNCATE:
1700 case US_TRUNCATE:
1701 case SS_NEG:
1702 case US_NEG:
1703 case SS_ABS:
1704 return 0;
1705
1706 default:
1707 gcc_unreachable ();
1708 }
1709
1710 return gen_int_mode (val, mode);
1711 }
1712
1713 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1714 for a DImode operation on a CONST_INT. */
1715 else if (width <= HOST_BITS_PER_DOUBLE_INT
1716 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1717 {
1718 double_int first, value;
1719
1720 if (CONST_DOUBLE_AS_INT_P (op))
1721 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1722 CONST_DOUBLE_LOW (op));
1723 else
1724 first = double_int::from_shwi (INTVAL (op));
1725
1726 switch (code)
1727 {
1728 case NOT:
1729 value = ~first;
1730 break;
1731
1732 case NEG:
1733 value = -first;
1734 break;
1735
1736 case ABS:
1737 if (first.is_negative ())
1738 value = -first;
1739 else
1740 value = first;
1741 break;
1742
1743 case FFS:
1744 value.high = 0;
1745 if (first.low != 0)
1746 value.low = ffs_hwi (first.low);
1747 else if (first.high != 0)
1748 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1749 else
1750 value.low = 0;
1751 break;
1752
1753 case CLZ:
1754 value.high = 0;
1755 if (first.high != 0)
1756 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1757 - HOST_BITS_PER_WIDE_INT;
1758 else if (first.low != 0)
1759 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1760 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1761 value.low = GET_MODE_PRECISION (mode);
1762 break;
1763
1764 case CTZ:
1765 value.high = 0;
1766 if (first.low != 0)
1767 value.low = ctz_hwi (first.low);
1768 else if (first.high != 0)
1769 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1770 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1771 value.low = GET_MODE_PRECISION (mode);
1772 break;
1773
1774 case POPCOUNT:
1775 value = double_int_zero;
1776 while (first.low)
1777 {
1778 value.low++;
1779 first.low &= first.low - 1;
1780 }
1781 while (first.high)
1782 {
1783 value.low++;
1784 first.high &= first.high - 1;
1785 }
1786 break;
1787
1788 case PARITY:
1789 value = double_int_zero;
1790 while (first.low)
1791 {
1792 value.low++;
1793 first.low &= first.low - 1;
1794 }
1795 while (first.high)
1796 {
1797 value.low++;
1798 first.high &= first.high - 1;
1799 }
1800 value.low &= 1;
1801 break;
1802
1803 case BSWAP:
1804 {
1805 unsigned int s;
1806
1807 value = double_int_zero;
1808 for (s = 0; s < width; s += 8)
1809 {
1810 unsigned int d = width - s - 8;
1811 unsigned HOST_WIDE_INT byte;
1812
1813 if (s < HOST_BITS_PER_WIDE_INT)
1814 byte = (first.low >> s) & 0xff;
1815 else
1816 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1817
1818 if (d < HOST_BITS_PER_WIDE_INT)
1819 value.low |= byte << d;
1820 else
1821 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1822 }
1823 }
1824 break;
1825
1826 case TRUNCATE:
1827 /* This is just a change-of-mode, so do nothing. */
1828 value = first;
1829 break;
1830
1831 case ZERO_EXTEND:
1832 gcc_assert (op_mode != VOIDmode);
1833
1834 if (op_width > HOST_BITS_PER_WIDE_INT)
1835 return 0;
1836
1837 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1838 break;
1839
1840 case SIGN_EXTEND:
1841 if (op_mode == VOIDmode
1842 || op_width > HOST_BITS_PER_WIDE_INT)
1843 return 0;
1844 else
1845 {
1846 value.low = first.low & GET_MODE_MASK (op_mode);
1847 if (val_signbit_known_set_p (op_mode, value.low))
1848 value.low |= ~GET_MODE_MASK (op_mode);
1849
1850 value.high = HWI_SIGN_EXTEND (value.low);
1851 }
1852 break;
1853
1854 case SQRT:
1855 return 0;
1856
1857 default:
1858 return 0;
1859 }
1860
1861 return immed_double_int_const (value, mode);
1862 }
1863
1864 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1865 && SCALAR_FLOAT_MODE_P (mode)
1866 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1867 {
1868 REAL_VALUE_TYPE d, t;
1869 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1870
1871 switch (code)
1872 {
1873 case SQRT:
1874 if (HONOR_SNANS (mode) && real_isnan (&d))
1875 return 0;
1876 real_sqrt (&t, mode, &d);
1877 d = t;
1878 break;
1879 case ABS:
1880 d = real_value_abs (&d);
1881 break;
1882 case NEG:
1883 d = real_value_negate (&d);
1884 break;
1885 case FLOAT_TRUNCATE:
1886 d = real_value_truncate (mode, d);
1887 break;
1888 case FLOAT_EXTEND:
1889 /* All this does is change the mode, unless changing
1890 mode class. */
1891 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1892 real_convert (&d, mode, &d);
1893 break;
1894 case FIX:
1895 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1896 break;
1897 case NOT:
1898 {
1899 long tmp[4];
1900 int i;
1901
1902 real_to_target (tmp, &d, GET_MODE (op));
1903 for (i = 0; i < 4; i++)
1904 tmp[i] = ~tmp[i];
1905 real_from_target (&d, tmp, mode);
1906 break;
1907 }
1908 default:
1909 gcc_unreachable ();
1910 }
1911 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1912 }
1913
1914 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1915 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1916 && GET_MODE_CLASS (mode) == MODE_INT
1917 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1918 {
1919 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1920 operators are intentionally left unspecified (to ease implementation
1921 by target backends), for consistency, this routine implements the
1922 same semantics for constant folding as used by the middle-end. */
1923
1924 /* This was formerly used only for non-IEEE float.
1925 eggert@twinsun.com says it is safe for IEEE also. */
1926 HOST_WIDE_INT xh, xl, th, tl;
1927 REAL_VALUE_TYPE x, t;
1928 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1929 switch (code)
1930 {
1931 case FIX:
1932 if (REAL_VALUE_ISNAN (x))
1933 return const0_rtx;
1934
1935 /* Test against the signed upper bound. */
1936 if (width > HOST_BITS_PER_WIDE_INT)
1937 {
1938 th = ((unsigned HOST_WIDE_INT) 1
1939 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1940 tl = -1;
1941 }
1942 else
1943 {
1944 th = 0;
1945 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1946 }
1947 real_from_integer (&t, VOIDmode, tl, th, 0);
1948 if (REAL_VALUES_LESS (t, x))
1949 {
1950 xh = th;
1951 xl = tl;
1952 break;
1953 }
1954
1955 /* Test against the signed lower bound. */
1956 if (width > HOST_BITS_PER_WIDE_INT)
1957 {
1958 th = (unsigned HOST_WIDE_INT) (-1)
1959 << (width - HOST_BITS_PER_WIDE_INT - 1);
1960 tl = 0;
1961 }
1962 else
1963 {
1964 th = -1;
1965 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1966 }
1967 real_from_integer (&t, VOIDmode, tl, th, 0);
1968 if (REAL_VALUES_LESS (x, t))
1969 {
1970 xh = th;
1971 xl = tl;
1972 break;
1973 }
1974 REAL_VALUE_TO_INT (&xl, &xh, x);
1975 break;
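 /* Editorial note, not in the original source: a worked example of the
    signed saturation above for a 32-bit mode.  The upper bound is built as
    th = 0, tl = ((unsigned HOST_WIDE_INT) 1 << 31) - 1 = 0x7fffffff, the
    lower bound as th = -1, tl = (unsigned HOST_WIDE_INT) -1 << 31; any
    value of X beyond a bound is clamped to that bound before the integer
    conversion.  */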
1976
1977 case UNSIGNED_FIX:
1978 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1979 return const0_rtx;
1980
1981 /* Test against the unsigned upper bound. */
1982 if (width == HOST_BITS_PER_DOUBLE_INT)
1983 {
1984 th = -1;
1985 tl = -1;
1986 }
1987 else if (width >= HOST_BITS_PER_WIDE_INT)
1988 {
1989 th = ((unsigned HOST_WIDE_INT) 1
1990 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1991 tl = -1;
1992 }
1993 else
1994 {
1995 th = 0;
1996 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1997 }
1998 real_from_integer (&t, VOIDmode, tl, th, 1);
1999 if (REAL_VALUES_LESS (t, x))
2000 {
2001 xh = th;
2002 xl = tl;
2003 break;
2004 }
2005
2006 REAL_VALUE_TO_INT (&xl, &xh, x);
2007 break;
2008
2009 default:
2010 gcc_unreachable ();
2011 }
2012 return immed_double_const (xl, xh, mode);
2013 }
2014
2015 return NULL_RTX;
2016 }
2017 \f
2018 /* Subroutine of simplify_binary_operation to simplify a commutative,
2019 associative binary operation CODE with result mode MODE, operating
2020 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2021 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2022 canonicalization is possible. */
2023
2024 static rtx
2025 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2026 rtx op0, rtx op1)
2027 {
2028 rtx tem;
2029
2030 /* Linearize the operator to the left. */
2031 if (GET_CODE (op1) == code)
2032 {
2033 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2034 if (GET_CODE (op0) == code)
2035 {
2036 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2037 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2038 }
2039
2040 /* "a op (b op c)" becomes "(b op c) op a". */
2041 if (! swap_commutative_operands_p (op1, op0))
2042 return simplify_gen_binary (code, mode, op1, op0);
2043
2044 tem = op0;
2045 op0 = op1;
2046 op1 = tem;
2047 }
2048
2049 if (GET_CODE (op0) == code)
2050 {
2051 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2052 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2053 {
2054 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2055 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2056 }
2057
2058 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2059 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2060 if (tem != 0)
2061 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2062
2063 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2064 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2065 if (tem != 0)
2066 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2067 }
2068
2069 return 0;
2070 }
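/* Editorial note, not in the original source: examples of the
   canonicalizations performed above.  (plus (plus a (const_int 2)) b)
   becomes (plus (plus a b) (const_int 2)), pushing the constant to the
   outermost position, and (plus a (plus b c)) is first linearized to
   (plus (plus b c) a) before the rules for a left-nested operand run.  */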
2071
2072
2073 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2074 and OP1. Return 0 if no simplification is possible.
2075
2076 Don't use this for relational operations such as EQ or LT.
2077 Use simplify_relational_operation instead. */
2078 rtx
2079 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2080 rtx op0, rtx op1)
2081 {
2082 rtx trueop0, trueop1;
2083 rtx tem;
2084
2085 /* Relational operations don't work here. We must know the mode
2086 of the operands in order to do the comparison correctly.
2087 Assuming a full word can give incorrect results.
2088 Consider comparing 128 with -128 in QImode. */
2089 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2090 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2091
2092 /* Make sure the constant is second. */
2093 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2094 && swap_commutative_operands_p (op0, op1))
2095 {
2096 tem = op0, op0 = op1, op1 = tem;
2097 }
2098
2099 trueop0 = avoid_constant_pool_reference (op0);
2100 trueop1 = avoid_constant_pool_reference (op1);
2101
2102 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2103 if (tem)
2104 return tem;
2105 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2106 }
2107
2108 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2109 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2110 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2111 actual constants. */
2112
2113 static rtx
2114 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2115 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2116 {
2117 rtx tem, reversed, opleft, opright;
2118 HOST_WIDE_INT val;
2119 unsigned int width = GET_MODE_PRECISION (mode);
2120
2121 /* Even if we can't compute a constant result,
2122 there are some cases worth simplifying. */
2123
2124 switch (code)
2125 {
2126 case PLUS:
2127 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2128 when x is NaN, infinite, or finite and nonzero. They aren't
2129 when x is -0 and the rounding mode is not towards -infinity,
2130 since (-0) + 0 is then 0. */
2131 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2132 return op0;
2133
2134 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2135 transformations are safe even for IEEE. */
2136 if (GET_CODE (op0) == NEG)
2137 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2138 else if (GET_CODE (op1) == NEG)
2139 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2140
2141 /* (~a) + 1 -> -a */
2142 if (INTEGRAL_MODE_P (mode)
2143 && GET_CODE (op0) == NOT
2144 && trueop1 == const1_rtx)
2145 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2146
2147 /* Handle both-operands-constant cases. We can only add
2148 CONST_INTs to constants since the sum of relocatable symbols
2149 can't be handled by most assemblers. Don't add CONST_INT
2150 to CONST_INT since overflow won't be computed properly if wider
2151 than HOST_BITS_PER_WIDE_INT. */
2152
2153 if ((GET_CODE (op0) == CONST
2154 || GET_CODE (op0) == SYMBOL_REF
2155 || GET_CODE (op0) == LABEL_REF)
2156 && CONST_INT_P (op1))
2157 return plus_constant (mode, op0, INTVAL (op1));
2158 else if ((GET_CODE (op1) == CONST
2159 || GET_CODE (op1) == SYMBOL_REF
2160 || GET_CODE (op1) == LABEL_REF)
2161 && CONST_INT_P (op0))
2162 return plus_constant (mode, op1, INTVAL (op0));
2163
2164 /* See if this is something like X * C - X or vice versa or
2165 if the multiplication is written as a shift. If so, we can
2166 distribute and make a new multiply, shift, or maybe just
2167 have X (if C is 2 in the example above). But don't make
2168 something more expensive than we had before. */
2169
2170 if (SCALAR_INT_MODE_P (mode))
2171 {
2172 double_int coeff0, coeff1;
2173 rtx lhs = op0, rhs = op1;
2174
2175 coeff0 = double_int_one;
2176 coeff1 = double_int_one;
2177
2178 if (GET_CODE (lhs) == NEG)
2179 {
2180 coeff0 = double_int_minus_one;
2181 lhs = XEXP (lhs, 0);
2182 }
2183 else if (GET_CODE (lhs) == MULT
2184 && CONST_INT_P (XEXP (lhs, 1)))
2185 {
2186 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2187 lhs = XEXP (lhs, 0);
2188 }
2189 else if (GET_CODE (lhs) == ASHIFT
2190 && CONST_INT_P (XEXP (lhs, 1))
2191 && INTVAL (XEXP (lhs, 1)) >= 0
2192 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2193 {
2194 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2195 lhs = XEXP (lhs, 0);
2196 }
2197
2198 if (GET_CODE (rhs) == NEG)
2199 {
2200 coeff1 = double_int_minus_one;
2201 rhs = XEXP (rhs, 0);
2202 }
2203 else if (GET_CODE (rhs) == MULT
2204 && CONST_INT_P (XEXP (rhs, 1)))
2205 {
2206 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2207 rhs = XEXP (rhs, 0);
2208 }
2209 else if (GET_CODE (rhs) == ASHIFT
2210 && CONST_INT_P (XEXP (rhs, 1))
2211 && INTVAL (XEXP (rhs, 1)) >= 0
2212 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2213 {
2214 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2215 rhs = XEXP (rhs, 0);
2216 }
2217
2218 if (rtx_equal_p (lhs, rhs))
2219 {
2220 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2221 rtx coeff;
2222 double_int val;
2223 bool speed = optimize_function_for_speed_p (cfun);
2224
2225 val = coeff0 + coeff1;
2226 coeff = immed_double_int_const (val, mode);
2227
2228 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2229 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2230 ? tem : 0;
2231 }
2232 }
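 /* Editorial note, not in the original source: the block above turns,
    e.g., (plus (mult x (const_int 3)) x) into the coefficients coeff0 = 3
    and coeff1 = 1 over the common operand x and proposes
    (mult x (const_int 4)); the replacement is kept only if its cost does
    not exceed that of the original expression.  */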
2233
2234 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2235 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2236 && GET_CODE (op0) == XOR
2237 && (CONST_INT_P (XEXP (op0, 1))
2238 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2239 && mode_signbit_p (mode, op1))
2240 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2241 simplify_gen_binary (XOR, mode, op1,
2242 XEXP (op0, 1)));
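 /* Editorial note, not in the original source: adding the sign bit is the
    same as XORing with it because the carry out of the most significant
    bit is discarded modulo 2**precision.  E.g. in QImode,
    (plus (xor x (const_int 0x70)) (const_int 0x80)) folds to
    (xor x (const_int 0xf0)).  */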
2243
2244 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2245 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2246 && GET_CODE (op0) == MULT
2247 && GET_CODE (XEXP (op0, 0)) == NEG)
2248 {
2249 rtx in1, in2;
2250
2251 in1 = XEXP (XEXP (op0, 0), 0);
2252 in2 = XEXP (op0, 1);
2253 return simplify_gen_binary (MINUS, mode, op1,
2254 simplify_gen_binary (MULT, mode,
2255 in1, in2));
2256 }
2257
2258 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2259 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2260 is 1. */
2261 if (COMPARISON_P (op0)
2262 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2263 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2264 && (reversed = reversed_comparison (op0, mode)))
2265 return
2266 simplify_gen_unary (NEG, mode, reversed, mode);
2267
2268 /* If one of the operands is a PLUS or a MINUS, see if we can
2269 simplify this by the associative law.
2270 Don't use the associative law for floating point.
2271 The inaccuracy makes it nonassociative,
2272 and subtle programs can break if operations are associated. */
2273
2274 if (INTEGRAL_MODE_P (mode)
2275 && (plus_minus_operand_p (op0)
2276 || plus_minus_operand_p (op1))
2277 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2278 return tem;
2279
2280 /* Reassociate floating point addition only when the user
2281 specifies associative math operations. */
2282 if (FLOAT_MODE_P (mode)
2283 && flag_associative_math)
2284 {
2285 tem = simplify_associative_operation (code, mode, op0, op1);
2286 if (tem)
2287 return tem;
2288 }
2289 break;
2290
2291 case COMPARE:
2292 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2293 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2294 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2295 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2296 {
2297 rtx xop00 = XEXP (op0, 0);
2298 rtx xop10 = XEXP (op1, 0);
2299
2300 #ifdef HAVE_cc0
2301 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2302 #else
2303 if (REG_P (xop00) && REG_P (xop10)
2304 && GET_MODE (xop00) == GET_MODE (xop10)
2305 && REGNO (xop00) == REGNO (xop10)
2306 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2307 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2308 #endif
2309 return xop00;
2310 }
2311 break;
2312
2313 case MINUS:
2314 /* We can't assume x-x is 0 even with non-IEEE floating point,
2315 but since it is zero except in very strange circumstances, we
2316 will treat it as zero with -ffinite-math-only. */
2317 if (rtx_equal_p (trueop0, trueop1)
2318 && ! side_effects_p (op0)
2319 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2320 return CONST0_RTX (mode);
2321
2322 /* Change subtraction from zero into negation. (0 - x) is the
2323 same as -x when x is NaN, infinite, or finite and nonzero.
2324 But if the mode has signed zeros, and does not round towards
2325 -infinity, then 0 - 0 is 0, not -0. */
2326 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2327 return simplify_gen_unary (NEG, mode, op1, mode);
2328
2329 /* (-1 - a) is ~a. */
2330 if (trueop0 == constm1_rtx)
2331 return simplify_gen_unary (NOT, mode, op1, mode);
2332
2333 /* Subtracting 0 has no effect unless the mode has signed zeros
2334 and supports rounding towards -infinity. In such a case,
2335 0 - 0 is -0. */
2336 if (!(HONOR_SIGNED_ZEROS (mode)
2337 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2338 && trueop1 == CONST0_RTX (mode))
2339 return op0;
2340
2341 /* See if this is something like X * C - X or vice versa or
2342 if the multiplication is written as a shift. If so, we can
2343 distribute and make a new multiply, shift, or maybe just
2344 have X (if C is 2 in the example above). But don't make
2345 something more expensive than we had before. */
2346
2347 if (SCALAR_INT_MODE_P (mode))
2348 {
2349 double_int coeff0, negcoeff1;
2350 rtx lhs = op0, rhs = op1;
2351
2352 coeff0 = double_int_one;
2353 negcoeff1 = double_int_minus_one;
2354
2355 if (GET_CODE (lhs) == NEG)
2356 {
2357 coeff0 = double_int_minus_one;
2358 lhs = XEXP (lhs, 0);
2359 }
2360 else if (GET_CODE (lhs) == MULT
2361 && CONST_INT_P (XEXP (lhs, 1)))
2362 {
2363 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2364 lhs = XEXP (lhs, 0);
2365 }
2366 else if (GET_CODE (lhs) == ASHIFT
2367 && CONST_INT_P (XEXP (lhs, 1))
2368 && INTVAL (XEXP (lhs, 1)) >= 0
2369 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2370 {
2371 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2372 lhs = XEXP (lhs, 0);
2373 }
2374
2375 if (GET_CODE (rhs) == NEG)
2376 {
2377 negcoeff1 = double_int_one;
2378 rhs = XEXP (rhs, 0);
2379 }
2380 else if (GET_CODE (rhs) == MULT
2381 && CONST_INT_P (XEXP (rhs, 1)))
2382 {
2383 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2384 rhs = XEXP (rhs, 0);
2385 }
2386 else if (GET_CODE (rhs) == ASHIFT
2387 && CONST_INT_P (XEXP (rhs, 1))
2388 && INTVAL (XEXP (rhs, 1)) >= 0
2389 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2390 {
2391 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2392 negcoeff1 = -negcoeff1;
2393 rhs = XEXP (rhs, 0);
2394 }
2395
2396 if (rtx_equal_p (lhs, rhs))
2397 {
2398 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2399 rtx coeff;
2400 double_int val;
2401 bool speed = optimize_function_for_speed_p (cfun);
2402
2403 val = coeff0 + negcoeff1;
2404 coeff = immed_double_int_const (val, mode);
2405
2406 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2407 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2408 ? tem : 0;
2409 }
2410 }
2411
2412 /* (a - (-b)) -> (a + b). True even for IEEE. */
2413 if (GET_CODE (op1) == NEG)
2414 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2415
2416 /* (-x - c) may be simplified as (-c - x). */
2417 if (GET_CODE (op0) == NEG
2418 && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
2419 {
2420 tem = simplify_unary_operation (NEG, mode, op1, mode);
2421 if (tem)
2422 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2423 }
2424
2425 /* Don't let a relocatable value get a negative coeff. */
2426 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2427 return simplify_gen_binary (PLUS, mode,
2428 op0,
2429 neg_const_int (mode, op1));
2430
2431 /* (x - (x & y)) -> (x & ~y) */
2432 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2433 {
2434 if (rtx_equal_p (op0, XEXP (op1, 0)))
2435 {
2436 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2437 GET_MODE (XEXP (op1, 1)));
2438 return simplify_gen_binary (AND, mode, op0, tem);
2439 }
2440 if (rtx_equal_p (op0, XEXP (op1, 1)))
2441 {
2442 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2443 GET_MODE (XEXP (op1, 0)));
2444 return simplify_gen_binary (AND, mode, op0, tem);
2445 }
2446 }
2447
2448 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2449 by reversing the comparison code if valid. */
2450 if (STORE_FLAG_VALUE == 1
2451 && trueop0 == const1_rtx
2452 && COMPARISON_P (op1)
2453 && (reversed = reversed_comparison (op1, mode)))
2454 return reversed;
2455
2456 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2457 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2458 && GET_CODE (op1) == MULT
2459 && GET_CODE (XEXP (op1, 0)) == NEG)
2460 {
2461 rtx in1, in2;
2462
2463 in1 = XEXP (XEXP (op1, 0), 0);
2464 in2 = XEXP (op1, 1);
2465 return simplify_gen_binary (PLUS, mode,
2466 simplify_gen_binary (MULT, mode,
2467 in1, in2),
2468 op0);
2469 }
2470
2471 /* Canonicalize (minus (neg A) (mult B C)) to
2472 (minus (mult (neg B) C) A). */
2473 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2474 && GET_CODE (op1) == MULT
2475 && GET_CODE (op0) == NEG)
2476 {
2477 rtx in1, in2;
2478
2479 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2480 in2 = XEXP (op1, 1);
2481 return simplify_gen_binary (MINUS, mode,
2482 simplify_gen_binary (MULT, mode,
2483 in1, in2),
2484 XEXP (op0, 0));
2485 }
2486
2487 /* If one of the operands is a PLUS or a MINUS, see if we can
2488 simplify this by the associative law. This will, for example,
2489 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2490 Don't use the associative law for floating point.
2491 The inaccuracy makes it nonassociative,
2492 and subtle programs can break if operations are associated. */
2493
2494 if (INTEGRAL_MODE_P (mode)
2495 && (plus_minus_operand_p (op0)
2496 || plus_minus_operand_p (op1))
2497 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2498 return tem;
2499 break;
2500
2501 case MULT:
2502 if (trueop1 == constm1_rtx)
2503 return simplify_gen_unary (NEG, mode, op0, mode);
2504
2505 if (GET_CODE (op0) == NEG)
2506 {
2507 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2508 /* If op1 is a MULT as well and simplify_unary_operation
2509 just moved the NEG to the second operand, simplify_gen_binary
2510 below could, through simplify_associative_operation, move
2511 the NEG around again and recurse endlessly. */
2512 if (temp
2513 && GET_CODE (op1) == MULT
2514 && GET_CODE (temp) == MULT
2515 && XEXP (op1, 0) == XEXP (temp, 0)
2516 && GET_CODE (XEXP (temp, 1)) == NEG
2517 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2518 temp = NULL_RTX;
2519 if (temp)
2520 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2521 }
2522 if (GET_CODE (op1) == NEG)
2523 {
2524 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2525 /* If op0 is a MULT as well and simplify_unary_operation
2526 just moved the NEG to the second operand, simplify_gen_binary
2527 below could, through simplify_associative_operation, move
2528 the NEG around again and recurse endlessly. */
2529 if (temp
2530 && GET_CODE (op0) == MULT
2531 && GET_CODE (temp) == MULT
2532 && XEXP (op0, 0) == XEXP (temp, 0)
2533 && GET_CODE (XEXP (temp, 1)) == NEG
2534 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2535 temp = NULL_RTX;
2536 if (temp)
2537 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2538 }
2539
2540 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2541 x is NaN, since x * 0 is then also NaN. Nor is it valid
2542 when the mode has signed zeros, since multiplying a negative
2543 number by 0 will give -0, not 0. */
2544 if (!HONOR_NANS (mode)
2545 && !HONOR_SIGNED_ZEROS (mode)
2546 && trueop1 == CONST0_RTX (mode)
2547 && ! side_effects_p (op0))
2548 return op1;
2549
2550 /* In IEEE floating point, x*1 is not equivalent to x for
2551 signalling NaNs. */
2552 if (!HONOR_SNANS (mode)
2553 && trueop1 == CONST1_RTX (mode))
2554 return op0;
2555
2556 /* Convert multiply by constant power of two into shift unless
2557 we are still generating RTL. This test is a kludge. */
2558 if (CONST_INT_P (trueop1)
2559 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2560 /* If the mode is larger than the host word size, and the
2561 uppermost bit is set, then this isn't a power of two due
2562 to implicit sign extension. */
2563 && (width <= HOST_BITS_PER_WIDE_INT
2564 || val != HOST_BITS_PER_WIDE_INT - 1))
2565 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2566
2567 /* Likewise for multipliers wider than a word. */
2568 if (CONST_DOUBLE_AS_INT_P (trueop1)
2569 && GET_MODE (op0) == mode
2570 && CONST_DOUBLE_LOW (trueop1) == 0
2571 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2572 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2573 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2574 return simplify_gen_binary (ASHIFT, mode, op0,
2575 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2576
2577 /* x*2 is x+x and x*(-1) is -x */
2578 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2579 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2580 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2581 && GET_MODE (op0) == mode)
2582 {
2583 REAL_VALUE_TYPE d;
2584 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2585
2586 if (REAL_VALUES_EQUAL (d, dconst2))
2587 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2588
2589 if (!HONOR_SNANS (mode)
2590 && REAL_VALUES_EQUAL (d, dconstm1))
2591 return simplify_gen_unary (NEG, mode, op0, mode);
2592 }
2593
2594 /* Optimize -x * -x as x * x. */
2595 if (FLOAT_MODE_P (mode)
2596 && GET_CODE (op0) == NEG
2597 && GET_CODE (op1) == NEG
2598 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2599 && !side_effects_p (XEXP (op0, 0)))
2600 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2601
2602 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2603 if (SCALAR_FLOAT_MODE_P (mode)
2604 && GET_CODE (op0) == ABS
2605 && GET_CODE (op1) == ABS
2606 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2607 && !side_effects_p (XEXP (op0, 0)))
2608 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2609
2610 /* Reassociate multiplication, but for floating point MULTs
2611 only when the user specifies unsafe math optimizations. */
2612 if (! FLOAT_MODE_P (mode)
2613 || flag_unsafe_math_optimizations)
2614 {
2615 tem = simplify_associative_operation (code, mode, op0, op1);
2616 if (tem)
2617 return tem;
2618 }
2619 break;
2620
2621 case IOR:
2622 if (trueop1 == CONST0_RTX (mode))
2623 return op0;
2624 if (INTEGRAL_MODE_P (mode)
2625 && trueop1 == CONSTM1_RTX (mode)
2626 && !side_effects_p (op0))
2627 return op1;
2628 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2629 return op0;
2630 /* A | (~A) -> -1 */
2631 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2632 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2633 && ! side_effects_p (op0)
2634 && SCALAR_INT_MODE_P (mode))
2635 return constm1_rtx;
2636
2637 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2638 if (CONST_INT_P (op1)
2639 && HWI_COMPUTABLE_MODE_P (mode)
2640 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2641 && !side_effects_p (op0))
2642 return op1;
2643
2644 /* Canonicalize (X & C1) | C2. */
2645 if (GET_CODE (op0) == AND
2646 && CONST_INT_P (trueop1)
2647 && CONST_INT_P (XEXP (op0, 1)))
2648 {
2649 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2650 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2651 HOST_WIDE_INT c2 = INTVAL (trueop1);
2652
2653 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2654 if ((c1 & c2) == c1
2655 && !side_effects_p (XEXP (op0, 0)))
2656 return trueop1;
2657
2658 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2659 if (((c1|c2) & mask) == mask)
2660 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2661
2662 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2663 if (((c1 & ~c2) & mask) != (c1 & mask))
2664 {
2665 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2666 gen_int_mode (c1 & ~c2, mode));
2667 return simplify_gen_binary (IOR, mode, tem, op1);
2668 }
2669 }
2670
2671 /* Convert (A & B) | A to A. */
2672 if (GET_CODE (op0) == AND
2673 && (rtx_equal_p (XEXP (op0, 0), op1)
2674 || rtx_equal_p (XEXP (op0, 1), op1))
2675 && ! side_effects_p (XEXP (op0, 0))
2676 && ! side_effects_p (XEXP (op0, 1)))
2677 return op1;
2678
2679 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2680 mode size to (rotate A CX). */
2681
2682 if (GET_CODE (op1) == ASHIFT
2683 || GET_CODE (op1) == SUBREG)
2684 {
2685 opleft = op1;
2686 opright = op0;
2687 }
2688 else
2689 {
2690 opright = op1;
2691 opleft = op0;
2692 }
2693
2694 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2695 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2696 && CONST_INT_P (XEXP (opleft, 1))
2697 && CONST_INT_P (XEXP (opright, 1))
2698 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2699 == GET_MODE_PRECISION (mode)))
2700 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
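 /* Editorial note, not in the original source: for a 32-bit mode,
    (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8))) satisfies
    24 + 8 == 32 and is rewritten as (rotate x (const_int 24)), a left
    rotation by 24 bits.  */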
2701
2702 /* Same, but for ashift that has been "simplified" to a wider mode
2703 by simplify_shift_const. */
2704
2705 if (GET_CODE (opleft) == SUBREG
2706 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2707 && GET_CODE (opright) == LSHIFTRT
2708 && GET_CODE (XEXP (opright, 0)) == SUBREG
2709 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2710 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2711 && (GET_MODE_SIZE (GET_MODE (opleft))
2712 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2713 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2714 SUBREG_REG (XEXP (opright, 0)))
2715 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2716 && CONST_INT_P (XEXP (opright, 1))
2717 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2718 == GET_MODE_PRECISION (mode)))
2719 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2720 XEXP (SUBREG_REG (opleft), 1));
2721
2722 /* If we have (ior (and X C1) C2), simplify this by making
2723 C1 as small as possible if C1 actually changes. */
2724 if (CONST_INT_P (op1)
2725 && (HWI_COMPUTABLE_MODE_P (mode)
2726 || INTVAL (op1) > 0)
2727 && GET_CODE (op0) == AND
2728 && CONST_INT_P (XEXP (op0, 1))
2729 && CONST_INT_P (op1)
2730 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2731 return simplify_gen_binary (IOR, mode,
2732 simplify_gen_binary
2733 (AND, mode, XEXP (op0, 0),
2734 GEN_INT (UINTVAL (XEXP (op0, 1))
2735 & ~UINTVAL (op1))),
2736 op1);
2737
2738 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2739 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2740 the PLUS does not affect any of the bits in OP1: then we can do
2741 the IOR as a PLUS and we can associate. This is valid if OP1
2742 can be safely shifted left C bits. */
2743 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2744 && GET_CODE (XEXP (op0, 0)) == PLUS
2745 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2746 && CONST_INT_P (XEXP (op0, 1))
2747 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2748 {
2749 int count = INTVAL (XEXP (op0, 1));
2750 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2751
2752 if (mask >> count == INTVAL (trueop1)
2753 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2754 return simplify_gen_binary (ASHIFTRT, mode,
2755 plus_constant (mode, XEXP (op0, 0),
2756 mask),
2757 XEXP (op0, 1));
2758 }
2759
2760 tem = simplify_associative_operation (code, mode, op0, op1);
2761 if (tem)
2762 return tem;
2763 break;
2764
2765 case XOR:
2766 if (trueop1 == CONST0_RTX (mode))
2767 return op0;
2768 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2769 return simplify_gen_unary (NOT, mode, op0, mode);
2770 if (rtx_equal_p (trueop0, trueop1)
2771 && ! side_effects_p (op0)
2772 && GET_MODE_CLASS (mode) != MODE_CC)
2773 return CONST0_RTX (mode);
2774
2775 /* Canonicalize XOR of the most significant bit to PLUS. */
2776 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2777 && mode_signbit_p (mode, op1))
2778 return simplify_gen_binary (PLUS, mode, op0, op1);
2779 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2780 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2781 && GET_CODE (op0) == PLUS
2782 && (CONST_INT_P (XEXP (op0, 1))
2783 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2784 && mode_signbit_p (mode, XEXP (op0, 1)))
2785 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2786 simplify_gen_binary (XOR, mode, op1,
2787 XEXP (op0, 1)));
2788
2789 /* If we are XORing two things that have no bits in common,
2790 convert them into an IOR. This helps to detect rotation encoded
2791 using those methods and possibly other simplifications. */
2792
2793 if (HWI_COMPUTABLE_MODE_P (mode)
2794 && (nonzero_bits (op0, mode)
2795 & nonzero_bits (op1, mode)) == 0)
2796 return (simplify_gen_binary (IOR, mode, op0, op1));
2797
2798 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2799 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2800 (NOT y). */
2801 {
2802 int num_negated = 0;
2803
2804 if (GET_CODE (op0) == NOT)
2805 num_negated++, op0 = XEXP (op0, 0);
2806 if (GET_CODE (op1) == NOT)
2807 num_negated++, op1 = XEXP (op1, 0);
2808
2809 if (num_negated == 2)
2810 return simplify_gen_binary (XOR, mode, op0, op1);
2811 else if (num_negated == 1)
2812 return simplify_gen_unary (NOT, mode,
2813 simplify_gen_binary (XOR, mode, op0, op1),
2814 mode);
2815 }
2816
2817 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2818 correspond to a machine insn or result in further simplifications
2819 if B is a constant. */
2820
2821 if (GET_CODE (op0) == AND
2822 && rtx_equal_p (XEXP (op0, 1), op1)
2823 && ! side_effects_p (op1))
2824 return simplify_gen_binary (AND, mode,
2825 simplify_gen_unary (NOT, mode,
2826 XEXP (op0, 0), mode),
2827 op1);
2828
2829 else if (GET_CODE (op0) == AND
2830 && rtx_equal_p (XEXP (op0, 0), op1)
2831 && ! side_effects_p (op1))
2832 return simplify_gen_binary (AND, mode,
2833 simplify_gen_unary (NOT, mode,
2834 XEXP (op0, 1), mode),
2835 op1);
2836
2837 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2838 we can transform like this:
2839 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2840 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2841 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2842 Attempt a few simplifications when B and C are both constants. */
2843 if (GET_CODE (op0) == AND
2844 && CONST_INT_P (op1)
2845 && CONST_INT_P (XEXP (op0, 1)))
2846 {
2847 rtx a = XEXP (op0, 0);
2848 rtx b = XEXP (op0, 1);
2849 rtx c = op1;
2850 HOST_WIDE_INT bval = INTVAL (b);
2851 HOST_WIDE_INT cval = INTVAL (c);
2852
2853 rtx na_c
2854 = simplify_binary_operation (AND, mode,
2855 simplify_gen_unary (NOT, mode, a, mode),
2856 c);
2857 if ((~cval & bval) == 0)
2858 {
2859 /* Try to simplify ~A&C | ~B&C. */
2860 if (na_c != NULL_RTX)
2861 return simplify_gen_binary (IOR, mode, na_c,
2862 GEN_INT (~bval & cval));
2863 }
2864 else
2865 {
2866 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2867 if (na_c == const0_rtx)
2868 {
2869 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2870 GEN_INT (~cval & bval));
2871 return simplify_gen_binary (IOR, mode, a_nc_b,
2872 GEN_INT (~bval & cval));
2873 }
2874 }
2875 }
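 /* Editorial note, not in the original source: a bit-level check of the
    first branch above in an 8-bit mode.  With B = 0x0f and C = 0xff,
    (~C & B) == 0 holds and
      (A & 0x0f) ^ 0xff == (~A & 0xff) | 0xf0,
    which is exactly the (ior na_c (const_int 0xf0)) that gets built.  */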
2876
2877 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2878 comparison if STORE_FLAG_VALUE is 1. */
2879 if (STORE_FLAG_VALUE == 1
2880 && trueop1 == const1_rtx
2881 && COMPARISON_P (op0)
2882 && (reversed = reversed_comparison (op0, mode)))
2883 return reversed;
2884
2885 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2886 is (lt foo (const_int 0)), so we can perform the above
2887 simplification if STORE_FLAG_VALUE is 1. */
2888
2889 if (STORE_FLAG_VALUE == 1
2890 && trueop1 == const1_rtx
2891 && GET_CODE (op0) == LSHIFTRT
2892 && CONST_INT_P (XEXP (op0, 1))
2893 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2894 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2895
2896 /* (xor (comparison foo bar) (const_int sign-bit))
2897 when STORE_FLAG_VALUE is the sign bit. */
2898 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2899 && trueop1 == const_true_rtx
2900 && COMPARISON_P (op0)
2901 && (reversed = reversed_comparison (op0, mode)))
2902 return reversed;
2903
2904 tem = simplify_associative_operation (code, mode, op0, op1);
2905 if (tem)
2906 return tem;
2907 break;
2908
2909 case AND:
2910 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2911 return trueop1;
2912 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2913 return op0;
2914 if (HWI_COMPUTABLE_MODE_P (mode))
2915 {
2916 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2917 HOST_WIDE_INT nzop1;
2918 if (CONST_INT_P (trueop1))
2919 {
2920 HOST_WIDE_INT val1 = INTVAL (trueop1);
2921 /* If we are turning off bits already known off in OP0, we need
2922 not do an AND. */
2923 if ((nzop0 & ~val1) == 0)
2924 return op0;
2925 }
2926 nzop1 = nonzero_bits (trueop1, mode);
2927 /* If we are clearing all the nonzero bits, the result is zero. */
2928 if ((nzop1 & nzop0) == 0
2929 && !side_effects_p (op0) && !side_effects_p (op1))
2930 return CONST0_RTX (mode);
2931 }
2932 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2933 && GET_MODE_CLASS (mode) != MODE_CC)
2934 return op0;
2935 /* A & (~A) -> 0 */
2936 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2937 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2938 && ! side_effects_p (op0)
2939 && GET_MODE_CLASS (mode) != MODE_CC)
2940 return CONST0_RTX (mode);
2941
2942 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2943 there are no nonzero bits of C outside of X's mode. */
2944 if ((GET_CODE (op0) == SIGN_EXTEND
2945 || GET_CODE (op0) == ZERO_EXTEND)
2946 && CONST_INT_P (trueop1)
2947 && HWI_COMPUTABLE_MODE_P (mode)
2948 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2949 & UINTVAL (trueop1)) == 0)
2950 {
2951 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2952 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2953 gen_int_mode (INTVAL (trueop1),
2954 imode));
2955 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2956 }
2957
2958 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2959 we might be able to further simplify the AND with X and potentially
2960 remove the truncation altogether. */
2961 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2962 {
2963 rtx x = XEXP (op0, 0);
2964 enum machine_mode xmode = GET_MODE (x);
2965 tem = simplify_gen_binary (AND, xmode, x,
2966 gen_int_mode (INTVAL (trueop1), xmode));
2967 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2968 }
2969
2970 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2971 if (GET_CODE (op0) == IOR
2972 && CONST_INT_P (trueop1)
2973 && CONST_INT_P (XEXP (op0, 1)))
2974 {
2975 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2976 return simplify_gen_binary (IOR, mode,
2977 simplify_gen_binary (AND, mode,
2978 XEXP (op0, 0), op1),
2979 gen_int_mode (tmp, mode));
2980 }
2981
2982 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2983 insn (and may simplify more). */
2984 if (GET_CODE (op0) == XOR
2985 && rtx_equal_p (XEXP (op0, 0), op1)
2986 && ! side_effects_p (op1))
2987 return simplify_gen_binary (AND, mode,
2988 simplify_gen_unary (NOT, mode,
2989 XEXP (op0, 1), mode),
2990 op1);
2991
2992 if (GET_CODE (op0) == XOR
2993 && rtx_equal_p (XEXP (op0, 1), op1)
2994 && ! side_effects_p (op1))
2995 return simplify_gen_binary (AND, mode,
2996 simplify_gen_unary (NOT, mode,
2997 XEXP (op0, 0), mode),
2998 op1);
2999
3000 /* Similarly for (~(A ^ B)) & A. */
3001 if (GET_CODE (op0) == NOT
3002 && GET_CODE (XEXP (op0, 0)) == XOR
3003 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3004 && ! side_effects_p (op1))
3005 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3006
3007 if (GET_CODE (op0) == NOT
3008 && GET_CODE (XEXP (op0, 0)) == XOR
3009 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3010 && ! side_effects_p (op1))
3011 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3012
3013 /* Convert (A | B) & A to A. */
3014 if (GET_CODE (op0) == IOR
3015 && (rtx_equal_p (XEXP (op0, 0), op1)
3016 || rtx_equal_p (XEXP (op0, 1), op1))
3017 && ! side_effects_p (XEXP (op0, 0))
3018 && ! side_effects_p (XEXP (op0, 1)))
3019 return op1;
3020
3021 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3022 ((A & N) + B) & M -> (A + B) & M
3023 Similarly if (N & M) == 0,
3024 ((A | N) + B) & M -> (A + B) & M
3025 and for - instead of + and/or ^ instead of |.
3026 Also, if (N & M) == 0, then
3027 (A +- N) & M -> A & M. */
3028 if (CONST_INT_P (trueop1)
3029 && HWI_COMPUTABLE_MODE_P (mode)
3030 && ~UINTVAL (trueop1)
3031 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3032 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3033 {
3034 rtx pmop[2];
3035 int which;
3036
3037 pmop[0] = XEXP (op0, 0);
3038 pmop[1] = XEXP (op0, 1);
3039
3040 if (CONST_INT_P (pmop[1])
3041 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3042 return simplify_gen_binary (AND, mode, pmop[0], op1);
3043
3044 for (which = 0; which < 2; which++)
3045 {
3046 tem = pmop[which];
3047 switch (GET_CODE (tem))
3048 {
3049 case AND:
3050 if (CONST_INT_P (XEXP (tem, 1))
3051 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3052 == UINTVAL (trueop1))
3053 pmop[which] = XEXP (tem, 0);
3054 break;
3055 case IOR:
3056 case XOR:
3057 if (CONST_INT_P (XEXP (tem, 1))
3058 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3059 pmop[which] = XEXP (tem, 0);
3060 break;
3061 default:
3062 break;
3063 }
3064 }
3065
3066 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3067 {
3068 tem = simplify_gen_binary (GET_CODE (op0), mode,
3069 pmop[0], pmop[1]);
3070 return simplify_gen_binary (code, mode, tem, op1);
3071 }
3072 }
3073
3074 /* (and X (ior (not X) Y)) -> (and X Y) */
3075 if (GET_CODE (op1) == IOR
3076 && GET_CODE (XEXP (op1, 0)) == NOT
3077 && op0 == XEXP (XEXP (op1, 0), 0))
3078 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3079
3080 /* (and (ior (not X) Y) X) -> (and X Y) */
3081 if (GET_CODE (op0) == IOR
3082 && GET_CODE (XEXP (op0, 0)) == NOT
3083 && op1 == XEXP (XEXP (op0, 0), 0))
3084 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3085
3086 tem = simplify_associative_operation (code, mode, op0, op1);
3087 if (tem)
3088 return tem;
3089 break;
3090
3091 case UDIV:
3092 /* 0/x is 0 (or x&0 if x has side-effects). */
3093 if (trueop0 == CONST0_RTX (mode))
3094 {
3095 if (side_effects_p (op1))
3096 return simplify_gen_binary (AND, mode, op1, trueop0);
3097 return trueop0;
3098 }
3099 /* x/1 is x. */
3100 if (trueop1 == CONST1_RTX (mode))
3101 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
3102 /* Convert divide by power of two into shift. */
3103 if (CONST_INT_P (trueop1)
3104 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3105 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
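 /* Editorial note, not in the original source: e.g. an unsigned division
    by 16 becomes (lshiftrt x (const_int 4)).  The "> 0" test means a
    divisor of 1 is not turned into a zero-bit shift; it is already
    handled by the x/1 rule above.  */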
3106 break;
3107
3108 case DIV:
3109 /* Handle floating point and integers separately. */
3110 if (SCALAR_FLOAT_MODE_P (mode))
3111 {
3112 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3113 safe for modes with NaNs, since 0.0 / 0.0 will then be
3114 NaN rather than 0.0. Nor is it safe for modes with signed
3115 zeros, since dividing 0 by a negative number gives -0.0. */
3116 if (trueop0 == CONST0_RTX (mode)
3117 && !HONOR_NANS (mode)
3118 && !HONOR_SIGNED_ZEROS (mode)
3119 && ! side_effects_p (op1))
3120 return op0;
3121 /* x/1.0 is x. */
3122 if (trueop1 == CONST1_RTX (mode)
3123 && !HONOR_SNANS (mode))
3124 return op0;
3125
3126 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3127 && trueop1 != CONST0_RTX (mode))
3128 {
3129 REAL_VALUE_TYPE d;
3130 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3131
3132 /* x/-1.0 is -x. */
3133 if (REAL_VALUES_EQUAL (d, dconstm1)
3134 && !HONOR_SNANS (mode))
3135 return simplify_gen_unary (NEG, mode, op0, mode);
3136
3137 /* Change FP division by a constant into multiplication.
3138 Only do this with -freciprocal-math. */
3139 if (flag_reciprocal_math
3140 && !REAL_VALUES_EQUAL (d, dconst0))
3141 {
3142 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3143 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3144 return simplify_gen_binary (MULT, mode, op0, tem);
3145 }
3146 }
3147 }
3148 else if (SCALAR_INT_MODE_P (mode))
3149 {
3150 /* 0/x is 0 (or x&0 if x has side-effects). */
3151 if (trueop0 == CONST0_RTX (mode)
3152 && !cfun->can_throw_non_call_exceptions)
3153 {
3154 if (side_effects_p (op1))
3155 return simplify_gen_binary (AND, mode, op1, trueop0);
3156 return trueop0;
3157 }
3158 /* x/1 is x. */
3159 if (trueop1 == CONST1_RTX (mode))
3160 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
3161 /* x/-1 is -x. */
3162 if (trueop1 == constm1_rtx)
3163 {
3164 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3165 return simplify_gen_unary (NEG, mode, x, mode);
3166 }
3167 }
3168 break;
3169
3170 case UMOD:
3171 /* 0%x is 0 (or x&0 if x has side-effects). */
3172 if (trueop0 == CONST0_RTX (mode))
3173 {
3174 if (side_effects_p (op1))
3175 return simplify_gen_binary (AND, mode, op1, trueop0);
3176 return trueop0;
3177 }
3178 /* x%1 is 0 (or x&0 if x has side-effects). */
3179 if (trueop1 == CONST1_RTX (mode))
3180 {
3181 if (side_effects_p (op0))
3182 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3183 return CONST0_RTX (mode);
3184 }
3185 /* Implement modulus by power of two as AND. */
3186 if (CONST_INT_P (trueop1)
3187 && exact_log2 (UINTVAL (trueop1)) > 0)
3188 return simplify_gen_binary (AND, mode, op0,
3189 GEN_INT (INTVAL (op1) - 1));
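 /* Editorial note, not in the original source: e.g. an unsigned x % 8
    becomes (and x (const_int 7)), since for a power-of-two modulus the
    remainder is just the low-order bits.  */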
3190 break;
3191
3192 case MOD:
3193 /* 0%x is 0 (or x&0 if x has side-effects). */
3194 if (trueop0 == CONST0_RTX (mode))
3195 {
3196 if (side_effects_p (op1))
3197 return simplify_gen_binary (AND, mode, op1, trueop0);
3198 return trueop0;
3199 }
3200 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3201 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3202 {
3203 if (side_effects_p (op0))
3204 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3205 return CONST0_RTX (mode);
3206 }
3207 break;
3208
3209 case ROTATERT:
3210 case ROTATE:
3211 case ASHIFTRT:
3212 if (trueop1 == CONST0_RTX (mode))
3213 return op0;
3214 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3215 return op0;
3216 /* Rotating ~0 always results in ~0. */
3217 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3218 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3219 && ! side_effects_p (op1))
3220 return op0;
3221 canonicalize_shift:
3222 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3223 {
3224 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3225 if (val != INTVAL (op1))
3226 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3227 }
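 /* Editorial note, not in the original source: on targets that define
    SHIFT_COUNT_TRUNCATED, a 32-bit shift by 33 is rewritten above as a
    shift by 33 & 31 == 1, mirroring the hardware's truncation of the
    shift count.  */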
3228 break;
3229
3230 case ASHIFT:
3231 case SS_ASHIFT:
3232 case US_ASHIFT:
3233 if (trueop1 == CONST0_RTX (mode))
3234 return op0;
3235 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3236 return op0;
3237 goto canonicalize_shift;
3238
3239 case LSHIFTRT:
3240 if (trueop1 == CONST0_RTX (mode))
3241 return op0;
3242 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3243 return op0;
3244 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3245 if (GET_CODE (op0) == CLZ
3246 && CONST_INT_P (trueop1)
3247 && STORE_FLAG_VALUE == 1
3248 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3249 {
3250 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3251 unsigned HOST_WIDE_INT zero_val = 0;
3252
3253 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3254 && zero_val == GET_MODE_PRECISION (imode)
3255 && INTVAL (trueop1) == exact_log2 (zero_val))
3256 return simplify_gen_relational (EQ, mode, imode,
3257 XEXP (op0, 0), const0_rtx);
3258 }
3259 goto canonicalize_shift;
3260
3261 case SMIN:
3262 if (width <= HOST_BITS_PER_WIDE_INT
3263 && mode_signbit_p (mode, trueop1)
3264 && ! side_effects_p (op0))
3265 return op1;
3266 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3267 return op0;
3268 tem = simplify_associative_operation (code, mode, op0, op1);
3269 if (tem)
3270 return tem;
3271 break;
3272
3273 case SMAX:
3274 if (width <= HOST_BITS_PER_WIDE_INT
3275 && CONST_INT_P (trueop1)
3276 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3277 && ! side_effects_p (op0))
3278 return op1;
3279 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3280 return op0;
3281 tem = simplify_associative_operation (code, mode, op0, op1);
3282 if (tem)
3283 return tem;
3284 break;
3285
3286 case UMIN:
3287 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3288 return op1;
3289 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3290 return op0;
3291 tem = simplify_associative_operation (code, mode, op0, op1);
3292 if (tem)
3293 return tem;
3294 break;
3295
3296 case UMAX:
3297 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3298 return op1;
3299 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3300 return op0;
3301 tem = simplify_associative_operation (code, mode, op0, op1);
3302 if (tem)
3303 return tem;
3304 break;
3305
3306 case SS_PLUS:
3307 case US_PLUS:
3308 case SS_MINUS:
3309 case US_MINUS:
3310 case SS_MULT:
3311 case US_MULT:
3312 case SS_DIV:
3313 case US_DIV:
3314 /* ??? There are simplifications that can be done. */
3315 return 0;
3316
3317 case VEC_SELECT:
3318 if (!VECTOR_MODE_P (mode))
3319 {
3320 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3321 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3322 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3323 gcc_assert (XVECLEN (trueop1, 0) == 1);
3324 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3325
3326 if (GET_CODE (trueop0) == CONST_VECTOR)
3327 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3328 (trueop1, 0, 0)));
3329
3330 /* Extract a scalar element from a nested VEC_SELECT expression
3331 (with optional nested VEC_CONCAT expression). Some targets
3332 (i386) extract a scalar element from a vector using a chain of
3333 nested VEC_SELECT expressions. When the input operand is a memory
3334 operand, this operation can be simplified to a simple scalar
3335 load from an offset memory address. */
3336 if (GET_CODE (trueop0) == VEC_SELECT)
3337 {
3338 rtx op0 = XEXP (trueop0, 0);
3339 rtx op1 = XEXP (trueop0, 1);
3340
3341 enum machine_mode opmode = GET_MODE (op0);
3342 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3343 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3344
3345 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3346 int elem;
3347
3348 rtvec vec;
3349 rtx tmp_op, tmp;
3350
3351 gcc_assert (GET_CODE (op1) == PARALLEL);
3352 gcc_assert (i < n_elts);
3353
3354 /* Select the element pointed to by the nested selector. */
3355 elem = INTVAL (XVECEXP (op1, 0, i));
3356
3357 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3358 if (GET_CODE (op0) == VEC_CONCAT)
3359 {
3360 rtx op00 = XEXP (op0, 0);
3361 rtx op01 = XEXP (op0, 1);
3362
3363 enum machine_mode mode00, mode01;
3364 int n_elts00, n_elts01;
3365
3366 mode00 = GET_MODE (op00);
3367 mode01 = GET_MODE (op01);
3368
3369 /* Find out number of elements of each operand. */
3370 if (VECTOR_MODE_P (mode00))
3371 {
3372 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3373 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3374 }
3375 else
3376 n_elts00 = 1;
3377
3378 if (VECTOR_MODE_P (mode01))
3379 {
3380 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3381 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3382 }
3383 else
3384 n_elts01 = 1;
3385
3386 gcc_assert (n_elts == n_elts00 + n_elts01);
3387
3388 /* Select the correct operand of the VEC_CONCAT
3389 and adjust the selector. */
3390 if (elem < n_elts01)
3391 tmp_op = op00;
3392 else
3393 {
3394 tmp_op = op01;
3395 elem -= n_elts00;
3396 }
3397 }
3398 else
3399 tmp_op = op0;
3400
3401 vec = rtvec_alloc (1);
3402 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3403
3404 tmp = gen_rtx_fmt_ee (code, mode,
3405 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3406 return tmp;
3407 }
3408 if (GET_CODE (trueop0) == VEC_DUPLICATE
3409 && GET_MODE (XEXP (trueop0, 0)) == mode)
3410 return XEXP (trueop0, 0);
3411 }
3412 else
3413 {
3414 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3415 gcc_assert (GET_MODE_INNER (mode)
3416 == GET_MODE_INNER (GET_MODE (trueop0)));
3417 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3418
3419 if (GET_CODE (trueop0) == CONST_VECTOR)
3420 {
3421 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3422 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3423 rtvec v = rtvec_alloc (n_elts);
3424 unsigned int i;
3425
3426 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3427 for (i = 0; i < n_elts; i++)
3428 {
3429 rtx x = XVECEXP (trueop1, 0, i);
3430
3431 gcc_assert (CONST_INT_P (x));
3432 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3433 INTVAL (x));
3434 }
3435
3436 return gen_rtx_CONST_VECTOR (mode, v);
3437 }
3438
3439 /* Recognize the identity. */
3440 if (GET_MODE (trueop0) == mode)
3441 {
3442 bool maybe_ident = true;
3443 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3444 {
3445 rtx j = XVECEXP (trueop1, 0, i);
3446 if (!CONST_INT_P (j) || INTVAL (j) != i)
3447 {
3448 maybe_ident = false;
3449 break;
3450 }
3451 }
3452 if (maybe_ident)
3453 return trueop0;
3454 }
3455
3456 /* If we build {a,b} then permute it, build the result directly. */
3457 if (XVECLEN (trueop1, 0) == 2
3458 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3459 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3460 && GET_CODE (trueop0) == VEC_CONCAT
3461 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3462 && GET_MODE (XEXP (trueop0, 0)) == mode
3463 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3464 && GET_MODE (XEXP (trueop0, 1)) == mode)
3465 {
3466 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3467 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3468 rtx subop0, subop1;
3469
3470 gcc_assert (i0 < 4 && i1 < 4);
3471 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3472 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3473
3474 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3475 }
3476
3477 if (XVECLEN (trueop1, 0) == 2
3478 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3479 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3480 && GET_CODE (trueop0) == VEC_CONCAT
3481 && GET_MODE (trueop0) == mode)
3482 {
3483 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3484 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3485 rtx subop0, subop1;
3486
3487 gcc_assert (i0 < 2 && i1 < 2);
3488 subop0 = XEXP (trueop0, i0);
3489 subop1 = XEXP (trueop0, i1);
3490
3491 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3492 }
3493 }
3494
3495 if (XVECLEN (trueop1, 0) == 1
3496 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3497 && GET_CODE (trueop0) == VEC_CONCAT)
3498 {
3499 rtx vec = trueop0;
3500 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3501
3502 /* Try to find the element in the VEC_CONCAT. */
3503 while (GET_MODE (vec) != mode
3504 && GET_CODE (vec) == VEC_CONCAT)
3505 {
3506 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3507 if (offset < vec_size)
3508 vec = XEXP (vec, 0);
3509 else
3510 {
3511 offset -= vec_size;
3512 vec = XEXP (vec, 1);
3513 }
3514 vec = avoid_constant_pool_reference (vec);
3515 }
3516
3517 if (GET_MODE (vec) == mode)
3518 return vec;
3519 }
3520
3521 return 0;
3522 case VEC_CONCAT:
3523 {
3524 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3525 ? GET_MODE (trueop0)
3526 : GET_MODE_INNER (mode));
3527 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3528 ? GET_MODE (trueop1)
3529 : GET_MODE_INNER (mode));
3530
3531 gcc_assert (VECTOR_MODE_P (mode));
3532 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3533 == GET_MODE_SIZE (mode));
3534
3535 if (VECTOR_MODE_P (op0_mode))
3536 gcc_assert (GET_MODE_INNER (mode)
3537 == GET_MODE_INNER (op0_mode));
3538 else
3539 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3540
3541 if (VECTOR_MODE_P (op1_mode))
3542 gcc_assert (GET_MODE_INNER (mode)
3543 == GET_MODE_INNER (op1_mode));
3544 else
3545 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3546
3547 if ((GET_CODE (trueop0) == CONST_VECTOR
3548 || CONST_INT_P (trueop0) || CONST_DOUBLE_P (trueop0))
3549 && (GET_CODE (trueop1) == CONST_VECTOR
3550 || CONST_INT_P (trueop1) || CONST_DOUBLE_P (trueop1)))
3551 {
3552 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3553 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3554 rtvec v = rtvec_alloc (n_elts);
3555 unsigned int i;
3556 unsigned in_n_elts = 1;
3557
3558 if (VECTOR_MODE_P (op0_mode))
3559 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3560 for (i = 0; i < n_elts; i++)
3561 {
3562 if (i < in_n_elts)
3563 {
3564 if (!VECTOR_MODE_P (op0_mode))
3565 RTVEC_ELT (v, i) = trueop0;
3566 else
3567 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3568 }
3569 else
3570 {
3571 if (!VECTOR_MODE_P (op1_mode))
3572 RTVEC_ELT (v, i) = trueop1;
3573 else
3574 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3575 i - in_n_elts);
3576 }
3577 }
3578
3579 return gen_rtx_CONST_VECTOR (mode, v);
3580 }
3581
3582 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3583 if (GET_CODE (trueop0) == VEC_SELECT
3584 && GET_CODE (trueop1) == VEC_SELECT
3585 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3586 {
3587 rtx par0 = XEXP (trueop0, 1);
3588 rtx par1 = XEXP (trueop1, 1);
3589 int len0 = XVECLEN (par0, 0);
3590 int len1 = XVECLEN (par1, 0);
3591 rtvec vec = rtvec_alloc (len0 + len1);
3592 for (int i = 0; i < len0; i++)
3593 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3594 for (int i = 0; i < len1; i++)
3595 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3596 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3597 gen_rtx_PARALLEL (VOIDmode, vec));
3598 }
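/* For example, with a hypothetical source vector x of SImode elements,
     (vec_concat:V4SI (vec_select:V2SI x (parallel [(const_int 0)
                                                    (const_int 2)]))
                      (vec_select:V2SI x (parallel [(const_int 3)
                                                    (const_int 1)])))
   is merged into
     (vec_select:V4SI x (parallel [(const_int 0) (const_int 2)
                                   (const_int 3) (const_int 1)])).  */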
3599 }
3600 return 0;
3601
3602 default:
3603 gcc_unreachable ();
3604 }
3605
3606 return 0;
3607 }
3608
3609 rtx
3610 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3611 rtx op0, rtx op1)
3612 {
3613 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3614 HOST_WIDE_INT val;
3615 unsigned int width = GET_MODE_PRECISION (mode);
3616
3617 if (VECTOR_MODE_P (mode)
3618 && code != VEC_CONCAT
3619 && GET_CODE (op0) == CONST_VECTOR
3620 && GET_CODE (op1) == CONST_VECTOR)
3621 {
3622 unsigned n_elts = GET_MODE_NUNITS (mode);
3623 enum machine_mode op0mode = GET_MODE (op0);
3624 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3625 enum machine_mode op1mode = GET_MODE (op1);
3626 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3627 rtvec v = rtvec_alloc (n_elts);
3628 unsigned int i;
3629
3630 gcc_assert (op0_n_elts == n_elts);
3631 gcc_assert (op1_n_elts == n_elts);
3632 for (i = 0; i < n_elts; i++)
3633 {
3634 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3635 CONST_VECTOR_ELT (op0, i),
3636 CONST_VECTOR_ELT (op1, i));
3637 if (!x)
3638 return 0;
3639 RTVEC_ELT (v, i) = x;
3640 }
3641
3642 return gen_rtx_CONST_VECTOR (mode, v);
3643 }
3644
3645 if (VECTOR_MODE_P (mode)
3646 && code == VEC_CONCAT
3647 && (CONST_INT_P (op0)
3648 || GET_CODE (op0) == CONST_FIXED
3649 || CONST_DOUBLE_P (op0))
3650 && (CONST_INT_P (op1)
3651 || CONST_DOUBLE_P (op1)
3652 || GET_CODE (op1) == CONST_FIXED))
3653 {
3654 unsigned n_elts = GET_MODE_NUNITS (mode);
3655 rtvec v = rtvec_alloc (n_elts);
3656
3657 gcc_assert (n_elts >= 2);
3658 if (n_elts == 2)
3659 {
3660 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3661 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3662
3663 RTVEC_ELT (v, 0) = op0;
3664 RTVEC_ELT (v, 1) = op1;
3665 }
3666 else
3667 {
3668 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3669 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3670 unsigned i;
3671
3672 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3673 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3674 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3675
3676 for (i = 0; i < op0_n_elts; ++i)
3677 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3678 for (i = 0; i < op1_n_elts; ++i)
3679 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3680 }
3681
3682 return gen_rtx_CONST_VECTOR (mode, v);
3683 }
3684
3685 if (SCALAR_FLOAT_MODE_P (mode)
3686 && CONST_DOUBLE_AS_FLOAT_P (op0)
3687 && CONST_DOUBLE_AS_FLOAT_P (op1)
3688 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3689 {
3690 if (code == AND
3691 || code == IOR
3692 || code == XOR)
3693 {
3694 long tmp0[4];
3695 long tmp1[4];
3696 REAL_VALUE_TYPE r;
3697 int i;
3698
3699 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3700 GET_MODE (op0));
3701 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3702 GET_MODE (op1));
3703 for (i = 0; i < 4; i++)
3704 {
3705 switch (code)
3706 {
3707 case AND:
3708 tmp0[i] &= tmp1[i];
3709 break;
3710 case IOR:
3711 tmp0[i] |= tmp1[i];
3712 break;
3713 case XOR:
3714 tmp0[i] ^= tmp1[i];
3715 break;
3716 default:
3717 gcc_unreachable ();
3718 }
3719 }
3720 real_from_target (&r, tmp0, mode);
3721 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3722 }
3723 else
3724 {
3725 REAL_VALUE_TYPE f0, f1, value, result;
3726 bool inexact;
3727
3728 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3729 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3730 real_convert (&f0, mode, &f0);
3731 real_convert (&f1, mode, &f1);
3732
3733 if (HONOR_SNANS (mode)
3734 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3735 return 0;
3736
3737 if (code == DIV
3738 && REAL_VALUES_EQUAL (f1, dconst0)
3739 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3740 return 0;
3741
3742 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3743 && flag_trapping_math
3744 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3745 {
3746 int s0 = REAL_VALUE_NEGATIVE (f0);
3747 int s1 = REAL_VALUE_NEGATIVE (f1);
3748
3749 switch (code)
3750 {
3751 case PLUS:
3752 /* Inf + -Inf = NaN plus exception. */
3753 if (s0 != s1)
3754 return 0;
3755 break;
3756 case MINUS:
3757 /* Inf - Inf = NaN plus exception. */
3758 if (s0 == s1)
3759 return 0;
3760 break;
3761 case DIV:
3762 /* Inf / Inf = NaN plus exception. */
3763 return 0;
3764 default:
3765 break;
3766 }
3767 }
3768
3769 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3770 && flag_trapping_math
3771 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3772 || (REAL_VALUE_ISINF (f1)
3773 && REAL_VALUES_EQUAL (f0, dconst0))))
3774 /* Inf * 0 = NaN plus exception. */
3775 return 0;
3776
3777 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3778 &f0, &f1);
3779 real_convert (&result, mode, &value);
3780
3781 /* Don't constant fold this floating point operation if
3782 the result has overflowed and flag_trapping_math is set. */
3783
3784 if (flag_trapping_math
3785 && MODE_HAS_INFINITIES (mode)
3786 && REAL_VALUE_ISINF (result)
3787 && !REAL_VALUE_ISINF (f0)
3788 && !REAL_VALUE_ISINF (f1))
3789 /* Overflow plus exception. */
3790 return 0;
3791
3792 /* Don't constant fold this floating point operation if the
3793 result may depend upon the run-time rounding mode and
3794 flag_rounding_math is set, or if GCC's software emulation
3795 is unable to accurately represent the result. */
3796
3797 if ((flag_rounding_math
3798 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3799 && (inexact || !real_identical (&result, &value)))
3800 return NULL_RTX;
3801
3802 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3803 }
3804 }
3805
3806 /* We can fold some multi-word operations. */
3807 if (GET_MODE_CLASS (mode) == MODE_INT
3808 && width == HOST_BITS_PER_DOUBLE_INT
3809 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3810 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3811 {
3812 double_int o0, o1, res, tmp;
3813 bool overflow;
3814
3815 o0 = rtx_to_double_int (op0);
3816 o1 = rtx_to_double_int (op1);
3817
3818 switch (code)
3819 {
3820 case MINUS:
3821 /* A - B == A + (-B). */
3822 o1 = -o1;
3823
3824 /* Fall through.... */
3825
3826 case PLUS:
3827 res = o0 + o1;
3828 break;
3829
3830 case MULT:
3831 res = o0 * o1;
3832 break;
3833
3834 case DIV:
3835 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3836 &tmp, &overflow);
3837 if (overflow)
3838 return 0;
3839 break;
3840
3841 case MOD:
3842 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3843 &res, &overflow);
3844 if (overflow)
3845 return 0;
3846 break;
3847
3848 case UDIV:
3849 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3850 &tmp, &overflow);
3851 if (overflow)
3852 return 0;
3853 break;
3854
3855 case UMOD:
3856 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3857 &res, &overflow);
3858 if (overflow)
3859 return 0;
3860 break;
3861
3862 case AND:
3863 res = o0 & o1;
3864 break;
3865
3866 case IOR:
3867 res = o0 | o1;
3868 break;
3869
3870 case XOR:
3871 res = o0 ^ o1;
3872 break;
3873
3874 case SMIN:
3875 res = o0.smin (o1);
3876 break;
3877
3878 case SMAX:
3879 res = o0.smax (o1);
3880 break;
3881
3882 case UMIN:
3883 res = o0.umin (o1);
3884 break;
3885
3886 case UMAX:
3887 res = o0.umax (o1);
3888 break;
3889
3890 case LSHIFTRT: case ASHIFTRT:
3891 case ASHIFT:
3892 case ROTATE: case ROTATERT:
3893 {
3894 unsigned HOST_WIDE_INT cnt;
3895
3896 if (SHIFT_COUNT_TRUNCATED)
3897 {
3898 o1.high = 0;
3899 o1.low &= GET_MODE_PRECISION (mode) - 1;
3900 }
3901
3902 if (!o1.fits_uhwi ()
3903 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3904 return 0;
3905
3906 cnt = o1.to_uhwi ();
3907 unsigned short prec = GET_MODE_PRECISION (mode);
3908
3909 if (code == LSHIFTRT || code == ASHIFTRT)
3910 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3911 else if (code == ASHIFT)
3912 res = o0.alshift (cnt, prec);
3913 else if (code == ROTATE)
3914 res = o0.lrotate (cnt, prec);
3915 else /* code == ROTATERT */
3916 res = o0.rrotate (cnt, prec);
3917 }
3918 break;
3919
3920 default:
3921 return 0;
3922 }
3923
3924 return immed_double_int_const (res, mode);
3925 }
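/* A worked illustration of the multi-word folding above, assuming a 128-bit
   mode on a host with 64-bit HOST_WIDE_INT: MINUS is folded by negating O1
   and reusing the PLUS case, while a shift count of 130 is either reduced
   to 130 & 127 == 2 when SHIFT_COUNT_TRUNCATED is nonzero or, otherwise,
   makes the fold fail because the count is out of range.  */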
3926
3927 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3928 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3929 {
3930 /* Get the integer argument values in two forms:
3931 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3932
3933 arg0 = INTVAL (op0);
3934 arg1 = INTVAL (op1);
3935
3936 if (width < HOST_BITS_PER_WIDE_INT)
3937 {
3938 arg0 &= GET_MODE_MASK (mode);
3939 arg1 &= GET_MODE_MASK (mode);
3940
3941 arg0s = arg0;
3942 if (val_signbit_known_set_p (mode, arg0s))
3943 arg0s |= ~GET_MODE_MASK (mode);
3944
3945 arg1s = arg1;
3946 if (val_signbit_known_set_p (mode, arg1s))
3947 arg1s |= ~GET_MODE_MASK (mode);
3948 }
3949 else
3950 {
3951 arg0s = arg0;
3952 arg1s = arg1;
3953 }
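/* For instance, in QImode the constant (const_int -1) gives arg0 == 0xff
   (zero-extended) but arg0s == -1 (sign-extended); the unsigned cases
   below (UDIV, UMOD, the logical shift, ...) use the former, while the
   signed cases (DIV, MOD, SMIN, ...) use the latter.  */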
3954
3955 /* Compute the value of the arithmetic. */
3956
3957 switch (code)
3958 {
3959 case PLUS:
3960 val = arg0s + arg1s;
3961 break;
3962
3963 case MINUS:
3964 val = arg0s - arg1s;
3965 break;
3966
3967 case MULT:
3968 val = arg0s * arg1s;
3969 break;
3970
3971 case DIV:
3972 if (arg1s == 0
3973 || ((unsigned HOST_WIDE_INT) arg0s
3974 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3975 && arg1s == -1))
3976 return 0;
3977 val = arg0s / arg1s;
3978 break;
3979
3980 case MOD:
3981 if (arg1s == 0
3982 || ((unsigned HOST_WIDE_INT) arg0s
3983 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3984 && arg1s == -1))
3985 return 0;
3986 val = arg0s % arg1s;
3987 break;
3988
3989 case UDIV:
3990 if (arg1 == 0
3991 || ((unsigned HOST_WIDE_INT) arg0s
3992 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3993 && arg1s == -1))
3994 return 0;
3995 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3996 break;
3997
3998 case UMOD:
3999 if (arg1 == 0
4000 || ((unsigned HOST_WIDE_INT) arg0s
4001 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4002 && arg1s == -1))
4003 return 0;
4004 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4005 break;
4006
4007 case AND:
4008 val = arg0 & arg1;
4009 break;
4010
4011 case IOR:
4012 val = arg0 | arg1;
4013 break;
4014
4015 case XOR:
4016 val = arg0 ^ arg1;
4017 break;
4018
4019 case LSHIFTRT:
4020 case ASHIFT:
4021 case ASHIFTRT:
4022 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4023 the value is in range. We can't return any old value for
4024 out-of-range arguments because either the middle-end (via
4025 shift_truncation_mask) or the back-end might be relying on
4026 target-specific knowledge. Nor can we rely on
4027 shift_truncation_mask, since the shift might not be part of an
4028 ashlM3, lshrM3 or ashrM3 instruction. */
4029 if (SHIFT_COUNT_TRUNCATED)
4030 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4031 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4032 return 0;
4033
4034 val = (code == ASHIFT
4035 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4036 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4037
4038 /* Sign-extend the result for arithmetic right shifts. */
4039 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4040 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4041 break;
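/* Illustration of the sign-extension step, with width == 8: for
   arg0 == 0x90 (arg0s == -112) and arg1 == 4, the logical shift gives
   0x09 and ORing in the high bits gives 0xf9, i.e. -7 in QImode, which
   is -112 >> 4 performed arithmetically.  */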
4042
4043 case ROTATERT:
4044 if (arg1 < 0)
4045 return 0;
4046
4047 arg1 %= width;
4048 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4049 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4050 break;
4051
4052 case ROTATE:
4053 if (arg1 < 0)
4054 return 0;
4055
4056 arg1 %= width;
4057 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4058 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4059 break;
4060
4061 case COMPARE:
4062 /* Do nothing here. */
4063 return 0;
4064
4065 case SMIN:
4066 val = arg0s <= arg1s ? arg0s : arg1s;
4067 break;
4068
4069 case UMIN:
4070 val = ((unsigned HOST_WIDE_INT) arg0
4071 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4072 break;
4073
4074 case SMAX:
4075 val = arg0s > arg1s ? arg0s : arg1s;
4076 break;
4077
4078 case UMAX:
4079 val = ((unsigned HOST_WIDE_INT) arg0
4080 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4081 break;
4082
4083 case SS_PLUS:
4084 case US_PLUS:
4085 case SS_MINUS:
4086 case US_MINUS:
4087 case SS_MULT:
4088 case US_MULT:
4089 case SS_DIV:
4090 case US_DIV:
4091 case SS_ASHIFT:
4092 case US_ASHIFT:
4093 /* ??? There are simplifications that can be done. */
4094 return 0;
4095
4096 default:
4097 gcc_unreachable ();
4098 }
4099
4100 return gen_int_mode (val, mode);
4101 }
4102
4103 return NULL_RTX;
4104 }
4105
4106
4107 \f
4108 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4109 PLUS or MINUS.
4110
4111 Rather than test for specific cases, we do this by a brute-force method
4112 and do all possible simplifications until no more changes occur. Then
4113 we rebuild the operation. */
4114
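/* An illustrative trace, with hypothetical operands a, b and c:
   simplify_plus_minus (MINUS, mode, (plus a b), (neg c)) starts from the
   array { (plus a b):+, (neg c):- }; the expansion loop rewrites it to
   { a:+, c:+, b:+ }, the pairwise pass below combines whatever it can,
   and the surviving entries are rebuilt into a chain of PLUS/MINUS rtxes.  */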
4115 struct simplify_plus_minus_op_data
4116 {
4117 rtx op;
4118 short neg;
4119 };
4120
4121 static bool
4122 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4123 {
4124 int result;
4125
4126 result = (commutative_operand_precedence (y)
4127 - commutative_operand_precedence (x));
4128 if (result)
4129 return result > 0;
4130
4131 /* Group together equal REGs to do more simplification. */
4132 if (REG_P (x) && REG_P (y))
4133 return REGNO (x) > REGNO (y);
4134 else
4135 return false;
4136 }
4137
4138 static rtx
4139 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4140 rtx op1)
4141 {
4142 struct simplify_plus_minus_op_data ops[8];
4143 rtx result, tem;
4144 int n_ops = 2, input_ops = 2;
4145 int changed, n_constants = 0, canonicalized = 0;
4146 int i, j;
4147
4148 memset (ops, 0, sizeof ops);
4149
4150 /* Set up the two operands and then expand them until nothing has been
4151 changed. If we run out of room in our array, give up; this should
4152 almost never happen. */
4153
4154 ops[0].op = op0;
4155 ops[0].neg = 0;
4156 ops[1].op = op1;
4157 ops[1].neg = (code == MINUS);
4158
4159 do
4160 {
4161 changed = 0;
4162
4163 for (i = 0; i < n_ops; i++)
4164 {
4165 rtx this_op = ops[i].op;
4166 int this_neg = ops[i].neg;
4167 enum rtx_code this_code = GET_CODE (this_op);
4168
4169 switch (this_code)
4170 {
4171 case PLUS:
4172 case MINUS:
4173 if (n_ops == 7)
4174 return NULL_RTX;
4175
4176 ops[n_ops].op = XEXP (this_op, 1);
4177 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4178 n_ops++;
4179
4180 ops[i].op = XEXP (this_op, 0);
4181 input_ops++;
4182 changed = 1;
4183 canonicalized |= this_neg;
4184 break;
4185
4186 case NEG:
4187 ops[i].op = XEXP (this_op, 0);
4188 ops[i].neg = ! this_neg;
4189 changed = 1;
4190 canonicalized = 1;
4191 break;
4192
4193 case CONST:
4194 if (n_ops < 7
4195 && GET_CODE (XEXP (this_op, 0)) == PLUS
4196 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4197 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4198 {
4199 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4200 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4201 ops[n_ops].neg = this_neg;
4202 n_ops++;
4203 changed = 1;
4204 canonicalized = 1;
4205 }
4206 break;
4207
4208 case NOT:
4209 /* ~a -> (-a - 1) */
4210 if (n_ops != 7)
4211 {
4212 ops[n_ops].op = CONSTM1_RTX (mode);
4213 ops[n_ops++].neg = this_neg;
4214 ops[i].op = XEXP (this_op, 0);
4215 ops[i].neg = !this_neg;
4216 changed = 1;
4217 canonicalized = 1;
4218 }
4219 break;
4220
4221 case CONST_INT:
4222 n_constants++;
4223 if (this_neg)
4224 {
4225 ops[i].op = neg_const_int (mode, this_op);
4226 ops[i].neg = 0;
4227 changed = 1;
4228 canonicalized = 1;
4229 }
4230 break;
4231
4232 default:
4233 break;
4234 }
4235 }
4236 }
4237 while (changed);
4238
4239 if (n_constants > 1)
4240 canonicalized = 1;
4241
4242 gcc_assert (n_ops >= 2);
4243
4244 /* If we only have two operands, we can avoid the loops. */
4245 if (n_ops == 2)
4246 {
4247 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4248 rtx lhs, rhs;
4249
4250 /* Get the two operands. Be careful with the order, especially for
4251 the cases where code == MINUS. */
4252 if (ops[0].neg && ops[1].neg)
4253 {
4254 lhs = gen_rtx_NEG (mode, ops[0].op);
4255 rhs = ops[1].op;
4256 }
4257 else if (ops[0].neg)
4258 {
4259 lhs = ops[1].op;
4260 rhs = ops[0].op;
4261 }
4262 else
4263 {
4264 lhs = ops[0].op;
4265 rhs = ops[1].op;
4266 }
4267
4268 return simplify_const_binary_operation (code, mode, lhs, rhs);
4269 }
4270
4271 /* Now simplify each pair of operands until nothing changes. */
4272 do
4273 {
4274 /* Insertion sort is good enough for an eight-element array. */
4275 for (i = 1; i < n_ops; i++)
4276 {
4277 struct simplify_plus_minus_op_data save;
4278 j = i - 1;
4279 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4280 continue;
4281
4282 canonicalized = 1;
4283 save = ops[i];
4284 do
4285 ops[j + 1] = ops[j];
4286 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4287 ops[j + 1] = save;
4288 }
4289
4290 changed = 0;
4291 for (i = n_ops - 1; i > 0; i--)
4292 for (j = i - 1; j >= 0; j--)
4293 {
4294 rtx lhs = ops[j].op, rhs = ops[i].op;
4295 int lneg = ops[j].neg, rneg = ops[i].neg;
4296
4297 if (lhs != 0 && rhs != 0)
4298 {
4299 enum rtx_code ncode = PLUS;
4300
4301 if (lneg != rneg)
4302 {
4303 ncode = MINUS;
4304 if (lneg)
4305 tem = lhs, lhs = rhs, rhs = tem;
4306 }
4307 else if (swap_commutative_operands_p (lhs, rhs))
4308 tem = lhs, lhs = rhs, rhs = tem;
4309
4310 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4311 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4312 {
4313 rtx tem_lhs, tem_rhs;
4314
4315 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4316 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4317 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4318
4319 if (tem && !CONSTANT_P (tem))
4320 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4321 }
4322 else
4323 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4324
4325 /* Reject "simplifications" that just wrap the two
4326 arguments in a CONST. Failure to do so can result
4327 in infinite recursion with simplify_binary_operation
4328 when it calls us to simplify CONST operations. */
4329 if (tem
4330 && ! (GET_CODE (tem) == CONST
4331 && GET_CODE (XEXP (tem, 0)) == ncode
4332 && XEXP (XEXP (tem, 0), 0) == lhs
4333 && XEXP (XEXP (tem, 0), 1) == rhs))
4334 {
4335 lneg &= rneg;
4336 if (GET_CODE (tem) == NEG)
4337 tem = XEXP (tem, 0), lneg = !lneg;
4338 if (CONST_INT_P (tem) && lneg)
4339 tem = neg_const_int (mode, tem), lneg = 0;
4340
4341 ops[i].op = tem;
4342 ops[i].neg = lneg;
4343 ops[j].op = NULL_RTX;
4344 changed = 1;
4345 canonicalized = 1;
4346 }
4347 }
4348 }
4349
4350 /* If nothing changed, fail. */
4351 if (!canonicalized)
4352 return NULL_RTX;
4353
4354 /* Pack all the operands to the lower-numbered entries. */
4355 for (i = 0, j = 0; j < n_ops; j++)
4356 if (ops[j].op)
4357 {
4358 ops[i] = ops[j];
4359 i++;
4360 }
4361 n_ops = i;
4362 }
4363 while (changed);
4364
4365 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4366 if (n_ops == 2
4367 && CONST_INT_P (ops[1].op)
4368 && CONSTANT_P (ops[0].op)
4369 && ops[0].neg)
4370 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4371
4372 /* We suppressed creation of trivial CONST expressions in the
4373 combination loop to avoid recursion. Create one manually now.
4374 The combination loop should have ensured that there is exactly
4375 one CONST_INT, and the sort will have ensured that it is last
4376 in the array and that any other constant will be next-to-last. */
4377
4378 if (n_ops > 1
4379 && CONST_INT_P (ops[n_ops - 1].op)
4380 && CONSTANT_P (ops[n_ops - 2].op))
4381 {
4382 rtx value = ops[n_ops - 1].op;
4383 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4384 value = neg_const_int (mode, value);
4385 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4386 INTVAL (value));
4387 n_ops--;
4388 }
4389
4390 /* Put a non-negated operand first, if possible. */
4391
4392 for (i = 0; i < n_ops && ops[i].neg; i++)
4393 continue;
4394 if (i == n_ops)
4395 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4396 else if (i != 0)
4397 {
4398 tem = ops[0].op;
4399 ops[0] = ops[i];
4400 ops[i].op = tem;
4401 ops[i].neg = 1;
4402 }
4403
4404 /* Now make the result by performing the requested operations. */
4405 result = ops[0].op;
4406 for (i = 1; i < n_ops; i++)
4407 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4408 mode, result, ops[i].op);
4409
4410 return result;
4411 }
4412
4413 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4414 static bool
4415 plus_minus_operand_p (const_rtx x)
4416 {
4417 return GET_CODE (x) == PLUS
4418 || GET_CODE (x) == MINUS
4419 || (GET_CODE (x) == CONST
4420 && GET_CODE (XEXP (x, 0)) == PLUS
4421 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4422 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4423 }
4424
4425 /* Like simplify_binary_operation except used for relational operators.
4426 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4427 not both be VOIDmode as well.
4428
4429 CMP_MODE specifies the mode in which the comparison is done, so it is
4430 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4431 the operands or, if both are VOIDmode, the operands are compared in
4432 "infinite precision". */
4433 rtx
4434 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4435 enum machine_mode cmp_mode, rtx op0, rtx op1)
4436 {
4437 rtx tem, trueop0, trueop1;
4438
4439 if (cmp_mode == VOIDmode)
4440 cmp_mode = GET_MODE (op0);
4441 if (cmp_mode == VOIDmode)
4442 cmp_mode = GET_MODE (op1);
4443
4444 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4445 if (tem)
4446 {
4447 if (SCALAR_FLOAT_MODE_P (mode))
4448 {
4449 if (tem == const0_rtx)
4450 return CONST0_RTX (mode);
4451 #ifdef FLOAT_STORE_FLAG_VALUE
4452 {
4453 REAL_VALUE_TYPE val;
4454 val = FLOAT_STORE_FLAG_VALUE (mode);
4455 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4456 }
4457 #else
4458 return NULL_RTX;
4459 #endif
4460 }
4461 if (VECTOR_MODE_P (mode))
4462 {
4463 if (tem == const0_rtx)
4464 return CONST0_RTX (mode);
4465 #ifdef VECTOR_STORE_FLAG_VALUE
4466 {
4467 int i, units;
4468 rtvec v;
4469
4470 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4471 if (val == NULL_RTX)
4472 return NULL_RTX;
4473 if (val == const1_rtx)
4474 return CONST1_RTX (mode);
4475
4476 units = GET_MODE_NUNITS (mode);
4477 v = rtvec_alloc (units);
4478 for (i = 0; i < units; i++)
4479 RTVEC_ELT (v, i) = val;
4480 return gen_rtx_raw_CONST_VECTOR (mode, v);
4481 }
4482 #else
4483 return NULL_RTX;
4484 #endif
4485 }
4486
4487 return tem;
4488 }
4489
4490 /* For the following tests, ensure const0_rtx is op1. */
4491 if (swap_commutative_operands_p (op0, op1)
4492 || (op0 == const0_rtx && op1 != const0_rtx))
4493 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4494
4495 /* If op0 is a compare, extract the comparison arguments from it. */
4496 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4497 return simplify_gen_relational (code, mode, VOIDmode,
4498 XEXP (op0, 0), XEXP (op0, 1));
4499
4500 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4501 || CC0_P (op0))
4502 return NULL_RTX;
4503
4504 trueop0 = avoid_constant_pool_reference (op0);
4505 trueop1 = avoid_constant_pool_reference (op1);
4506 return simplify_relational_operation_1 (code, mode, cmp_mode,
4507 trueop0, trueop1);
4508 }
4509
4510 /* This part of simplify_relational_operation is only used when CMP_MODE
4511 is not in class MODE_CC (i.e. it is a real comparison).
4512
4513 MODE is the mode of the result, while CMP_MODE specifies the mode in
4514 which the comparison is done, so it is the mode of the operands. */
4515
4516 static rtx
4517 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4518 enum machine_mode cmp_mode, rtx op0, rtx op1)
4519 {
4520 enum rtx_code op0code = GET_CODE (op0);
4521
4522 if (op1 == const0_rtx && COMPARISON_P (op0))
4523 {
4524 /* If op0 is a comparison, extract the comparison arguments
4525 from it. */
4526 if (code == NE)
4527 {
4528 if (GET_MODE (op0) == mode)
4529 return simplify_rtx (op0);
4530 else
4531 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4532 XEXP (op0, 0), XEXP (op0, 1));
4533 }
4534 else if (code == EQ)
4535 {
4536 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4537 if (new_code != UNKNOWN)
4538 return simplify_gen_relational (new_code, mode, VOIDmode,
4539 XEXP (op0, 0), XEXP (op0, 1));
4540 }
4541 }
4542
4543 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4544 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4545 if ((code == LTU || code == GEU)
4546 && GET_CODE (op0) == PLUS
4547 && CONST_INT_P (XEXP (op0, 1))
4548 && (rtx_equal_p (op1, XEXP (op0, 0))
4549 || rtx_equal_p (op1, XEXP (op0, 1)))
4550 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4551 && XEXP (op0, 1) != const0_rtx)
4552 {
4553 rtx new_cmp
4554 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4555 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4556 cmp_mode, XEXP (op0, 0), new_cmp);
4557 }
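/* Concretely, for example, (ltu:SI (plus:SI a (const_int 4)) (const_int 4))
   becomes (geu:SI a (const_int -4)), the canonical form of an unsigned
   overflow test.  */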
4558
4559 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4560 if ((code == LTU || code == GEU)
4561 && GET_CODE (op0) == PLUS
4562 && rtx_equal_p (op1, XEXP (op0, 1))
4563 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4564 && !rtx_equal_p (op1, XEXP (op0, 0)))
4565 return simplify_gen_relational (code, mode, cmp_mode, op0,
4566 copy_rtx (XEXP (op0, 0)));
4567
4568 if (op1 == const0_rtx)
4569 {
4570 /* Canonicalize (GTU x 0) as (NE x 0). */
4571 if (code == GTU)
4572 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4573 /* Canonicalize (LEU x 0) as (EQ x 0). */
4574 if (code == LEU)
4575 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4576 }
4577 else if (op1 == const1_rtx)
4578 {
4579 switch (code)
4580 {
4581 case GE:
4582 /* Canonicalize (GE x 1) as (GT x 0). */
4583 return simplify_gen_relational (GT, mode, cmp_mode,
4584 op0, const0_rtx);
4585 case GEU:
4586 /* Canonicalize (GEU x 1) as (NE x 0). */
4587 return simplify_gen_relational (NE, mode, cmp_mode,
4588 op0, const0_rtx);
4589 case LT:
4590 /* Canonicalize (LT x 1) as (LE x 0). */
4591 return simplify_gen_relational (LE, mode, cmp_mode,
4592 op0, const0_rtx);
4593 case LTU:
4594 /* Canonicalize (LTU x 1) as (EQ x 0). */
4595 return simplify_gen_relational (EQ, mode, cmp_mode,
4596 op0, const0_rtx);
4597 default:
4598 break;
4599 }
4600 }
4601 else if (op1 == constm1_rtx)
4602 {
4603 /* Canonicalize (LE x -1) as (LT x 0). */
4604 if (code == LE)
4605 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4606 /* Canonicalize (GT x -1) as (GE x 0). */
4607 if (code == GT)
4608 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4609 }
4610
4611 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4612 if ((code == EQ || code == NE)
4613 && (op0code == PLUS || op0code == MINUS)
4614 && CONSTANT_P (op1)
4615 && CONSTANT_P (XEXP (op0, 1))
4616 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4617 {
4618 rtx x = XEXP (op0, 0);
4619 rtx c = XEXP (op0, 1);
4620 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4621 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4622
4623 /* Detect an infinitely recursive condition, where we oscillate at this
4624 simplification case between:
4625 A + B == C <---> C - B == A,
4626 where A, B, and C are all constant but non-simplifiable expressions,
4627 usually SYMBOL_REFs. */
4628 if (GET_CODE (tem) == invcode
4629 && CONSTANT_P (x)
4630 && rtx_equal_p (c, XEXP (tem, 1)))
4631 return NULL_RTX;
4632
4633 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4634 }
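/* For example, (eq (plus x (const_int 3)) (const_int 7)) computes
   TEM == (const_int 4) via MINUS and becomes (eq x (const_int 4)).  */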
4635
4636 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4637 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4638 if (code == NE
4639 && op1 == const0_rtx
4640 && GET_MODE_CLASS (mode) == MODE_INT
4641 && cmp_mode != VOIDmode
4642 /* ??? Work-around BImode bugs in the ia64 backend. */
4643 && mode != BImode
4644 && cmp_mode != BImode
4645 && nonzero_bits (op0, cmp_mode) == 1
4646 && STORE_FLAG_VALUE == 1)
4647 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4648 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4649 : lowpart_subreg (mode, op0, cmp_mode);
4650
4651 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4652 if ((code == EQ || code == NE)
4653 && op1 == const0_rtx
4654 && op0code == XOR)
4655 return simplify_gen_relational (code, mode, cmp_mode,
4656 XEXP (op0, 0), XEXP (op0, 1));
4657
4658 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4659 if ((code == EQ || code == NE)
4660 && op0code == XOR
4661 && rtx_equal_p (XEXP (op0, 0), op1)
4662 && !side_effects_p (XEXP (op0, 0)))
4663 return simplify_gen_relational (code, mode, cmp_mode,
4664 XEXP (op0, 1), const0_rtx);
4665
4666 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4667 if ((code == EQ || code == NE)
4668 && op0code == XOR
4669 && rtx_equal_p (XEXP (op0, 1), op1)
4670 && !side_effects_p (XEXP (op0, 1)))
4671 return simplify_gen_relational (code, mode, cmp_mode,
4672 XEXP (op0, 0), const0_rtx);
4673
4674 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4675 if ((code == EQ || code == NE)
4676 && op0code == XOR
4677 && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
4678 && (CONST_INT_P (XEXP (op0, 1))
4679 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
4680 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4681 simplify_gen_binary (XOR, cmp_mode,
4682 XEXP (op0, 1), op1));
4683
4684 if (op0code == POPCOUNT && op1 == const0_rtx)
4685 switch (code)
4686 {
4687 case EQ:
4688 case LE:
4689 case LEU:
4690 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4691 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4692 XEXP (op0, 0), const0_rtx);
4693
4694 case NE:
4695 case GT:
4696 case GTU:
4697 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4698 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4699 XEXP (op0, 0), const0_rtx);
4700
4701 default:
4702 break;
4703 }
4704
4705 return NULL_RTX;
4706 }
4707
4708 enum
4709 {
4710 CMP_EQ = 1,
4711 CMP_LT = 2,
4712 CMP_GT = 4,
4713 CMP_LTU = 8,
4714 CMP_GTU = 16
4715 };
4716
4717
4718 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4719 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4720 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4721 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4722 For floating-point comparisons, assume that the operands were ordered. */
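/* Illustration of the encoding: for known values 3 and 5 the caller would
   pass CMP_LT | CMP_LTU, so comparison_result (LEU, ...) returns
   const_true_rtx (CMP_GTU is clear) while comparison_result (GT, ...)
   returns const0_rtx (CMP_GT is clear).  */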
4723
4724 static rtx
4725 comparison_result (enum rtx_code code, int known_results)
4726 {
4727 switch (code)
4728 {
4729 case EQ:
4730 case UNEQ:
4731 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4732 case NE:
4733 case LTGT:
4734 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4735
4736 case LT:
4737 case UNLT:
4738 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4739 case GE:
4740 case UNGE:
4741 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4742
4743 case GT:
4744 case UNGT:
4745 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4746 case LE:
4747 case UNLE:
4748 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4749
4750 case LTU:
4751 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4752 case GEU:
4753 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4754
4755 case GTU:
4756 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4757 case LEU:
4758 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4759
4760 case ORDERED:
4761 return const_true_rtx;
4762 case UNORDERED:
4763 return const0_rtx;
4764 default:
4765 gcc_unreachable ();
4766 }
4767 }
4768
4769 /* Check if the given comparison (done in the given MODE) is actually a
4770 tautology or a contradiction.
4771 If no simplification is possible, this function returns zero.
4772 Otherwise, it returns either const_true_rtx or const0_rtx. */
4773
4774 rtx
4775 simplify_const_relational_operation (enum rtx_code code,
4776 enum machine_mode mode,
4777 rtx op0, rtx op1)
4778 {
4779 rtx tem;
4780 rtx trueop0;
4781 rtx trueop1;
4782
4783 gcc_assert (mode != VOIDmode
4784 || (GET_MODE (op0) == VOIDmode
4785 && GET_MODE (op1) == VOIDmode));
4786
4787 /* If op0 is a compare, extract the comparison arguments from it. */
4788 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4789 {
4790 op1 = XEXP (op0, 1);
4791 op0 = XEXP (op0, 0);
4792
4793 if (GET_MODE (op0) != VOIDmode)
4794 mode = GET_MODE (op0);
4795 else if (GET_MODE (op1) != VOIDmode)
4796 mode = GET_MODE (op1);
4797 else
4798 return 0;
4799 }
4800
4801 /* We can't simplify MODE_CC values since we don't know what the
4802 actual comparison is. */
4803 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4804 return 0;
4805
4806 /* Make sure the constant is second. */
4807 if (swap_commutative_operands_p (op0, op1))
4808 {
4809 tem = op0, op0 = op1, op1 = tem;
4810 code = swap_condition (code);
4811 }
4812
4813 trueop0 = avoid_constant_pool_reference (op0);
4814 trueop1 = avoid_constant_pool_reference (op1);
4815
4816 /* For integer comparisons of A and B maybe we can simplify A - B and can
4817 then simplify a comparison of that with zero. If A and B are both either
4818 a register or a CONST_INT, this can't help; testing for these cases will
4819 prevent infinite recursion here and speed things up.
4820
4821 We can only do this for EQ and NE comparisons, as otherwise we may
4822 lose or introduce overflow that we cannot disregard as undefined,
4823 since we do not know the signedness of the operation on either the
4824 left or the right hand side of the comparison. */
4825
4826 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4827 && (code == EQ || code == NE)
4828 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4829 && (REG_P (op1) || CONST_INT_P (trueop1)))
4830 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4831 /* We cannot do this if tem is a nonzero address. */
4832 && ! nonzero_address_p (tem))
4833 return simplify_const_relational_operation (signed_condition (code),
4834 mode, tem, const0_rtx);
4835
4836 if (! HONOR_NANS (mode) && code == ORDERED)
4837 return const_true_rtx;
4838
4839 if (! HONOR_NANS (mode) && code == UNORDERED)
4840 return const0_rtx;
4841
4842 /* For modes without NaNs, if the two operands are equal, we know the
4843 result except if they have side-effects. Even with NaNs we know
4844 the result of unordered comparisons and, if signaling NaNs are
4845 irrelevant, also the result of LT/GT/LTGT. */
4846 if ((! HONOR_NANS (GET_MODE (trueop0))
4847 || code == UNEQ || code == UNLE || code == UNGE
4848 || ((code == LT || code == GT || code == LTGT)
4849 && ! HONOR_SNANS (GET_MODE (trueop0))))
4850 && rtx_equal_p (trueop0, trueop1)
4851 && ! side_effects_p (trueop0))
4852 return comparison_result (code, CMP_EQ);
4853
4854 /* If the operands are floating-point constants, see if we can fold
4855 the result. */
4856 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4857 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4858 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4859 {
4860 REAL_VALUE_TYPE d0, d1;
4861
4862 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4863 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4864
4865 /* Comparisons are unordered iff at least one of the values is NaN. */
4866 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4867 switch (code)
4868 {
4869 case UNEQ:
4870 case UNLT:
4871 case UNGT:
4872 case UNLE:
4873 case UNGE:
4874 case NE:
4875 case UNORDERED:
4876 return const_true_rtx;
4877 case EQ:
4878 case LT:
4879 case GT:
4880 case LE:
4881 case GE:
4882 case LTGT:
4883 case ORDERED:
4884 return const0_rtx;
4885 default:
4886 return 0;
4887 }
4888
4889 return comparison_result (code,
4890 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4891 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4892 }
4893
4894 /* Otherwise, see if the operands are both integers. */
4895 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4896 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4897 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4898 {
4899 int width = GET_MODE_PRECISION (mode);
4900 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4901 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4902
4903 /* Get the two words comprising each integer constant. */
4904 if (CONST_DOUBLE_AS_INT_P (trueop0))
4905 {
4906 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4907 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4908 }
4909 else
4910 {
4911 l0u = l0s = INTVAL (trueop0);
4912 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4913 }
4914
4915 if (CONST_DOUBLE_AS_INT_P (trueop1))
4916 {
4917 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4918 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4919 }
4920 else
4921 {
4922 l1u = l1s = INTVAL (trueop1);
4923 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4924 }
4925
4926 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4927 we have to sign or zero-extend the values. */
4928 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4929 {
4930 l0u &= GET_MODE_MASK (mode);
4931 l1u &= GET_MODE_MASK (mode);
4932
4933 if (val_signbit_known_set_p (mode, l0s))
4934 l0s |= ~GET_MODE_MASK (mode);
4935
4936 if (val_signbit_known_set_p (mode, l1s))
4937 l1s |= ~GET_MODE_MASK (mode);
4938 }
4939 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4940 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4941
4942 if (h0u == h1u && l0u == l1u)
4943 return comparison_result (code, CMP_EQ);
4944 else
4945 {
4946 int cr;
4947 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4948 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4949 return comparison_result (code, cr);
4950 }
4951 }
4952
4953 /* Optimize comparisons with upper and lower bounds. */
4954 if (HWI_COMPUTABLE_MODE_P (mode)
4955 && CONST_INT_P (trueop1))
4956 {
4957 int sign;
4958 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4959 HOST_WIDE_INT val = INTVAL (trueop1);
4960 HOST_WIDE_INT mmin, mmax;
4961
4962 if (code == GEU
4963 || code == LEU
4964 || code == GTU
4965 || code == LTU)
4966 sign = 0;
4967 else
4968 sign = 1;
4969
4970 /* Get a reduced range if the sign bit is zero. */
4971 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4972 {
4973 mmin = 0;
4974 mmax = nonzero;
4975 }
4976 else
4977 {
4978 rtx mmin_rtx, mmax_rtx;
4979 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4980
4981 mmin = INTVAL (mmin_rtx);
4982 mmax = INTVAL (mmax_rtx);
4983 if (sign)
4984 {
4985 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4986
4987 mmin >>= (sign_copies - 1);
4988 mmax >>= (sign_copies - 1);
4989 }
4990 }
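/* As an example: if nonzero_bits showed that only the low four bits of
   TRUEOP0 can be set, then mmin == 0 and mmax == 15, so in the switch
   below (gtu x (const_int 20)) folds to const0_rtx and
   (leu x (const_int 15)) folds to const_true_rtx.  */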
4991
4992 switch (code)
4993 {
4994 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4995 case GEU:
4996 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4997 return const_true_rtx;
4998 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4999 return const0_rtx;
5000 break;
5001 case GE:
5002 if (val <= mmin)
5003 return const_true_rtx;
5004 if (val > mmax)
5005 return const0_rtx;
5006 break;
5007
5008 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5009 case LEU:
5010 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5011 return const_true_rtx;
5012 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5013 return const0_rtx;
5014 break;
5015 case LE:
5016 if (val >= mmax)
5017 return const_true_rtx;
5018 if (val < mmin)
5019 return const0_rtx;
5020 break;
5021
5022 case EQ:
5023 /* x == y is always false for y out of range. */
5024 if (val < mmin || val > mmax)
5025 return const0_rtx;
5026 break;
5027
5028 /* x > y is always false for y >= mmax, always true for y < mmin. */
5029 case GTU:
5030 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5031 return const0_rtx;
5032 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5033 return const_true_rtx;
5034 break;
5035 case GT:
5036 if (val >= mmax)
5037 return const0_rtx;
5038 if (val < mmin)
5039 return const_true_rtx;
5040 break;
5041
5042 /* x < y is always false for y <= mmin, always true for y > mmax. */
5043 case LTU:
5044 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5045 return const0_rtx;
5046 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5047 return const_true_rtx;
5048 break;
5049 case LT:
5050 if (val <= mmin)
5051 return const0_rtx;
5052 if (val > mmax)
5053 return const_true_rtx;
5054 break;
5055
5056 case NE:
5057 /* x != y is always true for y out of range. */
5058 if (val < mmin || val > mmax)
5059 return const_true_rtx;
5060 break;
5061
5062 default:
5063 break;
5064 }
5065 }
5066
5067 /* Optimize integer comparisons with zero. */
5068 if (trueop1 == const0_rtx)
5069 {
5070 /* Some addresses are known to be nonzero. We don't know
5071 their sign, but equality comparisons are known. */
5072 if (nonzero_address_p (trueop0))
5073 {
5074 if (code == EQ || code == LEU)
5075 return const0_rtx;
5076 if (code == NE || code == GTU)
5077 return const_true_rtx;
5078 }
5079
5080 /* See if the first operand is an IOR with a constant. If so, we
5081 may be able to determine the result of this comparison. */
5082 if (GET_CODE (op0) == IOR)
5083 {
5084 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5085 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5086 {
5087 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5088 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5089 && (UINTVAL (inner_const)
5090 & ((unsigned HOST_WIDE_INT) 1
5091 << sign_bitnum)));
5092
5093 switch (code)
5094 {
5095 case EQ:
5096 case LEU:
5097 return const0_rtx;
5098 case NE:
5099 case GTU:
5100 return const_true_rtx;
5101 case LT:
5102 case LE:
5103 if (has_sign)
5104 return const_true_rtx;
5105 break;
5106 case GT:
5107 case GE:
5108 if (has_sign)
5109 return const0_rtx;
5110 break;
5111 default:
5112 break;
5113 }
5114 }
5115 }
5116 }
5117
5118 /* Optimize comparison of ABS with zero. */
5119 if (trueop1 == CONST0_RTX (mode)
5120 && (GET_CODE (trueop0) == ABS
5121 || (GET_CODE (trueop0) == FLOAT_EXTEND
5122 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5123 {
5124 switch (code)
5125 {
5126 case LT:
5127 /* Optimize abs(x) < 0.0. */
5128 if (!HONOR_SNANS (mode)
5129 && (!INTEGRAL_MODE_P (mode)
5130 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5131 {
5132 if (INTEGRAL_MODE_P (mode)
5133 && (issue_strict_overflow_warning
5134 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5135 warning (OPT_Wstrict_overflow,
5136 ("assuming signed overflow does not occur when "
5137 "assuming abs (x) < 0 is false"));
5138 return const0_rtx;
5139 }
5140 break;
5141
5142 case GE:
5143 /* Optimize abs(x) >= 0.0. */
5144 if (!HONOR_NANS (mode)
5145 && (!INTEGRAL_MODE_P (mode)
5146 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5147 {
5148 if (INTEGRAL_MODE_P (mode)
5149 && (issue_strict_overflow_warning
5150 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5151 warning (OPT_Wstrict_overflow,
5152 ("assuming signed overflow does not occur when "
5153 "assuming abs (x) >= 0 is true"));
5154 return const_true_rtx;
5155 }
5156 break;
5157
5158 case UNGE:
5159 /* Optimize ! (abs(x) < 0.0). */
5160 return const_true_rtx;
5161
5162 default:
5163 break;
5164 }
5165 }
5166
5167 return 0;
5168 }
5169 \f
5170 /* Simplify CODE, an operation with result mode MODE and three operands,
5171 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5172 a constant. Return 0 if no simplification is possible. */
5173
5174 rtx
5175 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5176 enum machine_mode op0_mode, rtx op0, rtx op1,
5177 rtx op2)
5178 {
5179 unsigned int width = GET_MODE_PRECISION (mode);
5180 bool any_change = false;
5181 rtx tem;
5182
5183 /* VOIDmode means "infinite" precision. */
5184 if (width == 0)
5185 width = HOST_BITS_PER_WIDE_INT;
5186
5187 switch (code)
5188 {
5189 case FMA:
5190 /* Simplify negations around the multiplication. */
5191 /* -a * -b + c => a * b + c. */
5192 if (GET_CODE (op0) == NEG)
5193 {
5194 tem = simplify_unary_operation (NEG, mode, op1, mode);
5195 if (tem)
5196 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5197 }
5198 else if (GET_CODE (op1) == NEG)
5199 {
5200 tem = simplify_unary_operation (NEG, mode, op0, mode);
5201 if (tem)
5202 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5203 }
5204
5205 /* Canonicalize the two multiplication operands. */
5206 /* a * -b + c => -b * a + c. */
5207 if (swap_commutative_operands_p (op0, op1))
5208 tem = op0, op0 = op1, op1 = tem, any_change = true;
5209
5210 if (any_change)
5211 return gen_rtx_FMA (mode, op0, op1, op2);
5212 return NULL_RTX;
5213
5214 case SIGN_EXTRACT:
5215 case ZERO_EXTRACT:
5216 if (CONST_INT_P (op0)
5217 && CONST_INT_P (op1)
5218 && CONST_INT_P (op2)
5219 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5220 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5221 {
5222 /* Extracting a bit-field from a constant. */
5223 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5224 HOST_WIDE_INT op1val = INTVAL (op1);
5225 HOST_WIDE_INT op2val = INTVAL (op2);
5226 if (BITS_BIG_ENDIAN)
5227 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5228 else
5229 val >>= op2val;
5230
5231 if (HOST_BITS_PER_WIDE_INT != op1val)
5232 {
5233 /* First zero-extend. */
5234 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5235 /* If desired, propagate sign bit. */
5236 if (code == SIGN_EXTRACT
5237 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5238 != 0)
5239 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5240 }
5241
5242 return gen_int_mode (val, mode);
5243 }
5244 break;
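/* Illustration, assuming !BITS_BIG_ENDIAN: extracting 4 bits at bit
   position 2 from (const_int 0xb4) gives 0xd, so ZERO_EXTRACT yields
   (const_int 13) while SIGN_EXTRACT, seeing the top bit of the field set,
   yields (const_int -3).  */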
5245
5246 case IF_THEN_ELSE:
5247 if (CONST_INT_P (op0))
5248 return op0 != const0_rtx ? op1 : op2;
5249
5250 /* Convert c ? a : a into "a". */
5251 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5252 return op1;
5253
5254 /* Convert a != b ? a : b into "a". */
5255 if (GET_CODE (op0) == NE
5256 && ! side_effects_p (op0)
5257 && ! HONOR_NANS (mode)
5258 && ! HONOR_SIGNED_ZEROS (mode)
5259 && ((rtx_equal_p (XEXP (op0, 0), op1)
5260 && rtx_equal_p (XEXP (op0, 1), op2))
5261 || (rtx_equal_p (XEXP (op0, 0), op2)
5262 && rtx_equal_p (XEXP (op0, 1), op1))))
5263 return op1;
5264
5265 /* Convert a == b ? a : b into "b". */
5266 if (GET_CODE (op0) == EQ
5267 && ! side_effects_p (op0)
5268 && ! HONOR_NANS (mode)
5269 && ! HONOR_SIGNED_ZEROS (mode)
5270 && ((rtx_equal_p (XEXP (op0, 0), op1)
5271 && rtx_equal_p (XEXP (op0, 1), op2))
5272 || (rtx_equal_p (XEXP (op0, 0), op2)
5273 && rtx_equal_p (XEXP (op0, 1), op1))))
5274 return op2;
5275
5276 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5277 {
5278 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5279 ? GET_MODE (XEXP (op0, 1))
5280 : GET_MODE (XEXP (op0, 0)));
5281 rtx temp;
5282
5283 /* Look for happy constants in op1 and op2. */
5284 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5285 {
5286 HOST_WIDE_INT t = INTVAL (op1);
5287 HOST_WIDE_INT f = INTVAL (op2);
5288
5289 if (t == STORE_FLAG_VALUE && f == 0)
5290 code = GET_CODE (op0);
5291 else if (t == 0 && f == STORE_FLAG_VALUE)
5292 {
5293 enum rtx_code tmp;
5294 tmp = reversed_comparison_code (op0, NULL_RTX);
5295 if (tmp == UNKNOWN)
5296 break;
5297 code = tmp;
5298 }
5299 else
5300 break;
5301
5302 return simplify_gen_relational (code, mode, cmp_mode,
5303 XEXP (op0, 0), XEXP (op0, 1));
5304 }
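/* For instance, when STORE_FLAG_VALUE == 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) becomes (lt x y),
   and with the two arms swapped the reversed comparison (ge x y) is
   generated instead.  */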
5305
5306 if (cmp_mode == VOIDmode)
5307 cmp_mode = op0_mode;
5308 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5309 cmp_mode, XEXP (op0, 0),
5310 XEXP (op0, 1));
5311
5312 /* See if any simplifications were possible. */
5313 if (temp)
5314 {
5315 if (CONST_INT_P (temp))
5316 return temp == const0_rtx ? op2 : op1;
5317 else if (temp)
5318 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5319 }
5320 }
5321 break;
5322
5323 case VEC_MERGE:
5324 gcc_assert (GET_MODE (op0) == mode);
5325 gcc_assert (GET_MODE (op1) == mode);
5326 gcc_assert (VECTOR_MODE_P (mode));
5327 op2 = avoid_constant_pool_reference (op2);
5328 if (CONST_INT_P (op2))
5329 {
5330 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5331 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5332 int mask = (1 << n_elts) - 1;
5333
5334 if (!(INTVAL (op2) & mask))
5335 return op1;
5336 if ((INTVAL (op2) & mask) == mask)
5337 return op0;
5338
5339 op0 = avoid_constant_pool_reference (op0);
5340 op1 = avoid_constant_pool_reference (op1);
5341 if (GET_CODE (op0) == CONST_VECTOR
5342 && GET_CODE (op1) == CONST_VECTOR)
5343 {
5344 rtvec v = rtvec_alloc (n_elts);
5345 unsigned int i;
5346
5347 for (i = 0; i < n_elts; i++)
5348 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5349 ? CONST_VECTOR_ELT (op0, i)
5350 : CONST_VECTOR_ELT (op1, i));
5351 return gen_rtx_CONST_VECTOR (mode, v);
5352 }
5353 }
5354 break;
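/* For example, for a 4-element vector mode, (vec_merge op0 op1
   (const_int 5)) takes elements 0 and 2 from OP0 and elements 1 and 3
   from OP1; masks of 0 and 0xf select OP1 and OP0 outright.  */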
5355
5356 default:
5357 gcc_unreachable ();
5358 }
5359
5360 return 0;
5361 }
5362
5363 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5364 or CONST_VECTOR,
5365 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5366
5367 Works by unpacking OP into a collection of 8-bit values
5368 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5369 and then repacking them again for OUTERMODE. */
5370
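/* An illustrative case on a little-endian target: taking the SImode subreg
   at byte 4 of the DImode constant 0x1234567800000000 unpacks the value
   into eight bytes, selects bytes 4..7 and repacks them as
   (const_int 0x12345678).  */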
5371 static rtx
5372 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5373 enum machine_mode innermode, unsigned int byte)
5374 {
5375 /* We support up to 512-bit values (for V8DFmode). */
5376 enum {
5377 max_bitsize = 512,
5378 value_bit = 8,
5379 value_mask = (1 << value_bit) - 1
5380 };
5381 unsigned char value[max_bitsize / value_bit];
5382 int value_start;
5383 int i;
5384 int elem;
5385
5386 int num_elem;
5387 rtx * elems;
5388 int elem_bitsize;
5389 rtx result_s;
5390 rtvec result_v = NULL;
5391 enum mode_class outer_class;
5392 enum machine_mode outer_submode;
5393
5394 /* Some ports misuse CCmode. */
5395 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5396 return op;
5397
5398 /* We have no way to represent a complex constant at the rtl level. */
5399 if (COMPLEX_MODE_P (outermode))
5400 return NULL_RTX;
5401
5402 /* Unpack the value. */
5403
5404 if (GET_CODE (op) == CONST_VECTOR)
5405 {
5406 num_elem = CONST_VECTOR_NUNITS (op);
5407 elems = &CONST_VECTOR_ELT (op, 0);
5408 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5409 }
5410 else
5411 {
5412 num_elem = 1;
5413 elems = &op;
5414 elem_bitsize = max_bitsize;
5415 }
5416 /* If this asserts, it is too complicated; reducing value_bit may help. */
5417 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5418 /* I don't know how to handle endianness of sub-units. */
5419 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5420
5421 for (elem = 0; elem < num_elem; elem++)
5422 {
5423 unsigned char * vp;
5424 rtx el = elems[elem];
5425
5426 /* Vectors are kept in target memory order. (This is probably
5427 a mistake.) */
5428 {
5429 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5430 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5431 / BITS_PER_UNIT);
5432 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5433 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5434 unsigned bytele = (subword_byte % UNITS_PER_WORD
5435 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5436 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5437 }
5438
5439 switch (GET_CODE (el))
5440 {
5441 case CONST_INT:
5442 for (i = 0;
5443 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5444 i += value_bit)
5445 *vp++ = INTVAL (el) >> i;
5446 /* CONST_INTs are always logically sign-extended. */
5447 for (; i < elem_bitsize; i += value_bit)
5448 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5449 break;
5450
5451 case CONST_DOUBLE:
5452 if (GET_MODE (el) == VOIDmode)
5453 {
5454 unsigned char extend = 0;
5455 /* If this triggers, someone should have generated a
5456 CONST_INT instead. */
5457 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5458
5459 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5460 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5461 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5462 {
5463 *vp++
5464 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5465 i += value_bit;
5466 }
5467
5468 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5469 extend = -1;
5470 for (; i < elem_bitsize; i += value_bit)
5471 *vp++ = extend;
5472 }
5473 else
5474 {
5475 long tmp[max_bitsize / 32];
5476 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5477
5478 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5479 gcc_assert (bitsize <= elem_bitsize);
5480 gcc_assert (bitsize % value_bit == 0);
5481
5482 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5483 GET_MODE (el));
5484
5485 /* real_to_target produces its result in words affected by
5486 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5487 and use WORDS_BIG_ENDIAN instead; see the documentation
5488 of SUBREG in rtl.texi. */
5489 for (i = 0; i < bitsize; i += value_bit)
5490 {
5491 int ibase;
5492 if (WORDS_BIG_ENDIAN)
5493 ibase = bitsize - 1 - i;
5494 else
5495 ibase = i;
5496 *vp++ = tmp[ibase / 32] >> i % 32;
5497 }
5498
5499 /* It shouldn't matter what's done here, so fill it with
5500 zero. */
5501 for (; i < elem_bitsize; i += value_bit)
5502 *vp++ = 0;
5503 }
5504 break;
5505
5506 case CONST_FIXED:
5507 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5508 {
5509 for (i = 0; i < elem_bitsize; i += value_bit)
5510 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5511 }
5512 else
5513 {
5514 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5515 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5516 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5517 i += value_bit)
5518 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5519 >> (i - HOST_BITS_PER_WIDE_INT);
5520 for (; i < elem_bitsize; i += value_bit)
5521 *vp++ = 0;
5522 }
5523 break;
5524
5525 default:
5526 gcc_unreachable ();
5527 }
5528 }
5529
5530 /* Now, pick the right byte to start with. */
5531 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5532 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5533 will already have offset 0. */
5534 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5535 {
5536 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5537 - byte);
5538 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5539 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5540 byte = (subword_byte % UNITS_PER_WORD
5541 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5542 }
5543
5544 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5545 so if it's become negative it will instead be very large.) */
5546 gcc_assert (byte < GET_MODE_SIZE (innermode));
5547
5548 /* Convert from bytes to chunks of size value_bit. */
5549 value_start = byte * (BITS_PER_UNIT / value_bit);
5550
5551 /* Re-pack the value. */
5552
5553 if (VECTOR_MODE_P (outermode))
5554 {
5555 num_elem = GET_MODE_NUNITS (outermode);
5556 result_v = rtvec_alloc (num_elem);
5557 elems = &RTVEC_ELT (result_v, 0);
5558 outer_submode = GET_MODE_INNER (outermode);
5559 }
5560 else
5561 {
5562 num_elem = 1;
5563 elems = &result_s;
5564 outer_submode = outermode;
5565 }
5566
5567 outer_class = GET_MODE_CLASS (outer_submode);
5568 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5569
5570 gcc_assert (elem_bitsize % value_bit == 0);
5571 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5572
5573 for (elem = 0; elem < num_elem; elem++)
5574 {
5575 unsigned char *vp;
5576
5577 /* Vectors are stored in target memory order. (This is probably
5578 a mistake.) */
5579 {
5580 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5581 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5582 / BITS_PER_UNIT);
5583 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5584 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5585 unsigned bytele = (subword_byte % UNITS_PER_WORD
5586 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5587 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5588 }
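/* For instance, assuming a big-endian target with UNITS_PER_WORD == 4
   and outermode V4SImode: for elem == 0, byte is 0 and ibyte is 12,
   so bytele is 12 and the first vector element is read from the most
   significant end of the LSB-first value image, matching target
   memory order.  */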
5589
5590 switch (outer_class)
5591 {
5592 case MODE_INT:
5593 case MODE_PARTIAL_INT:
5594 {
5595 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5596
5597 for (i = 0;
5598 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5599 i += value_bit)
5600 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5601 for (; i < elem_bitsize; i += value_bit)
5602 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5603 << (i - HOST_BITS_PER_WIDE_INT);
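/* E.g., assuming value_bit == 8 and a 64-bit HOST_WIDE_INT, the first
   eight chunks are packed into LO and any remaining chunks into HI;
   a TImode element therefore needs both halves, while anything up to
   DImode fits in LO alone.  */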
5604
5605 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5606 know why. */
5607 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5608 elems[elem] = gen_int_mode (lo, outer_submode);
5609 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5610 elems[elem] = immed_double_const (lo, hi, outer_submode);
5611 else
5612 return NULL_RTX;
5613 }
5614 break;
5615
5616 case MODE_FLOAT:
5617 case MODE_DECIMAL_FLOAT:
5618 {
5619 REAL_VALUE_TYPE r;
5620 long tmp[max_bitsize / 32];
5621
5622 /* real_from_target wants its input in words affected by
5623 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5624 and use WORDS_BIG_ENDIAN instead; see the documentation
5625 of SUBREG in rtl.texi. */
5626 for (i = 0; i < max_bitsize / 32; i++)
5627 tmp[i] = 0;
5628 for (i = 0; i < elem_bitsize; i += value_bit)
5629 {
5630 int ibase;
5631 if (WORDS_BIG_ENDIAN)
5632 ibase = elem_bitsize - 1 - i;
5633 else
5634 ibase = i;
5635 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5636 }
5637
5638 real_from_target (&r, tmp, outer_submode);
5639 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5640 }
5641 break;
5642
5643 case MODE_FRACT:
5644 case MODE_UFRACT:
5645 case MODE_ACCUM:
5646 case MODE_UACCUM:
5647 {
5648 FIXED_VALUE_TYPE f;
5649 f.data.low = 0;
5650 f.data.high = 0;
5651 f.mode = outer_submode;
5652
5653 for (i = 0;
5654 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5655 i += value_bit)
5656 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5657 for (; i < elem_bitsize; i += value_bit)
5658 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5659 << (i - HOST_BITS_PER_WIDE_INT));
5660
5661 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5662 }
5663 break;
5664
5665 default:
5666 gcc_unreachable ();
5667 }
5668 }
5669 if (VECTOR_MODE_P (outermode))
5670 return gen_rtx_CONST_VECTOR (outermode, result_v);
5671 else
5672 return result_s;
5673 }
5674
5675 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5676 Return 0 if no simplifications are possible. */
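/* For example, on a little-endian target,
   simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0) returns
   (const_int 0x34), while BYTE == 1 yields (const_int 0x12).  */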
5677 rtx
5678 simplify_subreg (enum machine_mode outermode, rtx op,
5679 enum machine_mode innermode, unsigned int byte)
5680 {
5681 /* Little bit of sanity checking. */
5682 gcc_assert (innermode != VOIDmode);
5683 gcc_assert (outermode != VOIDmode);
5684 gcc_assert (innermode != BLKmode);
5685 gcc_assert (outermode != BLKmode);
5686
5687 gcc_assert (GET_MODE (op) == innermode
5688 || GET_MODE (op) == VOIDmode);
5689
5690 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5691 gcc_assert (byte < GET_MODE_SIZE (innermode));
5692
5693 if (outermode == innermode && !byte)
5694 return op;
5695
5696 if (CONST_INT_P (op)
5697 || CONST_DOUBLE_P (op)
5698 || GET_CODE (op) == CONST_FIXED
5699 || GET_CODE (op) == CONST_VECTOR)
5700 return simplify_immed_subreg (outermode, op, innermode, byte);
5701
5702 /* Changing mode twice with SUBREG => just change it once,
5703 or not at all if changing back to op's starting mode. */
5704 if (GET_CODE (op) == SUBREG)
5705 {
5706 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5707 int final_offset = byte + SUBREG_BYTE (op);
5708 rtx newx;
5709
5710 if (outermode == innermostmode
5711 && byte == 0 && SUBREG_BYTE (op) == 0)
5712 return SUBREG_REG (op);
5713
5714 /* The SUBREG_BYTE represents the offset, as if the value were stored
5715 in memory. The irritating exception is a paradoxical subreg, where
5716 we define SUBREG_BYTE to be 0; on big-endian machines this
5717 value should be negative. For the moment, undo this exception. */
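/* For instance, assuming UNITS_PER_WORD == 4, innermode HImode and
   outermode SImode with BYTE == 0: difference is -2, so on a
   BYTES_BIG_ENDIAN machine final_offset is adjusted by -2, recreating
   the negative offset the paradoxical subreg hides.  */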
5718 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5719 {
5720 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5721 if (WORDS_BIG_ENDIAN)
5722 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5723 if (BYTES_BIG_ENDIAN)
5724 final_offset += difference % UNITS_PER_WORD;
5725 }
5726 if (SUBREG_BYTE (op) == 0
5727 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5728 {
5729 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5730 if (WORDS_BIG_ENDIAN)
5731 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5732 if (BYTES_BIG_ENDIAN)
5733 final_offset += difference % UNITS_PER_WORD;
5734 }
5735
5736 /* See whether resulting subreg will be paradoxical. */
5737 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5738 {
5739 /* In nonparadoxical subregs we can't handle negative offsets. */
5740 if (final_offset < 0)
5741 return NULL_RTX;
5742 /* Bail out in case resulting subreg would be incorrect. */
5743 if (final_offset % GET_MODE_SIZE (outermode)
5744 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5745 return NULL_RTX;
5746 }
5747 else
5748 {
5749 int offset = 0;
5750 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5751
5752 /* In a paradoxical subreg, see if we are still looking at the lower
5753 part. If so, our SUBREG_BYTE will be 0. */
5754 if (WORDS_BIG_ENDIAN)
5755 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5756 if (BYTES_BIG_ENDIAN)
5757 offset += difference % UNITS_PER_WORD;
5758 if (offset == final_offset)
5759 final_offset = 0;
5760 else
5761 return NULL_RTX;
5762 }
5763
5764 /* Recurse for further possible simplifications. */
5765 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5766 final_offset);
5767 if (newx)
5768 return newx;
5769 if (validate_subreg (outermode, innermostmode,
5770 SUBREG_REG (op), final_offset))
5771 {
5772 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5773 if (SUBREG_PROMOTED_VAR_P (op)
5774 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5775 && GET_MODE_CLASS (outermode) == MODE_INT
5776 && IN_RANGE (GET_MODE_SIZE (outermode),
5777 GET_MODE_SIZE (innermode),
5778 GET_MODE_SIZE (innermostmode))
5779 && subreg_lowpart_p (newx))
5780 {
5781 SUBREG_PROMOTED_VAR_P (newx) = 1;
5782 SUBREG_PROMOTED_UNSIGNED_SET
5783 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5784 }
5785 return newx;
5786 }
5787 return NULL_RTX;
5788 }
5789
5790 /* SUBREG of a hard register => just change the register number
5791 and/or mode. If the hard register is not valid in that mode,
5792 suppress this simplification. If the hard register is the stack,
5793 frame, or argument pointer, leave this as a SUBREG. */
5794
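/* For example, on a 32-bit target, (subreg:SI (reg:DI N) 4), where N
   is some hard register holding a DImode value in a pair of word
   registers, normally becomes a plain (reg:SI) for whichever register
   of the pair holds bytes 4..7 of the value, as computed by
   simplify_subreg_regno.  */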
5795 if (REG_P (op) && HARD_REGISTER_P (op))
5796 {
5797 unsigned int regno, final_regno;
5798
5799 regno = REGNO (op);
5800 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5801 if (HARD_REGISTER_NUM_P (final_regno))
5802 {
5803 rtx x;
5804 int final_offset = byte;
5805
5806 /* Adjust offset for paradoxical subregs. */
5807 if (byte == 0
5808 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5809 {
5810 int difference = (GET_MODE_SIZE (innermode)
5811 - GET_MODE_SIZE (outermode));
5812 if (WORDS_BIG_ENDIAN)
5813 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5814 if (BYTES_BIG_ENDIAN)
5815 final_offset += difference % UNITS_PER_WORD;
5816 }
5817
5818 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5819
5820 /* Propagate the original regno. We don't have any way to specify
5821 an offset inside the original regno, so do so only for the lowpart.
5822 The information is used only by alias analysis, which cannot
5823 grok partial registers anyway. */
5824
5825 if (subreg_lowpart_offset (outermode, innermode) == byte)
5826 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5827 return x;
5828 }
5829 }
5830
5831 /* If we have a SUBREG of a register that we are replacing and we are
5832 replacing it with a MEM, make a new MEM and try replacing the
5833 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5834 or if we would be widening it. */
5835
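/* For example, on a little-endian target a (subreg:QI (mem:SI X) 3)
   becomes a narrower memory reference at address X plus 3 via
   adjust_address_nv, provided the address is not mode-dependent.  */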
5836 if (MEM_P (op)
5837 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5838 /* Allow splitting of volatile memory references in case we don't
5839 have an instruction to move the whole thing. */
5840 && (! MEM_VOLATILE_P (op)
5841 || ! have_insn_for (SET, innermode))
5842 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5843 return adjust_address_nv (op, outermode, byte);
5844
5845 /* Handle complex values represented as CONCAT
5846 of real and imaginary part. */
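/* E.g. (subreg:SF (concat:SC R I) 0) picks the real part R, while
   byte 4 picks the imaginary part I, assuming 4-byte SFmode parts.  */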
5847 if (GET_CODE (op) == CONCAT)
5848 {
5849 unsigned int part_size, final_offset;
5850 rtx part, res;
5851
5852 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5853 if (byte < part_size)
5854 {
5855 part = XEXP (op, 0);
5856 final_offset = byte;
5857 }
5858 else
5859 {
5860 part = XEXP (op, 1);
5861 final_offset = byte - part_size;
5862 }
5863
5864 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5865 return NULL_RTX;
5866
5867 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5868 if (res)
5869 return res;
5870 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5871 return gen_rtx_SUBREG (outermode, part, final_offset);
5872 return NULL_RTX;
5873 }
5874
5875 /* A SUBREG resulting from a zero extension may fold to zero if
5876 it extracts higher bits than the ZERO_EXTEND's source has. */
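/* For instance, on a little-endian target,
   (subreg:SI (zero_extend:DI (reg:SI X)) 4) selects bits 32..63,
   which the zero extension guarantees are zero, so it folds to
   (const_int 0).  */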
5877 if (GET_CODE (op) == ZERO_EXTEND)
5878 {
5879 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5880 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5881 return CONST0_RTX (outermode);
5882 }
5883
5884 if (SCALAR_INT_MODE_P (outermode)
5885 && SCALAR_INT_MODE_P (innermode)
5886 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5887 && byte == subreg_lowpart_offset (outermode, innermode))
5888 {
5889 rtx tem = simplify_truncation (outermode, op, innermode);
5890 if (tem)
5891 return tem;
5892 }
5893
5894 return NULL_RTX;
5895 }
5896
5897 /* Make a SUBREG operation or equivalent if it folds. */
5898
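/* As a usage sketch: simplify_gen_subreg (QImode, gen_rtx_REG (SImode, R),
   SImode, 0) usually returns (subreg:QI (reg:SI R) 0) when the folders
   above cannot simplify it, assuming such a subreg is valid for the
   target.  */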
5899 rtx
5900 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5901 enum machine_mode innermode, unsigned int byte)
5902 {
5903 rtx newx;
5904
5905 newx = simplify_subreg (outermode, op, innermode, byte);
5906 if (newx)
5907 return newx;
5908
5909 if (GET_CODE (op) == SUBREG
5910 || GET_CODE (op) == CONCAT
5911 || GET_MODE (op) == VOIDmode)
5912 return NULL_RTX;
5913
5914 if (validate_subreg (outermode, innermode, op, byte))
5915 return gen_rtx_SUBREG (outermode, op, byte);
5916
5917 return NULL_RTX;
5918 }
5919
5920 /* Simplify X, an rtx expression.
5921
5922 Return the simplified expression or NULL if no simplifications
5923 were possible.
5924
5925 This is the preferred entry point into the simplification routines;
5926 however, we still allow passes to call the more specific routines.
5927
5928 Right now GCC has three (yes, three) major bodies of RTL simplification
5929 code that need to be unified.
5930
5931 1. fold_rtx in cse.c. This code uses various CSE specific
5932 information to aid in RTL simplification.
5933
5934 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5935 it uses combine specific information to aid in RTL
5936 simplification.
5937
5938 3. The routines in this file.
5939
5940
5941 Long term we want to only have one body of simplification code; to
5942 get to that state I recommend the following steps:
5943
5944 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5945 which do not depend on pass-specific state into these routines.
5946
5947 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5948 use this routine whenever possible.
5949
5950 3. Allow for pass dependent state to be provided to these
5951 routines and add simplifications based on the pass dependent
5952 state. Remove code from cse.c & combine.c that becomes
5953 redundant/dead.
5954
5955 It will take time, but ultimately the compiler will be easier to
5956 maintain and improve. It's totally silly that when we add a
5957 simplification it needs to be added to 4 places (3 for RTL
5958 simplification and 1 for tree simplification). */
5959
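/* As a usage sketch: simplify_rtx dispatches on the rtx class, so
   passing it (plus:SI (const_int 2) (const_int 3)) hands the operands
   to simplify_binary_operation and yields (const_int 5), while an rtx
   it cannot improve yields NULL.  */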
5960 rtx
5961 simplify_rtx (const_rtx x)
5962 {
5963 const enum rtx_code code = GET_CODE (x);
5964 const enum machine_mode mode = GET_MODE (x);
5965
5966 switch (GET_RTX_CLASS (code))
5967 {
5968 case RTX_UNARY:
5969 return simplify_unary_operation (code, mode,
5970 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5971 case RTX_COMM_ARITH:
5972 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5973 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5974
5975 /* Fall through.... */
5976
5977 case RTX_BIN_ARITH:
5978 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5979
5980 case RTX_TERNARY:
5981 case RTX_BITFIELD_OPS:
5982 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5983 XEXP (x, 0), XEXP (x, 1),
5984 XEXP (x, 2));
5985
5986 case RTX_COMPARE:
5987 case RTX_COMM_COMPARE:
5988 return simplify_relational_operation (code, mode,
5989 ((GET_MODE (XEXP (x, 0))
5990 != VOIDmode)
5991 ? GET_MODE (XEXP (x, 0))
5992 : GET_MODE (XEXP (x, 1))),
5993 XEXP (x, 0),
5994 XEXP (x, 1));
5995
5996 case RTX_EXTRA:
5997 if (code == SUBREG)
5998 return simplify_subreg (mode, SUBREG_REG (x),
5999 GET_MODE (SUBREG_REG (x)),
6000 SUBREG_BYTE (x));
6001 break;
6002
6003 case RTX_OBJ:
6004 if (code == LO_SUM)
6005 {
6006 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6007 if (GET_CODE (XEXP (x, 0)) == HIGH
6008 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6009 return XEXP (x, 1);
6010 }
6011 break;
6012
6013 default:
6014 break;
6015 }
6016 return NULL;
6017 }