gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "tm_p.h"
30 #include "expmed.h"
31 #include "optabs.h"
32 #include "regs.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "varasm.h"
39 #include "flags.h"
40 #include "dojump.h"
41 #include "explow.h"
42 #include "calls.h"
43 #include "stmt.h"
44 #include "expr.h"
45
46 /* Simplification and canonicalization of RTL. */
47
48 /* Much code operates on (low, high) pairs; the low value is an
49 unsigned wide int, the high value a signed wide int. We
50 occasionally need to sign extend from low to high as if low were a
51 signed wide int. */
52 #define HWI_SIGN_EXTEND(low) \
53 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
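/* For example, HWI_SIGN_EXTEND (-5) is (HOST_WIDE_INT) -1 and
   HWI_SIGN_EXTEND (5) is 0, i.e. it yields the high word that sign
   extending the low word would produce.  */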
54
55 static rtx neg_const_int (machine_mode, const_rtx);
56 static bool plus_minus_operand_p (const_rtx);
57 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
58 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
59 unsigned int);
60 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
61 rtx, rtx);
62 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
63 machine_mode, rtx, rtx);
64 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
65 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
66 rtx, rtx, rtx, rtx);
67 \f
68 /* Negate a CONST_INT rtx, truncating (because a conversion from a
69 maximally negative number can overflow). */
70 static rtx
71 neg_const_int (machine_mode mode, const_rtx i)
72 {
73 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
74 }
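/* For example, negating the QImode constant -128 gives -128 back:
   the unsigned negation yields 0x80, and gen_int_mode sign-extends
   that into the mode again instead of overflowing.  */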
75
76 /* Test whether expression, X, is an immediate constant that represents
77 the most significant bit of machine mode MODE. */
78
79 bool
80 mode_signbit_p (machine_mode mode, const_rtx x)
81 {
82 unsigned HOST_WIDE_INT val;
83 unsigned int width;
84
85 if (GET_MODE_CLASS (mode) != MODE_INT)
86 return false;
87
88 width = GET_MODE_PRECISION (mode);
89 if (width == 0)
90 return false;
91
92 if (width <= HOST_BITS_PER_WIDE_INT
93 && CONST_INT_P (x))
94 val = INTVAL (x);
95 #if TARGET_SUPPORTS_WIDE_INT
96 else if (CONST_WIDE_INT_P (x))
97 {
98 unsigned int i;
99 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
100 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
101 return false;
102 for (i = 0; i < elts - 1; i++)
103 if (CONST_WIDE_INT_ELT (x, i) != 0)
104 return false;
105 val = CONST_WIDE_INT_ELT (x, elts - 1);
106 width %= HOST_BITS_PER_WIDE_INT;
107 if (width == 0)
108 width = HOST_BITS_PER_WIDE_INT;
109 }
110 #else
111 else if (width <= HOST_BITS_PER_DOUBLE_INT
112 && CONST_DOUBLE_AS_INT_P (x)
113 && CONST_DOUBLE_LOW (x) == 0)
114 {
115 val = CONST_DOUBLE_HIGH (x);
116 width -= HOST_BITS_PER_WIDE_INT;
117 }
118 #endif
119 else
120 /* X is not an integer constant. */
121 return false;
122
123 if (width < HOST_BITS_PER_WIDE_INT)
124 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
125 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
126 }
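/* For example, in SImode the only constant accepted above is the one
   whose low 32 bits are 0x80000000, i.e. (HOST_WIDE_INT) 1 << 31;
   every other value fails the final equality test.  */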
127
128 /* Test whether VAL is equal to the most significant bit of mode MODE
129 (after masking with the mode mask of MODE). Returns false if the
130 precision of MODE is too large to handle. */
131
132 bool
133 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
134 {
135 unsigned int width;
136
137 if (GET_MODE_CLASS (mode) != MODE_INT)
138 return false;
139
140 width = GET_MODE_PRECISION (mode);
141 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
142 return false;
143
144 val &= GET_MODE_MASK (mode);
145 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
146 }
147
148 /* Test whether the most significant bit of mode MODE is set in VAL.
149 Returns false if the precision of MODE is too large to handle. */
150 bool
151 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
152 {
153 unsigned int width;
154
155 if (GET_MODE_CLASS (mode) != MODE_INT)
156 return false;
157
158 width = GET_MODE_PRECISION (mode);
159 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
160 return false;
161
162 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
163 return val != 0;
164 }
165
166 /* Test whether the most significant bit of mode MODE is clear in VAL.
167 Returns false if the precision of MODE is too large to handle. */
168 bool
169 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 {
171 unsigned int width;
172
173 if (GET_MODE_CLASS (mode) != MODE_INT)
174 return false;
175
176 width = GET_MODE_PRECISION (mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
179
180 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
181 return val == 0;
182 }
183 \f
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
186
187 rtx
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
190 {
191 rtx tem;
192
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
197
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
202
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
204 }
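/* For example, when a REG and a CONST_INT do not fold,
   simplify_gen_binary (PLUS, SImode, (const_int 1), reg) returns
   (plus:SI reg (const_int 1)): the commutative swap puts the
   constant second, which is the canonical RTL ordering.  */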
205 \f
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
208 rtx
209 avoid_constant_pool_reference (rtx x)
210 {
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
214
215 switch (GET_CODE (x))
216 {
217 case MEM:
218 break;
219
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
228
229 default:
230 return x;
231 }
232
233 if (GET_MODE (x) == BLKmode)
234 return x;
235
236 addr = XEXP (x, 0);
237
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
240
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
245 {
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
248 }
249
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
252
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
257 {
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
260
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if ((offset != 0 || cmode != GET_MODE (x))
265 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
266 {
267 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
268 if (tem && CONSTANT_P (tem))
269 return tem;
270 }
271 else
272 return c;
273 }
274
275 return x;
276 }
277 \f
278 /* Simplify a MEM based on its attributes. This is the default
279 delegitimize_address target hook, and it's recommended that every
280 overrider call it. */
281
282 rtx
283 delegitimize_mem_from_attrs (rtx x)
284 {
285 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
286 use their base addresses as equivalent. */
287 if (MEM_P (x)
288 && MEM_EXPR (x)
289 && MEM_OFFSET_KNOWN_P (x))
290 {
291 tree decl = MEM_EXPR (x);
292 machine_mode mode = GET_MODE (x);
293 HOST_WIDE_INT offset = 0;
294
295 switch (TREE_CODE (decl))
296 {
297 default:
298 decl = NULL;
299 break;
300
301 case VAR_DECL:
302 break;
303
304 case ARRAY_REF:
305 case ARRAY_RANGE_REF:
306 case COMPONENT_REF:
307 case BIT_FIELD_REF:
308 case REALPART_EXPR:
309 case IMAGPART_EXPR:
310 case VIEW_CONVERT_EXPR:
311 {
312 HOST_WIDE_INT bitsize, bitpos;
313 tree toffset;
314 int unsignedp, volatilep = 0;
315
316 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
317 &mode, &unsignedp, &volatilep, false);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
323 {
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
327 }
328 break;
329 }
330 }
331
332 if (decl
333 && mode == GET_MODE (x)
334 && TREE_CODE (decl) == VAR_DECL
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
339 {
340 rtx newx;
341
342 offset += MEM_OFFSET (x);
343
344 newx = DECL_RTL (decl);
345
346 if (MEM_P (newx))
347 {
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
349
350 /* Avoid creating a new MEM needlessly if we already had
351 the same address. We do if there's no OFFSET and the
352 old address X is identical to NEWX, or if X is of the
353 form (plus NEWX OFFSET), or the NEWX is of the form
354 (plus Y (const_int Z)) and X is that with the offset
355 added: (plus Y (const_int Z+OFFSET)). */
356 if (!((offset == 0
357 || (GET_CODE (o) == PLUS
358 && GET_CODE (XEXP (o, 1)) == CONST_INT
359 && (offset == INTVAL (XEXP (o, 1))
360 || (GET_CODE (n) == PLUS
361 && GET_CODE (XEXP (n, 1)) == CONST_INT
362 && (INTVAL (XEXP (n, 1)) + offset
363 == INTVAL (XEXP (o, 1)))
364 && (n = XEXP (n, 0))))
365 && (o = XEXP (o, 0))))
366 && rtx_equal_p (o, n)))
367 x = adjust_address_nv (newx, mode, offset);
368 }
369 else if (GET_MODE (x) == GET_MODE (newx)
370 && offset == 0)
371 x = newx;
372 }
373 }
374
375 return x;
376 }
377 \f
378 /* Make a unary operation by first seeing if it folds and otherwise making
379 the specified operation. */
380
381 rtx
382 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
383 machine_mode op_mode)
384 {
385 rtx tem;
386
387 /* If this simplifies, use it. */
388 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
389 return tem;
390
391 return gen_rtx_fmt_e (code, mode, op);
392 }
393
394 /* Likewise for ternary operations. */
395
396 rtx
397 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
398 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
399 {
400 rtx tem;
401
402 /* If this simplifies, use it. */
403 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
404 op0, op1, op2)))
405 return tem;
406
407 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
408 }
409
410 /* Likewise, for relational operations.
411 CMP_MODE specifies mode comparison is done in. */
412
413 rtx
414 simplify_gen_relational (enum rtx_code code, machine_mode mode,
415 machine_mode cmp_mode, rtx op0, rtx op1)
416 {
417 rtx tem;
418
419 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
420 op0, op1)))
421 return tem;
422
423 return gen_rtx_fmt_ee (code, mode, op0, op1);
424 }
425 \f
426 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
427 and simplify the result. If FN is non-NULL, call this callback on each
428 X, if it returns non-NULL, replace X with its return value and simplify the
429 result. */
430
431 rtx
432 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
433 rtx (*fn) (rtx, const_rtx, void *), void *data)
434 {
435 enum rtx_code code = GET_CODE (x);
436 machine_mode mode = GET_MODE (x);
437 machine_mode op_mode;
438 const char *fmt;
439 rtx op0, op1, op2, newx, op;
440 rtvec vec, newvec;
441 int i, j;
442
443 if (__builtin_expect (fn != NULL, 0))
444 {
445 newx = fn (x, old_rtx, data);
446 if (newx)
447 return newx;
448 }
449 else if (rtx_equal_p (x, old_rtx))
450 return copy_rtx ((rtx) data);
451
452 switch (GET_RTX_CLASS (code))
453 {
454 case RTX_UNARY:
455 op0 = XEXP (x, 0);
456 op_mode = GET_MODE (op0);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0))
459 return x;
460 return simplify_gen_unary (code, mode, op0, op_mode);
461
462 case RTX_BIN_ARITH:
463 case RTX_COMM_ARITH:
464 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
467 return x;
468 return simplify_gen_binary (code, mode, op0, op1);
469
470 case RTX_COMPARE:
471 case RTX_COMM_COMPARE:
472 op0 = XEXP (x, 0);
473 op1 = XEXP (x, 1);
474 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_relational (code, mode, op_mode, op0, op1);
480
481 case RTX_TERNARY:
482 case RTX_BITFIELD_OPS:
483 op0 = XEXP (x, 0);
484 op_mode = GET_MODE (op0);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
489 return x;
490 if (op_mode == VOIDmode)
491 op_mode = GET_MODE (op0);
492 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
493
494 case RTX_EXTRA:
495 if (code == SUBREG)
496 {
497 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
498 if (op0 == SUBREG_REG (x))
499 return x;
500 op0 = simplify_gen_subreg (GET_MODE (x), op0,
501 GET_MODE (SUBREG_REG (x)),
502 SUBREG_BYTE (x));
503 return op0 ? op0 : x;
504 }
505 break;
506
507 case RTX_OBJ:
508 if (code == MEM)
509 {
510 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
511 if (op0 == XEXP (x, 0))
512 return x;
513 return replace_equiv_address_nv (x, op0);
514 }
515 else if (code == LO_SUM)
516 {
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
519
520 /* (lo_sum (high x) y) -> y where x and y have the same base. */
521 if (GET_CODE (op0) == HIGH)
522 {
523 rtx base0, base1, offset0, offset1;
524 split_const (XEXP (op0, 0), &base0, &offset0);
525 split_const (op1, &base1, &offset1);
526 if (rtx_equal_p (base0, base1))
527 return op1;
528 }
529
530 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
531 return x;
532 return gen_rtx_LO_SUM (mode, op0, op1);
533 }
534 break;
535
536 default:
537 break;
538 }
539
540 newx = x;
541 fmt = GET_RTX_FORMAT (code);
542 for (i = 0; fmt[i]; i++)
543 switch (fmt[i])
544 {
545 case 'E':
546 vec = XVEC (x, i);
547 newvec = XVEC (newx, i);
548 for (j = 0; j < GET_NUM_ELEM (vec); j++)
549 {
550 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
551 old_rtx, fn, data);
552 if (op != RTVEC_ELT (vec, j))
553 {
554 if (newvec == vec)
555 {
556 newvec = shallow_copy_rtvec (vec);
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XVEC (newx, i) = newvec;
560 }
561 RTVEC_ELT (newvec, j) = op;
562 }
563 }
564 break;
565
566 case 'e':
567 if (XEXP (x, i))
568 {
569 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
570 if (op != XEXP (x, i))
571 {
572 if (x == newx)
573 newx = shallow_copy_rtx (x);
574 XEXP (newx, i) = op;
575 }
576 }
577 break;
578 }
579 return newx;
580 }
581
582 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
583 resulting RTX. Return a new RTX which is as simplified as possible. */
584
585 rtx
586 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
587 {
588 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
589 }
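/* For example, replacing (reg:SI R) with (const_int 6) in
   (plus:SI (reg:SI R) (const_int 4)) does not merely substitute;
   the rebuilt PLUS is re-simplified and (const_int 10) is returned.  */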
590 \f
591 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
593
594 RTL provides two ways of truncating a value:
595
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
600
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
606
607 2. a TRUNCATE. This form handles both scalar and compound integers.
608
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
613
614 simplify_gen_unary (TRUNCATE, ...)
615
616 and leave simplify_unary_operation to work out which representation
617 should be used.
618
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
624
625 (and:DI X Y)
626
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
631
632 (and:DI (reg:DI X) (const_int 63))
633
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
637
638 static rtx
639 simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
641 {
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 gcc_assert (precision <= op_precision);
645
646 /* Optimize truncations of zero and sign extended values. */
647 if (GET_CODE (op) == ZERO_EXTEND
648 || GET_CODE (op) == SIGN_EXTEND)
649 {
650 /* There are three possibilities. If MODE is the same as the
651 origmode, we can omit both the extension and the subreg.
652 If MODE is not larger than the origmode, we can apply the
653 truncation without the extension. Finally, if the outermode
654 is larger than the origmode, we can just extend to the appropriate
655 mode. */
656 machine_mode origmode = GET_MODE (XEXP (op, 0));
657 if (mode == origmode)
658 return XEXP (op, 0);
659 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
660 return simplify_gen_unary (TRUNCATE, mode,
661 XEXP (op, 0), origmode);
662 else
663 return simplify_gen_unary (GET_CODE (op), mode,
664 XEXP (op, 0), origmode);
665 }
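/* For example, (truncate:HI (sign_extend:SI (x:HI))) collapses to x:HI,
   (truncate:QI (sign_extend:SI (x:HI))) becomes (truncate:QI (x:HI)),
   and (truncate:HI (zero_extend:SI (x:QI))) becomes
   (zero_extend:HI (x:QI)), matching the three cases above.  */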
666
667 /* If the machine can perform operations in the truncated mode, distribute
668 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
669 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
670 if (1
671 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
672 && (GET_CODE (op) == PLUS
673 || GET_CODE (op) == MINUS
674 || GET_CODE (op) == MULT))
675 {
676 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
677 if (op0)
678 {
679 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
680 if (op1)
681 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
682 }
683 }
684
685 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
686 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
687 the outer subreg is effectively a truncation to the original mode. */
688 if ((GET_CODE (op) == LSHIFTRT
689 || GET_CODE (op) == ASHIFTRT)
690 /* Ensure that OP_MODE is at least twice as wide as MODE
691 to avoid the possibility that an outer LSHIFTRT shifts by more
692 than the sign extension's sign_bit_copies and introduces zeros
693 into the high bits of the result. */
694 && 2 * precision <= op_precision
695 && CONST_INT_P (XEXP (op, 1))
696 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
697 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
698 && UINTVAL (XEXP (op, 1)) < precision)
699 return simplify_gen_binary (ASHIFTRT, mode,
700 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
701
702 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
703 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
704 the outer subreg is effectively a truncation to the original mode. */
705 if ((GET_CODE (op) == LSHIFTRT
706 || GET_CODE (op) == ASHIFTRT)
707 && CONST_INT_P (XEXP (op, 1))
708 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
709 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
710 && UINTVAL (XEXP (op, 1)) < precision)
711 return simplify_gen_binary (LSHIFTRT, mode,
712 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
713
714 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
715 (ashift:QI (x:QI) C), where C is a suitable small constant and
716 the outer subreg is effectively a truncation to the original mode. */
717 if (GET_CODE (op) == ASHIFT
718 && CONST_INT_P (XEXP (op, 1))
719 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
720 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
721 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
722 && UINTVAL (XEXP (op, 1)) < precision)
723 return simplify_gen_binary (ASHIFT, mode,
724 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
725
726 /* Recognize a word extraction from a multi-word subreg. */
727 if ((GET_CODE (op) == LSHIFTRT
728 || GET_CODE (op) == ASHIFTRT)
729 && SCALAR_INT_MODE_P (mode)
730 && SCALAR_INT_MODE_P (op_mode)
731 && precision >= BITS_PER_WORD
732 && 2 * precision <= op_precision
733 && CONST_INT_P (XEXP (op, 1))
734 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
735 && UINTVAL (XEXP (op, 1)) < op_precision)
736 {
737 int byte = subreg_lowpart_offset (mode, op_mode);
738 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
739 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
740 (WORDS_BIG_ENDIAN
741 ? byte - shifted_bytes
742 : byte + shifted_bytes));
743 }
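/* For example, on a little-endian target with 32-bit words,
   (truncate:SI (lshiftrt:DI (x:DI) (const_int 32))) selects the upper
   word and becomes (subreg:SI (x:DI) 4).  */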
744
745 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
746 and try replacing the TRUNCATE and shift with it. Don't do this
747 if the MEM has a mode-dependent address. */
748 if ((GET_CODE (op) == LSHIFTRT
749 || GET_CODE (op) == ASHIFTRT)
750 && SCALAR_INT_MODE_P (op_mode)
751 && MEM_P (XEXP (op, 0))
752 && CONST_INT_P (XEXP (op, 1))
753 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
754 && INTVAL (XEXP (op, 1)) > 0
755 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
756 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
757 MEM_ADDR_SPACE (XEXP (op, 0)))
758 && ! MEM_VOLATILE_P (XEXP (op, 0))
759 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
760 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
761 {
762 int byte = subreg_lowpart_offset (mode, op_mode);
763 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
764 return adjust_address_nv (XEXP (op, 0), mode,
765 (WORDS_BIG_ENDIAN
766 ? byte - shifted_bytes
767 : byte + shifted_bytes));
768 }
769
770 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
771 (OP:SI foo:SI) if OP is NEG or ABS. */
772 if ((GET_CODE (op) == ABS
773 || GET_CODE (op) == NEG)
774 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
775 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
776 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
777 return simplify_gen_unary (GET_CODE (op), mode,
778 XEXP (XEXP (op, 0), 0), mode);
779
780 /* (truncate:A (subreg:B (truncate:C X) 0)) is
781 (truncate:A X). */
782 if (GET_CODE (op) == SUBREG
783 && SCALAR_INT_MODE_P (mode)
784 && SCALAR_INT_MODE_P (op_mode)
785 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
786 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
787 && subreg_lowpart_p (op))
788 {
789 rtx inner = XEXP (SUBREG_REG (op), 0);
790 if (GET_MODE_PRECISION (mode)
791 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
792 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
793 else
794 /* If subreg above is paradoxical and C is narrower
795 than A, return (subreg:A (truncate:C X) 0). */
796 return simplify_gen_subreg (mode, SUBREG_REG (op),
797 GET_MODE (SUBREG_REG (op)), 0);
798 }
799
800 /* (truncate:A (truncate:B X)) is (truncate:A X). */
801 if (GET_CODE (op) == TRUNCATE)
802 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
803 GET_MODE (XEXP (op, 0)));
804
805 return NULL_RTX;
806 }
807 \f
808 /* Try to simplify a unary operation CODE whose output mode is to be
809 MODE with input operand OP whose mode was originally OP_MODE.
810 Return zero if no simplification can be made. */
811 rtx
812 simplify_unary_operation (enum rtx_code code, machine_mode mode,
813 rtx op, machine_mode op_mode)
814 {
815 rtx trueop, tem;
816
817 trueop = avoid_constant_pool_reference (op);
818
819 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
820 if (tem)
821 return tem;
822
823 return simplify_unary_operation_1 (code, mode, op);
824 }
825
826 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
827 to be exact. */
828
829 static bool
830 exact_int_to_float_conversion_p (const_rtx op)
831 {
832 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
833 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
834 /* Constants shouldn't reach here. */
835 gcc_assert (op0_mode != VOIDmode);
836 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
837 int in_bits = in_prec;
838 if (HWI_COMPUTABLE_MODE_P (op0_mode))
839 {
840 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
841 if (GET_CODE (op) == FLOAT)
842 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
843 else if (GET_CODE (op) == UNSIGNED_FLOAT)
844 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
845 else
846 gcc_unreachable ();
847 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
848 }
849 return in_bits <= out_bits;
850 }
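/* For example, on an IEEE target significand_size (SFmode) is 24, so a
   FLOAT of an HImode value into SFmode is always exact, while an SImode
   input is exact only if the nonzero_bits / num_sign_bit_copies analysis
   shows at most 24 significant bits.  */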
851
852 /* Perform some simplifications we can do even if the operands
853 aren't constant. */
854 static rtx
855 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
856 {
857 enum rtx_code reversed;
858 rtx temp;
859
860 switch (code)
861 {
862 case NOT:
863 /* (not (not X)) == X. */
864 if (GET_CODE (op) == NOT)
865 return XEXP (op, 0);
866
867 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
868 comparison is all ones. */
869 if (COMPARISON_P (op)
870 && (mode == BImode || STORE_FLAG_VALUE == -1)
871 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
872 return simplify_gen_relational (reversed, mode, VOIDmode,
873 XEXP (op, 0), XEXP (op, 1));
874
875 /* (not (plus X -1)) can become (neg X). */
876 if (GET_CODE (op) == PLUS
877 && XEXP (op, 1) == constm1_rtx)
878 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
879
880 /* Similarly, (not (neg X)) is (plus X -1). */
881 if (GET_CODE (op) == NEG)
882 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
883 CONSTM1_RTX (mode));
884
885 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
886 if (GET_CODE (op) == XOR
887 && CONST_INT_P (XEXP (op, 1))
888 && (temp = simplify_unary_operation (NOT, mode,
889 XEXP (op, 1), mode)) != 0)
890 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
891
892 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
893 if (GET_CODE (op) == PLUS
894 && CONST_INT_P (XEXP (op, 1))
895 && mode_signbit_p (mode, XEXP (op, 1))
896 && (temp = simplify_unary_operation (NOT, mode,
897 XEXP (op, 1), mode)) != 0)
898 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
899
900
901 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
902 operands other than 1, but that is not valid. We could do a
903 similar simplification for (not (lshiftrt C X)) where C is
904 just the sign bit, but this doesn't seem common enough to
905 bother with. */
906 if (GET_CODE (op) == ASHIFT
907 && XEXP (op, 0) == const1_rtx)
908 {
909 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
910 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
911 }
912
913 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
914 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
915 so we can perform the above simplification. */
916 if (STORE_FLAG_VALUE == -1
917 && GET_CODE (op) == ASHIFTRT
918 && CONST_INT_P (XEXP (op, 1))
919 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
920 return simplify_gen_relational (GE, mode, VOIDmode,
921 XEXP (op, 0), const0_rtx);
922
923
924 if (GET_CODE (op) == SUBREG
925 && subreg_lowpart_p (op)
926 && (GET_MODE_SIZE (GET_MODE (op))
927 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
928 && GET_CODE (SUBREG_REG (op)) == ASHIFT
929 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
930 {
931 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
932 rtx x;
933
934 x = gen_rtx_ROTATE (inner_mode,
935 simplify_gen_unary (NOT, inner_mode, const1_rtx,
936 inner_mode),
937 XEXP (SUBREG_REG (op), 1));
938 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
939 if (temp)
940 return temp;
941 }
942
943 /* Apply De Morgan's laws to reduce number of patterns for machines
944 with negating logical insns (and-not, nand, etc.). If result has
945 only one NOT, put it first, since that is how the patterns are
946 coded. */
947 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
948 {
949 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
950 machine_mode op_mode;
951
952 op_mode = GET_MODE (in1);
953 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
954
955 op_mode = GET_MODE (in2);
956 if (op_mode == VOIDmode)
957 op_mode = mode;
958 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
959
960 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
961 std::swap (in1, in2);
962
963 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
964 mode, in1, in2);
965 }
966
967 /* (not (bswap x)) -> (bswap (not x)). */
968 if (GET_CODE (op) == BSWAP)
969 {
970 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
971 return simplify_gen_unary (BSWAP, mode, x, mode);
972 }
973 break;
974
975 case NEG:
976 /* (neg (neg X)) == X. */
977 if (GET_CODE (op) == NEG)
978 return XEXP (op, 0);
979
980 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
981 If the comparison is not reversible, use
982 x ? y : (neg y). */
983 if (GET_CODE (op) == IF_THEN_ELSE)
984 {
985 rtx cond = XEXP (op, 0);
986 rtx true_rtx = XEXP (op, 1);
987 rtx false_rtx = XEXP (op, 2);
988
989 if ((GET_CODE (true_rtx) == NEG
990 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
991 || (GET_CODE (false_rtx) == NEG
992 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
993 {
994 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
995 temp = reversed_comparison (cond, mode);
996 else
997 {
998 temp = cond;
999 std::swap (true_rtx, false_rtx);
1000 }
1001 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1002 mode, temp, true_rtx, false_rtx);
1003 }
1004 }
1005
1006 /* (neg (plus X 1)) can become (not X). */
1007 if (GET_CODE (op) == PLUS
1008 && XEXP (op, 1) == const1_rtx)
1009 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1010
1011 /* Similarly, (neg (not X)) is (plus X 1). */
1012 if (GET_CODE (op) == NOT)
1013 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1014 CONST1_RTX (mode));
1015
1016 /* (neg (minus X Y)) can become (minus Y X). This transformation
1017 isn't safe for modes with signed zeros, since if X and Y are
1018 both +0, (minus Y X) is the same as (minus X Y). If the
1019 rounding mode is towards +infinity (or -infinity) then the two
1020 expressions will be rounded differently. */
1021 if (GET_CODE (op) == MINUS
1022 && !HONOR_SIGNED_ZEROS (mode)
1023 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1024 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1025
1026 if (GET_CODE (op) == PLUS
1027 && !HONOR_SIGNED_ZEROS (mode)
1028 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1029 {
1030 /* (neg (plus A C)) is simplified to (minus -C A). */
1031 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1032 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1033 {
1034 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1035 if (temp)
1036 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1037 }
1038
1039 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1040 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1041 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1042 }
1043
1044 /* (neg (mult A B)) becomes (mult A (neg B)).
1045 This works even for floating-point values. */
1046 if (GET_CODE (op) == MULT
1047 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1048 {
1049 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1050 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1051 }
1052
1053 /* NEG commutes with ASHIFT since it is multiplication. Only do
1054 this if we can then eliminate the NEG (e.g., if the operand
1055 is a constant). */
1056 if (GET_CODE (op) == ASHIFT)
1057 {
1058 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1059 if (temp)
1060 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1061 }
1062
1063 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1064 C is equal to the width of MODE minus 1. */
1065 if (GET_CODE (op) == ASHIFTRT
1066 && CONST_INT_P (XEXP (op, 1))
1067 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1068 return simplify_gen_binary (LSHIFTRT, mode,
1069 XEXP (op, 0), XEXP (op, 1));
1070
1071 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1072 C is equal to the width of MODE minus 1. */
1073 if (GET_CODE (op) == LSHIFTRT
1074 && CONST_INT_P (XEXP (op, 1))
1075 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1076 return simplify_gen_binary (ASHIFTRT, mode,
1077 XEXP (op, 0), XEXP (op, 1));
1078
1079 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1080 if (GET_CODE (op) == XOR
1081 && XEXP (op, 1) == const1_rtx
1082 && nonzero_bits (XEXP (op, 0), mode) == 1)
1083 return plus_constant (mode, XEXP (op, 0), -1);
1084
1085 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1086 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1087 if (GET_CODE (op) == LT
1088 && XEXP (op, 1) == const0_rtx
1089 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1090 {
1091 machine_mode inner = GET_MODE (XEXP (op, 0));
1092 int isize = GET_MODE_PRECISION (inner);
1093 if (STORE_FLAG_VALUE == 1)
1094 {
1095 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1096 GEN_INT (isize - 1));
1097 if (mode == inner)
1098 return temp;
1099 if (GET_MODE_PRECISION (mode) > isize)
1100 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1101 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1102 }
1103 else if (STORE_FLAG_VALUE == -1)
1104 {
1105 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1106 GEN_INT (isize - 1));
1107 if (mode == inner)
1108 return temp;
1109 if (GET_MODE_PRECISION (mode) > isize)
1110 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1111 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1112 }
1113 }
1114 break;
1115
1116 case TRUNCATE:
1117 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1118 with the umulXi3_highpart patterns. */
1119 if (GET_CODE (op) == LSHIFTRT
1120 && GET_CODE (XEXP (op, 0)) == MULT)
1121 break;
1122
1123 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1124 {
1125 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1126 {
1127 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1128 if (temp)
1129 return temp;
1130 }
1131 /* We can't handle truncation to a partial integer mode here
1132 because we don't know the real bitsize of the partial
1133 integer mode. */
1134 break;
1135 }
1136
1137 if (GET_MODE (op) != VOIDmode)
1138 {
1139 temp = simplify_truncation (mode, op, GET_MODE (op));
1140 if (temp)
1141 return temp;
1142 }
1143
1144 /* If we know that the value is already truncated, we can
1145 replace the TRUNCATE with a SUBREG. */
1146 if (GET_MODE_NUNITS (mode) == 1
1147 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1148 || truncated_to_mode (mode, op)))
1149 {
1150 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1151 if (temp)
1152 return temp;
1153 }
1154
1155 /* A truncate of a comparison can be replaced with a subreg if
1156 STORE_FLAG_VALUE permits. This is like the previous test,
1157 but it works even if the comparison is done in a mode larger
1158 than HOST_BITS_PER_WIDE_INT. */
1159 if (HWI_COMPUTABLE_MODE_P (mode)
1160 && COMPARISON_P (op)
1161 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1162 {
1163 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1164 if (temp)
1165 return temp;
1166 }
1167
1168 /* A truncate of a memory is just loading the low part of the memory
1169 if we are not changing the meaning of the address. */
1170 if (GET_CODE (op) == MEM
1171 && !VECTOR_MODE_P (mode)
1172 && !MEM_VOLATILE_P (op)
1173 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1174 {
1175 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1176 if (temp)
1177 return temp;
1178 }
1179
1180 break;
1181
1182 case FLOAT_TRUNCATE:
1183 if (DECIMAL_FLOAT_MODE_P (mode))
1184 break;
1185
1186 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1187 if (GET_CODE (op) == FLOAT_EXTEND
1188 && GET_MODE (XEXP (op, 0)) == mode)
1189 return XEXP (op, 0);
1190
1191 /* (float_truncate:SF (float_truncate:DF foo:XF))
1192 = (float_truncate:SF foo:XF).
1193 This may eliminate double rounding, so it is unsafe.
1194
1195 (float_truncate:SF (float_extend:XF foo:DF))
1196 = (float_truncate:SF foo:DF).
1197
1198 (float_truncate:DF (float_extend:XF foo:SF))
1199 = (float_extend:DF foo:SF). */
1200 if ((GET_CODE (op) == FLOAT_TRUNCATE
1201 && flag_unsafe_math_optimizations)
1202 || GET_CODE (op) == FLOAT_EXTEND)
1203 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1204 0)))
1205 > GET_MODE_SIZE (mode)
1206 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1207 mode,
1208 XEXP (op, 0), mode);
1209
1210 /* (float_truncate (float x)) is (float x) */
1211 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1212 && (flag_unsafe_math_optimizations
1213 || exact_int_to_float_conversion_p (op)))
1214 return simplify_gen_unary (GET_CODE (op), mode,
1215 XEXP (op, 0),
1216 GET_MODE (XEXP (op, 0)));
1217
1218 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1219 (OP:SF foo:SF) if OP is NEG or ABS. */
1220 if ((GET_CODE (op) == ABS
1221 || GET_CODE (op) == NEG)
1222 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1223 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1224 return simplify_gen_unary (GET_CODE (op), mode,
1225 XEXP (XEXP (op, 0), 0), mode);
1226
1227 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1228 is (float_truncate:SF x). */
1229 if (GET_CODE (op) == SUBREG
1230 && subreg_lowpart_p (op)
1231 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1232 return SUBREG_REG (op);
1233 break;
1234
1235 case FLOAT_EXTEND:
1236 if (DECIMAL_FLOAT_MODE_P (mode))
1237 break;
1238
1239 /* (float_extend (float_extend x)) is (float_extend x)
1240
1241 (float_extend (float x)) is (float x) assuming that double
1242 rounding can't happen.
1243 */
1244 if (GET_CODE (op) == FLOAT_EXTEND
1245 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1246 && exact_int_to_float_conversion_p (op)))
1247 return simplify_gen_unary (GET_CODE (op), mode,
1248 XEXP (op, 0),
1249 GET_MODE (XEXP (op, 0)));
1250
1251 break;
1252
1253 case ABS:
1254 /* (abs (neg <foo>)) -> (abs <foo>) */
1255 if (GET_CODE (op) == NEG)
1256 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1257 GET_MODE (XEXP (op, 0)));
1258
1259 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1260 do nothing. */
1261 if (GET_MODE (op) == VOIDmode)
1262 break;
1263
1264 /* If operand is something known to be positive, ignore the ABS. */
1265 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1266 || val_signbit_known_clear_p (GET_MODE (op),
1267 nonzero_bits (op, GET_MODE (op))))
1268 return op;
1269
1270 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1271 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1272 return gen_rtx_NEG (mode, op);
1273
1274 break;
1275
1276 case FFS:
1277 /* (ffs (*_extend <X>)) = (ffs <X>) */
1278 if (GET_CODE (op) == SIGN_EXTEND
1279 || GET_CODE (op) == ZERO_EXTEND)
1280 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1281 GET_MODE (XEXP (op, 0)));
1282 break;
1283
1284 case POPCOUNT:
1285 switch (GET_CODE (op))
1286 {
1287 case BSWAP:
1288 case ZERO_EXTEND:
1289 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1290 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1291 GET_MODE (XEXP (op, 0)));
1292
1293 case ROTATE:
1294 case ROTATERT:
1295 /* Rotations don't affect popcount. */
1296 if (!side_effects_p (XEXP (op, 1)))
1297 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1298 GET_MODE (XEXP (op, 0)));
1299 break;
1300
1301 default:
1302 break;
1303 }
1304 break;
1305
1306 case PARITY:
1307 switch (GET_CODE (op))
1308 {
1309 case NOT:
1310 case BSWAP:
1311 case ZERO_EXTEND:
1312 case SIGN_EXTEND:
1313 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1314 GET_MODE (XEXP (op, 0)));
1315
1316 case ROTATE:
1317 case ROTATERT:
1318 /* Rotations don't affect parity. */
1319 if (!side_effects_p (XEXP (op, 1)))
1320 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1321 GET_MODE (XEXP (op, 0)));
1322 break;
1323
1324 default:
1325 break;
1326 }
1327 break;
1328
1329 case BSWAP:
1330 /* (bswap (bswap x)) -> x. */
1331 if (GET_CODE (op) == BSWAP)
1332 return XEXP (op, 0);
1333 break;
1334
1335 case FLOAT:
1336 /* (float (sign_extend <X>)) = (float <X>). */
1337 if (GET_CODE (op) == SIGN_EXTEND)
1338 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1339 GET_MODE (XEXP (op, 0)));
1340 break;
1341
1342 case SIGN_EXTEND:
1343 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1344 becomes just the MINUS if its mode is MODE. This allows
1345 folding switch statements on machines using casesi (such as
1346 the VAX). */
1347 if (GET_CODE (op) == TRUNCATE
1348 && GET_MODE (XEXP (op, 0)) == mode
1349 && GET_CODE (XEXP (op, 0)) == MINUS
1350 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1351 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1352 return XEXP (op, 0);
1353
1354 /* Extending a widening multiplication should be canonicalized to
1355 a wider widening multiplication. */
1356 if (GET_CODE (op) == MULT)
1357 {
1358 rtx lhs = XEXP (op, 0);
1359 rtx rhs = XEXP (op, 1);
1360 enum rtx_code lcode = GET_CODE (lhs);
1361 enum rtx_code rcode = GET_CODE (rhs);
1362
1363 /* Widening multiplies usually extend both operands, but sometimes
1364 they use a shift to extract a portion of a register. */
1365 if ((lcode == SIGN_EXTEND
1366 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1367 && (rcode == SIGN_EXTEND
1368 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1369 {
1370 machine_mode lmode = GET_MODE (lhs);
1371 machine_mode rmode = GET_MODE (rhs);
1372 int bits;
1373
1374 if (lcode == ASHIFTRT)
1375 /* Number of bits not shifted off the end. */
1376 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1377 else /* lcode == SIGN_EXTEND */
1378 /* Size of inner mode. */
1379 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1380
1381 if (rcode == ASHIFTRT)
1382 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1383 else /* rcode == SIGN_EXTEND */
1384 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1385
1386 /* We can only widen multiplies if the result is mathematically
1387 equivalent. I.e. if overflow was impossible. */
1388 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1389 return simplify_gen_binary
1390 (MULT, mode,
1391 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1392 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1393 }
1394 }
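/* For example, (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
   (sign_extend:SI y:HI))) becomes (mult:DI (sign_extend:DI x:HI)
   (sign_extend:DI y:HI)); BITS is 16 + 16 = 32, which does not exceed
   the SImode precision of the original product, so no overflow is
   possible.  */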
1395
1396 /* Check for a sign extension of a subreg of a promoted
1397 variable, where the promotion is sign-extended, and the
1398 target mode is the same as the variable's promotion. */
1399 if (GET_CODE (op) == SUBREG
1400 && SUBREG_PROMOTED_VAR_P (op)
1401 && SUBREG_PROMOTED_SIGNED_P (op)
1402 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1403 {
1404 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1405 if (temp)
1406 return temp;
1407 }
1408
1409 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1410 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1411 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1412 {
1413 gcc_assert (GET_MODE_PRECISION (mode)
1414 > GET_MODE_PRECISION (GET_MODE (op)));
1415 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1416 GET_MODE (XEXP (op, 0)));
1417 }
1418
1419 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1420 is (sign_extend:M (subreg:O <X>)) if there is mode with
1421 GET_MODE_BITSIZE (N) - I bits.
1422 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1423 is similarly (zero_extend:M (subreg:O <X>)). */
1424 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1425 && GET_CODE (XEXP (op, 0)) == ASHIFT
1426 && CONST_INT_P (XEXP (op, 1))
1427 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1428 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1429 {
1430 machine_mode tmode
1431 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1432 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1433 gcc_assert (GET_MODE_BITSIZE (mode)
1434 > GET_MODE_BITSIZE (GET_MODE (op)));
1435 if (tmode != BLKmode)
1436 {
1437 rtx inner =
1438 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1439 if (inner)
1440 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1441 ? SIGN_EXTEND : ZERO_EXTEND,
1442 mode, inner, tmode);
1443 }
1444 }
1445
1446 #if defined(POINTERS_EXTEND_UNSIGNED)
1447 /* As we do not know which address space the pointer is referring to,
1448 we can do this only if the target does not support different pointer
1449 or address modes depending on the address space. */
1450 if (target_default_pointer_address_modes_p ()
1451 && ! POINTERS_EXTEND_UNSIGNED
1452 && mode == Pmode && GET_MODE (op) == ptr_mode
1453 && (CONSTANT_P (op)
1454 || (GET_CODE (op) == SUBREG
1455 && REG_P (SUBREG_REG (op))
1456 && REG_POINTER (SUBREG_REG (op))
1457 && GET_MODE (SUBREG_REG (op)) == Pmode))
1458 && !targetm.have_ptr_extend ())
1459 return convert_memory_address (Pmode, op);
1460 #endif
1461 break;
1462
1463 case ZERO_EXTEND:
1464 /* Check for a zero extension of a subreg of a promoted
1465 variable, where the promotion is zero-extended, and the
1466 target mode is the same as the variable's promotion. */
1467 if (GET_CODE (op) == SUBREG
1468 && SUBREG_PROMOTED_VAR_P (op)
1469 && SUBREG_PROMOTED_UNSIGNED_P (op)
1470 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1471 {
1472 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1473 if (temp)
1474 return temp;
1475 }
1476
1477 /* Extending a widening multiplication should be canonicalized to
1478 a wider widening multiplication. */
1479 if (GET_CODE (op) == MULT)
1480 {
1481 rtx lhs = XEXP (op, 0);
1482 rtx rhs = XEXP (op, 1);
1483 enum rtx_code lcode = GET_CODE (lhs);
1484 enum rtx_code rcode = GET_CODE (rhs);
1485
1486 /* Widening multiplies usually extend both operands, but sometimes
1487 they use a shift to extract a portion of a register. */
1488 if ((lcode == ZERO_EXTEND
1489 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1490 && (rcode == ZERO_EXTEND
1491 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1492 {
1493 machine_mode lmode = GET_MODE (lhs);
1494 machine_mode rmode = GET_MODE (rhs);
1495 int bits;
1496
1497 if (lcode == LSHIFTRT)
1498 /* Number of bits not shifted off the end. */
1499 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1500 else /* lcode == ZERO_EXTEND */
1501 /* Size of inner mode. */
1502 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1503
1504 if (rcode == LSHIFTRT)
1505 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1506 else /* rcode == ZERO_EXTEND */
1507 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1508
1509 /* We can only widen multiplies if the result is mathematically
1510 equivalent. I.e. if overflow was impossible. */
1511 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1512 return simplify_gen_binary
1513 (MULT, mode,
1514 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1515 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1516 }
1517 }
1518
1519 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1520 if (GET_CODE (op) == ZERO_EXTEND)
1521 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1522 GET_MODE (XEXP (op, 0)));
1523
1524 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1525 is (zero_extend:M (subreg:O <X>)) if there is mode with
1526 GET_MODE_PRECISION (N) - I bits. */
1527 if (GET_CODE (op) == LSHIFTRT
1528 && GET_CODE (XEXP (op, 0)) == ASHIFT
1529 && CONST_INT_P (XEXP (op, 1))
1530 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1531 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1532 {
1533 machine_mode tmode
1534 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1535 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1536 if (tmode != BLKmode)
1537 {
1538 rtx inner =
1539 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1540 if (inner)
1541 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1542 }
1543 }
1544
1545 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1546 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1547 of mode N. E.g.
1548 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1549 (and:SI (reg:SI) (const_int 63)). */
1550 if (GET_CODE (op) == SUBREG
1551 && GET_MODE_PRECISION (GET_MODE (op))
1552 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1553 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1554 <= HOST_BITS_PER_WIDE_INT
1555 && GET_MODE_PRECISION (mode)
1556 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1557 && subreg_lowpart_p (op)
1558 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1559 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1560 {
1561 if (GET_MODE_PRECISION (mode)
1562 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1563 return SUBREG_REG (op);
1564 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1565 GET_MODE (SUBREG_REG (op)));
1566 }
1567
1568 #if defined(POINTERS_EXTEND_UNSIGNED)
1569 /* As we do not know which address space the pointer is referring to,
1570 we can do this only if the target does not support different pointer
1571 or address modes depending on the address space. */
1572 if (target_default_pointer_address_modes_p ()
1573 && POINTERS_EXTEND_UNSIGNED > 0
1574 && mode == Pmode && GET_MODE (op) == ptr_mode
1575 && (CONSTANT_P (op)
1576 || (GET_CODE (op) == SUBREG
1577 && REG_P (SUBREG_REG (op))
1578 && REG_POINTER (SUBREG_REG (op))
1579 && GET_MODE (SUBREG_REG (op)) == Pmode))
1580 && !targetm.have_ptr_extend ())
1581 return convert_memory_address (Pmode, op);
1582 #endif
1583 break;
1584
1585 default:
1586 break;
1587 }
1588
1589 return 0;
1590 }
1591
1592 /* Try to compute the value of a unary operation CODE whose output mode is to
1593 be MODE with input operand OP whose mode was originally OP_MODE.
1594 Return zero if the value cannot be computed. */
1595 rtx
1596 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1597 rtx op, machine_mode op_mode)
1598 {
1599 unsigned int width = GET_MODE_PRECISION (mode);
1600
1601 if (code == VEC_DUPLICATE)
1602 {
1603 gcc_assert (VECTOR_MODE_P (mode));
1604 if (GET_MODE (op) != VOIDmode)
1605 {
1606 if (!VECTOR_MODE_P (GET_MODE (op)))
1607 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1608 else
1609 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1610 (GET_MODE (op)));
1611 }
1612 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1613 || GET_CODE (op) == CONST_VECTOR)
1614 {
1615 int elt_size = GET_MODE_UNIT_SIZE (mode);
1616 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1617 rtvec v = rtvec_alloc (n_elts);
1618 unsigned int i;
1619
1620 if (GET_CODE (op) != CONST_VECTOR)
1621 for (i = 0; i < n_elts; i++)
1622 RTVEC_ELT (v, i) = op;
1623 else
1624 {
1625 machine_mode inmode = GET_MODE (op);
1626 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1627 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1628
1629 gcc_assert (in_n_elts < n_elts);
1630 gcc_assert ((n_elts % in_n_elts) == 0);
1631 for (i = 0; i < n_elts; i++)
1632 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1633 }
1634 return gen_rtx_CONST_VECTOR (mode, v);
1635 }
1636 }
1637
1638 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1639 {
1640 int elt_size = GET_MODE_UNIT_SIZE (mode);
1641 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1642 machine_mode opmode = GET_MODE (op);
1643 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1644 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1645 rtvec v = rtvec_alloc (n_elts);
1646 unsigned int i;
1647
1648 gcc_assert (op_n_elts == n_elts);
1649 for (i = 0; i < n_elts; i++)
1650 {
1651 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1652 CONST_VECTOR_ELT (op, i),
1653 GET_MODE_INNER (opmode));
1654 if (!x)
1655 return 0;
1656 RTVEC_ELT (v, i) = x;
1657 }
1658 return gen_rtx_CONST_VECTOR (mode, v);
1659 }
1660
1661 /* The order of these tests is critical so that, for example, we don't
1662 check the wrong mode (input vs. output) for a conversion operation,
1663 such as FIX. At some point, this should be simplified. */
1664
1665 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1666 {
1667 REAL_VALUE_TYPE d;
1668
1669 if (op_mode == VOIDmode)
1670 {
1671 /* CONST_INTs have VOIDmode as their mode. We assume that all
1672 the bits of the constant are significant, though this is
1673 a dangerous assumption: CONST_INTs are often created and
1674 used with garbage in the bits outside the precision of
1675 their implied mode. */
1676 op_mode = MAX_MODE_INT;
1677 }
1678
1679 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1680 d = real_value_truncate (mode, d);
1681 return const_double_from_real_value (d, mode);
1682 }
1683 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1684 {
1685 REAL_VALUE_TYPE d;
1686
1687 if (op_mode == VOIDmode)
1688 {
1689 /* CONST_INTs have VOIDmode as their mode. We assume that all
1690 the bits of the constant are significant, though this is
1691 a dangerous assumption: CONST_INTs are often created and
1692 used with garbage in the bits outside the precision of
1693 their implied mode. */
1694 op_mode = MAX_MODE_INT;
1695 }
1696
1697 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1698 d = real_value_truncate (mode, d);
1699 return const_double_from_real_value (d, mode);
1700 }
1701
1702 if (CONST_SCALAR_INT_P (op) && width > 0)
1703 {
1704 wide_int result;
1705 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1706 rtx_mode_t op0 = std::make_pair (op, imode);
1707 int int_value;
1708
1709 #if TARGET_SUPPORTS_WIDE_INT == 0
1710 /* This assert keeps the simplification from producing a result
1711 that cannot be represented in a CONST_DOUBLE, but a lot of
1712 upstream callers expect that this function never fails to
1713 simplify something, so if you added this check to the test
1714 above, the code would die later anyway. If this assert
1715 fires, you just need to make the port support wide int. */
1716 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1717 #endif
1718
1719 switch (code)
1720 {
1721 case NOT:
1722 result = wi::bit_not (op0);
1723 break;
1724
1725 case NEG:
1726 result = wi::neg (op0);
1727 break;
1728
1729 case ABS:
1730 result = wi::abs (op0);
1731 break;
1732
1733 case FFS:
1734 result = wi::shwi (wi::ffs (op0), mode);
1735 break;
1736
1737 case CLZ:
1738 if (wi::ne_p (op0, 0))
1739 int_value = wi::clz (op0);
1740 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1741 int_value = GET_MODE_PRECISION (mode);
1742 result = wi::shwi (int_value, mode);
1743 break;
1744
1745 case CLRSB:
1746 result = wi::shwi (wi::clrsb (op0), mode);
1747 break;
1748
1749 case CTZ:
1750 if (wi::ne_p (op0, 0))
1751 int_value = wi::ctz (op0);
1752 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1753 int_value = GET_MODE_PRECISION (mode);
1754 result = wi::shwi (int_value, mode);
1755 break;
1756
1757 case POPCOUNT:
1758 result = wi::shwi (wi::popcount (op0), mode);
1759 break;
1760
1761 case PARITY:
1762 result = wi::shwi (wi::parity (op0), mode);
1763 break;
1764
1765 case BSWAP:
1766 result = wide_int (op0).bswap ();
1767 break;
1768
1769 case TRUNCATE:
1770 case ZERO_EXTEND:
1771 result = wide_int::from (op0, width, UNSIGNED);
1772 break;
1773
1774 case SIGN_EXTEND:
1775 result = wide_int::from (op0, width, SIGNED);
1776 break;
1777
1778 case SQRT:
1779 default:
1780 return 0;
1781 }
1782
1783 return immed_wide_int_const (result, mode);
1784 }
1785
1786 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1787 && SCALAR_FLOAT_MODE_P (mode)
1788 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1789 {
1790 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1791 switch (code)
1792 {
1793 case SQRT:
1794 return 0;
1795 case ABS:
1796 d = real_value_abs (&d);
1797 break;
1798 case NEG:
1799 d = real_value_negate (&d);
1800 break;
1801 case FLOAT_TRUNCATE:
1802 d = real_value_truncate (mode, d);
1803 break;
1804 case FLOAT_EXTEND:
1805 /* All this does is change the mode, unless we are
1806 changing the mode class. */
1807 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1808 real_convert (&d, mode, &d);
1809 break;
1810 case FIX:
1811 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1812 break;
1813 case NOT:
1814 {
1815 long tmp[4];
1816 int i;
1817
1818 real_to_target (tmp, &d, GET_MODE (op));
1819 for (i = 0; i < 4; i++)
1820 tmp[i] = ~tmp[i];
1821 real_from_target (&d, tmp, mode);
1822 break;
1823 }
1824 default:
1825 gcc_unreachable ();
1826 }
1827 return const_double_from_real_value (d, mode);
1828 }
1829 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1830 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1831 && GET_MODE_CLASS (mode) == MODE_INT
1832 && width > 0)
1833 {
1834 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1835 operators are intentionally left unspecified (to ease implementation
1836 by target backends), for consistency, this routine implements the
1837 same semantics for constant folding as used by the middle-end. */
1838
1839 /* This was formerly used only for non-IEEE float.
1840 eggert@twinsun.com says it is safe for IEEE also. */
1841 REAL_VALUE_TYPE t;
1842 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1843 wide_int wmax, wmin;
1844 /* FAIL is part of the interface to real_to_integer, but we check
1845 things before making this call. */
1846 bool fail;
1847
1848 switch (code)
1849 {
1850 case FIX:
1851 if (REAL_VALUE_ISNAN (*x))
1852 return const0_rtx;
1853
1854 /* Test against the signed upper bound. */
1855 wmax = wi::max_value (width, SIGNED);
1856 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1857 if (real_less (&t, x))
1858 return immed_wide_int_const (wmax, mode);
1859
1860 /* Test against the signed lower bound. */
1861 wmin = wi::min_value (width, SIGNED);
1862 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1863 if (real_less (x, &t))
1864 return immed_wide_int_const (wmin, mode);
1865
1866 return immed_wide_int_const (real_to_integer (x, &fail, width),
1867 mode);
1868
1869 case UNSIGNED_FIX:
1870 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1871 return const0_rtx;
1872
1873 /* Test against the unsigned upper bound. */
1874 wmax = wi::max_value (width, UNSIGNED);
1875 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1876 if (real_less (&t, x))
1877 return immed_wide_int_const (wmax, mode);
1878
1879 return immed_wide_int_const (real_to_integer (x, &fail, width),
1880 mode);
1881
1882 default:
1883 gcc_unreachable ();
1884 }
1885 }
1886
1887 return NULL_RTX;
1888 }
1889 \f
1890 /* Subroutine of simplify_binary_operation to simplify a binary operation
1891 CODE that can commute with byte swapping, with result mode MODE and
1892 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1893 Return zero if no simplification or canonicalization is possible. */
1894
1895 static rtx
1896 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1897 rtx op0, rtx op1)
1898 {
1899 rtx tem;
1900
1901 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 = (bswap C1). */
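/* E.g. (and (bswap:SI x) (const_int 0xff)) becomes
   (bswap:SI (and:SI x (const_int 0xff000000))). */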
1902 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1903 {
1904 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1905 simplify_gen_unary (BSWAP, mode, op1, mode));
1906 return simplify_gen_unary (BSWAP, mode, tem, mode);
1907 }
1908
1909 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1910 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1911 {
1912 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1913 return simplify_gen_unary (BSWAP, mode, tem, mode);
1914 }
1915
1916 return NULL_RTX;
1917 }
1918
1919 /* Subroutine of simplify_binary_operation to simplify a commutative,
1920 associative binary operation CODE with result mode MODE, operating
1921 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1922 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1923 canonicalization is possible. */
1924
1925 static rtx
1926 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1927 rtx op0, rtx op1)
1928 {
1929 rtx tem;
1930
1931 /* Linearize the operator to the left. */
1932 if (GET_CODE (op1) == code)
1933 {
1934 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1935 if (GET_CODE (op0) == code)
1936 {
1937 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1938 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1939 }
1940
1941 /* "a op (b op c)" becomes "(b op c) op a". */
1942 if (! swap_commutative_operands_p (op1, op0))
1943 return simplify_gen_binary (code, mode, op1, op0);
1944
1945 std::swap (op0, op1);
1946 }
1947
1948 if (GET_CODE (op0) == code)
1949 {
1950 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1951 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1952 {
1953 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1954 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1955 }
1956
1957 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1958 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1959 if (tem != 0)
1960 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1961
1962 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1963 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1964 if (tem != 0)
1965 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1966 }
1967
1968 return 0;
1969 }
1970
1971
1972 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1973 and OP1. Return 0 if no simplification is possible.
1974
1975 Don't use this for relational operations such as EQ or LT.
1976 Use simplify_relational_operation instead. */
1977 rtx
1978 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1979 rtx op0, rtx op1)
1980 {
1981 rtx trueop0, trueop1;
1982 rtx tem;
1983
1984 /* Relational operations don't work here. We must know the mode
1985 of the operands in order to do the comparison correctly.
1986 Assuming a full word can give incorrect results.
1987 Consider comparing 128 with -128 in QImode. */
1988 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1989 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1990
1991 /* Make sure the constant is second. */
1992 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1993 && swap_commutative_operands_p (op0, op1))
1994 std::swap (op0, op1);
1995
1996 trueop0 = avoid_constant_pool_reference (op0);
1997 trueop1 = avoid_constant_pool_reference (op1);
1998
1999 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2000 if (tem)
2001 return tem;
2002 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2003
2004 if (tem)
2005 return tem;
2006
2007 /* If the above steps did not result in a simplification and op0 or op1
2008 were constant pool references, use the referenced constants directly. */
2009 if (trueop0 != op0 || trueop1 != op1)
2010 return simplify_gen_binary (code, mode, trueop0, trueop1);
2011
2012 return NULL_RTX;
2013 }
2014
2015 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2016 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2017 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2018 actual constants. */
2019
2020 static rtx
2021 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2022 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2023 {
2024 rtx tem, reversed, opleft, opright;
2025 HOST_WIDE_INT val;
2026 unsigned int width = GET_MODE_PRECISION (mode);
2027
2028 /* Even if we can't compute a constant result,
2029 there are some cases worth simplifying. */
2030
2031 switch (code)
2032 {
2033 case PLUS:
2034 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2035 when x is NaN, infinite, or finite and nonzero. They aren't
2036 when x is -0 and the rounding mode is not towards -infinity,
2037 since (-0) + 0 is then 0. */
2038 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2039 return op0;
2040
2041 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2042 transformations are safe even for IEEE. */
2043 if (GET_CODE (op0) == NEG)
2044 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2045 else if (GET_CODE (op1) == NEG)
2046 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2047
2048 /* (~a) + 1 -> -a */
2049 if (INTEGRAL_MODE_P (mode)
2050 && GET_CODE (op0) == NOT
2051 && trueop1 == const1_rtx)
2052 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2053
2054 /* Handle both-operands-constant cases. We can only add
2055 CONST_INTs to constants since the sum of relocatable symbols
2056 can't be handled by most assemblers. Don't add CONST_INT
2057 to CONST_INT since overflow won't be computed properly if wider
2058 than HOST_BITS_PER_WIDE_INT. */
2059
2060 if ((GET_CODE (op0) == CONST
2061 || GET_CODE (op0) == SYMBOL_REF
2062 || GET_CODE (op0) == LABEL_REF)
2063 && CONST_INT_P (op1))
2064 return plus_constant (mode, op0, INTVAL (op1));
2065 else if ((GET_CODE (op1) == CONST
2066 || GET_CODE (op1) == SYMBOL_REF
2067 || GET_CODE (op1) == LABEL_REF)
2068 && CONST_INT_P (op0))
2069 return plus_constant (mode, op1, INTVAL (op0));
2070
2071 /* See if this is something like X * C - X or vice versa or
2072 if the multiplication is written as a shift. If so, we can
2073 distribute and make a new multiply, shift, or maybe just
2074 have X (if C is 2 in the example above). But don't make
2075 something more expensive than we had before. */
2076
2077 if (SCALAR_INT_MODE_P (mode))
2078 {
2079 rtx lhs = op0, rhs = op1;
2080
2081 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2082 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2083
2084 if (GET_CODE (lhs) == NEG)
2085 {
2086 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2087 lhs = XEXP (lhs, 0);
2088 }
2089 else if (GET_CODE (lhs) == MULT
2090 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2091 {
2092 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2093 lhs = XEXP (lhs, 0);
2094 }
2095 else if (GET_CODE (lhs) == ASHIFT
2096 && CONST_INT_P (XEXP (lhs, 1))
2097 && INTVAL (XEXP (lhs, 1)) >= 0
2098 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2099 {
2100 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2101 GET_MODE_PRECISION (mode));
2102 lhs = XEXP (lhs, 0);
2103 }
2104
2105 if (GET_CODE (rhs) == NEG)
2106 {
2107 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2108 rhs = XEXP (rhs, 0);
2109 }
2110 else if (GET_CODE (rhs) == MULT
2111 && CONST_INT_P (XEXP (rhs, 1)))
2112 {
2113 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2114 rhs = XEXP (rhs, 0);
2115 }
2116 else if (GET_CODE (rhs) == ASHIFT
2117 && CONST_INT_P (XEXP (rhs, 1))
2118 && INTVAL (XEXP (rhs, 1)) >= 0
2119 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2120 {
2121 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2122 GET_MODE_PRECISION (mode));
2123 rhs = XEXP (rhs, 0);
2124 }
2125
2126 if (rtx_equal_p (lhs, rhs))
2127 {
2128 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2129 rtx coeff;
2130 bool speed = optimize_function_for_speed_p (cfun);
2131
2132 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2133
2134 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2135 return (set_src_cost (tem, mode, speed)
2136 <= set_src_cost (orig, mode, speed) ? tem : 0);
2137 }
2138 }
2139
2140 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
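/* E.g. in QImode, (plus (xor x (const_int 0x7f)) (const_int 0x80))
   becomes (xor x (const_int 0xff)): adding the sign bit is the same
   as flipping it once the carry out is discarded. */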
2141 if (CONST_SCALAR_INT_P (op1)
2142 && GET_CODE (op0) == XOR
2143 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2144 && mode_signbit_p (mode, op1))
2145 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2146 simplify_gen_binary (XOR, mode, op1,
2147 XEXP (op0, 1)));
2148
2149 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2150 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2151 && GET_CODE (op0) == MULT
2152 && GET_CODE (XEXP (op0, 0)) == NEG)
2153 {
2154 rtx in1, in2;
2155
2156 in1 = XEXP (XEXP (op0, 0), 0);
2157 in2 = XEXP (op0, 1);
2158 return simplify_gen_binary (MINUS, mode, op1,
2159 simplify_gen_binary (MULT, mode,
2160 in1, in2));
2161 }
2162
2163 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2164 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2165 is 1. */
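/* E.g. with STORE_FLAG_VALUE == 1, (plus (eq A B) (const_int -1))
   is 0 when A equals B and -1 otherwise, which is (neg (ne A B)). */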
2166 if (COMPARISON_P (op0)
2167 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2168 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2169 && (reversed = reversed_comparison (op0, mode)))
2170 return
2171 simplify_gen_unary (NEG, mode, reversed, mode);
2172
2173 /* If one of the operands is a PLUS or a MINUS, see if we can
2174 simplify this by the associative law.
2175 Don't use the associative law for floating point.
2176 The inaccuracy makes it nonassociative,
2177 and subtle programs can break if operations are associated. */
2178
2179 if (INTEGRAL_MODE_P (mode)
2180 && (plus_minus_operand_p (op0)
2181 || plus_minus_operand_p (op1))
2182 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2183 return tem;
2184
2185 /* Reassociate floating point addition only when the user
2186 specifies associative math operations. */
2187 if (FLOAT_MODE_P (mode)
2188 && flag_associative_math)
2189 {
2190 tem = simplify_associative_operation (code, mode, op0, op1);
2191 if (tem)
2192 return tem;
2193 }
2194 break;
2195
2196 case COMPARE:
2197 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2198 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2199 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2200 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2201 {
2202 rtx xop00 = XEXP (op0, 0);
2203 rtx xop10 = XEXP (op1, 0);
2204
2205 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2206 return xop00;
2207
2208 if (REG_P (xop00) && REG_P (xop10)
2209 && GET_MODE (xop00) == GET_MODE (xop10)
2210 && REGNO (xop00) == REGNO (xop10)
2211 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2212 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2213 return xop00;
2214 }
2215 break;
2216
2217 case MINUS:
2218 /* We can't assume x-x is 0 even with non-IEEE floating point,
2219 but since it is zero except in very strange circumstances, we
2220 will treat it as zero with -ffinite-math-only. */
2221 if (rtx_equal_p (trueop0, trueop1)
2222 && ! side_effects_p (op0)
2223 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2224 return CONST0_RTX (mode);
2225
2226 /* Change subtraction from zero into negation. (0 - x) is the
2227 same as -x when x is NaN, infinite, or finite and nonzero.
2228 But if the mode has signed zeros, and does not round towards
2229 -infinity, then 0 - 0 is 0, not -0. */
2230 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2231 return simplify_gen_unary (NEG, mode, op1, mode);
2232
2233 /* (-1 - a) is ~a. */
2234 if (trueop0 == constm1_rtx)
2235 return simplify_gen_unary (NOT, mode, op1, mode);
2236
2237 /* Subtracting 0 has no effect unless the mode has signed zeros
2238 and supports rounding towards -infinity. In such a case,
2239 0 - 0 is -0. */
2240 if (!(HONOR_SIGNED_ZEROS (mode)
2241 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2242 && trueop1 == CONST0_RTX (mode))
2243 return op0;
2244
2245 /* See if this is something like X * C - X or vice versa or
2246 if the multiplication is written as a shift. If so, we can
2247 distribute and make a new multiply, shift, or maybe just
2248 have X (if C is 2 in the example above). But don't make
2249 something more expensive than we had before. */
2250
2251 if (SCALAR_INT_MODE_P (mode))
2252 {
2253 rtx lhs = op0, rhs = op1;
2254
2255 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2256 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2257
2258 if (GET_CODE (lhs) == NEG)
2259 {
2260 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2261 lhs = XEXP (lhs, 0);
2262 }
2263 else if (GET_CODE (lhs) == MULT
2264 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2265 {
2266 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2267 lhs = XEXP (lhs, 0);
2268 }
2269 else if (GET_CODE (lhs) == ASHIFT
2270 && CONST_INT_P (XEXP (lhs, 1))
2271 && INTVAL (XEXP (lhs, 1)) >= 0
2272 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2273 {
2274 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2275 GET_MODE_PRECISION (mode));
2276 lhs = XEXP (lhs, 0);
2277 }
2278
2279 if (GET_CODE (rhs) == NEG)
2280 {
2281 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2282 rhs = XEXP (rhs, 0);
2283 }
2284 else if (GET_CODE (rhs) == MULT
2285 && CONST_INT_P (XEXP (rhs, 1)))
2286 {
2287 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2288 rhs = XEXP (rhs, 0);
2289 }
2290 else if (GET_CODE (rhs) == ASHIFT
2291 && CONST_INT_P (XEXP (rhs, 1))
2292 && INTVAL (XEXP (rhs, 1)) >= 0
2293 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2294 {
2295 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2296 GET_MODE_PRECISION (mode));
2297 negcoeff1 = -negcoeff1;
2298 rhs = XEXP (rhs, 0);
2299 }
2300
2301 if (rtx_equal_p (lhs, rhs))
2302 {
2303 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2304 rtx coeff;
2305 bool speed = optimize_function_for_speed_p (cfun);
2306
2307 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2308
2309 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2310 return (set_src_cost (tem, mode, speed)
2311 <= set_src_cost (orig, mode, speed) ? tem : 0);
2312 }
2313 }
2314
2315 /* (a - (-b)) -> (a + b). True even for IEEE. */
2316 if (GET_CODE (op1) == NEG)
2317 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2318
2319 /* (-x - c) may be simplified as (-c - x). */
2320 if (GET_CODE (op0) == NEG
2321 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2322 {
2323 tem = simplify_unary_operation (NEG, mode, op1, mode);
2324 if (tem)
2325 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2326 }
2327
2328 /* Don't let a relocatable value get a negative coeff. */
2329 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2330 return simplify_gen_binary (PLUS, mode,
2331 op0,
2332 neg_const_int (mode, op1));
2333
2334 /* (x - (x & y)) -> (x & ~y) */
2335 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2336 {
2337 if (rtx_equal_p (op0, XEXP (op1, 0)))
2338 {
2339 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2340 GET_MODE (XEXP (op1, 1)));
2341 return simplify_gen_binary (AND, mode, op0, tem);
2342 }
2343 if (rtx_equal_p (op0, XEXP (op1, 1)))
2344 {
2345 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2346 GET_MODE (XEXP (op1, 0)));
2347 return simplify_gen_binary (AND, mode, op0, tem);
2348 }
2349 }
2350
2351 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2352 by reversing the comparison code if valid. */
2353 if (STORE_FLAG_VALUE == 1
2354 && trueop0 == const1_rtx
2355 && COMPARISON_P (op1)
2356 && (reversed = reversed_comparison (op1, mode)))
2357 return reversed;
2358
2359 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2360 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2361 && GET_CODE (op1) == MULT
2362 && GET_CODE (XEXP (op1, 0)) == NEG)
2363 {
2364 rtx in1, in2;
2365
2366 in1 = XEXP (XEXP (op1, 0), 0);
2367 in2 = XEXP (op1, 1);
2368 return simplify_gen_binary (PLUS, mode,
2369 simplify_gen_binary (MULT, mode,
2370 in1, in2),
2371 op0);
2372 }
2373
2374 /* Canonicalize (minus (neg A) (mult B C)) to
2375 (minus (mult (neg B) C) A). */
2376 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2377 && GET_CODE (op1) == MULT
2378 && GET_CODE (op0) == NEG)
2379 {
2380 rtx in1, in2;
2381
2382 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2383 in2 = XEXP (op1, 1);
2384 return simplify_gen_binary (MINUS, mode,
2385 simplify_gen_binary (MULT, mode,
2386 in1, in2),
2387 XEXP (op0, 0));
2388 }
2389
2390 /* If one of the operands is a PLUS or a MINUS, see if we can
2391 simplify this by the associative law. This will, for example,
2392 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2393 Don't use the associative law for floating point.
2394 The inaccuracy makes it nonassociative,
2395 and subtle programs can break if operations are associated. */
2396
2397 if (INTEGRAL_MODE_P (mode)
2398 && (plus_minus_operand_p (op0)
2399 || plus_minus_operand_p (op1))
2400 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2401 return tem;
2402 break;
2403
2404 case MULT:
2405 if (trueop1 == constm1_rtx)
2406 return simplify_gen_unary (NEG, mode, op0, mode);
2407
2408 if (GET_CODE (op0) == NEG)
2409 {
2410 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2411 /* If op1 is a MULT as well and simplify_unary_operation
2412 just moved the NEG to the second operand, simplify_gen_binary
2413 below could, through simplify_associative_operation, move
2414 the NEG around again and recurse endlessly. */
2415 if (temp
2416 && GET_CODE (op1) == MULT
2417 && GET_CODE (temp) == MULT
2418 && XEXP (op1, 0) == XEXP (temp, 0)
2419 && GET_CODE (XEXP (temp, 1)) == NEG
2420 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2421 temp = NULL_RTX;
2422 if (temp)
2423 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2424 }
2425 if (GET_CODE (op1) == NEG)
2426 {
2427 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2428 /* If op0 is a MULT as well and simplify_unary_operation
2429 just moved the NEG to the second operand, simplify_gen_binary
2430 below could, through simplify_associative_operation, move
2431 the NEG around again and recurse endlessly. */
2432 if (temp
2433 && GET_CODE (op0) == MULT
2434 && GET_CODE (temp) == MULT
2435 && XEXP (op0, 0) == XEXP (temp, 0)
2436 && GET_CODE (XEXP (temp, 1)) == NEG
2437 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2438 temp = NULL_RTX;
2439 if (temp)
2440 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2441 }
2442
2443 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2444 x is NaN, since x * 0 is then also NaN. Nor is it valid
2445 when the mode has signed zeros, since multiplying a negative
2446 number by 0 will give -0, not 0. */
2447 if (!HONOR_NANS (mode)
2448 && !HONOR_SIGNED_ZEROS (mode)
2449 && trueop1 == CONST0_RTX (mode)
2450 && ! side_effects_p (op0))
2451 return op1;
2452
2453 /* In IEEE floating point, x*1 is not equivalent to x for
2454 signalling NaNs. */
2455 if (!HONOR_SNANS (mode)
2456 && trueop1 == CONST1_RTX (mode))
2457 return op0;
2458
2459 /* Convert multiply by constant power of two into shift. */
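/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)). */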
2460 if (CONST_SCALAR_INT_P (trueop1))
2461 {
2462 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2463 if (val >= 0)
2464 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2465 }
2466
2467 /* x*2 is x+x and x*(-1) is -x */
2468 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2469 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2470 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2471 && GET_MODE (op0) == mode)
2472 {
2473 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2474
2475 if (real_equal (d1, &dconst2))
2476 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2477
2478 if (!HONOR_SNANS (mode)
2479 && real_equal (d1, &dconstm1))
2480 return simplify_gen_unary (NEG, mode, op0, mode);
2481 }
2482
2483 /* Optimize -x * -x as x * x. */
2484 if (FLOAT_MODE_P (mode)
2485 && GET_CODE (op0) == NEG
2486 && GET_CODE (op1) == NEG
2487 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2488 && !side_effects_p (XEXP (op0, 0)))
2489 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2490
2491 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2492 if (SCALAR_FLOAT_MODE_P (mode)
2493 && GET_CODE (op0) == ABS
2494 && GET_CODE (op1) == ABS
2495 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2496 && !side_effects_p (XEXP (op0, 0)))
2497 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2498
2499 /* Reassociate multiplication, but for floating point MULTs
2500 only when the user specifies unsafe math optimizations. */
2501 if (! FLOAT_MODE_P (mode)
2502 || flag_unsafe_math_optimizations)
2503 {
2504 tem = simplify_associative_operation (code, mode, op0, op1);
2505 if (tem)
2506 return tem;
2507 }
2508 break;
2509
2510 case IOR:
2511 if (trueop1 == CONST0_RTX (mode))
2512 return op0;
2513 if (INTEGRAL_MODE_P (mode)
2514 && trueop1 == CONSTM1_RTX (mode)
2515 && !side_effects_p (op0))
2516 return op1;
2517 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2518 return op0;
2519 /* A | (~A) -> -1 */
2520 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2521 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2522 && ! side_effects_p (op0)
2523 && SCALAR_INT_MODE_P (mode))
2524 return constm1_rtx;
2525
2526 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2527 if (CONST_INT_P (op1)
2528 && HWI_COMPUTABLE_MODE_P (mode)
2529 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2530 && !side_effects_p (op0))
2531 return op1;
2532
2533 /* Canonicalize (X & C1) | C2. */
2534 if (GET_CODE (op0) == AND
2535 && CONST_INT_P (trueop1)
2536 && CONST_INT_P (XEXP (op0, 1)))
2537 {
2538 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2539 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2540 HOST_WIDE_INT c2 = INTVAL (trueop1);
2541
2542 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2543 if ((c1 & c2) == c1
2544 && !side_effects_p (XEXP (op0, 0)))
2545 return trueop1;
2546
2547 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2548 if (((c1|c2) & mask) == mask)
2549 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2550
2551 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2552 if (((c1 & ~c2) & mask) != (c1 & mask))
2553 {
2554 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2555 gen_int_mode (c1 & ~c2, mode));
2556 return simplify_gen_binary (IOR, mode, tem, op1);
2557 }
2558 }
2559
2560 /* Convert (A & B) | A to A. */
2561 if (GET_CODE (op0) == AND
2562 && (rtx_equal_p (XEXP (op0, 0), op1)
2563 || rtx_equal_p (XEXP (op0, 1), op1))
2564 && ! side_effects_p (XEXP (op0, 0))
2565 && ! side_effects_p (XEXP (op0, 1)))
2566 return op1;
2567
2568 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2569 mode size to (rotate A CX). */
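/* E.g. in SImode, (ior (ashift a (const_int 8)) (lshiftrt a (const_int 24)))
   becomes (rotate a (const_int 8)). */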
2570
2571 if (GET_CODE (op1) == ASHIFT
2572 || GET_CODE (op1) == SUBREG)
2573 {
2574 opleft = op1;
2575 opright = op0;
2576 }
2577 else
2578 {
2579 opright = op1;
2580 opleft = op0;
2581 }
2582
2583 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2584 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2585 && CONST_INT_P (XEXP (opleft, 1))
2586 && CONST_INT_P (XEXP (opright, 1))
2587 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2588 == GET_MODE_PRECISION (mode)))
2589 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2590
2591 /* Same, but for ashift that has been "simplified" to a wider mode
2592 by simplify_shift_const. */
2593
2594 if (GET_CODE (opleft) == SUBREG
2595 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2596 && GET_CODE (opright) == LSHIFTRT
2597 && GET_CODE (XEXP (opright, 0)) == SUBREG
2598 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2599 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2600 && (GET_MODE_SIZE (GET_MODE (opleft))
2601 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2602 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2603 SUBREG_REG (XEXP (opright, 0)))
2604 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2605 && CONST_INT_P (XEXP (opright, 1))
2606 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2607 == GET_MODE_PRECISION (mode)))
2608 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2609 XEXP (SUBREG_REG (opleft), 1));
2610
2611 /* If we have (ior (and X C1) C2), simplify this by making
2612 C1 as small as possible if C1 actually changes. */
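/* E.g. (ior (and x (const_int 0xff)) (const_int 0x0f)) becomes
   (ior (and x (const_int 0xf0)) (const_int 0x0f)). */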
2613 if (CONST_INT_P (op1)
2614 && (HWI_COMPUTABLE_MODE_P (mode)
2615 || INTVAL (op1) > 0)
2616 && GET_CODE (op0) == AND
2617 && CONST_INT_P (XEXP (op0, 1))
2618 && CONST_INT_P (op1)
2619 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2620 {
2621 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2622 gen_int_mode (UINTVAL (XEXP (op0, 1))
2623 & ~UINTVAL (op1),
2624 mode));
2625 return simplify_gen_binary (IOR, mode, tmp, op1);
2626 }
2627
2628 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2629 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2630 the PLUS does not affect any of the bits in OP1, then we can do
2631 the IOR as a PLUS and we can associate. This is valid if OP1
2632 can be safely shifted left C bits. */
2633 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2634 && GET_CODE (XEXP (op0, 0)) == PLUS
2635 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2636 && CONST_INT_P (XEXP (op0, 1))
2637 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2638 {
2639 int count = INTVAL (XEXP (op0, 1));
2640 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2641
2642 if (mask >> count == INTVAL (trueop1)
2643 && trunc_int_for_mode (mask, mode) == mask
2644 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2645 return simplify_gen_binary (ASHIFTRT, mode,
2646 plus_constant (mode, XEXP (op0, 0),
2647 mask),
2648 XEXP (op0, 1));
2649 }
2650
2651 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2652 if (tem)
2653 return tem;
2654
2655 tem = simplify_associative_operation (code, mode, op0, op1);
2656 if (tem)
2657 return tem;
2658 break;
2659
2660 case XOR:
2661 if (trueop1 == CONST0_RTX (mode))
2662 return op0;
2663 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2664 return simplify_gen_unary (NOT, mode, op0, mode);
2665 if (rtx_equal_p (trueop0, trueop1)
2666 && ! side_effects_p (op0)
2667 && GET_MODE_CLASS (mode) != MODE_CC)
2668 return CONST0_RTX (mode);
2669
2670 /* Canonicalize XOR of the most significant bit to PLUS. */
2671 if (CONST_SCALAR_INT_P (op1)
2672 && mode_signbit_p (mode, op1))
2673 return simplify_gen_binary (PLUS, mode, op0, op1);
2674 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2675 if (CONST_SCALAR_INT_P (op1)
2676 && GET_CODE (op0) == PLUS
2677 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2678 && mode_signbit_p (mode, XEXP (op0, 1)))
2679 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2680 simplify_gen_binary (XOR, mode, op1,
2681 XEXP (op0, 1)));
2682
2683 /* If we are XORing two things that have no bits in common,
2684 convert them into an IOR. This helps to detect rotation encoded
2685 using those methods and possibly other simplifications. */
2686
2687 if (HWI_COMPUTABLE_MODE_P (mode)
2688 && (nonzero_bits (op0, mode)
2689 & nonzero_bits (op1, mode)) == 0)
2690 return (simplify_gen_binary (IOR, mode, op0, op1));
2691
2692 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2693 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2694 (NOT y). */
2695 {
2696 int num_negated = 0;
2697
2698 if (GET_CODE (op0) == NOT)
2699 num_negated++, op0 = XEXP (op0, 0);
2700 if (GET_CODE (op1) == NOT)
2701 num_negated++, op1 = XEXP (op1, 0);
2702
2703 if (num_negated == 2)
2704 return simplify_gen_binary (XOR, mode, op0, op1);
2705 else if (num_negated == 1)
2706 return simplify_gen_unary (NOT, mode,
2707 simplify_gen_binary (XOR, mode, op0, op1),
2708 mode);
2709 }
2710
2711 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2712 correspond to a machine insn or result in further simplifications
2713 if B is a constant. */
2714
2715 if (GET_CODE (op0) == AND
2716 && rtx_equal_p (XEXP (op0, 1), op1)
2717 && ! side_effects_p (op1))
2718 return simplify_gen_binary (AND, mode,
2719 simplify_gen_unary (NOT, mode,
2720 XEXP (op0, 0), mode),
2721 op1);
2722
2723 else if (GET_CODE (op0) == AND
2724 && rtx_equal_p (XEXP (op0, 0), op1)
2725 && ! side_effects_p (op1))
2726 return simplify_gen_binary (AND, mode,
2727 simplify_gen_unary (NOT, mode,
2728 XEXP (op0, 1), mode),
2729 op1);
2730
2731 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2732 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2733 out bits inverted twice and not set by C. Similarly, given
2734 (xor (and (xor A B) C) D), simplify without inverting C in
2735 the xor operand: (xor (and A C) (B&C)^D).
2736 */
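/* E.g. (xor (ior (xor a (const_int 12)) (const_int 10)) (const_int 1))
   becomes (xor (ior a (const_int 10)) (const_int 5)). */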
2737 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2738 && GET_CODE (XEXP (op0, 0)) == XOR
2739 && CONST_INT_P (op1)
2740 && CONST_INT_P (XEXP (op0, 1))
2741 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2742 {
2743 enum rtx_code op = GET_CODE (op0);
2744 rtx a = XEXP (XEXP (op0, 0), 0);
2745 rtx b = XEXP (XEXP (op0, 0), 1);
2746 rtx c = XEXP (op0, 1);
2747 rtx d = op1;
2748 HOST_WIDE_INT bval = INTVAL (b);
2749 HOST_WIDE_INT cval = INTVAL (c);
2750 HOST_WIDE_INT dval = INTVAL (d);
2751 HOST_WIDE_INT xcval;
2752
2753 if (op == IOR)
2754 xcval = ~cval;
2755 else
2756 xcval = cval;
2757
2758 return simplify_gen_binary (XOR, mode,
2759 simplify_gen_binary (op, mode, a, c),
2760 gen_int_mode ((bval & xcval) ^ dval,
2761 mode));
2762 }
2763
2764 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2765 we can transform like this:
2766 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2767 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2768 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2769 Attempt a few simplifications when B and C are both constants. */
2770 if (GET_CODE (op0) == AND
2771 && CONST_INT_P (op1)
2772 && CONST_INT_P (XEXP (op0, 1)))
2773 {
2774 rtx a = XEXP (op0, 0);
2775 rtx b = XEXP (op0, 1);
2776 rtx c = op1;
2777 HOST_WIDE_INT bval = INTVAL (b);
2778 HOST_WIDE_INT cval = INTVAL (c);
2779
2780 /* Instead of computing ~A&C, we compute its negated value,
2781 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2782 optimize for sure. If it does not simplify, we still try
2783 to compute ~A&C below, but since that always allocates
2784 RTL, we don't try that before committing to returning a
2785 simplified expression. */
2786 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2787 GEN_INT (~cval));
2788
2789 if ((~cval & bval) == 0)
2790 {
2791 rtx na_c = NULL_RTX;
2792 if (n_na_c)
2793 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2794 else
2795 {
2796 /* If ~A does not simplify, don't bother: we don't
2797 want to simplify 2 operations into 3, and if na_c
2798 were to simplify with na, n_na_c would have
2799 simplified as well. */
2800 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2801 if (na)
2802 na_c = simplify_gen_binary (AND, mode, na, c);
2803 }
2804
2805 /* Try to simplify ~A&C | ~B&C. */
2806 if (na_c != NULL_RTX)
2807 return simplify_gen_binary (IOR, mode, na_c,
2808 gen_int_mode (~bval & cval, mode));
2809 }
2810 else
2811 {
2812 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2813 if (n_na_c == CONSTM1_RTX (mode))
2814 {
2815 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2816 gen_int_mode (~cval & bval,
2817 mode));
2818 return simplify_gen_binary (IOR, mode, a_nc_b,
2819 gen_int_mode (~bval & cval,
2820 mode));
2821 }
2822 }
2823 }
2824
2825 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2826 comparison if STORE_FLAG_VALUE is 1. */
2827 if (STORE_FLAG_VALUE == 1
2828 && trueop1 == const1_rtx
2829 && COMPARISON_P (op0)
2830 && (reversed = reversed_comparison (op0, mode)))
2831 return reversed;
2832
2833 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2834 is (lt foo (const_int 0)), so we can perform the above
2835 simplification if STORE_FLAG_VALUE is 1. */
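/* E.g. in SImode, (xor (lshiftrt x (const_int 31)) (const_int 1))
   becomes (ge x (const_int 0)). */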
2836
2837 if (STORE_FLAG_VALUE == 1
2838 && trueop1 == const1_rtx
2839 && GET_CODE (op0) == LSHIFTRT
2840 && CONST_INT_P (XEXP (op0, 1))
2841 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2842 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2843
2844 /* Likewise, (xor (comparison foo bar) (const_int sign-bit)) can become
2845 the reversed comparison when STORE_FLAG_VALUE is the sign bit. */
2846 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2847 && trueop1 == const_true_rtx
2848 && COMPARISON_P (op0)
2849 && (reversed = reversed_comparison (op0, mode)))
2850 return reversed;
2851
2852 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2853 if (tem)
2854 return tem;
2855
2856 tem = simplify_associative_operation (code, mode, op0, op1);
2857 if (tem)
2858 return tem;
2859 break;
2860
2861 case AND:
2862 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2863 return trueop1;
2864 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2865 return op0;
2866 if (HWI_COMPUTABLE_MODE_P (mode))
2867 {
2868 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2869 HOST_WIDE_INT nzop1;
2870 if (CONST_INT_P (trueop1))
2871 {
2872 HOST_WIDE_INT val1 = INTVAL (trueop1);
2873 /* If we are turning off bits already known off in OP0, we need
2874 not do an AND. */
2875 if ((nzop0 & ~val1) == 0)
2876 return op0;
2877 }
2878 nzop1 = nonzero_bits (trueop1, mode);
2879 /* If we are clearing all the nonzero bits, the result is zero. */
2880 if ((nzop1 & nzop0) == 0
2881 && !side_effects_p (op0) && !side_effects_p (op1))
2882 return CONST0_RTX (mode);
2883 }
2884 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2885 && GET_MODE_CLASS (mode) != MODE_CC)
2886 return op0;
2887 /* A & (~A) -> 0 */
2888 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2889 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2890 && ! side_effects_p (op0)
2891 && GET_MODE_CLASS (mode) != MODE_CC)
2892 return CONST0_RTX (mode);
2893
2894 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2895 there are no nonzero bits of C outside of X's mode. */
2896 if ((GET_CODE (op0) == SIGN_EXTEND
2897 || GET_CODE (op0) == ZERO_EXTEND)
2898 && CONST_INT_P (trueop1)
2899 && HWI_COMPUTABLE_MODE_P (mode)
2900 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2901 & UINTVAL (trueop1)) == 0)
2902 {
2903 machine_mode imode = GET_MODE (XEXP (op0, 0));
2904 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2905 gen_int_mode (INTVAL (trueop1),
2906 imode));
2907 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2908 }
2909
2910 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2911 we might be able to further simplify the AND with X and potentially
2912 remove the truncation altogether. */
2913 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2914 {
2915 rtx x = XEXP (op0, 0);
2916 machine_mode xmode = GET_MODE (x);
2917 tem = simplify_gen_binary (AND, xmode, x,
2918 gen_int_mode (INTVAL (trueop1), xmode));
2919 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2920 }
2921
2922 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2923 if (GET_CODE (op0) == IOR
2924 && CONST_INT_P (trueop1)
2925 && CONST_INT_P (XEXP (op0, 1)))
2926 {
2927 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2928 return simplify_gen_binary (IOR, mode,
2929 simplify_gen_binary (AND, mode,
2930 XEXP (op0, 0), op1),
2931 gen_int_mode (tmp, mode));
2932 }
2933
2934 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2935 insn (and may simplify more). */
2936 if (GET_CODE (op0) == XOR
2937 && rtx_equal_p (XEXP (op0, 0), op1)
2938 && ! side_effects_p (op1))
2939 return simplify_gen_binary (AND, mode,
2940 simplify_gen_unary (NOT, mode,
2941 XEXP (op0, 1), mode),
2942 op1);
2943
2944 if (GET_CODE (op0) == XOR
2945 && rtx_equal_p (XEXP (op0, 1), op1)
2946 && ! side_effects_p (op1))
2947 return simplify_gen_binary (AND, mode,
2948 simplify_gen_unary (NOT, mode,
2949 XEXP (op0, 0), mode),
2950 op1);
2951
2952 /* Similarly for (~(A ^ B)) & A. */
2953 if (GET_CODE (op0) == NOT
2954 && GET_CODE (XEXP (op0, 0)) == XOR
2955 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2956 && ! side_effects_p (op1))
2957 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2958
2959 if (GET_CODE (op0) == NOT
2960 && GET_CODE (XEXP (op0, 0)) == XOR
2961 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2962 && ! side_effects_p (op1))
2963 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2964
2965 /* Convert (A | B) & A to A. */
2966 if (GET_CODE (op0) == IOR
2967 && (rtx_equal_p (XEXP (op0, 0), op1)
2968 || rtx_equal_p (XEXP (op0, 1), op1))
2969 && ! side_effects_p (XEXP (op0, 0))
2970 && ! side_effects_p (XEXP (op0, 1)))
2971 return op1;
2972
2973 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2974 ((A & N) + B) & M -> (A + B) & M
2975 Similarly if (N & M) == 0,
2976 ((A | N) + B) & M -> (A + B) & M
2977 and for - instead of + and/or ^ instead of |.
2978 Also, if (N & M) == 0, then
2979 (A +- N) & M -> A & M. */
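/* E.g. with M == 0xff, ((A & 0xfff) + B) & 0xff becomes (A + B) & 0xff,
   and (A + 0x100) & 0xff becomes A & 0xff. */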
2980 if (CONST_INT_P (trueop1)
2981 && HWI_COMPUTABLE_MODE_P (mode)
2982 && ~UINTVAL (trueop1)
2983 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2984 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2985 {
2986 rtx pmop[2];
2987 int which;
2988
2989 pmop[0] = XEXP (op0, 0);
2990 pmop[1] = XEXP (op0, 1);
2991
2992 if (CONST_INT_P (pmop[1])
2993 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2994 return simplify_gen_binary (AND, mode, pmop[0], op1);
2995
2996 for (which = 0; which < 2; which++)
2997 {
2998 tem = pmop[which];
2999 switch (GET_CODE (tem))
3000 {
3001 case AND:
3002 if (CONST_INT_P (XEXP (tem, 1))
3003 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3004 == UINTVAL (trueop1))
3005 pmop[which] = XEXP (tem, 0);
3006 break;
3007 case IOR:
3008 case XOR:
3009 if (CONST_INT_P (XEXP (tem, 1))
3010 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3011 pmop[which] = XEXP (tem, 0);
3012 break;
3013 default:
3014 break;
3015 }
3016 }
3017
3018 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3019 {
3020 tem = simplify_gen_binary (GET_CODE (op0), mode,
3021 pmop[0], pmop[1]);
3022 return simplify_gen_binary (code, mode, tem, op1);
3023 }
3024 }
3025
3026 /* (and X (ior (not X) Y)) -> (and X Y) */
3027 if (GET_CODE (op1) == IOR
3028 && GET_CODE (XEXP (op1, 0)) == NOT
3029 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3030 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3031
3032 /* (and (ior (not X) Y) X) -> (and X Y) */
3033 if (GET_CODE (op0) == IOR
3034 && GET_CODE (XEXP (op0, 0)) == NOT
3035 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3036 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3037
3038 /* (and X (ior Y (not X))) -> (and X Y) */
3039 if (GET_CODE (op1) == IOR
3040 && GET_CODE (XEXP (op1, 1)) == NOT
3041 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3042 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3043
3044 /* (and (ior Y (not X)) X) -> (and X Y) */
3045 if (GET_CODE (op0) == IOR
3046 && GET_CODE (XEXP (op0, 1)) == NOT
3047 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3048 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3049
3050 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3051 if (tem)
3052 return tem;
3053
3054 tem = simplify_associative_operation (code, mode, op0, op1);
3055 if (tem)
3056 return tem;
3057 break;
3058
3059 case UDIV:
3060 /* 0/x is 0 (or x&0 if x has side-effects). */
3061 if (trueop0 == CONST0_RTX (mode))
3062 {
3063 if (side_effects_p (op1))
3064 return simplify_gen_binary (AND, mode, op1, trueop0);
3065 return trueop0;
3066 }
3067 /* x/1 is x. */
3068 if (trueop1 == CONST1_RTX (mode))
3069 {
3070 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3071 if (tem)
3072 return tem;
3073 }
3074 /* Convert divide by power of two into shift. */
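/* E.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)). */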
3075 if (CONST_INT_P (trueop1)
3076 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3077 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3078 break;
3079
3080 case DIV:
3081 /* Handle floating point and integers separately. */
3082 if (SCALAR_FLOAT_MODE_P (mode))
3083 {
3084 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3085 safe for modes with NaNs, since 0.0 / 0.0 will then be
3086 NaN rather than 0.0. Nor is it safe for modes with signed
3087 zeros, since dividing 0 by a negative number gives -0.0. */
3088 if (trueop0 == CONST0_RTX (mode)
3089 && !HONOR_NANS (mode)
3090 && !HONOR_SIGNED_ZEROS (mode)
3091 && ! side_effects_p (op1))
3092 return op0;
3093 /* x/1.0 is x. */
3094 if (trueop1 == CONST1_RTX (mode)
3095 && !HONOR_SNANS (mode))
3096 return op0;
3097
3098 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3099 && trueop1 != CONST0_RTX (mode))
3100 {
3101 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3102
3103 /* x/-1.0 is -x. */
3104 if (real_equal (d1, &dconstm1)
3105 && !HONOR_SNANS (mode))
3106 return simplify_gen_unary (NEG, mode, op0, mode);
3107
3108 /* Change FP division by a constant into multiplication.
3109 Only do this with -freciprocal-math. */
3110 if (flag_reciprocal_math
3111 && !real_equal (d1, &dconst0))
3112 {
3113 REAL_VALUE_TYPE d;
3114 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3115 tem = const_double_from_real_value (d, mode);
3116 return simplify_gen_binary (MULT, mode, op0, tem);
3117 }
3118 }
3119 }
3120 else if (SCALAR_INT_MODE_P (mode))
3121 {
3122 /* 0/x is 0 (or x&0 if x has side-effects). */
3123 if (trueop0 == CONST0_RTX (mode)
3124 && !cfun->can_throw_non_call_exceptions)
3125 {
3126 if (side_effects_p (op1))
3127 return simplify_gen_binary (AND, mode, op1, trueop0);
3128 return trueop0;
3129 }
3130 /* x/1 is x. */
3131 if (trueop1 == CONST1_RTX (mode))
3132 {
3133 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3134 if (tem)
3135 return tem;
3136 }
3137 /* x/-1 is -x. */
3138 if (trueop1 == constm1_rtx)
3139 {
3140 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3141 if (x)
3142 return simplify_gen_unary (NEG, mode, x, mode);
3143 }
3144 }
3145 break;
3146
3147 case UMOD:
3148 /* 0%x is 0 (or x&0 if x has side-effects). */
3149 if (trueop0 == CONST0_RTX (mode))
3150 {
3151 if (side_effects_p (op1))
3152 return simplify_gen_binary (AND, mode, op1, trueop0);
3153 return trueop0;
3154 }
3155 /* x%1 is 0 (or x&0 if x has side-effects). */
3156 if (trueop1 == CONST1_RTX (mode))
3157 {
3158 if (side_effects_p (op0))
3159 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3160 return CONST0_RTX (mode);
3161 }
3162 /* Implement modulus by power of two as AND. */
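/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)). */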
3163 if (CONST_INT_P (trueop1)
3164 && exact_log2 (UINTVAL (trueop1)) > 0)
3165 return simplify_gen_binary (AND, mode, op0,
3166 gen_int_mode (INTVAL (op1) - 1, mode));
3167 break;
3168
3169 case MOD:
3170 /* 0%x is 0 (or x&0 if x has side-effects). */
3171 if (trueop0 == CONST0_RTX (mode))
3172 {
3173 if (side_effects_p (op1))
3174 return simplify_gen_binary (AND, mode, op1, trueop0);
3175 return trueop0;
3176 }
3177 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3178 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3179 {
3180 if (side_effects_p (op0))
3181 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3182 return CONST0_RTX (mode);
3183 }
3184 break;
3185
3186 case ROTATERT:
3187 case ROTATE:
3188 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3189 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3190 bitsize - 1, use the other direction of rotate with an amount of
3191 1 .. bitsize / 2 - 1 instead. */
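/* E.g. in SImode, (rotate x (const_int 24)) is canonicalized to
   (rotatert x (const_int 8)). */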
3192 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3193 if (CONST_INT_P (trueop1)
3194 && IN_RANGE (INTVAL (trueop1),
3195 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3196 GET_MODE_PRECISION (mode) - 1))
3197 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3198 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3199 - INTVAL (trueop1)));
3200 #endif
3201 /* FALLTHRU */
3202 case ASHIFTRT:
3203 if (trueop1 == CONST0_RTX (mode))
3204 return op0;
3205 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3206 return op0;
3207 /* Rotating ~0 always results in ~0. */
3208 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3209 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3210 && ! side_effects_p (op1))
3211 return op0;
3212 /* Given:
3213 scalar modes M1, M2
3214 scalar constants c1, c2
3215 size (M2) > size (M1)
3216 c1 == size (M2) - size (M1)
3217 optimize:
3218 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3219 <low_part>)
3220 (const_int <c2>))
3221 to:
3222 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3223 <low_part>). */
3224 if (code == ASHIFTRT
3225 && !VECTOR_MODE_P (mode)
3226 && SUBREG_P (op0)
3227 && CONST_INT_P (op1)
3228 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3229 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3230 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3231 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3232 > GET_MODE_BITSIZE (mode))
3233 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3234 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3235 - GET_MODE_BITSIZE (mode)))
3236 && subreg_lowpart_p (op0))
3237 {
3238 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3239 + INTVAL (op1));
3240 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3241 tmp = simplify_gen_binary (ASHIFTRT,
3242 GET_MODE (SUBREG_REG (op0)),
3243 XEXP (SUBREG_REG (op0), 0),
3244 tmp);
3245 return lowpart_subreg (mode, tmp, inner_mode);
3246 }
3247 canonicalize_shift:
3248 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3249 {
3250 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3251 if (val != INTVAL (op1))
3252 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3253 }
3254 break;
3255
3256 case ASHIFT:
3257 case SS_ASHIFT:
3258 case US_ASHIFT:
3259 if (trueop1 == CONST0_RTX (mode))
3260 return op0;
3261 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3262 return op0;
3263 goto canonicalize_shift;
3264
3265 case LSHIFTRT:
3266 if (trueop1 == CONST0_RTX (mode))
3267 return op0;
3268 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3269 return op0;
3270 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
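/* E.g. in SImode, where CLZ of zero is defined as 32,
   (lshiftrt (clz x) (const_int 5)) is 1 exactly when x is zero,
   i.e. (eq x (const_int 0)). */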
3271 if (GET_CODE (op0) == CLZ
3272 && CONST_INT_P (trueop1)
3273 && STORE_FLAG_VALUE == 1
3274 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3275 {
3276 machine_mode imode = GET_MODE (XEXP (op0, 0));
3277 unsigned HOST_WIDE_INT zero_val = 0;
3278
3279 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3280 && zero_val == GET_MODE_PRECISION (imode)
3281 && INTVAL (trueop1) == exact_log2 (zero_val))
3282 return simplify_gen_relational (EQ, mode, imode,
3283 XEXP (op0, 0), const0_rtx);
3284 }
3285 goto canonicalize_shift;
3286
3287 case SMIN:
3288 if (width <= HOST_BITS_PER_WIDE_INT
3289 && mode_signbit_p (mode, trueop1)
3290 && ! side_effects_p (op0))
3291 return op1;
3292 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3293 return op0;
3294 tem = simplify_associative_operation (code, mode, op0, op1);
3295 if (tem)
3296 return tem;
3297 break;
3298
3299 case SMAX:
3300 if (width <= HOST_BITS_PER_WIDE_INT
3301 && CONST_INT_P (trueop1)
3302 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3303 && ! side_effects_p (op0))
3304 return op1;
3305 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3306 return op0;
3307 tem = simplify_associative_operation (code, mode, op0, op1);
3308 if (tem)
3309 return tem;
3310 break;
3311
3312 case UMIN:
3313 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3314 return op1;
3315 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3316 return op0;
3317 tem = simplify_associative_operation (code, mode, op0, op1);
3318 if (tem)
3319 return tem;
3320 break;
3321
3322 case UMAX:
3323 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3324 return op1;
3325 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3326 return op0;
3327 tem = simplify_associative_operation (code, mode, op0, op1);
3328 if (tem)
3329 return tem;
3330 break;
3331
3332 case SS_PLUS:
3333 case US_PLUS:
3334 case SS_MINUS:
3335 case US_MINUS:
3336 case SS_MULT:
3337 case US_MULT:
3338 case SS_DIV:
3339 case US_DIV:
3340 /* ??? There are simplifications that can be done. */
3341 return 0;
3342
3343 case VEC_SELECT:
3344 if (!VECTOR_MODE_P (mode))
3345 {
3346 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3347 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3348 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3349 gcc_assert (XVECLEN (trueop1, 0) == 1);
3350 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3351
3352 if (GET_CODE (trueop0) == CONST_VECTOR)
3353 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3354 (trueop1, 0, 0)));
3355
3356 /* Extract a scalar element from a nested VEC_SELECT expression
3357 (with optional nested VEC_CONCAT expression). Some targets
3358 (i386) extract a scalar element from a vector using a chain of
3359 nested VEC_SELECT expressions. When the input operand is a memory
3360 operand, this operation can be simplified to a simple scalar
3361 load at an offset from the memory address. */
3362 if (GET_CODE (trueop0) == VEC_SELECT)
3363 {
3364 rtx op0 = XEXP (trueop0, 0);
3365 rtx op1 = XEXP (trueop0, 1);
3366
3367 machine_mode opmode = GET_MODE (op0);
3368 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3369 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3370
3371 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3372 int elem;
3373
3374 rtvec vec;
3375 rtx tmp_op, tmp;
3376
3377 gcc_assert (GET_CODE (op1) == PARALLEL);
3378 gcc_assert (i < n_elts);
3379
3380 /* Select the element pointed to by the nested selector. */
3381 elem = INTVAL (XVECEXP (op1, 0, i));
3382
3383 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3384 if (GET_CODE (op0) == VEC_CONCAT)
3385 {
3386 rtx op00 = XEXP (op0, 0);
3387 rtx op01 = XEXP (op0, 1);
3388
3389 machine_mode mode00, mode01;
3390 int n_elts00, n_elts01;
3391
3392 mode00 = GET_MODE (op00);
3393 mode01 = GET_MODE (op01);
3394
3395 /* Find out number of elements of each operand. */
3396 if (VECTOR_MODE_P (mode00))
3397 {
3398 elt_size = GET_MODE_UNIT_SIZE (mode00);
3399 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3400 }
3401 else
3402 n_elts00 = 1;
3403
3404 if (VECTOR_MODE_P (mode01))
3405 {
3406 elt_size = GET_MODE_UNIT_SIZE (mode01);
3407 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3408 }
3409 else
3410 n_elts01 = 1;
3411
3412 gcc_assert (n_elts == n_elts00 + n_elts01);
3413
3414 /* Select correct operand of VEC_CONCAT
3415 and adjust selector. */
3416 if (elem < n_elts01)
3417 tmp_op = op00;
3418 else
3419 {
3420 tmp_op = op01;
3421 elem -= n_elts00;
3422 }
3423 }
3424 else
3425 tmp_op = op0;
3426
3427 vec = rtvec_alloc (1);
3428 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3429
3430 tmp = gen_rtx_fmt_ee (code, mode,
3431 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3432 return tmp;
3433 }
3434 if (GET_CODE (trueop0) == VEC_DUPLICATE
3435 && GET_MODE (XEXP (trueop0, 0)) == mode)
3436 return XEXP (trueop0, 0);
3437 }
3438 else
3439 {
3440 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3441 gcc_assert (GET_MODE_INNER (mode)
3442 == GET_MODE_INNER (GET_MODE (trueop0)));
3443 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3444
3445 if (GET_CODE (trueop0) == CONST_VECTOR)
3446 {
3447 int elt_size = GET_MODE_UNIT_SIZE (mode);
3448 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3449 rtvec v = rtvec_alloc (n_elts);
3450 unsigned int i;
3451
3452 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3453 for (i = 0; i < n_elts; i++)
3454 {
3455 rtx x = XVECEXP (trueop1, 0, i);
3456
3457 gcc_assert (CONST_INT_P (x));
3458 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3459 INTVAL (x));
3460 }
3461
3462 return gen_rtx_CONST_VECTOR (mode, v);
3463 }
3464
3465 /* Recognize the identity. */
3466 if (GET_MODE (trueop0) == mode)
3467 {
3468 bool maybe_ident = true;
3469 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3470 {
3471 rtx j = XVECEXP (trueop1, 0, i);
3472 if (!CONST_INT_P (j) || INTVAL (j) != i)
3473 {
3474 maybe_ident = false;
3475 break;
3476 }
3477 }
3478 if (maybe_ident)
3479 return trueop0;
3480 }
3481
3482 /* If we build {a,b} then permute it, build the result directly. */
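/* For instance, with DFmode values a, b, c and d, selecting elements
   2 and 1 from
     (vec_concat:V4DF (vec_concat:V2DF a b) (vec_concat:V2DF c d))
   into a V2DF result yields (vec_concat:V2DF c b) directly. */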
3483 if (XVECLEN (trueop1, 0) == 2
3484 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3485 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3486 && GET_CODE (trueop0) == VEC_CONCAT
3487 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3488 && GET_MODE (XEXP (trueop0, 0)) == mode
3489 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3490 && GET_MODE (XEXP (trueop0, 1)) == mode)
3491 {
3492 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3493 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3494 rtx subop0, subop1;
3495
3496 gcc_assert (i0 < 4 && i1 < 4);
3497 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3498 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3499
3500 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3501 }
3502
3503 if (XVECLEN (trueop1, 0) == 2
3504 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3505 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3506 && GET_CODE (trueop0) == VEC_CONCAT
3507 && GET_MODE (trueop0) == mode)
3508 {
3509 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3510 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3511 rtx subop0, subop1;
3512
3513 gcc_assert (i0 < 2 && i1 < 2);
3514 subop0 = XEXP (trueop0, i0);
3515 subop1 = XEXP (trueop0, i1);
3516
3517 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3518 }
3519
3520 /* If we select one half of a vec_concat, return that. */
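/* For instance, (vec_select:V2SI
     (vec_concat:V4SI (reg:V2SI x) (reg:V2SI y))
     (parallel [(const_int 0) (const_int 1)]))
   is (reg:V2SI x), and with (parallel [(const_int 2) (const_int 3)])
   it is (reg:V2SI y). */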
3521 if (GET_CODE (trueop0) == VEC_CONCAT
3522 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3523 {
3524 rtx subop0 = XEXP (trueop0, 0);
3525 rtx subop1 = XEXP (trueop0, 1);
3526 machine_mode mode0 = GET_MODE (subop0);
3527 machine_mode mode1 = GET_MODE (subop1);
3528 int li = GET_MODE_UNIT_SIZE (mode0);
3529 int l0 = GET_MODE_SIZE (mode0) / li;
3530 int l1 = GET_MODE_SIZE (mode1) / li;
3531 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3532 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3533 {
3534 bool success = true;
3535 for (int i = 1; i < l0; ++i)
3536 {
3537 rtx j = XVECEXP (trueop1, 0, i);
3538 if (!CONST_INT_P (j) || INTVAL (j) != i)
3539 {
3540 success = false;
3541 break;
3542 }
3543 }
3544 if (success)
3545 return subop0;
3546 }
3547 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3548 {
3549 bool success = true;
3550 for (int i = 1; i < l1; ++i)
3551 {
3552 rtx j = XVECEXP (trueop1, 0, i);
3553 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3554 {
3555 success = false;
3556 break;
3557 }
3558 }
3559 if (success)
3560 return subop1;
3561 }
3562 }
3563 }
3564
3565 if (XVECLEN (trueop1, 0) == 1
3566 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3567 && GET_CODE (trueop0) == VEC_CONCAT)
3568 {
3569 rtx vec = trueop0;
3570 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3571
3572 /* Try to find the element in the VEC_CONCAT. */
3573 while (GET_MODE (vec) != mode
3574 && GET_CODE (vec) == VEC_CONCAT)
3575 {
3576 HOST_WIDE_INT vec_size;
3577
3578 if (CONST_INT_P (XEXP (vec, 0)))
3579 {
3580 /* vec_concat of two const_ints doesn't make sense with
3581 respect to modes. */
3582 if (CONST_INT_P (XEXP (vec, 1)))
3583 return 0;
3584
3585 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3586 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3587 }
3588 else
3589 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3590
3591 if (offset < vec_size)
3592 vec = XEXP (vec, 0);
3593 else
3594 {
3595 offset -= vec_size;
3596 vec = XEXP (vec, 1);
3597 }
3598 vec = avoid_constant_pool_reference (vec);
3599 }
3600
3601 if (GET_MODE (vec) == mode)
3602 return vec;
3603 }
3604
3605 /* If we select elements in a vec_merge that all come from the same
3606 operand, select from that operand directly. */
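/* For instance, in
     (vec_select:V2SI (vec_merge:V4SI a b (const_int 12))
       (parallel [(const_int 2) (const_int 3)]))
   both selected lanes have their mask bit set and so come from a,
   so this is (vec_select:V2SI a (parallel [(const_int 2) (const_int 3)])). */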
3607 if (GET_CODE (op0) == VEC_MERGE)
3608 {
3609 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3610 if (CONST_INT_P (trueop02))
3611 {
3612 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3613 bool all_operand0 = true;
3614 bool all_operand1 = true;
3615 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3616 {
3617 rtx j = XVECEXP (trueop1, 0, i);
3618 if (sel & (1 << UINTVAL (j)))
3619 all_operand1 = false;
3620 else
3621 all_operand0 = false;
3622 }
3623 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3624 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3625 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3626 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3627 }
3628 }
3629
3630 /* If we have two nested selects that are inverses of each
3631 other, replace them with the source operand. */
3632 if (GET_CODE (trueop0) == VEC_SELECT
3633 && GET_MODE (XEXP (trueop0, 0)) == mode)
3634 {
3635 rtx op0_subop1 = XEXP (trueop0, 1);
3636 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3637 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3638
3639 /* Apply the outer ordering vector to the inner one. (The inner
3640 ordering vector is expressly permitted to be of a different
3641 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3642 then the two VEC_SELECTs cancel. */
3643 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3644 {
3645 rtx x = XVECEXP (trueop1, 0, i);
3646 if (!CONST_INT_P (x))
3647 return 0;
3648 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3649 if (!CONST_INT_P (y) || i != INTVAL (y))
3650 return 0;
3651 }
3652 return XEXP (trueop0, 0);
3653 }
3654
3655 return 0;
3656 case VEC_CONCAT:
3657 {
3658 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3659 ? GET_MODE (trueop0)
3660 : GET_MODE_INNER (mode));
3661 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3662 ? GET_MODE (trueop1)
3663 : GET_MODE_INNER (mode));
3664
3665 gcc_assert (VECTOR_MODE_P (mode));
3666 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3667 == GET_MODE_SIZE (mode));
3668
3669 if (VECTOR_MODE_P (op0_mode))
3670 gcc_assert (GET_MODE_INNER (mode)
3671 == GET_MODE_INNER (op0_mode));
3672 else
3673 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3674
3675 if (VECTOR_MODE_P (op1_mode))
3676 gcc_assert (GET_MODE_INNER (mode)
3677 == GET_MODE_INNER (op1_mode));
3678 else
3679 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3680
3681 if ((GET_CODE (trueop0) == CONST_VECTOR
3682 || CONST_SCALAR_INT_P (trueop0)
3683 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3684 && (GET_CODE (trueop1) == CONST_VECTOR
3685 || CONST_SCALAR_INT_P (trueop1)
3686 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3687 {
3688 int elt_size = GET_MODE_UNIT_SIZE (mode);
3689 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3690 rtvec v = rtvec_alloc (n_elts);
3691 unsigned int i;
3692 unsigned in_n_elts = 1;
3693
3694 if (VECTOR_MODE_P (op0_mode))
3695 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3696 for (i = 0; i < n_elts; i++)
3697 {
3698 if (i < in_n_elts)
3699 {
3700 if (!VECTOR_MODE_P (op0_mode))
3701 RTVEC_ELT (v, i) = trueop0;
3702 else
3703 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3704 }
3705 else
3706 {
3707 if (!VECTOR_MODE_P (op1_mode))
3708 RTVEC_ELT (v, i) = trueop1;
3709 else
3710 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3711 i - in_n_elts);
3712 }
3713 }
3714
3715 return gen_rtx_CONST_VECTOR (mode, v);
3716 }
3717
3718 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3719 Restrict the transformation to avoid generating a VEC_SELECT with a
3720 mode unrelated to its operand. */
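/* For instance,
     (vec_concat:V2SI
       (vec_select:SI (reg:V2SI x) (parallel [(const_int 1)]))
       (vec_select:SI (reg:V2SI x) (parallel [(const_int 0)])))
   becomes
     (vec_select:V2SI (reg:V2SI x)
       (parallel [(const_int 1) (const_int 0)])). */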
3721 if (GET_CODE (trueop0) == VEC_SELECT
3722 && GET_CODE (trueop1) == VEC_SELECT
3723 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3724 && GET_MODE (XEXP (trueop0, 0)) == mode)
3725 {
3726 rtx par0 = XEXP (trueop0, 1);
3727 rtx par1 = XEXP (trueop1, 1);
3728 int len0 = XVECLEN (par0, 0);
3729 int len1 = XVECLEN (par1, 0);
3730 rtvec vec = rtvec_alloc (len0 + len1);
3731 for (int i = 0; i < len0; i++)
3732 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3733 for (int i = 0; i < len1; i++)
3734 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3735 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3736 gen_rtx_PARALLEL (VOIDmode, vec));
3737 }
3738 }
3739 return 0;
3740
3741 default:
3742 gcc_unreachable ();
3743 }
3744
3745 return 0;
3746 }
3747
3748 rtx
3749 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3750 rtx op0, rtx op1)
3751 {
3752 unsigned int width = GET_MODE_PRECISION (mode);
3753
3754 if (VECTOR_MODE_P (mode)
3755 && code != VEC_CONCAT
3756 && GET_CODE (op0) == CONST_VECTOR
3757 && GET_CODE (op1) == CONST_VECTOR)
3758 {
3759 unsigned n_elts = GET_MODE_NUNITS (mode);
3760 machine_mode op0mode = GET_MODE (op0);
3761 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3762 machine_mode op1mode = GET_MODE (op1);
3763 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3764 rtvec v = rtvec_alloc (n_elts);
3765 unsigned int i;
3766
3767 gcc_assert (op0_n_elts == n_elts);
3768 gcc_assert (op1_n_elts == n_elts);
3769 for (i = 0; i < n_elts; i++)
3770 {
3771 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3772 CONST_VECTOR_ELT (op0, i),
3773 CONST_VECTOR_ELT (op1, i));
3774 if (!x)
3775 return 0;
3776 RTVEC_ELT (v, i) = x;
3777 }
3778
3779 return gen_rtx_CONST_VECTOR (mode, v);
3780 }
3781
3782 if (VECTOR_MODE_P (mode)
3783 && code == VEC_CONCAT
3784 && (CONST_SCALAR_INT_P (op0)
3785 || GET_CODE (op0) == CONST_FIXED
3786 || CONST_DOUBLE_AS_FLOAT_P (op0))
3787 && (CONST_SCALAR_INT_P (op1)
3788 || CONST_DOUBLE_AS_FLOAT_P (op1)
3789 || GET_CODE (op1) == CONST_FIXED))
3790 {
3791 unsigned n_elts = GET_MODE_NUNITS (mode);
3792 rtvec v = rtvec_alloc (n_elts);
3793
3794 gcc_assert (n_elts >= 2);
3795 if (n_elts == 2)
3796 {
3797 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3798 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3799
3800 RTVEC_ELT (v, 0) = op0;
3801 RTVEC_ELT (v, 1) = op1;
3802 }
3803 else
3804 {
3805 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3806 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3807 unsigned i;
3808
3809 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3810 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3811 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3812
3813 for (i = 0; i < op0_n_elts; ++i)
3814 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3815 for (i = 0; i < op1_n_elts; ++i)
3816 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3817 }
3818
3819 return gen_rtx_CONST_VECTOR (mode, v);
3820 }
3821
3822 if (SCALAR_FLOAT_MODE_P (mode)
3823 && CONST_DOUBLE_AS_FLOAT_P (op0)
3824 && CONST_DOUBLE_AS_FLOAT_P (op1)
3825 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3826 {
3827 if (code == AND
3828 || code == IOR
3829 || code == XOR)
3830 {
3831 long tmp0[4];
3832 long tmp1[4];
3833 REAL_VALUE_TYPE r;
3834 int i;
3835
3836 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3837 GET_MODE (op0));
3838 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3839 GET_MODE (op1));
3840 for (i = 0; i < 4; i++)
3841 {
3842 switch (code)
3843 {
3844 case AND:
3845 tmp0[i] &= tmp1[i];
3846 break;
3847 case IOR:
3848 tmp0[i] |= tmp1[i];
3849 break;
3850 case XOR:
3851 tmp0[i] ^= tmp1[i];
3852 break;
3853 default:
3854 gcc_unreachable ();
3855 }
3856 }
3857 real_from_target (&r, tmp0, mode);
3858 return const_double_from_real_value (r, mode);
3859 }
3860 else
3861 {
3862 REAL_VALUE_TYPE f0, f1, value, result;
3863 bool inexact;
3864
3865 real_convert (&f0, mode, CONST_DOUBLE_REAL_VALUE (op0));
3866 real_convert (&f1, mode, CONST_DOUBLE_REAL_VALUE (op1));
3867
3868 if (HONOR_SNANS (mode)
3869 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3870 return 0;
3871
3872 if (code == DIV
3873 && real_equal (&f1, &dconst0)
3874 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3875 return 0;
3876
3877 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3878 && flag_trapping_math
3879 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3880 {
3881 int s0 = REAL_VALUE_NEGATIVE (f0);
3882 int s1 = REAL_VALUE_NEGATIVE (f1);
3883
3884 switch (code)
3885 {
3886 case PLUS:
3887 /* Inf + -Inf = NaN plus exception. */
3888 if (s0 != s1)
3889 return 0;
3890 break;
3891 case MINUS:
3892 /* Inf - Inf = NaN plus exception. */
3893 if (s0 == s1)
3894 return 0;
3895 break;
3896 case DIV:
3897 /* Inf / Inf = NaN plus exception. */
3898 return 0;
3899 default:
3900 break;
3901 }
3902 }
3903
3904 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3905 && flag_trapping_math
3906 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3907 || (REAL_VALUE_ISINF (f1)
3908 && real_equal (&f0, &dconst0))))
3909 /* Inf * 0 = NaN plus exception. */
3910 return 0;
3911
3912 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3913 &f0, &f1);
3914 real_convert (&result, mode, &value);
3915
3916 /* Don't constant fold this floating point operation if
3917 the result has overflowed and flag_trapping_math is set. */
3918
3919 if (flag_trapping_math
3920 && MODE_HAS_INFINITIES (mode)
3921 && REAL_VALUE_ISINF (result)
3922 && !REAL_VALUE_ISINF (f0)
3923 && !REAL_VALUE_ISINF (f1))
3924 /* Overflow plus exception. */
3925 return 0;
3926
3927 /* Don't constant fold this floating point operation if the
3928 result may depend upon the run-time rounding mode and
3929 flag_rounding_math is set, or if GCC's software emulation
3930 is unable to accurately represent the result. */
3931
3932 if ((flag_rounding_math
3933 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3934 && (inexact || !real_identical (&result, &value)))
3935 return NULL_RTX;
3936
3937 return const_double_from_real_value (result, mode);
3938 }
3939 }
3940
3941 /* We can fold some multi-word operations. */
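/* For instance, adding two CONST_WIDE_INT operands in TImode is done
   with wide_int arithmetic below and the result is re-emitted with
   immed_wide_int_const. */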
3942 if ((GET_MODE_CLASS (mode) == MODE_INT
3943 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3944 && CONST_SCALAR_INT_P (op0)
3945 && CONST_SCALAR_INT_P (op1))
3946 {
3947 wide_int result;
3948 bool overflow;
3949 rtx_mode_t pop0 = std::make_pair (op0, mode);
3950 rtx_mode_t pop1 = std::make_pair (op1, mode);
3951
3952 #if TARGET_SUPPORTS_WIDE_INT == 0
3953 /* This assert keeps the simplification from producing a result
3954 that cannot be represented in a CONST_DOUBLE, but a lot of
3955 upstream callers expect that this function never fails to
3956 simplify something, so if you added this to the test
3957 above, the code would die later anyway. If this assert
3958 fires, you just need to make the port support wide int. */
3959 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3960 #endif
3961 switch (code)
3962 {
3963 case MINUS:
3964 result = wi::sub (pop0, pop1);
3965 break;
3966
3967 case PLUS:
3968 result = wi::add (pop0, pop1);
3969 break;
3970
3971 case MULT:
3972 result = wi::mul (pop0, pop1);
3973 break;
3974
3975 case DIV:
3976 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3977 if (overflow)
3978 return NULL_RTX;
3979 break;
3980
3981 case MOD:
3982 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3983 if (overflow)
3984 return NULL_RTX;
3985 break;
3986
3987 case UDIV:
3988 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3989 if (overflow)
3990 return NULL_RTX;
3991 break;
3992
3993 case UMOD:
3994 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3995 if (overflow)
3996 return NULL_RTX;
3997 break;
3998
3999 case AND:
4000 result = wi::bit_and (pop0, pop1);
4001 break;
4002
4003 case IOR:
4004 result = wi::bit_or (pop0, pop1);
4005 break;
4006
4007 case XOR:
4008 result = wi::bit_xor (pop0, pop1);
4009 break;
4010
4011 case SMIN:
4012 result = wi::smin (pop0, pop1);
4013 break;
4014
4015 case SMAX:
4016 result = wi::smax (pop0, pop1);
4017 break;
4018
4019 case UMIN:
4020 result = wi::umin (pop0, pop1);
4021 break;
4022
4023 case UMAX:
4024 result = wi::umax (pop0, pop1);
4025 break;
4026
4027 case LSHIFTRT:
4028 case ASHIFTRT:
4029 case ASHIFT:
4030 {
4031 wide_int wop1 = pop1;
4032 if (SHIFT_COUNT_TRUNCATED)
4033 wop1 = wi::umod_trunc (wop1, width);
4034 else if (wi::geu_p (wop1, width))
4035 return NULL_RTX;
4036
4037 switch (code)
4038 {
4039 case LSHIFTRT:
4040 result = wi::lrshift (pop0, wop1);
4041 break;
4042
4043 case ASHIFTRT:
4044 result = wi::arshift (pop0, wop1);
4045 break;
4046
4047 case ASHIFT:
4048 result = wi::lshift (pop0, wop1);
4049 break;
4050
4051 default:
4052 gcc_unreachable ();
4053 }
4054 break;
4055 }
4056 case ROTATE:
4057 case ROTATERT:
4058 {
4059 if (wi::neg_p (pop1))
4060 return NULL_RTX;
4061
4062 switch (code)
4063 {
4064 case ROTATE:
4065 result = wi::lrotate (pop0, pop1);
4066 break;
4067
4068 case ROTATERT:
4069 result = wi::rrotate (pop0, pop1);
4070 break;
4071
4072 default:
4073 gcc_unreachable ();
4074 }
4075 break;
4076 }
4077 default:
4078 return NULL_RTX;
4079 }
4080 return immed_wide_int_const (result, mode);
4081 }
4082
4083 return NULL_RTX;
4084 }
4085
4086
4087 \f
4088 /* Return a positive integer if X should sort after Y. The value
4089 returned is 1 if and only if X and Y are both regs. */
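/* (The caller, simplify_plus_minus, uses the distinguished value 1 to
   recognize that only registers were reordered, which by itself does
   not count as a canonicalization.) */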
4090
4091 static int
4092 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4093 {
4094 int result;
4095
4096 result = (commutative_operand_precedence (y)
4097 - commutative_operand_precedence (x));
4098 if (result)
4099 return result + result;
4100
4101 /* Group together equal REGs to do more simplification. */
4102 if (REG_P (x) && REG_P (y))
4103 return REGNO (x) > REGNO (y);
4104
4105 return 0;
4106 }
4107
4108 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4109 operands may be another PLUS or MINUS.
4110
4111 Rather than test for specific cases, we do this by a brute-force method
4112 and do all possible simplifications until no more changes occur. Then
4113 we rebuild the operation.
4114
4115 May return NULL_RTX when no changes were made. */
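/* For instance, (plus:SI (minus:SI x (minus:SI y z)) w) is flattened
   into the entries x(+), w(+), y(-), z(+); the entries are then sorted
   and simplified pairwise, and the result is rebuilt as a chain of
   PLUS and MINUS expressions. */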
4116
4117 static rtx
4118 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4119 rtx op1)
4120 {
4121 struct simplify_plus_minus_op_data
4122 {
4123 rtx op;
4124 short neg;
4125 } ops[16];
4126 rtx result, tem;
4127 int n_ops = 2;
4128 int changed, n_constants, canonicalized = 0;
4129 int i, j;
4130
4131 memset (ops, 0, sizeof ops);
4132
4133 /* Set up the two operands and then expand them until nothing has been
4134 changed. If we run out of room in our array, give up; this should
4135 almost never happen. */
4136
4137 ops[0].op = op0;
4138 ops[0].neg = 0;
4139 ops[1].op = op1;
4140 ops[1].neg = (code == MINUS);
4141
4142 do
4143 {
4144 changed = 0;
4145 n_constants = 0;
4146
4147 for (i = 0; i < n_ops; i++)
4148 {
4149 rtx this_op = ops[i].op;
4150 int this_neg = ops[i].neg;
4151 enum rtx_code this_code = GET_CODE (this_op);
4152
4153 switch (this_code)
4154 {
4155 case PLUS:
4156 case MINUS:
4157 if (n_ops == ARRAY_SIZE (ops))
4158 return NULL_RTX;
4159
4160 ops[n_ops].op = XEXP (this_op, 1);
4161 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4162 n_ops++;
4163
4164 ops[i].op = XEXP (this_op, 0);
4165 changed = 1;
4166 /* If this operand was negated then we will potentially
4167 canonicalize the expression. Similarly, if we don't
4168 place the operands adjacent, we're re-ordering the
4169 expression and thus might be performing a
4170 canonicalization. Ignore register re-ordering.
4171 ??? It might be better to shuffle the ops array here,
4172 but then (plus (plus (A, B), plus (C, D))) wouldn't
4173 be seen as non-canonical. */
4174 if (this_neg
4175 || (i != n_ops - 2
4176 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4177 canonicalized = 1;
4178 break;
4179
4180 case NEG:
4181 ops[i].op = XEXP (this_op, 0);
4182 ops[i].neg = ! this_neg;
4183 changed = 1;
4184 canonicalized = 1;
4185 break;
4186
4187 case CONST:
4188 if (n_ops != ARRAY_SIZE (ops)
4189 && GET_CODE (XEXP (this_op, 0)) == PLUS
4190 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4191 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4192 {
4193 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4194 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4195 ops[n_ops].neg = this_neg;
4196 n_ops++;
4197 changed = 1;
4198 canonicalized = 1;
4199 }
4200 break;
4201
4202 case NOT:
4203 /* ~a -> (-a - 1) */
4204 if (n_ops != ARRAY_SIZE (ops))
4205 {
4206 ops[n_ops].op = CONSTM1_RTX (mode);
4207 ops[n_ops++].neg = this_neg;
4208 ops[i].op = XEXP (this_op, 0);
4209 ops[i].neg = !this_neg;
4210 changed = 1;
4211 canonicalized = 1;
4212 }
4213 break;
4214
4215 case CONST_INT:
4216 n_constants++;
4217 if (this_neg)
4218 {
4219 ops[i].op = neg_const_int (mode, this_op);
4220 ops[i].neg = 0;
4221 changed = 1;
4222 canonicalized = 1;
4223 }
4224 break;
4225
4226 default:
4227 break;
4228 }
4229 }
4230 }
4231 while (changed);
4232
4233 if (n_constants > 1)
4234 canonicalized = 1;
4235
4236 gcc_assert (n_ops >= 2);
4237
4238 /* If we only have two operands, we can avoid the loops. */
4239 if (n_ops == 2)
4240 {
4241 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4242 rtx lhs, rhs;
4243
4244 /* Get the two operands. Be careful with the order, especially for
4245 the cases where code == MINUS. */
4246 if (ops[0].neg && ops[1].neg)
4247 {
4248 lhs = gen_rtx_NEG (mode, ops[0].op);
4249 rhs = ops[1].op;
4250 }
4251 else if (ops[0].neg)
4252 {
4253 lhs = ops[1].op;
4254 rhs = ops[0].op;
4255 }
4256 else
4257 {
4258 lhs = ops[0].op;
4259 rhs = ops[1].op;
4260 }
4261
4262 return simplify_const_binary_operation (code, mode, lhs, rhs);
4263 }
4264
4265 /* Now simplify each pair of operands until nothing changes. */
4266 while (1)
4267 {
4268 /* Insertion sort is good enough for a small array. */
4269 for (i = 1; i < n_ops; i++)
4270 {
4271 struct simplify_plus_minus_op_data save;
4272 int cmp;
4273
4274 j = i - 1;
4275 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4276 if (cmp <= 0)
4277 continue;
4278 /* Just swapping registers doesn't count as canonicalization. */
4279 if (cmp != 1)
4280 canonicalized = 1;
4281
4282 save = ops[i];
4283 do
4284 ops[j + 1] = ops[j];
4285 while (j--
4286 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4287 ops[j + 1] = save;
4288 }
4289
4290 changed = 0;
4291 for (i = n_ops - 1; i > 0; i--)
4292 for (j = i - 1; j >= 0; j--)
4293 {
4294 rtx lhs = ops[j].op, rhs = ops[i].op;
4295 int lneg = ops[j].neg, rneg = ops[i].neg;
4296
4297 if (lhs != 0 && rhs != 0)
4298 {
4299 enum rtx_code ncode = PLUS;
4300
4301 if (lneg != rneg)
4302 {
4303 ncode = MINUS;
4304 if (lneg)
4305 std::swap (lhs, rhs);
4306 }
4307 else if (swap_commutative_operands_p (lhs, rhs))
4308 std::swap (lhs, rhs);
4309
4310 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4311 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4312 {
4313 rtx tem_lhs, tem_rhs;
4314
4315 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4316 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4317 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4318 tem_rhs);
4319
4320 if (tem && !CONSTANT_P (tem))
4321 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4322 }
4323 else
4324 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4325
4326 if (tem)
4327 {
4328 /* Reject "simplifications" that just wrap the two
4329 arguments in a CONST. Failure to do so can result
4330 in infinite recursion with simplify_binary_operation
4331 when it calls us to simplify CONST operations.
4332 Also, if we find such a simplification, don't try
4333 any more combinations with this rhs: we must have
4334 something like symbol+offset, i.e. one of the
4335 trivial CONST expressions we handle later. */
4336 if (GET_CODE (tem) == CONST
4337 && GET_CODE (XEXP (tem, 0)) == ncode
4338 && XEXP (XEXP (tem, 0), 0) == lhs
4339 && XEXP (XEXP (tem, 0), 1) == rhs)
4340 break;
4341 lneg &= rneg;
4342 if (GET_CODE (tem) == NEG)
4343 tem = XEXP (tem, 0), lneg = !lneg;
4344 if (CONST_INT_P (tem) && lneg)
4345 tem = neg_const_int (mode, tem), lneg = 0;
4346
4347 ops[i].op = tem;
4348 ops[i].neg = lneg;
4349 ops[j].op = NULL_RTX;
4350 changed = 1;
4351 canonicalized = 1;
4352 }
4353 }
4354 }
4355
4356 if (!changed)
4357 break;
4358
4359 /* Pack all the operands to the lower-numbered entries. */
4360 for (i = 0, j = 0; j < n_ops; j++)
4361 if (ops[j].op)
4362 {
4363 ops[i] = ops[j];
4364 i++;
4365 }
4366 n_ops = i;
4367 }
4368
4369 /* If nothing changed, fail. */
4370 if (!canonicalized)
4371 return NULL_RTX;
4372
4373 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4374 if (n_ops == 2
4375 && CONST_INT_P (ops[1].op)
4376 && CONSTANT_P (ops[0].op)
4377 && ops[0].neg)
4378 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4379
4380 /* We suppressed creation of trivial CONST expressions in the
4381 combination loop to avoid recursion. Create one manually now.
4382 The combination loop should have ensured that there is exactly
4383 one CONST_INT, and the sort will have ensured that it is last
4384 in the array and that any other constant will be next-to-last. */
4385
4386 if (n_ops > 1
4387 && CONST_INT_P (ops[n_ops - 1].op)
4388 && CONSTANT_P (ops[n_ops - 2].op))
4389 {
4390 rtx value = ops[n_ops - 1].op;
4391 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4392 value = neg_const_int (mode, value);
4393 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4394 INTVAL (value));
4395 n_ops--;
4396 }
4397
4398 /* Put a non-negated operand first, if possible. */
4399
4400 for (i = 0; i < n_ops && ops[i].neg; i++)
4401 continue;
4402 if (i == n_ops)
4403 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4404 else if (i != 0)
4405 {
4406 tem = ops[0].op;
4407 ops[0] = ops[i];
4408 ops[i].op = tem;
4409 ops[i].neg = 1;
4410 }
4411
4412 /* Now make the result by performing the requested operations. */
4413 result = ops[0].op;
4414 for (i = 1; i < n_ops; i++)
4415 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4416 mode, result, ops[i].op);
4417
4418 return result;
4419 }
4420
4421 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4422 static bool
4423 plus_minus_operand_p (const_rtx x)
4424 {
4425 return GET_CODE (x) == PLUS
4426 || GET_CODE (x) == MINUS
4427 || (GET_CODE (x) == CONST
4428 && GET_CODE (XEXP (x, 0)) == PLUS
4429 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4430 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4431 }
4432
4433 /* Like simplify_binary_operation except used for relational operators.
4434 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4435 not both be VOIDmode as well.
4436
4437 CMP_MODE specifies the mode in which the comparison is done, so it is
4438 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4439 the operands or, if both are VOIDmode, the operands are compared in
4440 "infinite precision". */
4441 rtx
4442 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4443 machine_mode cmp_mode, rtx op0, rtx op1)
4444 {
4445 rtx tem, trueop0, trueop1;
4446
4447 if (cmp_mode == VOIDmode)
4448 cmp_mode = GET_MODE (op0);
4449 if (cmp_mode == VOIDmode)
4450 cmp_mode = GET_MODE (op1);
4451
4452 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4453 if (tem)
4454 {
4455 if (SCALAR_FLOAT_MODE_P (mode))
4456 {
4457 if (tem == const0_rtx)
4458 return CONST0_RTX (mode);
4459 #ifdef FLOAT_STORE_FLAG_VALUE
4460 {
4461 REAL_VALUE_TYPE val;
4462 val = FLOAT_STORE_FLAG_VALUE (mode);
4463 return const_double_from_real_value (val, mode);
4464 }
4465 #else
4466 return NULL_RTX;
4467 #endif
4468 }
4469 if (VECTOR_MODE_P (mode))
4470 {
4471 if (tem == const0_rtx)
4472 return CONST0_RTX (mode);
4473 #ifdef VECTOR_STORE_FLAG_VALUE
4474 {
4475 int i, units;
4476 rtvec v;
4477
4478 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4479 if (val == NULL_RTX)
4480 return NULL_RTX;
4481 if (val == const1_rtx)
4482 return CONST1_RTX (mode);
4483
4484 units = GET_MODE_NUNITS (mode);
4485 v = rtvec_alloc (units);
4486 for (i = 0; i < units; i++)
4487 RTVEC_ELT (v, i) = val;
4488 return gen_rtx_raw_CONST_VECTOR (mode, v);
4489 }
4490 #else
4491 return NULL_RTX;
4492 #endif
4493 }
4494
4495 return tem;
4496 }
4497
4498 /* For the following tests, ensure const0_rtx is op1. */
4499 if (swap_commutative_operands_p (op0, op1)
4500 || (op0 == const0_rtx && op1 != const0_rtx))
4501 std::swap (op0, op1), code = swap_condition (code);
4502
4503 /* If op0 is a compare, extract the comparison arguments from it. */
4504 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4505 return simplify_gen_relational (code, mode, VOIDmode,
4506 XEXP (op0, 0), XEXP (op0, 1));
4507
4508 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4509 || CC0_P (op0))
4510 return NULL_RTX;
4511
4512 trueop0 = avoid_constant_pool_reference (op0);
4513 trueop1 = avoid_constant_pool_reference (op1);
4514 return simplify_relational_operation_1 (code, mode, cmp_mode,
4515 trueop0, trueop1);
4516 }
4517
4518 /* This part of simplify_relational_operation is only used when CMP_MODE
4519 is not in class MODE_CC (i.e. it is a real comparison).
4520
4521 MODE is the mode of the result, while CMP_MODE specifies the mode
4522 in which the comparison is done, so it is the mode of the operands. */
4523
4524 static rtx
4525 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4526 machine_mode cmp_mode, rtx op0, rtx op1)
4527 {
4528 enum rtx_code op0code = GET_CODE (op0);
4529
4530 if (op1 == const0_rtx && COMPARISON_P (op0))
4531 {
4532 /* If op0 is a comparison, extract the comparison arguments
4533 from it. */
4534 if (code == NE)
4535 {
4536 if (GET_MODE (op0) == mode)
4537 return simplify_rtx (op0);
4538 else
4539 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4540 XEXP (op0, 0), XEXP (op0, 1));
4541 }
4542 else if (code == EQ)
4543 {
4544 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4545 if (new_code != UNKNOWN)
4546 return simplify_gen_relational (new_code, mode, VOIDmode,
4547 XEXP (op0, 0), XEXP (op0, 1));
4548 }
4549 }
4550
4551 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4552 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
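/* For instance, (ltu:SI (plus:SI a (const_int -1)) (const_int -1)),
   which tests a != 0, becomes (geu:SI a (const_int 1)). */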
4553 if ((code == LTU || code == GEU)
4554 && GET_CODE (op0) == PLUS
4555 && CONST_INT_P (XEXP (op0, 1))
4556 && (rtx_equal_p (op1, XEXP (op0, 0))
4557 || rtx_equal_p (op1, XEXP (op0, 1)))
4558 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4559 && XEXP (op0, 1) != const0_rtx)
4560 {
4561 rtx new_cmp
4562 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4563 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4564 cmp_mode, XEXP (op0, 0), new_cmp);
4565 }
4566
4567 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4568 if ((code == LTU || code == GEU)
4569 && GET_CODE (op0) == PLUS
4570 && rtx_equal_p (op1, XEXP (op0, 1))
4571 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4572 && !rtx_equal_p (op1, XEXP (op0, 0)))
4573 return simplify_gen_relational (code, mode, cmp_mode, op0,
4574 copy_rtx (XEXP (op0, 0)));
4575
4576 if (op1 == const0_rtx)
4577 {
4578 /* Canonicalize (GTU x 0) as (NE x 0). */
4579 if (code == GTU)
4580 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4581 /* Canonicalize (LEU x 0) as (EQ x 0). */
4582 if (code == LEU)
4583 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4584 }
4585 else if (op1 == const1_rtx)
4586 {
4587 switch (code)
4588 {
4589 case GE:
4590 /* Canonicalize (GE x 1) as (GT x 0). */
4591 return simplify_gen_relational (GT, mode, cmp_mode,
4592 op0, const0_rtx);
4593 case GEU:
4594 /* Canonicalize (GEU x 1) as (NE x 0). */
4595 return simplify_gen_relational (NE, mode, cmp_mode,
4596 op0, const0_rtx);
4597 case LT:
4598 /* Canonicalize (LT x 1) as (LE x 0). */
4599 return simplify_gen_relational (LE, mode, cmp_mode,
4600 op0, const0_rtx);
4601 case LTU:
4602 /* Canonicalize (LTU x 1) as (EQ x 0). */
4603 return simplify_gen_relational (EQ, mode, cmp_mode,
4604 op0, const0_rtx);
4605 default:
4606 break;
4607 }
4608 }
4609 else if (op1 == constm1_rtx)
4610 {
4611 /* Canonicalize (LE x -1) as (LT x 0). */
4612 if (code == LE)
4613 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4614 /* Canonicalize (GT x -1) as (GE x 0). */
4615 if (code == GT)
4616 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4617 }
4618
4619 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4620 if ((code == EQ || code == NE)
4621 && (op0code == PLUS || op0code == MINUS)
4622 && CONSTANT_P (op1)
4623 && CONSTANT_P (XEXP (op0, 1))
4624 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4625 {
4626 rtx x = XEXP (op0, 0);
4627 rtx c = XEXP (op0, 1);
4628 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4629 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4630
4631 /* Detect an infinite recursive condition, where we oscillate at this
4632 simplification case between:
4633 A + B == C <---> C - B == A,
4634 where A, B, and C are all constants with non-simplifiable expressions,
4635 usually SYMBOL_REFs. */
4636 if (GET_CODE (tem) == invcode
4637 && CONSTANT_P (x)
4638 && rtx_equal_p (c, XEXP (tem, 1)))
4639 return NULL_RTX;
4640
4641 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4642 }
4643
4644 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4645 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4646 if (code == NE
4647 && op1 == const0_rtx
4648 && GET_MODE_CLASS (mode) == MODE_INT
4649 && cmp_mode != VOIDmode
4650 /* ??? Work-around BImode bugs in the ia64 backend. */
4651 && mode != BImode
4652 && cmp_mode != BImode
4653 && nonzero_bits (op0, cmp_mode) == 1
4654 && STORE_FLAG_VALUE == 1)
4655 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4656 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4657 : lowpart_subreg (mode, op0, cmp_mode);
4658
4659 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4660 if ((code == EQ || code == NE)
4661 && op1 == const0_rtx
4662 && op0code == XOR)
4663 return simplify_gen_relational (code, mode, cmp_mode,
4664 XEXP (op0, 0), XEXP (op0, 1));
4665
4666 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4667 if ((code == EQ || code == NE)
4668 && op0code == XOR
4669 && rtx_equal_p (XEXP (op0, 0), op1)
4670 && !side_effects_p (XEXP (op0, 0)))
4671 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4672 CONST0_RTX (mode));
4673
4674 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4675 if ((code == EQ || code == NE)
4676 && op0code == XOR
4677 && rtx_equal_p (XEXP (op0, 1), op1)
4678 && !side_effects_p (XEXP (op0, 1)))
4679 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4680 CONST0_RTX (mode));
4681
4682 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4683 if ((code == EQ || code == NE)
4684 && op0code == XOR
4685 && CONST_SCALAR_INT_P (op1)
4686 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4687 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4688 simplify_gen_binary (XOR, cmp_mode,
4689 XEXP (op0, 1), op1));
4690
4691 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4692 can be implemented with a BICS instruction on some targets, or
4693 constant-folded if y is a constant. */
4694 if ((code == EQ || code == NE)
4695 && op0code == AND
4696 && rtx_equal_p (XEXP (op0, 0), op1)
4697 && !side_effects_p (op1)
4698 && op1 != CONST0_RTX (cmp_mode))
4699 {
4700 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4701 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4702
4703 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4704 CONST0_RTX (cmp_mode));
4705 }
4706
4707 /* Likewise for (eq/ne (and x y) y). */
4708 if ((code == EQ || code == NE)
4709 && op0code == AND
4710 && rtx_equal_p (XEXP (op0, 1), op1)
4711 && !side_effects_p (op1)
4712 && op1 != CONST0_RTX (cmp_mode))
4713 {
4714 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4715 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4716
4717 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4718 CONST0_RTX (cmp_mode));
4719 }
4720
4721 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4722 if ((code == EQ || code == NE)
4723 && GET_CODE (op0) == BSWAP
4724 && CONST_SCALAR_INT_P (op1))
4725 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4726 simplify_gen_unary (BSWAP, cmp_mode,
4727 op1, cmp_mode));
4728
4729 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4730 if ((code == EQ || code == NE)
4731 && GET_CODE (op0) == BSWAP
4732 && GET_CODE (op1) == BSWAP)
4733 return simplify_gen_relational (code, mode, cmp_mode,
4734 XEXP (op0, 0), XEXP (op1, 0));
4735
4736 if (op0code == POPCOUNT && op1 == const0_rtx)
4737 switch (code)
4738 {
4739 case EQ:
4740 case LE:
4741 case LEU:
4742 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4743 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4744 XEXP (op0, 0), const0_rtx);
4745
4746 case NE:
4747 case GT:
4748 case GTU:
4749 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4750 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4751 XEXP (op0, 0), const0_rtx);
4752
4753 default:
4754 break;
4755 }
4756
4757 return NULL_RTX;
4758 }
4759
4760 enum
4761 {
4762 CMP_EQ = 1,
4763 CMP_LT = 2,
4764 CMP_GT = 4,
4765 CMP_LTU = 8,
4766 CMP_GTU = 16
4767 };
4768
4769
4770 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4771 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4772 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4773 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4774 For floating-point comparisons, assume that the operands were ordered. */
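/* For instance, if KNOWN_RESULTS is CMP_GT | CMP_LTU (signed greater,
   unsigned less), then (GT x y) folds to const_true_rtx while
   (GEU x y) folds to const0_rtx. */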
4775
4776 static rtx
4777 comparison_result (enum rtx_code code, int known_results)
4778 {
4779 switch (code)
4780 {
4781 case EQ:
4782 case UNEQ:
4783 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4784 case NE:
4785 case LTGT:
4786 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4787
4788 case LT:
4789 case UNLT:
4790 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4791 case GE:
4792 case UNGE:
4793 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4794
4795 case GT:
4796 case UNGT:
4797 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4798 case LE:
4799 case UNLE:
4800 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4801
4802 case LTU:
4803 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4804 case GEU:
4805 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4806
4807 case GTU:
4808 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4809 case LEU:
4810 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4811
4812 case ORDERED:
4813 return const_true_rtx;
4814 case UNORDERED:
4815 return const0_rtx;
4816 default:
4817 gcc_unreachable ();
4818 }
4819 }
4820
4821 /* Check if the given comparison (done in the given MODE) is actually
4822 a tautology or a contradiction. If the mode is VOIDmode, the
4823 comparison is done in "infinite precision". If no simplification
4824 is possible, this function returns zero. Otherwise, it returns
4825 either const_true_rtx or const0_rtx. */
4826
4827 rtx
4828 simplify_const_relational_operation (enum rtx_code code,
4829 machine_mode mode,
4830 rtx op0, rtx op1)
4831 {
4832 rtx tem;
4833 rtx trueop0;
4834 rtx trueop1;
4835
4836 gcc_assert (mode != VOIDmode
4837 || (GET_MODE (op0) == VOIDmode
4838 && GET_MODE (op1) == VOIDmode));
4839
4840 /* If op0 is a compare, extract the comparison arguments from it. */
4841 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4842 {
4843 op1 = XEXP (op0, 1);
4844 op0 = XEXP (op0, 0);
4845
4846 if (GET_MODE (op0) != VOIDmode)
4847 mode = GET_MODE (op0);
4848 else if (GET_MODE (op1) != VOIDmode)
4849 mode = GET_MODE (op1);
4850 else
4851 return 0;
4852 }
4853
4854 /* We can't simplify MODE_CC values since we don't know what the
4855 actual comparison is. */
4856 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4857 return 0;
4858
4859 /* Make sure the constant is second. */
4860 if (swap_commutative_operands_p (op0, op1))
4861 {
4862 std::swap (op0, op1);
4863 code = swap_condition (code);
4864 }
4865
4866 trueop0 = avoid_constant_pool_reference (op0);
4867 trueop1 = avoid_constant_pool_reference (op1);
4868
4869 /* For integer comparisons of A and B maybe we can simplify A - B and can
4870 then simplify a comparison of that with zero. If A and B are both either
4871 a register or a CONST_INT, this can't help; testing for these cases will
4872 prevent infinite recursion here and speed things up.
4873
4874 We can only do this for EQ and NE comparisons, as otherwise we may
4875 lose or introduce overflow, which we cannot disregard as undefined since
4876 we do not know the signedness of the operation on either the left or
4877 the right-hand side of the comparison. */
4878
4879 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4880 && (code == EQ || code == NE)
4881 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4882 && (REG_P (op1) || CONST_INT_P (trueop1)))
4883 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4884 /* We cannot do this if tem is a nonzero address. */
4885 && ! nonzero_address_p (tem))
4886 return simplify_const_relational_operation (signed_condition (code),
4887 mode, tem, const0_rtx);
4888
4889 if (! HONOR_NANS (mode) && code == ORDERED)
4890 return const_true_rtx;
4891
4892 if (! HONOR_NANS (mode) && code == UNORDERED)
4893 return const0_rtx;
4894
4895 /* For modes without NaNs, if the two operands are equal, we know the
4896 result except if they have side-effects. Even with NaNs we know
4897 the result of unordered comparisons and, if signaling NaNs are
4898 irrelevant, also the result of LT/GT/LTGT. */
4899 if ((! HONOR_NANS (trueop0)
4900 || code == UNEQ || code == UNLE || code == UNGE
4901 || ((code == LT || code == GT || code == LTGT)
4902 && ! HONOR_SNANS (trueop0)))
4903 && rtx_equal_p (trueop0, trueop1)
4904 && ! side_effects_p (trueop0))
4905 return comparison_result (code, CMP_EQ);
4906
4907 /* If the operands are floating-point constants, see if we can fold
4908 the result. */
4909 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4910 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4911 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4912 {
4913 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
4914 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
4915
4916 /* Comparisons are unordered iff at least one of the values is NaN. */
4917 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
4918 switch (code)
4919 {
4920 case UNEQ:
4921 case UNLT:
4922 case UNGT:
4923 case UNLE:
4924 case UNGE:
4925 case NE:
4926 case UNORDERED:
4927 return const_true_rtx;
4928 case EQ:
4929 case LT:
4930 case GT:
4931 case LE:
4932 case GE:
4933 case LTGT:
4934 case ORDERED:
4935 return const0_rtx;
4936 default:
4937 return 0;
4938 }
4939
4940 return comparison_result (code,
4941 (real_equal (d0, d1) ? CMP_EQ :
4942 real_less (d0, d1) ? CMP_LT : CMP_GT));
4943 }
4944
4945 /* Otherwise, see if the operands are both integers. */
4946 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4947 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4948 {
4949 /* It would be nice if we really had a mode here. However, the
4950 largest int representable on the target is as good as
4951 infinite. */
4952 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4953 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4954 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4955
4956 if (wi::eq_p (ptrueop0, ptrueop1))
4957 return comparison_result (code, CMP_EQ);
4958 else
4959 {
4960 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4961 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4962 return comparison_result (code, cr);
4963 }
4964 }
4965
4966 /* Optimize comparisons with upper and lower bounds. */
4967 if (HWI_COMPUTABLE_MODE_P (mode)
4968 && CONST_INT_P (trueop1)
4969 && !side_effects_p (trueop0))
4970 {
4971 int sign;
4972 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4973 HOST_WIDE_INT val = INTVAL (trueop1);
4974 HOST_WIDE_INT mmin, mmax;
4975
4976 if (code == GEU
4977 || code == LEU
4978 || code == GTU
4979 || code == LTU)
4980 sign = 0;
4981 else
4982 sign = 1;
4983
4984 /* Get a reduced range if the sign bit is zero. */
4985 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4986 {
4987 mmin = 0;
4988 mmax = nonzero;
4989 }
4990 else
4991 {
4992 rtx mmin_rtx, mmax_rtx;
4993 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4994
4995 mmin = INTVAL (mmin_rtx);
4996 mmax = INTVAL (mmax_rtx);
4997 if (sign)
4998 {
4999 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5000
5001 mmin >>= (sign_copies - 1);
5002 mmax >>= (sign_copies - 1);
5003 }
5004 }
5005
5006 switch (code)
5007 {
5008 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5009 case GEU:
5010 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5011 return const_true_rtx;
5012 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5013 return const0_rtx;
5014 break;
5015 case GE:
5016 if (val <= mmin)
5017 return const_true_rtx;
5018 if (val > mmax)
5019 return const0_rtx;
5020 break;
5021
5022 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5023 case LEU:
5024 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5025 return const_true_rtx;
5026 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5027 return const0_rtx;
5028 break;
5029 case LE:
5030 if (val >= mmax)
5031 return const_true_rtx;
5032 if (val < mmin)
5033 return const0_rtx;
5034 break;
5035
5036 case EQ:
5037 /* x == y is always false for y out of range. */
5038 if (val < mmin || val > mmax)
5039 return const0_rtx;
5040 break;
5041
5042 /* x > y is always false for y >= mmax, always true for y < mmin. */
5043 case GTU:
5044 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5045 return const0_rtx;
5046 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5047 return const_true_rtx;
5048 break;
5049 case GT:
5050 if (val >= mmax)
5051 return const0_rtx;
5052 if (val < mmin)
5053 return const_true_rtx;
5054 break;
5055
5056 /* x < y is always false for y <= mmin, always true for y > mmax. */
5057 case LTU:
5058 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5059 return const0_rtx;
5060 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5061 return const_true_rtx;
5062 break;
5063 case LT:
5064 if (val <= mmin)
5065 return const0_rtx;
5066 if (val > mmax)
5067 return const_true_rtx;
5068 break;
5069
5070 case NE:
5071 /* x != y is always true for y out of range. */
5072 if (val < mmin || val > mmax)
5073 return const_true_rtx;
5074 break;
5075
5076 default:
5077 break;
5078 }
5079 }
5080
5081 /* Optimize integer comparisons with zero. */
5082 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5083 {
5084 /* Some addresses are known to be nonzero. We don't know
5085 their sign, but equality comparisons are known. */
5086 if (nonzero_address_p (trueop0))
5087 {
5088 if (code == EQ || code == LEU)
5089 return const0_rtx;
5090 if (code == NE || code == GTU)
5091 return const_true_rtx;
5092 }
5093
5094 /* See if the first operand is an IOR with a constant. If so, we
5095 may be able to determine the result of this comparison. */
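/* For instance, (ior:SI x (const_int 4)) is known to be nonzero, so
   (eq (ior:SI x (const_int 4)) (const_int 0)) folds to const0_rtx and
   the corresponding NE folds to const_true_rtx. */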
5096 if (GET_CODE (op0) == IOR)
5097 {
5098 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5099 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5100 {
5101 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5102 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5103 && (UINTVAL (inner_const)
5104 & ((unsigned HOST_WIDE_INT) 1
5105 << sign_bitnum)));
5106
5107 switch (code)
5108 {
5109 case EQ:
5110 case LEU:
5111 return const0_rtx;
5112 case NE:
5113 case GTU:
5114 return const_true_rtx;
5115 case LT:
5116 case LE:
5117 if (has_sign)
5118 return const_true_rtx;
5119 break;
5120 case GT:
5121 case GE:
5122 if (has_sign)
5123 return const0_rtx;
5124 break;
5125 default:
5126 break;
5127 }
5128 }
5129 }
5130 }
5131
5132 /* Optimize comparison of ABS with zero. */
5133 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5134 && (GET_CODE (trueop0) == ABS
5135 || (GET_CODE (trueop0) == FLOAT_EXTEND
5136 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5137 {
5138 switch (code)
5139 {
5140 case LT:
5141 /* Optimize abs(x) < 0.0. */
5142 if (!HONOR_SNANS (mode)
5143 && (!INTEGRAL_MODE_P (mode)
5144 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5145 {
5146 if (INTEGRAL_MODE_P (mode)
5147 && (issue_strict_overflow_warning
5148 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5149 warning (OPT_Wstrict_overflow,
5150 ("assuming signed overflow does not occur when "
5151 "assuming abs (x) < 0 is false"));
5152 return const0_rtx;
5153 }
5154 break;
5155
5156 case GE:
5157 /* Optimize abs(x) >= 0.0. */
5158 if (!HONOR_NANS (mode)
5159 && (!INTEGRAL_MODE_P (mode)
5160 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5161 {
5162 if (INTEGRAL_MODE_P (mode)
5163 && (issue_strict_overflow_warning
5164 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5165 warning (OPT_Wstrict_overflow,
5166 ("assuming signed overflow does not occur when "
5167 "assuming abs (x) >= 0 is true"));
5168 return const_true_rtx;
5169 }
5170 break;
5171
5172 case UNGE:
5173 /* Optimize ! (abs(x) < 0.0). */
5174 return const_true_rtx;
5175
5176 default:
5177 break;
5178 }
5179 }
5180
5181 return 0;
5182 }
5183 \f
5184 /* Simplify CODE, an operation with result mode MODE and three operands,
5185 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5186 a constant. Return 0 if no simplification is possible. */
5187
5188 rtx
5189 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5190 machine_mode op0_mode, rtx op0, rtx op1,
5191 rtx op2)
5192 {
5193 unsigned int width = GET_MODE_PRECISION (mode);
5194 bool any_change = false;
5195 rtx tem, trueop2;
5196
5197 /* VOIDmode means "infinite" precision. */
5198 if (width == 0)
5199 width = HOST_BITS_PER_WIDE_INT;
5200
5201 switch (code)
5202 {
5203 case FMA:
5204 /* Simplify negations around the multiplication. */
5205 /* -a * -b + c => a * b + c. */
5206 if (GET_CODE (op0) == NEG)
5207 {
5208 tem = simplify_unary_operation (NEG, mode, op1, mode);
5209 if (tem)
5210 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5211 }
5212 else if (GET_CODE (op1) == NEG)
5213 {
5214 tem = simplify_unary_operation (NEG, mode, op0, mode);
5215 if (tem)
5216 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5217 }
5218
5219 /* Canonicalize the two multiplication operands. */
5220 /* a * -b + c => -b * a + c. */
5221 if (swap_commutative_operands_p (op0, op1))
5222 std::swap (op0, op1), any_change = true;
5223
5224 if (any_change)
5225 return gen_rtx_FMA (mode, op0, op1, op2);
5226 return NULL_RTX;
5227
5228 case SIGN_EXTRACT:
5229 case ZERO_EXTRACT:
5230 if (CONST_INT_P (op0)
5231 && CONST_INT_P (op1)
5232 && CONST_INT_P (op2)
5233 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5234 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5235 {
5236 /* Extracting a bit-field from a constant. */
5237 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5238 HOST_WIDE_INT op1val = INTVAL (op1);
5239 HOST_WIDE_INT op2val = INTVAL (op2);
5240 if (BITS_BIG_ENDIAN)
5241 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5242 else
5243 val >>= op2val;
5244
5245 if (HOST_BITS_PER_WIDE_INT != op1val)
5246 {
5247 /* First zero-extend. */
5248 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5249 /* If desired, propagate sign bit. */
5250 if (code == SIGN_EXTRACT
5251 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5252 != 0)
5253 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5254 }
5255
5256 return gen_int_mode (val, mode);
5257 }
5258 break;
5259
5260 case IF_THEN_ELSE:
5261 if (CONST_INT_P (op0))
5262 return op0 != const0_rtx ? op1 : op2;
5263
5264 /* Convert c ? a : a into "a". */
5265 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5266 return op1;
5267
5268 /* Convert a != b ? a : b into "a". */
5269 if (GET_CODE (op0) == NE
5270 && ! side_effects_p (op0)
5271 && ! HONOR_NANS (mode)
5272 && ! HONOR_SIGNED_ZEROS (mode)
5273 && ((rtx_equal_p (XEXP (op0, 0), op1)
5274 && rtx_equal_p (XEXP (op0, 1), op2))
5275 || (rtx_equal_p (XEXP (op0, 0), op2)
5276 && rtx_equal_p (XEXP (op0, 1), op1))))
5277 return op1;
5278
5279 /* Convert a == b ? a : b into "b". */
5280 if (GET_CODE (op0) == EQ
5281 && ! side_effects_p (op0)
5282 && ! HONOR_NANS (mode)
5283 && ! HONOR_SIGNED_ZEROS (mode)
5284 && ((rtx_equal_p (XEXP (op0, 0), op1)
5285 && rtx_equal_p (XEXP (op0, 1), op2))
5286 || (rtx_equal_p (XEXP (op0, 0), op2)
5287 && rtx_equal_p (XEXP (op0, 1), op1))))
5288 return op2;
5289
5290 /* Convert (!c) != {0,...,0} ? a : b into
5291 c != {0,...,0} ? b : a for vector modes. */
5292 if (VECTOR_MODE_P (GET_MODE (op1))
5293 && GET_CODE (op0) == NE
5294 && GET_CODE (XEXP (op0, 0)) == NOT
5295 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5296 {
5297 rtx cv = XEXP (op0, 1);
5298 int nunits = CONST_VECTOR_NUNITS (cv);
5299 bool ok = true;
5300 for (int i = 0; i < nunits; ++i)
5301 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5302 {
5303 ok = false;
5304 break;
5305 }
5306 if (ok)
5307 {
5308 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5309 XEXP (XEXP (op0, 0), 0),
5310 XEXP (op0, 1));
5311 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5312 return retval;
5313 }
5314 }
5315
5316 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5317 {
5318 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5319 ? GET_MODE (XEXP (op0, 1))
5320 : GET_MODE (XEXP (op0, 0)));
5321 rtx temp;
5322
5323 /* Look for constant op1 and op2 values that reduce this to a comparison. */
5324 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5325 {
5326 HOST_WIDE_INT t = INTVAL (op1);
5327 HOST_WIDE_INT f = INTVAL (op2);
5328
5329 if (t == STORE_FLAG_VALUE && f == 0)
5330 code = GET_CODE (op0);
5331 else if (t == 0 && f == STORE_FLAG_VALUE)
5332 {
5333 enum rtx_code tmp;
5334 tmp = reversed_comparison_code (op0, NULL_RTX);
5335 if (tmp == UNKNOWN)
5336 break;
5337 code = tmp;
5338 }
5339 else
5340 break;
5341
5342 return simplify_gen_relational (code, mode, cmp_mode,
5343 XEXP (op0, 0), XEXP (op0, 1));
5344 }
5345
5346 if (cmp_mode == VOIDmode)
5347 cmp_mode = op0_mode;
5348 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5349 cmp_mode, XEXP (op0, 0),
5350 XEXP (op0, 1));
5351
5352 /* See if any simplifications were possible. */
5353 if (temp)
5354 {
5355 if (CONST_INT_P (temp))
5356 return temp == const0_rtx ? op2 : op1;
5357 else if (temp)
5358 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5359 }
5360 }
5361 break;
5362
5363 case VEC_MERGE:
5364 gcc_assert (GET_MODE (op0) == mode);
5365 gcc_assert (GET_MODE (op1) == mode);
5366 gcc_assert (VECTOR_MODE_P (mode));
5367 trueop2 = avoid_constant_pool_reference (op2);
5368 if (CONST_INT_P (trueop2))
5369 {
5370 int elt_size = GET_MODE_UNIT_SIZE (mode);
5371 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5372 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5373 unsigned HOST_WIDE_INT mask;
5374 if (n_elts == HOST_BITS_PER_WIDE_INT)
5375 mask = -1;
5376 else
5377 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5378
5379 if (!(sel & mask) && !side_effects_p (op0))
5380 return op1;
5381 if ((sel & mask) == mask && !side_effects_p (op1))
5382 return op0;
5383
5384 rtx trueop0 = avoid_constant_pool_reference (op0);
5385 rtx trueop1 = avoid_constant_pool_reference (op1);
5386 if (GET_CODE (trueop0) == CONST_VECTOR
5387 && GET_CODE (trueop1) == CONST_VECTOR)
5388 {
5389 rtvec v = rtvec_alloc (n_elts);
5390 unsigned int i;
5391
5392 for (i = 0; i < n_elts; i++)
5393 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5394 ? CONST_VECTOR_ELT (trueop0, i)
5395 : CONST_VECTOR_ELT (trueop1, i));
5396 return gen_rtx_CONST_VECTOR (mode, v);
5397 }
5398
5399 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5400 if no element from a appears in the result. */
5401 if (GET_CODE (op0) == VEC_MERGE)
5402 {
5403 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5404 if (CONST_INT_P (tem))
5405 {
5406 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5407 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5408 return simplify_gen_ternary (code, mode, mode,
5409 XEXP (op0, 1), op1, op2);
5410 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5411 return simplify_gen_ternary (code, mode, mode,
5412 XEXP (op0, 0), op1, op2);
5413 }
5414 }
5415 if (GET_CODE (op1) == VEC_MERGE)
5416 {
5417 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5418 if (CONST_INT_P (tem))
5419 {
5420 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5421 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5422 return simplify_gen_ternary (code, mode, mode,
5423 op0, XEXP (op1, 1), op2);
5424 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5425 return simplify_gen_ternary (code, mode, mode,
5426 op0, XEXP (op1, 0), op2);
5427 }
5428 }
5429
5430 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5431 with a. */
5432 if (GET_CODE (op0) == VEC_DUPLICATE
5433 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5434 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5435 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5436 {
5437 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5438 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5439 {
5440 if (XEXP (XEXP (op0, 0), 0) == op1
5441 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5442 return op1;
5443 }
5444 }
5445 }
5446
5447 if (rtx_equal_p (op0, op1)
5448 && !side_effects_p (op2) && !side_effects_p (op1))
5449 return op0;
5450
5451 break;
5452
5453 default:
5454 gcc_unreachable ();
5455 }
5456
5457 return 0;
5458 }
5459
5460 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5461 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5462 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5463
5464 Works by unpacking OP into a collection of 8-bit values
5465 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5466 and then repacking them again for OUTERMODE. */
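/* For instance, on a little-endian target the HImode lowpart of
   (const_int 0x12345678) held in SImode unpacks to the bytes
   78 56 34 12, keeps the first two, and repacks them into
   (const_int 0x5678); BYTE == 2 would instead give (const_int 0x1234).
   (Illustrative only; which bytes are selected is endian-dependent.)  */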
5467
5468 static rtx
5469 simplify_immed_subreg (machine_mode outermode, rtx op,
5470 machine_mode innermode, unsigned int byte)
5471 {
5472 enum {
5473 value_bit = 8,
5474 value_mask = (1 << value_bit) - 1
5475 };
5476 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5477 int value_start;
5478 int i;
5479 int elem;
5480
5481 int num_elem;
5482 rtx * elems;
5483 int elem_bitsize;
5484 rtx result_s;
5485 rtvec result_v = NULL;
5486 enum mode_class outer_class;
5487 machine_mode outer_submode;
5488 int max_bitsize;
5489
5490 /* Some ports misuse CCmode. */
5491 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5492 return op;
5493
5494 /* We have no way to represent a complex constant at the rtl level. */
5495 if (COMPLEX_MODE_P (outermode))
5496 return NULL_RTX;
5497
5498 /* We support any size mode. */
5499 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5500 GET_MODE_BITSIZE (innermode));
5501
5502 /* Unpack the value. */
5503
5504 if (GET_CODE (op) == CONST_VECTOR)
5505 {
5506 num_elem = CONST_VECTOR_NUNITS (op);
5507 elems = &CONST_VECTOR_ELT (op, 0);
5508 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5509 }
5510 else
5511 {
5512 num_elem = 1;
5513 elems = &op;
5514 elem_bitsize = max_bitsize;
5515 }
5516 /* If this asserts, it is too complicated; reducing value_bit may help. */
5517 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5518 /* I don't know how to handle endianness of sub-units. */
5519 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5520
5521 for (elem = 0; elem < num_elem; elem++)
5522 {
5523 unsigned char * vp;
5524 rtx el = elems[elem];
5525
5526 /* Vectors are kept in target memory order. (This is probably
5527 a mistake.) */
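/* ELEM indexes the vector in target memory order; convert that to an
   offset into the little-endian VALUE array.  IBYTE counts from the
   opposite end, and the word/subword split below lets WORDS_BIG_ENDIAN
   and BYTES_BIG_ENDIAN be handled independently.  */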
5528 {
5529 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5530 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5531 / BITS_PER_UNIT);
5532 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5533 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5534 unsigned bytele = (subword_byte % UNITS_PER_WORD
5535 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5536 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5537 }
5538
5539 switch (GET_CODE (el))
5540 {
5541 case CONST_INT:
5542 for (i = 0;
5543 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5544 i += value_bit)
5545 *vp++ = INTVAL (el) >> i;
5546 /* CONST_INTs are always logically sign-extended. */
5547 for (; i < elem_bitsize; i += value_bit)
5548 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5549 break;
5550
5551 case CONST_WIDE_INT:
5552 {
5553 rtx_mode_t val = std::make_pair (el, innermode);
5554 unsigned char extend = wi::sign_mask (val);
5555
5556 for (i = 0; i < elem_bitsize; i += value_bit)
5557 *vp++ = wi::extract_uhwi (val, i, value_bit);
5558 for (; i < elem_bitsize; i += value_bit)
5559 *vp++ = extend;
5560 }
5561 break;
5562
5563 case CONST_DOUBLE:
5564 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5565 {
5566 unsigned char extend = 0;
5567 /* If this triggers, someone should have generated a
5568 CONST_INT instead. */
5569 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5570
5571 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5572 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5573 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5574 {
5575 *vp++
5576 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5577 i += value_bit;
5578 }
5579
5580 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5581 extend = -1;
5582 for (; i < elem_bitsize; i += value_bit)
5583 *vp++ = extend;
5584 }
5585 else
5586 {
5587 /* This is big enough for anything on the platform. */
5588 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5589 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5590
5591 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5592 gcc_assert (bitsize <= elem_bitsize);
5593 gcc_assert (bitsize % value_bit == 0);
5594
5595 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5596 GET_MODE (el));
5597
5598 /* real_to_target produces its result in words affected by
5599 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5600 and use WORDS_BIG_ENDIAN instead; see the documentation
5601 of SUBREG in rtl.texi. */
5602 for (i = 0; i < bitsize; i += value_bit)
5603 {
5604 int ibase;
5605 if (WORDS_BIG_ENDIAN)
5606 ibase = bitsize - 1 - i;
5607 else
5608 ibase = i;
5609 *vp++ = tmp[ibase / 32] >> i % 32;
5610 }
5611
5612 /* It shouldn't matter what's done here, so fill it with
5613 zero. */
5614 for (; i < elem_bitsize; i += value_bit)
5615 *vp++ = 0;
5616 }
5617 break;
5618
5619 case CONST_FIXED:
5620 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5621 {
5622 for (i = 0; i < elem_bitsize; i += value_bit)
5623 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5624 }
5625 else
5626 {
5627 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5628 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5629 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5630 i += value_bit)
5631 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5632 >> (i - HOST_BITS_PER_WIDE_INT);
5633 for (; i < elem_bitsize; i += value_bit)
5634 *vp++ = 0;
5635 }
5636 break;
5637
5638 default:
5639 gcc_unreachable ();
5640 }
5641 }
5642
5643 /* Now, pick the right byte to start with. */
5644 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5645 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5646 will already have offset 0. */
5647 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5648 {
5649 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5650 - byte);
5651 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5652 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5653 byte = (subword_byte % UNITS_PER_WORD
5654 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5655 }
5656
5657 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5658 so if it's become negative it will instead be very large.) */
5659 gcc_assert (byte < GET_MODE_SIZE (innermode));
5660
5661 /* Convert from bytes to chunks of size value_bit. */
5662 value_start = byte * (BITS_PER_UNIT / value_bit);
5663
5664 /* Re-pack the value. */
5665 num_elem = GET_MODE_NUNITS (outermode);
5666
5667 if (VECTOR_MODE_P (outermode))
5668 {
5669 result_v = rtvec_alloc (num_elem);
5670 elems = &RTVEC_ELT (result_v, 0);
5671 }
5672 else
5673 elems = &result_s;
5674
5675 outer_submode = GET_MODE_INNER (outermode);
5676 outer_class = GET_MODE_CLASS (outer_submode);
5677 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5678
5679 gcc_assert (elem_bitsize % value_bit == 0);
5680 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5681
5682 for (elem = 0; elem < num_elem; elem++)
5683 {
5684 unsigned char *vp;
5685
5686 /* Vectors are stored in target memory order. (This is probably
5687 a mistake.) */
5688 {
5689 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5690 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5691 / BITS_PER_UNIT);
5692 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5693 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5694 unsigned bytele = (subword_byte % UNITS_PER_WORD
5695 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5696 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5697 }
5698
5699 switch (outer_class)
5700 {
5701 case MODE_INT:
5702 case MODE_PARTIAL_INT:
5703 {
5704 int u;
5705 int base = 0;
5706 int units
5707 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5708 / HOST_BITS_PER_WIDE_INT;
5709 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5710 wide_int r;
5711
5712 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5713 return NULL_RTX;
5714 for (u = 0; u < units; u++)
5715 {
5716 unsigned HOST_WIDE_INT buf = 0;
5717 for (i = 0;
5718 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5719 i += value_bit)
5720 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5721
5722 tmp[u] = buf;
5723 base += HOST_BITS_PER_WIDE_INT;
5724 }
5725 r = wide_int::from_array (tmp, units,
5726 GET_MODE_PRECISION (outer_submode));
5727 #if TARGET_SUPPORTS_WIDE_INT == 0
5728 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5729 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5730 return NULL_RTX;
5731 #endif
5732 elems[elem] = immed_wide_int_const (r, outer_submode);
5733 }
5734 break;
5735
5736 case MODE_FLOAT:
5737 case MODE_DECIMAL_FLOAT:
5738 {
5739 REAL_VALUE_TYPE r;
5740 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5741
5742 /* real_from_target wants its input in words affected by
5743 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5744 and use WORDS_BIG_ENDIAN instead; see the documentation
5745 of SUBREG in rtl.texi. */
5746 for (i = 0; i < max_bitsize / 32; i++)
5747 tmp[i] = 0;
5748 for (i = 0; i < elem_bitsize; i += value_bit)
5749 {
5750 int ibase;
5751 if (WORDS_BIG_ENDIAN)
5752 ibase = elem_bitsize - 1 - i;
5753 else
5754 ibase = i;
5755 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5756 }
5757
5758 real_from_target (&r, tmp, outer_submode);
5759 elems[elem] = const_double_from_real_value (r, outer_submode);
5760 }
5761 break;
5762
5763 case MODE_FRACT:
5764 case MODE_UFRACT:
5765 case MODE_ACCUM:
5766 case MODE_UACCUM:
5767 {
5768 FIXED_VALUE_TYPE f;
5769 f.data.low = 0;
5770 f.data.high = 0;
5771 f.mode = outer_submode;
5772
5773 for (i = 0;
5774 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5775 i += value_bit)
5776 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5777 for (; i < elem_bitsize; i += value_bit)
5778 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5779 << (i - HOST_BITS_PER_WIDE_INT));
5780
5781 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5782 }
5783 break;
5784
5785 default:
5786 gcc_unreachable ();
5787 }
5788 }
5789 if (VECTOR_MODE_P (outermode))
5790 return gen_rtx_CONST_VECTOR (outermode, result_v);
5791 else
5792 return result_s;
5793 }
5794
5795 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5796 Return 0 if no simplifications are possible. */
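/* For example, a SUBREG of a SUBREG collapses to a single SUBREG when
   the combined offset is representable: on a little-endian target
   (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes (subreg:QI (reg:SI R) 0).
   (Illustrative; the offsets involved are target-dependent.)  */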
5797 rtx
5798 simplify_subreg (machine_mode outermode, rtx op,
5799 machine_mode innermode, unsigned int byte)
5800 {
5801 /* Little bit of sanity checking. */
5802 gcc_assert (innermode != VOIDmode);
5803 gcc_assert (outermode != VOIDmode);
5804 gcc_assert (innermode != BLKmode);
5805 gcc_assert (outermode != BLKmode);
5806
5807 gcc_assert (GET_MODE (op) == innermode
5808 || GET_MODE (op) == VOIDmode);
5809
5810 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5811 return NULL_RTX;
5812
5813 if (byte >= GET_MODE_SIZE (innermode))
5814 return NULL_RTX;
5815
5816 if (outermode == innermode && !byte)
5817 return op;
5818
5819 if (CONST_SCALAR_INT_P (op)
5820 || CONST_DOUBLE_AS_FLOAT_P (op)
5821 || GET_CODE (op) == CONST_FIXED
5822 || GET_CODE (op) == CONST_VECTOR)
5823 return simplify_immed_subreg (outermode, op, innermode, byte);
5824
5825 /* Changing mode twice with SUBREG => just change it once,
5826 or not at all if changing back to the starting mode. */
5827 if (GET_CODE (op) == SUBREG)
5828 {
5829 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5830 int final_offset = byte + SUBREG_BYTE (op);
5831 rtx newx;
5832
5833 if (outermode == innermostmode
5834 && byte == 0 && SUBREG_BYTE (op) == 0)
5835 return SUBREG_REG (op);
5836
5837 /* The SUBREG_BYTE represents the offset, as if the value were stored
5838 in memory. The irritating exception is the paradoxical subreg, where
5839 we define SUBREG_BYTE to be 0; on big-endian machines this
5840 value would otherwise be negative. For a moment, undo this exception. */
5841 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5842 {
5843 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5844 if (WORDS_BIG_ENDIAN)
5845 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5846 if (BYTES_BIG_ENDIAN)
5847 final_offset += difference % UNITS_PER_WORD;
5848 }
5849 if (SUBREG_BYTE (op) == 0
5850 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5851 {
5852 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5853 if (WORDS_BIG_ENDIAN)
5854 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5855 if (BYTES_BIG_ENDIAN)
5856 final_offset += difference % UNITS_PER_WORD;
5857 }
5858
5859 /* See whether resulting subreg will be paradoxical. */
5860 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5861 {
5862 /* In nonparadoxical subregs we can't handle negative offsets. */
5863 if (final_offset < 0)
5864 return NULL_RTX;
5865 /* Bail out in case resulting subreg would be incorrect. */
5866 if (final_offset % GET_MODE_SIZE (outermode)
5867 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5868 return NULL_RTX;
5869 }
5870 else
5871 {
5872 int offset = 0;
5873 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5874
5875 /* In a paradoxical subreg, see if we are still looking at the lower part.
5876 If so, our SUBREG_BYTE will be 0. */
5877 if (WORDS_BIG_ENDIAN)
5878 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5879 if (BYTES_BIG_ENDIAN)
5880 offset += difference % UNITS_PER_WORD;
5881 if (offset == final_offset)
5882 final_offset = 0;
5883 else
5884 return NULL_RTX;
5885 }
5886
5887 /* Recurse for further possible simplifications. */
5888 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5889 final_offset);
5890 if (newx)
5891 return newx;
5892 if (validate_subreg (outermode, innermostmode,
5893 SUBREG_REG (op), final_offset))
5894 {
5895 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5896 if (SUBREG_PROMOTED_VAR_P (op)
5897 && SUBREG_PROMOTED_SIGN (op) >= 0
5898 && GET_MODE_CLASS (outermode) == MODE_INT
5899 && IN_RANGE (GET_MODE_SIZE (outermode),
5900 GET_MODE_SIZE (innermode),
5901 GET_MODE_SIZE (innermostmode))
5902 && subreg_lowpart_p (newx))
5903 {
5904 SUBREG_PROMOTED_VAR_P (newx) = 1;
5905 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5906 }
5907 return newx;
5908 }
5909 return NULL_RTX;
5910 }
5911
5912 /* SUBREG of a hard register => just change the register number
5913 and/or mode. If the hard register is not valid in that mode,
5914 suppress this simplification. If the hard register is the stack,
5915 frame, or argument pointer, leave this as a SUBREG. */
5916
5917 if (REG_P (op) && HARD_REGISTER_P (op))
5918 {
5919 unsigned int regno, final_regno;
5920
5921 regno = REGNO (op);
5922 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5923 if (HARD_REGISTER_NUM_P (final_regno))
5924 {
5925 rtx x;
5926 int final_offset = byte;
5927
5928 /* Adjust offset for paradoxical subregs. */
5929 if (byte == 0
5930 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5931 {
5932 int difference = (GET_MODE_SIZE (innermode)
5933 - GET_MODE_SIZE (outermode));
5934 if (WORDS_BIG_ENDIAN)
5935 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5936 if (BYTES_BIG_ENDIAN)
5937 final_offset += difference % UNITS_PER_WORD;
5938 }
5939
5940 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5941
5942 /* Propagate the original regno. We don't have any way to specify
5943 an offset inside the original regno, so do so only for the lowpart.
5944 The information is used only by alias analysis, which cannot
5945 grok partial registers anyway. */
5946
5947 if (subreg_lowpart_offset (outermode, innermode) == byte)
5948 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5949 return x;
5950 }
5951 }
5952
5953 /* If we have a SUBREG of a register that we are replacing and we are
5954 replacing it with a MEM, make a new MEM and try replacing the
5955 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5956 or if we would be widening it. */
5957
5958 if (MEM_P (op)
5959 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5960 /* Allow splitting of volatile memory references in case we don't
5961 have an instruction to move the whole thing. */
5962 && (! MEM_VOLATILE_P (op)
5963 || ! have_insn_for (SET, innermode))
5964 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5965 return adjust_address_nv (op, outermode, byte);
5966
5967 /* Handle complex values represented as CONCAT
5968 of real and imaginary part. */
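/* For instance, reading the second DFmode half of (concat:DC R I),
   i.e. BYTE equal to the size of DFmode, reduces to I itself.
   (Illustrative; the sizes involved are target-dependent.)  */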
5969 if (GET_CODE (op) == CONCAT)
5970 {
5971 unsigned int part_size, final_offset;
5972 rtx part, res;
5973
5974 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5975 if (byte < part_size)
5976 {
5977 part = XEXP (op, 0);
5978 final_offset = byte;
5979 }
5980 else
5981 {
5982 part = XEXP (op, 1);
5983 final_offset = byte - part_size;
5984 }
5985
5986 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5987 return NULL_RTX;
5988
5989 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5990 if (res)
5991 return res;
5992 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5993 return gen_rtx_SUBREG (outermode, part, final_offset);
5994 return NULL_RTX;
5995 }
5996
5997 /* A SUBREG resulting from a zero extension may fold to zero if
5998 it extracts higher bits than the ZERO_EXTEND's source provides. */
5999 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6000 {
6001 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6002 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6003 return CONST0_RTX (outermode);
6004 }
6005
6006 if (SCALAR_INT_MODE_P (outermode)
6007 && SCALAR_INT_MODE_P (innermode)
6008 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6009 && byte == subreg_lowpart_offset (outermode, innermode))
6010 {
6011 rtx tem = simplify_truncation (outermode, op, innermode);
6012 if (tem)
6013 return tem;
6014 }
6015
6016 return NULL_RTX;
6017 }
6018
6019 /* Make a SUBREG operation or equivalent if it folds. */
6020
6021 rtx
6022 simplify_gen_subreg (machine_mode outermode, rtx op,
6023 machine_mode innermode, unsigned int byte)
6024 {
6025 rtx newx;
6026
6027 newx = simplify_subreg (outermode, op, innermode, byte);
6028 if (newx)
6029 return newx;
6030
6031 if (GET_CODE (op) == SUBREG
6032 || GET_CODE (op) == CONCAT
6033 || GET_MODE (op) == VOIDmode)
6034 return NULL_RTX;
6035
6036 if (validate_subreg (outermode, innermode, op, byte))
6037 return gen_rtx_SUBREG (outermode, op, byte);
6038
6039 return NULL_RTX;
6040 }
6041
6042 /* Generate a subreg that extracts the least significant part of EXPR
6043 (in mode INNER_MODE) in mode OUTER_MODE. */
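/* A typical use is grabbing the low word of a wider value, e.g.
   lowpart_subreg (SImode, x, DImode); this is simply simplify_gen_subreg
   with the byte offset supplied by subreg_lowpart_offset (0 on
   little-endian targets, typically 4 on big-endian ones).  */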
6044
6045 rtx
6046 lowpart_subreg (machine_mode outer_mode, rtx expr,
6047 machine_mode inner_mode)
6048 {
6049 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6050 subreg_lowpart_offset (outer_mode, inner_mode));
6051 }
6052
6053 /* Simplify X, an rtx expression.
6054
6055 Return the simplified expression or NULL if no simplifications
6056 were possible.
6057
6058 This is the preferred entry point into the simplification routines;
6059 however, we still allow passes to call the more specific routines.
6060
6061 Right now GCC has three (yes, three) major bodies of RTL simplification
6062 code that need to be unified.
6063
6064 1. fold_rtx in cse.c. This code uses various CSE specific
6065 information to aid in RTL simplification.
6066
6067 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6068 it uses combine specific information to aid in RTL
6069 simplification.
6070
6071 3. The routines in this file.
6072
6073
6074 Long term we want to only have one body of simplification code; to
6075 get to that state I recommend the following steps:
6076
6077 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6078 which do not depend on pass-specific state into these routines.
6079
6080 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6081 use this routine whenever possible.
6082
6083 3. Allow for pass dependent state to be provided to these
6084 routines and add simplifications based on the pass dependent
6085 state. Remove code from cse.c & combine.c that becomes
6086 redundant/dead.
6087
6088 It will take time, but ultimately the compiler will be easier to
6089 maintain and improve. It's totally silly that when we add a
6090 simplification it needs to be added in 4 places (3 for RTL
6091 simplification and 1 for tree simplification). */
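/* For instance, applied to (plus:SI (reg:SI R) (const_int 0)) this routine
   dispatches through RTX_COMM_ARITH to simplify_binary_operation, which
   folds away the zero and returns (reg:SI R); when no simplification
   applies the result is NULL and callers keep the original expression.  */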
6092
6093 rtx
6094 simplify_rtx (const_rtx x)
6095 {
6096 const enum rtx_code code = GET_CODE (x);
6097 const machine_mode mode = GET_MODE (x);
6098
6099 switch (GET_RTX_CLASS (code))
6100 {
6101 case RTX_UNARY:
6102 return simplify_unary_operation (code, mode,
6103 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6104 case RTX_COMM_ARITH:
6105 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6106 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6107
6108 /* Fall through.... */
6109
6110 case RTX_BIN_ARITH:
6111 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6112
6113 case RTX_TERNARY:
6114 case RTX_BITFIELD_OPS:
6115 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6116 XEXP (x, 0), XEXP (x, 1),
6117 XEXP (x, 2));
6118
6119 case RTX_COMPARE:
6120 case RTX_COMM_COMPARE:
6121 return simplify_relational_operation (code, mode,
6122 ((GET_MODE (XEXP (x, 0))
6123 != VOIDmode)
6124 ? GET_MODE (XEXP (x, 0))
6125 : GET_MODE (XEXP (x, 1))),
6126 XEXP (x, 0),
6127 XEXP (x, 1));
6128
6129 case RTX_EXTRA:
6130 if (code == SUBREG)
6131 return simplify_subreg (mode, SUBREG_REG (x),
6132 GET_MODE (SUBREG_REG (x)),
6133 SUBREG_BYTE (x));
6134 break;
6135
6136 case RTX_OBJ:
6137 if (code == LO_SUM)
6138 {
6139 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6140 if (GET_CODE (XEXP (x, 0)) == HIGH
6141 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6142 return XEXP (x, 1);
6143 }
6144 break;
6145
6146 default:
6147 break;
6148 }
6149 return NULL;
6150 }