1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38
39 /* Simplification and canonicalization of RTL. */
40
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
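/* For instance, HWI_SIGN_EXTEND (5) is HOST_WIDE_INT_0, while
   HWI_SIGN_EXTEND ((unsigned HOST_WIDE_INT) -5) is HOST_WIDE_INT_M1,
   so the (low, high) pair still represents -5.  */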
47
48 static rtx neg_const_int (machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
58 \f
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
62 {
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
64
65 if (!HWI_COMPUTABLE_MODE_P (mode)
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
70 }
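/* One case the VAL == UINTVAL (I) check guards against: in a mode wider
   than a HOST_WIDE_INT, negating the CONST_INT whose bit pattern is the
   minimum HOST_WIDE_INT must yield 2^63 (on a 64-bit host), which no
   longer fits a sign-extended CONST_INT, so the value is recomputed via
   simplify_const_unary_operation rather than gen_int_mode.  */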
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
81
82 if (!is_int_mode (mode, &int_mode))
83 return false;
84
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
88
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
94 {
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
106 }
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
111 {
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
114 }
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
119
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
123 }
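/* For example, with 32-bit SImode the only constant accepted is the one
   with just bit 31 set, i.e. (const_int -2147483648), whose masked value
   is 0x80000000 == HOST_WIDE_INT_1U << 31.  */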
124
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
128
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133 scalar_int_mode int_mode;
134
135 if (!is_int_mode (mode, &int_mode))
136 return false;
137
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
141
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
144 }
145
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
150 {
151 unsigned int width;
152
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
156
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
160
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
163 }
164
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
169 {
170 unsigned int width;
171
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
175
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
179
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
182 }
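/* For QImode, for instance, these helpers reduce to testing bit 7 of VAL:
   val_signbit_known_set_p is true when bit 7 is set,
   val_signbit_known_clear_p when it is clear, and val_signbit_p only when
   VAL masked to QImode is exactly 0x80.  */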
183 \f
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
186
187 rtx
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
190 {
191 rtx tem;
192
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
197
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
202
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
204 }
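/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, reg) does
   not fold, so the commutative canonicalization places the register first
   and the result is (plus:SI reg (const_int 1)).  */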
205 \f
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
208 rtx
209 avoid_constant_pool_reference (rtx x)
210 {
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
214
215 switch (GET_CODE (x))
216 {
217 case MEM:
218 break;
219
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
228
229 default:
230 return x;
231 }
232
233 if (GET_MODE (x) == BLKmode)
234 return x;
235
236 addr = XEXP (x, 0);
237
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
240
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
245 {
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
248 }
249
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
252
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
257 {
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
260
261 /* If we're accessing the constant in a mode other than the one it was
262 originally stored in, attempt to fix that up via subreg simplifications.
263 If that fails, we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
267 {
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
271 }
272 }
273
274 return x;
275 }
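/* For instance, if X is (mem:DF (symbol_ref C)) and C is a constant pool
   entry holding a DFmode constant, that CONST_DOUBLE is returned directly;
   if X instead reads part of the entry in a narrower mode or at a nonzero
   offset within it, simplify_subreg is used to extract the matching
   piece.  */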
276 \f
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
280
281 rtx
282 delegitimize_mem_from_attrs (rtx x)
283 {
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
289 {
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 poly_int64 offset = 0;
293
294 switch (TREE_CODE (decl))
295 {
296 default:
297 decl = NULL;
298 break;
299
300 case VAR_DECL:
301 break;
302
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
310 {
311 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
314
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
319 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
320 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
321 decl = NULL;
322 else
323 offset += bytepos + toffset_val;
324 break;
325 }
326 }
327
328 if (decl
329 && mode == GET_MODE (x)
330 && VAR_P (decl)
331 && (TREE_STATIC (decl)
332 || DECL_THREAD_LOCAL_P (decl))
333 && DECL_RTL_SET_P (decl)
334 && MEM_P (DECL_RTL (decl)))
335 {
336 rtx newx;
337
338 offset += MEM_OFFSET (x);
339
340 newx = DECL_RTL (decl);
341
342 if (MEM_P (newx))
343 {
344 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
345 poly_int64 n_offset, o_offset;
346
347 /* Avoid creating a new MEM needlessly if we already had
348 the same address. We do if there's no OFFSET and the
349 old address X is identical to NEWX, or if X is of the
350 form (plus NEWX OFFSET), or the NEWX is of the form
351 (plus Y (const_int Z)) and X is that with the offset
352 added: (plus Y (const_int Z+OFFSET)). */
353 n = strip_offset (n, &n_offset);
354 o = strip_offset (o, &o_offset);
355 if (!(known_eq (o_offset, n_offset + offset)
356 && rtx_equal_p (o, n)))
357 x = adjust_address_nv (newx, mode, offset);
358 }
359 else if (GET_MODE (x) == GET_MODE (newx)
360 && known_eq (offset, 0))
361 x = newx;
362 }
363 }
364
365 return x;
366 }
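/* For example, a MEM whose MEM_EXPR is a COMPONENT_REF of a TREE_STATIC
   variable with a known MEM_OFFSET can be rewritten as that variable's
   DECL_RTL adjusted by the combined byte offset, provided the mode of the
   reference is unchanged.  */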
367 \f
368 /* Make a unary operation by first seeing if it folds and otherwise making
369 the specified operation. */
370
371 rtx
372 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
373 machine_mode op_mode)
374 {
375 rtx tem;
376
377 /* If this simplifies, use it. */
378 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
379 return tem;
380
381 return gen_rtx_fmt_e (code, mode, op);
382 }
383
384 /* Likewise for ternary operations. */
385
386 rtx
387 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
388 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
389 {
390 rtx tem;
391
392 /* If this simplifies, use it. */
393 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
394 op0, op1, op2)) != 0)
395 return tem;
396
397 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
398 }
399
400 /* Likewise, for relational operations.
401 CMP_MODE specifies mode comparison is done in. */
402
403 rtx
404 simplify_gen_relational (enum rtx_code code, machine_mode mode,
405 machine_mode cmp_mode, rtx op0, rtx op1)
406 {
407 rtx tem;
408
409 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
410 op0, op1)) != 0)
411 return tem;
412
413 return gen_rtx_fmt_ee (code, mode, op0, op1);
414 }
415 \f
416 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
417 and simplify the result. If FN is non-NULL, call this callback on each
418 X; if it returns non-NULL, replace X with its return value and simplify the
419 result. */
420
421 rtx
422 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
423 rtx (*fn) (rtx, const_rtx, void *), void *data)
424 {
425 enum rtx_code code = GET_CODE (x);
426 machine_mode mode = GET_MODE (x);
427 machine_mode op_mode;
428 const char *fmt;
429 rtx op0, op1, op2, newx, op;
430 rtvec vec, newvec;
431 int i, j;
432
433 if (__builtin_expect (fn != NULL, 0))
434 {
435 newx = fn (x, old_rtx, data);
436 if (newx)
437 return newx;
438 }
439 else if (rtx_equal_p (x, old_rtx))
440 return copy_rtx ((rtx) data);
441
442 switch (GET_RTX_CLASS (code))
443 {
444 case RTX_UNARY:
445 op0 = XEXP (x, 0);
446 op_mode = GET_MODE (op0);
447 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
448 if (op0 == XEXP (x, 0))
449 return x;
450 return simplify_gen_unary (code, mode, op0, op_mode);
451
452 case RTX_BIN_ARITH:
453 case RTX_COMM_ARITH:
454 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
455 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
456 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
457 return x;
458 return simplify_gen_binary (code, mode, op0, op1);
459
460 case RTX_COMPARE:
461 case RTX_COMM_COMPARE:
462 op0 = XEXP (x, 0);
463 op1 = XEXP (x, 1);
464 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
465 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
466 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
467 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
468 return x;
469 return simplify_gen_relational (code, mode, op_mode, op0, op1);
470
471 case RTX_TERNARY:
472 case RTX_BITFIELD_OPS:
473 op0 = XEXP (x, 0);
474 op_mode = GET_MODE (op0);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
477 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
478 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
479 return x;
480 if (op_mode == VOIDmode)
481 op_mode = GET_MODE (op0);
482 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
483
484 case RTX_EXTRA:
485 if (code == SUBREG)
486 {
487 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
488 if (op0 == SUBREG_REG (x))
489 return x;
490 op0 = simplify_gen_subreg (GET_MODE (x), op0,
491 GET_MODE (SUBREG_REG (x)),
492 SUBREG_BYTE (x));
493 return op0 ? op0 : x;
494 }
495 break;
496
497 case RTX_OBJ:
498 if (code == MEM)
499 {
500 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
501 if (op0 == XEXP (x, 0))
502 return x;
503 return replace_equiv_address_nv (x, op0);
504 }
505 else if (code == LO_SUM)
506 {
507 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
508 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
509
510 /* (lo_sum (high x) y) -> y where x and y have the same base. */
511 if (GET_CODE (op0) == HIGH)
512 {
513 rtx base0, base1, offset0, offset1;
514 split_const (XEXP (op0, 0), &base0, &offset0);
515 split_const (op1, &base1, &offset1);
516 if (rtx_equal_p (base0, base1))
517 return op1;
518 }
519
520 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
521 return x;
522 return gen_rtx_LO_SUM (mode, op0, op1);
523 }
524 break;
525
526 default:
527 break;
528 }
529
530 newx = x;
531 fmt = GET_RTX_FORMAT (code);
532 for (i = 0; fmt[i]; i++)
533 switch (fmt[i])
534 {
535 case 'E':
536 vec = XVEC (x, i);
537 newvec = XVEC (newx, i);
538 for (j = 0; j < GET_NUM_ELEM (vec); j++)
539 {
540 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
541 old_rtx, fn, data);
542 if (op != RTVEC_ELT (vec, j))
543 {
544 if (newvec == vec)
545 {
546 newvec = shallow_copy_rtvec (vec);
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XVEC (newx, i) = newvec;
550 }
551 RTVEC_ELT (newvec, j) = op;
552 }
553 }
554 break;
555
556 case 'e':
557 if (XEXP (x, i))
558 {
559 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
560 if (op != XEXP (x, i))
561 {
562 if (x == newx)
563 newx = shallow_copy_rtx (x);
564 XEXP (newx, i) = op;
565 }
566 }
567 break;
568 }
569 return newx;
570 }
571
572 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
573 resulting RTX. Return a new RTX which is as simplified as possible. */
574
575 rtx
576 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
577 {
578 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
579 }
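/* For example, simplify_replace_rtx on (plus:SI (reg R) (const_int 3))
   with OLD_RTX = (reg R) and NEW_RTX = (const_int 4) folds the rebuilt
   PLUS and returns (const_int 7).  */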
580 \f
581 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
582 Only handle cases where the truncated value is inherently an rvalue.
583
584 RTL provides two ways of truncating a value:
585
586 1. a lowpart subreg. This form is only a truncation when both
587 the outer and inner modes (here MODE and OP_MODE respectively)
588 are scalar integers, and only then when the subreg is used as
589 an rvalue.
590
591 It is only valid to form such truncating subregs if the
592 truncation requires no action by the target. The onus for
593 proving this is on the creator of the subreg -- e.g. the
594 caller to simplify_subreg or simplify_gen_subreg -- and typically
595 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
596
597 2. a TRUNCATE. This form handles both scalar and compound integers.
598
599 The first form is preferred where valid. However, the TRUNCATE
600 handling in simplify_unary_operation turns the second form into the
601 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
602 so it is generally safe to form rvalue truncations using:
603
604 simplify_gen_unary (TRUNCATE, ...)
605
606 and leave simplify_unary_operation to work out which representation
607 should be used.
608
609 Because of the proof requirements on (1), simplify_truncation must
610 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
611 regardless of whether the outer truncation came from a SUBREG or a
612 TRUNCATE. For example, if the caller has proven that an SImode
613 truncation of:
614
615 (and:DI X Y)
616
617 is a no-op and can be represented as a subreg, it does not follow
618 that SImode truncations of X and Y are also no-ops. On a target
619 like 64-bit MIPS that requires SImode values to be stored in
620 sign-extended form, an SImode truncation of:
621
622 (and:DI (reg:DI X) (const_int 63))
623
624 is trivially a no-op because only the lower 6 bits can be set.
625 However, X is still an arbitrary 64-bit number and so we cannot
626 assume that truncating it too is a no-op. */
627
628 static rtx
629 simplify_truncation (machine_mode mode, rtx op,
630 machine_mode op_mode)
631 {
632 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
633 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
634 scalar_int_mode int_mode, int_op_mode, subreg_mode;
635
636 gcc_assert (precision <= op_precision);
637
638 /* Optimize truncations of zero and sign extended values. */
639 if (GET_CODE (op) == ZERO_EXTEND
640 || GET_CODE (op) == SIGN_EXTEND)
641 {
642 /* There are three possibilities. If MODE is the same as the
643 origmode, we can omit both the extension and the subreg.
644 If MODE is not larger than the origmode, we can apply the
645 truncation without the extension. Finally, if the outermode
646 is larger than the origmode, we can just extend to the appropriate
647 mode. */
648 machine_mode origmode = GET_MODE (XEXP (op, 0));
649 if (mode == origmode)
650 return XEXP (op, 0);
651 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
652 return simplify_gen_unary (TRUNCATE, mode,
653 XEXP (op, 0), origmode);
654 else
655 return simplify_gen_unary (GET_CODE (op), mode,
656 XEXP (op, 0), origmode);
657 }
658
659 /* If the machine can perform operations in the truncated mode, distribute
660 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
661 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
662 if (1
663 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
664 && (GET_CODE (op) == PLUS
665 || GET_CODE (op) == MINUS
666 || GET_CODE (op) == MULT))
667 {
668 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
669 if (op0)
670 {
671 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
672 if (op1)
673 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
674 }
675 }
676
677 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
678 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
679 the outer subreg is effectively a truncation to the original mode. */
680 if ((GET_CODE (op) == LSHIFTRT
681 || GET_CODE (op) == ASHIFTRT)
682 /* Ensure that OP_MODE is at least twice as wide as MODE
683 to avoid the possibility that an outer LSHIFTRT shifts by more
684 than the sign extension's sign_bit_copies and introduces zeros
685 into the high bits of the result. */
686 && 2 * precision <= op_precision
687 && CONST_INT_P (XEXP (op, 1))
688 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
689 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
690 && UINTVAL (XEXP (op, 1)) < precision)
691 return simplify_gen_binary (ASHIFTRT, mode,
692 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
693
694 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
695 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
696 the outer subreg is effectively a truncation to the original mode. */
697 if ((GET_CODE (op) == LSHIFTRT
698 || GET_CODE (op) == ASHIFTRT)
699 && CONST_INT_P (XEXP (op, 1))
700 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
701 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
702 && UINTVAL (XEXP (op, 1)) < precision)
703 return simplify_gen_binary (LSHIFTRT, mode,
704 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
705
706 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
707 (ashift:QI (x:QI) C), where C is a suitable small constant and
708 the outer subreg is effectively a truncation to the original mode. */
709 if (GET_CODE (op) == ASHIFT
710 && CONST_INT_P (XEXP (op, 1))
711 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
712 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
713 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
714 && UINTVAL (XEXP (op, 1)) < precision)
715 return simplify_gen_binary (ASHIFT, mode,
716 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717
718 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
719 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
720 and C2. */
721 if (GET_CODE (op) == AND
722 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
723 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
724 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
725 && CONST_INT_P (XEXP (op, 1)))
726 {
727 rtx op0 = (XEXP (XEXP (op, 0), 0));
728 rtx shift_op = XEXP (XEXP (op, 0), 1);
729 rtx mask_op = XEXP (op, 1);
730 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
731 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
732
733 if (shift < precision
734 /* If doing this transform works for an X with all bits set,
735 it works for any X. */
736 && ((GET_MODE_MASK (mode) >> shift) & mask)
737 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
738 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
739 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
740 {
741 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
742 return simplify_gen_binary (AND, mode, op0, mask_op);
743 }
744 }
745
746 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
747 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
748 changing len. */
749 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
750 && REG_P (XEXP (op, 0))
751 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
752 && CONST_INT_P (XEXP (op, 1))
753 && CONST_INT_P (XEXP (op, 2)))
754 {
755 rtx op0 = XEXP (op, 0);
756 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
757 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
758 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
759 {
760 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
761 if (op0)
762 {
763 pos -= op_precision - precision;
764 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
765 XEXP (op, 1), GEN_INT (pos));
766 }
767 }
768 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
769 {
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
772 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
773 XEXP (op, 1), XEXP (op, 2));
774 }
775 }
776
777 /* Recognize a word extraction from a multi-word subreg. */
778 if ((GET_CODE (op) == LSHIFTRT
779 || GET_CODE (op) == ASHIFTRT)
780 && SCALAR_INT_MODE_P (mode)
781 && SCALAR_INT_MODE_P (op_mode)
782 && precision >= BITS_PER_WORD
783 && 2 * precision <= op_precision
784 && CONST_INT_P (XEXP (op, 1))
785 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
786 && UINTVAL (XEXP (op, 1)) < op_precision)
787 {
788 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
789 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
790 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
791 (WORDS_BIG_ENDIAN
792 ? byte - shifted_bytes
793 : byte + shifted_bytes));
794 }
795
796 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
797 and try replacing the TRUNCATE and shift with it. Don't do this
798 if the MEM has a mode-dependent address. */
799 if ((GET_CODE (op) == LSHIFTRT
800 || GET_CODE (op) == ASHIFTRT)
801 && is_a <scalar_int_mode> (mode, &int_mode)
802 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
803 && MEM_P (XEXP (op, 0))
804 && CONST_INT_P (XEXP (op, 1))
805 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
806 && INTVAL (XEXP (op, 1)) > 0
807 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
808 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
809 MEM_ADDR_SPACE (XEXP (op, 0)))
810 && ! MEM_VOLATILE_P (XEXP (op, 0))
811 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
812 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
813 {
814 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
815 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
816 return adjust_address_nv (XEXP (op, 0), int_mode,
817 (WORDS_BIG_ENDIAN
818 ? byte - shifted_bytes
819 : byte + shifted_bytes));
820 }
821
822 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
823 (OP:SI foo:SI) if OP is NEG or ABS. */
824 if ((GET_CODE (op) == ABS
825 || GET_CODE (op) == NEG)
826 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
827 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
828 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
829 return simplify_gen_unary (GET_CODE (op), mode,
830 XEXP (XEXP (op, 0), 0), mode);
831
832 /* (truncate:A (subreg:B (truncate:C X) 0)) is
833 (truncate:A X). */
834 if (GET_CODE (op) == SUBREG
835 && is_a <scalar_int_mode> (mode, &int_mode)
836 && SCALAR_INT_MODE_P (op_mode)
837 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
838 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
839 && subreg_lowpart_p (op))
840 {
841 rtx inner = XEXP (SUBREG_REG (op), 0);
842 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
843 return simplify_gen_unary (TRUNCATE, int_mode, inner,
844 GET_MODE (inner));
845 else
846 /* If subreg above is paradoxical and C is narrower
847 than A, return (subreg:A (truncate:C X) 0). */
848 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
849 }
850
851 /* (truncate:A (truncate:B X)) is (truncate:A X). */
852 if (GET_CODE (op) == TRUNCATE)
853 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
854 GET_MODE (XEXP (op, 0)));
855
856 /* (truncate:A (ior X C)) is (const_int -1) if C, truncated to mode A,
857 is already -1. */
858 if (GET_CODE (op) == IOR
859 && SCALAR_INT_MODE_P (mode)
860 && SCALAR_INT_MODE_P (op_mode)
861 && CONST_INT_P (XEXP (op, 1))
862 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
863 return constm1_rtx;
864
865 return NULL_RTX;
866 }
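/* A typical case of the distribution rule above: when
   !WORD_REGISTER_OPERATIONS or the truncated precision is at least
   BITS_PER_WORD, (truncate:QI (plus:SI (reg:SI x) (reg:SI y))) becomes
   (plus:QI (truncate:QI (reg:SI x)) (truncate:QI (reg:SI y))), and each
   inner truncation may further collapse to a lowpart subreg if the target
   treats it as a no-op.  */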
867 \f
868 /* Try to simplify a unary operation CODE whose output mode is to be
869 MODE with input operand OP whose mode was originally OP_MODE.
870 Return zero if no simplification can be made. */
871 rtx
872 simplify_unary_operation (enum rtx_code code, machine_mode mode,
873 rtx op, machine_mode op_mode)
874 {
875 rtx trueop, tem;
876
877 trueop = avoid_constant_pool_reference (op);
878
879 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
880 if (tem)
881 return tem;
882
883 return simplify_unary_operation_1 (code, mode, op);
884 }
885
886 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
887 to be exact. */
888
889 static bool
890 exact_int_to_float_conversion_p (const_rtx op)
891 {
892 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
893 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
894 /* Constants shouldn't reach here. */
895 gcc_assert (op0_mode != VOIDmode);
896 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
897 int in_bits = in_prec;
898 if (HWI_COMPUTABLE_MODE_P (op0_mode))
899 {
900 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
901 if (GET_CODE (op) == FLOAT)
902 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
903 else if (GET_CODE (op) == UNSIGNED_FLOAT)
904 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
905 else
906 gcc_unreachable ();
907 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
908 }
909 return in_bits <= out_bits;
910 }
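/* For example, assuming IEEE formats, (float:DF (reg:SI x)) is always
   exact because DFmode has a 53-bit significand and in_bits is at most 32,
   whereas (float:SF (reg:SI x)) is only known exact when nonzero_bits or
   num_sign_bit_copies shows that at most 24 significant bits remain.  */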
911
912 /* Perform some simplifications we can do even if the operands
913 aren't constant. */
914 static rtx
915 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
916 {
917 enum rtx_code reversed;
918 rtx temp, elt, base, step;
919 scalar_int_mode inner, int_mode, op_mode, op0_mode;
920
921 switch (code)
922 {
923 case NOT:
924 /* (not (not X)) == X. */
925 if (GET_CODE (op) == NOT)
926 return XEXP (op, 0);
927
928 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
929 comparison is all ones. */
930 if (COMPARISON_P (op)
931 && (mode == BImode || STORE_FLAG_VALUE == -1)
932 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
933 return simplify_gen_relational (reversed, mode, VOIDmode,
934 XEXP (op, 0), XEXP (op, 1));
935
936 /* (not (plus X -1)) can become (neg X). */
937 if (GET_CODE (op) == PLUS
938 && XEXP (op, 1) == constm1_rtx)
939 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
940
941 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
942 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
943 and MODE_VECTOR_INT. */
944 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
945 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
946 CONSTM1_RTX (mode));
947
948 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
949 if (GET_CODE (op) == XOR
950 && CONST_INT_P (XEXP (op, 1))
951 && (temp = simplify_unary_operation (NOT, mode,
952 XEXP (op, 1), mode)) != 0)
953 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
954
955 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
956 if (GET_CODE (op) == PLUS
957 && CONST_INT_P (XEXP (op, 1))
958 && mode_signbit_p (mode, XEXP (op, 1))
959 && (temp = simplify_unary_operation (NOT, mode,
960 XEXP (op, 1), mode)) != 0)
961 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
962
963
964 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
965 operands other than 1, but that is not valid. We could do a
966 similar simplification for (not (lshiftrt C X)) where C is
967 just the sign bit, but this doesn't seem common enough to
968 bother with. */
969 if (GET_CODE (op) == ASHIFT
970 && XEXP (op, 0) == const1_rtx)
971 {
972 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
973 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
974 }
975
976 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
977 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
978 so we can perform the above simplification. */
979 if (STORE_FLAG_VALUE == -1
980 && is_a <scalar_int_mode> (mode, &int_mode)
981 && GET_CODE (op) == ASHIFTRT
982 && CONST_INT_P (XEXP (op, 1))
983 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
984 return simplify_gen_relational (GE, int_mode, VOIDmode,
985 XEXP (op, 0), const0_rtx);
986
987
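/* Likewise (not (subreg (ashift 1 X) 0)), where the subreg is a lowpart
   of a wider shift, can become the lowpart of (rotate ~1 X) performed in
   the wider mode.  */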
988 if (partial_subreg_p (op)
989 && subreg_lowpart_p (op)
990 && GET_CODE (SUBREG_REG (op)) == ASHIFT
991 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
992 {
993 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
994 rtx x;
995
996 x = gen_rtx_ROTATE (inner_mode,
997 simplify_gen_unary (NOT, inner_mode, const1_rtx,
998 inner_mode),
999 XEXP (SUBREG_REG (op), 1));
1000 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1001 if (temp)
1002 return temp;
1003 }
1004
1005 /* Apply De Morgan's laws to reduce number of patterns for machines
1006 with negating logical insns (and-not, nand, etc.). If result has
1007 only one NOT, put it first, since that is how the patterns are
1008 coded. */
1009 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1010 {
1011 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1012 machine_mode op_mode;
1013
1014 op_mode = GET_MODE (in1);
1015 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1016
1017 op_mode = GET_MODE (in2);
1018 if (op_mode == VOIDmode)
1019 op_mode = mode;
1020 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1021
1022 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1023 std::swap (in1, in2);
1024
1025 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1026 mode, in1, in2);
1027 }
1028
1029 /* (not (bswap x)) -> (bswap (not x)). */
1030 if (GET_CODE (op) == BSWAP)
1031 {
1032 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1033 return simplify_gen_unary (BSWAP, mode, x, mode);
1034 }
1035 break;
1036
1037 case NEG:
1038 /* (neg (neg X)) == X. */
1039 if (GET_CODE (op) == NEG)
1040 return XEXP (op, 0);
1041
1042 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1043 If the comparison is not reversible, use
1044 x ? y : (neg y). */
1045 if (GET_CODE (op) == IF_THEN_ELSE)
1046 {
1047 rtx cond = XEXP (op, 0);
1048 rtx true_rtx = XEXP (op, 1);
1049 rtx false_rtx = XEXP (op, 2);
1050
1051 if ((GET_CODE (true_rtx) == NEG
1052 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1053 || (GET_CODE (false_rtx) == NEG
1054 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1055 {
1056 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1057 temp = reversed_comparison (cond, mode);
1058 else
1059 {
1060 temp = cond;
1061 std::swap (true_rtx, false_rtx);
1062 }
1063 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1064 mode, temp, true_rtx, false_rtx);
1065 }
1066 }
1067
1068 /* (neg (plus X 1)) can become (not X). */
1069 if (GET_CODE (op) == PLUS
1070 && XEXP (op, 1) == const1_rtx)
1071 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1072
1073 /* Similarly, (neg (not X)) is (plus X 1). */
1074 if (GET_CODE (op) == NOT)
1075 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1076 CONST1_RTX (mode));
1077
1078 /* (neg (minus X Y)) can become (minus Y X). This transformation
1079 isn't safe for modes with signed zeros, since if X and Y are
1080 both +0, (minus Y X) is the same as (minus X Y). If the
1081 rounding mode is towards +infinity (or -infinity) then the two
1082 expressions will be rounded differently. */
1083 if (GET_CODE (op) == MINUS
1084 && !HONOR_SIGNED_ZEROS (mode)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1086 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1087
1088 if (GET_CODE (op) == PLUS
1089 && !HONOR_SIGNED_ZEROS (mode)
1090 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1091 {
1092 /* (neg (plus A C)) is simplified to (minus -C A). */
1093 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1094 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1095 {
1096 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1097 if (temp)
1098 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1099 }
1100
1101 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1102 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1103 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1104 }
1105
1106 /* (neg (mult A B)) becomes (mult A (neg B)).
1107 This works even for floating-point values. */
1108 if (GET_CODE (op) == MULT
1109 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1110 {
1111 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1112 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1113 }
1114
1115 /* NEG commutes with ASHIFT since it is multiplication. Only do
1116 this if we can then eliminate the NEG (e.g., if the operand
1117 is a constant). */
1118 if (GET_CODE (op) == ASHIFT)
1119 {
1120 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1121 if (temp)
1122 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1123 }
1124
1125 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1126 C is equal to the width of MODE minus 1. */
1127 if (GET_CODE (op) == ASHIFTRT
1128 && CONST_INT_P (XEXP (op, 1))
1129 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1130 return simplify_gen_binary (LSHIFTRT, mode,
1131 XEXP (op, 0), XEXP (op, 1));
1132
1133 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1134 C is equal to the width of MODE minus 1. */
1135 if (GET_CODE (op) == LSHIFTRT
1136 && CONST_INT_P (XEXP (op, 1))
1137 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1138 return simplify_gen_binary (ASHIFTRT, mode,
1139 XEXP (op, 0), XEXP (op, 1));
1140
1141 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1142 if (GET_CODE (op) == XOR
1143 && XEXP (op, 1) == const1_rtx
1144 && nonzero_bits (XEXP (op, 0), mode) == 1)
1145 return plus_constant (mode, XEXP (op, 0), -1);
1146
1147 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1148 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1149 if (GET_CODE (op) == LT
1150 && XEXP (op, 1) == const0_rtx
1151 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1152 {
1153 int_mode = as_a <scalar_int_mode> (mode);
1154 int isize = GET_MODE_PRECISION (inner);
1155 if (STORE_FLAG_VALUE == 1)
1156 {
1157 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1158 gen_int_shift_amount (inner,
1159 isize - 1));
1160 if (int_mode == inner)
1161 return temp;
1162 if (GET_MODE_PRECISION (int_mode) > isize)
1163 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1164 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1165 }
1166 else if (STORE_FLAG_VALUE == -1)
1167 {
1168 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1169 gen_int_shift_amount (inner,
1170 isize - 1));
1171 if (int_mode == inner)
1172 return temp;
1173 if (GET_MODE_PRECISION (int_mode) > isize)
1174 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1175 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1176 }
1177 }
1178
1179 if (vec_series_p (op, &base, &step))
1180 {
1181 /* Only create a new series if we can simplify both parts. In other
1182 cases this isn't really a simplification, and it's not necessarily
1183 a win to replace a vector operation with a scalar operation. */
1184 scalar_mode inner_mode = GET_MODE_INNER (mode);
1185 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1186 if (base)
1187 {
1188 step = simplify_unary_operation (NEG, inner_mode,
1189 step, inner_mode);
1190 if (step)
1191 return gen_vec_series (mode, base, step);
1192 }
1193 }
1194 break;
1195
1196 case TRUNCATE:
1197 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1198 with the umulXi3_highpart patterns. */
1199 if (GET_CODE (op) == LSHIFTRT
1200 && GET_CODE (XEXP (op, 0)) == MULT)
1201 break;
1202
1203 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1204 {
1205 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1206 {
1207 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1208 if (temp)
1209 return temp;
1210 }
1211 /* We can't handle truncation to a partial integer mode here
1212 because we don't know the real bitsize of the partial
1213 integer mode. */
1214 break;
1215 }
1216
1217 if (GET_MODE (op) != VOIDmode)
1218 {
1219 temp = simplify_truncation (mode, op, GET_MODE (op));
1220 if (temp)
1221 return temp;
1222 }
1223
1224 /* If we know that the value is already truncated, we can
1225 replace the TRUNCATE with a SUBREG. */
1226 if (GET_MODE_NUNITS (mode) == 1
1227 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1228 || truncated_to_mode (mode, op)))
1229 {
1230 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1231 if (temp)
1232 return temp;
1233 }
1234
1235 /* A truncate of a comparison can be replaced with a subreg if
1236 STORE_FLAG_VALUE permits. This is like the previous test,
1237 but it works even if the comparison is done in a mode larger
1238 than HOST_BITS_PER_WIDE_INT. */
1239 if (HWI_COMPUTABLE_MODE_P (mode)
1240 && COMPARISON_P (op)
1241 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1242 {
1243 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1244 if (temp)
1245 return temp;
1246 }
1247
1248 /* A truncate of a memory is just loading the low part of the memory
1249 if we are not changing the meaning of the address. */
1250 if (GET_CODE (op) == MEM
1251 && !VECTOR_MODE_P (mode)
1252 && !MEM_VOLATILE_P (op)
1253 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1254 {
1255 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1256 if (temp)
1257 return temp;
1258 }
1259
1260 break;
1261
1262 case FLOAT_TRUNCATE:
1263 if (DECIMAL_FLOAT_MODE_P (mode))
1264 break;
1265
1266 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1267 if (GET_CODE (op) == FLOAT_EXTEND
1268 && GET_MODE (XEXP (op, 0)) == mode)
1269 return XEXP (op, 0);
1270
1271 /* (float_truncate:SF (float_truncate:DF foo:XF))
1272 = (float_truncate:SF foo:XF).
1273 This may eliminate double rounding, so it is unsafe.
1274
1275 (float_truncate:SF (float_extend:XF foo:DF))
1276 = (float_truncate:SF foo:DF).
1277
1278 (float_truncate:DF (float_extend:XF foo:SF))
1279 = (float_extend:DF foo:SF). */
1280 if ((GET_CODE (op) == FLOAT_TRUNCATE
1281 && flag_unsafe_math_optimizations)
1282 || GET_CODE (op) == FLOAT_EXTEND)
1283 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1284 > GET_MODE_UNIT_SIZE (mode)
1285 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1286 mode,
1287 XEXP (op, 0), mode);
1288
1289 /* (float_truncate (float x)) is (float x) */
1290 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1291 && (flag_unsafe_math_optimizations
1292 || exact_int_to_float_conversion_p (op)))
1293 return simplify_gen_unary (GET_CODE (op), mode,
1294 XEXP (op, 0),
1295 GET_MODE (XEXP (op, 0)));
1296
1297 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1298 (OP:SF foo:SF) if OP is NEG or ABS. */
1299 if ((GET_CODE (op) == ABS
1300 || GET_CODE (op) == NEG)
1301 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1302 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1303 return simplify_gen_unary (GET_CODE (op), mode,
1304 XEXP (XEXP (op, 0), 0), mode);
1305
1306 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1307 is (float_truncate:SF x). */
1308 if (GET_CODE (op) == SUBREG
1309 && subreg_lowpart_p (op)
1310 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1311 return SUBREG_REG (op);
1312 break;
1313
1314 case FLOAT_EXTEND:
1315 if (DECIMAL_FLOAT_MODE_P (mode))
1316 break;
1317
1318 /* (float_extend (float_extend x)) is (float_extend x)
1319
1320 (float_extend (float x)) is (float x) assuming that double
1321 rounding can't happen.
1322 */
1323 if (GET_CODE (op) == FLOAT_EXTEND
1324 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1325 && exact_int_to_float_conversion_p (op)))
1326 return simplify_gen_unary (GET_CODE (op), mode,
1327 XEXP (op, 0),
1328 GET_MODE (XEXP (op, 0)));
1329
1330 break;
1331
1332 case ABS:
1333 /* (abs (neg <foo>)) -> (abs <foo>) */
1334 if (GET_CODE (op) == NEG)
1335 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1336 GET_MODE (XEXP (op, 0)));
1337
1338 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1339 do nothing. */
1340 if (GET_MODE (op) == VOIDmode)
1341 break;
1342
1343 /* If operand is something known to be positive, ignore the ABS. */
1344 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1345 || val_signbit_known_clear_p (GET_MODE (op),
1346 nonzero_bits (op, GET_MODE (op))))
1347 return op;
1348
1349 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1350 if (is_a <scalar_int_mode> (mode, &int_mode)
1351 && (num_sign_bit_copies (op, int_mode)
1352 == GET_MODE_PRECISION (int_mode)))
1353 return gen_rtx_NEG (int_mode, op);
1354
1355 break;
1356
1357 case FFS:
1358 /* (ffs (*_extend <X>)) = (ffs <X>) */
1359 if (GET_CODE (op) == SIGN_EXTEND
1360 || GET_CODE (op) == ZERO_EXTEND)
1361 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1362 GET_MODE (XEXP (op, 0)));
1363 break;
1364
1365 case POPCOUNT:
1366 switch (GET_CODE (op))
1367 {
1368 case BSWAP:
1369 case ZERO_EXTEND:
1370 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1371 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1372 GET_MODE (XEXP (op, 0)));
1373
1374 case ROTATE:
1375 case ROTATERT:
1376 /* Rotations don't affect popcount. */
1377 if (!side_effects_p (XEXP (op, 1)))
1378 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1379 GET_MODE (XEXP (op, 0)));
1380 break;
1381
1382 default:
1383 break;
1384 }
1385 break;
1386
1387 case PARITY:
1388 switch (GET_CODE (op))
1389 {
1390 case NOT:
1391 case BSWAP:
1392 case ZERO_EXTEND:
1393 case SIGN_EXTEND:
1394 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1395 GET_MODE (XEXP (op, 0)));
1396
1397 case ROTATE:
1398 case ROTATERT:
1399 /* Rotations don't affect parity. */
1400 if (!side_effects_p (XEXP (op, 1)))
1401 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1402 GET_MODE (XEXP (op, 0)));
1403 break;
1404
1405 default:
1406 break;
1407 }
1408 break;
1409
1410 case BSWAP:
1411 /* (bswap (bswap x)) -> x. */
1412 if (GET_CODE (op) == BSWAP)
1413 return XEXP (op, 0);
1414 break;
1415
1416 case FLOAT:
1417 /* (float (sign_extend <X>)) = (float <X>). */
1418 if (GET_CODE (op) == SIGN_EXTEND)
1419 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1420 GET_MODE (XEXP (op, 0)));
1421 break;
1422
1423 case SIGN_EXTEND:
1424 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1425 becomes just the MINUS if its mode is MODE. This allows
1426 folding switch statements on machines using casesi (such as
1427 the VAX). */
1428 if (GET_CODE (op) == TRUNCATE
1429 && GET_MODE (XEXP (op, 0)) == mode
1430 && GET_CODE (XEXP (op, 0)) == MINUS
1431 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1432 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1433 return XEXP (op, 0);
1434
1435 /* Extending a widening multiplication should be canonicalized to
1436 a wider widening multiplication. */
1437 if (GET_CODE (op) == MULT)
1438 {
1439 rtx lhs = XEXP (op, 0);
1440 rtx rhs = XEXP (op, 1);
1441 enum rtx_code lcode = GET_CODE (lhs);
1442 enum rtx_code rcode = GET_CODE (rhs);
1443
1444 /* Widening multiplies usually extend both operands, but sometimes
1445 they use a shift to extract a portion of a register. */
1446 if ((lcode == SIGN_EXTEND
1447 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1448 && (rcode == SIGN_EXTEND
1449 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1450 {
1451 machine_mode lmode = GET_MODE (lhs);
1452 machine_mode rmode = GET_MODE (rhs);
1453 int bits;
1454
1455 if (lcode == ASHIFTRT)
1456 /* Number of bits not shifted off the end. */
1457 bits = (GET_MODE_UNIT_PRECISION (lmode)
1458 - INTVAL (XEXP (lhs, 1)));
1459 else /* lcode == SIGN_EXTEND */
1460 /* Size of inner mode. */
1461 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1462
1463 if (rcode == ASHIFTRT)
1464 bits += (GET_MODE_UNIT_PRECISION (rmode)
1465 - INTVAL (XEXP (rhs, 1)));
1466 else /* rcode == SIGN_EXTEND */
1467 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1468
1469 /* We can only widen multiplies if the result is mathematically
1470 equivalent. I.e. if overflow was impossible. */
1471 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1472 return simplify_gen_binary
1473 (MULT, mode,
1474 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1475 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1476 }
1477 }
1478
1479 /* Check for a sign extension of a subreg of a promoted
1480 variable, where the promotion is sign-extended, and the
1481 target mode is the same as the variable's promotion. */
1482 if (GET_CODE (op) == SUBREG
1483 && SUBREG_PROMOTED_VAR_P (op)
1484 && SUBREG_PROMOTED_SIGNED_P (op)
1485 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1486 {
1487 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1488 if (temp)
1489 return temp;
1490 }
1491
1492 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1493 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1494 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1495 {
1496 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1497 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1498 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1499 GET_MODE (XEXP (op, 0)));
1500 }
1501
1502 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1503 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1504 GET_MODE_BITSIZE (N) - I bits.
1505 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1506 is similarly (zero_extend:M (subreg:O <X>)). */
1507 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1508 && GET_CODE (XEXP (op, 0)) == ASHIFT
1509 && is_a <scalar_int_mode> (mode, &int_mode)
1510 && CONST_INT_P (XEXP (op, 1))
1511 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1512 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1513 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1514 {
1515 scalar_int_mode tmode;
1516 gcc_assert (GET_MODE_BITSIZE (int_mode)
1517 > GET_MODE_BITSIZE (op_mode));
1518 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1519 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1520 {
1521 rtx inner =
1522 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1523 if (inner)
1524 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1525 ? SIGN_EXTEND : ZERO_EXTEND,
1526 int_mode, inner, tmode);
1527 }
1528 }
1529
1530 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1531 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1532 if (GET_CODE (op) == LSHIFTRT
1533 && CONST_INT_P (XEXP (op, 1))
1534 && XEXP (op, 1) != const0_rtx)
1535 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1536
1537 #if defined(POINTERS_EXTEND_UNSIGNED)
1538 /* As we do not know which address space the pointer is referring to,
1539 we can do this only if the target does not support different pointer
1540 or address modes depending on the address space. */
1541 if (target_default_pointer_address_modes_p ()
1542 && ! POINTERS_EXTEND_UNSIGNED
1543 && mode == Pmode && GET_MODE (op) == ptr_mode
1544 && (CONSTANT_P (op)
1545 || (GET_CODE (op) == SUBREG
1546 && REG_P (SUBREG_REG (op))
1547 && REG_POINTER (SUBREG_REG (op))
1548 && GET_MODE (SUBREG_REG (op)) == Pmode))
1549 && !targetm.have_ptr_extend ())
1550 {
1551 temp
1552 = convert_memory_address_addr_space_1 (Pmode, op,
1553 ADDR_SPACE_GENERIC, false,
1554 true);
1555 if (temp)
1556 return temp;
1557 }
1558 #endif
1559 break;
1560
1561 case ZERO_EXTEND:
1562 /* Check for a zero extension of a subreg of a promoted
1563 variable, where the promotion is zero-extended, and the
1564 target mode is the same as the variable's promotion. */
1565 if (GET_CODE (op) == SUBREG
1566 && SUBREG_PROMOTED_VAR_P (op)
1567 && SUBREG_PROMOTED_UNSIGNED_P (op)
1568 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1569 {
1570 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1571 if (temp)
1572 return temp;
1573 }
1574
1575 /* Extending a widening multiplication should be canonicalized to
1576 a wider widening multiplication. */
1577 if (GET_CODE (op) == MULT)
1578 {
1579 rtx lhs = XEXP (op, 0);
1580 rtx rhs = XEXP (op, 1);
1581 enum rtx_code lcode = GET_CODE (lhs);
1582 enum rtx_code rcode = GET_CODE (rhs);
1583
1584 /* Widening multiplies usually extend both operands, but sometimes
1585 they use a shift to extract a portion of a register. */
1586 if ((lcode == ZERO_EXTEND
1587 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1588 && (rcode == ZERO_EXTEND
1589 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1590 {
1591 machine_mode lmode = GET_MODE (lhs);
1592 machine_mode rmode = GET_MODE (rhs);
1593 int bits;
1594
1595 if (lcode == LSHIFTRT)
1596 /* Number of bits not shifted off the end. */
1597 bits = (GET_MODE_UNIT_PRECISION (lmode)
1598 - INTVAL (XEXP (lhs, 1)));
1599 else /* lcode == ZERO_EXTEND */
1600 /* Size of inner mode. */
1601 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1602
1603 if (rcode == LSHIFTRT)
1604 bits += (GET_MODE_UNIT_PRECISION (rmode)
1605 - INTVAL (XEXP (rhs, 1)));
1606 else /* rcode == ZERO_EXTEND */
1607 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1608
1609 /* We can only widen multiplies if the result is mathematically
1610 equivalent. I.e. if overflow was impossible. */
1611 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1612 return simplify_gen_binary
1613 (MULT, mode,
1614 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1615 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1616 }
1617 }
1618
1619 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1620 if (GET_CODE (op) == ZERO_EXTEND)
1621 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1622 GET_MODE (XEXP (op, 0)));
1623
1624 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1625 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1626 GET_MODE_PRECISION (N) - I bits. */
1627 if (GET_CODE (op) == LSHIFTRT
1628 && GET_CODE (XEXP (op, 0)) == ASHIFT
1629 && is_a <scalar_int_mode> (mode, &int_mode)
1630 && CONST_INT_P (XEXP (op, 1))
1631 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1632 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1633 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1634 {
1635 scalar_int_mode tmode;
1636 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1637 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1638 {
1639 rtx inner =
1640 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1641 if (inner)
1642 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1643 inner, tmode);
1644 }
1645 }
1646
1647 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1648 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1649 of mode N. E.g.
1650 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1651 (and:SI (reg:SI) (const_int 63)). */
1652 if (partial_subreg_p (op)
1653 && is_a <scalar_int_mode> (mode, &int_mode)
1654 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1655 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1656 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1657 && subreg_lowpart_p (op)
1658 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1659 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1660 {
1661 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1662 return SUBREG_REG (op);
1663 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1664 op0_mode);
1665 }
1666
1667 #if defined(POINTERS_EXTEND_UNSIGNED)
1668 /* As we do not know which address space the pointer is referring to,
1669 we can do this only if the target does not support different pointer
1670 or address modes depending on the address space. */
1671 if (target_default_pointer_address_modes_p ()
1672 && POINTERS_EXTEND_UNSIGNED > 0
1673 && mode == Pmode && GET_MODE (op) == ptr_mode
1674 && (CONSTANT_P (op)
1675 || (GET_CODE (op) == SUBREG
1676 && REG_P (SUBREG_REG (op))
1677 && REG_POINTER (SUBREG_REG (op))
1678 && GET_MODE (SUBREG_REG (op)) == Pmode))
1679 && !targetm.have_ptr_extend ())
1680 {
1681 temp
1682 = convert_memory_address_addr_space_1 (Pmode, op,
1683 ADDR_SPACE_GENERIC, false,
1684 true);
1685 if (temp)
1686 return temp;
1687 }
1688 #endif
1689 break;
1690
1691 default:
1692 break;
1693 }
1694
1695 if (VECTOR_MODE_P (mode) && vec_duplicate_p (op, &elt))
1696 {
1697 /* Try applying the operator to ELT and see if that simplifies.
1698 We can duplicate the result if so.
1699
1700 The reason we don't use simplify_gen_unary is that it isn't
1701 necessarily a win to convert things like:
1702
1703 (neg:V (vec_duplicate:V (reg:S R)))
1704
1705 to:
1706
1707 (vec_duplicate:V (neg:S (reg:S R)))
1708
1709 The first might be done entirely in vector registers while the
1710 second might need a move between register files. */
1711 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1712 elt, GET_MODE_INNER (GET_MODE (op)));
1713 if (temp)
1714 return gen_vec_duplicate (mode, temp);
1715 }
1716
1717 return 0;
1718 }
1719
1720 /* Try to compute the value of a unary operation CODE whose output mode is to
1721 be MODE with input operand OP whose mode was originally OP_MODE.
1722 Return zero if the value cannot be computed. */
1723 rtx
1724 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1725 rtx op, machine_mode op_mode)
1726 {
1727 scalar_int_mode result_mode;
1728
1729 if (code == VEC_DUPLICATE)
1730 {
1731 gcc_assert (VECTOR_MODE_P (mode));
1732 if (GET_MODE (op) != VOIDmode)
1733 {
1734 if (!VECTOR_MODE_P (GET_MODE (op)))
1735 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1736 else
1737 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1738 (GET_MODE (op)));
1739 }
1740 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1741 return gen_const_vec_duplicate (mode, op);
1742 if (GET_CODE (op) == CONST_VECTOR)
1743 {
1744 unsigned int n_elts = GET_MODE_NUNITS (mode);
1745 unsigned int in_n_elts = CONST_VECTOR_NUNITS (op);
1746 gcc_assert (in_n_elts < n_elts);
1747 gcc_assert ((n_elts % in_n_elts) == 0);
1748 rtvec v = rtvec_alloc (n_elts);
1749 for (unsigned i = 0; i < n_elts; i++)
1750 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1751 return gen_rtx_CONST_VECTOR (mode, v);
1752 }
1753 }
1754
1755 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1756 {
1757 int elt_size = GET_MODE_UNIT_SIZE (mode);
1758 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1759 machine_mode opmode = GET_MODE (op);
1760 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1761 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1762 rtvec v = rtvec_alloc (n_elts);
1763 unsigned int i;
1764
1765 gcc_assert (op_n_elts == n_elts);
1766 for (i = 0; i < n_elts; i++)
1767 {
1768 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1769 CONST_VECTOR_ELT (op, i),
1770 GET_MODE_INNER (opmode));
1771 if (!x)
1772 return 0;
1773 RTVEC_ELT (v, i) = x;
1774 }
1775 return gen_rtx_CONST_VECTOR (mode, v);
1776 }
1777
1778 /* The order of these tests is critical so that, for example, we don't
1779 check the wrong mode (input vs. output) for a conversion operation,
1780 such as FIX. At some point, this should be simplified. */
1781
1782 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1783 {
1784 REAL_VALUE_TYPE d;
1785
1786 if (op_mode == VOIDmode)
1787 {
1788 /* CONST_INTs have VOIDmode as their mode. We assume that all
1789 the bits of the constant are significant, though this is a
1790 dangerous assumption, as CONST_INTs are often created and
1791 used with garbage in the bits outside of the precision of
1792 the implied mode of the const_int. */
1793 op_mode = MAX_MODE_INT;
1794 }
1795
1796 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1797
1798 /* Avoid the folding if flag_signaling_nans is on and
1799 operand is a signaling NaN. */
1800 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1801 return 0;
1802
1803 d = real_value_truncate (mode, d);
1804 return const_double_from_real_value (d, mode);
1805 }
1806 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1807 {
1808 REAL_VALUE_TYPE d;
1809
1810 if (op_mode == VOIDmode)
1811 {
1812 /* CONST_INTs have VOIDmode as their mode. We assume that all
1813 the bits of the constant are significant, though this is a
1814 dangerous assumption, as CONST_INTs are often created and
1815 used with garbage in the bits outside of the precision of
1816 the implied mode of the const_int. */
1817 op_mode = MAX_MODE_INT;
1818 }
1819
1820 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1821
1822 /* Avoid the folding if flag_signaling_nans is on and
1823 operand is a signaling NaN. */
1824 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1825 return 0;
1826
1827 d = real_value_truncate (mode, d);
1828 return const_double_from_real_value (d, mode);
1829 }
1830
1831 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1832 {
1833 unsigned int width = GET_MODE_PRECISION (result_mode);
1834 wide_int result;
1835 scalar_int_mode imode = (op_mode == VOIDmode
1836 ? result_mode
1837 : as_a <scalar_int_mode> (op_mode));
1838 rtx_mode_t op0 = rtx_mode_t (op, imode);
1839 int int_value;
1840
1841 #if TARGET_SUPPORTS_WIDE_INT == 0
1842 /* This assert keeps the simplification from producing a result
1843 that cannot be represented in a CONST_DOUBLE. A lot of
1844 upstream callers expect that this function never fails to
1845 simplify something, so if this check were added to the test
1846 above, the code would simply die later anyway. If this
1847 assert fires, you just need to make the port support wide int. */
1848 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1849 #endif
1850
1851 switch (code)
1852 {
1853 case NOT:
1854 result = wi::bit_not (op0);
1855 break;
1856
1857 case NEG:
1858 result = wi::neg (op0);
1859 break;
1860
1861 case ABS:
1862 result = wi::abs (op0);
1863 break;
1864
1865 case FFS:
1866 result = wi::shwi (wi::ffs (op0), result_mode);
1867 break;
1868
1869 case CLZ:
1870 if (wi::ne_p (op0, 0))
1871 int_value = wi::clz (op0);
1872 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1873 int_value = GET_MODE_PRECISION (imode);
1874 result = wi::shwi (int_value, result_mode);
1875 break;
1876
1877 case CLRSB:
1878 result = wi::shwi (wi::clrsb (op0), result_mode);
1879 break;
1880
1881 case CTZ:
1882 if (wi::ne_p (op0, 0))
1883 int_value = wi::ctz (op0);
1884 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1885 int_value = GET_MODE_PRECISION (imode);
1886 result = wi::shwi (int_value, result_mode);
1887 break;
1888
1889 case POPCOUNT:
1890 result = wi::shwi (wi::popcount (op0), result_mode);
1891 break;
1892
1893 case PARITY:
1894 result = wi::shwi (wi::parity (op0), result_mode);
1895 break;
1896
1897 case BSWAP:
1898 result = wide_int (op0).bswap ();
1899 break;
1900
1901 case TRUNCATE:
1902 case ZERO_EXTEND:
1903 result = wide_int::from (op0, width, UNSIGNED);
1904 break;
1905
1906 case SIGN_EXTEND:
1907 result = wide_int::from (op0, width, SIGNED);
1908 break;
1909
1910 case SQRT:
1911 default:
1912 return 0;
1913 }
1914
1915 return immed_wide_int_const (result, result_mode);
1916 }
1917
1918 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1919 && SCALAR_FLOAT_MODE_P (mode)
1920 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1921 {
1922 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1923 switch (code)
1924 {
1925 case SQRT:
1926 return 0;
1927 case ABS:
1928 d = real_value_abs (&d);
1929 break;
1930 case NEG:
1931 d = real_value_negate (&d);
1932 break;
1933 case FLOAT_TRUNCATE:
1934 /* Don't perform the operation if flag_signaling_nans is on
1935 and the operand is a signaling NaN. */
1936 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1937 return NULL_RTX;
1938 d = real_value_truncate (mode, d);
1939 break;
1940 case FLOAT_EXTEND:
1941 /* Don't perform the operation if flag_signaling_nans is on
1942 and the operand is a signaling NaN. */
1943 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1944 return NULL_RTX;
1945 /* All this does is change the mode, unless we are changing
1946 the mode class, in which case a real conversion is needed. */
1947 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1948 real_convert (&d, mode, &d);
1949 break;
1950 case FIX:
1951 /* Don't perform the operation if flag_signaling_nans is on
1952 and the operand is a signaling NaN. */
1953 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1954 return NULL_RTX;
1955 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1956 break;
1957 case NOT:
1958 {
1959 long tmp[4];
1960 int i;
1961
1962 real_to_target (tmp, &d, GET_MODE (op));
1963 for (i = 0; i < 4; i++)
1964 tmp[i] = ~tmp[i];
1965 real_from_target (&d, tmp, mode);
1966 break;
1967 }
1968 default:
1969 gcc_unreachable ();
1970 }
1971 return const_double_from_real_value (d, mode);
1972 }
1973 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1974 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1975 && is_int_mode (mode, &result_mode))
1976 {
1977 unsigned int width = GET_MODE_PRECISION (result_mode);
1978 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1979 operators are intentionally left unspecified (to ease implementation
1980 by target backends), for consistency, this routine implements the
1981 same semantics for constant folding as used by the middle-end. */
1982
1983 /* This was formerly used only for non-IEEE float.
1984 eggert@twinsun.com says it is safe for IEEE also. */
1985 REAL_VALUE_TYPE t;
1986 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1987 wide_int wmax, wmin;
1988 /* The "fail" flag is part of the ABI of real_to_integer, but we
1989 check things before making this call. */
1990 bool fail;
1991
1992 switch (code)
1993 {
1994 case FIX:
1995 if (REAL_VALUE_ISNAN (*x))
1996 return const0_rtx;
1997
1998 /* Test against the signed upper bound. */
1999 wmax = wi::max_value (width, SIGNED);
2000 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2001 if (real_less (&t, x))
2002 return immed_wide_int_const (wmax, mode);
2003
2004 /* Test against the signed lower bound. */
2005 wmin = wi::min_value (width, SIGNED);
2006 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2007 if (real_less (x, &t))
2008 return immed_wide_int_const (wmin, mode);
2009
2010 return immed_wide_int_const (real_to_integer (x, &fail, width),
2011 mode);
2012
2013 case UNSIGNED_FIX:
2014 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2015 return const0_rtx;
2016
2017 /* Test against the unsigned upper bound. */
2018 wmax = wi::max_value (width, UNSIGNED);
2019 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2020 if (real_less (&t, x))
2021 return immed_wide_int_const (wmax, mode);
2022
2023 return immed_wide_int_const (real_to_integer (x, &fail, width),
2024 mode);
2025
2026 default:
2027 gcc_unreachable ();
2028 }
2029 }
2030
2031 /* Handle polynomial integers. */
2032 else if (CONST_POLY_INT_P (op))
2033 {
2034 poly_wide_int result;
2035 switch (code)
2036 {
2037 case NEG:
2038 result = -const_poly_int_value (op);
2039 break;
2040
2041 case NOT:
2042 result = ~const_poly_int_value (op);
2043 break;
2044
2045 default:
2046 return NULL_RTX;
2047 }
2048 return immed_wide_int_const (result, mode);
2049 }
2050
2051 return NULL_RTX;
2052 }
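
/* Editorial example (not part of the original file): a minimal sketch of
   how a caller might use simplify_const_unary_operation to fold a unary
   operation on a constant operand.  The choice of SImode, the constant 42
   and the function name are assumptions made purely for illustration; the
   sketch is not meant to be called during an actual compilation.  */

static rtx ATTRIBUTE_UNUSED
example_fold_const_neg (void)
{
  /* (neg:SI (const_int 42)) folds to (const_int -42) via the wide_int
     path above.  */
  return simplify_const_unary_operation (NEG, SImode, GEN_INT (42), SImode);
}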
2053 \f
2054 /* Subroutine of simplify_binary_operation to simplify a binary operation
2055 CODE that can commute with byte swapping, with result mode MODE and
2056 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2057 Return zero if no simplification or canonicalization is possible. */
2058
2059 static rtx
2060 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2061 rtx op0, rtx op1)
2062 {
2063 rtx tem;
2064
2065 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 = bswap (C1). */
2066 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2067 {
2068 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2069 simplify_gen_unary (BSWAP, mode, op1, mode));
2070 return simplify_gen_unary (BSWAP, mode, tem, mode);
2071 }
2072
2073 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2074 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2075 {
2076 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2077 return simplify_gen_unary (BSWAP, mode, tem, mode);
2078 }
2079
2080 return NULL_RTX;
2081 }
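
/* Editorial example (not part of the original file): a sketch of the
   bswap-commuting rule implemented above.  The pseudo register number,
   SImode and the 0xff mask are illustrative assumptions only, and the
   sketch is not meant to be called during an actual compilation.  */

static rtx ATTRIBUTE_UNUSED
example_byte_swap_and (void)
{
  rtx x = gen_rtx_REG (SImode, 100);
  rtx swapped = simplify_gen_unary (BSWAP, SImode, x, SImode);
  /* (and (bswap x) 0xff) is rewritten as (bswap (and x 0xff000000)),
     i.e. the constant mask is byte-swapped and the AND is moved inside
     the BSWAP.  */
  return simplify_byte_swapping_operation (AND, SImode, swapped,
                                           GEN_INT (0xff));
}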
2082
2083 /* Subroutine of simplify_binary_operation to simplify a commutative,
2084 associative binary operation CODE with result mode MODE, operating
2085 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2086 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2087 canonicalization is possible. */
2088
2089 static rtx
2090 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2091 rtx op0, rtx op1)
2092 {
2093 rtx tem;
2094
2095 /* Linearize the operator to the left. */
2096 if (GET_CODE (op1) == code)
2097 {
2098 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2099 if (GET_CODE (op0) == code)
2100 {
2101 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2102 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2103 }
2104
2105 /* "a op (b op c)" becomes "(b op c) op a". */
2106 if (! swap_commutative_operands_p (op1, op0))
2107 return simplify_gen_binary (code, mode, op1, op0);
2108
2109 std::swap (op0, op1);
2110 }
2111
2112 if (GET_CODE (op0) == code)
2113 {
2114 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2115 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2116 {
2117 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2118 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2119 }
2120
2121 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2122 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2123 if (tem != 0)
2124 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2125
2126 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2127 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2128 if (tem != 0)
2129 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2130 }
2131
2132 return 0;
2133 }
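
/* Editorial example (not part of the original file): a sketch of the
   "(x op c) op y" -> "(x op y) op c" canonicalization above.  Register
   numbers and the use of SImode are illustrative assumptions, and the
   sketch is not meant to be called during an actual compilation.  */

static rtx ATTRIBUTE_UNUSED
example_reassociate_plus (void)
{
  rtx x = gen_rtx_REG (SImode, 101);
  rtx y = gen_rtx_REG (SImode, 102);
  rtx x_plus_1 = gen_rtx_PLUS (SImode, x, const1_rtx);
  /* (plus (plus x 1) y) is canonicalized to the form
     (plus (plus x y) (const_int 1)), moving the constant outermost.  */
  return simplify_associative_operation (PLUS, SImode, x_plus_1, y);
}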
2134
2135
2136 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2137 and OP1. Return 0 if no simplification is possible.
2138
2139 Don't use this for relational operations such as EQ or LT.
2140 Use simplify_relational_operation instead. */
2141 rtx
2142 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2143 rtx op0, rtx op1)
2144 {
2145 rtx trueop0, trueop1;
2146 rtx tem;
2147
2148 /* Relational operations don't work here. We must know the mode
2149 of the operands in order to do the comparison correctly.
2150 Assuming a full word can give incorrect results.
2151 Consider comparing 128 with -128 in QImode. */
2152 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2153 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2154
2155 /* Make sure the constant is second. */
2156 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2157 && swap_commutative_operands_p (op0, op1))
2158 std::swap (op0, op1);
2159
2160 trueop0 = avoid_constant_pool_reference (op0);
2161 trueop1 = avoid_constant_pool_reference (op1);
2162
2163 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2164 if (tem)
2165 return tem;
2166 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2167
2168 if (tem)
2169 return tem;
2170
2171 /* If the above steps did not result in a simplification and op0 or op1
2172 were constant pool references, use the referenced constants directly. */
2173 if (trueop0 != op0 || trueop1 != op1)
2174 return simplify_gen_binary (code, mode, trueop0, trueop1);
2175
2176 return NULL_RTX;
2177 }
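
/* Editorial example (not part of the original file): the entry point above
   folds fully constant expressions directly.  SImode and the constants are
   arbitrary illustrative choices.  */

static rtx ATTRIBUTE_UNUSED
example_fold_const_plus (void)
{
  /* (plus:SI (const_int 2) (const_int 3)) folds to (const_int 5) through
     simplify_const_binary_operation.  */
  return simplify_binary_operation (PLUS, SImode, GEN_INT (2), GEN_INT (3));
}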
2178
2179 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2180 which OP0 and OP1 are both vector series or vector duplicates
2181 (which are really just series with a step of 0). If so, try to
2182 form a new series by applying CODE to the bases and to the steps.
2183 Return null if no simplification is possible.
2184
2185 MODE is the mode of the operation and is known to be a vector
2186 integer mode. */
2187
2188 static rtx
2189 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2190 rtx op0, rtx op1)
2191 {
2192 rtx base0, step0;
2193 if (vec_duplicate_p (op0, &base0))
2194 step0 = const0_rtx;
2195 else if (!vec_series_p (op0, &base0, &step0))
2196 return NULL_RTX;
2197
2198 rtx base1, step1;
2199 if (vec_duplicate_p (op1, &base1))
2200 step1 = const0_rtx;
2201 else if (!vec_series_p (op1, &base1, &step1))
2202 return NULL_RTX;
2203
2204 /* Only create a new series if we can simplify both parts. In other
2205 cases this isn't really a simplification, and it's not necessarily
2206 a win to replace a vector operation with a scalar operation. */
2207 scalar_mode inner_mode = GET_MODE_INNER (mode);
2208 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2209 if (!new_base)
2210 return NULL_RTX;
2211
2212 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2213 if (!new_step)
2214 return NULL_RTX;
2215
2216 return gen_vec_series (mode, new_base, new_step);
2217 }
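
/* Editorial note (not part of the original file): as a worked example of
   the rule above, for a vector mode V with scalar element mode S,

     (plus:V (vec_series:V (const_int 0) (const_int 1))
             (vec_duplicate:V (reg:S R)))

   simplifies to

     (vec_series:V (reg:S R) (const_int 1))

   because both the bases (0 + R -> R) and the steps (1 + 0 -> 1)
   simplify.  */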
2218
2219 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2220 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2221 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2222 actual constants. */
2223
2224 static rtx
2225 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2226 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2227 {
2228 rtx tem, reversed, opleft, opright, elt0, elt1;
2229 HOST_WIDE_INT val;
2230 scalar_int_mode int_mode, inner_mode;
2231 poly_int64 offset;
2232
2233 /* Even if we can't compute a constant result,
2234 there are some cases worth simplifying. */
2235
2236 switch (code)
2237 {
2238 case PLUS:
2239 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2240 when x is NaN, infinite, or finite and nonzero. They aren't
2241 when x is -0 and the rounding mode is not towards -infinity,
2242 since (-0) + 0 is then 0. */
2243 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2244 return op0;
2245
2246 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2247 transformations are safe even for IEEE. */
2248 if (GET_CODE (op0) == NEG)
2249 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2250 else if (GET_CODE (op1) == NEG)
2251 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2252
2253 /* (~a) + 1 -> -a */
2254 if (INTEGRAL_MODE_P (mode)
2255 && GET_CODE (op0) == NOT
2256 && trueop1 == const1_rtx)
2257 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2258
2259 /* Handle both-operands-constant cases. We can only add
2260 CONST_INTs to constants since the sum of relocatable symbols
2261 can't be handled by most assemblers. Don't add CONST_INT
2262 to CONST_INT since overflow won't be computed properly if the
2263 mode is wider than HOST_BITS_PER_WIDE_INT. */
2264
2265 if ((GET_CODE (op0) == CONST
2266 || GET_CODE (op0) == SYMBOL_REF
2267 || GET_CODE (op0) == LABEL_REF)
2268 && CONST_INT_P (op1))
2269 return plus_constant (mode, op0, INTVAL (op1));
2270 else if ((GET_CODE (op1) == CONST
2271 || GET_CODE (op1) == SYMBOL_REF
2272 || GET_CODE (op1) == LABEL_REF)
2273 && CONST_INT_P (op0))
2274 return plus_constant (mode, op1, INTVAL (op0));
2275
2276 /* See if this is something like X * C + X or vice versa or
2277 if the multiplication is written as a shift. If so, we can
2278 distribute and make a new multiply or shift that combines
2279 the coefficients (X * 3 + X becomes X * 4, for example).
2280 But don't make something more expensive than we had before. */
2281
2282 if (is_a <scalar_int_mode> (mode, &int_mode))
2283 {
2284 rtx lhs = op0, rhs = op1;
2285
2286 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2287 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2288
2289 if (GET_CODE (lhs) == NEG)
2290 {
2291 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2292 lhs = XEXP (lhs, 0);
2293 }
2294 else if (GET_CODE (lhs) == MULT
2295 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2296 {
2297 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2298 lhs = XEXP (lhs, 0);
2299 }
2300 else if (GET_CODE (lhs) == ASHIFT
2301 && CONST_INT_P (XEXP (lhs, 1))
2302 && INTVAL (XEXP (lhs, 1)) >= 0
2303 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2304 {
2305 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2306 GET_MODE_PRECISION (int_mode));
2307 lhs = XEXP (lhs, 0);
2308 }
2309
2310 if (GET_CODE (rhs) == NEG)
2311 {
2312 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2313 rhs = XEXP (rhs, 0);
2314 }
2315 else if (GET_CODE (rhs) == MULT
2316 && CONST_INT_P (XEXP (rhs, 1)))
2317 {
2318 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2319 rhs = XEXP (rhs, 0);
2320 }
2321 else if (GET_CODE (rhs) == ASHIFT
2322 && CONST_INT_P (XEXP (rhs, 1))
2323 && INTVAL (XEXP (rhs, 1)) >= 0
2324 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2325 {
2326 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2327 GET_MODE_PRECISION (int_mode));
2328 rhs = XEXP (rhs, 0);
2329 }
2330
2331 if (rtx_equal_p (lhs, rhs))
2332 {
2333 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2334 rtx coeff;
2335 bool speed = optimize_function_for_speed_p (cfun);
2336
2337 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2338
2339 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2340 return (set_src_cost (tem, int_mode, speed)
2341 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2342 }
2343 }
2344
2345 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2346 if (CONST_SCALAR_INT_P (op1)
2347 && GET_CODE (op0) == XOR
2348 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2349 && mode_signbit_p (mode, op1))
2350 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2351 simplify_gen_binary (XOR, mode, op1,
2352 XEXP (op0, 1)));
2353
2354 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2355 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2356 && GET_CODE (op0) == MULT
2357 && GET_CODE (XEXP (op0, 0)) == NEG)
2358 {
2359 rtx in1, in2;
2360
2361 in1 = XEXP (XEXP (op0, 0), 0);
2362 in2 = XEXP (op0, 1);
2363 return simplify_gen_binary (MINUS, mode, op1,
2364 simplify_gen_binary (MULT, mode,
2365 in1, in2));
2366 }
2367
2368 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2369 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2370 is 1. */
2371 if (COMPARISON_P (op0)
2372 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2373 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2374 && (reversed = reversed_comparison (op0, mode)))
2375 return
2376 simplify_gen_unary (NEG, mode, reversed, mode);
2377
2378 /* If one of the operands is a PLUS or a MINUS, see if we can
2379 simplify this by the associative law.
2380 Don't use the associative law for floating point.
2381 The inaccuracy makes it nonassociative,
2382 and subtle programs can break if operations are associated. */
2383
2384 if (INTEGRAL_MODE_P (mode)
2385 && (plus_minus_operand_p (op0)
2386 || plus_minus_operand_p (op1))
2387 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2388 return tem;
2389
2390 /* Reassociate floating point addition only when the user
2391 specifies associative math operations. */
2392 if (FLOAT_MODE_P (mode)
2393 && flag_associative_math)
2394 {
2395 tem = simplify_associative_operation (code, mode, op0, op1);
2396 if (tem)
2397 return tem;
2398 }
2399
2400 /* Handle vector series. */
2401 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2402 {
2403 tem = simplify_binary_operation_series (code, mode, op0, op1);
2404 if (tem)
2405 return tem;
2406 }
2407 break;
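/* Editorial note (not part of the original file): a concrete instance of
   the coefficient-merging PLUS transformation above, assuming a 32-bit
   int_mode: (plus (ashift X (const_int 2)) X) is treated as X*4 + X*1
   and becomes (mult X (const_int 5)), but only if set_src_cost says the
   result is no more expensive than the original expression.  */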
2408
2409 case COMPARE:
2410 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2411 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2412 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2413 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2414 {
2415 rtx xop00 = XEXP (op0, 0);
2416 rtx xop10 = XEXP (op1, 0);
2417
2418 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2419 return xop00;
2420
2421 if (REG_P (xop00) && REG_P (xop10)
2422 && REGNO (xop00) == REGNO (xop10)
2423 && GET_MODE (xop00) == mode
2424 && GET_MODE (xop10) == mode
2425 && GET_MODE_CLASS (mode) == MODE_CC)
2426 return xop00;
2427 }
2428 break;
2429
2430 case MINUS:
2431 /* We can't assume x-x is 0 even with non-IEEE floating point,
2432 but since it is zero except in very strange circumstances, we
2433 will treat it as zero with -ffinite-math-only. */
2434 if (rtx_equal_p (trueop0, trueop1)
2435 && ! side_effects_p (op0)
2436 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2437 return CONST0_RTX (mode);
2438
2439 /* Change subtraction from zero into negation. (0 - x) is the
2440 same as -x when x is NaN, infinite, or finite and nonzero.
2441 But if the mode has signed zeros, and does not round towards
2442 -infinity, then 0 - 0 is 0, not -0. */
2443 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2444 return simplify_gen_unary (NEG, mode, op1, mode);
2445
2446 /* (-1 - a) is ~a, unless the expression contains symbolic
2447 constants, in which case not retaining additions and
2448 subtractions could cause invalid assembly to be produced. */
2449 if (trueop0 == constm1_rtx
2450 && !contains_symbolic_reference_p (op1))
2451 return simplify_gen_unary (NOT, mode, op1, mode);
2452
2453 /* Subtracting 0 has no effect unless the mode has signed zeros
2454 and supports rounding towards -infinity. In such a case,
2455 0 - 0 is -0. */
2456 if (!(HONOR_SIGNED_ZEROS (mode)
2457 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2458 && trueop1 == CONST0_RTX (mode))
2459 return op0;
2460
2461 /* See if this is something like X * C - X or vice versa or
2462 if the multiplication is written as a shift. If so, we can
2463 distribute and make a new multiply, shift, or maybe just
2464 have X (if C is 2 in the example above). But don't make
2465 something more expensive than we had before. */
2466
2467 if (is_a <scalar_int_mode> (mode, &int_mode))
2468 {
2469 rtx lhs = op0, rhs = op1;
2470
2471 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2472 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2473
2474 if (GET_CODE (lhs) == NEG)
2475 {
2476 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2477 lhs = XEXP (lhs, 0);
2478 }
2479 else if (GET_CODE (lhs) == MULT
2480 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2481 {
2482 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2483 lhs = XEXP (lhs, 0);
2484 }
2485 else if (GET_CODE (lhs) == ASHIFT
2486 && CONST_INT_P (XEXP (lhs, 1))
2487 && INTVAL (XEXP (lhs, 1)) >= 0
2488 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2489 {
2490 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2491 GET_MODE_PRECISION (int_mode));
2492 lhs = XEXP (lhs, 0);
2493 }
2494
2495 if (GET_CODE (rhs) == NEG)
2496 {
2497 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2498 rhs = XEXP (rhs, 0);
2499 }
2500 else if (GET_CODE (rhs) == MULT
2501 && CONST_INT_P (XEXP (rhs, 1)))
2502 {
2503 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2504 rhs = XEXP (rhs, 0);
2505 }
2506 else if (GET_CODE (rhs) == ASHIFT
2507 && CONST_INT_P (XEXP (rhs, 1))
2508 && INTVAL (XEXP (rhs, 1)) >= 0
2509 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2510 {
2511 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2512 GET_MODE_PRECISION (int_mode));
2513 negcoeff1 = -negcoeff1;
2514 rhs = XEXP (rhs, 0);
2515 }
2516
2517 if (rtx_equal_p (lhs, rhs))
2518 {
2519 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2520 rtx coeff;
2521 bool speed = optimize_function_for_speed_p (cfun);
2522
2523 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2524
2525 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2526 return (set_src_cost (tem, int_mode, speed)
2527 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2528 }
2529 }
2530
2531 /* (a - (-b)) -> (a + b). True even for IEEE. */
2532 if (GET_CODE (op1) == NEG)
2533 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2534
2535 /* (-x - c) may be simplified as (-c - x). */
2536 if (GET_CODE (op0) == NEG
2537 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2538 {
2539 tem = simplify_unary_operation (NEG, mode, op1, mode);
2540 if (tem)
2541 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2542 }
2543
2544 if ((GET_CODE (op0) == CONST
2545 || GET_CODE (op0) == SYMBOL_REF
2546 || GET_CODE (op0) == LABEL_REF)
2547 && poly_int_rtx_p (op1, &offset))
2548 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2549
2550 /* Don't let a relocatable value get a negative coeff. */
2551 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2552 return simplify_gen_binary (PLUS, mode,
2553 op0,
2554 neg_const_int (mode, op1));
2555
2556 /* (x - (x & y)) -> (x & ~y) */
2557 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2558 {
2559 if (rtx_equal_p (op0, XEXP (op1, 0)))
2560 {
2561 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2562 GET_MODE (XEXP (op1, 1)));
2563 return simplify_gen_binary (AND, mode, op0, tem);
2564 }
2565 if (rtx_equal_p (op0, XEXP (op1, 1)))
2566 {
2567 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2568 GET_MODE (XEXP (op1, 0)));
2569 return simplify_gen_binary (AND, mode, op0, tem);
2570 }
2571 }
2572
2573 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2574 by reversing the comparison code if valid. */
2575 if (STORE_FLAG_VALUE == 1
2576 && trueop0 == const1_rtx
2577 && COMPARISON_P (op1)
2578 && (reversed = reversed_comparison (op1, mode)))
2579 return reversed;
2580
2581 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2582 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2583 && GET_CODE (op1) == MULT
2584 && GET_CODE (XEXP (op1, 0)) == NEG)
2585 {
2586 rtx in1, in2;
2587
2588 in1 = XEXP (XEXP (op1, 0), 0);
2589 in2 = XEXP (op1, 1);
2590 return simplify_gen_binary (PLUS, mode,
2591 simplify_gen_binary (MULT, mode,
2592 in1, in2),
2593 op0);
2594 }
2595
2596 /* Canonicalize (minus (neg A) (mult B C)) to
2597 (minus (mult (neg B) C) A). */
2598 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2599 && GET_CODE (op1) == MULT
2600 && GET_CODE (op0) == NEG)
2601 {
2602 rtx in1, in2;
2603
2604 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2605 in2 = XEXP (op1, 1);
2606 return simplify_gen_binary (MINUS, mode,
2607 simplify_gen_binary (MULT, mode,
2608 in1, in2),
2609 XEXP (op0, 0));
2610 }
2611
2612 /* If one of the operands is a PLUS or a MINUS, see if we can
2613 simplify this by the associative law. This will, for example,
2614 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2615 Don't use the associative law for floating point.
2616 The inaccuracy makes it nonassociative,
2617 and subtle programs can break if operations are associated. */
2618
2619 if (INTEGRAL_MODE_P (mode)
2620 && (plus_minus_operand_p (op0)
2621 || plus_minus_operand_p (op1))
2622 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2623 return tem;
2624
2625 /* Handle vector series. */
2626 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2627 {
2628 tem = simplify_binary_operation_series (code, mode, op0, op1);
2629 if (tem)
2630 return tem;
2631 }
2632 break;
2633
2634 case MULT:
2635 if (trueop1 == constm1_rtx)
2636 return simplify_gen_unary (NEG, mode, op0, mode);
2637
2638 if (GET_CODE (op0) == NEG)
2639 {
2640 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2641 /* If op1 is a MULT as well and simplify_unary_operation
2642 just moved the NEG to the second operand, simplify_gen_binary
2643 below could, through simplify_associative_operation, move
2644 the NEG around again and recurse endlessly. */
2645 if (temp
2646 && GET_CODE (op1) == MULT
2647 && GET_CODE (temp) == MULT
2648 && XEXP (op1, 0) == XEXP (temp, 0)
2649 && GET_CODE (XEXP (temp, 1)) == NEG
2650 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2651 temp = NULL_RTX;
2652 if (temp)
2653 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2654 }
2655 if (GET_CODE (op1) == NEG)
2656 {
2657 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2658 /* If op0 is a MULT as well and simplify_unary_operation
2659 just moved the NEG to the second operand, simplify_gen_binary
2660 below could, through simplify_associative_operation, move
2661 the NEG around again and recurse endlessly. */
2662 if (temp
2663 && GET_CODE (op0) == MULT
2664 && GET_CODE (temp) == MULT
2665 && XEXP (op0, 0) == XEXP (temp, 0)
2666 && GET_CODE (XEXP (temp, 1)) == NEG
2667 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2668 temp = NULL_RTX;
2669 if (temp)
2670 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2671 }
2672
2673 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2674 x is NaN, since x * 0 is then also NaN. Nor is it valid
2675 when the mode has signed zeros, since multiplying a negative
2676 number by 0 will give -0, not 0. */
2677 if (!HONOR_NANS (mode)
2678 && !HONOR_SIGNED_ZEROS (mode)
2679 && trueop1 == CONST0_RTX (mode)
2680 && ! side_effects_p (op0))
2681 return op1;
2682
2683 /* In IEEE floating point, x*1 is not equivalent to x for
2684 signalling NaNs. */
2685 if (!HONOR_SNANS (mode)
2686 && trueop1 == CONST1_RTX (mode))
2687 return op0;
2688
2689 /* Convert multiply by constant power of two into shift. */
2690 if (CONST_SCALAR_INT_P (trueop1))
2691 {
2692 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2693 if (val >= 0)
2694 return simplify_gen_binary (ASHIFT, mode, op0,
2695 gen_int_shift_amount (mode, val));
2696 }
2697
2698 /* x*2 is x+x and x*(-1) is -x */
2699 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2700 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2701 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2702 && GET_MODE (op0) == mode)
2703 {
2704 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2705
2706 if (real_equal (d1, &dconst2))
2707 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2708
2709 if (!HONOR_SNANS (mode)
2710 && real_equal (d1, &dconstm1))
2711 return simplify_gen_unary (NEG, mode, op0, mode);
2712 }
2713
2714 /* Optimize -x * -x as x * x. */
2715 if (FLOAT_MODE_P (mode)
2716 && GET_CODE (op0) == NEG
2717 && GET_CODE (op1) == NEG
2718 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2719 && !side_effects_p (XEXP (op0, 0)))
2720 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2721
2722 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2723 if (SCALAR_FLOAT_MODE_P (mode)
2724 && GET_CODE (op0) == ABS
2725 && GET_CODE (op1) == ABS
2726 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2727 && !side_effects_p (XEXP (op0, 0)))
2728 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2729
2730 /* Reassociate multiplication, but for floating point MULTs
2731 only when the user specifies unsafe math optimizations. */
2732 if (! FLOAT_MODE_P (mode)
2733 || flag_unsafe_math_optimizations)
2734 {
2735 tem = simplify_associative_operation (code, mode, op0, op1);
2736 if (tem)
2737 return tem;
2738 }
2739 break;
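/* Editorial note (not part of the original file): as an example of the
   power-of-two rule above, (mult:SI X (const_int 8)) becomes
   (ashift:SI X (const_int 3)), since wi::exact_log2 (8) == 3.  */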
2740
2741 case IOR:
2742 if (trueop1 == CONST0_RTX (mode))
2743 return op0;
2744 if (INTEGRAL_MODE_P (mode)
2745 && trueop1 == CONSTM1_RTX (mode)
2746 && !side_effects_p (op0))
2747 return op1;
2748 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2749 return op0;
2750 /* A | (~A) -> -1 */
2751 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2752 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2753 && ! side_effects_p (op0)
2754 && SCALAR_INT_MODE_P (mode))
2755 return constm1_rtx;
2756
2757 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2758 if (CONST_INT_P (op1)
2759 && HWI_COMPUTABLE_MODE_P (mode)
2760 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2761 && !side_effects_p (op0))
2762 return op1;
2763
2764 /* Canonicalize (X & C1) | C2. */
2765 if (GET_CODE (op0) == AND
2766 && CONST_INT_P (trueop1)
2767 && CONST_INT_P (XEXP (op0, 1)))
2768 {
2769 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2770 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2771 HOST_WIDE_INT c2 = INTVAL (trueop1);
2772
2773 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2774 if ((c1 & c2) == c1
2775 && !side_effects_p (XEXP (op0, 0)))
2776 return trueop1;
2777
2778 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2779 if (((c1|c2) & mask) == mask)
2780 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2781 }
2782
2783 /* Convert (A & B) | A to A. */
2784 if (GET_CODE (op0) == AND
2785 && (rtx_equal_p (XEXP (op0, 0), op1)
2786 || rtx_equal_p (XEXP (op0, 1), op1))
2787 && ! side_effects_p (XEXP (op0, 0))
2788 && ! side_effects_p (XEXP (op0, 1)))
2789 return op1;
2790
2791 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2792 mode size to (rotate A CX). */
2793
2794 if (GET_CODE (op1) == ASHIFT
2795 || GET_CODE (op1) == SUBREG)
2796 {
2797 opleft = op1;
2798 opright = op0;
2799 }
2800 else
2801 {
2802 opright = op1;
2803 opleft = op0;
2804 }
2805
2806 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2807 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2808 && CONST_INT_P (XEXP (opleft, 1))
2809 && CONST_INT_P (XEXP (opright, 1))
2810 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2811 == GET_MODE_UNIT_PRECISION (mode)))
2812 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2813
2814 /* Same, but for ashift that has been "simplified" to a wider mode
2815 by simplify_shift_const. */
2816
2817 if (GET_CODE (opleft) == SUBREG
2818 && is_a <scalar_int_mode> (mode, &int_mode)
2819 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2820 &inner_mode)
2821 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2822 && GET_CODE (opright) == LSHIFTRT
2823 && GET_CODE (XEXP (opright, 0)) == SUBREG
2824 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
2825 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2826 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2827 SUBREG_REG (XEXP (opright, 0)))
2828 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2829 && CONST_INT_P (XEXP (opright, 1))
2830 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2831 + INTVAL (XEXP (opright, 1))
2832 == GET_MODE_PRECISION (int_mode)))
2833 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2834 XEXP (SUBREG_REG (opleft), 1));
2835
2836 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2837 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the
2838 PLUS does not affect any of the bits in OP1, we can do the
2839 IOR as a PLUS and we can associate. This is valid if OP1
2840 can be safely shifted left C bits. */
2841 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2842 && GET_CODE (XEXP (op0, 0)) == PLUS
2843 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2844 && CONST_INT_P (XEXP (op0, 1))
2845 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2846 {
2847 int count = INTVAL (XEXP (op0, 1));
2848 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2849
2850 if (mask >> count == INTVAL (trueop1)
2851 && trunc_int_for_mode (mask, mode) == mask
2852 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2853 return simplify_gen_binary (ASHIFTRT, mode,
2854 plus_constant (mode, XEXP (op0, 0),
2855 mask),
2856 XEXP (op0, 1));
2857 }
2858
2859 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2860 if (tem)
2861 return tem;
2862
2863 tem = simplify_associative_operation (code, mode, op0, op1);
2864 if (tem)
2865 return tem;
2866 break;
2867
2868 case XOR:
2869 if (trueop1 == CONST0_RTX (mode))
2870 return op0;
2871 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2872 return simplify_gen_unary (NOT, mode, op0, mode);
2873 if (rtx_equal_p (trueop0, trueop1)
2874 && ! side_effects_p (op0)
2875 && GET_MODE_CLASS (mode) != MODE_CC)
2876 return CONST0_RTX (mode);
2877
2878 /* Canonicalize XOR of the most significant bit to PLUS. */
2879 if (CONST_SCALAR_INT_P (op1)
2880 && mode_signbit_p (mode, op1))
2881 return simplify_gen_binary (PLUS, mode, op0, op1);
2882 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2883 if (CONST_SCALAR_INT_P (op1)
2884 && GET_CODE (op0) == PLUS
2885 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2886 && mode_signbit_p (mode, XEXP (op0, 1)))
2887 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2888 simplify_gen_binary (XOR, mode, op1,
2889 XEXP (op0, 1)));
2890
2891 /* If we are XORing two things that have no bits in common,
2892 convert them into an IOR. This helps to detect rotates encoded
2893 as shift pairs (handled in the IOR case above) and possibly other simplifications. */
2894
2895 if (HWI_COMPUTABLE_MODE_P (mode)
2896 && (nonzero_bits (op0, mode)
2897 & nonzero_bits (op1, mode)) == 0)
2898 return (simplify_gen_binary (IOR, mode, op0, op1));
2899
2900 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2901 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2902 (NOT y). */
2903 {
2904 int num_negated = 0;
2905
2906 if (GET_CODE (op0) == NOT)
2907 num_negated++, op0 = XEXP (op0, 0);
2908 if (GET_CODE (op1) == NOT)
2909 num_negated++, op1 = XEXP (op1, 0);
2910
2911 if (num_negated == 2)
2912 return simplify_gen_binary (XOR, mode, op0, op1);
2913 else if (num_negated == 1)
2914 return simplify_gen_unary (NOT, mode,
2915 simplify_gen_binary (XOR, mode, op0, op1),
2916 mode);
2917 }
2918
2919 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2920 correspond to a machine insn or result in further simplifications
2921 if B is a constant. */
2922
2923 if (GET_CODE (op0) == AND
2924 && rtx_equal_p (XEXP (op0, 1), op1)
2925 && ! side_effects_p (op1))
2926 return simplify_gen_binary (AND, mode,
2927 simplify_gen_unary (NOT, mode,
2928 XEXP (op0, 0), mode),
2929 op1);
2930
2931 else if (GET_CODE (op0) == AND
2932 && rtx_equal_p (XEXP (op0, 0), op1)
2933 && ! side_effects_p (op1))
2934 return simplify_gen_binary (AND, mode,
2935 simplify_gen_unary (NOT, mode,
2936 XEXP (op0, 1), mode),
2937 op1);
2938
2939 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2940 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2941 out bits inverted twice and not set by C. Similarly, given
2942 (xor (and (xor A B) C) D), simplify without inverting C in
2943 the xor operand: (xor (and A C) (B&C)^D).
2944 */
2945 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2946 && GET_CODE (XEXP (op0, 0)) == XOR
2947 && CONST_INT_P (op1)
2948 && CONST_INT_P (XEXP (op0, 1))
2949 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2950 {
2951 enum rtx_code op = GET_CODE (op0);
2952 rtx a = XEXP (XEXP (op0, 0), 0);
2953 rtx b = XEXP (XEXP (op0, 0), 1);
2954 rtx c = XEXP (op0, 1);
2955 rtx d = op1;
2956 HOST_WIDE_INT bval = INTVAL (b);
2957 HOST_WIDE_INT cval = INTVAL (c);
2958 HOST_WIDE_INT dval = INTVAL (d);
2959 HOST_WIDE_INT xcval;
2960
2961 if (op == IOR)
2962 xcval = ~cval;
2963 else
2964 xcval = cval;
2965
2966 return simplify_gen_binary (XOR, mode,
2967 simplify_gen_binary (op, mode, a, c),
2968 gen_int_mode ((bval & xcval) ^ dval,
2969 mode));
2970 }
2971
2972 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2973 we can transform like this:
2974 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2975 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2976 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2977 Attempt a few simplifications when B and C are both constants. */
2978 if (GET_CODE (op0) == AND
2979 && CONST_INT_P (op1)
2980 && CONST_INT_P (XEXP (op0, 1)))
2981 {
2982 rtx a = XEXP (op0, 0);
2983 rtx b = XEXP (op0, 1);
2984 rtx c = op1;
2985 HOST_WIDE_INT bval = INTVAL (b);
2986 HOST_WIDE_INT cval = INTVAL (c);
2987
2988 /* Instead of computing ~A&C, we compute its negated value,
2989 A|~C. If it yields -1, ~A&C is zero, so we can
2990 optimize for sure. If it does not simplify, we still try
2991 to compute ~A&C below, but since that always allocates
2992 RTL, we don't try that before committing to returning a
2993 simplified expression. */
2994 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2995 GEN_INT (~cval));
2996
2997 if ((~cval & bval) == 0)
2998 {
2999 rtx na_c = NULL_RTX;
3000 if (n_na_c)
3001 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3002 else
3003 {
3004 /* If ~A does not simplify, don't bother: we don't
3005 want to simplify 2 operations into 3, and if na_c
3006 were to simplify with na, n_na_c would have
3007 simplified as well. */
3008 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3009 if (na)
3010 na_c = simplify_gen_binary (AND, mode, na, c);
3011 }
3012
3013 /* Try to simplify ~A&C | ~B&C. */
3014 if (na_c != NULL_RTX)
3015 return simplify_gen_binary (IOR, mode, na_c,
3016 gen_int_mode (~bval & cval, mode));
3017 }
3018 else
3019 {
3020 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3021 if (n_na_c == CONSTM1_RTX (mode))
3022 {
3023 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3024 gen_int_mode (~cval & bval,
3025 mode));
3026 return simplify_gen_binary (IOR, mode, a_nc_b,
3027 gen_int_mode (~bval & cval,
3028 mode));
3029 }
3030 }
3031 }
3032
3033 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3034 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3035 machines, and also has shorter instruction path length. */
3036 if (GET_CODE (op0) == AND
3037 && GET_CODE (XEXP (op0, 0)) == XOR
3038 && CONST_INT_P (XEXP (op0, 1))
3039 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3040 {
3041 rtx a = trueop1;
3042 rtx b = XEXP (XEXP (op0, 0), 1);
3043 rtx c = XEXP (op0, 1);
3044 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3045 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3046 rtx bc = simplify_gen_binary (AND, mode, b, c);
3047 return simplify_gen_binary (IOR, mode, a_nc, bc);
3048 }
3049 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3050 else if (GET_CODE (op0) == AND
3051 && GET_CODE (XEXP (op0, 0)) == XOR
3052 && CONST_INT_P (XEXP (op0, 1))
3053 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3054 {
3055 rtx a = XEXP (XEXP (op0, 0), 0);
3056 rtx b = trueop1;
3057 rtx c = XEXP (op0, 1);
3058 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3059 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3060 rtx ac = simplify_gen_binary (AND, mode, a, c);
3061 return simplify_gen_binary (IOR, mode, ac, b_nc);
3062 }
3063
3064 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3065 comparison if STORE_FLAG_VALUE is 1. */
3066 if (STORE_FLAG_VALUE == 1
3067 && trueop1 == const1_rtx
3068 && COMPARISON_P (op0)
3069 && (reversed = reversed_comparison (op0, mode)))
3070 return reversed;
3071
3072 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3073 is (lt foo (const_int 0)), so we can perform the above
3074 simplification if STORE_FLAG_VALUE is 1. */
3075
3076 if (is_a <scalar_int_mode> (mode, &int_mode)
3077 && STORE_FLAG_VALUE == 1
3078 && trueop1 == const1_rtx
3079 && GET_CODE (op0) == LSHIFTRT
3080 && CONST_INT_P (XEXP (op0, 1))
3081 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3082 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3083
3084 /* (xor (comparison foo bar) (const_int sign-bit))
3085 when STORE_FLAG_VALUE is the sign bit. */
3086 if (is_a <scalar_int_mode> (mode, &int_mode)
3087 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3088 && trueop1 == const_true_rtx
3089 && COMPARISON_P (op0)
3090 && (reversed = reversed_comparison (op0, int_mode)))
3091 return reversed;
3092
3093 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3094 if (tem)
3095 return tem;
3096
3097 tem = simplify_associative_operation (code, mode, op0, op1);
3098 if (tem)
3099 return tem;
3100 break;
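/* Editorial note (not part of the original file): as an instance of the
   sign-bit canonicalization above, in SImode (xor X (const_int 0x80000000))
   becomes (plus X (const_int 0x80000000)); toggling the sign bit and
   adding it are equivalent modulo 2^32.  */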
3101
3102 case AND:
3103 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3104 return trueop1;
3105 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3106 return op0;
3107 if (HWI_COMPUTABLE_MODE_P (mode))
3108 {
3109 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3110 HOST_WIDE_INT nzop1;
3111 if (CONST_INT_P (trueop1))
3112 {
3113 HOST_WIDE_INT val1 = INTVAL (trueop1);
3114 /* If we are turning off bits already known off in OP0, we need
3115 not do an AND. */
3116 if ((nzop0 & ~val1) == 0)
3117 return op0;
3118 }
3119 nzop1 = nonzero_bits (trueop1, mode);
3120 /* If we are clearing all the nonzero bits, the result is zero. */
3121 if ((nzop1 & nzop0) == 0
3122 && !side_effects_p (op0) && !side_effects_p (op1))
3123 return CONST0_RTX (mode);
3124 }
3125 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3126 && GET_MODE_CLASS (mode) != MODE_CC)
3127 return op0;
3128 /* A & (~A) -> 0 */
3129 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3130 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3131 && ! side_effects_p (op0)
3132 && GET_MODE_CLASS (mode) != MODE_CC)
3133 return CONST0_RTX (mode);
3134
3135 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3136 there are no nonzero bits of C outside of X's mode. */
3137 if ((GET_CODE (op0) == SIGN_EXTEND
3138 || GET_CODE (op0) == ZERO_EXTEND)
3139 && CONST_INT_P (trueop1)
3140 && HWI_COMPUTABLE_MODE_P (mode)
3141 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3142 & UINTVAL (trueop1)) == 0)
3143 {
3144 machine_mode imode = GET_MODE (XEXP (op0, 0));
3145 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3146 gen_int_mode (INTVAL (trueop1),
3147 imode));
3148 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3149 }
3150
3151 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3152 we might be able to further simplify the AND with X and potentially
3153 remove the truncation altogether. */
3154 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3155 {
3156 rtx x = XEXP (op0, 0);
3157 machine_mode xmode = GET_MODE (x);
3158 tem = simplify_gen_binary (AND, xmode, x,
3159 gen_int_mode (INTVAL (trueop1), xmode));
3160 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3161 }
3162
3163 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3164 if (GET_CODE (op0) == IOR
3165 && CONST_INT_P (trueop1)
3166 && CONST_INT_P (XEXP (op0, 1)))
3167 {
3168 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3169 return simplify_gen_binary (IOR, mode,
3170 simplify_gen_binary (AND, mode,
3171 XEXP (op0, 0), op1),
3172 gen_int_mode (tmp, mode));
3173 }
3174
3175 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3176 insn (and may simplify more). */
3177 if (GET_CODE (op0) == XOR
3178 && rtx_equal_p (XEXP (op0, 0), op1)
3179 && ! side_effects_p (op1))
3180 return simplify_gen_binary (AND, mode,
3181 simplify_gen_unary (NOT, mode,
3182 XEXP (op0, 1), mode),
3183 op1);
3184
3185 if (GET_CODE (op0) == XOR
3186 && rtx_equal_p (XEXP (op0, 1), op1)
3187 && ! side_effects_p (op1))
3188 return simplify_gen_binary (AND, mode,
3189 simplify_gen_unary (NOT, mode,
3190 XEXP (op0, 0), mode),
3191 op1);
3192
3193 /* Similarly for (~(A ^ B)) & A. */
3194 if (GET_CODE (op0) == NOT
3195 && GET_CODE (XEXP (op0, 0)) == XOR
3196 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3197 && ! side_effects_p (op1))
3198 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3199
3200 if (GET_CODE (op0) == NOT
3201 && GET_CODE (XEXP (op0, 0)) == XOR
3202 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3203 && ! side_effects_p (op1))
3204 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3205
3206 /* Convert (A | B) & A to A. */
3207 if (GET_CODE (op0) == IOR
3208 && (rtx_equal_p (XEXP (op0, 0), op1)
3209 || rtx_equal_p (XEXP (op0, 1), op1))
3210 && ! side_effects_p (XEXP (op0, 0))
3211 && ! side_effects_p (XEXP (op0, 1)))
3212 return op1;
3213
3214 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3215 ((A & N) + B) & M -> (A + B) & M
3216 Similarly if (N & M) == 0,
3217 ((A | N) + B) & M -> (A + B) & M
3218 and for - instead of + and/or ^ instead of |.
3219 Also, if (N & M) == 0, then
3220 (A +- N) & M -> A & M. */
3221 if (CONST_INT_P (trueop1)
3222 && HWI_COMPUTABLE_MODE_P (mode)
3223 && ~UINTVAL (trueop1)
3224 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3225 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3226 {
3227 rtx pmop[2];
3228 int which;
3229
3230 pmop[0] = XEXP (op0, 0);
3231 pmop[1] = XEXP (op0, 1);
3232
3233 if (CONST_INT_P (pmop[1])
3234 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3235 return simplify_gen_binary (AND, mode, pmop[0], op1);
3236
3237 for (which = 0; which < 2; which++)
3238 {
3239 tem = pmop[which];
3240 switch (GET_CODE (tem))
3241 {
3242 case AND:
3243 if (CONST_INT_P (XEXP (tem, 1))
3244 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3245 == UINTVAL (trueop1))
3246 pmop[which] = XEXP (tem, 0);
3247 break;
3248 case IOR:
3249 case XOR:
3250 if (CONST_INT_P (XEXP (tem, 1))
3251 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3252 pmop[which] = XEXP (tem, 0);
3253 break;
3254 default:
3255 break;
3256 }
3257 }
3258
3259 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3260 {
3261 tem = simplify_gen_binary (GET_CODE (op0), mode,
3262 pmop[0], pmop[1]);
3263 return simplify_gen_binary (code, mode, tem, op1);
3264 }
3265 }
3266
3267 /* (and X (ior (not X) Y)) -> (and X Y) */
3268 if (GET_CODE (op1) == IOR
3269 && GET_CODE (XEXP (op1, 0)) == NOT
3270 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3271 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3272
3273 /* (and (ior (not X) Y) X) -> (and X Y) */
3274 if (GET_CODE (op0) == IOR
3275 && GET_CODE (XEXP (op0, 0)) == NOT
3276 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3277 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3278
3279 /* (and X (ior Y (not X))) -> (and X Y) */
3280 if (GET_CODE (op1) == IOR
3281 && GET_CODE (XEXP (op1, 1)) == NOT
3282 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3283 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3284
3285 /* (and (ior Y (not X)) X) -> (and X Y) */
3286 if (GET_CODE (op0) == IOR
3287 && GET_CODE (XEXP (op0, 1)) == NOT
3288 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3289 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3290
3291 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3292 if (tem)
3293 return tem;
3294
3295 tem = simplify_associative_operation (code, mode, op0, op1);
3296 if (tem)
3297 return tem;
3298 break;
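/* Editorial note (not part of the original file): a concrete case of the
   PLUS/MINUS masking rule above with M == 15 and N == 16:
   (and (plus (ior A (const_int 16)) B) (const_int 15)) becomes
   (and (plus A B) (const_int 15)), because the bit set by the IOR is
   discarded by the mask.  */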
3299
3300 case UDIV:
3301 /* 0/x is 0 (or x&0 if x has side-effects). */
3302 if (trueop0 == CONST0_RTX (mode)
3303 && !cfun->can_throw_non_call_exceptions)
3304 {
3305 if (side_effects_p (op1))
3306 return simplify_gen_binary (AND, mode, op1, trueop0);
3307 return trueop0;
3308 }
3309 /* x/1 is x. */
3310 if (trueop1 == CONST1_RTX (mode))
3311 {
3312 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3313 if (tem)
3314 return tem;
3315 }
3316 /* Convert divide by power of two into shift. */
3317 if (CONST_INT_P (trueop1)
3318 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3319 return simplify_gen_binary (LSHIFTRT, mode, op0,
3320 gen_int_shift_amount (mode, val));
3321 break;
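/* Editorial note (not part of the original file): for example,
   (udiv:SI X (const_int 16)) becomes (lshiftrt:SI X (const_int 4)) by
   the power-of-two rule above.  */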
3322
3323 case DIV:
3324 /* Handle floating point and integers separately. */
3325 if (SCALAR_FLOAT_MODE_P (mode))
3326 {
3327 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3328 safe for modes with NaNs, since 0.0 / 0.0 will then be
3329 NaN rather than 0.0. Nor is it safe for modes with signed
3330 zeros, since dividing 0 by a negative number gives -0.0. */
3331 if (trueop0 == CONST0_RTX (mode)
3332 && !HONOR_NANS (mode)
3333 && !HONOR_SIGNED_ZEROS (mode)
3334 && ! side_effects_p (op1))
3335 return op0;
3336 /* x/1.0 is x. */
3337 if (trueop1 == CONST1_RTX (mode)
3338 && !HONOR_SNANS (mode))
3339 return op0;
3340
3341 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3342 && trueop1 != CONST0_RTX (mode))
3343 {
3344 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3345
3346 /* x/-1.0 is -x. */
3347 if (real_equal (d1, &dconstm1)
3348 && !HONOR_SNANS (mode))
3349 return simplify_gen_unary (NEG, mode, op0, mode);
3350
3351 /* Change FP division by a constant into multiplication.
3352 Only do this with -freciprocal-math. */
3353 if (flag_reciprocal_math
3354 && !real_equal (d1, &dconst0))
3355 {
3356 REAL_VALUE_TYPE d;
3357 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3358 tem = const_double_from_real_value (d, mode);
3359 return simplify_gen_binary (MULT, mode, op0, tem);
3360 }
3361 }
3362 }
3363 else if (SCALAR_INT_MODE_P (mode))
3364 {
3365 /* 0/x is 0 (or x&0 if x has side-effects). */
3366 if (trueop0 == CONST0_RTX (mode)
3367 && !cfun->can_throw_non_call_exceptions)
3368 {
3369 if (side_effects_p (op1))
3370 return simplify_gen_binary (AND, mode, op1, trueop0);
3371 return trueop0;
3372 }
3373 /* x/1 is x. */
3374 if (trueop1 == CONST1_RTX (mode))
3375 {
3376 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3377 if (tem)
3378 return tem;
3379 }
3380 /* x/-1 is -x. */
3381 if (trueop1 == constm1_rtx)
3382 {
3383 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3384 if (x)
3385 return simplify_gen_unary (NEG, mode, x, mode);
3386 }
3387 }
3388 break;
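/* Editorial note (not part of the original file): with -freciprocal-math,
   the rule above rewrites a division by a constant as a multiplication by
   its reciprocal, e.g. (div:DF X (const_double 4.0)) becomes
   (mult:DF X (const_double 0.25)); X / -1.0 becomes (neg X) even without
   that flag.  */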
3389
3390 case UMOD:
3391 /* 0%x is 0 (or x&0 if x has side-effects). */
3392 if (trueop0 == CONST0_RTX (mode))
3393 {
3394 if (side_effects_p (op1))
3395 return simplify_gen_binary (AND, mode, op1, trueop0);
3396 return trueop0;
3397 }
3398 /* x%1 is 0 (or x&0 if x has side-effects). */
3399 if (trueop1 == CONST1_RTX (mode))
3400 {
3401 if (side_effects_p (op0))
3402 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3403 return CONST0_RTX (mode);
3404 }
3405 /* Implement modulus by power of two as AND. */
3406 if (CONST_INT_P (trueop1)
3407 && exact_log2 (UINTVAL (trueop1)) > 0)
3408 return simplify_gen_binary (AND, mode, op0,
3409 gen_int_mode (INTVAL (op1) - 1, mode));
3410 break;
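/* Editorial note (not part of the original file): the power-of-two rule
   above rewrites, e.g., (umod:SI X (const_int 8)) as
   (and:SI X (const_int 7)).  */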
3411
3412 case MOD:
3413 /* 0%x is 0 (or x&0 if x has side-effects). */
3414 if (trueop0 == CONST0_RTX (mode))
3415 {
3416 if (side_effects_p (op1))
3417 return simplify_gen_binary (AND, mode, op1, trueop0);
3418 return trueop0;
3419 }
3420 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3421 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3422 {
3423 if (side_effects_p (op0))
3424 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3425 return CONST0_RTX (mode);
3426 }
3427 break;
3428
3429 case ROTATERT:
3430 case ROTATE:
3431 /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3432 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3433 bitsize - 1, use the other direction of rotate with a 1 .. bitsize / 2 - 1
3434 amount instead. */
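/* For example, in a 32-bit mode (rotate:SI x (const_int 25)) is
   rewritten as (rotatert:SI x (const_int 7)).  */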
3435 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3436 if (CONST_INT_P (trueop1)
3437 && IN_RANGE (INTVAL (trueop1),
3438 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3439 GET_MODE_UNIT_PRECISION (mode) - 1))
3440 {
3441 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3442 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3443 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3444 mode, op0, new_amount_rtx);
3445 }
3446 #endif
3447 /* FALLTHRU */
3448 case ASHIFTRT:
3449 if (trueop1 == CONST0_RTX (mode))
3450 return op0;
3451 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3452 return op0;
3453 /* Rotating ~0 always results in ~0. */
3454 if (CONST_INT_P (trueop0)
3455 && HWI_COMPUTABLE_MODE_P (mode)
3456 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3457 && ! side_effects_p (op1))
3458 return op0;
3459
3460 canonicalize_shift:
3461 /* Given:
3462 scalar modes M1, M2
3463 scalar constants c1, c2
3464 size (M2) > size (M1)
3465 c1 == size (M2) - size (M1)
3466 optimize:
3467 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3468 <low_part>)
3469 (const_int <c2>))
3470 to:
3471 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3472 <low_part>). */
3473 if ((code == ASHIFTRT || code == LSHIFTRT)
3474 && is_a <scalar_int_mode> (mode, &int_mode)
3475 && SUBREG_P (op0)
3476 && CONST_INT_P (op1)
3477 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3478 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3479 &inner_mode)
3480 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3481 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3482 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3483 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3484 && subreg_lowpart_p (op0))
3485 {
3486 rtx tmp = gen_int_shift_amount
3487 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3488 tmp = simplify_gen_binary (code, inner_mode,
3489 XEXP (SUBREG_REG (op0), 0),
3490 tmp);
3491 return lowpart_subreg (int_mode, tmp, inner_mode);
3492 }
3493
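/* If the target truncates shift counts to the mode precision, an
   out-of-range constant count can be reduced up front; e.g. a shift
   by 33 in a 32-bit mode becomes a shift by 1.  */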
3494 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3495 {
3496 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3497 if (val != INTVAL (op1))
3498 return simplify_gen_binary (code, mode, op0,
3499 gen_int_shift_amount (mode, val));
3500 }
3501 break;
3502
3503 case ASHIFT:
3504 case SS_ASHIFT:
3505 case US_ASHIFT:
3506 if (trueop1 == CONST0_RTX (mode))
3507 return op0;
3508 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3509 return op0;
3510 goto canonicalize_shift;
3511
3512 case LSHIFTRT:
3513 if (trueop1 == CONST0_RTX (mode))
3514 return op0;
3515 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3516 return op0;
3517 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
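/* If CLZ of zero is defined as the mode precision (say 32), the shifted
   CLZ is 1 exactly when X is zero: CLZ of any nonzero X is at most 31,
   so shifting it right by exact_log2 (32) == 5 gives 0, while 32 >> 5
   gives 1.  */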
3518 if (GET_CODE (op0) == CLZ
3519 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3520 && CONST_INT_P (trueop1)
3521 && STORE_FLAG_VALUE == 1
3522 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3523 {
3524 unsigned HOST_WIDE_INT zero_val = 0;
3525
3526 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3527 && zero_val == GET_MODE_PRECISION (inner_mode)
3528 && INTVAL (trueop1) == exact_log2 (zero_val))
3529 return simplify_gen_relational (EQ, mode, inner_mode,
3530 XEXP (op0, 0), const0_rtx);
3531 }
3532 goto canonicalize_shift;
3533
3534 case SMIN:
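/* smin (x, INT_MIN) is INT_MIN; mode_signbit_p matches only the minimum
   signed value of the mode (e.g. -2147483648 for SImode).  */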
3535 if (HWI_COMPUTABLE_MODE_P (mode)
3536 && mode_signbit_p (mode, trueop1)
3537 && ! side_effects_p (op0))
3538 return op1;
3539 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3540 return op0;
3541 tem = simplify_associative_operation (code, mode, op0, op1);
3542 if (tem)
3543 return tem;
3544 break;
3545
3546 case SMAX:
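/* smax (x, INT_MAX) is INT_MAX; GET_MODE_MASK (mode) >> 1 is the maximum
   signed value of the mode (e.g. 0x7fffffff for SImode).  */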
3547 if (HWI_COMPUTABLE_MODE_P (mode)
3548 && CONST_INT_P (trueop1)
3549 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3550 && ! side_effects_p (op0))
3551 return op1;
3552 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3553 return op0;
3554 tem = simplify_associative_operation (code, mode, op0, op1);
3555 if (tem)
3556 return tem;
3557 break;
3558
3559 case UMIN:
3560 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3561 return op1;
3562 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3563 return op0;
3564 tem = simplify_associative_operation (code, mode, op0, op1);
3565 if (tem)
3566 return tem;
3567 break;
3568
3569 case UMAX:
3570 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3571 return op1;
3572 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3573 return op0;
3574 tem = simplify_associative_operation (code, mode, op0, op1);
3575 if (tem)
3576 return tem;
3577 break;
3578
3579 case SS_PLUS:
3580 case US_PLUS:
3581 case SS_MINUS:
3582 case US_MINUS:
3583 case SS_MULT:
3584 case US_MULT:
3585 case SS_DIV:
3586 case US_DIV:
3587 /* ??? There are simplifications that can be done. */
3588 return 0;
3589
3590 case VEC_SERIES:
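/* (vec_series:M base step) is the vector { base, base + step,
   base + 2 * step, ... }, so a zero step is just a broadcast of base.  */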
3591 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3592 return gen_vec_duplicate (mode, op0);
3593 if (CONSTANT_P (op0) && CONSTANT_P (op1))
3594 return gen_const_vec_series (mode, op0, op1);
3595 return 0;
3596
3597 case VEC_SELECT:
3598 if (!VECTOR_MODE_P (mode))
3599 {
3600 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3601 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3602 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3603 gcc_assert (XVECLEN (trueop1, 0) == 1);
3604 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3605
3606 if (vec_duplicate_p (trueop0, &elt0))
3607 return elt0;
3608
3609 if (GET_CODE (trueop0) == CONST_VECTOR)
3610 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3611 (trueop1, 0, 0)));
3612
3613 /* Extract a scalar element from a nested VEC_SELECT expression
3614 (with optional nested VEC_CONCAT expression). Some targets
3615 (i386) extract a scalar element from a vector using a chain of
3616 nested VEC_SELECT expressions.  When the input operand is a memory
3617 operand, this operation can be simplified to a simple scalar
3618 load from an offset memory address. */
3619 if (GET_CODE (trueop0) == VEC_SELECT)
3620 {
3621 rtx op0 = XEXP (trueop0, 0);
3622 rtx op1 = XEXP (trueop0, 1);
3623
3624 int n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3625
3626 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3627 int elem;
3628
3629 rtvec vec;
3630 rtx tmp_op, tmp;
3631
3632 gcc_assert (GET_CODE (op1) == PARALLEL);
3633 gcc_assert (i < n_elts);
3634
3635 /* Select the element pointed to by the nested selector. */
3636 elem = INTVAL (XVECEXP (op1, 0, i));
3637
3638 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3639 if (GET_CODE (op0) == VEC_CONCAT)
3640 {
3641 rtx op00 = XEXP (op0, 0);
3642 rtx op01 = XEXP (op0, 1);
3643
3644 machine_mode mode00, mode01;
3645 int n_elts00, n_elts01;
3646
3647 mode00 = GET_MODE (op00);
3648 mode01 = GET_MODE (op01);
3649
3650 /* Find out number of elements of each operand. */
3651 n_elts00 = GET_MODE_NUNITS (mode00);
3652 n_elts01 = GET_MODE_NUNITS (mode01);
3653
3654 gcc_assert (n_elts == n_elts00 + n_elts01);
3655
3656 /* Select the correct operand of the VEC_CONCAT and adjust the
3657 selector: elements below n_elts00 come from op00. */
3658 if (elem < n_elts00)
3659 tmp_op = op00;
3660 else
3661 {
3662 tmp_op = op01;
3663 elem -= n_elts00;
3664 }
3665 }
3666 else
3667 tmp_op = op0;
3668
3669 vec = rtvec_alloc (1);
3670 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3671
3672 tmp = gen_rtx_fmt_ee (code, mode,
3673 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3674 return tmp;
3675 }
3676 }
3677 else
3678 {
3679 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3680 gcc_assert (GET_MODE_INNER (mode)
3681 == GET_MODE_INNER (GET_MODE (trueop0)));
3682 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3683
3684 if (vec_duplicate_p (trueop0, &elt0))
3685 /* It doesn't matter which elements are selected by trueop1,
3686 because they are all the same. */
3687 return gen_vec_duplicate (mode, elt0);
3688
3689 if (GET_CODE (trueop0) == CONST_VECTOR)
3690 {
3691 int elt_size = GET_MODE_UNIT_SIZE (mode);
3692 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3693 rtvec v = rtvec_alloc (n_elts);
3694 unsigned int i;
3695
3696 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3697 for (i = 0; i < n_elts; i++)
3698 {
3699 rtx x = XVECEXP (trueop1, 0, i);
3700
3701 gcc_assert (CONST_INT_P (x));
3702 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3703 INTVAL (x));
3704 }
3705
3706 return gen_rtx_CONST_VECTOR (mode, v);
3707 }
3708
3709 /* Recognize the identity: selecting all elements in order is a no-op. */
3710 if (GET_MODE (trueop0) == mode)
3711 {
3712 bool maybe_ident = true;
3713 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3714 {
3715 rtx j = XVECEXP (trueop1, 0, i);
3716 if (!CONST_INT_P (j) || INTVAL (j) != i)
3717 {
3718 maybe_ident = false;
3719 break;
3720 }
3721 }
3722 if (maybe_ident)
3723 return trueop0;
3724 }
3725
3726 /* If we build {a,b} then permute it, build the result directly. */
3727 if (XVECLEN (trueop1, 0) == 2
3728 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3729 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3730 && GET_CODE (trueop0) == VEC_CONCAT
3731 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3732 && GET_MODE (XEXP (trueop0, 0)) == mode
3733 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3734 && GET_MODE (XEXP (trueop0, 1)) == mode)
3735 {
3736 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3737 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3738 rtx subop0, subop1;
3739
3740 gcc_assert (i0 < 4 && i1 < 4);
3741 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3742 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3743
3744 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3745 }
3746
3747 if (XVECLEN (trueop1, 0) == 2
3748 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3749 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3750 && GET_CODE (trueop0) == VEC_CONCAT
3751 && GET_MODE (trueop0) == mode)
3752 {
3753 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3754 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3755 rtx subop0, subop1;
3756
3757 gcc_assert (i0 < 2 && i1 < 2);
3758 subop0 = XEXP (trueop0, i0);
3759 subop1 = XEXP (trueop0, i1);
3760
3761 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3762 }
3763
3764 /* If we select one half of a vec_concat, return that. */
3765 if (GET_CODE (trueop0) == VEC_CONCAT
3766 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3767 {
3768 rtx subop0 = XEXP (trueop0, 0);
3769 rtx subop1 = XEXP (trueop0, 1);
3770 machine_mode mode0 = GET_MODE (subop0);
3771 machine_mode mode1 = GET_MODE (subop1);
3772 int l0 = GET_MODE_NUNITS (mode0);
3773 int l1 = GET_MODE_NUNITS (mode1);
3774 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3775 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3776 {
3777 bool success = true;
3778 for (int i = 1; i < l0; ++i)
3779 {
3780 rtx j = XVECEXP (trueop1, 0, i);
3781 if (!CONST_INT_P (j) || INTVAL (j) != i)
3782 {
3783 success = false;
3784 break;
3785 }
3786 }
3787 if (success)
3788 return subop0;
3789 }
3790 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3791 {
3792 bool success = true;
3793 for (int i = 1; i < l1; ++i)
3794 {
3795 rtx j = XVECEXP (trueop1, 0, i);
3796 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3797 {
3798 success = false;
3799 break;
3800 }
3801 }
3802 if (success)
3803 return subop1;
3804 }
3805 }
3806 }
3807
3808 if (XVECLEN (trueop1, 0) == 1
3809 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3810 && GET_CODE (trueop0) == VEC_CONCAT)
3811 {
3812 rtx vec = trueop0;
3813 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3814
3815 /* Try to find the element in the VEC_CONCAT. */
3816 while (GET_MODE (vec) != mode
3817 && GET_CODE (vec) == VEC_CONCAT)
3818 {
3819 HOST_WIDE_INT vec_size;
3820
3821 if (CONST_INT_P (XEXP (vec, 0)))
3822 {
3823 /* vec_concat of two const_ints doesn't make sense with
3824 respect to modes. */
3825 if (CONST_INT_P (XEXP (vec, 1)))
3826 return 0;
3827
3828 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3829 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3830 }
3831 else
3832 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3833
3834 if (offset < vec_size)
3835 vec = XEXP (vec, 0);
3836 else
3837 {
3838 offset -= vec_size;
3839 vec = XEXP (vec, 1);
3840 }
3841 vec = avoid_constant_pool_reference (vec);
3842 }
3843
3844 if (GET_MODE (vec) == mode)
3845 return vec;
3846 }
3847
3848 /* If we select elements in a vec_merge that all come from the same
3849 operand, select from that operand directly. */
3850 if (GET_CODE (op0) == VEC_MERGE)
3851 {
3852 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3853 if (CONST_INT_P (trueop02))
3854 {
3855 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3856 bool all_operand0 = true;
3857 bool all_operand1 = true;
3858 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3859 {
3860 rtx j = XVECEXP (trueop1, 0, i);
3861 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3862 all_operand1 = false;
3863 else
3864 all_operand0 = false;
3865 }
3866 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3867 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3868 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3869 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3870 }
3871 }
3872
3873 /* If we have two nested selects that are inverses of each
3874 other, replace them with the source operand. */
3875 if (GET_CODE (trueop0) == VEC_SELECT
3876 && GET_MODE (XEXP (trueop0, 0)) == mode)
3877 {
3878 rtx op0_subop1 = XEXP (trueop0, 1);
3879 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3880 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3881
3882 /* Apply the outer ordering vector to the inner one. (The inner
3883 ordering vector is expressly permitted to be of a different
3884 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3885 then the two VEC_SELECTs cancel. */
3886 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3887 {
3888 rtx x = XVECEXP (trueop1, 0, i);
3889 if (!CONST_INT_P (x))
3890 return 0;
3891 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3892 if (!CONST_INT_P (y) || i != INTVAL (y))
3893 return 0;
3894 }
3895 return XEXP (trueop0, 0);
3896 }
3897
3898 return 0;
3899 case VEC_CONCAT:
3900 {
3901 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3902 ? GET_MODE (trueop0)
3903 : GET_MODE_INNER (mode));
3904 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3905 ? GET_MODE (trueop1)
3906 : GET_MODE_INNER (mode));
3907
3908 gcc_assert (VECTOR_MODE_P (mode));
3909 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3910 == GET_MODE_SIZE (mode));
3911
3912 if (VECTOR_MODE_P (op0_mode))
3913 gcc_assert (GET_MODE_INNER (mode)
3914 == GET_MODE_INNER (op0_mode));
3915 else
3916 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3917
3918 if (VECTOR_MODE_P (op1_mode))
3919 gcc_assert (GET_MODE_INNER (mode)
3920 == GET_MODE_INNER (op1_mode));
3921 else
3922 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3923
3924 if ((GET_CODE (trueop0) == CONST_VECTOR
3925 || CONST_SCALAR_INT_P (trueop0)
3926 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3927 && (GET_CODE (trueop1) == CONST_VECTOR
3928 || CONST_SCALAR_INT_P (trueop1)
3929 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3930 {
3931 unsigned n_elts = GET_MODE_NUNITS (mode);
3932 unsigned in_n_elts = GET_MODE_NUNITS (op0_mode);
3933 rtvec v = rtvec_alloc (n_elts);
3934 unsigned int i;
3935 for (i = 0; i < n_elts; i++)
3936 {
3937 if (i < in_n_elts)
3938 {
3939 if (!VECTOR_MODE_P (op0_mode))
3940 RTVEC_ELT (v, i) = trueop0;
3941 else
3942 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3943 }
3944 else
3945 {
3946 if (!VECTOR_MODE_P (op1_mode))
3947 RTVEC_ELT (v, i) = trueop1;
3948 else
3949 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3950 i - in_n_elts);
3951 }
3952 }
3953
3954 return gen_rtx_CONST_VECTOR (mode, v);
3955 }
3956
3957 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3958 Restrict the transformation to avoid generating a VEC_SELECT with a
3959 mode unrelated to its operand. */
3960 if (GET_CODE (trueop0) == VEC_SELECT
3961 && GET_CODE (trueop1) == VEC_SELECT
3962 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3963 && GET_MODE (XEXP (trueop0, 0)) == mode)
3964 {
3965 rtx par0 = XEXP (trueop0, 1);
3966 rtx par1 = XEXP (trueop1, 1);
3967 int len0 = XVECLEN (par0, 0);
3968 int len1 = XVECLEN (par1, 0);
3969 rtvec vec = rtvec_alloc (len0 + len1);
3970 for (int i = 0; i < len0; i++)
3971 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3972 for (int i = 0; i < len1; i++)
3973 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3974 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3975 gen_rtx_PARALLEL (VOIDmode, vec));
3976 }
3977 }
3978 return 0;
3979
3980 default:
3981 gcc_unreachable ();
3982 }
3983
3984 if (mode == GET_MODE (op0)
3985 && mode == GET_MODE (op1)
3986 && vec_duplicate_p (op0, &elt0)
3987 && vec_duplicate_p (op1, &elt1))
3988 {
3989 /* Try applying the operator to ELT and see if that simplifies.
3990 We can duplicate the result if so.
3991
3992 The reason we don't use simplify_gen_binary is that it isn't
3993 necessarily a win to convert things like:
3994
3995 (plus:V (vec_duplicate:V (reg:S R1))
3996 (vec_duplicate:V (reg:S R2)))
3997
3998 to:
3999
4000 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4001
4002 The first might be done entirely in vector registers while the
4003 second might need a move between register files. */
4004 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4005 elt0, elt1);
4006 if (tem)
4007 return gen_vec_duplicate (mode, tem);
4008 }
4009
4010 return 0;
4011 }
4012
4013 rtx
4014 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4015 rtx op0, rtx op1)
4016 {
4017 if (VECTOR_MODE_P (mode)
4018 && code != VEC_CONCAT
4019 && GET_CODE (op0) == CONST_VECTOR
4020 && GET_CODE (op1) == CONST_VECTOR)
4021 {
4022 unsigned int n_elts = CONST_VECTOR_NUNITS (op0);
4023 gcc_assert (n_elts == (unsigned int) CONST_VECTOR_NUNITS (op1));
4024 gcc_assert (n_elts == GET_MODE_NUNITS (mode));
4025 rtvec v = rtvec_alloc (n_elts);
4026 unsigned int i;
4027
4028 for (i = 0; i < n_elts; i++)
4029 {
4030 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4031 CONST_VECTOR_ELT (op0, i),
4032 CONST_VECTOR_ELT (op1, i));
4033 if (!x)
4034 return 0;
4035 RTVEC_ELT (v, i) = x;
4036 }
4037
4038 return gen_rtx_CONST_VECTOR (mode, v);
4039 }
4040
4041 if (VECTOR_MODE_P (mode)
4042 && code == VEC_CONCAT
4043 && (CONST_SCALAR_INT_P (op0)
4044 || GET_CODE (op0) == CONST_FIXED
4045 || CONST_DOUBLE_AS_FLOAT_P (op0))
4046 && (CONST_SCALAR_INT_P (op1)
4047 || CONST_DOUBLE_AS_FLOAT_P (op1)
4048 || GET_CODE (op1) == CONST_FIXED))
4049 {
4050 unsigned n_elts = GET_MODE_NUNITS (mode);
4051 rtvec v = rtvec_alloc (n_elts);
4052
4053 gcc_assert (n_elts >= 2);
4054 if (n_elts == 2)
4055 {
4056 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4057 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4058
4059 RTVEC_ELT (v, 0) = op0;
4060 RTVEC_ELT (v, 1) = op1;
4061 }
4062 else
4063 {
4064 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
4065 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
4066 unsigned i;
4067
4068 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4069 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4070 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4071
4072 for (i = 0; i < op0_n_elts; ++i)
4073 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
4074 for (i = 0; i < op1_n_elts; ++i)
4075 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
4076 }
4077
4078 return gen_rtx_CONST_VECTOR (mode, v);
4079 }
4080
4081 if (SCALAR_FLOAT_MODE_P (mode)
4082 && CONST_DOUBLE_AS_FLOAT_P (op0)
4083 && CONST_DOUBLE_AS_FLOAT_P (op1)
4084 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4085 {
4086 if (code == AND
4087 || code == IOR
4088 || code == XOR)
4089 {
4090 long tmp0[4];
4091 long tmp1[4];
4092 REAL_VALUE_TYPE r;
4093 int i;
4094
4095 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4096 GET_MODE (op0));
4097 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4098 GET_MODE (op1));
4099 for (i = 0; i < 4; i++)
4100 {
4101 switch (code)
4102 {
4103 case AND:
4104 tmp0[i] &= tmp1[i];
4105 break;
4106 case IOR:
4107 tmp0[i] |= tmp1[i];
4108 break;
4109 case XOR:
4110 tmp0[i] ^= tmp1[i];
4111 break;
4112 default:
4113 gcc_unreachable ();
4114 }
4115 }
4116 real_from_target (&r, tmp0, mode);
4117 return const_double_from_real_value (r, mode);
4118 }
4119 else
4120 {
4121 REAL_VALUE_TYPE f0, f1, value, result;
4122 const REAL_VALUE_TYPE *opr0, *opr1;
4123 bool inexact;
4124
4125 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4126 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4127
4128 if (HONOR_SNANS (mode)
4129 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4130 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4131 return 0;
4132
4133 real_convert (&f0, mode, opr0);
4134 real_convert (&f1, mode, opr1);
4135
4136 if (code == DIV
4137 && real_equal (&f1, &dconst0)
4138 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4139 return 0;
4140
4141 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4142 && flag_trapping_math
4143 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4144 {
4145 int s0 = REAL_VALUE_NEGATIVE (f0);
4146 int s1 = REAL_VALUE_NEGATIVE (f1);
4147
4148 switch (code)
4149 {
4150 case PLUS:
4151 /* Inf + -Inf = NaN plus exception. */
4152 if (s0 != s1)
4153 return 0;
4154 break;
4155 case MINUS:
4156 /* Inf - Inf = NaN plus exception. */
4157 if (s0 == s1)
4158 return 0;
4159 break;
4160 case DIV:
4161 /* Inf / Inf = NaN plus exception. */
4162 return 0;
4163 default:
4164 break;
4165 }
4166 }
4167
4168 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4169 && flag_trapping_math
4170 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4171 || (REAL_VALUE_ISINF (f1)
4172 && real_equal (&f0, &dconst0))))
4173 /* Inf * 0 = NaN plus exception. */
4174 return 0;
4175
4176 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4177 &f0, &f1);
4178 real_convert (&result, mode, &value);
4179
4180 /* Don't constant fold this floating point operation if
4181 the result has overflowed and flag_trapping_math. */
4182
4183 if (flag_trapping_math
4184 && MODE_HAS_INFINITIES (mode)
4185 && REAL_VALUE_ISINF (result)
4186 && !REAL_VALUE_ISINF (f0)
4187 && !REAL_VALUE_ISINF (f1))
4188 /* Overflow plus exception. */
4189 return 0;
4190
4191 /* Don't constant fold this floating point operation if the
4192 result may depend upon the run-time rounding mode and
4193 flag_rounding_math is set, or if GCC's software emulation
4194 is unable to accurately represent the result. */
4195
4196 if ((flag_rounding_math
4197 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4198 && (inexact || !real_identical (&result, &value)))
4199 return NULL_RTX;
4200
4201 return const_double_from_real_value (result, mode);
4202 }
4203 }
4204
4205 /* We can fold some multi-word operations. */
4206 scalar_int_mode int_mode;
4207 if (is_a <scalar_int_mode> (mode, &int_mode)
4208 && CONST_SCALAR_INT_P (op0)
4209 && CONST_SCALAR_INT_P (op1))
4210 {
4211 wide_int result;
4212 bool overflow;
4213 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4214 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4215
4216 #if TARGET_SUPPORTS_WIDE_INT == 0
4217 /* This assert keeps the simplification from producing a result
4218 that cannot be represented in a CONST_DOUBLE, but a lot of
4219 upstream callers expect that this function never fails to
4220 simplify something, so if you added this check to the test
4221 above, the code would die later anyway.  If this assert
4222 happens, you just need to make the port support wide int. */
4223 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4224 #endif
4225 switch (code)
4226 {
4227 case MINUS:
4228 result = wi::sub (pop0, pop1);
4229 break;
4230
4231 case PLUS:
4232 result = wi::add (pop0, pop1);
4233 break;
4234
4235 case MULT:
4236 result = wi::mul (pop0, pop1);
4237 break;
4238
4239 case DIV:
4240 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4241 if (overflow)
4242 return NULL_RTX;
4243 break;
4244
4245 case MOD:
4246 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4247 if (overflow)
4248 return NULL_RTX;
4249 break;
4250
4251 case UDIV:
4252 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4253 if (overflow)
4254 return NULL_RTX;
4255 break;
4256
4257 case UMOD:
4258 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4259 if (overflow)
4260 return NULL_RTX;
4261 break;
4262
4263 case AND:
4264 result = wi::bit_and (pop0, pop1);
4265 break;
4266
4267 case IOR:
4268 result = wi::bit_or (pop0, pop1);
4269 break;
4270
4271 case XOR:
4272 result = wi::bit_xor (pop0, pop1);
4273 break;
4274
4275 case SMIN:
4276 result = wi::smin (pop0, pop1);
4277 break;
4278
4279 case SMAX:
4280 result = wi::smax (pop0, pop1);
4281 break;
4282
4283 case UMIN:
4284 result = wi::umin (pop0, pop1);
4285 break;
4286
4287 case UMAX:
4288 result = wi::umax (pop0, pop1);
4289 break;
4290
4291 case LSHIFTRT:
4292 case ASHIFTRT:
4293 case ASHIFT:
4294 {
4295 wide_int wop1 = pop1;
4296 if (SHIFT_COUNT_TRUNCATED)
4297 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4298 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4299 return NULL_RTX;
4300
4301 switch (code)
4302 {
4303 case LSHIFTRT:
4304 result = wi::lrshift (pop0, wop1);
4305 break;
4306
4307 case ASHIFTRT:
4308 result = wi::arshift (pop0, wop1);
4309 break;
4310
4311 case ASHIFT:
4312 result = wi::lshift (pop0, wop1);
4313 break;
4314
4315 default:
4316 gcc_unreachable ();
4317 }
4318 break;
4319 }
4320 case ROTATE:
4321 case ROTATERT:
4322 {
4323 if (wi::neg_p (pop1))
4324 return NULL_RTX;
4325
4326 switch (code)
4327 {
4328 case ROTATE:
4329 result = wi::lrotate (pop0, pop1);
4330 break;
4331
4332 case ROTATERT:
4333 result = wi::rrotate (pop0, pop1);
4334 break;
4335
4336 default:
4337 gcc_unreachable ();
4338 }
4339 break;
4340 }
4341 default:
4342 return NULL_RTX;
4343 }
4344 return immed_wide_int_const (result, int_mode);
4345 }
4346
4347 /* Handle polynomial integers. */
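/* A poly_int value c0 + c1 * x (x being a runtime parameter such as the
   SVE vector length) can be added to, subtracted from, or multiplied by
   a compile-time constant coefficient-wise; e.g. (4 + 4x) + 8 is
   12 + 4x.  */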
4348 if (NUM_POLY_INT_COEFFS > 1
4349 && is_a <scalar_int_mode> (mode, &int_mode)
4350 && poly_int_rtx_p (op0)
4351 && poly_int_rtx_p (op1))
4352 {
4353 poly_wide_int result;
4354 switch (code)
4355 {
4356 case PLUS:
4357 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4358 break;
4359
4360 case MINUS:
4361 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4362 break;
4363
4364 case MULT:
4365 if (CONST_SCALAR_INT_P (op1))
4366 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4367 else
4368 return NULL_RTX;
4369 break;
4370
4371 case ASHIFT:
4372 if (CONST_SCALAR_INT_P (op1))
4373 {
4374 wide_int shift = rtx_mode_t (op1, mode);
4375 if (SHIFT_COUNT_TRUNCATED)
4376 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4377 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4378 return NULL_RTX;
4379 result = wi::to_poly_wide (op0, mode) << shift;
4380 }
4381 else
4382 return NULL_RTX;
4383 break;
4384
4385 case IOR:
4386 if (!CONST_SCALAR_INT_P (op1)
4387 || !can_ior_p (wi::to_poly_wide (op0, mode),
4388 rtx_mode_t (op1, mode), &result))
4389 return NULL_RTX;
4390 break;
4391
4392 default:
4393 return NULL_RTX;
4394 }
4395 return immed_wide_int_const (result, int_mode);
4396 }
4397
4398 return NULL_RTX;
4399 }
4400
4401
4402 \f
4403 /* Return a positive integer if X should sort after Y.  The value
4404 returned is 1 only when X and Y are both regs, i.e. only registers were reordered. */
4405
4406 static int
4407 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4408 {
4409 int result;
4410
4411 result = (commutative_operand_precedence (y)
4412 - commutative_operand_precedence (x));
4413 if (result)
4414 return result + result;
4415
4416 /* Group together equal REGs to do more simplification. */
4417 if (REG_P (x) && REG_P (y))
4418 return REGNO (x) > REGNO (y);
4419
4420 return 0;
4421 }
4422
4423 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4424 operands may be another PLUS or MINUS.
4425
4426 Rather than test for specific cases, we do this by a brute-force method
4427 and do all possible simplifications until no more changes occur. Then
4428 we rebuild the operation.
4429
4430 May return NULL_RTX when no changes were made. */
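/* A rough sketch of the intent: simplifying (plus (minus A B) B) expands
   the operand list to { A, -B, B }, the pairwise pass below cancels -B
   against B, and the rebuilt result is just A.  */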
4431
4432 static rtx
4433 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4434 rtx op1)
4435 {
4436 struct simplify_plus_minus_op_data
4437 {
4438 rtx op;
4439 short neg;
4440 } ops[16];
4441 rtx result, tem;
4442 int n_ops = 2;
4443 int changed, n_constants, canonicalized = 0;
4444 int i, j;
4445
4446 memset (ops, 0, sizeof ops);
4447
4448 /* Set up the two operands and then expand them until nothing has been
4449 changed. If we run out of room in our array, give up; this should
4450 almost never happen. */
4451
4452 ops[0].op = op0;
4453 ops[0].neg = 0;
4454 ops[1].op = op1;
4455 ops[1].neg = (code == MINUS);
4456
4457 do
4458 {
4459 changed = 0;
4460 n_constants = 0;
4461
4462 for (i = 0; i < n_ops; i++)
4463 {
4464 rtx this_op = ops[i].op;
4465 int this_neg = ops[i].neg;
4466 enum rtx_code this_code = GET_CODE (this_op);
4467
4468 switch (this_code)
4469 {
4470 case PLUS:
4471 case MINUS:
4472 if (n_ops == ARRAY_SIZE (ops))
4473 return NULL_RTX;
4474
4475 ops[n_ops].op = XEXP (this_op, 1);
4476 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4477 n_ops++;
4478
4479 ops[i].op = XEXP (this_op, 0);
4480 changed = 1;
4481 /* If this operand was negated then we will potentially
4482 canonicalize the expression. Similarly if we don't
4483 place the operands adjacent we're re-ordering the
4484 expression and thus might be performing a
4485 canonicalization. Ignore register re-ordering.
4486 ??? It might be better to shuffle the ops array here,
4487 but then (plus (plus (A, B), plus (C, D))) wouldn't
4488 be seen as non-canonical. */
4489 if (this_neg
4490 || (i != n_ops - 2
4491 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4492 canonicalized = 1;
4493 break;
4494
4495 case NEG:
4496 ops[i].op = XEXP (this_op, 0);
4497 ops[i].neg = ! this_neg;
4498 changed = 1;
4499 canonicalized = 1;
4500 break;
4501
4502 case CONST:
4503 if (n_ops != ARRAY_SIZE (ops)
4504 && GET_CODE (XEXP (this_op, 0)) == PLUS
4505 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4506 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4507 {
4508 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4509 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4510 ops[n_ops].neg = this_neg;
4511 n_ops++;
4512 changed = 1;
4513 canonicalized = 1;
4514 }
4515 break;
4516
4517 case NOT:
4518 /* ~a -> (-a - 1) */
4519 if (n_ops != ARRAY_SIZE (ops))
4520 {
4521 ops[n_ops].op = CONSTM1_RTX (mode);
4522 ops[n_ops++].neg = this_neg;
4523 ops[i].op = XEXP (this_op, 0);
4524 ops[i].neg = !this_neg;
4525 changed = 1;
4526 canonicalized = 1;
4527 }
4528 break;
4529
4530 case CONST_INT:
4531 n_constants++;
4532 if (this_neg)
4533 {
4534 ops[i].op = neg_const_int (mode, this_op);
4535 ops[i].neg = 0;
4536 changed = 1;
4537 canonicalized = 1;
4538 }
4539 break;
4540
4541 default:
4542 break;
4543 }
4544 }
4545 }
4546 while (changed);
4547
4548 if (n_constants > 1)
4549 canonicalized = 1;
4550
4551 gcc_assert (n_ops >= 2);
4552
4553 /* If we only have two operands, we can avoid the loops. */
4554 if (n_ops == 2)
4555 {
4556 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4557 rtx lhs, rhs;
4558
4559 /* Get the two operands. Be careful with the order, especially for
4560 the cases where code == MINUS. */
4561 if (ops[0].neg && ops[1].neg)
4562 {
4563 lhs = gen_rtx_NEG (mode, ops[0].op);
4564 rhs = ops[1].op;
4565 }
4566 else if (ops[0].neg)
4567 {
4568 lhs = ops[1].op;
4569 rhs = ops[0].op;
4570 }
4571 else
4572 {
4573 lhs = ops[0].op;
4574 rhs = ops[1].op;
4575 }
4576
4577 return simplify_const_binary_operation (code, mode, lhs, rhs);
4578 }
4579
4580 /* Now simplify each pair of operands until nothing changes. */
4581 while (1)
4582 {
4583 /* Insertion sort is good enough for a small array. */
4584 for (i = 1; i < n_ops; i++)
4585 {
4586 struct simplify_plus_minus_op_data save;
4587 int cmp;
4588
4589 j = i - 1;
4590 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4591 if (cmp <= 0)
4592 continue;
4593 /* Just swapping registers doesn't count as canonicalization. */
4594 if (cmp != 1)
4595 canonicalized = 1;
4596
4597 save = ops[i];
4598 do
4599 ops[j + 1] = ops[j];
4600 while (j--
4601 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4602 ops[j + 1] = save;
4603 }
4604
4605 changed = 0;
4606 for (i = n_ops - 1; i > 0; i--)
4607 for (j = i - 1; j >= 0; j--)
4608 {
4609 rtx lhs = ops[j].op, rhs = ops[i].op;
4610 int lneg = ops[j].neg, rneg = ops[i].neg;
4611
4612 if (lhs != 0 && rhs != 0)
4613 {
4614 enum rtx_code ncode = PLUS;
4615
4616 if (lneg != rneg)
4617 {
4618 ncode = MINUS;
4619 if (lneg)
4620 std::swap (lhs, rhs);
4621 }
4622 else if (swap_commutative_operands_p (lhs, rhs))
4623 std::swap (lhs, rhs);
4624
4625 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4626 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4627 {
4628 rtx tem_lhs, tem_rhs;
4629
4630 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4631 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4632 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4633 tem_rhs);
4634
4635 if (tem && !CONSTANT_P (tem))
4636 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4637 }
4638 else
4639 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4640
4641 if (tem)
4642 {
4643 /* Reject "simplifications" that just wrap the two
4644 arguments in a CONST. Failure to do so can result
4645 in infinite recursion with simplify_binary_operation
4646 when it calls us to simplify CONST operations.
4647 Also, if we find such a simplification, don't try
4648 any more combinations with this rhs: We must have
4649 something like symbol+offset, i.e. one of the
4650 trivial CONST expressions we handle later. */
4651 if (GET_CODE (tem) == CONST
4652 && GET_CODE (XEXP (tem, 0)) == ncode
4653 && XEXP (XEXP (tem, 0), 0) == lhs
4654 && XEXP (XEXP (tem, 0), 1) == rhs)
4655 break;
4656 lneg &= rneg;
4657 if (GET_CODE (tem) == NEG)
4658 tem = XEXP (tem, 0), lneg = !lneg;
4659 if (CONST_INT_P (tem) && lneg)
4660 tem = neg_const_int (mode, tem), lneg = 0;
4661
4662 ops[i].op = tem;
4663 ops[i].neg = lneg;
4664 ops[j].op = NULL_RTX;
4665 changed = 1;
4666 canonicalized = 1;
4667 }
4668 }
4669 }
4670
4671 if (!changed)
4672 break;
4673
4674 /* Pack all the operands to the lower-numbered entries. */
4675 for (i = 0, j = 0; j < n_ops; j++)
4676 if (ops[j].op)
4677 {
4678 ops[i] = ops[j];
4679 i++;
4680 }
4681 n_ops = i;
4682 }
4683
4684 /* If nothing changed, check that rematerialization of rtl instructions
4685 is still required. */
4686 if (!canonicalized)
4687 {
4688 /* Perform rematerialization only if all operands are registers and
4689 all operations are PLUS. */
4690 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4691 around rs6000 and how it uses the CA register. See PR67145. */
4692 for (i = 0; i < n_ops; i++)
4693 if (ops[i].neg
4694 || !REG_P (ops[i].op)
4695 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4696 && fixed_regs[REGNO (ops[i].op)]
4697 && !global_regs[REGNO (ops[i].op)]
4698 && ops[i].op != frame_pointer_rtx
4699 && ops[i].op != arg_pointer_rtx
4700 && ops[i].op != stack_pointer_rtx))
4701 return NULL_RTX;
4702 goto gen_result;
4703 }
4704
4705 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4706 if (n_ops == 2
4707 && CONST_INT_P (ops[1].op)
4708 && CONSTANT_P (ops[0].op)
4709 && ops[0].neg)
4710 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4711
4712 /* We suppressed creation of trivial CONST expressions in the
4713 combination loop to avoid recursion. Create one manually now.
4714 The combination loop should have ensured that there is exactly
4715 one CONST_INT, and the sort will have ensured that it is last
4716 in the array and that any other constant will be next-to-last. */
4717
4718 if (n_ops > 1
4719 && CONST_INT_P (ops[n_ops - 1].op)
4720 && CONSTANT_P (ops[n_ops - 2].op))
4721 {
4722 rtx value = ops[n_ops - 1].op;
4723 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4724 value = neg_const_int (mode, value);
4725 if (CONST_INT_P (value))
4726 {
4727 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4728 INTVAL (value));
4729 n_ops--;
4730 }
4731 }
4732
4733 /* Put a non-negated operand first, if possible. */
4734
4735 for (i = 0; i < n_ops && ops[i].neg; i++)
4736 continue;
4737 if (i == n_ops)
4738 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4739 else if (i != 0)
4740 {
4741 tem = ops[0].op;
4742 ops[0] = ops[i];
4743 ops[i].op = tem;
4744 ops[i].neg = 1;
4745 }
4746
4747 /* Now make the result by performing the requested operations. */
4748 gen_result:
4749 result = ops[0].op;
4750 for (i = 1; i < n_ops; i++)
4751 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4752 mode, result, ops[i].op);
4753
4754 return result;
4755 }
4756
4757 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4758 static bool
4759 plus_minus_operand_p (const_rtx x)
4760 {
4761 return GET_CODE (x) == PLUS
4762 || GET_CODE (x) == MINUS
4763 || (GET_CODE (x) == CONST
4764 && GET_CODE (XEXP (x, 0)) == PLUS
4765 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4766 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4767 }
4768
4769 /* Like simplify_binary_operation except used for relational operators.
4770 MODE is the mode of the result.  If MODE is VOIDmode, the operands must
4771 not both be VOIDmode as well.
4772
4773 CMP_MODE specifies the mode in which the comparison is done, so it is
4774 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4775 the operands or, if both are VOIDmode, the operands are compared in
4776 "infinite precision". */
4777 rtx
4778 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4779 machine_mode cmp_mode, rtx op0, rtx op1)
4780 {
4781 rtx tem, trueop0, trueop1;
4782
4783 if (cmp_mode == VOIDmode)
4784 cmp_mode = GET_MODE (op0);
4785 if (cmp_mode == VOIDmode)
4786 cmp_mode = GET_MODE (op1);
4787
4788 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4789 if (tem)
4790 {
4791 if (SCALAR_FLOAT_MODE_P (mode))
4792 {
4793 if (tem == const0_rtx)
4794 return CONST0_RTX (mode);
4795 #ifdef FLOAT_STORE_FLAG_VALUE
4796 {
4797 REAL_VALUE_TYPE val;
4798 val = FLOAT_STORE_FLAG_VALUE (mode);
4799 return const_double_from_real_value (val, mode);
4800 }
4801 #else
4802 return NULL_RTX;
4803 #endif
4804 }
4805 if (VECTOR_MODE_P (mode))
4806 {
4807 if (tem == const0_rtx)
4808 return CONST0_RTX (mode);
4809 #ifdef VECTOR_STORE_FLAG_VALUE
4810 {
4811 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4812 if (val == NULL_RTX)
4813 return NULL_RTX;
4814 if (val == const1_rtx)
4815 return CONST1_RTX (mode);
4816
4817 return gen_const_vec_duplicate (mode, val);
4818 }
4819 #else
4820 return NULL_RTX;
4821 #endif
4822 }
4823
4824 return tem;
4825 }
4826
4827 /* For the following tests, ensure const0_rtx is op1. */
4828 if (swap_commutative_operands_p (op0, op1)
4829 || (op0 == const0_rtx && op1 != const0_rtx))
4830 std::swap (op0, op1), code = swap_condition (code);
4831
4832 /* If op0 is a compare, extract the comparison arguments from it. */
4833 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4834 return simplify_gen_relational (code, mode, VOIDmode,
4835 XEXP (op0, 0), XEXP (op0, 1));
4836
4837 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4838 || CC0_P (op0))
4839 return NULL_RTX;
4840
4841 trueop0 = avoid_constant_pool_reference (op0);
4842 trueop1 = avoid_constant_pool_reference (op1);
4843 return simplify_relational_operation_1 (code, mode, cmp_mode,
4844 trueop0, trueop1);
4845 }
4846
4847 /* This part of simplify_relational_operation is only used when CMP_MODE
4848 is not in class MODE_CC (i.e. it is a real comparison).
4849
4850 MODE is the mode of the result, while CMP_MODE specifies the mode
4851 in which the comparison is done, so it is the mode of the operands. */
4852
4853 static rtx
4854 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4855 machine_mode cmp_mode, rtx op0, rtx op1)
4856 {
4857 enum rtx_code op0code = GET_CODE (op0);
4858
4859 if (op1 == const0_rtx && COMPARISON_P (op0))
4860 {
4861 /* If op0 is a comparison, extract the comparison arguments
4862 from it. */
4863 if (code == NE)
4864 {
4865 if (GET_MODE (op0) == mode)
4866 return simplify_rtx (op0);
4867 else
4868 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4869 XEXP (op0, 0), XEXP (op0, 1));
4870 }
4871 else if (code == EQ)
4872 {
4873 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4874 if (new_code != UNKNOWN)
4875 return simplify_gen_relational (new_code, mode, VOIDmode,
4876 XEXP (op0, 0), XEXP (op0, 1));
4877 }
4878 }
4879
4880 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4881 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
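/* For example, (ltu:SI (plus:SI a (const_int 4)) (const_int 4)) is true
   exactly when the addition wraps, i.e. when a is at least -4 viewed as
   unsigned, so it becomes (geu:SI a (const_int -4)).  */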
4882 if ((code == LTU || code == GEU)
4883 && GET_CODE (op0) == PLUS
4884 && CONST_INT_P (XEXP (op0, 1))
4885 && (rtx_equal_p (op1, XEXP (op0, 0))
4886 || rtx_equal_p (op1, XEXP (op0, 1)))
4887 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4888 && XEXP (op0, 1) != const0_rtx)
4889 {
4890 rtx new_cmp
4891 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4892 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4893 cmp_mode, XEXP (op0, 0), new_cmp);
4894 }
4895
4896 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4897 transformed into (LTU a -C). */
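/* E.g. (gtu:SI (plus:SI a (const_int 4)) (const_int 3)) becomes
   (ltu:SI a (const_int -4)).  */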
4898 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4899 && CONST_INT_P (XEXP (op0, 1))
4900 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4901 && XEXP (op0, 1) != const0_rtx)
4902 {
4903 rtx new_cmp
4904 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4905 return simplify_gen_relational (LTU, mode, cmp_mode,
4906 XEXP (op0, 0), new_cmp);
4907 }
4908
4909 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4910 if ((code == LTU || code == GEU)
4911 && GET_CODE (op0) == PLUS
4912 && rtx_equal_p (op1, XEXP (op0, 1))
4913 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4914 && !rtx_equal_p (op1, XEXP (op0, 0)))
4915 return simplify_gen_relational (code, mode, cmp_mode, op0,
4916 copy_rtx (XEXP (op0, 0)));
4917
4918 if (op1 == const0_rtx)
4919 {
4920 /* Canonicalize (GTU x 0) as (NE x 0). */
4921 if (code == GTU)
4922 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4923 /* Canonicalize (LEU x 0) as (EQ x 0). */
4924 if (code == LEU)
4925 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4926 }
4927 else if (op1 == const1_rtx)
4928 {
4929 switch (code)
4930 {
4931 case GE:
4932 /* Canonicalize (GE x 1) as (GT x 0). */
4933 return simplify_gen_relational (GT, mode, cmp_mode,
4934 op0, const0_rtx);
4935 case GEU:
4936 /* Canonicalize (GEU x 1) as (NE x 0). */
4937 return simplify_gen_relational (NE, mode, cmp_mode,
4938 op0, const0_rtx);
4939 case LT:
4940 /* Canonicalize (LT x 1) as (LE x 0). */
4941 return simplify_gen_relational (LE, mode, cmp_mode,
4942 op0, const0_rtx);
4943 case LTU:
4944 /* Canonicalize (LTU x 1) as (EQ x 0). */
4945 return simplify_gen_relational (EQ, mode, cmp_mode,
4946 op0, const0_rtx);
4947 default:
4948 break;
4949 }
4950 }
4951 else if (op1 == constm1_rtx)
4952 {
4953 /* Canonicalize (LE x -1) as (LT x 0). */
4954 if (code == LE)
4955 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4956 /* Canonicalize (GT x -1) as (GE x 0). */
4957 if (code == GT)
4958 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4959 }
4960
4961 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4962 if ((code == EQ || code == NE)
4963 && (op0code == PLUS || op0code == MINUS)
4964 && CONSTANT_P (op1)
4965 && CONSTANT_P (XEXP (op0, 1))
4966 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4967 {
4968 rtx x = XEXP (op0, 0);
4969 rtx c = XEXP (op0, 1);
4970 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4971 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4972
4973 /* Detect an infinite recursion, where we would oscillate at this
4974 simplification case between:
4975 A + B == C <---> C - B == A,
4976 where A, B, and C are all constants with non-simplifiable expressions,
4977 usually SYMBOL_REFs. */
4978 if (GET_CODE (tem) == invcode
4979 && CONSTANT_P (x)
4980 && rtx_equal_p (c, XEXP (tem, 1)))
4981 return NULL_RTX;
4982
4983 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4984 }
4985
4986 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4987 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4988 scalar_int_mode int_mode, int_cmp_mode;
4989 if (code == NE
4990 && op1 == const0_rtx
4991 && is_int_mode (mode, &int_mode)
4992 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4993 /* ??? Work-around BImode bugs in the ia64 backend. */
4994 && int_mode != BImode
4995 && int_cmp_mode != BImode
4996 && nonzero_bits (op0, int_cmp_mode) == 1
4997 && STORE_FLAG_VALUE == 1)
4998 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
4999 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5000 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5001
5002 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5003 if ((code == EQ || code == NE)
5004 && op1 == const0_rtx
5005 && op0code == XOR)
5006 return simplify_gen_relational (code, mode, cmp_mode,
5007 XEXP (op0, 0), XEXP (op0, 1));
5008
5009 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5010 if ((code == EQ || code == NE)
5011 && op0code == XOR
5012 && rtx_equal_p (XEXP (op0, 0), op1)
5013 && !side_effects_p (XEXP (op0, 0)))
5014 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5015 CONST0_RTX (mode));
5016
5017 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5018 if ((code == EQ || code == NE)
5019 && op0code == XOR
5020 && rtx_equal_p (XEXP (op0, 1), op1)
5021 && !side_effects_p (XEXP (op0, 1)))
5022 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5023 CONST0_RTX (mode));
5024
5025 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5026 if ((code == EQ || code == NE)
5027 && op0code == XOR
5028 && CONST_SCALAR_INT_P (op1)
5029 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5030 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5031 simplify_gen_binary (XOR, cmp_mode,
5032 XEXP (op0, 1), op1));
5033
5034 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
5035 can be implemented with a BICS instruction on some targets, or
5036 constant-folded if y is a constant. */
5037 if ((code == EQ || code == NE)
5038 && op0code == AND
5039 && rtx_equal_p (XEXP (op0, 0), op1)
5040 && !side_effects_p (op1)
5041 && op1 != CONST0_RTX (cmp_mode))
5042 {
5043 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
5044 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5045
5046 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5047 CONST0_RTX (cmp_mode));
5048 }
5049
5050 /* Likewise for (eq/ne (and x y) y). */
5051 if ((code == EQ || code == NE)
5052 && op0code == AND
5053 && rtx_equal_p (XEXP (op0, 1), op1)
5054 && !side_effects_p (op1)
5055 && op1 != CONST0_RTX (cmp_mode))
5056 {
5057 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
5058 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5059
5060 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5061 CONST0_RTX (cmp_mode));
5062 }
5063
5064 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5065 if ((code == EQ || code == NE)
5066 && GET_CODE (op0) == BSWAP
5067 && CONST_SCALAR_INT_P (op1))
5068 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5069 simplify_gen_unary (BSWAP, cmp_mode,
5070 op1, cmp_mode));
5071
5072 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5073 if ((code == EQ || code == NE)
5074 && GET_CODE (op0) == BSWAP
5075 && GET_CODE (op1) == BSWAP)
5076 return simplify_gen_relational (code, mode, cmp_mode,
5077 XEXP (op0, 0), XEXP (op1, 0));
5078
5079 if (op0code == POPCOUNT && op1 == const0_rtx)
5080 switch (code)
5081 {
5082 case EQ:
5083 case LE:
5084 case LEU:
5085 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5086 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5087 XEXP (op0, 0), const0_rtx);
5088
5089 case NE:
5090 case GT:
5091 case GTU:
5092 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5093 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5094 XEXP (op0, 0), const0_rtx);
5095
5096 default:
5097 break;
5098 }
5099
5100 return NULL_RTX;
5101 }
5102
5103 enum
5104 {
5105 CMP_EQ = 1,
5106 CMP_LT = 2,
5107 CMP_GT = 4,
5108 CMP_LTU = 8,
5109 CMP_GTU = 16
5110 };
5111
5112
5113 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5114 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5115 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5116 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5117 For floating-point comparisons, assume that the operands were ordered. */
5118
5119 static rtx
5120 comparison_result (enum rtx_code code, int known_results)
5121 {
5122 switch (code)
5123 {
5124 case EQ:
5125 case UNEQ:
5126 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5127 case NE:
5128 case LTGT:
5129 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5130
5131 case LT:
5132 case UNLT:
5133 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5134 case GE:
5135 case UNGE:
5136 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5137
5138 case GT:
5139 case UNGT:
5140 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5141 case LE:
5142 case UNLE:
5143 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5144
5145 case LTU:
5146 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5147 case GEU:
5148 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5149
5150 case GTU:
5151 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5152 case LEU:
5153 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5154
5155 case ORDERED:
5156 return const_true_rtx;
5157 case UNORDERED:
5158 return const0_rtx;
5159 default:
5160 gcc_unreachable ();
5161 }
5162 }
5163
5164 /* Check if the given comparison (done in the given MODE) is actually
5165 a tautology or a contradiction.  If the mode is VOIDmode, the
5166 comparison is done in "infinite precision". If no simplification
5167 is possible, this function returns zero. Otherwise, it returns
5168 either const_true_rtx or const0_rtx. */
5169
5170 rtx
5171 simplify_const_relational_operation (enum rtx_code code,
5172 machine_mode mode,
5173 rtx op0, rtx op1)
5174 {
5175 rtx tem;
5176 rtx trueop0;
5177 rtx trueop1;
5178
5179 gcc_assert (mode != VOIDmode
5180 || (GET_MODE (op0) == VOIDmode
5181 && GET_MODE (op1) == VOIDmode));
5182
5183 /* If op0 is a compare, extract the comparison arguments from it. */
5184 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5185 {
5186 op1 = XEXP (op0, 1);
5187 op0 = XEXP (op0, 0);
5188
5189 if (GET_MODE (op0) != VOIDmode)
5190 mode = GET_MODE (op0);
5191 else if (GET_MODE (op1) != VOIDmode)
5192 mode = GET_MODE (op1);
5193 else
5194 return 0;
5195 }
5196
5197 /* We can't simplify MODE_CC values since we don't know what the
5198 actual comparison is. */
5199 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5200 return 0;
5201
5202 /* Make sure the constant is second. */
5203 if (swap_commutative_operands_p (op0, op1))
5204 {
5205 std::swap (op0, op1);
5206 code = swap_condition (code);
5207 }
5208
5209 trueop0 = avoid_constant_pool_reference (op0);
5210 trueop1 = avoid_constant_pool_reference (op1);
5211
5212 /* For integer comparisons of A and B maybe we can simplify A - B and can
5213 then simplify a comparison of that with zero. If A and B are both either
5214 a register or a CONST_INT, this can't help; testing for these cases will
5215 prevent infinite recursion here and speed things up.
5216
5217 We can only do this for EQ and NE comparisons as otherwise we may
5218 lose or introduce overflow which we cannot disregard as undefined as
5219 we do not know the signedness of the operation on either the left or
5220 the right hand side of the comparison. */
5221
5222 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5223 && (code == EQ || code == NE)
5224 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5225 && (REG_P (op1) || CONST_INT_P (trueop1)))
5226 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5227 /* We cannot do this if tem is a nonzero address. */
5228 && ! nonzero_address_p (tem))
5229 return simplify_const_relational_operation (signed_condition (code),
5230 mode, tem, const0_rtx);
5231
5232 if (! HONOR_NANS (mode) && code == ORDERED)
5233 return const_true_rtx;
5234
5235 if (! HONOR_NANS (mode) && code == UNORDERED)
5236 return const0_rtx;
5237
5238 /* For modes without NaNs, if the two operands are equal, we know the
5239 result except if they have side-effects. Even with NaNs we know
5240 the result of unordered comparisons and, if signaling NaNs are
5241 irrelevant, also the result of LT/GT/LTGT. */
5242 if ((! HONOR_NANS (trueop0)
5243 || code == UNEQ || code == UNLE || code == UNGE
5244 || ((code == LT || code == GT || code == LTGT)
5245 && ! HONOR_SNANS (trueop0)))
5246 && rtx_equal_p (trueop0, trueop1)
5247 && ! side_effects_p (trueop0))
5248 return comparison_result (code, CMP_EQ);
5249
5250 /* If the operands are floating-point constants, see if we can fold
5251 the result. */
5252 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5253 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5254 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5255 {
5256 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5257 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5258
5259 /* Comparisons are unordered iff at least one of the values is NaN. */
5260 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5261 switch (code)
5262 {
5263 case UNEQ:
5264 case UNLT:
5265 case UNGT:
5266 case UNLE:
5267 case UNGE:
5268 case NE:
5269 case UNORDERED:
5270 return const_true_rtx;
5271 case EQ:
5272 case LT:
5273 case GT:
5274 case LE:
5275 case GE:
5276 case LTGT:
5277 case ORDERED:
5278 return const0_rtx;
5279 default:
5280 return 0;
5281 }
5282
5283 return comparison_result (code,
5284 (real_equal (d0, d1) ? CMP_EQ :
5285 real_less (d0, d1) ? CMP_LT : CMP_GT));
5286 }
5287
5288 /* Otherwise, see if the operands are both integers. */
5289 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5290 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5291 {
5292 /* It would be nice if we really had a mode here. However, the
5293 largest int representable on the target is as good as
5294 infinite. */
5295 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5296 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5297 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5298
5299 if (wi::eq_p (ptrueop0, ptrueop1))
5300 return comparison_result (code, CMP_EQ);
5301 else
5302 {
5303 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5304 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5305 return comparison_result (code, cr);
5306 }
5307 }
5308
5309 /* Optimize comparisons with upper and lower bounds. */
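/* For example, if nonzero_bits shows that only the low four bits of
   TRUEOP0 can be set, mmin and mmax become 0 and 15, so
   (gtu x (const_int 15)) folds to const0_rtx and (leu x (const_int 15))
   folds to const_true_rtx.  */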
5310 scalar_int_mode int_mode;
5311 if (CONST_INT_P (trueop1)
5312 && is_a <scalar_int_mode> (mode, &int_mode)
5313 && HWI_COMPUTABLE_MODE_P (int_mode)
5314 && !side_effects_p (trueop0))
5315 {
5316 int sign;
5317 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5318 HOST_WIDE_INT val = INTVAL (trueop1);
5319 HOST_WIDE_INT mmin, mmax;
5320
5321 if (code == GEU
5322 || code == LEU
5323 || code == GTU
5324 || code == LTU)
5325 sign = 0;
5326 else
5327 sign = 1;
5328
5329 /* Get a reduced range if the sign bit is zero. */
5330 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5331 {
5332 mmin = 0;
5333 mmax = nonzero;
5334 }
5335 else
5336 {
5337 rtx mmin_rtx, mmax_rtx;
5338 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5339
5340 mmin = INTVAL (mmin_rtx);
5341 mmax = INTVAL (mmax_rtx);
5342 if (sign)
5343 {
5344 unsigned int sign_copies
5345 = num_sign_bit_copies (trueop0, int_mode);
5346
5347 mmin >>= (sign_copies - 1);
5348 mmax >>= (sign_copies - 1);
5349 }
5350 }
5351
5352 switch (code)
5353 {
5354 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5355 case GEU:
5356 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5357 return const_true_rtx;
5358 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5359 return const0_rtx;
5360 break;
5361 case GE:
5362 if (val <= mmin)
5363 return const_true_rtx;
5364 if (val > mmax)
5365 return const0_rtx;
5366 break;
5367
5368 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5369 case LEU:
5370 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5371 return const_true_rtx;
5372 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5373 return const0_rtx;
5374 break;
5375 case LE:
5376 if (val >= mmax)
5377 return const_true_rtx;
5378 if (val < mmin)
5379 return const0_rtx;
5380 break;
5381
5382 case EQ:
5383 /* x == y is always false for y out of range. */
5384 if (val < mmin || val > mmax)
5385 return const0_rtx;
5386 break;
5387
5388 /* x > y is always false for y >= mmax, always true for y < mmin. */
5389 case GTU:
5390 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5391 return const0_rtx;
5392 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5393 return const_true_rtx;
5394 break;
5395 case GT:
5396 if (val >= mmax)
5397 return const0_rtx;
5398 if (val < mmin)
5399 return const_true_rtx;
5400 break;
5401
5402 /* x < y is always false for y <= mmin, always true for y > mmax. */
5403 case LTU:
5404 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5405 return const0_rtx;
5406 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5407 return const_true_rtx;
5408 break;
5409 case LT:
5410 if (val <= mmin)
5411 return const0_rtx;
5412 if (val > mmax)
5413 return const_true_rtx;
5414 break;
5415
5416 case NE:
5417 /* x != y is always true for y out of range. */
5418 if (val < mmin || val > mmax)
5419 return const_true_rtx;
5420 break;
5421
5422 default:
5423 break;
5424 }
5425 }
5426
5427 /* Optimize integer comparisons with zero. */
5428 if (is_a <scalar_int_mode> (mode, &int_mode)
5429 && trueop1 == const0_rtx
5430 && !side_effects_p (trueop0))
5431 {
5432 /* Some addresses are known to be nonzero. We don't know
5433 their sign, but equality comparisons are known. */
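/* For example, if TRUEOP0 is a SYMBOL_REF to a non-weak object,
   nonzero_address_p is expected to return true, so EQ and LEU against
   zero fold to false while NE and GTU fold to true.  */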
5434 if (nonzero_address_p (trueop0))
5435 {
5436 if (code == EQ || code == LEU)
5437 return const0_rtx;
5438 if (code == NE || code == GTU)
5439 return const_true_rtx;
5440 }
5441
5442 /* See if the first operand is an IOR with a constant. If so, we
5443 may be able to determine the result of this comparison. */
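/* For example, (ior:SI x (const_int 4)) is known to be nonzero, so
   comparing it against zero folds for EQ, NE, LEU and GTU; if the
   IORed constant also has the sign bit set, the signed orderings
   LT/LE/GT/GE fold as well.  */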
5444 if (GET_CODE (op0) == IOR)
5445 {
5446 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5447 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5448 {
5449 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5450 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5451 && (UINTVAL (inner_const)
5452 & (HOST_WIDE_INT_1U
5453 << sign_bitnum)));
5454
5455 switch (code)
5456 {
5457 case EQ:
5458 case LEU:
5459 return const0_rtx;
5460 case NE:
5461 case GTU:
5462 return const_true_rtx;
5463 case LT:
5464 case LE:
5465 if (has_sign)
5466 return const_true_rtx;
5467 break;
5468 case GT:
5469 case GE:
5470 if (has_sign)
5471 return const0_rtx;
5472 break;
5473 default:
5474 break;
5475 }
5476 }
5477 }
5478 }
5479
5480 /* Optimize comparison of ABS with zero. */
5481 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5482 && (GET_CODE (trueop0) == ABS
5483 || (GET_CODE (trueop0) == FLOAT_EXTEND
5484 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5485 {
5486 switch (code)
5487 {
5488 case LT:
5489 /* Optimize abs(x) < 0.0. */
5490 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5491 return const0_rtx;
5492 break;
5493
5494 case GE:
5495 /* Optimize abs(x) >= 0.0. */
5496 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5497 return const_true_rtx;
5498 break;
5499
5500 case UNGE:
5501 /* Optimize ! (abs(x) < 0.0). */
5502 return const_true_rtx;
5503
5504 default:
5505 break;
5506 }
5507 }
5508
5509 return 0;
5510 }
5511
5512 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X),
5513 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5514 or CTZ_DEFINED_VALUE_AT_ZERO respectively. Return OP (X) if the expression
5515 can be simplified to that, or NULL_RTX if not.
5516 Assume X is compared against zero with CMP_CODE and the true
5517 arm is TRUE_VAL and the false arm is FALSE_VAL. */
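/* A hypothetical example: on a target where CLZ_DEFINED_VALUE_AT_ZERO
   yields 32 for SImode, (eq X (const_int 0)) ? (const_int 32) : (clz:SI X)
   simplifies to (clz:SI X).  */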
5518
5519 static rtx
5520 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5521 {
5522 if (cmp_code != EQ && cmp_code != NE)
5523 return NULL_RTX;
5524
5525 /* Result on X == 0 and X != 0 respectively. */
5526 rtx on_zero, on_nonzero;
5527 if (cmp_code == EQ)
5528 {
5529 on_zero = true_val;
5530 on_nonzero = false_val;
5531 }
5532 else
5533 {
5534 on_zero = false_val;
5535 on_nonzero = true_val;
5536 }
5537
5538 rtx_code op_code = GET_CODE (on_nonzero);
5539 if ((op_code != CLZ && op_code != CTZ)
5540 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5541 || !CONST_INT_P (on_zero))
5542 return NULL_RTX;
5543
5544 HOST_WIDE_INT op_val;
5545 scalar_int_mode mode ATTRIBUTE_UNUSED
5546 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5547 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5548 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5549 && op_val == INTVAL (on_zero))
5550 return on_nonzero;
5551
5552 return NULL_RTX;
5553 }
5554
5555 \f
5556 /* Simplify CODE, an operation with result mode MODE and three operands,
5557 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5558 a constant. Return 0 if no simplification is possible. */
5559
5560 rtx
5561 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5562 machine_mode op0_mode, rtx op0, rtx op1,
5563 rtx op2)
5564 {
5565 bool any_change = false;
5566 rtx tem, trueop2;
5567 scalar_int_mode int_mode, int_op0_mode;
5568
5569 switch (code)
5570 {
5571 case FMA:
5572 /* Simplify negations around the multiplication. */
5573 /* -a * -b + c => a * b + c. */
5574 if (GET_CODE (op0) == NEG)
5575 {
5576 tem = simplify_unary_operation (NEG, mode, op1, mode);
5577 if (tem)
5578 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5579 }
5580 else if (GET_CODE (op1) == NEG)
5581 {
5582 tem = simplify_unary_operation (NEG, mode, op0, mode);
5583 if (tem)
5584 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5585 }
5586
5587 /* Canonicalize the two multiplication operands. */
5588 /* a * -b + c => -b * a + c. */
5589 if (swap_commutative_operands_p (op0, op1))
5590 std::swap (op0, op1), any_change = true;
5591
5592 if (any_change)
5593 return gen_rtx_FMA (mode, op0, op1, op2);
5594 return NULL_RTX;
5595
5596 case SIGN_EXTRACT:
5597 case ZERO_EXTRACT:
5598 if (CONST_INT_P (op0)
5599 && CONST_INT_P (op1)
5600 && CONST_INT_P (op2)
5601 && is_a <scalar_int_mode> (mode, &int_mode)
5602 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5603 && HWI_COMPUTABLE_MODE_P (int_mode))
5604 {
5605 /* Extracting a bit-field from a constant. */
5606 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5607 HOST_WIDE_INT op1val = INTVAL (op1);
5608 HOST_WIDE_INT op2val = INTVAL (op2);
5609 if (!BITS_BIG_ENDIAN)
5610 val >>= op2val;
5611 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5612 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5613 else
5614 /* Not enough information to calculate the bit position. */
5615 break;
5616
5617 if (HOST_BITS_PER_WIDE_INT != op1val)
5618 {
5619 /* First zero-extend. */
5620 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5621 /* If desired, propagate sign bit. */
5622 if (code == SIGN_EXTRACT
5623 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5624 != 0)
5625 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5626 }
5627
5628 return gen_int_mode (val, int_mode);
5629 }
5630 break;
5631
5632 case IF_THEN_ELSE:
5633 if (CONST_INT_P (op0))
5634 return op0 != const0_rtx ? op1 : op2;
5635
5636 /* Convert c ? a : a into "a". */
5637 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5638 return op1;
5639
5640 /* Convert a != b ? a : b into "a". */
5641 if (GET_CODE (op0) == NE
5642 && ! side_effects_p (op0)
5643 && ! HONOR_NANS (mode)
5644 && ! HONOR_SIGNED_ZEROS (mode)
5645 && ((rtx_equal_p (XEXP (op0, 0), op1)
5646 && rtx_equal_p (XEXP (op0, 1), op2))
5647 || (rtx_equal_p (XEXP (op0, 0), op2)
5648 && rtx_equal_p (XEXP (op0, 1), op1))))
5649 return op1;
5650
5651 /* Convert a == b ? a : b into "b". */
5652 if (GET_CODE (op0) == EQ
5653 && ! side_effects_p (op0)
5654 && ! HONOR_NANS (mode)
5655 && ! HONOR_SIGNED_ZEROS (mode)
5656 && ((rtx_equal_p (XEXP (op0, 0), op1)
5657 && rtx_equal_p (XEXP (op0, 1), op2))
5658 || (rtx_equal_p (XEXP (op0, 0), op2)
5659 && rtx_equal_p (XEXP (op0, 1), op1))))
5660 return op2;
5661
5662 /* Convert (!c) != {0,...,0} ? a : b into
5663 c != {0,...,0} ? b : a for vector modes. */
5664 if (VECTOR_MODE_P (GET_MODE (op1))
5665 && GET_CODE (op0) == NE
5666 && GET_CODE (XEXP (op0, 0)) == NOT
5667 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5668 {
5669 rtx cv = XEXP (op0, 1);
5670 int nunits = CONST_VECTOR_NUNITS (cv);
5671 bool ok = true;
5672 for (int i = 0; i < nunits; ++i)
5673 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5674 {
5675 ok = false;
5676 break;
5677 }
5678 if (ok)
5679 {
5680 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5681 XEXP (XEXP (op0, 0), 0),
5682 XEXP (op0, 1));
5683 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5684 return retval;
5685 }
5686 }
5687
5688 /* Convert x == 0 ? N : clz (x) into clz (x) when
5689 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5690 Similarly for ctz (x). */
5691 if (COMPARISON_P (op0) && !side_effects_p (op0)
5692 && XEXP (op0, 1) == const0_rtx)
5693 {
5694 rtx simplified
5695 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5696 op1, op2);
5697 if (simplified)
5698 return simplified;
5699 }
5700
5701 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5702 {
5703 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5704 ? GET_MODE (XEXP (op0, 1))
5705 : GET_MODE (XEXP (op0, 0)));
5706 rtx temp;
5707
5708 /* Look for happy constants in op1 and op2. */
5709 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5710 {
5711 HOST_WIDE_INT t = INTVAL (op1);
5712 HOST_WIDE_INT f = INTVAL (op2);
5713
5714 if (t == STORE_FLAG_VALUE && f == 0)
5715 code = GET_CODE (op0);
5716 else if (t == 0 && f == STORE_FLAG_VALUE)
5717 {
5718 enum rtx_code tmp;
5719 tmp = reversed_comparison_code (op0, NULL);
5720 if (tmp == UNKNOWN)
5721 break;
5722 code = tmp;
5723 }
5724 else
5725 break;
5726
5727 return simplify_gen_relational (code, mode, cmp_mode,
5728 XEXP (op0, 0), XEXP (op0, 1));
5729 }
5730
5731 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5732 cmp_mode, XEXP (op0, 0),
5733 XEXP (op0, 1));
5734
5735 /* See if any simplifications were possible. */
5736 if (temp)
5737 {
5738 if (CONST_INT_P (temp))
5739 return temp == const0_rtx ? op2 : op1;
5740 else if (temp)
5741 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5742 }
5743 }
5744 break;
5745
5746 case VEC_MERGE:
5747 gcc_assert (GET_MODE (op0) == mode);
5748 gcc_assert (GET_MODE (op1) == mode);
5749 gcc_assert (VECTOR_MODE_P (mode));
5750 trueop2 = avoid_constant_pool_reference (op2);
5751 if (CONST_INT_P (trueop2))
5752 {
5753 unsigned n_elts = GET_MODE_NUNITS (mode);
5754 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5755 unsigned HOST_WIDE_INT mask;
5756 if (n_elts == HOST_BITS_PER_WIDE_INT)
5757 mask = -1;
5758 else
5759 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5760
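/* Each set bit of SEL selects the corresponding element from OP0;
   each clear bit selects it from OP1.  */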
5761 if (!(sel & mask) && !side_effects_p (op0))
5762 return op1;
5763 if ((sel & mask) == mask && !side_effects_p (op1))
5764 return op0;
5765
5766 rtx trueop0 = avoid_constant_pool_reference (op0);
5767 rtx trueop1 = avoid_constant_pool_reference (op1);
5768 if (GET_CODE (trueop0) == CONST_VECTOR
5769 && GET_CODE (trueop1) == CONST_VECTOR)
5770 {
5771 rtvec v = rtvec_alloc (n_elts);
5772 unsigned int i;
5773
5774 for (i = 0; i < n_elts; i++)
5775 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5776 ? CONST_VECTOR_ELT (trueop0, i)
5777 : CONST_VECTOR_ELT (trueop1, i));
5778 return gen_rtx_CONST_VECTOR (mode, v);
5779 }
5780
5781 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5782 if no element from a appears in the result. */
5783 if (GET_CODE (op0) == VEC_MERGE)
5784 {
5785 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5786 if (CONST_INT_P (tem))
5787 {
5788 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5789 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5790 return simplify_gen_ternary (code, mode, mode,
5791 XEXP (op0, 1), op1, op2);
5792 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5793 return simplify_gen_ternary (code, mode, mode,
5794 XEXP (op0, 0), op1, op2);
5795 }
5796 }
5797 if (GET_CODE (op1) == VEC_MERGE)
5798 {
5799 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5800 if (CONST_INT_P (tem))
5801 {
5802 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5803 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5804 return simplify_gen_ternary (code, mode, mode,
5805 op0, XEXP (op1, 1), op2);
5806 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5807 return simplify_gen_ternary (code, mode, mode,
5808 op0, XEXP (op1, 0), op2);
5809 }
5810 }
5811
5812 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5813 with a. */
5814 if (GET_CODE (op0) == VEC_DUPLICATE
5815 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5816 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5817 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5818 {
5819 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5820 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5821 {
5822 if (XEXP (XEXP (op0, 0), 0) == op1
5823 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5824 return op1;
5825 }
5826 }
5827 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5828 (const_int N))
5829 with (vec_concat (X) (B)) if N == 1 or
5830 (vec_concat (A) (X)) if N == 2. */
5831 if (GET_CODE (op0) == VEC_DUPLICATE
5832 && GET_CODE (op1) == CONST_VECTOR
5833 && CONST_VECTOR_NUNITS (op1) == 2
5834 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5835 && IN_RANGE (sel, 1, 2))
5836 {
5837 rtx newop0 = XEXP (op0, 0);
5838 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
5839 if (sel == 2)
5840 std::swap (newop0, newop1);
5841 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5842 }
5843 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5844 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5845 Only applies for vectors of two elements. */
5846 if (GET_CODE (op0) == VEC_DUPLICATE
5847 && GET_CODE (op1) == VEC_CONCAT
5848 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5849 && GET_MODE_NUNITS (GET_MODE (op1)) == 2
5850 && IN_RANGE (sel, 1, 2))
5851 {
5852 rtx newop0 = XEXP (op0, 0);
5853 rtx newop1 = XEXP (op1, 2 - sel);
5854 rtx otherop = XEXP (op1, sel - 1);
5855 if (sel == 2)
5856 std::swap (newop0, newop1);
5857 /* Don't want to throw away the other part of the vec_concat if
5858 it has side-effects. */
5859 if (!side_effects_p (otherop))
5860 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5861 }
5862
5863 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
5864 (const_int n))
5865 with (vec_concat x y) or (vec_concat y x) depending on value
5866 of N. */
5867 if (GET_CODE (op0) == VEC_DUPLICATE
5868 && GET_CODE (op1) == VEC_DUPLICATE
5869 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5870 && GET_MODE_NUNITS (GET_MODE (op1)) == 2
5871 && IN_RANGE (sel, 1, 2))
5872 {
5873 rtx newop0 = XEXP (op0, 0);
5874 rtx newop1 = XEXP (op1, 0);
5875 if (sel == 2)
5876 std::swap (newop0, newop1);
5877
5878 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5879 }
5880 }
5881
5882 if (rtx_equal_p (op0, op1)
5883 && !side_effects_p (op2) && !side_effects_p (op1))
5884 return op0;
5885
5886 break;
5887
5888 default:
5889 gcc_unreachable ();
5890 }
5891
5892 return 0;
5893 }
5894
5895 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5896 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5897 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5898
5899 Works by unpacking OP into a collection of 8-bit values
5900 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5901 and then repacking them again for OUTERMODE. */
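/* For example, on a little-endian target, taking the QImode subreg at
   byte 0 of (const_int 0x1234) viewed in HImode unpacks the bytes
   0x34, 0x12 and repacks byte 0 into (const_int 0x34).  */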
5902
5903 static rtx
5904 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
5905 fixed_size_mode innermode, unsigned int byte)
5906 {
5907 enum {
5908 value_bit = 8,
5909 value_mask = (1 << value_bit) - 1
5910 };
5911 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5912 int value_start;
5913 int i;
5914 int elem;
5915
5916 int num_elem;
5917 rtx * elems;
5918 int elem_bitsize;
5919 rtx result_s = NULL;
5920 rtvec result_v = NULL;
5921 enum mode_class outer_class;
5922 scalar_mode outer_submode;
5923 int max_bitsize;
5924
5925 /* Some ports misuse CCmode. */
5926 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5927 return op;
5928
5929 /* We have no way to represent a complex constant at the rtl level. */
5930 if (COMPLEX_MODE_P (outermode))
5931 return NULL_RTX;
5932
5933 /* We support any size mode. */
5934 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5935 GET_MODE_BITSIZE (innermode));
5936
5937 /* Unpack the value. */
5938
5939 if (GET_CODE (op) == CONST_VECTOR)
5940 {
5941 num_elem = CONST_VECTOR_NUNITS (op);
5942 elems = &CONST_VECTOR_ELT (op, 0);
5943 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5944 }
5945 else
5946 {
5947 num_elem = 1;
5948 elems = &op;
5949 elem_bitsize = max_bitsize;
5950 }
5951 /* If this asserts, it is too complicated; reducing value_bit may help. */
5952 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5953 /* I don't know how to handle endianness of sub-units. */
5954 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5955
5956 for (elem = 0; elem < num_elem; elem++)
5957 {
5958 unsigned char * vp;
5959 rtx el = elems[elem];
5960
5961 /* Vectors are kept in target memory order. (This is probably
5962 a mistake.) */
5963 {
5964 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5965 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5966 / BITS_PER_UNIT);
5967 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5968 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5969 unsigned bytele = (subword_byte % UNITS_PER_WORD
5970 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5971 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5972 }
5973
5974 switch (GET_CODE (el))
5975 {
5976 case CONST_INT:
5977 for (i = 0;
5978 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5979 i += value_bit)
5980 *vp++ = INTVAL (el) >> i;
5981 /* CONST_INTs are always logically sign-extended. */
5982 for (; i < elem_bitsize; i += value_bit)
5983 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5984 break;
5985
5986 case CONST_WIDE_INT:
5987 {
5988 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
5989 unsigned char extend = wi::sign_mask (val);
5990 int prec = wi::get_precision (val);
5991
5992 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5993 *vp++ = wi::extract_uhwi (val, i, value_bit);
5994 for (; i < elem_bitsize; i += value_bit)
5995 *vp++ = extend;
5996 }
5997 break;
5998
5999 case CONST_DOUBLE:
6000 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
6001 {
6002 unsigned char extend = 0;
6003 /* If this triggers, someone should have generated a
6004 CONST_INT instead. */
6005 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
6006
6007 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6008 *vp++ = CONST_DOUBLE_LOW (el) >> i;
6009 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
6010 {
6011 *vp++
6012 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
6013 i += value_bit;
6014 }
6015
6016 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
6017 extend = -1;
6018 for (; i < elem_bitsize; i += value_bit)
6019 *vp++ = extend;
6020 }
6021 else
6022 {
6023 /* This is big enough for anything on the platform. */
6024 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
6025 scalar_float_mode el_mode;
6026
6027 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
6028 int bitsize = GET_MODE_BITSIZE (el_mode);
6029
6030 gcc_assert (bitsize <= elem_bitsize);
6031 gcc_assert (bitsize % value_bit == 0);
6032
6033 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
6034 GET_MODE (el));
6035
6036 /* real_to_target produces its result in words affected by
6037 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6038 and use WORDS_BIG_ENDIAN instead; see the documentation
6039 of SUBREG in rtl.texi. */
6040 for (i = 0; i < bitsize; i += value_bit)
6041 {
6042 int ibase;
6043 if (WORDS_BIG_ENDIAN)
6044 ibase = bitsize - 1 - i;
6045 else
6046 ibase = i;
6047 *vp++ = tmp[ibase / 32] >> i % 32;
6048 }
6049
6050 /* It shouldn't matter what's done here, so fill it with
6051 zero. */
6052 for (; i < elem_bitsize; i += value_bit)
6053 *vp++ = 0;
6054 }
6055 break;
6056
6057 case CONST_FIXED:
6058 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
6059 {
6060 for (i = 0; i < elem_bitsize; i += value_bit)
6061 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6062 }
6063 else
6064 {
6065 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6066 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6067 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
6068 i += value_bit)
6069 *vp++ = CONST_FIXED_VALUE_HIGH (el)
6070 >> (i - HOST_BITS_PER_WIDE_INT);
6071 for (; i < elem_bitsize; i += value_bit)
6072 *vp++ = 0;
6073 }
6074 break;
6075
6076 default:
6077 gcc_unreachable ();
6078 }
6079 }
6080
6081 /* Now, pick the right byte to start with. */
6082 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6083 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6084 will already have offset 0. */
6085 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
6086 {
6087 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
6088 - byte);
6089 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6090 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6091 byte = (subword_byte % UNITS_PER_WORD
6092 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6093 }
6094
6095 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6096 so if it's become negative it will instead be very large.) */
6097 gcc_assert (byte < GET_MODE_SIZE (innermode));
6098
6099 /* Convert from bytes to chunks of size value_bit. */
6100 value_start = byte * (BITS_PER_UNIT / value_bit);
6101
6102 /* Re-pack the value. */
6103 num_elem = GET_MODE_NUNITS (outermode);
6104
6105 if (VECTOR_MODE_P (outermode))
6106 {
6107 result_v = rtvec_alloc (num_elem);
6108 elems = &RTVEC_ELT (result_v, 0);
6109 }
6110 else
6111 elems = &result_s;
6112
6113 outer_submode = GET_MODE_INNER (outermode);
6114 outer_class = GET_MODE_CLASS (outer_submode);
6115 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6116
6117 gcc_assert (elem_bitsize % value_bit == 0);
6118 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6119
6120 for (elem = 0; elem < num_elem; elem++)
6121 {
6122 unsigned char *vp;
6123
6124 /* Vectors are stored in target memory order. (This is probably
6125 a mistake.) */
6126 {
6127 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6128 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6129 / BITS_PER_UNIT);
6130 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6131 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6132 unsigned bytele = (subword_byte % UNITS_PER_WORD
6133 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6134 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6135 }
6136
6137 switch (outer_class)
6138 {
6139 case MODE_INT:
6140 case MODE_PARTIAL_INT:
6141 {
6142 int u;
6143 int base = 0;
6144 int units
6145 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6146 / HOST_BITS_PER_WIDE_INT;
6147 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6148 wide_int r;
6149
6150 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6151 return NULL_RTX;
6152 for (u = 0; u < units; u++)
6153 {
6154 unsigned HOST_WIDE_INT buf = 0;
6155 for (i = 0;
6156 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6157 i += value_bit)
6158 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6159
6160 tmp[u] = buf;
6161 base += HOST_BITS_PER_WIDE_INT;
6162 }
6163 r = wide_int::from_array (tmp, units,
6164 GET_MODE_PRECISION (outer_submode));
6165 #if TARGET_SUPPORTS_WIDE_INT == 0
6166 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6167 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6168 return NULL_RTX;
6169 #endif
6170 elems[elem] = immed_wide_int_const (r, outer_submode);
6171 }
6172 break;
6173
6174 case MODE_FLOAT:
6175 case MODE_DECIMAL_FLOAT:
6176 {
6177 REAL_VALUE_TYPE r;
6178 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6179
6180 /* real_from_target wants its input in words affected by
6181 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6182 and use WORDS_BIG_ENDIAN instead; see the documentation
6183 of SUBREG in rtl.texi. */
6184 for (i = 0; i < elem_bitsize; i += value_bit)
6185 {
6186 int ibase;
6187 if (WORDS_BIG_ENDIAN)
6188 ibase = elem_bitsize - 1 - i;
6189 else
6190 ibase = i;
6191 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6192 }
6193
6194 real_from_target (&r, tmp, outer_submode);
6195 elems[elem] = const_double_from_real_value (r, outer_submode);
6196 }
6197 break;
6198
6199 case MODE_FRACT:
6200 case MODE_UFRACT:
6201 case MODE_ACCUM:
6202 case MODE_UACCUM:
6203 {
6204 FIXED_VALUE_TYPE f;
6205 f.data.low = 0;
6206 f.data.high = 0;
6207 f.mode = outer_submode;
6208
6209 for (i = 0;
6210 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6211 i += value_bit)
6212 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6213 for (; i < elem_bitsize; i += value_bit)
6214 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6215 << (i - HOST_BITS_PER_WIDE_INT));
6216
6217 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6218 }
6219 break;
6220
6221 default:
6222 gcc_unreachable ();
6223 }
6224 }
6225 if (VECTOR_MODE_P (outermode))
6226 return gen_rtx_CONST_VECTOR (outermode, result_v);
6227 else
6228 return result_s;
6229 }
6230
6231 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
6232 Return 0 if no simplifications are possible. */
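/* For example, (subreg:QI (subreg:HI (reg:SI r) 0) 0) collapses to
   (subreg:QI (reg:SI r) 0), and constant operands are folded outright.  */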
6233 rtx
6234 simplify_subreg (machine_mode outermode, rtx op,
6235 machine_mode innermode, poly_uint64 byte)
6236 {
6237 /* Little bit of sanity checking. */
6238 gcc_assert (innermode != VOIDmode);
6239 gcc_assert (outermode != VOIDmode);
6240 gcc_assert (innermode != BLKmode);
6241 gcc_assert (outermode != BLKmode);
6242
6243 gcc_assert (GET_MODE (op) == innermode
6244 || GET_MODE (op) == VOIDmode);
6245
6246 if (!multiple_p (byte, GET_MODE_SIZE (outermode)))
6247 return NULL_RTX;
6248
6249 if (maybe_ge (byte, GET_MODE_SIZE (innermode)))
6250 return NULL_RTX;
6251
6252 if (outermode == innermode && known_eq (byte, 0U))
6253 return op;
6254
6255 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6256 {
6257 rtx elt;
6258
6259 if (VECTOR_MODE_P (outermode)
6260 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6261 && vec_duplicate_p (op, &elt))
6262 return gen_vec_duplicate (outermode, elt);
6263
6264 if (outermode == GET_MODE_INNER (innermode)
6265 && vec_duplicate_p (op, &elt))
6266 return elt;
6267 }
6268
6269 if (CONST_SCALAR_INT_P (op)
6270 || CONST_DOUBLE_AS_FLOAT_P (op)
6271 || GET_CODE (op) == CONST_FIXED
6272 || GET_CODE (op) == CONST_VECTOR)
6273 {
6274 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6275 the result from bytes, so it only works if the sizes of the modes
6276 and the value of the offset are known at compile time. Cases that
6277 apply to general modes and offsets should be handled here
6278 before calling simplify_immed_subreg. */
6279 fixed_size_mode fs_outermode, fs_innermode;
6280 unsigned HOST_WIDE_INT cbyte;
6281 if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6282 && is_a <fixed_size_mode> (innermode, &fs_innermode)
6283 && byte.is_constant (&cbyte))
6284 return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte);
6285
6286 return NULL_RTX;
6287 }
6288
6289 /* Changing mode twice with SUBREG => just change it once,
6290 or not at all if changing back to op's starting mode. */
6291 if (GET_CODE (op) == SUBREG)
6292 {
6293 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6294 rtx newx;
6295
6296 if (outermode == innermostmode
6297 && known_eq (byte, 0U)
6298 && known_eq (SUBREG_BYTE (op), 0))
6299 return SUBREG_REG (op);
6300
6301 /* Work out the memory offset of the final OUTERMODE value relative
6302 to the inner value of OP. */
6303 poly_int64 mem_offset = subreg_memory_offset (outermode,
6304 innermode, byte);
6305 poly_int64 op_mem_offset = subreg_memory_offset (op);
6306 poly_int64 final_offset = mem_offset + op_mem_offset;
6307
6308 /* See whether the resulting subreg will be paradoxical. */
6309 if (!paradoxical_subreg_p (outermode, innermostmode))
6310 {
6311 /* In nonparadoxical subregs we can't handle negative offsets. */
6312 if (maybe_lt (final_offset, 0))
6313 return NULL_RTX;
6314 /* Bail out in case the resulting subreg would be incorrect. */
6315 if (!multiple_p (final_offset, GET_MODE_SIZE (outermode))
6316 || maybe_ge (final_offset, GET_MODE_SIZE (innermostmode)))
6317 return NULL_RTX;
6318 }
6319 else
6320 {
6321 poly_int64 required_offset = subreg_memory_offset (outermode,
6322 innermostmode, 0);
6323 if (maybe_ne (final_offset, required_offset))
6324 return NULL_RTX;
6325 /* Paradoxical subregs always have byte offset 0. */
6326 final_offset = 0;
6327 }
6328
6329 /* Recurse for further possible simplifications. */
6330 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6331 final_offset);
6332 if (newx)
6333 return newx;
6334 if (validate_subreg (outermode, innermostmode,
6335 SUBREG_REG (op), final_offset))
6336 {
6337 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6338 if (SUBREG_PROMOTED_VAR_P (op)
6339 && SUBREG_PROMOTED_SIGN (op) >= 0
6340 && GET_MODE_CLASS (outermode) == MODE_INT
6341 && IN_RANGE (GET_MODE_SIZE (outermode),
6342 GET_MODE_SIZE (innermode),
6343 GET_MODE_SIZE (innermostmode))
6344 && subreg_lowpart_p (newx))
6345 {
6346 SUBREG_PROMOTED_VAR_P (newx) = 1;
6347 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6348 }
6349 return newx;
6350 }
6351 return NULL_RTX;
6352 }
6353
6354 /* SUBREG of a hard register => just change the register number
6355 and/or mode. If the hard register is not valid in that mode,
6356 suppress this simplification. If the hard register is the stack,
6357 frame, or argument pointer, leave this as a SUBREG. */
6358
6359 if (REG_P (op) && HARD_REGISTER_P (op))
6360 {
6361 unsigned int regno, final_regno;
6362
6363 regno = REGNO (op);
6364 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6365 if (HARD_REGISTER_NUM_P (final_regno))
6366 {
6367 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6368 subreg_memory_offset (outermode,
6369 innermode, byte));
6370
6371 /* Propagate original regno. We don't have any way to specify
6372 the offset inside original regno, so do so only for lowpart.
6373 The information is used only by alias analysis, which cannot
6374 grok partial registers anyway. */
6375
6376 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
6377 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6378 return x;
6379 }
6380 }
6381
6382 /* If we have a SUBREG of a register that we are replacing and we are
6383 replacing it with a MEM, make a new MEM and try replacing the
6384 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6385 or if we would be widening it. */
6386
6387 if (MEM_P (op)
6388 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6389 /* Allow splitting of volatile memory references in case we don't
6390 have an instruction to move the whole thing. */
6391 && (! MEM_VOLATILE_P (op)
6392 || ! have_insn_for (SET, innermode))
6393 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6394 return adjust_address_nv (op, outermode, byte);
6395
6396 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6397 of two parts. */
6398 if (GET_CODE (op) == CONCAT
6399 || GET_CODE (op) == VEC_CONCAT)
6400 {
6401 unsigned int part_size;
6402 poly_uint64 final_offset;
6403 rtx part, res;
6404
6405 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6406 if (part_mode == VOIDmode)
6407 part_mode = GET_MODE_INNER (GET_MODE (op));
6408 part_size = GET_MODE_SIZE (part_mode);
6409 if (known_lt (byte, part_size))
6410 {
6411 part = XEXP (op, 0);
6412 final_offset = byte;
6413 }
6414 else if (known_ge (byte, part_size))
6415 {
6416 part = XEXP (op, 1);
6417 final_offset = byte - part_size;
6418 }
6419 else
6420 return NULL_RTX;
6421
6422 if (maybe_gt (final_offset + GET_MODE_SIZE (outermode), part_size))
6423 return NULL_RTX;
6424
6425 part_mode = GET_MODE (part);
6426 if (part_mode == VOIDmode)
6427 part_mode = GET_MODE_INNER (GET_MODE (op));
6428 res = simplify_subreg (outermode, part, part_mode, final_offset);
6429 if (res)
6430 return res;
6431 if (validate_subreg (outermode, part_mode, part, final_offset))
6432 return gen_rtx_SUBREG (outermode, part, final_offset);
6433 return NULL_RTX;
6434 }
6435
6436 /* A SUBREG resulting from a zero extension may fold to zero if
6437 it extracts higher bits than the ZERO_EXTEND's source bits. */
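/* For example, on a little-endian target, (subreg:SI (zero_extend:DI
   (reg:SI x)) 4) reads only the zero-extended upper half and folds to
   (const_int 0).  */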
6438 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6439 {
6440 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
6441 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
6442 return CONST0_RTX (outermode);
6443 }
6444
6445 scalar_int_mode int_outermode, int_innermode;
6446 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6447 && is_a <scalar_int_mode> (innermode, &int_innermode)
6448 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
6449 {
6450 /* Handle polynomial integers. The upper bits of a paradoxical
6451 subreg are undefined, so this is safe regardless of whether
6452 we're truncating or extending. */
6453 if (CONST_POLY_INT_P (op))
6454 {
6455 poly_wide_int val
6456 = poly_wide_int::from (const_poly_int_value (op),
6457 GET_MODE_PRECISION (int_outermode),
6458 SIGNED);
6459 return immed_wide_int_const (val, int_outermode);
6460 }
6461
6462 if (GET_MODE_PRECISION (int_outermode)
6463 < GET_MODE_PRECISION (int_innermode))
6464 {
6465 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6466 if (tem)
6467 return tem;
6468 }
6469 }
6470
6471 return NULL_RTX;
6472 }
6473
6474 /* Make a SUBREG operation or equivalent if it folds. */
6475
6476 rtx
6477 simplify_gen_subreg (machine_mode outermode, rtx op,
6478 machine_mode innermode, poly_uint64 byte)
6479 {
6480 rtx newx;
6481
6482 newx = simplify_subreg (outermode, op, innermode, byte);
6483 if (newx)
6484 return newx;
6485
6486 if (GET_CODE (op) == SUBREG
6487 || GET_CODE (op) == CONCAT
6488 || GET_MODE (op) == VOIDmode)
6489 return NULL_RTX;
6490
6491 if (validate_subreg (outermode, innermode, op, byte))
6492 return gen_rtx_SUBREG (outermode, op, byte);
6493
6494 return NULL_RTX;
6495 }
6496
6497 /* Generate a subreg to get the least significant part of EXPR (in mode
6498 INNER_MODE) as OUTER_MODE. */
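/* For example, lowpart_subreg (QImode, x, SImode) yields (subreg:QI x 0)
   on a little-endian target and (subreg:QI x 3) on a big-endian one, or a
   further-simplified equivalent when x is, say, a constant or a nested
   SUBREG.  */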
6499
6500 rtx
6501 lowpart_subreg (machine_mode outer_mode, rtx expr,
6502 machine_mode inner_mode)
6503 {
6504 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6505 subreg_lowpart_offset (outer_mode, inner_mode));
6506 }
6507
6508 /* Simplify X, an rtx expression.
6509
6510 Return the simplified expression or NULL if no simplifications
6511 were possible.
6512
6513 This is the preferred entry point into the simplification routines;
6514 however, we still allow passes to call the more specific routines.
6515
6516 Right now GCC has three (yes, three) major bodies of RTL simplification
6517 code that need to be unified.
6518
6519 1. fold_rtx in cse.c. This code uses various CSE specific
6520 information to aid in RTL simplification.
6521
6522 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6523 it uses combine specific information to aid in RTL
6524 simplification.
6525
6526 3. The routines in this file.
6527
6528
6529 Long term we want to only have one body of simplification code; to
6530 get to that state I recommend the following steps:
6531
6532 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6533 which do not depend on pass-specific state into these routines.
6534
6535 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6536 use this routine whenever possible.
6537
6538 3. Allow for pass dependent state to be provided to these
6539 routines and add simplifications based on the pass dependent
6540 state. Remove code from cse.c & combine.c that becomes
6541 redundant/dead.
6542
6543 It will take time, but ultimately the compiler will be easier to
6544 maintain and improve. It's totally silly that when we add a
6545 simplification it needs to be added to 4 places (3 for RTL
6546 simplification and 1 for tree simplification). */
6547
6548 rtx
6549 simplify_rtx (const_rtx x)
6550 {
6551 const enum rtx_code code = GET_CODE (x);
6552 const machine_mode mode = GET_MODE (x);
6553
6554 switch (GET_RTX_CLASS (code))
6555 {
6556 case RTX_UNARY:
6557 return simplify_unary_operation (code, mode,
6558 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6559 case RTX_COMM_ARITH:
6560 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6561 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6562
6563 /* Fall through. */
6564
6565 case RTX_BIN_ARITH:
6566 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6567
6568 case RTX_TERNARY:
6569 case RTX_BITFIELD_OPS:
6570 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6571 XEXP (x, 0), XEXP (x, 1),
6572 XEXP (x, 2));
6573
6574 case RTX_COMPARE:
6575 case RTX_COMM_COMPARE:
6576 return simplify_relational_operation (code, mode,
6577 ((GET_MODE (XEXP (x, 0))
6578 != VOIDmode)
6579 ? GET_MODE (XEXP (x, 0))
6580 : GET_MODE (XEXP (x, 1))),
6581 XEXP (x, 0),
6582 XEXP (x, 1));
6583
6584 case RTX_EXTRA:
6585 if (code == SUBREG)
6586 return simplify_subreg (mode, SUBREG_REG (x),
6587 GET_MODE (SUBREG_REG (x)),
6588 SUBREG_BYTE (x));
6589 break;
6590
6591 case RTX_OBJ:
6592 if (code == LO_SUM)
6593 {
6594 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6595 if (GET_CODE (XEXP (x, 0)) == HIGH
6596 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6597 return XEXP (x, 1);
6598 }
6599 break;
6600
6601 default:
6602 break;
6603 }
6604 return NULL;
6605 }
6606
6607 #if CHECKING_P
6608
6609 namespace selftest {
6610
6611 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6612
6613 static rtx
6614 make_test_reg (machine_mode mode)
6615 {
6616 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6617
6618 return gen_rtx_REG (mode, test_reg_num++);
6619 }
6620
6621 /* Test vector simplifications involving VEC_DUPLICATE in which the
6622 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6623 register that holds one element of MODE. */
6624
6625 static void
6626 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6627 {
6628 scalar_mode inner_mode = GET_MODE_INNER (mode);
6629 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6630 unsigned int nunits = GET_MODE_NUNITS (mode);
6631 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6632 {
6633 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6634 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6635 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6636 ASSERT_RTX_EQ (duplicate,
6637 simplify_unary_operation (NOT, mode,
6638 duplicate_not, mode));
6639
6640 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6641 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6642 ASSERT_RTX_EQ (duplicate,
6643 simplify_unary_operation (NEG, mode,
6644 duplicate_neg, mode));
6645
6646 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6647 ASSERT_RTX_EQ (duplicate,
6648 simplify_binary_operation (PLUS, mode, duplicate,
6649 CONST0_RTX (mode)));
6650
6651 ASSERT_RTX_EQ (duplicate,
6652 simplify_binary_operation (MINUS, mode, duplicate,
6653 CONST0_RTX (mode)));
6654
6655 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6656 simplify_binary_operation (MINUS, mode, duplicate,
6657 duplicate));
6658 }
6659
6660 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6661 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6662 ASSERT_RTX_PTR_EQ (scalar_reg,
6663 simplify_binary_operation (VEC_SELECT, inner_mode,
6664 duplicate, zero_par));
6665
6666 /* And again with the final element. */
6667 rtx last_index = gen_int_mode (GET_MODE_NUNITS (mode) - 1, word_mode);
6668 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6669 ASSERT_RTX_PTR_EQ (scalar_reg,
6670 simplify_binary_operation (VEC_SELECT, inner_mode,
6671 duplicate, last_par));
6672
6673 /* Test a scalar subreg of a VEC_DUPLICATE. */
6674 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
6675 ASSERT_RTX_EQ (scalar_reg,
6676 simplify_gen_subreg (inner_mode, duplicate,
6677 mode, offset));
6678
6679 machine_mode narrower_mode;
6680 if (nunits > 2
6681 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6682 && VECTOR_MODE_P (narrower_mode))
6683 {
6684 /* Test VEC_SELECT of a vector. */
6685 rtx vec_par
6686 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6687 rtx narrower_duplicate
6688 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6689 ASSERT_RTX_EQ (narrower_duplicate,
6690 simplify_binary_operation (VEC_SELECT, narrower_mode,
6691 duplicate, vec_par));
6692
6693 /* Test a vector subreg of a VEC_DUPLICATE. */
6694 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
6695 ASSERT_RTX_EQ (narrower_duplicate,
6696 simplify_gen_subreg (narrower_mode, duplicate,
6697 mode, offset));
6698 }
6699 }
6700
6701 /* Test vector simplifications involving VEC_SERIES in which the
6702 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6703 register that holds one element of MODE. */
6704
6705 static void
6706 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
6707 {
6708 /* Test unary cases with VEC_SERIES arguments. */
6709 scalar_mode inner_mode = GET_MODE_INNER (mode);
6710 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6711 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6712 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
6713 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
6714 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
6715 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
6716 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
6717 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
6718 neg_scalar_reg);
6719 ASSERT_RTX_EQ (series_0_r,
6720 simplify_unary_operation (NEG, mode, series_0_nr, mode));
6721 ASSERT_RTX_EQ (series_r_m1,
6722 simplify_unary_operation (NEG, mode, series_nr_1, mode));
6723 ASSERT_RTX_EQ (series_r_r,
6724 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
6725
6726 /* Test that a VEC_SERIES with a zero step is simplified away. */
6727 ASSERT_RTX_EQ (duplicate,
6728 simplify_binary_operation (VEC_SERIES, mode,
6729 scalar_reg, const0_rtx));
6730
6731 /* Test PLUS and MINUS with VEC_SERIES. */
6732 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
6733 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
6734 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
6735 ASSERT_RTX_EQ (series_r_r,
6736 simplify_binary_operation (PLUS, mode, series_0_r,
6737 duplicate));
6738 ASSERT_RTX_EQ (series_r_1,
6739 simplify_binary_operation (PLUS, mode, duplicate,
6740 series_0_1));
6741 ASSERT_RTX_EQ (series_r_m1,
6742 simplify_binary_operation (PLUS, mode, duplicate,
6743 series_0_m1));
6744 ASSERT_RTX_EQ (series_0_r,
6745 simplify_binary_operation (MINUS, mode, series_r_r,
6746 duplicate));
6747 ASSERT_RTX_EQ (series_r_m1,
6748 simplify_binary_operation (MINUS, mode, duplicate,
6749 series_0_1));
6750 ASSERT_RTX_EQ (series_r_1,
6751 simplify_binary_operation (MINUS, mode, duplicate,
6752 series_0_m1));
6753 ASSERT_RTX_EQ (series_0_m1,
6754 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
6755 constm1_rtx));
6756 }
6757
6758 /* Verify some simplifications involving vectors. */
6759
6760 static void
6761 test_vector_ops ()
6762 {
6763 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
6764 {
6765 machine_mode mode = (machine_mode) i;
6766 if (VECTOR_MODE_P (mode))
6767 {
6768 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
6769 test_vector_ops_duplicate (mode, scalar_reg);
6770 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
6771 && GET_MODE_NUNITS (mode) > 2)
6772 test_vector_ops_series (mode, scalar_reg);
6773 }
6774 }
6775 }
6776
6777 template<unsigned int N>
6778 struct simplify_const_poly_int_tests
6779 {
6780 static void run ();
6781 };
6782
6783 template<>
6784 struct simplify_const_poly_int_tests<1>
6785 {
6786 static void run () {}
6787 };
6788
6789 /* Test various CONST_POLY_INT properties. */
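/* Each poly_int64 (A, B) below represents the value A + B * X for the
   runtime indeterminate X; the QImode constants are chosen so that
   coefficient wrap-around is exercised, e.g. (1, 1) + (-80, 127) wraps
   to (-79, -128).  */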
6790
6791 template<unsigned int N>
6792 void
6793 simplify_const_poly_int_tests<N>::run ()
6794 {
6795 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
6796 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
6797 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
6798 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
6799 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
6800 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
6801 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
6802 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
6803 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
6804 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
6805 rtx two = GEN_INT (2);
6806 rtx six = GEN_INT (6);
6807 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
6808
6809 /* These tests only try limited operation combinations. Fuller arithmetic
6810 testing is done directly on poly_ints. */
6811 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
6812 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
6813 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
6814 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
6815 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
6816 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
6817 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
6818 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
6819 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
6820 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
6821 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
6822 }
6823
6824 /* Run all of the selftests within this file. */
6825
6826 void
6827 simplify_rtx_c_tests ()
6828 {
6829 test_vector_ops ();
6830 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
6831 }
6832
6833 } // namespace selftest
6834
6835 #endif /* CHECKING_P */