middle-end: Simplify (sign_extend:HI (truncate:QI (ashiftrt:HI X 8)))
[gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2020 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
39
40 /* Simplification and canonicalization of RTL. */
41
42 /* Much code operates on (low, high) pairs; the low value is an
43 unsigned wide int, the high value a signed wide int. We
44 occasionally need to sign extend from low to high as if low were a
45 signed wide int. */
46 #define HWI_SIGN_EXTEND(low) \
47 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
48
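/* Illustrative sketch (hypothetical helper, not referenced by the code
   below): filling in the high half of such a (low, high) pair with
   HWI_SIGN_EXTEND.  When LOW is negative as a signed wide int the high
   half becomes all ones, otherwise it becomes zero.  */

static inline void
hwi_pair_sign_extend_example (unsigned HOST_WIDE_INT low,
			      unsigned HOST_WIDE_INT *high)
{
  *high = HWI_SIGN_EXTEND (low);
}
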
49 static bool plus_minus_operand_p (const_rtx);
50 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
58 \f
59 /* Negate I, which satisfies poly_int_rtx_p. MODE is the mode of I. */
60
61 static rtx
62 neg_poly_int_rtx (machine_mode mode, const_rtx i)
63 {
64 return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
65 }
66
 67 /* Test whether expression X is an immediate constant that represents
68 the most significant bit of machine mode MODE. */
69
70 bool
71 mode_signbit_p (machine_mode mode, const_rtx x)
72 {
73 unsigned HOST_WIDE_INT val;
74 unsigned int width;
75 scalar_int_mode int_mode;
76
77 if (!is_int_mode (mode, &int_mode))
78 return false;
79
80 width = GET_MODE_PRECISION (int_mode);
81 if (width == 0)
82 return false;
83
84 if (width <= HOST_BITS_PER_WIDE_INT
85 && CONST_INT_P (x))
86 val = INTVAL (x);
87 #if TARGET_SUPPORTS_WIDE_INT
88 else if (CONST_WIDE_INT_P (x))
89 {
90 unsigned int i;
91 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
92 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
93 return false;
94 for (i = 0; i < elts - 1; i++)
95 if (CONST_WIDE_INT_ELT (x, i) != 0)
96 return false;
97 val = CONST_WIDE_INT_ELT (x, elts - 1);
98 width %= HOST_BITS_PER_WIDE_INT;
99 if (width == 0)
100 width = HOST_BITS_PER_WIDE_INT;
101 }
102 #else
103 else if (width <= HOST_BITS_PER_DOUBLE_INT
104 && CONST_DOUBLE_AS_INT_P (x)
105 && CONST_DOUBLE_LOW (x) == 0)
106 {
107 val = CONST_DOUBLE_HIGH (x);
108 width -= HOST_BITS_PER_WIDE_INT;
109 }
110 #endif
111 else
112 /* X is not an integer constant. */
113 return false;
114
115 if (width < HOST_BITS_PER_WIDE_INT)
116 val &= (HOST_WIDE_INT_1U << width) - 1;
117 return val == (HOST_WIDE_INT_1U << (width - 1));
118 }
119
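/* Illustrative usage sketch (hypothetical helper; assumes SImode is 32 bits
   wide): the SImode sign-bit constant is recognized, while an ordinary
   constant is not.  */

static inline bool
mode_signbit_p_example (void)
{
  rtx signbit = gen_int_mode (HOST_WIDE_INT_1U << 31, SImode);
  return (mode_signbit_p (SImode, signbit)
	  && !mode_signbit_p (SImode, const1_rtx));
}
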
120 /* Test whether VAL is equal to the most significant bit of mode MODE
121 (after masking with the mode mask of MODE). Returns false if the
122 precision of MODE is too large to handle. */
123
124 bool
125 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
126 {
127 unsigned int width;
128 scalar_int_mode int_mode;
129
130 if (!is_int_mode (mode, &int_mode))
131 return false;
132
133 width = GET_MODE_PRECISION (int_mode);
134 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
135 return false;
136
137 val &= GET_MODE_MASK (int_mode);
138 return val == (HOST_WIDE_INT_1U << (width - 1));
139 }
140
141 /* Test whether the most significant bit of mode MODE is set in VAL.
142 Returns false if the precision of MODE is too large to handle. */
143 bool
144 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
145 {
146 unsigned int width;
147
148 scalar_int_mode int_mode;
149 if (!is_int_mode (mode, &int_mode))
150 return false;
151
152 width = GET_MODE_PRECISION (int_mode);
153 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
154 return false;
155
156 val &= HOST_WIDE_INT_1U << (width - 1);
157 return val != 0;
158 }
159
160 /* Test whether the most significant bit of mode MODE is clear in VAL.
161 Returns false if the precision of MODE is too large to handle. */
162 bool
163 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
164 {
165 unsigned int width;
166
167 scalar_int_mode int_mode;
168 if (!is_int_mode (mode, &int_mode))
169 return false;
170
171 width = GET_MODE_PRECISION (int_mode);
172 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
173 return false;
174
175 val &= HOST_WIDE_INT_1U << (width - 1);
176 return val == 0;
177 }
178 \f
179 /* Make a binary operation by properly ordering the operands and
180 seeing if the expression folds. */
181
182 rtx
183 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
184 rtx op1)
185 {
186 rtx tem;
187
188 /* If this simplifies, do it. */
189 tem = simplify_binary_operation (code, mode, op0, op1);
190 if (tem)
191 return tem;
192
193 /* Put complex operands first and constants second if commutative. */
194 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
195 && swap_commutative_operands_p (op0, op1))
196 std::swap (op0, op1);
197
198 return gen_rtx_fmt_ee (code, mode, op0, op1);
199 }
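
/* Illustrative usage sketch (hypothetical helper): constant operands are
   folded immediately, and an operation that does not fold is returned in
   canonical order with the constant second.  */

static inline rtx
simplify_gen_binary_example (rtx reg_op)
{
  /* (plus:SI (const_int 3) (const_int 4)) folds to (const_int 7).  */
  rtx folded = simplify_gen_binary (PLUS, SImode, GEN_INT (3), GEN_INT (4));
  /* A register plus a constant does not fold; the result is the
     canonically ordered (plus:SI reg_op (const_int 7)).  */
  return simplify_gen_binary (PLUS, SImode, folded, reg_op);
}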
200 \f
201 /* If X is a MEM referencing the constant pool, return the real value.
202 Otherwise return X. */
203 rtx
204 avoid_constant_pool_reference (rtx x)
205 {
206 rtx c, tmp, addr;
207 machine_mode cmode;
208 poly_int64 offset = 0;
209
210 switch (GET_CODE (x))
211 {
212 case MEM:
213 break;
214
215 case FLOAT_EXTEND:
216 /* Handle float extensions of constant pool references. */
217 tmp = XEXP (x, 0);
218 c = avoid_constant_pool_reference (tmp);
219 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
220 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
221 GET_MODE (x));
222 return x;
223
224 default:
225 return x;
226 }
227
228 if (GET_MODE (x) == BLKmode)
229 return x;
230
231 addr = XEXP (x, 0);
232
233 /* Call target hook to avoid the effects of -fpic etc.... */
234 addr = targetm.delegitimize_address (addr);
235
236 /* Split the address into a base and integer offset. */
237 addr = strip_offset (addr, &offset);
238
239 if (GET_CODE (addr) == LO_SUM)
240 addr = XEXP (addr, 1);
241
242 /* If this is a constant pool reference, we can turn it into its
243 constant and hope that simplifications happen. */
244 if (GET_CODE (addr) == SYMBOL_REF
245 && CONSTANT_POOL_ADDRESS_P (addr))
246 {
247 c = get_pool_constant (addr);
248 cmode = get_pool_mode (addr);
249
 250 /* If we're accessing the constant in a mode different from the one in
 251 which it was originally stored, attempt to fix that up via subreg
 252 simplifications.  If that fails, return the original memory. */
253 if (known_eq (offset, 0) && cmode == GET_MODE (x))
254 return c;
255 else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
256 {
257 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
258 if (tem && CONSTANT_P (tem))
259 return tem;
260 }
261 }
262
263 return x;
264 }
265 \f
266 /* Simplify a MEM based on its attributes. This is the default
267 delegitimize_address target hook, and it's recommended that every
268 overrider call it. */
269
270 rtx
271 delegitimize_mem_from_attrs (rtx x)
272 {
273 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
274 use their base addresses as equivalent. */
275 if (MEM_P (x)
276 && MEM_EXPR (x)
277 && MEM_OFFSET_KNOWN_P (x))
278 {
279 tree decl = MEM_EXPR (x);
280 machine_mode mode = GET_MODE (x);
281 poly_int64 offset = 0;
282
283 switch (TREE_CODE (decl))
284 {
285 default:
286 decl = NULL;
287 break;
288
289 case VAR_DECL:
290 break;
291
292 case ARRAY_REF:
293 case ARRAY_RANGE_REF:
294 case COMPONENT_REF:
295 case BIT_FIELD_REF:
296 case REALPART_EXPR:
297 case IMAGPART_EXPR:
298 case VIEW_CONVERT_EXPR:
299 {
300 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
301 tree toffset;
302 int unsignedp, reversep, volatilep = 0;
303
304 decl
305 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
306 &unsignedp, &reversep, &volatilep);
307 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
308 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
309 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
310 decl = NULL;
311 else
312 offset += bytepos + toffset_val;
313 break;
314 }
315 }
316
317 if (decl
318 && mode == GET_MODE (x)
319 && VAR_P (decl)
320 && (TREE_STATIC (decl)
321 || DECL_THREAD_LOCAL_P (decl))
322 && DECL_RTL_SET_P (decl)
323 && MEM_P (DECL_RTL (decl)))
324 {
325 rtx newx;
326
327 offset += MEM_OFFSET (x);
328
329 newx = DECL_RTL (decl);
330
331 if (MEM_P (newx))
332 {
333 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
334 poly_int64 n_offset, o_offset;
335
336 /* Avoid creating a new MEM needlessly if we already had
 337 	     the same address.  We do so if there's no OFFSET and the
338 old address X is identical to NEWX, or if X is of the
339 form (plus NEWX OFFSET), or the NEWX is of the form
340 (plus Y (const_int Z)) and X is that with the offset
341 added: (plus Y (const_int Z+OFFSET)). */
342 n = strip_offset (n, &n_offset);
343 o = strip_offset (o, &o_offset);
344 if (!(known_eq (o_offset, n_offset + offset)
345 && rtx_equal_p (o, n)))
346 x = adjust_address_nv (newx, mode, offset);
347 }
348 else if (GET_MODE (x) == GET_MODE (newx)
349 && known_eq (offset, 0))
350 x = newx;
351 }
352 }
353
354 return x;
355 }
356 \f
 357 /* Make a unary operation by first seeing if it folds and otherwise building
358 the specified operation. */
359
360 rtx
361 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
362 machine_mode op_mode)
363 {
364 rtx tem;
365
366 /* If this simplifies, use it. */
367 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
368 return tem;
369
370 return gen_rtx_fmt_e (code, mode, op);
371 }
372
373 /* Likewise for ternary operations. */
374
375 rtx
376 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
377 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
378 {
379 rtx tem;
380
381 /* If this simplifies, use it. */
382 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
383 op0, op1, op2)) != 0)
384 return tem;
385
386 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
387 }
388
389 /* Likewise, for relational operations.
390 CMP_MODE specifies mode comparison is done in. */
391
392 rtx
393 simplify_gen_relational (enum rtx_code code, machine_mode mode,
394 machine_mode cmp_mode, rtx op0, rtx op1)
395 {
396 rtx tem;
397
398 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
399 op0, op1)) != 0)
400 return tem;
401
402 return gen_rtx_fmt_ee (code, mode, op0, op1);
403 }
404 \f
405 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
406 and simplify the result. If FN is non-NULL, call this callback on each
 407 X; if it returns non-NULL, replace X with its return value and simplify the
408 result. */
409
410 rtx
411 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
412 rtx (*fn) (rtx, const_rtx, void *), void *data)
413 {
414 enum rtx_code code = GET_CODE (x);
415 machine_mode mode = GET_MODE (x);
416 machine_mode op_mode;
417 const char *fmt;
418 rtx op0, op1, op2, newx, op;
419 rtvec vec, newvec;
420 int i, j;
421
422 if (__builtin_expect (fn != NULL, 0))
423 {
424 newx = fn (x, old_rtx, data);
425 if (newx)
426 return newx;
427 }
428 else if (rtx_equal_p (x, old_rtx))
429 return copy_rtx ((rtx) data);
430
431 switch (GET_RTX_CLASS (code))
432 {
433 case RTX_UNARY:
434 op0 = XEXP (x, 0);
435 op_mode = GET_MODE (op0);
436 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
437 if (op0 == XEXP (x, 0))
438 return x;
439 return simplify_gen_unary (code, mode, op0, op_mode);
440
441 case RTX_BIN_ARITH:
442 case RTX_COMM_ARITH:
443 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
444 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
445 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
446 return x;
447 return simplify_gen_binary (code, mode, op0, op1);
448
449 case RTX_COMPARE:
450 case RTX_COMM_COMPARE:
451 op0 = XEXP (x, 0);
452 op1 = XEXP (x, 1);
453 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
454 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
455 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
456 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
457 return x;
458 return simplify_gen_relational (code, mode, op_mode, op0, op1);
459
460 case RTX_TERNARY:
461 case RTX_BITFIELD_OPS:
462 op0 = XEXP (x, 0);
463 op_mode = GET_MODE (op0);
464 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
467 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
468 return x;
469 if (op_mode == VOIDmode)
470 op_mode = GET_MODE (op0);
471 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
472
473 case RTX_EXTRA:
474 if (code == SUBREG)
475 {
476 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
477 if (op0 == SUBREG_REG (x))
478 return x;
479 op0 = simplify_gen_subreg (GET_MODE (x), op0,
480 GET_MODE (SUBREG_REG (x)),
481 SUBREG_BYTE (x));
482 return op0 ? op0 : x;
483 }
484 break;
485
486 case RTX_OBJ:
487 if (code == MEM)
488 {
489 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
490 if (op0 == XEXP (x, 0))
491 return x;
492 return replace_equiv_address_nv (x, op0);
493 }
494 else if (code == LO_SUM)
495 {
496 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
497 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
498
499 /* (lo_sum (high x) y) -> y where x and y have the same base. */
500 if (GET_CODE (op0) == HIGH)
501 {
502 rtx base0, base1, offset0, offset1;
503 split_const (XEXP (op0, 0), &base0, &offset0);
504 split_const (op1, &base1, &offset1);
505 if (rtx_equal_p (base0, base1))
506 return op1;
507 }
508
509 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
510 return x;
511 return gen_rtx_LO_SUM (mode, op0, op1);
512 }
513 break;
514
515 default:
516 break;
517 }
518
519 newx = x;
520 fmt = GET_RTX_FORMAT (code);
521 for (i = 0; fmt[i]; i++)
522 switch (fmt[i])
523 {
524 case 'E':
525 vec = XVEC (x, i);
526 newvec = XVEC (newx, i);
527 for (j = 0; j < GET_NUM_ELEM (vec); j++)
528 {
529 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
530 old_rtx, fn, data);
531 if (op != RTVEC_ELT (vec, j))
532 {
533 if (newvec == vec)
534 {
535 newvec = shallow_copy_rtvec (vec);
536 if (x == newx)
537 newx = shallow_copy_rtx (x);
538 XVEC (newx, i) = newvec;
539 }
540 RTVEC_ELT (newvec, j) = op;
541 }
542 }
543 break;
544
545 case 'e':
546 if (XEXP (x, i))
547 {
548 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
549 if (op != XEXP (x, i))
550 {
551 if (x == newx)
552 newx = shallow_copy_rtx (x);
553 XEXP (newx, i) = op;
554 }
555 }
556 break;
557 }
558 return newx;
559 }
560
561 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
562 resulting RTX. Return a new RTX which is as simplified as possible. */
563
564 rtx
565 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
566 {
567 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
568 }
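
/* Illustrative usage sketch (hypothetical helper): substituting a constant
   for a register and letting the result re-simplify.  Replacing REG_R with
   (const_int 7) in (plus:SI REG_R (const_int 3)) yields (const_int 10)
   rather than a PLUS of two constants.  */

static inline rtx
simplify_replace_rtx_example (rtx reg_r)
{
  rtx expr = gen_rtx_PLUS (SImode, reg_r, GEN_INT (3));
  return simplify_replace_rtx (expr, reg_r, GEN_INT (7));
}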
569 \f
570 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
571 Only handle cases where the truncated value is inherently an rvalue.
572
573 RTL provides two ways of truncating a value:
574
575 1. a lowpart subreg. This form is only a truncation when both
576 the outer and inner modes (here MODE and OP_MODE respectively)
577 are scalar integers, and only then when the subreg is used as
578 an rvalue.
579
580 It is only valid to form such truncating subregs if the
581 truncation requires no action by the target. The onus for
582 proving this is on the creator of the subreg -- e.g. the
583 caller to simplify_subreg or simplify_gen_subreg -- and typically
584 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
585
586 2. a TRUNCATE. This form handles both scalar and compound integers.
587
588 The first form is preferred where valid. However, the TRUNCATE
589 handling in simplify_unary_operation turns the second form into the
590 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
591 so it is generally safe to form rvalue truncations using:
592
593 simplify_gen_unary (TRUNCATE, ...)
594
595 and leave simplify_unary_operation to work out which representation
596 should be used.
597
598 Because of the proof requirements on (1), simplify_truncation must
599 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
600 regardless of whether the outer truncation came from a SUBREG or a
601 TRUNCATE. For example, if the caller has proven that an SImode
602 truncation of:
603
604 (and:DI X Y)
605
606 is a no-op and can be represented as a subreg, it does not follow
607 that SImode truncations of X and Y are also no-ops. On a target
608 like 64-bit MIPS that requires SImode values to be stored in
609 sign-extended form, an SImode truncation of:
610
611 (and:DI (reg:DI X) (const_int 63))
612
613 is trivially a no-op because only the lower 6 bits can be set.
614 However, X is still an arbitrary 64-bit number and so we cannot
615 assume that truncating it too is a no-op. */
616
617 static rtx
618 simplify_truncation (machine_mode mode, rtx op,
619 machine_mode op_mode)
620 {
621 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
622 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
623 scalar_int_mode int_mode, int_op_mode, subreg_mode;
624
625 gcc_assert (precision <= op_precision);
626
627 /* Optimize truncations of zero and sign extended values. */
628 if (GET_CODE (op) == ZERO_EXTEND
629 || GET_CODE (op) == SIGN_EXTEND)
630 {
 631 	  /* There are three possibilities.  If MODE is the same as the
 632 	     original mode, we can omit both the extension and the truncation.
 633 	     If MODE is no wider than the original mode, we can apply the
 634 	     truncation directly to the unextended value.  Finally, if MODE is
 635 	     wider than the original mode, we can just extend the unextended
 636 	     value to MODE. */
637 machine_mode origmode = GET_MODE (XEXP (op, 0));
638 if (mode == origmode)
639 return XEXP (op, 0);
640 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
641 return simplify_gen_unary (TRUNCATE, mode,
642 XEXP (op, 0), origmode);
643 else
644 return simplify_gen_unary (GET_CODE (op), mode,
645 XEXP (op, 0), origmode);
646 }
647
648 /* If the machine can perform operations in the truncated mode, distribute
649 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
650 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
 651   if ((!WORD_REGISTER_OPERATIONS
 652        || precision >= BITS_PER_WORD)
 653       && (GET_CODE (op) == PLUS
 654 	  || GET_CODE (op) == MINUS
 655 	  || GET_CODE (op) == MULT))
656 {
657 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
658 if (op0)
659 {
660 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
661 if (op1)
662 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
663 }
664 }
665
 666   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 667      (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
668 the outer subreg is effectively a truncation to the original mode. */
669 if ((GET_CODE (op) == LSHIFTRT
670 || GET_CODE (op) == ASHIFTRT)
671 /* Ensure that OP_MODE is at least twice as wide as MODE
672 to avoid the possibility that an outer LSHIFTRT shifts by more
673 than the sign extension's sign_bit_copies and introduces zeros
674 into the high bits of the result. */
675 && 2 * precision <= op_precision
676 && CONST_INT_P (XEXP (op, 1))
677 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
678 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
679 && UINTVAL (XEXP (op, 1)) < precision)
680 return simplify_gen_binary (ASHIFTRT, mode,
681 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
682
 683   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 684      (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
685 the outer subreg is effectively a truncation to the original mode. */
686 if ((GET_CODE (op) == LSHIFTRT
687 || GET_CODE (op) == ASHIFTRT)
688 && CONST_INT_P (XEXP (op, 1))
689 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
690 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
691 && UINTVAL (XEXP (op, 1)) < precision)
692 return simplify_gen_binary (LSHIFTRT, mode,
693 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
694
 695   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 696      (ashift:QI (x:QI) C), where C is a suitable small constant and
697 the outer subreg is effectively a truncation to the original mode. */
698 if (GET_CODE (op) == ASHIFT
699 && CONST_INT_P (XEXP (op, 1))
700 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
701 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
702 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
703 && UINTVAL (XEXP (op, 1)) < precision)
704 return simplify_gen_binary (ASHIFT, mode,
705 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
706
707 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
708 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
709 and C2. */
710 if (GET_CODE (op) == AND
711 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
712 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
713 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
714 && CONST_INT_P (XEXP (op, 1)))
715 {
716 rtx op0 = (XEXP (XEXP (op, 0), 0));
717 rtx shift_op = XEXP (XEXP (op, 0), 1);
718 rtx mask_op = XEXP (op, 1);
719 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
720 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
721
722 if (shift < precision
723 /* If doing this transform works for an X with all bits set,
724 it works for any X. */
725 && ((GET_MODE_MASK (mode) >> shift) & mask)
726 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
727 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
728 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
729 {
730 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
731 return simplify_gen_binary (AND, mode, op0, mask_op);
732 }
733 }
734
735 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
736 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
737 changing len. */
738 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
739 && REG_P (XEXP (op, 0))
740 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
741 && CONST_INT_P (XEXP (op, 1))
742 && CONST_INT_P (XEXP (op, 2)))
743 {
744 rtx op0 = XEXP (op, 0);
745 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
746 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
747 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
748 {
749 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
750 if (op0)
751 {
752 pos -= op_precision - precision;
753 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
754 XEXP (op, 1), GEN_INT (pos));
755 }
756 }
757 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
758 {
759 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
760 if (op0)
761 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
762 XEXP (op, 1), XEXP (op, 2));
763 }
764 }
765
766 /* Recognize a word extraction from a multi-word subreg. */
767 if ((GET_CODE (op) == LSHIFTRT
768 || GET_CODE (op) == ASHIFTRT)
769 && SCALAR_INT_MODE_P (mode)
770 && SCALAR_INT_MODE_P (op_mode)
771 && precision >= BITS_PER_WORD
772 && 2 * precision <= op_precision
773 && CONST_INT_P (XEXP (op, 1))
774 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
775 && UINTVAL (XEXP (op, 1)) < op_precision)
776 {
777 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
778 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
779 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
780 (WORDS_BIG_ENDIAN
781 ? byte - shifted_bytes
782 : byte + shifted_bytes));
783 }
784
785 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
786 and try replacing the TRUNCATE and shift with it. Don't do this
787 if the MEM has a mode-dependent address. */
788 if ((GET_CODE (op) == LSHIFTRT
789 || GET_CODE (op) == ASHIFTRT)
790 && is_a <scalar_int_mode> (mode, &int_mode)
791 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
792 && MEM_P (XEXP (op, 0))
793 && CONST_INT_P (XEXP (op, 1))
794 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
795 && INTVAL (XEXP (op, 1)) > 0
796 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
797 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
798 MEM_ADDR_SPACE (XEXP (op, 0)))
799 && ! MEM_VOLATILE_P (XEXP (op, 0))
800 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
801 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
802 {
803 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
804 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
805 return adjust_address_nv (XEXP (op, 0), int_mode,
806 (WORDS_BIG_ENDIAN
807 ? byte - shifted_bytes
808 : byte + shifted_bytes));
809 }
810
811 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
812 (OP:SI foo:SI) if OP is NEG or ABS. */
813 if ((GET_CODE (op) == ABS
814 || GET_CODE (op) == NEG)
815 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
816 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
817 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
818 return simplify_gen_unary (GET_CODE (op), mode,
819 XEXP (XEXP (op, 0), 0), mode);
820
821 /* (truncate:A (subreg:B (truncate:C X) 0)) is
822 (truncate:A X). */
823 if (GET_CODE (op) == SUBREG
824 && is_a <scalar_int_mode> (mode, &int_mode)
825 && SCALAR_INT_MODE_P (op_mode)
826 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
827 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
828 && subreg_lowpart_p (op))
829 {
830 rtx inner = XEXP (SUBREG_REG (op), 0);
831 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
832 return simplify_gen_unary (TRUNCATE, int_mode, inner,
833 GET_MODE (inner));
834 else
835 /* If subreg above is paradoxical and C is narrower
836 than A, return (subreg:A (truncate:C X) 0). */
837 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
838 }
839
840 /* (truncate:A (truncate:B X)) is (truncate:A X). */
841 if (GET_CODE (op) == TRUNCATE)
842 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
843 GET_MODE (XEXP (op, 0)));
844
 845   /* (truncate:A (ior X C)) is (const_int -1) if C truncates to -1 in
 846      mode A, i.e. if the bits of C within mode A are already all ones. */
847 if (GET_CODE (op) == IOR
848 && SCALAR_INT_MODE_P (mode)
849 && SCALAR_INT_MODE_P (op_mode)
850 && CONST_INT_P (XEXP (op, 1))
851 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
852 return constm1_rtx;
853
854 return NULL_RTX;
855 }
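
/* Plain C sketch of the first simplification above (illustrative only;
   assumes the usual 8-bit QImode and 32-bit SImode): truncating an
   extension of X back to X's own mode yields X itself.  */

static inline unsigned char
truncate_of_zero_extend_example (unsigned char x)
{
  unsigned int extended = x;		/* (zero_extend:SI (x:QI))  */
  return (unsigned char) extended;	/* (truncate:QI ...) == x  */
}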
856 \f
857 /* Try to simplify a unary operation CODE whose output mode is to be
858 MODE with input operand OP whose mode was originally OP_MODE.
859 Return zero if no simplification can be made. */
860 rtx
861 simplify_unary_operation (enum rtx_code code, machine_mode mode,
862 rtx op, machine_mode op_mode)
863 {
864 rtx trueop, tem;
865
866 trueop = avoid_constant_pool_reference (op);
867
868 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
869 if (tem)
870 return tem;
871
872 return simplify_unary_operation_1 (code, mode, op);
873 }
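
/* Illustrative usage sketch (hypothetical helper): a constant operand is
   folded by simplify_const_unary_operation, so no NEG rtx is built and
   the result is simply (const_int -5).  */

static inline rtx
simplify_unary_operation_example (void)
{
  return simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode);
}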
874
875 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
876 to be exact. */
877
878 static bool
879 exact_int_to_float_conversion_p (const_rtx op)
880 {
881 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
882 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
883 /* Constants shouldn't reach here. */
884 gcc_assert (op0_mode != VOIDmode);
885 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
886 int in_bits = in_prec;
887 if (HWI_COMPUTABLE_MODE_P (op0_mode))
888 {
889 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
890 if (GET_CODE (op) == FLOAT)
891 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
892 else if (GET_CODE (op) == UNSIGNED_FLOAT)
893 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
894 else
895 gcc_unreachable ();
896 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
897 }
898 return in_bits <= out_bits;
899 }
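
/* Plain C sketch of the property tested above (illustrative only; assumes
   IEEE single precision with a 24-bit significand): every 16-bit integer
   converts to float exactly, so the round trip below always holds, whereas
   a 25-significant-bit value such as 16777217 would not survive the same
   round trip.  */

static inline bool
exact_int_to_float_example (short s)
{
  float f = (float) s;
  return (short) f == s;
}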
900
901 /* Perform some simplifications we can do even if the operands
902 aren't constant. */
903 static rtx
904 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
905 {
906 enum rtx_code reversed;
907 rtx temp, elt, base, step;
908 scalar_int_mode inner, int_mode, op_mode, op0_mode;
909
910 switch (code)
911 {
912 case NOT:
913 /* (not (not X)) == X. */
914 if (GET_CODE (op) == NOT)
915 return XEXP (op, 0);
916
 917       /* (not (eq X Y)) == (ne X Y), etc. if the mode is BImode or the
 918 	 result of the comparison is all ones (STORE_FLAG_VALUE == -1). */
919 if (COMPARISON_P (op)
920 && (mode == BImode || STORE_FLAG_VALUE == -1)
921 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
922 return simplify_gen_relational (reversed, mode, VOIDmode,
923 XEXP (op, 0), XEXP (op, 1));
924
925 /* (not (plus X -1)) can become (neg X). */
926 if (GET_CODE (op) == PLUS
927 && XEXP (op, 1) == constm1_rtx)
928 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
929
930 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
931 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
932 and MODE_VECTOR_INT. */
933 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
934 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
935 CONSTM1_RTX (mode));
936
937 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
938 if (GET_CODE (op) == XOR
939 && CONST_INT_P (XEXP (op, 1))
940 && (temp = simplify_unary_operation (NOT, mode,
941 XEXP (op, 1), mode)) != 0)
942 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
943
944 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
945 if (GET_CODE (op) == PLUS
946 && CONST_INT_P (XEXP (op, 1))
947 && mode_signbit_p (mode, XEXP (op, 1))
948 && (temp = simplify_unary_operation (NOT, mode,
949 XEXP (op, 1), mode)) != 0)
950 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
951
952
953 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
954 operands other than 1, but that is not valid. We could do a
955 similar simplification for (not (lshiftrt C X)) where C is
956 just the sign bit, but this doesn't seem common enough to
957 bother with. */
958 if (GET_CODE (op) == ASHIFT
959 && XEXP (op, 0) == const1_rtx)
960 {
961 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
962 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
963 }
964
965 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
966 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
967 so we can perform the above simplification. */
968 if (STORE_FLAG_VALUE == -1
969 && is_a <scalar_int_mode> (mode, &int_mode)
970 && GET_CODE (op) == ASHIFTRT
971 && CONST_INT_P (XEXP (op, 1))
972 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
973 return simplify_gen_relational (GE, int_mode, VOIDmode,
974 XEXP (op, 0), const0_rtx);
975
976
977 if (partial_subreg_p (op)
978 && subreg_lowpart_p (op)
979 && GET_CODE (SUBREG_REG (op)) == ASHIFT
980 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
981 {
982 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
983 rtx x;
984
985 x = gen_rtx_ROTATE (inner_mode,
986 simplify_gen_unary (NOT, inner_mode, const1_rtx,
987 inner_mode),
988 XEXP (SUBREG_REG (op), 1));
989 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
990 if (temp)
991 return temp;
992 }
993
994 /* Apply De Morgan's laws to reduce number of patterns for machines
995 with negating logical insns (and-not, nand, etc.). If result has
996 only one NOT, put it first, since that is how the patterns are
997 coded. */
998 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
999 {
1000 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1001 machine_mode op_mode;
1002
1003 op_mode = GET_MODE (in1);
1004 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1005
1006 op_mode = GET_MODE (in2);
1007 if (op_mode == VOIDmode)
1008 op_mode = mode;
1009 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1010
1011 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1012 std::swap (in1, in2);
1013
1014 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1015 mode, in1, in2);
1016 }
1017
1018 /* (not (bswap x)) -> (bswap (not x)). */
1019 if (GET_CODE (op) == BSWAP)
1020 {
1021 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1022 return simplify_gen_unary (BSWAP, mode, x, mode);
1023 }
1024 break;
1025
1026 case NEG:
1027 /* (neg (neg X)) == X. */
1028 if (GET_CODE (op) == NEG)
1029 return XEXP (op, 0);
1030
1031 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
 1032 	 If the comparison is not reversible, use
 1033 	 x ? y : (neg y) instead.  */
1034 if (GET_CODE (op) == IF_THEN_ELSE)
1035 {
1036 rtx cond = XEXP (op, 0);
1037 rtx true_rtx = XEXP (op, 1);
1038 rtx false_rtx = XEXP (op, 2);
1039
1040 if ((GET_CODE (true_rtx) == NEG
1041 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1042 || (GET_CODE (false_rtx) == NEG
1043 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1044 {
1045 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1046 temp = reversed_comparison (cond, mode);
1047 else
1048 {
1049 temp = cond;
1050 std::swap (true_rtx, false_rtx);
1051 }
1052 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1053 mode, temp, true_rtx, false_rtx);
1054 }
1055 }
1056
1057 /* (neg (plus X 1)) can become (not X). */
1058 if (GET_CODE (op) == PLUS
1059 && XEXP (op, 1) == const1_rtx)
1060 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1061
1062 /* Similarly, (neg (not X)) is (plus X 1). */
1063 if (GET_CODE (op) == NOT)
1064 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1065 CONST1_RTX (mode));
1066
1067 /* (neg (minus X Y)) can become (minus Y X). This transformation
1068 isn't safe for modes with signed zeros, since if X and Y are
1069 both +0, (minus Y X) is the same as (minus X Y). If the
1070 rounding mode is towards +infinity (or -infinity) then the two
1071 expressions will be rounded differently. */
1072 if (GET_CODE (op) == MINUS
1073 && !HONOR_SIGNED_ZEROS (mode)
1074 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1075 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1076
1077 if (GET_CODE (op) == PLUS
1078 && !HONOR_SIGNED_ZEROS (mode)
1079 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1080 {
1081 /* (neg (plus A C)) is simplified to (minus -C A). */
1082 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1083 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1084 {
1085 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1086 if (temp)
1087 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1088 }
1089
1090 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1091 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1092 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1093 }
1094
1095 /* (neg (mult A B)) becomes (mult A (neg B)).
1096 This works even for floating-point values. */
1097 if (GET_CODE (op) == MULT
1098 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1099 {
1100 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1101 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1102 }
1103
1104 /* NEG commutes with ASHIFT since it is multiplication. Only do
1105 this if we can then eliminate the NEG (e.g., if the operand
1106 is a constant). */
1107 if (GET_CODE (op) == ASHIFT)
1108 {
1109 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1110 if (temp)
1111 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1112 }
1113
1114 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1115 C is equal to the width of MODE minus 1. */
1116 if (GET_CODE (op) == ASHIFTRT
1117 && CONST_INT_P (XEXP (op, 1))
1118 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1119 return simplify_gen_binary (LSHIFTRT, mode,
1120 XEXP (op, 0), XEXP (op, 1));
1121
1122 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1123 C is equal to the width of MODE minus 1. */
1124 if (GET_CODE (op) == LSHIFTRT
1125 && CONST_INT_P (XEXP (op, 1))
1126 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1127 return simplify_gen_binary (ASHIFTRT, mode,
1128 XEXP (op, 0), XEXP (op, 1));
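
      /* Plain C sketch of the two identities above (illustrative only;
	 assumes a 32-bit int with arithmetic right shifts of negative
	 values): for a shift count of 31, -(x >> 31) has the same bit
	 pattern as (unsigned int) x >> 31, and vice versa, because the
	 arithmetic shift yields only 0 or -1 while the logical shift
	 yields only 0 or 1, and negation maps each onto the other.  */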
1129
1130 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1131 if (GET_CODE (op) == XOR
1132 && XEXP (op, 1) == const1_rtx
1133 && nonzero_bits (XEXP (op, 0), mode) == 1)
1134 return plus_constant (mode, XEXP (op, 0), -1);
1135
1136 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1137 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1138 if (GET_CODE (op) == LT
1139 && XEXP (op, 1) == const0_rtx
1140 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1141 {
1142 int_mode = as_a <scalar_int_mode> (mode);
1143 int isize = GET_MODE_PRECISION (inner);
1144 if (STORE_FLAG_VALUE == 1)
1145 {
1146 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1147 gen_int_shift_amount (inner,
1148 isize - 1));
1149 if (int_mode == inner)
1150 return temp;
1151 if (GET_MODE_PRECISION (int_mode) > isize)
1152 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1153 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1154 }
1155 else if (STORE_FLAG_VALUE == -1)
1156 {
1157 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1158 gen_int_shift_amount (inner,
1159 isize - 1));
1160 if (int_mode == inner)
1161 return temp;
1162 if (GET_MODE_PRECISION (int_mode) > isize)
1163 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1164 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1165 }
1166 }
1167
1168 if (vec_series_p (op, &base, &step))
1169 {
1170 /* Only create a new series if we can simplify both parts. In other
1171 cases this isn't really a simplification, and it's not necessarily
1172 a win to replace a vector operation with a scalar operation. */
1173 scalar_mode inner_mode = GET_MODE_INNER (mode);
1174 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1175 if (base)
1176 {
1177 step = simplify_unary_operation (NEG, inner_mode,
1178 step, inner_mode);
1179 if (step)
1180 return gen_vec_series (mode, base, step);
1181 }
1182 }
1183 break;
1184
1185 case TRUNCATE:
1186 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1187 with the umulXi3_highpart patterns. */
1188 if (GET_CODE (op) == LSHIFTRT
1189 && GET_CODE (XEXP (op, 0)) == MULT)
1190 break;
1191
1192 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1193 {
1194 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1195 {
1196 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1197 if (temp)
1198 return temp;
1199 }
1200 /* We can't handle truncation to a partial integer mode here
1201 because we don't know the real bitsize of the partial
1202 integer mode. */
1203 break;
1204 }
1205
1206 if (GET_MODE (op) != VOIDmode)
1207 {
1208 temp = simplify_truncation (mode, op, GET_MODE (op));
1209 if (temp)
1210 return temp;
1211 }
1212
1213 /* If we know that the value is already truncated, we can
1214 replace the TRUNCATE with a SUBREG. */
1215 if (known_eq (GET_MODE_NUNITS (mode), 1)
1216 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1217 || truncated_to_mode (mode, op)))
1218 {
1219 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1220 if (temp)
1221 return temp;
1222 }
1223
1224 /* A truncate of a comparison can be replaced with a subreg if
1225 STORE_FLAG_VALUE permits. This is like the previous test,
1226 but it works even if the comparison is done in a mode larger
1227 than HOST_BITS_PER_WIDE_INT. */
1228 if (HWI_COMPUTABLE_MODE_P (mode)
1229 && COMPARISON_P (op)
1230 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1231 {
1232 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1233 if (temp)
1234 return temp;
1235 }
1236
1237 /* A truncate of a memory is just loading the low part of the memory
1238 if we are not changing the meaning of the address. */
1239 if (GET_CODE (op) == MEM
1240 && !VECTOR_MODE_P (mode)
1241 && !MEM_VOLATILE_P (op)
1242 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1243 {
1244 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1245 if (temp)
1246 return temp;
1247 }
1248
1249 break;
1250
1251 case FLOAT_TRUNCATE:
1252 if (DECIMAL_FLOAT_MODE_P (mode))
1253 break;
1254
1255 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1256 if (GET_CODE (op) == FLOAT_EXTEND
1257 && GET_MODE (XEXP (op, 0)) == mode)
1258 return XEXP (op, 0);
1259
1260 /* (float_truncate:SF (float_truncate:DF foo:XF))
1261 = (float_truncate:SF foo:XF).
1262 This may eliminate double rounding, so it is unsafe.
1263
1264 (float_truncate:SF (float_extend:XF foo:DF))
1265 = (float_truncate:SF foo:DF).
1266
1267 (float_truncate:DF (float_extend:XF foo:SF))
1268 = (float_extend:DF foo:SF). */
1269 if ((GET_CODE (op) == FLOAT_TRUNCATE
1270 && flag_unsafe_math_optimizations)
1271 || GET_CODE (op) == FLOAT_EXTEND)
1272 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1273 > GET_MODE_UNIT_SIZE (mode)
1274 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1275 mode,
1276 XEXP (op, 0), mode);
1277
1278 /* (float_truncate (float x)) is (float x) */
1279 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1280 && (flag_unsafe_math_optimizations
1281 || exact_int_to_float_conversion_p (op)))
1282 return simplify_gen_unary (GET_CODE (op), mode,
1283 XEXP (op, 0),
1284 GET_MODE (XEXP (op, 0)));
1285
 1286       /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
1287 (OP:SF foo:SF) if OP is NEG or ABS. */
1288 if ((GET_CODE (op) == ABS
1289 || GET_CODE (op) == NEG)
1290 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1291 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1292 return simplify_gen_unary (GET_CODE (op), mode,
1293 XEXP (XEXP (op, 0), 0), mode);
1294
1295 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1296 is (float_truncate:SF x). */
1297 if (GET_CODE (op) == SUBREG
1298 && subreg_lowpart_p (op)
1299 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1300 return SUBREG_REG (op);
1301 break;
1302
1303 case FLOAT_EXTEND:
1304 if (DECIMAL_FLOAT_MODE_P (mode))
1305 break;
1306
1307 /* (float_extend (float_extend x)) is (float_extend x)
1308
1309 (float_extend (float x)) is (float x) assuming that double
1310 rounding can't happen.
1311 */
1312 if (GET_CODE (op) == FLOAT_EXTEND
1313 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1314 && exact_int_to_float_conversion_p (op)))
1315 return simplify_gen_unary (GET_CODE (op), mode,
1316 XEXP (op, 0),
1317 GET_MODE (XEXP (op, 0)));
1318
1319 break;
1320
1321 case ABS:
1322 /* (abs (neg <foo>)) -> (abs <foo>) */
1323 if (GET_CODE (op) == NEG)
1324 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1325 GET_MODE (XEXP (op, 0)));
1326
1327 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1328 do nothing. */
1329 if (GET_MODE (op) == VOIDmode)
1330 break;
1331
1332 /* If operand is something known to be positive, ignore the ABS. */
1333 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1334 || val_signbit_known_clear_p (GET_MODE (op),
1335 nonzero_bits (op, GET_MODE (op))))
1336 return op;
1337
1338 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1339 if (is_a <scalar_int_mode> (mode, &int_mode)
1340 && (num_sign_bit_copies (op, int_mode)
1341 == GET_MODE_PRECISION (int_mode)))
1342 return gen_rtx_NEG (int_mode, op);
1343
1344 break;
1345
1346 case FFS:
1347 /* (ffs (*_extend <X>)) = (ffs <X>) */
1348 if (GET_CODE (op) == SIGN_EXTEND
1349 || GET_CODE (op) == ZERO_EXTEND)
1350 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1351 GET_MODE (XEXP (op, 0)));
1352 break;
1353
1354 case POPCOUNT:
1355 switch (GET_CODE (op))
1356 {
1357 case BSWAP:
1358 case ZERO_EXTEND:
1359 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1360 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1361 GET_MODE (XEXP (op, 0)));
1362
1363 case ROTATE:
1364 case ROTATERT:
1365 /* Rotations don't affect popcount. */
1366 if (!side_effects_p (XEXP (op, 1)))
1367 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1368 GET_MODE (XEXP (op, 0)));
1369 break;
1370
1371 default:
1372 break;
1373 }
1374 break;
1375
1376 case PARITY:
1377 switch (GET_CODE (op))
1378 {
1379 case NOT:
1380 case BSWAP:
1381 case ZERO_EXTEND:
1382 case SIGN_EXTEND:
1383 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1384 GET_MODE (XEXP (op, 0)));
1385
1386 case ROTATE:
1387 case ROTATERT:
1388 /* Rotations don't affect parity. */
1389 if (!side_effects_p (XEXP (op, 1)))
1390 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1391 GET_MODE (XEXP (op, 0)));
1392 break;
1393
1394 case PARITY:
 1395 	  /* (parity (parity x)) -> (parity x). */
1396 return op;
1397
1398 default:
1399 break;
1400 }
1401 break;
1402
1403 case BSWAP:
1404 /* (bswap (bswap x)) -> x. */
1405 if (GET_CODE (op) == BSWAP)
1406 return XEXP (op, 0);
1407 break;
1408
1409 case FLOAT:
1410 /* (float (sign_extend <X>)) = (float <X>). */
1411 if (GET_CODE (op) == SIGN_EXTEND)
1412 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1413 GET_MODE (XEXP (op, 0)));
1414 break;
1415
1416 case SIGN_EXTEND:
1417 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1418 becomes just the MINUS if its mode is MODE. This allows
1419 folding switch statements on machines using casesi (such as
1420 the VAX). */
1421 if (GET_CODE (op) == TRUNCATE
1422 && GET_MODE (XEXP (op, 0)) == mode
1423 && GET_CODE (XEXP (op, 0)) == MINUS
1424 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1425 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1426 return XEXP (op, 0);
1427
1428 /* Extending a widening multiplication should be canonicalized to
1429 a wider widening multiplication. */
1430 if (GET_CODE (op) == MULT)
1431 {
1432 rtx lhs = XEXP (op, 0);
1433 rtx rhs = XEXP (op, 1);
1434 enum rtx_code lcode = GET_CODE (lhs);
1435 enum rtx_code rcode = GET_CODE (rhs);
1436
1437 /* Widening multiplies usually extend both operands, but sometimes
1438 they use a shift to extract a portion of a register. */
1439 if ((lcode == SIGN_EXTEND
1440 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1441 && (rcode == SIGN_EXTEND
1442 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1443 {
1444 machine_mode lmode = GET_MODE (lhs);
1445 machine_mode rmode = GET_MODE (rhs);
1446 int bits;
1447
1448 if (lcode == ASHIFTRT)
1449 /* Number of bits not shifted off the end. */
1450 bits = (GET_MODE_UNIT_PRECISION (lmode)
1451 - INTVAL (XEXP (lhs, 1)));
1452 else /* lcode == SIGN_EXTEND */
1453 /* Size of inner mode. */
1454 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1455
1456 if (rcode == ASHIFTRT)
1457 bits += (GET_MODE_UNIT_PRECISION (rmode)
1458 - INTVAL (XEXP (rhs, 1)));
1459 else /* rcode == SIGN_EXTEND */
1460 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1461
 1462 	      /* We can only widen multiplies if the result is mathematically
1463 equivalent. I.e. if overflow was impossible. */
1464 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1465 return simplify_gen_binary
1466 (MULT, mode,
1467 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1468 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1469 }
1470 }
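
      /* For instance (illustrative only), a 16-bit widening multiply such as
	 (sign_extend:DI (mult:SI (sign_extend:SI x:HI) (sign_extend:SI y:HI)))
	 needs at most 16 + 16 = 32 result bits, so it is canonicalized to the
	 wider (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)).  */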
1471
1472 /* Check for a sign extension of a subreg of a promoted
1473 variable, where the promotion is sign-extended, and the
1474 target mode is the same as the variable's promotion. */
1475 if (GET_CODE (op) == SUBREG
1476 && SUBREG_PROMOTED_VAR_P (op)
1477 && SUBREG_PROMOTED_SIGNED_P (op)
1478 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1479 {
1480 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1481 if (temp)
1482 return temp;
1483 }
1484
1485 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1486 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1487 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1488 {
1489 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1490 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1491 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1492 GET_MODE (XEXP (op, 0)));
1493 }
1494
1495 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1496 	 is (sign_extend:M (subreg:O <X>)) if there is a mode with
 1497 	 GET_MODE_PRECISION (N) - I bits.
1498 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1499 is similarly (zero_extend:M (subreg:O <X>)). */
1500 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1501 && GET_CODE (XEXP (op, 0)) == ASHIFT
1502 && is_a <scalar_int_mode> (mode, &int_mode)
1503 && CONST_INT_P (XEXP (op, 1))
1504 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1505 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1506 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1507 {
1508 scalar_int_mode tmode;
1509 gcc_assert (GET_MODE_PRECISION (int_mode)
1510 > GET_MODE_PRECISION (op_mode));
1511 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1512 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1513 {
1514 rtx inner =
1515 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1516 if (inner)
1517 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1518 ? SIGN_EXTEND : ZERO_EXTEND,
1519 int_mode, inner, tmode);
1520 }
1521 }
1522
1523 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1524 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1525 if (GET_CODE (op) == LSHIFTRT
1526 && CONST_INT_P (XEXP (op, 1))
1527 && XEXP (op, 1) != const0_rtx)
1528 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1529
1530 /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
1531 I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
1532 (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
1533 (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
1534 O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
1535 wider than O. */
1536 if (GET_CODE (op) == TRUNCATE
1537 && GET_CODE (XEXP (op, 0)) == LSHIFTRT
1538 && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
1539 {
1540 scalar_int_mode m_mode, n_mode, o_mode;
1541 rtx old_shift = XEXP (op, 0);
1542 if (is_a <scalar_int_mode> (mode, &m_mode)
1543 && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
1544 && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
1545 && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
1546 == INTVAL (XEXP (old_shift, 1)))
1547 {
1548 rtx new_shift = simplify_gen_binary (ASHIFTRT,
1549 GET_MODE (old_shift),
1550 XEXP (old_shift, 0),
1551 XEXP (old_shift, 1));
1552 if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
1553 return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
1554 GET_MODE (new_shift));
1555 if (mode != GET_MODE (new_shift))
1556 return simplify_gen_unary (TRUNCATE, mode, new_shift,
1557 GET_MODE (new_shift));
1558 return new_shift;
1559 }
1560 }
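
      /* For instance (illustrative only), with M == O == HImode, N == QImode
	 and I == 16 - 8 == 8,
	 (sign_extend:HI (truncate:QI (lshiftrt:HI X (const_int 8)))) becomes
	 (ashiftrt:HI X (const_int 8)): the QImode value being sign-extended
	 is exactly the high byte of X.  */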
1561
1562 #if defined(POINTERS_EXTEND_UNSIGNED)
1563 /* As we do not know which address space the pointer is referring to,
1564 we can do this only if the target does not support different pointer
1565 or address modes depending on the address space. */
1566 if (target_default_pointer_address_modes_p ()
1567 && ! POINTERS_EXTEND_UNSIGNED
1568 && mode == Pmode && GET_MODE (op) == ptr_mode
1569 && (CONSTANT_P (op)
1570 || (GET_CODE (op) == SUBREG
1571 && REG_P (SUBREG_REG (op))
1572 && REG_POINTER (SUBREG_REG (op))
1573 && GET_MODE (SUBREG_REG (op)) == Pmode))
1574 && !targetm.have_ptr_extend ())
1575 {
1576 temp
1577 = convert_memory_address_addr_space_1 (Pmode, op,
1578 ADDR_SPACE_GENERIC, false,
1579 true);
1580 if (temp)
1581 return temp;
1582 }
1583 #endif
1584 break;
1585
1586 case ZERO_EXTEND:
1587 /* Check for a zero extension of a subreg of a promoted
1588 variable, where the promotion is zero-extended, and the
1589 target mode is the same as the variable's promotion. */
1590 if (GET_CODE (op) == SUBREG
1591 && SUBREG_PROMOTED_VAR_P (op)
1592 && SUBREG_PROMOTED_UNSIGNED_P (op)
1593 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1594 {
1595 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1596 if (temp)
1597 return temp;
1598 }
1599
1600 /* Extending a widening multiplication should be canonicalized to
1601 a wider widening multiplication. */
1602 if (GET_CODE (op) == MULT)
1603 {
1604 rtx lhs = XEXP (op, 0);
1605 rtx rhs = XEXP (op, 1);
1606 enum rtx_code lcode = GET_CODE (lhs);
1607 enum rtx_code rcode = GET_CODE (rhs);
1608
1609 /* Widening multiplies usually extend both operands, but sometimes
1610 they use a shift to extract a portion of a register. */
1611 if ((lcode == ZERO_EXTEND
1612 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1613 && (rcode == ZERO_EXTEND
1614 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1615 {
1616 machine_mode lmode = GET_MODE (lhs);
1617 machine_mode rmode = GET_MODE (rhs);
1618 int bits;
1619
1620 if (lcode == LSHIFTRT)
1621 /* Number of bits not shifted off the end. */
1622 bits = (GET_MODE_UNIT_PRECISION (lmode)
1623 - INTVAL (XEXP (lhs, 1)));
1624 else /* lcode == ZERO_EXTEND */
1625 /* Size of inner mode. */
1626 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1627
1628 if (rcode == LSHIFTRT)
1629 bits += (GET_MODE_UNIT_PRECISION (rmode)
1630 - INTVAL (XEXP (rhs, 1)));
1631 else /* rcode == ZERO_EXTEND */
1632 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1633
 1634 	      /* We can only widen multiplies if the result is mathematically
1635 equivalent. I.e. if overflow was impossible. */
1636 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1637 return simplify_gen_binary
1638 (MULT, mode,
1639 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1640 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1641 }
1642 }
1643
1644 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1645 if (GET_CODE (op) == ZERO_EXTEND)
1646 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1647 GET_MODE (XEXP (op, 0)));
1648
1649 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1650 	 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1651 GET_MODE_PRECISION (N) - I bits. */
1652 if (GET_CODE (op) == LSHIFTRT
1653 && GET_CODE (XEXP (op, 0)) == ASHIFT
1654 && is_a <scalar_int_mode> (mode, &int_mode)
1655 && CONST_INT_P (XEXP (op, 1))
1656 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1657 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1658 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1659 {
1660 scalar_int_mode tmode;
1661 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1662 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1663 {
1664 rtx inner =
1665 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1666 if (inner)
1667 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1668 inner, tmode);
1669 }
1670 }
1671
1672 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1673 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1674 of mode N. E.g.
1675 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1676 (and:SI (reg:SI) (const_int 63)). */
1677 if (partial_subreg_p (op)
1678 && is_a <scalar_int_mode> (mode, &int_mode)
1679 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1680 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1681 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1682 && subreg_lowpart_p (op)
1683 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1684 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1685 {
1686 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1687 return SUBREG_REG (op);
1688 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1689 op0_mode);
1690 }
1691
1692 #if defined(POINTERS_EXTEND_UNSIGNED)
1693 /* As we do not know which address space the pointer is referring to,
1694 we can do this only if the target does not support different pointer
1695 or address modes depending on the address space. */
1696 if (target_default_pointer_address_modes_p ()
1697 && POINTERS_EXTEND_UNSIGNED > 0
1698 && mode == Pmode && GET_MODE (op) == ptr_mode
1699 && (CONSTANT_P (op)
1700 || (GET_CODE (op) == SUBREG
1701 && REG_P (SUBREG_REG (op))
1702 && REG_POINTER (SUBREG_REG (op))
1703 && GET_MODE (SUBREG_REG (op)) == Pmode))
1704 && !targetm.have_ptr_extend ())
1705 {
1706 temp
1707 = convert_memory_address_addr_space_1 (Pmode, op,
1708 ADDR_SPACE_GENERIC, false,
1709 true);
1710 if (temp)
1711 return temp;
1712 }
1713 #endif
1714 break;
1715
1716 default:
1717 break;
1718 }
1719
1720 if (VECTOR_MODE_P (mode)
1721 && vec_duplicate_p (op, &elt)
1722 && code != VEC_DUPLICATE)
1723 {
1724 /* Try applying the operator to ELT and see if that simplifies.
1725 We can duplicate the result if so.
1726
1727 The reason we don't use simplify_gen_unary is that it isn't
1728 necessarily a win to convert things like:
1729
1730 (neg:V (vec_duplicate:V (reg:S R)))
1731
1732 to:
1733
1734 (vec_duplicate:V (neg:S (reg:S R)))
1735
1736 The first might be done entirely in vector registers while the
1737 second might need a move between register files. */
1738 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1739 elt, GET_MODE_INNER (GET_MODE (op)));
1740 if (temp)
1741 return gen_vec_duplicate (mode, temp);
1742 }
1743
1744 return 0;
1745 }
1746
1747 /* Try to compute the value of a unary operation CODE whose output mode is to
1748 be MODE with input operand OP whose mode was originally OP_MODE.
1749 Return zero if the value cannot be computed. */
1750 rtx
1751 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1752 rtx op, machine_mode op_mode)
1753 {
1754 scalar_int_mode result_mode;
1755
1756 if (code == VEC_DUPLICATE)
1757 {
1758 gcc_assert (VECTOR_MODE_P (mode));
1759 if (GET_MODE (op) != VOIDmode)
1760 {
1761 if (!VECTOR_MODE_P (GET_MODE (op)))
1762 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1763 else
1764 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1765 (GET_MODE (op)));
1766 }
1767 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1768 return gen_const_vec_duplicate (mode, op);
1769 if (GET_CODE (op) == CONST_VECTOR
1770 && (CONST_VECTOR_DUPLICATE_P (op)
1771 || CONST_VECTOR_NUNITS (op).is_constant ()))
1772 {
1773 unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
1774 ? CONST_VECTOR_NPATTERNS (op)
1775 : CONST_VECTOR_NUNITS (op).to_constant ());
1776 gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
1777 rtx_vector_builder builder (mode, npatterns, 1);
1778 for (unsigned i = 0; i < npatterns; i++)
1779 builder.quick_push (CONST_VECTOR_ELT (op, i));
1780 return builder.build ();
1781 }
1782 }
1783
1784 if (VECTOR_MODE_P (mode)
1785 && GET_CODE (op) == CONST_VECTOR
1786 && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
1787 {
1788 gcc_assert (GET_MODE (op) == op_mode);
1789
1790 rtx_vector_builder builder;
1791 if (!builder.new_unary_operation (mode, op, false))
1792 return 0;
1793
1794 unsigned int count = builder.encoded_nelts ();
1795 for (unsigned int i = 0; i < count; i++)
1796 {
1797 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1798 CONST_VECTOR_ELT (op, i),
1799 GET_MODE_INNER (op_mode));
1800 if (!x || !valid_for_const_vector_p (mode, x))
1801 return 0;
1802 builder.quick_push (x);
1803 }
1804 return builder.build ();
1805 }
1806
1807 /* The order of these tests is critical so that, for example, we don't
1808 check the wrong mode (input vs. output) for a conversion operation,
1809 such as FIX. At some point, this should be simplified. */
1810
1811 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1812 {
1813 REAL_VALUE_TYPE d;
1814
1815 if (op_mode == VOIDmode)
1816 {
1817 /* CONST_INTs have VOIDmode as their mode.  We assume that all
1818 the bits of the constant are significant, though this is
1819 a dangerous assumption: CONST_INTs are often created and
1820 used with garbage in the bits outside of the precision of
1821 the implied mode of the const_int.  */
1822 op_mode = MAX_MODE_INT;
1823 }
1824
1825 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1826
1827 /* Avoid the folding if flag_signaling_nans is on and
1828 operand is a signaling NaN. */
1829 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1830 return 0;
1831
1832 d = real_value_truncate (mode, d);
1833 return const_double_from_real_value (d, mode);
1834 }
1835 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1836 {
1837 REAL_VALUE_TYPE d;
1838
1839 if (op_mode == VOIDmode)
1840 {
1841 /* CONST_INTs have VOIDmode as their mode.  We assume that all
1842 the bits of the constant are significant, though this is
1843 a dangerous assumption: CONST_INTs are often created and
1844 used with garbage in the bits outside of the precision of
1845 the implied mode of the const_int.  */
1846 op_mode = MAX_MODE_INT;
1847 }
1848
1849 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1850
1851 /* Avoid the folding if flag_signaling_nans is on and
1852 operand is a signaling NaN. */
1853 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1854 return 0;
1855
1856 d = real_value_truncate (mode, d);
1857 return const_double_from_real_value (d, mode);
1858 }
1859
1860 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1861 {
1862 unsigned int width = GET_MODE_PRECISION (result_mode);
1863 if (width > MAX_BITSIZE_MODE_ANY_INT)
1864 return 0;
1865
1866 wide_int result;
1867 scalar_int_mode imode = (op_mode == VOIDmode
1868 ? result_mode
1869 : as_a <scalar_int_mode> (op_mode));
1870 rtx_mode_t op0 = rtx_mode_t (op, imode);
1871 int int_value;
1872
1873 #if TARGET_SUPPORTS_WIDE_INT == 0
1874 /* This assert keeps the simplification from producing a result
1875 that cannot be represented in a CONST_DOUBLE.  A lot of
1876 upstream callers expect that this function never fails to
1877 simplify something, so if you added this check to the test
1878 above, the code would die later anyway.  If this assert
1879 fires, you just need to make the port support wide int.  */
1880 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1881 #endif
1882
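/* Some concrete examples of the folds performed by the switch below
   (SImode operands chosen purely for illustration):
     (popcount:SI (const_int 7))            -> (const_int 3)
     (ffs:SI (const_int 8))                 -> (const_int 4)
     (bswap:SI (const_int 0x12345678))      -> (const_int 0x78563412)
     (zero_extend:SI (const_int -1)) from QImode -> (const_int 255).  */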
1883 switch (code)
1884 {
1885 case NOT:
1886 result = wi::bit_not (op0);
1887 break;
1888
1889 case NEG:
1890 result = wi::neg (op0);
1891 break;
1892
1893 case ABS:
1894 result = wi::abs (op0);
1895 break;
1896
1897 case FFS:
1898 result = wi::shwi (wi::ffs (op0), result_mode);
1899 break;
1900
1901 case CLZ:
1902 if (wi::ne_p (op0, 0))
1903 int_value = wi::clz (op0);
1904 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1905 return NULL_RTX;
1906 result = wi::shwi (int_value, result_mode);
1907 break;
1908
1909 case CLRSB:
1910 result = wi::shwi (wi::clrsb (op0), result_mode);
1911 break;
1912
1913 case CTZ:
1914 if (wi::ne_p (op0, 0))
1915 int_value = wi::ctz (op0);
1916 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1917 return NULL_RTX;
1918 result = wi::shwi (int_value, result_mode);
1919 break;
1920
1921 case POPCOUNT:
1922 result = wi::shwi (wi::popcount (op0), result_mode);
1923 break;
1924
1925 case PARITY:
1926 result = wi::shwi (wi::parity (op0), result_mode);
1927 break;
1928
1929 case BSWAP:
1930 result = wide_int (op0).bswap ();
1931 break;
1932
1933 case TRUNCATE:
1934 case ZERO_EXTEND:
1935 result = wide_int::from (op0, width, UNSIGNED);
1936 break;
1937
1938 case SIGN_EXTEND:
1939 result = wide_int::from (op0, width, SIGNED);
1940 break;
1941
1942 case SQRT:
1943 default:
1944 return 0;
1945 }
1946
1947 return immed_wide_int_const (result, result_mode);
1948 }
1949
1950 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1951 && SCALAR_FLOAT_MODE_P (mode)
1952 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1953 {
1954 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1955 switch (code)
1956 {
1957 case SQRT:
1958 return 0;
1959 case ABS:
1960 d = real_value_abs (&d);
1961 break;
1962 case NEG:
1963 d = real_value_negate (&d);
1964 break;
1965 case FLOAT_TRUNCATE:
1966 /* Don't perform the operation if flag_signaling_nans is on
1967 and the operand is a signaling NaN. */
1968 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1969 return NULL_RTX;
1970 d = real_value_truncate (mode, d);
1971 break;
1972 case FLOAT_EXTEND:
1973 /* Don't perform the operation if flag_signaling_nans is on
1974 and the operand is a signaling NaN. */
1975 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1976 return NULL_RTX;
1977 /* All this does is change the mode, unless we are
1978 changing the mode class.  */
1979 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1980 real_convert (&d, mode, &d);
1981 break;
1982 case FIX:
1983 /* Don't perform the operation if flag_signaling_nans is on
1984 and the operand is a signaling NaN. */
1985 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1986 return NULL_RTX;
1987 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1988 break;
1989 case NOT:
1990 {
1991 long tmp[4];
1992 int i;
1993
1994 real_to_target (tmp, &d, GET_MODE (op));
1995 for (i = 0; i < 4; i++)
1996 tmp[i] = ~tmp[i];
1997 real_from_target (&d, tmp, mode);
1998 break;
1999 }
2000 default:
2001 gcc_unreachable ();
2002 }
2003 return const_double_from_real_value (d, mode);
2004 }
2005 else if (CONST_DOUBLE_AS_FLOAT_P (op)
2006 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
2007 && is_int_mode (mode, &result_mode))
2008 {
2009 unsigned int width = GET_MODE_PRECISION (result_mode);
2010 if (width > MAX_BITSIZE_MODE_ANY_INT)
2011 return 0;
2012
2013 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
2014 operators are intentionally left unspecified (to ease implementation
2015 by target backends), for consistency, this routine implements the
2016 same semantics for constant folding as used by the middle-end. */
2017
2018 /* This was formerly used only for non-IEEE float.
2019 eggert@twinsun.com says it is safe for IEEE also. */
2020 REAL_VALUE_TYPE t;
2021 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
2022 wide_int wmax, wmin;
2023 /* This is part of the ABI of real_to_integer, but we check
2024 things before making this call.  */
2025 bool fail;
2026
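/* For illustration (hypothetical constants, QImode result):
     (fix:QI (const_double:SF 2.9))   -> (const_int 2), truncated toward zero;
     (fix:QI (const_double:SF 300.0)) -> (const_int 127), saturated at the
     QImode signed maximum;
     (unsigned_fix:QI (const_double:SF -1.0)) -> (const_int 0).  */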
2027 switch (code)
2028 {
2029 case FIX:
2030 if (REAL_VALUE_ISNAN (*x))
2031 return const0_rtx;
2032
2033 /* Test against the signed upper bound. */
2034 wmax = wi::max_value (width, SIGNED);
2035 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2036 if (real_less (&t, x))
2037 return immed_wide_int_const (wmax, mode);
2038
2039 /* Test against the signed lower bound. */
2040 wmin = wi::min_value (width, SIGNED);
2041 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2042 if (real_less (x, &t))
2043 return immed_wide_int_const (wmin, mode);
2044
2045 return immed_wide_int_const (real_to_integer (x, &fail, width),
2046 mode);
2047
2048 case UNSIGNED_FIX:
2049 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2050 return const0_rtx;
2051
2052 /* Test against the unsigned upper bound. */
2053 wmax = wi::max_value (width, UNSIGNED);
2054 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2055 if (real_less (&t, x))
2056 return immed_wide_int_const (wmax, mode);
2057
2058 return immed_wide_int_const (real_to_integer (x, &fail, width),
2059 mode);
2060
2061 default:
2062 gcc_unreachable ();
2063 }
2064 }
2065
2066 /* Handle polynomial integers. */
2067 else if (CONST_POLY_INT_P (op))
2068 {
2069 poly_wide_int result;
2070 switch (code)
2071 {
2072 case NEG:
2073 result = -const_poly_int_value (op);
2074 break;
2075
2076 case NOT:
2077 result = ~const_poly_int_value (op);
2078 break;
2079
2080 default:
2081 return NULL_RTX;
2082 }
2083 return immed_wide_int_const (result, mode);
2084 }
2085
2086 return NULL_RTX;
2087 }
2088 \f
2089 /* Subroutine of simplify_binary_operation to simplify a binary operation
2090 CODE that can commute with byte swapping, with result mode MODE and
2091 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2092 Return zero if no simplification or canonicalization is possible. */
2093
2094 static rtx
2095 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2096 rtx op0, rtx op1)
2097 {
2098 rtx tem;
2099
2100 /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is the byte-swapped C1.  */
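/* E.g. in SImode, (and:SI (bswap:SI X) (const_int 0xff)) becomes
   (bswap:SI (and:SI X (const_int 0xff000000))), since the byte-swapped
   form of 0xff is 0xff000000.  */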
2101 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2102 {
2103 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2104 simplify_gen_unary (BSWAP, mode, op1, mode));
2105 return simplify_gen_unary (BSWAP, mode, tem, mode);
2106 }
2107
2108 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2109 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2110 {
2111 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2112 return simplify_gen_unary (BSWAP, mode, tem, mode);
2113 }
2114
2115 return NULL_RTX;
2116 }
2117
2118 /* Subroutine of simplify_binary_operation to simplify a commutative,
2119 associative binary operation CODE with result mode MODE, operating
2120 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2121 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2122 canonicalization is possible. */
2123
2124 static rtx
2125 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2126 rtx op0, rtx op1)
2127 {
2128 rtx tem;
2129
2130 /* Linearize the operator to the left. */
2131 if (GET_CODE (op1) == code)
2132 {
2133 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2134 if (GET_CODE (op0) == code)
2135 {
2136 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2137 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2138 }
2139
2140 /* "a op (b op c)" becomes "(b op c) op a". */
2141 if (! swap_commutative_operands_p (op1, op0))
2142 return simplify_gen_binary (code, mode, op1, op0);
2143
2144 std::swap (op0, op1);
2145 }
2146
2147 if (GET_CODE (op0) == code)
2148 {
2149 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2150 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2151 {
2152 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2153 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2154 }
2155
2156 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2157 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2158 if (tem != 0)
2159 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2160
2161 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2162 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2163 if (tem != 0)
2164 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2165 }
2166
2167 return 0;
2168 }
2169
2170 /* Return a mask describing the COMPARISON. */
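/* The mask records which of the four mutually exclusive outcomes LT,
   GT, EQ and UNORDERED satisfy the comparison, using bits 8, 4, 2 and
   1 respectively; e.g. LE == LT|EQ == 8|2 == 10 and
   NE == LT|GT|UNORDERED == 13.  */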
2171 static int
2172 comparison_to_mask (enum rtx_code comparison)
2173 {
2174 switch (comparison)
2175 {
2176 case LT:
2177 return 8;
2178 case GT:
2179 return 4;
2180 case EQ:
2181 return 2;
2182 case UNORDERED:
2183 return 1;
2184
2185 case LTGT:
2186 return 12;
2187 case LE:
2188 return 10;
2189 case GE:
2190 return 6;
2191 case UNLT:
2192 return 9;
2193 case UNGT:
2194 return 5;
2195 case UNEQ:
2196 return 3;
2197
2198 case ORDERED:
2199 return 14;
2200 case NE:
2201 return 13;
2202 case UNLE:
2203 return 11;
2204 case UNGE:
2205 return 7;
2206
2207 default:
2208 gcc_unreachable ();
2209 }
2210 }
2211
2212 /* Return a comparison corresponding to the MASK. */
2213 static enum rtx_code
2214 mask_to_comparison (int mask)
2215 {
2216 switch (mask)
2217 {
2218 case 8:
2219 return LT;
2220 case 4:
2221 return GT;
2222 case 2:
2223 return EQ;
2224 case 1:
2225 return UNORDERED;
2226
2227 case 12:
2228 return LTGT;
2229 case 10:
2230 return LE;
2231 case 6:
2232 return GE;
2233 case 9:
2234 return UNLT;
2235 case 5:
2236 return UNGT;
2237 case 3:
2238 return UNEQ;
2239
2240 case 14:
2241 return ORDERED;
2242 case 13:
2243 return NE;
2244 case 11:
2245 return UNLE;
2246 case 7:
2247 return UNGE;
2248
2249 default:
2250 gcc_unreachable ();
2251 }
2252 }
2253
2254 /* Return true if CODE is valid for comparisons of mode MODE, false
2255 otherwise.
2256
2257 It is always safe to return false, even if the code was valid for the
2258 given mode, as that will merely suppress optimizations.  */
2259
2260 static bool
2261 comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
2262 {
2263 switch (code)
2264 {
2265 /* These are valid for integral, floating and vector modes. */
2266 case NE:
2267 case EQ:
2268 case GE:
2269 case GT:
2270 case LE:
2271 case LT:
2272 return (INTEGRAL_MODE_P (mode)
2273 || FLOAT_MODE_P (mode)
2274 || VECTOR_MODE_P (mode));
2275
2276 /* These are valid for floating point modes. */
2277 case LTGT:
2278 case UNORDERED:
2279 case ORDERED:
2280 case UNEQ:
2281 case UNGE:
2282 case UNGT:
2283 case UNLE:
2284 case UNLT:
2285 return FLOAT_MODE_P (mode);
2286
2287 /* These are filtered out in simplify_logical_relational_operation,
2288 but we check for them too as a matter of safety.  They are valid
2289 for integral and vector modes.  */
2290 case GEU:
2291 case GTU:
2292 case LEU:
2293 case LTU:
2294 return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);
2295
2296 default:
2297 gcc_unreachable ();
2298 }
2299 }
2300
2301 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2302 and OP1, which should be both relational operations. Return 0 if no such
2303 simplification is possible. */
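/* For example, (ior:SI (lt:SI (reg X) (reg Y)) (eq:SI (reg X) (reg Y)))
   combines the masks 8 (LT) and 2 (EQ) into 10, which maps back to LE,
   giving (le:SI (reg X) (reg Y)).  If the combined mask covers all four
   outcomes, the result is simply const_true_rtx.  */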
2304 rtx
2305 simplify_logical_relational_operation (enum rtx_code code, machine_mode mode,
2306 rtx op0, rtx op1)
2307 {
2308 /* We only handle IOR of two relational operations. */
2309 if (code != IOR)
2310 return 0;
2311
2312 if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
2313 return 0;
2314
2315 if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2316 && rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
2317 return 0;
2318
2319 enum rtx_code code0 = GET_CODE (op0);
2320 enum rtx_code code1 = GET_CODE (op1);
2321
2322 /* We don't handle unsigned comparisons currently. */
2323 if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
2324 return 0;
2325 if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
2326 return 0;
2327
2328 int mask0 = comparison_to_mask (code0);
2329 int mask1 = comparison_to_mask (code1);
2330
2331 int mask = mask0 | mask1;
2332
2333 if (mask == 15)
2334 return const_true_rtx;
2335
2336 code = mask_to_comparison (mask);
2337
2338 /* Many comparison codes are only valid for certain mode classes. */
2339 if (!comparison_code_valid_for_mode (code, mode))
2340 return 0;
2341
2342 op0 = XEXP (op1, 0);
2343 op1 = XEXP (op1, 1);
2344
2345 return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
2346 }
2347
2348 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2349 and OP1. Return 0 if no simplification is possible.
2350
2351 Don't use this for relational operations such as EQ or LT.
2352 Use simplify_relational_operation instead. */
2353 rtx
2354 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2355 rtx op0, rtx op1)
2356 {
2357 rtx trueop0, trueop1;
2358 rtx tem;
2359
2360 /* Relational operations don't work here. We must know the mode
2361 of the operands in order to do the comparison correctly.
2362 Assuming a full word can give incorrect results.
2363 Consider comparing 128 with -128 in QImode. */
2364 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2365 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2366
2367 /* Make sure the constant is second. */
2368 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2369 && swap_commutative_operands_p (op0, op1))
2370 std::swap (op0, op1);
2371
2372 trueop0 = avoid_constant_pool_reference (op0);
2373 trueop1 = avoid_constant_pool_reference (op1);
2374
2375 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2376 if (tem)
2377 return tem;
2378 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2379
2380 if (tem)
2381 return tem;
2382
2383 /* If the above steps did not result in a simplification and op0 or op1
2384 were constant pool references, use the referenced constants directly. */
2385 if (trueop0 != op0 || trueop1 != op1)
2386 return simplify_gen_binary (code, mode, trueop0, trueop1);
2387
2388 return NULL_RTX;
2389 }
2390
2391 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2392 which OP0 and OP1 are both vector series or vector duplicates
2393 (which are really just series with a step of 0). If so, try to
2394 form a new series by applying CODE to the bases and to the steps.
2395 Return null if no simplification is possible.
2396
2397 MODE is the mode of the operation and is known to be a vector
2398 integer mode. */
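/* As an illustration (V4SI chosen arbitrarily):
     (plus:V4SI (vec_series:V4SI (const_int 1) (const_int 2))
                (vec_duplicate:V4SI (const_int 10)))
   has bases 1 and 10 and steps 2 and 0, so it simplifies to
   (vec_series:V4SI (const_int 11) (const_int 2)).  */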
2399
2400 static rtx
2401 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2402 rtx op0, rtx op1)
2403 {
2404 rtx base0, step0;
2405 if (vec_duplicate_p (op0, &base0))
2406 step0 = const0_rtx;
2407 else if (!vec_series_p (op0, &base0, &step0))
2408 return NULL_RTX;
2409
2410 rtx base1, step1;
2411 if (vec_duplicate_p (op1, &base1))
2412 step1 = const0_rtx;
2413 else if (!vec_series_p (op1, &base1, &step1))
2414 return NULL_RTX;
2415
2416 /* Only create a new series if we can simplify both parts. In other
2417 cases this isn't really a simplification, and it's not necessarily
2418 a win to replace a vector operation with a scalar operation. */
2419 scalar_mode inner_mode = GET_MODE_INNER (mode);
2420 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2421 if (!new_base)
2422 return NULL_RTX;
2423
2424 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2425 if (!new_step)
2426 return NULL_RTX;
2427
2428 return gen_vec_series (mode, new_base, new_step);
2429 }
2430
2431 /* Subroutine of simplify_binary_operation_1. Un-distribute a binary
2432 operation CODE with result mode MODE, operating on OP0 and OP1.
2433 e.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
2434 Returns NULL_RTX if no simplification is possible. */
2435
2436 static rtx
2437 simplify_distributive_operation (enum rtx_code code, machine_mode mode,
2438 rtx op0, rtx op1)
2439 {
2440 enum rtx_code op = GET_CODE (op0);
2441 gcc_assert (GET_CODE (op1) == op);
2442
2443 if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
2444 && ! side_effects_p (XEXP (op0, 1)))
2445 return simplify_gen_binary (op, mode,
2446 simplify_gen_binary (code, mode,
2447 XEXP (op0, 0),
2448 XEXP (op1, 0)),
2449 XEXP (op0, 1));
2450
2451 if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
2452 {
2453 if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2454 && ! side_effects_p (XEXP (op0, 0)))
2455 return simplify_gen_binary (op, mode,
2456 simplify_gen_binary (code, mode,
2457 XEXP (op0, 1),
2458 XEXP (op1, 1)),
2459 XEXP (op0, 0));
2460 if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
2461 && ! side_effects_p (XEXP (op0, 0)))
2462 return simplify_gen_binary (op, mode,
2463 simplify_gen_binary (code, mode,
2464 XEXP (op0, 1),
2465 XEXP (op1, 0)),
2466 XEXP (op0, 0));
2467 if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
2468 && ! side_effects_p (XEXP (op0, 1)))
2469 return simplify_gen_binary (op, mode,
2470 simplify_gen_binary (code, mode,
2471 XEXP (op0, 0),
2472 XEXP (op1, 1)),
2473 XEXP (op0, 1));
2474 }
2475
2476 return NULL_RTX;
2477 }
2478
2479 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2480 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2481 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2482 actual constants. */
2483
2484 static rtx
2485 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2486 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2487 {
2488 rtx tem, reversed, opleft, opright, elt0, elt1;
2489 HOST_WIDE_INT val;
2490 scalar_int_mode int_mode, inner_mode;
2491 poly_int64 offset;
2492
2493 /* Even if we can't compute a constant result,
2494 there are some cases worth simplifying. */
2495
2496 switch (code)
2497 {
2498 case PLUS:
2499 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2500 when x is NaN, infinite, or finite and nonzero. They aren't
2501 when x is -0 and the rounding mode is not towards -infinity,
2502 since (-0) + 0 is then 0. */
2503 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2504 return op0;
2505
2506 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2507 transformations are safe even for IEEE. */
2508 if (GET_CODE (op0) == NEG)
2509 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2510 else if (GET_CODE (op1) == NEG)
2511 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2512
2513 /* (~a) + 1 -> -a */
2514 if (INTEGRAL_MODE_P (mode)
2515 && GET_CODE (op0) == NOT
2516 && trueop1 == const1_rtx)
2517 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2518
2519 /* Handle both-operands-constant cases. We can only add
2520 CONST_INTs to constants since the sum of relocatable symbols
2521 can't be handled by most assemblers. Don't add CONST_INT
2522 to CONST_INT since overflow won't be computed properly if wider
2523 than HOST_BITS_PER_WIDE_INT. */
2524
2525 if ((GET_CODE (op0) == CONST
2526 || GET_CODE (op0) == SYMBOL_REF
2527 || GET_CODE (op0) == LABEL_REF)
2528 && poly_int_rtx_p (op1, &offset))
2529 return plus_constant (mode, op0, offset);
2530 else if ((GET_CODE (op1) == CONST
2531 || GET_CODE (op1) == SYMBOL_REF
2532 || GET_CODE (op1) == LABEL_REF)
2533 && poly_int_rtx_p (op0, &offset))
2534 return plus_constant (mode, op1, offset);
2535
2536 /* See if this is something like X * C - X or vice versa or
2537 if the multiplication is written as a shift. If so, we can
2538 distribute and make a new multiply, shift, or maybe just
2539 have X (if C is 2 in the example above). But don't make
2540 something more expensive than we had before. */
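/* For instance, (plus:SI (ashift:SI (reg X) (const_int 2)) (reg X))
   has coefficients 4 and 1 on the same operand, so it can become
   (mult:SI (reg X) (const_int 5)), provided that RTL is no costlier
   than the original according to set_src_cost.  */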
2541
2542 if (is_a <scalar_int_mode> (mode, &int_mode))
2543 {
2544 rtx lhs = op0, rhs = op1;
2545
2546 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2547 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2548
2549 if (GET_CODE (lhs) == NEG)
2550 {
2551 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2552 lhs = XEXP (lhs, 0);
2553 }
2554 else if (GET_CODE (lhs) == MULT
2555 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2556 {
2557 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2558 lhs = XEXP (lhs, 0);
2559 }
2560 else if (GET_CODE (lhs) == ASHIFT
2561 && CONST_INT_P (XEXP (lhs, 1))
2562 && INTVAL (XEXP (lhs, 1)) >= 0
2563 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2564 {
2565 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2566 GET_MODE_PRECISION (int_mode));
2567 lhs = XEXP (lhs, 0);
2568 }
2569
2570 if (GET_CODE (rhs) == NEG)
2571 {
2572 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2573 rhs = XEXP (rhs, 0);
2574 }
2575 else if (GET_CODE (rhs) == MULT
2576 && CONST_INT_P (XEXP (rhs, 1)))
2577 {
2578 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2579 rhs = XEXP (rhs, 0);
2580 }
2581 else if (GET_CODE (rhs) == ASHIFT
2582 && CONST_INT_P (XEXP (rhs, 1))
2583 && INTVAL (XEXP (rhs, 1)) >= 0
2584 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2585 {
2586 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2587 GET_MODE_PRECISION (int_mode));
2588 rhs = XEXP (rhs, 0);
2589 }
2590
2591 if (rtx_equal_p (lhs, rhs))
2592 {
2593 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2594 rtx coeff;
2595 bool speed = optimize_function_for_speed_p (cfun);
2596
2597 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2598
2599 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2600 return (set_src_cost (tem, int_mode, speed)
2601 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2602 }
2603 }
2604
2605 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2606 if (CONST_SCALAR_INT_P (op1)
2607 && GET_CODE (op0) == XOR
2608 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2609 && mode_signbit_p (mode, op1))
2610 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2611 simplify_gen_binary (XOR, mode, op1,
2612 XEXP (op0, 1)));
2613
2614 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2615 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2616 && GET_CODE (op0) == MULT
2617 && GET_CODE (XEXP (op0, 0)) == NEG)
2618 {
2619 rtx in1, in2;
2620
2621 in1 = XEXP (XEXP (op0, 0), 0);
2622 in2 = XEXP (op0, 1);
2623 return simplify_gen_binary (MINUS, mode, op1,
2624 simplify_gen_binary (MULT, mode,
2625 in1, in2));
2626 }
2627
2628 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2629 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2630 is 1. */
2631 if (COMPARISON_P (op0)
2632 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2633 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2634 && (reversed = reversed_comparison (op0, mode)))
2635 return
2636 simplify_gen_unary (NEG, mode, reversed, mode);
2637
2638 /* If one of the operands is a PLUS or a MINUS, see if we can
2639 simplify this by the associative law.
2640 Don't use the associative law for floating point.
2641 The inaccuracy makes it nonassociative,
2642 and subtle programs can break if operations are associated. */
2643
2644 if (INTEGRAL_MODE_P (mode)
2645 && (plus_minus_operand_p (op0)
2646 || plus_minus_operand_p (op1))
2647 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2648 return tem;
2649
2650 /* Reassociate floating point addition only when the user
2651 specifies associative math operations. */
2652 if (FLOAT_MODE_P (mode)
2653 && flag_associative_math)
2654 {
2655 tem = simplify_associative_operation (code, mode, op0, op1);
2656 if (tem)
2657 return tem;
2658 }
2659
2660 /* Handle vector series. */
2661 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2662 {
2663 tem = simplify_binary_operation_series (code, mode, op0, op1);
2664 if (tem)
2665 return tem;
2666 }
2667 break;
2668
2669 case COMPARE:
2670 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2671 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2672 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2673 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2674 {
2675 rtx xop00 = XEXP (op0, 0);
2676 rtx xop10 = XEXP (op1, 0);
2677
2678 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2679 return xop00;
2680
2681 if (REG_P (xop00) && REG_P (xop10)
2682 && REGNO (xop00) == REGNO (xop10)
2683 && GET_MODE (xop00) == mode
2684 && GET_MODE (xop10) == mode
2685 && GET_MODE_CLASS (mode) == MODE_CC)
2686 return xop00;
2687 }
2688 break;
2689
2690 case MINUS:
2691 /* We can't assume x-x is 0 even with non-IEEE floating point,
2692 but since it is zero except in very strange circumstances, we
2693 will treat it as zero with -ffinite-math-only. */
2694 if (rtx_equal_p (trueop0, trueop1)
2695 && ! side_effects_p (op0)
2696 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2697 return CONST0_RTX (mode);
2698
2699 /* Change subtraction from zero into negation. (0 - x) is the
2700 same as -x when x is NaN, infinite, or finite and nonzero.
2701 But if the mode has signed zeros, and does not round towards
2702 -infinity, then 0 - 0 is 0, not -0. */
2703 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2704 return simplify_gen_unary (NEG, mode, op1, mode);
2705
2706 /* (-1 - a) is ~a, unless the expression contains symbolic
2707 constants, in which case not retaining additions and
2708 subtractions could cause invalid assembly to be produced. */
2709 if (trueop0 == constm1_rtx
2710 && !contains_symbolic_reference_p (op1))
2711 return simplify_gen_unary (NOT, mode, op1, mode);
2712
2713 /* Subtracting 0 has no effect unless the mode has signalling NaNs,
2714 or has signed zeros and supports rounding towards -infinity.
2715 In such a case, 0 - 0 is -0. */
2716 if (!(HONOR_SIGNED_ZEROS (mode)
2717 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2718 && !HONOR_SNANS (mode)
2719 && trueop1 == CONST0_RTX (mode))
2720 return op0;
2721
2722 /* See if this is something like X * C - X or vice versa or
2723 if the multiplication is written as a shift. If so, we can
2724 distribute and make a new multiply, shift, or maybe just
2725 have X (if C is 2 in the example above). But don't make
2726 something more expensive than we had before. */
2727
2728 if (is_a <scalar_int_mode> (mode, &int_mode))
2729 {
2730 rtx lhs = op0, rhs = op1;
2731
2732 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2733 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2734
2735 if (GET_CODE (lhs) == NEG)
2736 {
2737 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2738 lhs = XEXP (lhs, 0);
2739 }
2740 else if (GET_CODE (lhs) == MULT
2741 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2742 {
2743 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2744 lhs = XEXP (lhs, 0);
2745 }
2746 else if (GET_CODE (lhs) == ASHIFT
2747 && CONST_INT_P (XEXP (lhs, 1))
2748 && INTVAL (XEXP (lhs, 1)) >= 0
2749 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2750 {
2751 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2752 GET_MODE_PRECISION (int_mode));
2753 lhs = XEXP (lhs, 0);
2754 }
2755
2756 if (GET_CODE (rhs) == NEG)
2757 {
2758 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2759 rhs = XEXP (rhs, 0);
2760 }
2761 else if (GET_CODE (rhs) == MULT
2762 && CONST_INT_P (XEXP (rhs, 1)))
2763 {
2764 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2765 rhs = XEXP (rhs, 0);
2766 }
2767 else if (GET_CODE (rhs) == ASHIFT
2768 && CONST_INT_P (XEXP (rhs, 1))
2769 && INTVAL (XEXP (rhs, 1)) >= 0
2770 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2771 {
2772 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2773 GET_MODE_PRECISION (int_mode));
2774 negcoeff1 = -negcoeff1;
2775 rhs = XEXP (rhs, 0);
2776 }
2777
2778 if (rtx_equal_p (lhs, rhs))
2779 {
2780 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2781 rtx coeff;
2782 bool speed = optimize_function_for_speed_p (cfun);
2783
2784 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2785
2786 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2787 return (set_src_cost (tem, int_mode, speed)
2788 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2789 }
2790 }
2791
2792 /* (a - (-b)) -> (a + b). True even for IEEE. */
2793 if (GET_CODE (op1) == NEG)
2794 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2795
2796 /* (-x - c) may be simplified as (-c - x). */
2797 if (GET_CODE (op0) == NEG
2798 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2799 {
2800 tem = simplify_unary_operation (NEG, mode, op1, mode);
2801 if (tem)
2802 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2803 }
2804
2805 if ((GET_CODE (op0) == CONST
2806 || GET_CODE (op0) == SYMBOL_REF
2807 || GET_CODE (op0) == LABEL_REF)
2808 && poly_int_rtx_p (op1, &offset))
2809 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2810
2811 /* Don't let a relocatable value get a negative coeff. */
2812 if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
2813 return simplify_gen_binary (PLUS, mode,
2814 op0,
2815 neg_poly_int_rtx (mode, op1));
2816
2817 /* (x - (x & y)) -> (x & ~y) */
2818 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2819 {
2820 if (rtx_equal_p (op0, XEXP (op1, 0)))
2821 {
2822 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2823 GET_MODE (XEXP (op1, 1)));
2824 return simplify_gen_binary (AND, mode, op0, tem);
2825 }
2826 if (rtx_equal_p (op0, XEXP (op1, 1)))
2827 {
2828 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2829 GET_MODE (XEXP (op1, 0)));
2830 return simplify_gen_binary (AND, mode, op0, tem);
2831 }
2832 }
2833
2834 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2835 by reversing the comparison code if valid. */
2836 if (STORE_FLAG_VALUE == 1
2837 && trueop0 == const1_rtx
2838 && COMPARISON_P (op1)
2839 && (reversed = reversed_comparison (op1, mode)))
2840 return reversed;
2841
2842 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2843 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2844 && GET_CODE (op1) == MULT
2845 && GET_CODE (XEXP (op1, 0)) == NEG)
2846 {
2847 rtx in1, in2;
2848
2849 in1 = XEXP (XEXP (op1, 0), 0);
2850 in2 = XEXP (op1, 1);
2851 return simplify_gen_binary (PLUS, mode,
2852 simplify_gen_binary (MULT, mode,
2853 in1, in2),
2854 op0);
2855 }
2856
2857 /* Canonicalize (minus (neg A) (mult B C)) to
2858 (minus (mult (neg B) C) A). */
2859 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2860 && GET_CODE (op1) == MULT
2861 && GET_CODE (op0) == NEG)
2862 {
2863 rtx in1, in2;
2864
2865 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2866 in2 = XEXP (op1, 1);
2867 return simplify_gen_binary (MINUS, mode,
2868 simplify_gen_binary (MULT, mode,
2869 in1, in2),
2870 XEXP (op0, 0));
2871 }
2872
2873 /* If one of the operands is a PLUS or a MINUS, see if we can
2874 simplify this by the associative law. This will, for example,
2875 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2876 Don't use the associative law for floating point.
2877 The inaccuracy makes it nonassociative,
2878 and subtle programs can break if operations are associated. */
2879
2880 if (INTEGRAL_MODE_P (mode)
2881 && (plus_minus_operand_p (op0)
2882 || plus_minus_operand_p (op1))
2883 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2884 return tem;
2885
2886 /* Handle vector series. */
2887 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2888 {
2889 tem = simplify_binary_operation_series (code, mode, op0, op1);
2890 if (tem)
2891 return tem;
2892 }
2893 break;
2894
2895 case MULT:
2896 if (trueop1 == constm1_rtx)
2897 return simplify_gen_unary (NEG, mode, op0, mode);
2898
2899 if (GET_CODE (op0) == NEG)
2900 {
2901 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2902 /* If op1 is a MULT as well and simplify_unary_operation
2903 just moved the NEG to the second operand, simplify_gen_binary
2904 below could, through simplify_associative_operation, move
2905 the NEG around again and recurse endlessly.  */
2906 if (temp
2907 && GET_CODE (op1) == MULT
2908 && GET_CODE (temp) == MULT
2909 && XEXP (op1, 0) == XEXP (temp, 0)
2910 && GET_CODE (XEXP (temp, 1)) == NEG
2911 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2912 temp = NULL_RTX;
2913 if (temp)
2914 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2915 }
2916 if (GET_CODE (op1) == NEG)
2917 {
2918 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2919 /* If op0 is a MULT as well and simplify_unary_operation
2920 just moved the NEG to the second operand, simplify_gen_binary
2921 below could, through simplify_associative_operation, move
2922 the NEG around again and recurse endlessly.  */
2923 if (temp
2924 && GET_CODE (op0) == MULT
2925 && GET_CODE (temp) == MULT
2926 && XEXP (op0, 0) == XEXP (temp, 0)
2927 && GET_CODE (XEXP (temp, 1)) == NEG
2928 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2929 temp = NULL_RTX;
2930 if (temp)
2931 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2932 }
2933
2934 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2935 x is NaN, since x * 0 is then also NaN. Nor is it valid
2936 when the mode has signed zeros, since multiplying a negative
2937 number by 0 will give -0, not 0. */
2938 if (!HONOR_NANS (mode)
2939 && !HONOR_SIGNED_ZEROS (mode)
2940 && trueop1 == CONST0_RTX (mode)
2941 && ! side_effects_p (op0))
2942 return op1;
2943
2944 /* In IEEE floating point, x*1 is not equivalent to x for
2945 signalling NaNs. */
2946 if (!HONOR_SNANS (mode)
2947 && trueop1 == CONST1_RTX (mode))
2948 return op0;
2949
2950 /* Convert multiply by constant power of two into shift. */
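/* E.g. (mult:SI (reg X) (const_int 8)) becomes
   (ashift:SI (reg X) (const_int 3)).  */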
2951 if (CONST_SCALAR_INT_P (trueop1))
2952 {
2953 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2954 if (val >= 0)
2955 return simplify_gen_binary (ASHIFT, mode, op0,
2956 gen_int_shift_amount (mode, val));
2957 }
2958
2959 /* x*2 is x+x and x*(-1) is -x */
2960 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2961 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2962 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2963 && GET_MODE (op0) == mode)
2964 {
2965 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2966
2967 if (real_equal (d1, &dconst2))
2968 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2969
2970 if (!HONOR_SNANS (mode)
2971 && real_equal (d1, &dconstm1))
2972 return simplify_gen_unary (NEG, mode, op0, mode);
2973 }
2974
2975 /* Optimize -x * -x as x * x. */
2976 if (FLOAT_MODE_P (mode)
2977 && GET_CODE (op0) == NEG
2978 && GET_CODE (op1) == NEG
2979 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2980 && !side_effects_p (XEXP (op0, 0)))
2981 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2982
2983 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2984 if (SCALAR_FLOAT_MODE_P (mode)
2985 && GET_CODE (op0) == ABS
2986 && GET_CODE (op1) == ABS
2987 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2988 && !side_effects_p (XEXP (op0, 0)))
2989 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2990
2991 /* Reassociate multiplication, but for floating point MULTs
2992 only when the user specifies unsafe math optimizations. */
2993 if (! FLOAT_MODE_P (mode)
2994 || flag_unsafe_math_optimizations)
2995 {
2996 tem = simplify_associative_operation (code, mode, op0, op1);
2997 if (tem)
2998 return tem;
2999 }
3000 break;
3001
3002 case IOR:
3003 if (trueop1 == CONST0_RTX (mode))
3004 return op0;
3005 if (INTEGRAL_MODE_P (mode)
3006 && trueop1 == CONSTM1_RTX (mode)
3007 && !side_effects_p (op0))
3008 return op1;
3009 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3010 return op0;
3011 /* A | (~A) -> -1 */
3012 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3013 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3014 && ! side_effects_p (op0)
3015 && SCALAR_INT_MODE_P (mode))
3016 return constm1_rtx;
3017
3018 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
3019 if (CONST_INT_P (op1)
3020 && HWI_COMPUTABLE_MODE_P (mode)
3021 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
3022 && !side_effects_p (op0))
3023 return op1;
3024
3025 /* Canonicalize (X & C1) | C2. */
3026 if (GET_CODE (op0) == AND
3027 && CONST_INT_P (trueop1)
3028 && CONST_INT_P (XEXP (op0, 1)))
3029 {
3030 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
3031 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
3032 HOST_WIDE_INT c2 = INTVAL (trueop1);
3033
3034 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
3035 if ((c1 & c2) == c1
3036 && !side_effects_p (XEXP (op0, 0)))
3037 return trueop1;
3038
3039 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
3040 if (((c1|c2) & mask) == mask)
3041 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
3042 }
3043
3044 /* Convert (A & B) | A to A. */
3045 if (GET_CODE (op0) == AND
3046 && (rtx_equal_p (XEXP (op0, 0), op1)
3047 || rtx_equal_p (XEXP (op0, 1), op1))
3048 && ! side_effects_p (XEXP (op0, 0))
3049 && ! side_effects_p (XEXP (op0, 1)))
3050 return op1;
3051
3052 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
3053 mode size to (rotate A CX). */
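/* For SImode this means, for example, that
   (ior:SI (ashift:SI (reg A) (const_int 24))
           (lshiftrt:SI (reg A) (const_int 8)))
   is recognised as (rotate:SI (reg A) (const_int 24)),
   since 24 + 8 equals the 32-bit precision of SImode.  */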
3054
3055 if (GET_CODE (op1) == ASHIFT
3056 || GET_CODE (op1) == SUBREG)
3057 {
3058 opleft = op1;
3059 opright = op0;
3060 }
3061 else
3062 {
3063 opright = op1;
3064 opleft = op0;
3065 }
3066
3067 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
3068 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
3069 && CONST_INT_P (XEXP (opleft, 1))
3070 && CONST_INT_P (XEXP (opright, 1))
3071 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
3072 == GET_MODE_UNIT_PRECISION (mode)))
3073 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
3074
3075 /* Same, but for ashift that has been "simplified" to a wider mode
3076 by simplify_shift_const. */
3077
3078 if (GET_CODE (opleft) == SUBREG
3079 && is_a <scalar_int_mode> (mode, &int_mode)
3080 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
3081 &inner_mode)
3082 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
3083 && GET_CODE (opright) == LSHIFTRT
3084 && GET_CODE (XEXP (opright, 0)) == SUBREG
3085 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
3086 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
3087 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
3088 SUBREG_REG (XEXP (opright, 0)))
3089 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
3090 && CONST_INT_P (XEXP (opright, 1))
3091 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
3092 + INTVAL (XEXP (opright, 1))
3093 == GET_MODE_PRECISION (int_mode)))
3094 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
3095 XEXP (SUBREG_REG (opleft), 1));
3096
3097 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
3098 a (sign_extend (plus ...)).  If OP1 is a CONST_INT and
3099 the PLUS does not affect any of the bits in OP1, we can do
3100 the IOR as a PLUS and we can associate.  This is valid if OP1
3101 can be safely shifted left C bits.  */
3102 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
3103 && GET_CODE (XEXP (op0, 0)) == PLUS
3104 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
3105 && CONST_INT_P (XEXP (op0, 1))
3106 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
3107 {
3108 int count = INTVAL (XEXP (op0, 1));
3109 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
3110
3111 if (mask >> count == INTVAL (trueop1)
3112 && trunc_int_for_mode (mask, mode) == mask
3113 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
3114 return simplify_gen_binary (ASHIFTRT, mode,
3115 plus_constant (mode, XEXP (op0, 0),
3116 mask),
3117 XEXP (op0, 1));
3118 }
3119
3120 /* The following happens with bitfield merging.
3121 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
3122 if (GET_CODE (op0) == AND
3123 && GET_CODE (op1) == AND
3124 && CONST_INT_P (XEXP (op0, 1))
3125 && CONST_INT_P (XEXP (op1, 1))
3126 && (INTVAL (XEXP (op0, 1))
3127 == ~INTVAL (XEXP (op1, 1))))
3128 {
3129 /* The IOR may be on both sides. */
3130 rtx top0 = NULL_RTX, top1 = NULL_RTX;
3131 if (GET_CODE (XEXP (op1, 0)) == IOR)
3132 top0 = op0, top1 = op1;
3133 else if (GET_CODE (XEXP (op0, 0)) == IOR)
3134 top0 = op1, top1 = op0;
3135 if (top0 && top1)
3136 {
3137 /* X may be on either side of the inner IOR. */
3138 rtx tem = NULL_RTX;
3139 if (rtx_equal_p (XEXP (top0, 0),
3140 XEXP (XEXP (top1, 0), 0)))
3141 tem = XEXP (XEXP (top1, 0), 1);
3142 else if (rtx_equal_p (XEXP (top0, 0),
3143 XEXP (XEXP (top1, 0), 1)))
3144 tem = XEXP (XEXP (top1, 0), 0);
3145 if (tem)
3146 return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
3147 simplify_gen_binary
3148 (AND, mode, tem, XEXP (top1, 1)));
3149 }
3150 }
3151
3152 /* Convert (ior (and A C) (and B C)) into (and (ior A B) C). */
3153 if (GET_CODE (op0) == GET_CODE (op1)
3154 && (GET_CODE (op0) == AND
3155 || GET_CODE (op0) == IOR
3156 || GET_CODE (op0) == LSHIFTRT
3157 || GET_CODE (op0) == ASHIFTRT
3158 || GET_CODE (op0) == ASHIFT
3159 || GET_CODE (op0) == ROTATE
3160 || GET_CODE (op0) == ROTATERT))
3161 {
3162 tem = simplify_distributive_operation (code, mode, op0, op1);
3163 if (tem)
3164 return tem;
3165 }
3166
3167 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3168 if (tem)
3169 return tem;
3170
3171 tem = simplify_associative_operation (code, mode, op0, op1);
3172 if (tem)
3173 return tem;
3174
3175 tem = simplify_logical_relational_operation (code, mode, op0, op1);
3176 if (tem)
3177 return tem;
3178 break;
3179
3180 case XOR:
3181 if (trueop1 == CONST0_RTX (mode))
3182 return op0;
3183 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3184 return simplify_gen_unary (NOT, mode, op0, mode);
3185 if (rtx_equal_p (trueop0, trueop1)
3186 && ! side_effects_p (op0)
3187 && GET_MODE_CLASS (mode) != MODE_CC)
3188 return CONST0_RTX (mode);
3189
3190 /* Canonicalize XOR of the most significant bit to PLUS. */
3191 if (CONST_SCALAR_INT_P (op1)
3192 && mode_signbit_p (mode, op1))
3193 return simplify_gen_binary (PLUS, mode, op0, op1);
3194 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
3195 if (CONST_SCALAR_INT_P (op1)
3196 && GET_CODE (op0) == PLUS
3197 && CONST_SCALAR_INT_P (XEXP (op0, 1))
3198 && mode_signbit_p (mode, XEXP (op0, 1)))
3199 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
3200 simplify_gen_binary (XOR, mode, op1,
3201 XEXP (op0, 1)));
3202
3203 /* If we are XORing two things that have no bits in common,
3204 convert them into an IOR. This helps to detect rotation encoded
3205 using those methods and possibly other simplifications. */
3206
3207 if (HWI_COMPUTABLE_MODE_P (mode)
3208 && (nonzero_bits (op0, mode)
3209 & nonzero_bits (op1, mode)) == 0)
3210 return (simplify_gen_binary (IOR, mode, op0, op1));
3211
3212 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3213 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3214 (NOT y). */
3215 {
3216 int num_negated = 0;
3217
3218 if (GET_CODE (op0) == NOT)
3219 num_negated++, op0 = XEXP (op0, 0);
3220 if (GET_CODE (op1) == NOT)
3221 num_negated++, op1 = XEXP (op1, 0);
3222
3223 if (num_negated == 2)
3224 return simplify_gen_binary (XOR, mode, op0, op1);
3225 else if (num_negated == 1)
3226 return simplify_gen_unary (NOT, mode,
3227 simplify_gen_binary (XOR, mode, op0, op1),
3228 mode);
3229 }
3230
3231 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
3232 correspond to a machine insn or result in further simplifications
3233 if B is a constant. */
3234
3235 if (GET_CODE (op0) == AND
3236 && rtx_equal_p (XEXP (op0, 1), op1)
3237 && ! side_effects_p (op1))
3238 return simplify_gen_binary (AND, mode,
3239 simplify_gen_unary (NOT, mode,
3240 XEXP (op0, 0), mode),
3241 op1);
3242
3243 else if (GET_CODE (op0) == AND
3244 && rtx_equal_p (XEXP (op0, 0), op1)
3245 && ! side_effects_p (op1))
3246 return simplify_gen_binary (AND, mode,
3247 simplify_gen_unary (NOT, mode,
3248 XEXP (op0, 1), mode),
3249 op1);
3250
3251 /* Given (xor (ior (xor A B) C) D), where B, C and D are
3252 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3253 out bits inverted twice and not set by C. Similarly, given
3254 (xor (and (xor A B) C) D), simplify without inverting C in
3255 the xor operand: (xor (and A C) (B&C)^D).
3256 */
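/* With concrete constants (illustrative only), B == 0x0f, C == 0xf0,
   D == 0x33 and an IOR give
     (xor (ior (xor A 0x0f) 0xf0) 0x33)
       -> (xor (ior A 0xf0) (const_int 0x3c)),
   because (0x0f & ~0xf0) ^ 0x33 == 0x0f ^ 0x33 == 0x3c.  */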
3257 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
3258 && GET_CODE (XEXP (op0, 0)) == XOR
3259 && CONST_INT_P (op1)
3260 && CONST_INT_P (XEXP (op0, 1))
3261 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
3262 {
3263 enum rtx_code op = GET_CODE (op0);
3264 rtx a = XEXP (XEXP (op0, 0), 0);
3265 rtx b = XEXP (XEXP (op0, 0), 1);
3266 rtx c = XEXP (op0, 1);
3267 rtx d = op1;
3268 HOST_WIDE_INT bval = INTVAL (b);
3269 HOST_WIDE_INT cval = INTVAL (c);
3270 HOST_WIDE_INT dval = INTVAL (d);
3271 HOST_WIDE_INT xcval;
3272
3273 if (op == IOR)
3274 xcval = ~cval;
3275 else
3276 xcval = cval;
3277
3278 return simplify_gen_binary (XOR, mode,
3279 simplify_gen_binary (op, mode, a, c),
3280 gen_int_mode ((bval & xcval) ^ dval,
3281 mode));
3282 }
3283
3284 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3285 we can transform it like this:
3286 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3287 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3288 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3289 Attempt a few simplifications when B and C are both constants. */
3290 if (GET_CODE (op0) == AND
3291 && CONST_INT_P (op1)
3292 && CONST_INT_P (XEXP (op0, 1)))
3293 {
3294 rtx a = XEXP (op0, 0);
3295 rtx b = XEXP (op0, 1);
3296 rtx c = op1;
3297 HOST_WIDE_INT bval = INTVAL (b);
3298 HOST_WIDE_INT cval = INTVAL (c);
3299
3300 /* Instead of computing ~A&C directly, we compute its negation
3301 A|~C (note that ~A&C == ~(A|~C)).  If that yields -1, then ~A&C
3302 is zero, so we can optimize for sure.  If it does not simplify,
3303 we still try to compute ~A&C below, but since that always
3304 allocates RTL, we don't try that before committing to returning
3305 a simplified expression.  */
3306 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3307 GEN_INT (~cval));
3308
3309 if ((~cval & bval) == 0)
3310 {
3311 rtx na_c = NULL_RTX;
3312 if (n_na_c)
3313 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3314 else
3315 {
3316 /* If ~A does not simplify, don't bother: we don't
3317 want to simplify 2 operations into 3, and if na_c
3318 were to simplify with na, n_na_c would have
3319 simplified as well. */
3320 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3321 if (na)
3322 na_c = simplify_gen_binary (AND, mode, na, c);
3323 }
3324
3325 /* Try to simplify ~A&C | ~B&C. */
3326 if (na_c != NULL_RTX)
3327 return simplify_gen_binary (IOR, mode, na_c,
3328 gen_int_mode (~bval & cval, mode));
3329 }
3330 else
3331 {
3332 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3333 if (n_na_c == CONSTM1_RTX (mode))
3334 {
3335 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3336 gen_int_mode (~cval & bval,
3337 mode));
3338 return simplify_gen_binary (IOR, mode, a_nc_b,
3339 gen_int_mode (~bval & cval,
3340 mode));
3341 }
3342 }
3343 }
3344
3345 /* If we have (xor (and (xor A B) C) A) with C a constant, we can instead
3346 do (ior (and A ~C) (and B C)), which maps to a machine instruction on
3347 some machines and also has a shorter instruction path length.  */
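/* The identity holds bit by bit: where a bit of C is 1 the result bit
   is (A^B)^A == B, matching (and B C); where it is 0 the result bit is
   0^A == A, matching (and A ~C).  */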
3348 if (GET_CODE (op0) == AND
3349 && GET_CODE (XEXP (op0, 0)) == XOR
3350 && CONST_INT_P (XEXP (op0, 1))
3351 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3352 {
3353 rtx a = trueop1;
3354 rtx b = XEXP (XEXP (op0, 0), 1);
3355 rtx c = XEXP (op0, 1);
3356 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3357 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3358 rtx bc = simplify_gen_binary (AND, mode, b, c);
3359 return simplify_gen_binary (IOR, mode, a_nc, bc);
3360 }
3361 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3362 else if (GET_CODE (op0) == AND
3363 && GET_CODE (XEXP (op0, 0)) == XOR
3364 && CONST_INT_P (XEXP (op0, 1))
3365 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3366 {
3367 rtx a = XEXP (XEXP (op0, 0), 0);
3368 rtx b = trueop1;
3369 rtx c = XEXP (op0, 1);
3370 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3371 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3372 rtx ac = simplify_gen_binary (AND, mode, a, c);
3373 return simplify_gen_binary (IOR, mode, ac, b_nc);
3374 }
3375
3376 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3377 comparison if STORE_FLAG_VALUE is 1. */
3378 if (STORE_FLAG_VALUE == 1
3379 && trueop1 == const1_rtx
3380 && COMPARISON_P (op0)
3381 && (reversed = reversed_comparison (op0, mode)))
3382 return reversed;
3383
3384 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3385 is (lt foo (const_int 0)), so we can perform the above
3386 simplification if STORE_FLAG_VALUE is 1. */
3387
3388 if (is_a <scalar_int_mode> (mode, &int_mode)
3389 && STORE_FLAG_VALUE == 1
3390 && trueop1 == const1_rtx
3391 && GET_CODE (op0) == LSHIFTRT
3392 && CONST_INT_P (XEXP (op0, 1))
3393 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3394 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3395
3396 /* (xor (comparison foo bar) (const_int sign-bit))
3397 when STORE_FLAG_VALUE is the sign bit. */
3398 if (is_a <scalar_int_mode> (mode, &int_mode)
3399 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3400 && trueop1 == const_true_rtx
3401 && COMPARISON_P (op0)
3402 && (reversed = reversed_comparison (op0, int_mode)))
3403 return reversed;
3404
3405 /* Convert (xor (and A C) (and B C)) into (and (xor A B) C). */
3406 if (GET_CODE (op0) == GET_CODE (op1)
3407 && (GET_CODE (op0) == AND
3408 || GET_CODE (op0) == LSHIFTRT
3409 || GET_CODE (op0) == ASHIFTRT
3410 || GET_CODE (op0) == ASHIFT
3411 || GET_CODE (op0) == ROTATE
3412 || GET_CODE (op0) == ROTATERT))
3413 {
3414 tem = simplify_distributive_operation (code, mode, op0, op1);
3415 if (tem)
3416 return tem;
3417 }
3418
3419 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3420 if (tem)
3421 return tem;
3422
3423 tem = simplify_associative_operation (code, mode, op0, op1);
3424 if (tem)
3425 return tem;
3426 break;
3427
3428 case AND:
3429 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3430 return trueop1;
3431 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3432 return op0;
3433 if (HWI_COMPUTABLE_MODE_P (mode))
3434 {
3435 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3436 HOST_WIDE_INT nzop1;
3437 if (CONST_INT_P (trueop1))
3438 {
3439 HOST_WIDE_INT val1 = INTVAL (trueop1);
3440 /* If we are turning off bits already known off in OP0, we need
3441 not do an AND. */
3442 if ((nzop0 & ~val1) == 0)
3443 return op0;
3444 }
3445 nzop1 = nonzero_bits (trueop1, mode);
3446 /* If we are clearing all the nonzero bits, the result is zero. */
3447 if ((nzop1 & nzop0) == 0
3448 && !side_effects_p (op0) && !side_effects_p (op1))
3449 return CONST0_RTX (mode);
3450 }
3451 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3452 && GET_MODE_CLASS (mode) != MODE_CC)
3453 return op0;
3454 /* A & (~A) -> 0 */
3455 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3456 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3457 && ! side_effects_p (op0)
3458 && GET_MODE_CLASS (mode) != MODE_CC)
3459 return CONST0_RTX (mode);
3460
3461 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3462 there are no nonzero bits of C outside of X's mode. */
3463 if ((GET_CODE (op0) == SIGN_EXTEND
3464 || GET_CODE (op0) == ZERO_EXTEND)
3465 && CONST_INT_P (trueop1)
3466 && HWI_COMPUTABLE_MODE_P (mode)
3467 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3468 & UINTVAL (trueop1)) == 0)
3469 {
3470 machine_mode imode = GET_MODE (XEXP (op0, 0));
3471 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3472 gen_int_mode (INTVAL (trueop1),
3473 imode));
3474 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3475 }
3476
3477 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3478 we might be able to further simplify the AND with X and potentially
3479 remove the truncation altogether. */
3480 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3481 {
3482 rtx x = XEXP (op0, 0);
3483 machine_mode xmode = GET_MODE (x);
3484 tem = simplify_gen_binary (AND, xmode, x,
3485 gen_int_mode (INTVAL (trueop1), xmode));
3486 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3487 }
3488
3489 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
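      /* Illustrative example with arbitrarily chosen constants: with
	 C1 == 0x0f and C2 == 0xfc,
	 (and (ior A (const_int 0x0f)) (const_int 0xfc)) becomes
	 (ior (and A (const_int 0xfc)) (const_int 0x0c)),
	 since 0x0f & 0xfc is 0x0c.  */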
3490 if (GET_CODE (op0) == IOR
3491 && CONST_INT_P (trueop1)
3492 && CONST_INT_P (XEXP (op0, 1)))
3493 {
3494 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3495 return simplify_gen_binary (IOR, mode,
3496 simplify_gen_binary (AND, mode,
3497 XEXP (op0, 0), op1),
3498 gen_int_mode (tmp, mode));
3499 }
3500
3501 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3502 insn (and may simplify more). */
3503 if (GET_CODE (op0) == XOR
3504 && rtx_equal_p (XEXP (op0, 0), op1)
3505 && ! side_effects_p (op1))
3506 return simplify_gen_binary (AND, mode,
3507 simplify_gen_unary (NOT, mode,
3508 XEXP (op0, 1), mode),
3509 op1);
3510
3511 if (GET_CODE (op0) == XOR
3512 && rtx_equal_p (XEXP (op0, 1), op1)
3513 && ! side_effects_p (op1))
3514 return simplify_gen_binary (AND, mode,
3515 simplify_gen_unary (NOT, mode,
3516 XEXP (op0, 0), mode),
3517 op1);
3518
3519 /* Similarly for (~(A ^ B)) & A. */
3520 if (GET_CODE (op0) == NOT
3521 && GET_CODE (XEXP (op0, 0)) == XOR
3522 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3523 && ! side_effects_p (op1))
3524 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3525
3526 if (GET_CODE (op0) == NOT
3527 && GET_CODE (XEXP (op0, 0)) == XOR
3528 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3529 && ! side_effects_p (op1))
3530 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3531
3532 /* Convert (A | B) & A to A. */
3533 if (GET_CODE (op0) == IOR
3534 && (rtx_equal_p (XEXP (op0, 0), op1)
3535 || rtx_equal_p (XEXP (op0, 1), op1))
3536 && ! side_effects_p (XEXP (op0, 0))
3537 && ! side_effects_p (XEXP (op0, 1)))
3538 return op1;
3539
3540 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3541 ((A & N) + B) & M -> (A + B) & M
3542 Similarly if (N & M) == 0,
3543 ((A | N) + B) & M -> (A + B) & M
3544 and for - instead of + and/or ^ instead of |.
3545 Also, if (N & M) == 0, then
3546 (A +- N) & M -> A & M. */
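      /* A concrete illustration with arbitrarily chosen constants: with
	 M == 0xff and N == 0x100 we have (N & M) == 0, so
	 ((A | 0x100) + B) & 0xff becomes (A + B) & 0xff, and
	 (A + 0x100) & 0xff becomes A & 0xff.  */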
3547 if (CONST_INT_P (trueop1)
3548 && HWI_COMPUTABLE_MODE_P (mode)
3549 && ~UINTVAL (trueop1)
3550 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3551 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3552 {
3553 rtx pmop[2];
3554 int which;
3555
3556 pmop[0] = XEXP (op0, 0);
3557 pmop[1] = XEXP (op0, 1);
3558
3559 if (CONST_INT_P (pmop[1])
3560 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3561 return simplify_gen_binary (AND, mode, pmop[0], op1);
3562
3563 for (which = 0; which < 2; which++)
3564 {
3565 tem = pmop[which];
3566 switch (GET_CODE (tem))
3567 {
3568 case AND:
3569 if (CONST_INT_P (XEXP (tem, 1))
3570 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3571 == UINTVAL (trueop1))
3572 pmop[which] = XEXP (tem, 0);
3573 break;
3574 case IOR:
3575 case XOR:
3576 if (CONST_INT_P (XEXP (tem, 1))
3577 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3578 pmop[which] = XEXP (tem, 0);
3579 break;
3580 default:
3581 break;
3582 }
3583 }
3584
3585 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3586 {
3587 tem = simplify_gen_binary (GET_CODE (op0), mode,
3588 pmop[0], pmop[1]);
3589 return simplify_gen_binary (code, mode, tem, op1);
3590 }
3591 }
3592
3595 3593 /* (and X (ior (not X) Y)) -> (and X Y) */
3594 if (GET_CODE (op1) == IOR
3595 && GET_CODE (XEXP (op1, 0)) == NOT
3596 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3597 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3598
3599 /* (and (ior (not X) Y) X) -> (and X Y) */
3600 if (GET_CODE (op0) == IOR
3601 && GET_CODE (XEXP (op0, 0)) == NOT
3602 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3603 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3604
3607 3605 /* (and X (ior Y (not X))) -> (and X Y) */
3606 if (GET_CODE (op1) == IOR
3607 && GET_CODE (XEXP (op1, 1)) == NOT
3608 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3609 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3610
3611 /* (and (ior Y (not X)) X) -> (and X Y) */
3612 if (GET_CODE (op0) == IOR
3613 && GET_CODE (XEXP (op0, 1)) == NOT
3614 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3615 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3616
3617 /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C). */
3618 if (GET_CODE (op0) == GET_CODE (op1)
3619 && (GET_CODE (op0) == AND
3620 || GET_CODE (op0) == IOR
3621 || GET_CODE (op0) == LSHIFTRT
3622 || GET_CODE (op0) == ASHIFTRT
3623 || GET_CODE (op0) == ASHIFT
3624 || GET_CODE (op0) == ROTATE
3625 || GET_CODE (op0) == ROTATERT))
3626 {
3627 tem = simplify_distributive_operation (code, mode, op0, op1);
3628 if (tem)
3629 return tem;
3630 }
3631
3632 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3633 if (tem)
3634 return tem;
3635
3636 tem = simplify_associative_operation (code, mode, op0, op1);
3637 if (tem)
3638 return tem;
3639 break;
3640
3641 case UDIV:
3642 /* 0/x is 0 (or x&0 if x has side-effects). */
3643 if (trueop0 == CONST0_RTX (mode)
3644 && !cfun->can_throw_non_call_exceptions)
3645 {
3646 if (side_effects_p (op1))
3647 return simplify_gen_binary (AND, mode, op1, trueop0);
3648 return trueop0;
3649 }
3650 /* x/1 is x. */
3651 if (trueop1 == CONST1_RTX (mode))
3652 {
3653 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3654 if (tem)
3655 return tem;
3656 }
3657 /* Convert divide by power of two into shift. */
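      /* E.g. (purely illustrative), (udiv:SI X (const_int 16))
	 becomes (lshiftrt:SI X (const_int 4)).  */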
3658 if (CONST_INT_P (trueop1)
3659 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3660 return simplify_gen_binary (LSHIFTRT, mode, op0,
3661 gen_int_shift_amount (mode, val));
3662 break;
3663
3664 case DIV:
3665 /* Handle floating point and integers separately. */
3666 if (SCALAR_FLOAT_MODE_P (mode))
3667 {
3668 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3669 safe for modes with NaNs, since 0.0 / 0.0 will then be
3670 NaN rather than 0.0. Nor is it safe for modes with signed
3673 3671 zeros, since dividing 0 by a negative number gives -0.0. */
3672 if (trueop0 == CONST0_RTX (mode)
3673 && !HONOR_NANS (mode)
3674 && !HONOR_SIGNED_ZEROS (mode)
3675 && ! side_effects_p (op1))
3676 return op0;
3677 /* x/1.0 is x. */
3678 if (trueop1 == CONST1_RTX (mode)
3679 && !HONOR_SNANS (mode))
3680 return op0;
3681
3682 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3683 && trueop1 != CONST0_RTX (mode))
3684 {
3685 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3686
3687 /* x/-1.0 is -x. */
3688 if (real_equal (d1, &dconstm1)
3689 && !HONOR_SNANS (mode))
3690 return simplify_gen_unary (NEG, mode, op0, mode);
3691
3692 /* Change FP division by a constant into multiplication.
3693 Only do this with -freciprocal-math. */
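	  /* Illustrative example: with -freciprocal-math,
	     (div:DF X (const_double:DF 4.0)) becomes
	     (mult:DF X (const_double:DF 0.25)); 4.0 is chosen here
	     because its reciprocal is exactly representable.  */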
3694 if (flag_reciprocal_math
3695 && !real_equal (d1, &dconst0))
3696 {
3697 REAL_VALUE_TYPE d;
3698 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3699 tem = const_double_from_real_value (d, mode);
3700 return simplify_gen_binary (MULT, mode, op0, tem);
3701 }
3702 }
3703 }
3704 else if (SCALAR_INT_MODE_P (mode))
3705 {
3706 /* 0/x is 0 (or x&0 if x has side-effects). */
3707 if (trueop0 == CONST0_RTX (mode)
3708 && !cfun->can_throw_non_call_exceptions)
3709 {
3710 if (side_effects_p (op1))
3711 return simplify_gen_binary (AND, mode, op1, trueop0);
3712 return trueop0;
3713 }
3714 /* x/1 is x. */
3715 if (trueop1 == CONST1_RTX (mode))
3716 {
3717 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3718 if (tem)
3719 return tem;
3720 }
3721 /* x/-1 is -x. */
3722 if (trueop1 == constm1_rtx)
3723 {
3724 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3725 if (x)
3726 return simplify_gen_unary (NEG, mode, x, mode);
3727 }
3728 }
3729 break;
3730
3731 case UMOD:
3732 /* 0%x is 0 (or x&0 if x has side-effects). */
3733 if (trueop0 == CONST0_RTX (mode))
3734 {
3735 if (side_effects_p (op1))
3736 return simplify_gen_binary (AND, mode, op1, trueop0);
3737 return trueop0;
3738 }
3741 3739 /* x%1 is 0 (or x&0 if x has side-effects). */
3740 if (trueop1 == CONST1_RTX (mode))
3741 {
3742 if (side_effects_p (op0))
3743 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3744 return CONST0_RTX (mode);
3745 }
3746 /* Implement modulus by power of two as AND. */
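      /* E.g. (purely illustrative), (umod:SI X (const_int 8)) becomes
	 (and:SI X (const_int 7)).  */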
3747 if (CONST_INT_P (trueop1)
3748 && exact_log2 (UINTVAL (trueop1)) > 0)
3749 return simplify_gen_binary (AND, mode, op0,
3750 gen_int_mode (UINTVAL (trueop1) - 1,
3751 mode));
3752 break;
3753
3754 case MOD:
3755 /* 0%x is 0 (or x&0 if x has side-effects). */
3756 if (trueop0 == CONST0_RTX (mode))
3757 {
3758 if (side_effects_p (op1))
3759 return simplify_gen_binary (AND, mode, op1, trueop0);
3760 return trueop0;
3761 }
3762 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3763 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3764 {
3765 if (side_effects_p (op0))
3766 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3767 return CONST0_RTX (mode);
3768 }
3769 break;
3770
3771 case ROTATERT:
3772 case ROTATE:
3773 if (trueop1 == CONST0_RTX (mode))
3774 return op0;
3777 3775 /* Canonicalize rotates by a constant amount. If op1 is bitsize / 2,
3778 3776 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3779 3777 bitsize - 1, use the other rotate direction with an amount of
3780 3778 1 .. bitsize / 2 - 1 instead. */
3779 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3780 if (CONST_INT_P (trueop1)
3781 && IN_RANGE (INTVAL (trueop1),
3782 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3783 GET_MODE_UNIT_PRECISION (mode) - 1))
3784 {
3785 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3786 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3787 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3788 mode, op0, new_amount_rtx);
3789 }
3790 #endif
3791 /* FALLTHRU */
3792 case ASHIFTRT:
3793 if (trueop1 == CONST0_RTX (mode))
3794 return op0;
3795 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3796 return op0;
3797 /* Rotating ~0 always results in ~0. */
3798 if (CONST_INT_P (trueop0)
3799 && HWI_COMPUTABLE_MODE_P (mode)
3800 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3801 && ! side_effects_p (op1))
3802 return op0;
3803
3804 canonicalize_shift:
3805 /* Given:
3806 scalar modes M1, M2
3807 scalar constants c1, c2
3808 size (M2) > size (M1)
3809 c1 == size (M2) - size (M1)
3810 optimize:
3811 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3812 <low_part>)
3813 (const_int <c2>))
3814 to:
3815 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3816 <low_part>). */
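    /* A hypothetical instance of the pattern above: with M1 == SImode,
       M2 == DImode, c1 == 32 and c2 == 5,
       (lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI) (const_int 32))
       <low_part>) (const_int 5))
       becomes
       (subreg:SI (lshiftrt:DI (reg:DI) (const_int 37)) <low_part>).  */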
3817 if ((code == ASHIFTRT || code == LSHIFTRT)
3818 && is_a <scalar_int_mode> (mode, &int_mode)
3819 && SUBREG_P (op0)
3820 && CONST_INT_P (op1)
3821 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3822 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3823 &inner_mode)
3824 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3825 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3826 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3827 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3828 && subreg_lowpart_p (op0))
3829 {
3830 rtx tmp = gen_int_shift_amount
3831 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3832
3835 3833 /* Combine would usually zero out the value when combining two
3836 3834 local shifts whose combined count is larger than or equal to the
3837 3835 mode's precision. However, since we fold away one of the shifts
3838 3836 here, combine won't see it, so we should immediately zero the
3839 3837 result if it's out of range. */
3838 if (code == LSHIFTRT
3839 && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
3840 tmp = const0_rtx;
3841 else
3842 tmp = simplify_gen_binary (code,
3843 inner_mode,
3844 XEXP (SUBREG_REG (op0), 0),
3845 tmp);
3846
3847 return lowpart_subreg (int_mode, tmp, inner_mode);
3848 }
3849
3850 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3851 {
3852 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3853 if (val != INTVAL (op1))
3854 return simplify_gen_binary (code, mode, op0,
3855 gen_int_shift_amount (mode, val));
3856 }
3857 break;
3858
3859 case ASHIFT:
3860 case SS_ASHIFT:
3861 case US_ASHIFT:
3862 if (trueop1 == CONST0_RTX (mode))
3863 return op0;
3864 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3865 return op0;
3866 goto canonicalize_shift;
3867
3868 case LSHIFTRT:
3869 if (trueop1 == CONST0_RTX (mode))
3870 return op0;
3871 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3872 return op0;
3873 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3874 if (GET_CODE (op0) == CLZ
3875 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3876 && CONST_INT_P (trueop1)
3877 && STORE_FLAG_VALUE == 1
3878 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3879 {
3880 unsigned HOST_WIDE_INT zero_val = 0;
3881
3882 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3883 && zero_val == GET_MODE_PRECISION (inner_mode)
3884 && INTVAL (trueop1) == exact_log2 (zero_val))
3885 return simplify_gen_relational (EQ, mode, inner_mode,
3886 XEXP (op0, 0), const0_rtx);
3887 }
3888 goto canonicalize_shift;
3889
3890 case SMIN:
3891 if (HWI_COMPUTABLE_MODE_P (mode)
3892 && mode_signbit_p (mode, trueop1)
3893 && ! side_effects_p (op0))
3894 return op1;
3895 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3896 return op0;
3897 tem = simplify_associative_operation (code, mode, op0, op1);
3898 if (tem)
3899 return tem;
3900 break;
3901
3902 case SMAX:
3903 if (HWI_COMPUTABLE_MODE_P (mode)
3904 && CONST_INT_P (trueop1)
3905 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3906 && ! side_effects_p (op0))
3907 return op1;
3908 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3909 return op0;
3910 tem = simplify_associative_operation (code, mode, op0, op1);
3911 if (tem)
3912 return tem;
3913 break;
3914
3915 case UMIN:
3916 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3917 return op1;
3918 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3919 return op0;
3920 tem = simplify_associative_operation (code, mode, op0, op1);
3921 if (tem)
3922 return tem;
3923 break;
3924
3925 case UMAX:
3926 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3927 return op1;
3928 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3929 return op0;
3930 tem = simplify_associative_operation (code, mode, op0, op1);
3931 if (tem)
3932 return tem;
3933 break;
3934
3935 case SS_PLUS:
3936 case US_PLUS:
3937 case SS_MINUS:
3938 case US_MINUS:
3939 case SS_MULT:
3940 case US_MULT:
3941 case SS_DIV:
3942 case US_DIV:
3943 /* ??? There are simplifications that can be done. */
3944 return 0;
3945
3946 case VEC_SERIES:
3947 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3948 return gen_vec_duplicate (mode, op0);
3949 if (valid_for_const_vector_p (mode, op0)
3950 && valid_for_const_vector_p (mode, op1))
3951 return gen_const_vec_series (mode, op0, op1);
3952 return 0;
3953
3954 case VEC_SELECT:
3955 if (!VECTOR_MODE_P (mode))
3956 {
3957 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3958 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3959 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3960 gcc_assert (XVECLEN (trueop1, 0) == 1);
3961
3962 /* We can't reason about selections made at runtime. */
3963 if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3964 return 0;
3965
3966 if (vec_duplicate_p (trueop0, &elt0))
3967 return elt0;
3968
3969 if (GET_CODE (trueop0) == CONST_VECTOR)
3970 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3971 (trueop1, 0, 0)));
3972
3975 3973 /* Extract a scalar element from a nested VEC_SELECT expression
3976 3974 (with an optional nested VEC_CONCAT expression). Some targets
3977 3975 (i386) extract a scalar element from a vector using a chain of
3978 3976 nested VEC_SELECT expressions. When the input operand is a memory
3979 3977 operand, this operation can be simplified to a simple scalar
3980 3978 load from an offset memory address. */
3979 int n_elts;
3980 if (GET_CODE (trueop0) == VEC_SELECT
3981 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3982 .is_constant (&n_elts)))
3983 {
3984 rtx op0 = XEXP (trueop0, 0);
3985 rtx op1 = XEXP (trueop0, 1);
3986
3987 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3988 int elem;
3989
3990 rtvec vec;
3991 rtx tmp_op, tmp;
3992
3993 gcc_assert (GET_CODE (op1) == PARALLEL);
3994 gcc_assert (i < n_elts);
3995
L3998 3996 /* Select the element pointed to by the nested selector. */
3997 elem = INTVAL (XVECEXP (op1, 0, i));
3998
3999 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
4000 if (GET_CODE (op0) == VEC_CONCAT)
4001 {
4002 rtx op00 = XEXP (op0, 0);
4003 rtx op01 = XEXP (op0, 1);
4004
4005 machine_mode mode00, mode01;
4006 int n_elts00, n_elts01;
4007
4008 mode00 = GET_MODE (op00);
4009 mode01 = GET_MODE (op01);
4010
4011 /* Find out the number of elements of each operand.
4012 Since the concatenated result has a constant number
4013 of elements, the operands must too. */
4014 n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
4015 n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
4016
4017 gcc_assert (n_elts == n_elts00 + n_elts01);
4018
4019 /* Select correct operand of VEC_CONCAT
4020 and adjust selector. */
4021 if (elem < n_elts01)
4022 tmp_op = op00;
4023 else
4024 {
4025 tmp_op = op01;
4026 elem -= n_elts00;
4027 }
4028 }
4029 else
4030 tmp_op = op0;
4031
4032 vec = rtvec_alloc (1);
4033 RTVEC_ELT (vec, 0) = GEN_INT (elem);
4034
4035 tmp = gen_rtx_fmt_ee (code, mode,
4036 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
4037 return tmp;
4038 }
4039 }
4040 else
4041 {
4042 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4043 gcc_assert (GET_MODE_INNER (mode)
4044 == GET_MODE_INNER (GET_MODE (trueop0)));
4045 gcc_assert (GET_CODE (trueop1) == PARALLEL);
4046
4047 if (vec_duplicate_p (trueop0, &elt0))
4048 /* It doesn't matter which elements are selected by trueop1,
4049 because they are all the same. */
4050 return gen_vec_duplicate (mode, elt0);
4051
4052 if (GET_CODE (trueop0) == CONST_VECTOR)
4053 {
4054 unsigned n_elts = XVECLEN (trueop1, 0);
4055 rtvec v = rtvec_alloc (n_elts);
4056 unsigned int i;
4057
4058 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4059 for (i = 0; i < n_elts; i++)
4060 {
4061 rtx x = XVECEXP (trueop1, 0, i);
4062
4063 if (!CONST_INT_P (x))
4064 return 0;
4065
4066 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
4067 INTVAL (x));
4068 }
4069
4070 return gen_rtx_CONST_VECTOR (mode, v);
4071 }
4072
4073 /* Recognize the identity. */
4074 if (GET_MODE (trueop0) == mode)
4075 {
4076 bool maybe_ident = true;
4077 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4078 {
4079 rtx j = XVECEXP (trueop1, 0, i);
4080 if (!CONST_INT_P (j) || INTVAL (j) != i)
4081 {
4082 maybe_ident = false;
4083 break;
4084 }
4085 }
4086 if (maybe_ident)
4087 return trueop0;
4088 }
4089
4090 /* If we build {a,b} then permute it, build the result directly. */
4091 if (XVECLEN (trueop1, 0) == 2
4092 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4093 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4094 && GET_CODE (trueop0) == VEC_CONCAT
4095 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
4096 && GET_MODE (XEXP (trueop0, 0)) == mode
4097 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
4098 && GET_MODE (XEXP (trueop0, 1)) == mode)
4099 {
4100 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4101 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4102 rtx subop0, subop1;
4103
4104 gcc_assert (i0 < 4 && i1 < 4);
4105 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
4106 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
4107
4108 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4109 }
4110
4111 if (XVECLEN (trueop1, 0) == 2
4112 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4113 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4114 && GET_CODE (trueop0) == VEC_CONCAT
4115 && GET_MODE (trueop0) == mode)
4116 {
4117 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4118 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4119 rtx subop0, subop1;
4120
4121 gcc_assert (i0 < 2 && i1 < 2);
4122 subop0 = XEXP (trueop0, i0);
4123 subop1 = XEXP (trueop0, i1);
4124
4125 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4126 }
4127
4128 /* If we select one half of a vec_concat, return that. */
4129 int l0, l1;
4130 if (GET_CODE (trueop0) == VEC_CONCAT
4131 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4132 .is_constant (&l0))
4133 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
4134 .is_constant (&l1))
4135 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4136 {
4137 rtx subop0 = XEXP (trueop0, 0);
4138 rtx subop1 = XEXP (trueop0, 1);
4139 machine_mode mode0 = GET_MODE (subop0);
4140 machine_mode mode1 = GET_MODE (subop1);
4141 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4142 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
4143 {
4144 bool success = true;
4145 for (int i = 1; i < l0; ++i)
4146 {
4147 rtx j = XVECEXP (trueop1, 0, i);
4148 if (!CONST_INT_P (j) || INTVAL (j) != i)
4149 {
4150 success = false;
4151 break;
4152 }
4153 }
4154 if (success)
4155 return subop0;
4156 }
4157 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
4158 {
4159 bool success = true;
4160 for (int i = 1; i < l1; ++i)
4161 {
4162 rtx j = XVECEXP (trueop1, 0, i);
4163 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
4164 {
4165 success = false;
4166 break;
4167 }
4168 }
4169 if (success)
4170 return subop1;
4171 }
4172 }
4173 }
4174
4175 if (XVECLEN (trueop1, 0) == 1
4176 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4177 && GET_CODE (trueop0) == VEC_CONCAT)
4178 {
4179 rtx vec = trueop0;
4180 offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
4181
4182 /* Try to find the element in the VEC_CONCAT. */
4183 while (GET_MODE (vec) != mode
4184 && GET_CODE (vec) == VEC_CONCAT)
4185 {
4186 poly_int64 vec_size;
4187
4188 if (CONST_INT_P (XEXP (vec, 0)))
4189 {
4190 /* vec_concat of two const_ints doesn't make sense with
4191 respect to modes. */
4192 if (CONST_INT_P (XEXP (vec, 1)))
4193 return 0;
4194
4195 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
4196 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
4197 }
4198 else
4199 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
4200
4201 if (known_lt (offset, vec_size))
4202 vec = XEXP (vec, 0);
4203 else if (known_ge (offset, vec_size))
4204 {
4205 offset -= vec_size;
4206 vec = XEXP (vec, 1);
4207 }
4208 else
4209 break;
4210 vec = avoid_constant_pool_reference (vec);
4211 }
4212
4213 if (GET_MODE (vec) == mode)
4214 return vec;
4215 }
4216
4217 /* If we select elements in a vec_merge that all come from the same
4218 operand, select from that operand directly. */
4219 if (GET_CODE (op0) == VEC_MERGE)
4220 {
4221 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
4222 if (CONST_INT_P (trueop02))
4223 {
4224 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
4225 bool all_operand0 = true;
4226 bool all_operand1 = true;
4227 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4228 {
4229 rtx j = XVECEXP (trueop1, 0, i);
4230 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
4231 all_operand1 = false;
4232 else
4233 all_operand0 = false;
4234 }
4235 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
4236 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
4237 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
4238 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
4239 }
4240 }
4241
4242 /* If we have two nested selects that are inverses of each
4243 other, replace them with the source operand. */
4244 if (GET_CODE (trueop0) == VEC_SELECT
4245 && GET_MODE (XEXP (trueop0, 0)) == mode)
4246 {
4247 rtx op0_subop1 = XEXP (trueop0, 1);
4248 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
4249 gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
4250
4251 /* Apply the outer ordering vector to the inner one. (The inner
4252 ordering vector is expressly permitted to be of a different
4253 length than the outer one.) If the result is { 0, 1, ..., n-1 }
4254 then the two VEC_SELECTs cancel. */
4255 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
4256 {
4257 rtx x = XVECEXP (trueop1, 0, i);
4258 if (!CONST_INT_P (x))
4259 return 0;
4260 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
4261 if (!CONST_INT_P (y) || i != INTVAL (y))
4262 return 0;
4263 }
4264 return XEXP (trueop0, 0);
4265 }
4266
4267 return 0;
4268 case VEC_CONCAT:
4269 {
4270 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
4271 ? GET_MODE (trueop0)
4272 : GET_MODE_INNER (mode));
4273 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
4274 ? GET_MODE (trueop1)
4275 : GET_MODE_INNER (mode));
4276
4277 gcc_assert (VECTOR_MODE_P (mode));
4278 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
4279 + GET_MODE_SIZE (op1_mode),
4280 GET_MODE_SIZE (mode)));
4281
4282 if (VECTOR_MODE_P (op0_mode))
4283 gcc_assert (GET_MODE_INNER (mode)
4284 == GET_MODE_INNER (op0_mode));
4285 else
4286 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
4287
4288 if (VECTOR_MODE_P (op1_mode))
4289 gcc_assert (GET_MODE_INNER (mode)
4290 == GET_MODE_INNER (op1_mode));
4291 else
4292 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
4293
4294 unsigned int n_elts, in_n_elts;
4295 if ((GET_CODE (trueop0) == CONST_VECTOR
4296 || CONST_SCALAR_INT_P (trueop0)
4297 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
4298 && (GET_CODE (trueop1) == CONST_VECTOR
4299 || CONST_SCALAR_INT_P (trueop1)
4300 || CONST_DOUBLE_AS_FLOAT_P (trueop1))
4301 && GET_MODE_NUNITS (mode).is_constant (&n_elts)
4302 && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
4303 {
4304 rtvec v = rtvec_alloc (n_elts);
4305 unsigned int i;
4306 for (i = 0; i < n_elts; i++)
4307 {
4308 if (i < in_n_elts)
4309 {
4310 if (!VECTOR_MODE_P (op0_mode))
4311 RTVEC_ELT (v, i) = trueop0;
4312 else
4313 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
4314 }
4315 else
4316 {
4317 if (!VECTOR_MODE_P (op1_mode))
4318 RTVEC_ELT (v, i) = trueop1;
4319 else
4320 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
4321 i - in_n_elts);
4322 }
4323 }
4324
4325 return gen_rtx_CONST_VECTOR (mode, v);
4326 }
4327
4328 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4329 Restrict the transformation to avoid generating a VEC_SELECT with a
4330 mode unrelated to its operand. */
4331 if (GET_CODE (trueop0) == VEC_SELECT
4332 && GET_CODE (trueop1) == VEC_SELECT
4333 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
4334 && GET_MODE (XEXP (trueop0, 0)) == mode)
4335 {
4336 rtx par0 = XEXP (trueop0, 1);
4337 rtx par1 = XEXP (trueop1, 1);
4338 int len0 = XVECLEN (par0, 0);
4339 int len1 = XVECLEN (par1, 0);
4340 rtvec vec = rtvec_alloc (len0 + len1);
4341 for (int i = 0; i < len0; i++)
4342 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
4343 for (int i = 0; i < len1; i++)
4344 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
4345 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
4346 gen_rtx_PARALLEL (VOIDmode, vec));
4347 }
4348 }
4349 return 0;
4350
4351 default:
4352 gcc_unreachable ();
4353 }
4354
4355 if (mode == GET_MODE (op0)
4356 && mode == GET_MODE (op1)
4357 && vec_duplicate_p (op0, &elt0)
4358 && vec_duplicate_p (op1, &elt1))
4359 {
4360 /* Try applying the operator to ELT and see if that simplifies.
4361 We can duplicate the result if so.
4362
4363 The reason we don't use simplify_gen_binary is that it isn't
4364 necessarily a win to convert things like:
4365
4366 (plus:V (vec_duplicate:V (reg:S R1))
4367 (vec_duplicate:V (reg:S R2)))
4368
4369 to:
4370
4371 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4372
4373 The first might be done entirely in vector registers while the
4374 second might need a move between register files. */
4375 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4376 elt0, elt1);
4377 if (tem)
4378 return gen_vec_duplicate (mode, tem);
4379 }
4380
4381 return 0;
4382 }
4383
4384 /* Return true if binary operation OP distributes over addition in operand
4385 OPNO, with the other operand being held constant. OPNO counts from 1. */
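 /* For instance, ASHIFT distributes over addition only in operand 1:
    (a + b) << c equals (a << c) + (b << c) in modular arithmetic,
    whereas a << (b + c) is (a << b) << c rather than a sum.  */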
4386
4387 static bool
4388 distributes_over_addition_p (rtx_code op, int opno)
4389 {
4390 switch (op)
4391 {
4392 case PLUS:
4393 case MINUS:
4394 case MULT:
4395 return true;
4396
4397 case ASHIFT:
4398 return opno == 1;
4399
4400 default:
4401 return false;
4402 }
4403 }
4404
4405 rtx
4406 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4407 rtx op0, rtx op1)
4408 {
4409 if (VECTOR_MODE_P (mode)
4410 && code != VEC_CONCAT
4411 && GET_CODE (op0) == CONST_VECTOR
4412 && GET_CODE (op1) == CONST_VECTOR)
4413 {
4414 bool step_ok_p;
4415 if (CONST_VECTOR_STEPPED_P (op0)
4416 && CONST_VECTOR_STEPPED_P (op1))
4417 /* We can operate directly on the encoding if:
4418
4419 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4420 implies
4421 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4422
4423 Addition and subtraction are the supported operators
4424 for which this is true. */
4425 step_ok_p = (code == PLUS || code == MINUS);
4426 else if (CONST_VECTOR_STEPPED_P (op0))
4427 /* We can operate directly on stepped encodings if:
4428
4429 a3 - a2 == a2 - a1
4430 implies:
4431 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4432
4433 which is true if (x -> x op c) distributes over addition. */
4434 step_ok_p = distributes_over_addition_p (code, 1);
4435 else
4436 /* Similarly in reverse. */
4437 step_ok_p = distributes_over_addition_p (code, 2);
4438 rtx_vector_builder builder;
4439 if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
4440 return 0;
4441
4442 unsigned int count = builder.encoded_nelts ();
4443 for (unsigned int i = 0; i < count; i++)
4444 {
4445 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4446 CONST_VECTOR_ELT (op0, i),
4447 CONST_VECTOR_ELT (op1, i));
4448 if (!x || !valid_for_const_vector_p (mode, x))
4449 return 0;
4450 builder.quick_push (x);
4451 }
4452 return builder.build ();
4453 }
4454
4455 if (VECTOR_MODE_P (mode)
4456 && code == VEC_CONCAT
4457 && (CONST_SCALAR_INT_P (op0)
4458 || CONST_FIXED_P (op0)
4459 || CONST_DOUBLE_AS_FLOAT_P (op0))
4460 && (CONST_SCALAR_INT_P (op1)
4461 || CONST_DOUBLE_AS_FLOAT_P (op1)
4462 || CONST_FIXED_P (op1)))
4463 {
4464 /* Both inputs have a constant number of elements, so the result
4465 must too. */
4466 unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4467 rtvec v = rtvec_alloc (n_elts);
4468
4469 gcc_assert (n_elts >= 2);
4470 if (n_elts == 2)
4471 {
4472 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4473 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4474
4475 RTVEC_ELT (v, 0) = op0;
4476 RTVEC_ELT (v, 1) = op1;
4477 }
4478 else
4479 {
4480 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4481 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4482 unsigned i;
4483
4484 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4485 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4486 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4487
4488 for (i = 0; i < op0_n_elts; ++i)
4489 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4490 for (i = 0; i < op1_n_elts; ++i)
4491 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4492 }
4493
4494 return gen_rtx_CONST_VECTOR (mode, v);
4495 }
4496
4497 if (SCALAR_FLOAT_MODE_P (mode)
4498 && CONST_DOUBLE_AS_FLOAT_P (op0)
4499 && CONST_DOUBLE_AS_FLOAT_P (op1)
4500 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4501 {
4502 if (code == AND
4503 || code == IOR
4504 || code == XOR)
4505 {
4506 long tmp0[4];
4507 long tmp1[4];
4508 REAL_VALUE_TYPE r;
4509 int i;
4510
4511 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4512 GET_MODE (op0));
4513 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4514 GET_MODE (op1));
4515 for (i = 0; i < 4; i++)
4516 {
4517 switch (code)
4518 {
4519 case AND:
4520 tmp0[i] &= tmp1[i];
4521 break;
4522 case IOR:
4523 tmp0[i] |= tmp1[i];
4524 break;
4525 case XOR:
4526 tmp0[i] ^= tmp1[i];
4527 break;
4528 default:
4529 gcc_unreachable ();
4530 }
4531 }
4532 real_from_target (&r, tmp0, mode);
4533 return const_double_from_real_value (r, mode);
4534 }
4535 else
4536 {
4537 REAL_VALUE_TYPE f0, f1, value, result;
4538 const REAL_VALUE_TYPE *opr0, *opr1;
4539 bool inexact;
4540
4541 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4542 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4543
4544 if (HONOR_SNANS (mode)
4545 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4546 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4547 return 0;
4548
4549 real_convert (&f0, mode, opr0);
4550 real_convert (&f1, mode, opr1);
4551
4552 if (code == DIV
4553 && real_equal (&f1, &dconst0)
4554 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4555 return 0;
4556
4557 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4558 && flag_trapping_math
4559 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4560 {
4561 int s0 = REAL_VALUE_NEGATIVE (f0);
4562 int s1 = REAL_VALUE_NEGATIVE (f1);
4563
4564 switch (code)
4565 {
4566 case PLUS:
4567 /* Inf + -Inf = NaN plus exception. */
4568 if (s0 != s1)
4569 return 0;
4570 break;
4571 case MINUS:
4572 /* Inf - Inf = NaN plus exception. */
4573 if (s0 == s1)
4574 return 0;
4575 break;
4576 case DIV:
4577 /* Inf / Inf = NaN plus exception. */
4578 return 0;
4579 default:
4580 break;
4581 }
4582 }
4583
4584 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4585 && flag_trapping_math
4586 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4587 || (REAL_VALUE_ISINF (f1)
4588 && real_equal (&f0, &dconst0))))
4589 /* Inf * 0 = NaN plus exception. */
4590 return 0;
4591
4592 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4593 &f0, &f1);
4594 real_convert (&result, mode, &value);
4595
4598 4596 /* Don't constant fold this floating point operation if
4599 4597 the result has overflowed and flag_trapping_math is set. */
4598
4599 if (flag_trapping_math
4600 && MODE_HAS_INFINITIES (mode)
4601 && REAL_VALUE_ISINF (result)
4602 && !REAL_VALUE_ISINF (f0)
4603 && !REAL_VALUE_ISINF (f1))
4604 /* Overflow plus exception. */
4605 return 0;
4606
4607 /* Don't constant fold this floating point operation if the
4610 4608 result may depend upon the run-time rounding mode and
4609 flag_rounding_math is set, or if GCC's software emulation
4610 is unable to accurately represent the result. */
4611
4612 if ((flag_rounding_math
4613 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4614 && (inexact || !real_identical (&result, &value)))
4615 return NULL_RTX;
4616
4617 return const_double_from_real_value (result, mode);
4618 }
4619 }
4620
4621 /* We can fold some multi-word operations. */
4622 scalar_int_mode int_mode;
4623 if (is_a <scalar_int_mode> (mode, &int_mode)
4624 && CONST_SCALAR_INT_P (op0)
4625 && CONST_SCALAR_INT_P (op1)
4626 && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
4627 {
4628 wide_int result;
4629 wi::overflow_type overflow;
4630 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4631 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4632
4633 #if TARGET_SUPPORTS_WIDE_INT == 0
4636 4634 /* This assert keeps the simplification from producing a result
4637 4635 that cannot be represented in a CONST_DOUBLE, but a lot of
4638 4636 upstream callers expect that this function never fails to
4639 4637 simplify something, so if you added this to the test
4640 4638 above, the code would die later anyway. If this assert
4641 4639 fires, you just need to make the port support wide int. */
4640 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4641 #endif
4642 switch (code)
4643 {
4644 case MINUS:
4645 result = wi::sub (pop0, pop1);
4646 break;
4647
4648 case PLUS:
4649 result = wi::add (pop0, pop1);
4650 break;
4651
4652 case MULT:
4653 result = wi::mul (pop0, pop1);
4654 break;
4655
4656 case DIV:
4657 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4658 if (overflow)
4659 return NULL_RTX;
4660 break;
4661
4662 case MOD:
4663 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4664 if (overflow)
4665 return NULL_RTX;
4666 break;
4667
4668 case UDIV:
4669 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4670 if (overflow)
4671 return NULL_RTX;
4672 break;
4673
4674 case UMOD:
4675 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4676 if (overflow)
4677 return NULL_RTX;
4678 break;
4679
4680 case AND:
4681 result = wi::bit_and (pop0, pop1);
4682 break;
4683
4684 case IOR:
4685 result = wi::bit_or (pop0, pop1);
4686 break;
4687
4688 case XOR:
4689 result = wi::bit_xor (pop0, pop1);
4690 break;
4691
4692 case SMIN:
4693 result = wi::smin (pop0, pop1);
4694 break;
4695
4696 case SMAX:
4697 result = wi::smax (pop0, pop1);
4698 break;
4699
4700 case UMIN:
4701 result = wi::umin (pop0, pop1);
4702 break;
4703
4704 case UMAX:
4705 result = wi::umax (pop0, pop1);
4706 break;
4707
4708 case LSHIFTRT:
4709 case ASHIFTRT:
4710 case ASHIFT:
4711 {
4712 wide_int wop1 = pop1;
4713 if (SHIFT_COUNT_TRUNCATED)
4714 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4715 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4716 return NULL_RTX;
4717
4718 switch (code)
4719 {
4720 case LSHIFTRT:
4721 result = wi::lrshift (pop0, wop1);
4722 break;
4723
4724 case ASHIFTRT:
4725 result = wi::arshift (pop0, wop1);
4726 break;
4727
4728 case ASHIFT:
4729 result = wi::lshift (pop0, wop1);
4730 break;
4731
4732 default:
4733 gcc_unreachable ();
4734 }
4735 break;
4736 }
4737 case ROTATE:
4738 case ROTATERT:
4739 {
4740 if (wi::neg_p (pop1))
4741 return NULL_RTX;
4742
4743 switch (code)
4744 {
4745 case ROTATE:
4746 result = wi::lrotate (pop0, pop1);
4747 break;
4748
4749 case ROTATERT:
4750 result = wi::rrotate (pop0, pop1);
4751 break;
4752
4753 default:
4754 gcc_unreachable ();
4755 }
4756 break;
4757 }
4758 default:
4759 return NULL_RTX;
4760 }
4761 return immed_wide_int_const (result, int_mode);
4762 }
4763
4764 /* Handle polynomial integers. */
4765 if (NUM_POLY_INT_COEFFS > 1
4766 && is_a <scalar_int_mode> (mode, &int_mode)
4767 && poly_int_rtx_p (op0)
4768 && poly_int_rtx_p (op1))
4769 {
4770 poly_wide_int result;
4771 switch (code)
4772 {
4773 case PLUS:
4774 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4775 break;
4776
4777 case MINUS:
4778 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4779 break;
4780
4781 case MULT:
4782 if (CONST_SCALAR_INT_P (op1))
4783 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4784 else
4785 return NULL_RTX;
4786 break;
4787
4788 case ASHIFT:
4789 if (CONST_SCALAR_INT_P (op1))
4790 {
4791 wide_int shift = rtx_mode_t (op1, mode);
4792 if (SHIFT_COUNT_TRUNCATED)
4793 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4794 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4795 return NULL_RTX;
4796 result = wi::to_poly_wide (op0, mode) << shift;
4797 }
4798 else
4799 return NULL_RTX;
4800 break;
4801
4802 case IOR:
4803 if (!CONST_SCALAR_INT_P (op1)
4804 || !can_ior_p (wi::to_poly_wide (op0, mode),
4805 rtx_mode_t (op1, mode), &result))
4806 return NULL_RTX;
4807 break;
4808
4809 default:
4810 return NULL_RTX;
4811 }
4812 return immed_wide_int_const (result, int_mode);
4813 }
4814
4815 return NULL_RTX;
4816 }
4817
4818
4819 \f
4820 /* Return a positive integer if X should sort after Y. The value
4821 returned is 1 if and only if X and Y are both regs. */
4822
4823 static int
4824 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4825 {
4826 int result;
4827
4828 result = (commutative_operand_precedence (y)
4829 - commutative_operand_precedence (x));
4830 if (result)
4831 return result + result;
4832
4833 /* Group together equal REGs to do more simplification. */
4834 if (REG_P (x) && REG_P (y))
4835 return REGNO (x) > REGNO (y);
4836
4837 return 0;
4838 }
4839
4840 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4841 operands may be another PLUS or MINUS.
4842
4845 4843 Rather than test for specific cases, we do this by a brute-force method
4844 and do all possible simplifications until no more changes occur. Then
4845 we rebuild the operation.
4846
4847 May return NULL_RTX when no changes were made. */
4848
4849 static rtx
4850 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4851 rtx op1)
4852 {
4853 struct simplify_plus_minus_op_data
4854 {
4855 rtx op;
4856 short neg;
4857 } ops[16];
4858 rtx result, tem;
4859 int n_ops = 2;
4860 int changed, n_constants, canonicalized = 0;
4861 int i, j;
4862
4863 memset (ops, 0, sizeof ops);
4864
4865 /* Set up the two operands and then expand them until nothing has been
4866 changed. If we run out of room in our array, give up; this should
4867 almost never happen. */
4868
4869 ops[0].op = op0;
4870 ops[0].neg = 0;
4871 ops[1].op = op1;
4872 ops[1].neg = (code == MINUS);
4873
4874 do
4875 {
4876 changed = 0;
4877 n_constants = 0;
4878
4879 for (i = 0; i < n_ops; i++)
4880 {
4881 rtx this_op = ops[i].op;
4882 int this_neg = ops[i].neg;
4883 enum rtx_code this_code = GET_CODE (this_op);
4884
4885 switch (this_code)
4886 {
4887 case PLUS:
4888 case MINUS:
4889 if (n_ops == ARRAY_SIZE (ops))
4890 return NULL_RTX;
4891
4892 ops[n_ops].op = XEXP (this_op, 1);
4893 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4894 n_ops++;
4895
4896 ops[i].op = XEXP (this_op, 0);
4897 changed = 1;
4900 4898 /* If this operand was negated then we will potentially
4901 4899 canonicalize the expression. Similarly, if we don't
4902 4900 place the operands adjacent to each other, we're re-ordering
4903 4901 the expression and thus might be performing a
4904 4902 canonicalization. Ignore register re-ordering.
4903 ??? It might be better to shuffle the ops array here,
4904 but then (plus (plus (A, B), plus (C, D))) wouldn't
4905 be seen as non-canonical. */
4906 if (this_neg
4907 || (i != n_ops - 2
4908 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4909 canonicalized = 1;
4910 break;
4911
4912 case NEG:
4913 ops[i].op = XEXP (this_op, 0);
4914 ops[i].neg = ! this_neg;
4915 changed = 1;
4916 canonicalized = 1;
4917 break;
4918
4919 case CONST:
4920 if (n_ops != ARRAY_SIZE (ops)
4921 && GET_CODE (XEXP (this_op, 0)) == PLUS
4922 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4923 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4924 {
4925 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4926 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4927 ops[n_ops].neg = this_neg;
4928 n_ops++;
4929 changed = 1;
4930 canonicalized = 1;
4931 }
4932 break;
4933
4934 case NOT:
4935 /* ~a -> (-a - 1) */
4936 if (n_ops != ARRAY_SIZE (ops))
4937 {
4938 ops[n_ops].op = CONSTM1_RTX (mode);
4939 ops[n_ops++].neg = this_neg;
4940 ops[i].op = XEXP (this_op, 0);
4941 ops[i].neg = !this_neg;
4942 changed = 1;
4943 canonicalized = 1;
4944 }
4945 break;
4946
4947 CASE_CONST_SCALAR_INT:
4948 case CONST_POLY_INT:
4949 n_constants++;
4950 if (this_neg)
4951 {
4952 ops[i].op = neg_poly_int_rtx (mode, this_op);
4953 ops[i].neg = 0;
4954 changed = 1;
4955 canonicalized = 1;
4956 }
4957 break;
4958
4959 default:
4960 break;
4961 }
4962 }
4963 }
4964 while (changed);
4965
4966 if (n_constants > 1)
4967 canonicalized = 1;
4968
4969 gcc_assert (n_ops >= 2);
4970
4971 /* If we only have two operands, we can avoid the loops. */
4972 if (n_ops == 2)
4973 {
4974 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4975 rtx lhs, rhs;
4976
4977 /* Get the two operands. Be careful with the order, especially for
4978 the cases where code == MINUS. */
4979 if (ops[0].neg && ops[1].neg)
4980 {
4981 lhs = gen_rtx_NEG (mode, ops[0].op);
4982 rhs = ops[1].op;
4983 }
4984 else if (ops[0].neg)
4985 {
4986 lhs = ops[1].op;
4987 rhs = ops[0].op;
4988 }
4989 else
4990 {
4991 lhs = ops[0].op;
4992 rhs = ops[1].op;
4993 }
4994
4995 return simplify_const_binary_operation (code, mode, lhs, rhs);
4996 }
4997
4998 /* Now simplify each pair of operands until nothing changes. */
4999 while (1)
5000 {
5001 /* Insertion sort is good enough for a small array. */
5002 for (i = 1; i < n_ops; i++)
5003 {
5004 struct simplify_plus_minus_op_data save;
5005 int cmp;
5006
5007 j = i - 1;
5008 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
5009 if (cmp <= 0)
5010 continue;
5011 /* Just swapping registers doesn't count as canonicalization. */
5012 if (cmp != 1)
5013 canonicalized = 1;
5014
5015 save = ops[i];
5016 do
5017 ops[j + 1] = ops[j];
5018 while (j--
5019 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
5020 ops[j + 1] = save;
5021 }
5022
5023 changed = 0;
5024 for (i = n_ops - 1; i > 0; i--)
5025 for (j = i - 1; j >= 0; j--)
5026 {
5027 rtx lhs = ops[j].op, rhs = ops[i].op;
5028 int lneg = ops[j].neg, rneg = ops[i].neg;
5029
5030 if (lhs != 0 && rhs != 0)
5031 {
5032 enum rtx_code ncode = PLUS;
5033
5034 if (lneg != rneg)
5035 {
5036 ncode = MINUS;
5037 if (lneg)
5038 std::swap (lhs, rhs);
5039 }
5040 else if (swap_commutative_operands_p (lhs, rhs))
5041 std::swap (lhs, rhs);
5042
5043 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
5044 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
5045 {
5046 rtx tem_lhs, tem_rhs;
5047
5048 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
5049 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
5050 tem = simplify_binary_operation (ncode, mode, tem_lhs,
5051 tem_rhs);
5052
5053 if (tem && !CONSTANT_P (tem))
5054 tem = gen_rtx_CONST (GET_MODE (tem), tem);
5055 }
5056 else
5057 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
5058
5059 if (tem)
5060 {
5061 /* Reject "simplifications" that just wrap the two
5062 arguments in a CONST. Failure to do so can result
5063 in infinite recursion with simplify_binary_operation
5064 when it calls us to simplify CONST operations.
5065 Also, if we find such a simplification, don't try
5068 5066 any more combinations with this rhs: we must have
5069 5067 something like symbol+offset, i.e. one of the
5068 trivial CONST expressions we handle later. */
5069 if (GET_CODE (tem) == CONST
5070 && GET_CODE (XEXP (tem, 0)) == ncode
5071 && XEXP (XEXP (tem, 0), 0) == lhs
5072 && XEXP (XEXP (tem, 0), 1) == rhs)
5073 break;
5074 lneg &= rneg;
5075 if (GET_CODE (tem) == NEG)
5076 tem = XEXP (tem, 0), lneg = !lneg;
5077 if (poly_int_rtx_p (tem) && lneg)
5078 tem = neg_poly_int_rtx (mode, tem), lneg = 0;
5079
5080 ops[i].op = tem;
5081 ops[i].neg = lneg;
5082 ops[j].op = NULL_RTX;
5083 changed = 1;
5084 canonicalized = 1;
5085 }
5086 }
5087 }
5088
5089 if (!changed)
5090 break;
5091
5092 /* Pack all the operands to the lower-numbered entries. */
5093 for (i = 0, j = 0; j < n_ops; j++)
5094 if (ops[j].op)
5095 {
5096 ops[i] = ops[j];
5097 i++;
5098 }
5099 n_ops = i;
5100 }
5101
5104 5102 /* If nothing changed, check whether rematerialization of the rtl
5105 5103 instructions is still required. */
5104 if (!canonicalized)
5105 {
5108 5106 /* Perform rematerialization only if all operands are registers and
5109 5107 all operations are PLUS. */
5108 /* ??? Also disallow (non-global, non-frame) fixed registers to work
5109 around rs6000 and how it uses the CA register. See PR67145. */
5110 for (i = 0; i < n_ops; i++)
5111 if (ops[i].neg
5112 || !REG_P (ops[i].op)
5113 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
5114 && fixed_regs[REGNO (ops[i].op)]
5115 && !global_regs[REGNO (ops[i].op)]
5116 && ops[i].op != frame_pointer_rtx
5117 && ops[i].op != arg_pointer_rtx
5118 && ops[i].op != stack_pointer_rtx))
5119 return NULL_RTX;
5120 goto gen_result;
5121 }
5122
5123 /* Create (minus -C X) instead of (neg (const (plus X C))). */
5124 if (n_ops == 2
5125 && CONST_INT_P (ops[1].op)
5126 && CONSTANT_P (ops[0].op)
5127 && ops[0].neg)
5128 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
5129
5130 /* We suppressed creation of trivial CONST expressions in the
5131 combination loop to avoid recursion. Create one manually now.
5132 The combination loop should have ensured that there is exactly
5133 one CONST_INT, and the sort will have ensured that it is last
5134 in the array and that any other constant will be next-to-last. */
5135
5136 if (n_ops > 1
5137 && poly_int_rtx_p (ops[n_ops - 1].op)
5138 && CONSTANT_P (ops[n_ops - 2].op))
5139 {
5140 rtx value = ops[n_ops - 1].op;
5141 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
5142 value = neg_poly_int_rtx (mode, value);
5143 if (CONST_INT_P (value))
5144 {
5145 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
5146 INTVAL (value));
5147 n_ops--;
5148 }
5149 }
5150
5151 /* Put a non-negated operand first, if possible. */
5152
5153 for (i = 0; i < n_ops && ops[i].neg; i++)
5154 continue;
5155 if (i == n_ops)
5156 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
5157 else if (i != 0)
5158 {
5159 tem = ops[0].op;
5160 ops[0] = ops[i];
5161 ops[i].op = tem;
5162 ops[i].neg = 1;
5163 }
5164
5165 /* Now make the result by performing the requested operations. */
5166 gen_result:
5167 result = ops[0].op;
5168 for (i = 1; i < n_ops; i++)
5169 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
5170 mode, result, ops[i].op);
5171
5172 return result;
5173 }
5174
5175 /* Check whether an operand is suitable for calling simplify_plus_minus. */
5176 static bool
5177 plus_minus_operand_p (const_rtx x)
5178 {
5179 return GET_CODE (x) == PLUS
5180 || GET_CODE (x) == MINUS
5181 || (GET_CODE (x) == CONST
5182 && GET_CODE (XEXP (x, 0)) == PLUS
5183 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
5184 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
5185 }
5186
5187 /* Like simplify_binary_operation except used for relational operators.
5188 MODE is the mode of the result. If MODE is VOIDmode, both operands must
5189 not also be VOIDmode.
5190
5193 5191 CMP_MODE specifies the mode in which the comparison is done, so it is
5192 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
5193 the operands or, if both are VOIDmode, the operands are compared in
5194 "infinite precision". */
5195 rtx
5196 simplify_relational_operation (enum rtx_code code, machine_mode mode,
5197 machine_mode cmp_mode, rtx op0, rtx op1)
5198 {
5199 rtx tem, trueop0, trueop1;
5200
5201 if (cmp_mode == VOIDmode)
5202 cmp_mode = GET_MODE (op0);
5203 if (cmp_mode == VOIDmode)
5204 cmp_mode = GET_MODE (op1);
5205
5206 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
5207 if (tem)
5208 {
5209 if (SCALAR_FLOAT_MODE_P (mode))
5210 {
5211 if (tem == const0_rtx)
5212 return CONST0_RTX (mode);
5213 #ifdef FLOAT_STORE_FLAG_VALUE
5214 {
5215 REAL_VALUE_TYPE val;
5216 val = FLOAT_STORE_FLAG_VALUE (mode);
5217 return const_double_from_real_value (val, mode);
5218 }
5219 #else
5220 return NULL_RTX;
5221 #endif
5222 }
5223 if (VECTOR_MODE_P (mode))
5224 {
5225 if (tem == const0_rtx)
5226 return CONST0_RTX (mode);
5227 #ifdef VECTOR_STORE_FLAG_VALUE
5228 {
5229 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
5230 if (val == NULL_RTX)
5231 return NULL_RTX;
5232 if (val == const1_rtx)
5233 return CONST1_RTX (mode);
5234
5235 return gen_const_vec_duplicate (mode, val);
5236 }
5237 #else
5238 return NULL_RTX;
5239 #endif
5240 }
5243 5241 /* For a vector comparison with a scalar int result, it is unknown
5244 5242 whether the target means a comparison into an integral bitmask, a
5245 5243 comparison whose whole result is const_true_rtx only if all element
5246 5244 comparisons are true, or one whose whole result is const_true_rtx
5247 5245 if any element comparison is true. For const0_rtx all the cases are the same. */
5246 if (VECTOR_MODE_P (cmp_mode)
5247 && SCALAR_INT_MODE_P (mode)
5248 && tem == const_true_rtx)
5249 return NULL_RTX;
5250
5251 return tem;
5252 }
5253
5254 /* For the following tests, ensure const0_rtx is op1. */
5255 if (swap_commutative_operands_p (op0, op1)
5256 || (op0 == const0_rtx && op1 != const0_rtx))
5257 std::swap (op0, op1), code = swap_condition (code);
5258
5259 /* If op0 is a compare, extract the comparison arguments from it. */
5260 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5261 return simplify_gen_relational (code, mode, VOIDmode,
5262 XEXP (op0, 0), XEXP (op0, 1));
5263
5264 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
5265 || CC0_P (op0))
5266 return NULL_RTX;
5267
5268 trueop0 = avoid_constant_pool_reference (op0);
5269 trueop1 = avoid_constant_pool_reference (op1);
5270 return simplify_relational_operation_1 (code, mode, cmp_mode,
5271 trueop0, trueop1);
5272 }
5273
5274 /* This part of simplify_relational_operation is only used when CMP_MODE
5275 is not in class MODE_CC (i.e. it is a real comparison).
5276
5279 5277 MODE is the mode of the result, while CMP_MODE specifies the mode
5280 5278 in which the comparison is done, so it is the mode of the operands. */
5279
5280 static rtx
5281 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
5282 machine_mode cmp_mode, rtx op0, rtx op1)
5283 {
5284 enum rtx_code op0code = GET_CODE (op0);
5285
5286 if (op1 == const0_rtx && COMPARISON_P (op0))
5287 {
5288 /* If op0 is a comparison, extract the comparison arguments
5289 from it. */
5290 if (code == NE)
5291 {
5292 if (GET_MODE (op0) == mode)
5293 return simplify_rtx (op0);
5294 else
5295 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
5296 XEXP (op0, 0), XEXP (op0, 1));
5297 }
5298 else if (code == EQ)
5299 {
5300 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
5301 if (new_code != UNKNOWN)
5302 return simplify_gen_relational (new_code, mode, VOIDmode,
5303 XEXP (op0, 0), XEXP (op0, 1));
5304 }
5305 }
5306
5307 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5308 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
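  /* For example (with an arbitrarily chosen C, in a 32-bit mode),
     (ltu (plus a (const_int 4)) (const_int 4)) becomes
     (geu a (const_int -4)): the sum is below 4 exactly when the
     addition wrapped, i.e. when a is at least 0xfffffffc.  */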
5309 if ((code == LTU || code == GEU)
5310 && GET_CODE (op0) == PLUS
5311 && CONST_INT_P (XEXP (op0, 1))
5312 && (rtx_equal_p (op1, XEXP (op0, 0))
5313 || rtx_equal_p (op1, XEXP (op0, 1)))
5314 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5315 && XEXP (op0, 1) != const0_rtx)
5316 {
5317 rtx new_cmp
5318 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5319 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
5320 cmp_mode, XEXP (op0, 0), new_cmp);
5321 }
5322
5323 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5324 transformed into (LTU a -C). */
5325 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
5326 && CONST_INT_P (XEXP (op0, 1))
5327 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
5328 && XEXP (op0, 1) != const0_rtx)
5329 {
5330 rtx new_cmp
5331 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5332 return simplify_gen_relational (LTU, mode, cmp_mode,
5333 XEXP (op0, 0), new_cmp);
5334 }
5335
5336 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5337 if ((code == LTU || code == GEU)
5338 && GET_CODE (op0) == PLUS
5339 && rtx_equal_p (op1, XEXP (op0, 1))
5340 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5341 && !rtx_equal_p (op1, XEXP (op0, 0)))
5342 return simplify_gen_relational (code, mode, cmp_mode, op0,
5343 copy_rtx (XEXP (op0, 0)));
5344
5345 if (op1 == const0_rtx)
5346 {
5347 /* Canonicalize (GTU x 0) as (NE x 0). */
5348 if (code == GTU)
5349 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
5350 /* Canonicalize (LEU x 0) as (EQ x 0). */
5351 if (code == LEU)
5352 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
5353 }
5354 else if (op1 == const1_rtx)
5355 {
5356 switch (code)
5357 {
5358 case GE:
5359 /* Canonicalize (GE x 1) as (GT x 0). */
5360 return simplify_gen_relational (GT, mode, cmp_mode,
5361 op0, const0_rtx);
5362 case GEU:
5363 /* Canonicalize (GEU x 1) as (NE x 0). */
5364 return simplify_gen_relational (NE, mode, cmp_mode,
5365 op0, const0_rtx);
5366 case LT:
5367 /* Canonicalize (LT x 1) as (LE x 0). */
5368 return simplify_gen_relational (LE, mode, cmp_mode,
5369 op0, const0_rtx);
5370 case LTU:
5371 /* Canonicalize (LTU x 1) as (EQ x 0). */
5372 return simplify_gen_relational (EQ, mode, cmp_mode,
5373 op0, const0_rtx);
5374 default:
5375 break;
5376 }
5377 }
5378 else if (op1 == constm1_rtx)
5379 {
5380 /* Canonicalize (LE x -1) as (LT x 0). */
5381 if (code == LE)
5382 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5383 /* Canonicalize (GT x -1) as (GE x 0). */
5384 if (code == GT)
5385 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
5386 }
5387
5388 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
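  /* E.g. (constants purely illustrative), (eq (plus x (const_int 3))
     (const_int 10)) simplifies to (eq x (const_int 7)).  */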
5389 if ((code == EQ || code == NE)
5390 && (op0code == PLUS || op0code == MINUS)
5391 && CONSTANT_P (op1)
5392 && CONSTANT_P (XEXP (op0, 1))
5393 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5394 {
5395 rtx x = XEXP (op0, 0);
5396 rtx c = XEXP (op0, 1);
5397 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5398 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5399
5400 /* Detect an infinitely recursive condition, where this simplification
5401 would keep oscillating between:
5402 A + B == C <---> C - B == A,
5403 where A, B, and C are all non-simplifiable constant expressions,
5404 usually SYMBOL_REFs. */
5405 if (GET_CODE (tem) == invcode
5406 && CONSTANT_P (x)
5407 && rtx_equal_p (c, XEXP (tem, 1)))
5408 return NULL_RTX;
5409
5410 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5411 }
5412
5413 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5414 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5415 scalar_int_mode int_mode, int_cmp_mode;
5416 if (code == NE
5417 && op1 == const0_rtx
5418 && is_int_mode (mode, &int_mode)
5419 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5420 /* ??? Work around BImode bugs in the ia64 backend. */
5421 && int_mode != BImode
5422 && int_cmp_mode != BImode
5423 && nonzero_bits (op0, int_cmp_mode) == 1
5424 && STORE_FLAG_VALUE == 1)
5425 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5426 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5427 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5428
5429 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5430 if ((code == EQ || code == NE)
5431 && op1 == const0_rtx
5432 && op0code == XOR)
5433 return simplify_gen_relational (code, mode, cmp_mode,
5434 XEXP (op0, 0), XEXP (op0, 1));
5435
5436 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5437 if ((code == EQ || code == NE)
5438 && op0code == XOR
5439 && rtx_equal_p (XEXP (op0, 0), op1)
5440 && !side_effects_p (XEXP (op0, 0)))
5441 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5442 CONST0_RTX (mode));
5443
5444 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5445 if ((code == EQ || code == NE)
5446 && op0code == XOR
5447 && rtx_equal_p (XEXP (op0, 1), op1)
5448 && !side_effects_p (XEXP (op0, 1)))
5449 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5450 CONST0_RTX (mode));
5451
5452 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5453 if ((code == EQ || code == NE)
5454 && op0code == XOR
5455 && CONST_SCALAR_INT_P (op1)
5456 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5457 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5458 simplify_gen_binary (XOR, cmp_mode,
5459 XEXP (op0, 1), op1));
5460
5461 /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction, or
5462 for constant folding if x/y is a constant. */
5463 if ((code == EQ || code == NE)
5464 && (op0code == AND || op0code == IOR)
5465 && !side_effects_p (op1)
5466 && op1 != CONST0_RTX (cmp_mode))
5467 {
5468 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5469 (eq/ne (and (not y) x) 0). */
5470 if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5471 || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5472 {
5473 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5474 cmp_mode);
5475 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5476
5477 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5478 CONST0_RTX (cmp_mode));
5479 }
5480
5481 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5482 (eq/ne (and (not x) y) 0). */
5483 if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5484 || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5485 {
5486 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5487 cmp_mode);
5488 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5489
5490 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5491 CONST0_RTX (cmp_mode));
5492 }
5493 }
5494
5495 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
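/* For example (illustrative), in SImode
   (eq:SI (bswap:SI x) (const_int 0x12345678))
   becomes (eq:SI x (const_int 0x78563412)).  */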
5496 if ((code == EQ || code == NE)
5497 && GET_CODE (op0) == BSWAP
5498 && CONST_SCALAR_INT_P (op1))
5499 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5500 simplify_gen_unary (BSWAP, cmp_mode,
5501 op1, cmp_mode));
5502
5503 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5504 if ((code == EQ || code == NE)
5505 && GET_CODE (op0) == BSWAP
5506 && GET_CODE (op1) == BSWAP)
5507 return simplify_gen_relational (code, mode, cmp_mode,
5508 XEXP (op0, 0), XEXP (op1, 0));
5509
5510 if (op0code == POPCOUNT && op1 == const0_rtx)
5511 switch (code)
5512 {
5513 case EQ:
5514 case LE:
5515 case LEU:
5516 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5517 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5518 XEXP (op0, 0), const0_rtx);
5519
5520 case NE:
5521 case GT:
5522 case GTU:
5523 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5524 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5525 XEXP (op0, 0), const0_rtx);
5526
5527 default:
5528 break;
5529 }
5530
5531 return NULL_RTX;
5532 }
5533
5534 enum
5535 {
5536 CMP_EQ = 1,
5537 CMP_LT = 2,
5538 CMP_GT = 4,
5539 CMP_LTU = 8,
5540 CMP_GTU = 16
5541 };
5542
5543
5544 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5545 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5546 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5547 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5548 For floating-point comparisons, assume that the operands were ordered. */
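/* Example (illustrative): when comparing -1 with 1 in SImode, the signed
   result is "less than" but the unsigned result is "greater than", so
   KNOWN_RESULTS would be CMP_LT | CMP_GTU; comparison_result then returns
   const_true_rtx for LT and const0_rtx for LTU.  */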
5549
5550 static rtx
5551 comparison_result (enum rtx_code code, int known_results)
5552 {
5553 switch (code)
5554 {
5555 case EQ:
5556 case UNEQ:
5557 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5558 case NE:
5559 case LTGT:
5560 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5561
5562 case LT:
5563 case UNLT:
5564 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5565 case GE:
5566 case UNGE:
5567 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5568
5569 case GT:
5570 case UNGT:
5571 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5572 case LE:
5573 case UNLE:
5574 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5575
5576 case LTU:
5577 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5578 case GEU:
5579 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5580
5581 case GTU:
5582 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5583 case LEU:
5584 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5585
5586 case ORDERED:
5587 return const_true_rtx;
5588 case UNORDERED:
5589 return const0_rtx;
5590 default:
5591 gcc_unreachable ();
5592 }
5593 }
5594
5595 /* Check if the given comparison (done in the given MODE) is actually
5596 a tautology or a contradiction. If the mode is VOIDmode, the
5597 comparison is done in "infinite precision". If no simplification
5598 is possible, this function returns zero. Otherwise, it returns
5599 either const_true_rtx or const0_rtx. */
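/* A minimal usage sketch (illustrative, not part of GCC):

     rtx res = simplify_const_relational_operation (EQ, SImode,
                                                    GEN_INT (3), GEN_INT (3));

   Here RES would be const_true_rtx, because the two operands are equal
   constants; with NE instead of EQ the result would be const0_rtx.  */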
5600
5601 rtx
5602 simplify_const_relational_operation (enum rtx_code code,
5603 machine_mode mode,
5604 rtx op0, rtx op1)
5605 {
5606 rtx tem;
5607 rtx trueop0;
5608 rtx trueop1;
5609
5610 gcc_assert (mode != VOIDmode
5611 || (GET_MODE (op0) == VOIDmode
5612 && GET_MODE (op1) == VOIDmode));
5613
5614 /* If op0 is a compare, extract the comparison arguments from it. */
5615 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5616 {
5617 op1 = XEXP (op0, 1);
5618 op0 = XEXP (op0, 0);
5619
5620 if (GET_MODE (op0) != VOIDmode)
5621 mode = GET_MODE (op0);
5622 else if (GET_MODE (op1) != VOIDmode)
5623 mode = GET_MODE (op1);
5624 else
5625 return 0;
5626 }
5627
5628 /* We can't simplify MODE_CC values since we don't know what the
5629 actual comparison is. */
5630 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5631 return 0;
5632
5633 /* Make sure the constant is second. */
5634 if (swap_commutative_operands_p (op0, op1))
5635 {
5636 std::swap (op0, op1);
5637 code = swap_condition (code);
5638 }
5639
5640 trueop0 = avoid_constant_pool_reference (op0);
5641 trueop1 = avoid_constant_pool_reference (op1);
5642
5643 /* For integer comparisons of A and B maybe we can simplify A - B and can
5644 then simplify a comparison of that with zero. If A and B are both either
5645 a register or a CONST_INT, this can't help; testing for these cases will
5646 prevent infinite recursion here and speed things up.
5647
5648 We can only do this for EQ and NE comparisons; otherwise we might
5649 lose or introduce an overflow that we cannot disregard as undefined,
5650 since we do not know the signedness of the operation on either the
5651 left or the right-hand side of the comparison. */
5652
5653 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5654 && (code == EQ || code == NE)
5655 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5656 && (REG_P (op1) || CONST_INT_P (trueop1)))
5657 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5658 /* We cannot do this if tem is a nonzero address. */
5659 && ! nonzero_address_p (tem))
5660 return simplify_const_relational_operation (signed_condition (code),
5661 mode, tem, const0_rtx);
5662
5663 if (! HONOR_NANS (mode) && code == ORDERED)
5664 return const_true_rtx;
5665
5666 if (! HONOR_NANS (mode) && code == UNORDERED)
5667 return const0_rtx;
5668
5669 /* For modes without NaNs, if the two operands are equal, we know the
5670 result except if they have side-effects. Even with NaNs we know
5671 the result of unordered comparisons and, if signaling NaNs are
5672 irrelevant, also the result of LT/GT/LTGT. */
5673 if ((! HONOR_NANS (trueop0)
5674 || code == UNEQ || code == UNLE || code == UNGE
5675 || ((code == LT || code == GT || code == LTGT)
5676 && ! HONOR_SNANS (trueop0)))
5677 && rtx_equal_p (trueop0, trueop1)
5678 && ! side_effects_p (trueop0))
5679 return comparison_result (code, CMP_EQ);
5680
5681 /* If the operands are floating-point constants, see if we can fold
5682 the result. */
5683 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5684 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5685 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5686 {
5687 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5688 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5689
5690 /* Comparisons are unordered iff at least one of the values is NaN. */
5691 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5692 switch (code)
5693 {
5694 case UNEQ:
5695 case UNLT:
5696 case UNGT:
5697 case UNLE:
5698 case UNGE:
5699 case NE:
5700 case UNORDERED:
5701 return const_true_rtx;
5702 case EQ:
5703 case LT:
5704 case GT:
5705 case LE:
5706 case GE:
5707 case LTGT:
5708 case ORDERED:
5709 return const0_rtx;
5710 default:
5711 return 0;
5712 }
5713
5714 return comparison_result (code,
5715 (real_equal (d0, d1) ? CMP_EQ :
5716 real_less (d0, d1) ? CMP_LT : CMP_GT));
5717 }
5718
5719 /* Otherwise, see if the operands are both integers. */
5720 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5721 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5722 {
5723 /* It would be nice if we really had a mode here. However, the
5724 largest int representable on the target is as good as
5725 infinite. */
5726 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5727 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5728 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5729
5730 if (wi::eq_p (ptrueop0, ptrueop1))
5731 return comparison_result (code, CMP_EQ);
5732 else
5733 {
5734 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5735 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5736 return comparison_result (code, cr);
5737 }
5738 }
5739
5740 /* Optimize comparisons with upper and lower bounds. */
5741 scalar_int_mode int_mode;
5742 if (CONST_INT_P (trueop1)
5743 && is_a <scalar_int_mode> (mode, &int_mode)
5744 && HWI_COMPUTABLE_MODE_P (int_mode)
5745 && !side_effects_p (trueop0))
5746 {
5747 int sign;
5748 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5749 HOST_WIDE_INT val = INTVAL (trueop1);
5750 HOST_WIDE_INT mmin, mmax;
5751
5752 if (code == GEU
5753 || code == LEU
5754 || code == GTU
5755 || code == LTU)
5756 sign = 0;
5757 else
5758 sign = 1;
5759
5760 /* Get a reduced range if the sign bit is zero. */
5761 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5762 {
5763 mmin = 0;
5764 mmax = nonzero;
5765 }
5766 else
5767 {
5768 rtx mmin_rtx, mmax_rtx;
5769 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5770
5771 mmin = INTVAL (mmin_rtx);
5772 mmax = INTVAL (mmax_rtx);
5773 if (sign)
5774 {
5775 unsigned int sign_copies
5776 = num_sign_bit_copies (trueop0, int_mode);
5777
5778 mmin >>= (sign_copies - 1);
5779 mmax >>= (sign_copies - 1);
5780 }
5781 }
5782
5783 switch (code)
5784 {
5785 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5786 case GEU:
5787 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5788 return const_true_rtx;
5789 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5790 return const0_rtx;
5791 break;
5792 case GE:
5793 if (val <= mmin)
5794 return const_true_rtx;
5795 if (val > mmax)
5796 return const0_rtx;
5797 break;
5798
5799 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5800 case LEU:
5801 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5802 return const_true_rtx;
5803 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5804 return const0_rtx;
5805 break;
5806 case LE:
5807 if (val >= mmax)
5808 return const_true_rtx;
5809 if (val < mmin)
5810 return const0_rtx;
5811 break;
5812
5813 case EQ:
5814 /* x == y is always false for y out of range. */
5815 if (val < mmin || val > mmax)
5816 return const0_rtx;
5817 break;
5818
5819 /* x > y is always false for y >= mmax, always true for y < mmin. */
5820 case GTU:
5821 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5822 return const0_rtx;
5823 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5824 return const_true_rtx;
5825 break;
5826 case GT:
5827 if (val >= mmax)
5828 return const0_rtx;
5829 if (val < mmin)
5830 return const_true_rtx;
5831 break;
5832
5833 /* x < y is always false for y <= mmin, always true for y > mmax. */
5834 case LTU:
5835 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5836 return const0_rtx;
5837 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5838 return const_true_rtx;
5839 break;
5840 case LT:
5841 if (val <= mmin)
5842 return const0_rtx;
5843 if (val > mmax)
5844 return const_true_rtx;
5845 break;
5846
5847 case NE:
5848 /* x != y is always true for y out of range. */
5849 if (val < mmin || val > mmax)
5850 return const_true_rtx;
5851 break;
5852
5853 default:
5854 break;
5855 }
5856 }
5857
5858 /* Optimize integer comparisons with zero. */
5859 if (is_a <scalar_int_mode> (mode, &int_mode)
5860 && trueop1 == const0_rtx
5861 && !side_effects_p (trueop0))
5862 {
5863 /* Some addresses are known to be nonzero. We don't know
5864 their sign, but equality comparisons are known. */
5865 if (nonzero_address_p (trueop0))
5866 {
5867 if (code == EQ || code == LEU)
5868 return const0_rtx;
5869 if (code == NE || code == GTU)
5870 return const_true_rtx;
5871 }
5872
5873 /* See if the first operand is an IOR with a constant. If so, we
5874 may be able to determine the result of this comparison. */
5875 if (GET_CODE (op0) == IOR)
5876 {
5877 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5878 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5879 {
5880 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5881 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5882 && (UINTVAL (inner_const)
5883 & (HOST_WIDE_INT_1U
5884 << sign_bitnum)));
5885
5886 switch (code)
5887 {
5888 case EQ:
5889 case LEU:
5890 return const0_rtx;
5891 case NE:
5892 case GTU:
5893 return const_true_rtx;
5894 case LT:
5895 case LE:
5896 if (has_sign)
5897 return const_true_rtx;
5898 break;
5899 case GT:
5900 case GE:
5901 if (has_sign)
5902 return const0_rtx;
5903 break;
5904 default:
5905 break;
5906 }
5907 }
5908 }
5909 }
5910
5911 /* Optimize comparison of ABS with zero. */
5912 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5913 && (GET_CODE (trueop0) == ABS
5914 || (GET_CODE (trueop0) == FLOAT_EXTEND
5915 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5916 {
5917 switch (code)
5918 {
5919 case LT:
5920 /* Optimize abs(x) < 0.0. */
5921 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5922 return const0_rtx;
5923 break;
5924
5925 case GE:
5926 /* Optimize abs(x) >= 0.0. */
5927 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5928 return const_true_rtx;
5929 break;
5930
5931 case UNGE:
5932 /* Optimize ! (abs(x) < 0.0). */
5933 return const_true_rtx;
5934
5935 default:
5936 break;
5937 }
5938 }
5939
5940 return 0;
5941 }
5942
5943 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X),
5944 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5945 or CTZ_DEFINED_VALUE_AT_ZERO respectively. Return OP (X) if the expression
5946 can be simplified to that, or NULL_RTX if not.
5947 Assume X is compared against zero with CMP_CODE and that the true
5948 arm is TRUE_VAL and the false arm is FALSE_VAL. */
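/* Illustrative example: on a target where CLZ_DEFINED_VALUE_AT_ZERO
   yields 32 for SImode,
   (if_then_else (eq x (const_int 0)) (const_int 32) (clz:SI x))
   simplifies to (clz:SI x).  */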
5949
5950 static rtx
5951 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5952 {
5953 if (cmp_code != EQ && cmp_code != NE)
5954 return NULL_RTX;
5955
5956 /* Result on X == 0 and X != 0 respectively. */
5957 rtx on_zero, on_nonzero;
5958 if (cmp_code == EQ)
5959 {
5960 on_zero = true_val;
5961 on_nonzero = false_val;
5962 }
5963 else
5964 {
5965 on_zero = false_val;
5966 on_nonzero = true_val;
5967 }
5968
5969 rtx_code op_code = GET_CODE (on_nonzero);
5970 if ((op_code != CLZ && op_code != CTZ)
5971 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5972 || !CONST_INT_P (on_zero))
5973 return NULL_RTX;
5974
5975 HOST_WIDE_INT op_val;
5976 scalar_int_mode mode ATTRIBUTE_UNUSED
5977 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5978 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5979 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5980 && op_val == INTVAL (on_zero))
5981 return on_nonzero;
5982
5983 return NULL_RTX;
5984 }
5985
5986 /* Try to simplify X given that it appears within operand OP of a
5987 VEC_MERGE operation whose mask is MASK. X need not use the same
5988 vector mode as the VEC_MERGE, but it must have the same number of
5989 elements.
5990
5991 Return the simplified X on success, otherwise return NULL_RTX. */
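/* Illustrative example: simplifying operand 0 of
   (vec_merge (plus (vec_merge a b m) c) d m) against mask M collapses the
   inner (vec_merge a b m) to A, giving (vec_merge (plus a c) d m),
   provided B has no side effects.  */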
5992
5993 rtx
5994 simplify_merge_mask (rtx x, rtx mask, int op)
5995 {
5996 gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
5997 poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
5998 if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
5999 {
6000 if (side_effects_p (XEXP (x, 1 - op)))
6001 return NULL_RTX;
6002
6003 return XEXP (x, op);
6004 }
6005 if (UNARY_P (x)
6006 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6007 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
6008 {
6009 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6010 if (top0)
6011 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
6012 GET_MODE (XEXP (x, 0)));
6013 }
6014 if (BINARY_P (x)
6015 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6016 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6017 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6018 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
6019 {
6020 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6021 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6022 if (top0 || top1)
6023 {
6024 if (COMPARISON_P (x))
6025 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
6026 GET_MODE (XEXP (x, 0)) != VOIDmode
6027 ? GET_MODE (XEXP (x, 0))
6028 : GET_MODE (XEXP (x, 1)),
6029 top0 ? top0 : XEXP (x, 0),
6030 top1 ? top1 : XEXP (x, 1));
6031 else
6032 return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
6033 top0 ? top0 : XEXP (x, 0),
6034 top1 ? top1 : XEXP (x, 1));
6035 }
6036 }
6037 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
6038 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6039 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6040 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6041 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
6042 && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
6043 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
6044 {
6045 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6046 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6047 rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
6048 if (top0 || top1 || top2)
6049 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
6050 GET_MODE (XEXP (x, 0)),
6051 top0 ? top0 : XEXP (x, 0),
6052 top1 ? top1 : XEXP (x, 1),
6053 top2 ? top2 : XEXP (x, 2));
6054 }
6055 return NULL_RTX;
6056 }
6057
6058 \f
6059 /* Simplify CODE, an operation with result mode MODE and three operands,
6060 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
6061 a constant. Return 0 if no simplification is possible. */
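/* Illustrative examples: (if_then_else (const_int 1) a b) simplifies to A,
   and (if_then_else (ne a b) a b) simplifies to A when NaNs and signed
   zeros can be ignored.  */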
6062
6063 rtx
6064 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
6065 machine_mode op0_mode, rtx op0, rtx op1,
6066 rtx op2)
6067 {
6068 bool any_change = false;
6069 rtx tem, trueop2;
6070 scalar_int_mode int_mode, int_op0_mode;
6071 unsigned int n_elts;
6072
6073 switch (code)
6074 {
6075 case FMA:
6076 /* Simplify negations around the multiplication. */
6077 /* -a * -b + c => a * b + c. */
6078 if (GET_CODE (op0) == NEG)
6079 {
6080 tem = simplify_unary_operation (NEG, mode, op1, mode);
6081 if (tem)
6082 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
6083 }
6084 else if (GET_CODE (op1) == NEG)
6085 {
6086 tem = simplify_unary_operation (NEG, mode, op0, mode);
6087 if (tem)
6088 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
6089 }
6090
6091 /* Canonicalize the two multiplication operands. */
6092 /* a * -b + c => -b * a + c. */
6093 if (swap_commutative_operands_p (op0, op1))
6094 std::swap (op0, op1), any_change = true;
6095
6096 if (any_change)
6097 return gen_rtx_FMA (mode, op0, op1, op2);
6098 return NULL_RTX;
6099
6100 case SIGN_EXTRACT:
6101 case ZERO_EXTRACT:
6102 if (CONST_INT_P (op0)
6103 && CONST_INT_P (op1)
6104 && CONST_INT_P (op2)
6105 && is_a <scalar_int_mode> (mode, &int_mode)
6106 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
6107 && HWI_COMPUTABLE_MODE_P (int_mode))
6108 {
6109 /* Extracting a bit-field from a constant. */
6110 unsigned HOST_WIDE_INT val = UINTVAL (op0);
6111 HOST_WIDE_INT op1val = INTVAL (op1);
6112 HOST_WIDE_INT op2val = INTVAL (op2);
6113 if (!BITS_BIG_ENDIAN)
6114 val >>= op2val;
6115 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
6116 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
6117 else
6118 /* Not enough information to calculate the bit position. */
6119 break;
6120
6121 if (HOST_BITS_PER_WIDE_INT != op1val)
6122 {
6123 /* First zero-extend. */
6124 val &= (HOST_WIDE_INT_1U << op1val) - 1;
6125 /* If desired, propagate sign bit. */
6126 if (code == SIGN_EXTRACT
6127 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
6128 != 0)
6129 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
6130 }
6131
6132 return gen_int_mode (val, int_mode);
6133 }
6134 break;
6135
6136 case IF_THEN_ELSE:
6137 if (CONST_INT_P (op0))
6138 return op0 != const0_rtx ? op1 : op2;
6139
6140 /* Convert c ? a : a into "a". */
6141 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
6142 return op1;
6143
6144 /* Convert a != b ? a : b into "a". */
6145 if (GET_CODE (op0) == NE
6146 && ! side_effects_p (op0)
6147 && ! HONOR_NANS (mode)
6148 && ! HONOR_SIGNED_ZEROS (mode)
6149 && ((rtx_equal_p (XEXP (op0, 0), op1)
6150 && rtx_equal_p (XEXP (op0, 1), op2))
6151 || (rtx_equal_p (XEXP (op0, 0), op2)
6152 && rtx_equal_p (XEXP (op0, 1), op1))))
6153 return op1;
6154
6155 /* Convert a == b ? a : b into "b". */
6156 if (GET_CODE (op0) == EQ
6157 && ! side_effects_p (op0)
6158 && ! HONOR_NANS (mode)
6159 && ! HONOR_SIGNED_ZEROS (mode)
6160 && ((rtx_equal_p (XEXP (op0, 0), op1)
6161 && rtx_equal_p (XEXP (op0, 1), op2))
6162 || (rtx_equal_p (XEXP (op0, 0), op2)
6163 && rtx_equal_p (XEXP (op0, 1), op1))))
6164 return op2;
6165
6166 /* Convert (!c) != {0,...,0} ? a : b into
6167 c != {0,...,0} ? b : a for vector modes. */
6168 if (VECTOR_MODE_P (GET_MODE (op1))
6169 && GET_CODE (op0) == NE
6170 && GET_CODE (XEXP (op0, 0)) == NOT
6171 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
6172 {
6173 rtx cv = XEXP (op0, 1);
6174 int nunits;
6175 bool ok = true;
6176 if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
6177 ok = false;
6178 else
6179 for (int i = 0; i < nunits; ++i)
6180 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
6181 {
6182 ok = false;
6183 break;
6184 }
6185 if (ok)
6186 {
6187 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
6188 XEXP (XEXP (op0, 0), 0),
6189 XEXP (op0, 1));
6190 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
6191 return retval;
6192 }
6193 }
6194
6195 /* Convert x == 0 ? N : clz (x) into clz (x) when
6196 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6197 Similarly for ctz (x). */
6198 if (COMPARISON_P (op0) && !side_effects_p (op0)
6199 && XEXP (op0, 1) == const0_rtx)
6200 {
6201 rtx simplified
6202 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
6203 op1, op2);
6204 if (simplified)
6205 return simplified;
6206 }
6207
6208 if (COMPARISON_P (op0) && ! side_effects_p (op0))
6209 {
6210 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
6211 ? GET_MODE (XEXP (op0, 1))
6212 : GET_MODE (XEXP (op0, 0)));
6213 rtx temp;
6214
6215 /* Look for happy constants in op1 and op2. */
6216 if (CONST_INT_P (op1) && CONST_INT_P (op2))
6217 {
6218 HOST_WIDE_INT t = INTVAL (op1);
6219 HOST_WIDE_INT f = INTVAL (op2);
6220
6221 if (t == STORE_FLAG_VALUE && f == 0)
6222 code = GET_CODE (op0);
6223 else if (t == 0 && f == STORE_FLAG_VALUE)
6224 {
6225 enum rtx_code tmp;
6226 tmp = reversed_comparison_code (op0, NULL);
6227 if (tmp == UNKNOWN)
6228 break;
6229 code = tmp;
6230 }
6231 else
6232 break;
6233
6234 return simplify_gen_relational (code, mode, cmp_mode,
6235 XEXP (op0, 0), XEXP (op0, 1));
6236 }
6237
6238 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
6239 cmp_mode, XEXP (op0, 0),
6240 XEXP (op0, 1));
6241
6242 /* See if any simplifications were possible. */
6243 if (temp)
6244 {
6245 if (CONST_INT_P (temp))
6246 return temp == const0_rtx ? op2 : op1;
6247 else if (temp)
6248 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
6249 }
6250 }
6251 break;
6252
6253 case VEC_MERGE:
6254 gcc_assert (GET_MODE (op0) == mode);
6255 gcc_assert (GET_MODE (op1) == mode);
6256 gcc_assert (VECTOR_MODE_P (mode));
6257 trueop2 = avoid_constant_pool_reference (op2);
6258 if (CONST_INT_P (trueop2)
6259 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
6260 {
6261 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
6262 unsigned HOST_WIDE_INT mask;
6263 if (n_elts == HOST_BITS_PER_WIDE_INT)
6264 mask = -1;
6265 else
6266 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
6267
6268 if (!(sel & mask) && !side_effects_p (op0))
6269 return op1;
6270 if ((sel & mask) == mask && !side_effects_p (op1))
6271 return op0;
6272
6273 rtx trueop0 = avoid_constant_pool_reference (op0);
6274 rtx trueop1 = avoid_constant_pool_reference (op1);
6275 if (GET_CODE (trueop0) == CONST_VECTOR
6276 && GET_CODE (trueop1) == CONST_VECTOR)
6277 {
6278 rtvec v = rtvec_alloc (n_elts);
6279 unsigned int i;
6280
6281 for (i = 0; i < n_elts; i++)
6282 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
6283 ? CONST_VECTOR_ELT (trueop0, i)
6284 : CONST_VECTOR_ELT (trueop1, i));
6285 return gen_rtx_CONST_VECTOR (mode, v);
6286 }
6287
6288 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6289 if no element from a appears in the result. */
6290 if (GET_CODE (op0) == VEC_MERGE)
6291 {
6292 tem = avoid_constant_pool_reference (XEXP (op0, 2));
6293 if (CONST_INT_P (tem))
6294 {
6295 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
6296 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
6297 return simplify_gen_ternary (code, mode, mode,
6298 XEXP (op0, 1), op1, op2);
6299 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
6300 return simplify_gen_ternary (code, mode, mode,
6301 XEXP (op0, 0), op1, op2);
6302 }
6303 }
6304 if (GET_CODE (op1) == VEC_MERGE)
6305 {
6306 tem = avoid_constant_pool_reference (XEXP (op1, 2));
6307 if (CONST_INT_P (tem))
6308 {
6309 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
6310 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
6311 return simplify_gen_ternary (code, mode, mode,
6312 op0, XEXP (op1, 1), op2);
6313 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
6314 return simplify_gen_ternary (code, mode, mode,
6315 op0, XEXP (op1, 0), op2);
6316 }
6317 }
6318
6319 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6320 with a. */
6321 if (GET_CODE (op0) == VEC_DUPLICATE
6322 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
6323 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
6324 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
6325 {
6326 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
6327 if (CONST_INT_P (tem) && CONST_INT_P (op2))
6328 {
6329 if (XEXP (XEXP (op0, 0), 0) == op1
6330 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
6331 return op1;
6332 }
6333 }
6334 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6335 (const_int N))
6336 with (vec_concat (X) (B)) if N == 1 or
6337 (vec_concat (A) (X)) if N == 2. */
6338 if (GET_CODE (op0) == VEC_DUPLICATE
6339 && GET_CODE (op1) == CONST_VECTOR
6340 && known_eq (CONST_VECTOR_NUNITS (op1), 2)
6341 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6342 && IN_RANGE (sel, 1, 2))
6343 {
6344 rtx newop0 = XEXP (op0, 0);
6345 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
6346 if (sel == 2)
6347 std::swap (newop0, newop1);
6348 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6349 }
6350 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6351 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6352 Only applies for vectors of two elements. */
6353 if (GET_CODE (op0) == VEC_DUPLICATE
6354 && GET_CODE (op1) == VEC_CONCAT
6355 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6356 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6357 && IN_RANGE (sel, 1, 2))
6358 {
6359 rtx newop0 = XEXP (op0, 0);
6360 rtx newop1 = XEXP (op1, 2 - sel);
6361 rtx otherop = XEXP (op1, sel - 1);
6362 if (sel == 2)
6363 std::swap (newop0, newop1);
6364 /* Don't want to throw away the other part of the vec_concat if
6365 it has side-effects. */
6366 if (!side_effects_p (otherop))
6367 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6368 }
6369
6370 /* Replace:
6371
6372 (vec_merge:outer (vec_duplicate:outer x:inner)
6373 (subreg:outer y:inner 0)
6374 (const_int N))
6375
6376 with (vec_concat:outer x:inner y:inner) if N == 1,
6377 or (vec_concat:outer y:inner x:inner) if N == 2.
6378
6379 Implicitly, this means we have a paradoxical subreg, but such
6380 a check is cheap, so do it anyway.
6381
6382 Only applies for vectors of two elements. */
6383 if (GET_CODE (op0) == VEC_DUPLICATE
6384 && GET_CODE (op1) == SUBREG
6385 && GET_MODE (op1) == GET_MODE (op0)
6386 && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
6387 && paradoxical_subreg_p (op1)
6388 && subreg_lowpart_p (op1)
6389 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6390 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6391 && IN_RANGE (sel, 1, 2))
6392 {
6393 rtx newop0 = XEXP (op0, 0);
6394 rtx newop1 = SUBREG_REG (op1);
6395 if (sel == 2)
6396 std::swap (newop0, newop1);
6397 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6398 }
6399
6400 /* Same as above but with switched operands:
6401 Replace (vec_merge:outer (subreg:outer x:inner 0)
6402 (vec_duplicate:outer y:inner)
6403 (const_int N))
6404
6405 with (vec_concat:outer x:inner y:inner) if N == 1,
6406 or (vec_concat:outer y:inner x:inner) if N == 2. */
6407 if (GET_CODE (op1) == VEC_DUPLICATE
6408 && GET_CODE (op0) == SUBREG
6409 && GET_MODE (op0) == GET_MODE (op1)
6410 && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
6411 && paradoxical_subreg_p (op0)
6412 && subreg_lowpart_p (op0)
6413 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6414 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6415 && IN_RANGE (sel, 1, 2))
6416 {
6417 rtx newop0 = SUBREG_REG (op0);
6418 rtx newop1 = XEXP (op1, 0);
6419 if (sel == 2)
6420 std::swap (newop0, newop1);
6421 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6422 }
6423
6424 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6425 (const_int n))
6426 with (vec_concat x y) or (vec_concat y x) depending on value
6427 of N. */
6428 if (GET_CODE (op0) == VEC_DUPLICATE
6429 && GET_CODE (op1) == VEC_DUPLICATE
6430 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6431 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6432 && IN_RANGE (sel, 1, 2))
6433 {
6434 rtx newop0 = XEXP (op0, 0);
6435 rtx newop1 = XEXP (op1, 0);
6436 if (sel == 2)
6437 std::swap (newop0, newop1);
6438
6439 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6440 }
6441 }
6442
6443 if (rtx_equal_p (op0, op1)
6444 && !side_effects_p (op2) && !side_effects_p (op1))
6445 return op0;
6446
6447 if (!side_effects_p (op2))
6448 {
6449 rtx top0
6450 = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
6451 rtx top1
6452 = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
6453 if (top0 || top1)
6454 return simplify_gen_ternary (code, mode, mode,
6455 top0 ? top0 : op0,
6456 top1 ? top1 : op1, op2);
6457 }
6458
6459 break;
6460
6461 default:
6462 gcc_unreachable ();
6463 }
6464
6465 return 0;
6466 }
6467
6468 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6469 starting at byte FIRST_BYTE. Return true on success and add the
6470 bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6471 that the bytes follow target memory order. Leave BYTES unmodified
6472 on failure.
6473
6474 MODE is the mode of X. The caller must reserve NUM_BYTES bytes in
6475 BYTES before calling this function. */
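/* Illustrative example, assuming BITS_PER_UNIT == 8 and a 2-byte HImode:
   encoding (const_int 0x0102) pushes the bytes 0x02 then 0x01 on a
   little-endian target and 0x01 then 0x02 on a big-endian one.  */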
6476
6477 bool
6478 native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
6479 unsigned int first_byte, unsigned int num_bytes)
6480 {
6481 /* Check the mode is sensible. */
6482 gcc_assert (GET_MODE (x) == VOIDmode
6483 ? is_a <scalar_int_mode> (mode)
6484 : mode == GET_MODE (x));
6485
6486 if (GET_CODE (x) == CONST_VECTOR)
6487 {
6488 /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6489 is necessary. The only complication is that MODE_VECTOR_BOOL
6490 vectors can have several elements per byte. */
6491 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6492 GET_MODE_NUNITS (mode));
6493 unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
6494 if (elt_bits < BITS_PER_UNIT)
6495 {
6496 /* This is the only case in which elements can be smaller than
6497 a byte. */
6498 gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
6499 for (unsigned int i = 0; i < num_bytes; ++i)
6500 {
6501 target_unit value = 0;
6502 for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
6503 {
6504 value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & 1) << j;
6505 elt += 1;
6506 }
6507 bytes.quick_push (value);
6508 }
6509 return true;
6510 }
6511
6512 unsigned int start = bytes.length ();
6513 unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
6514 /* Make FIRST_BYTE relative to ELT. */
6515 first_byte %= elt_bytes;
6516 while (num_bytes > 0)
6517 {
6518 /* Work out how many bytes we want from element ELT. */
6519 unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
6520 if (!native_encode_rtx (GET_MODE_INNER (mode),
6521 CONST_VECTOR_ELT (x, elt), bytes,
6522 first_byte, chunk_bytes))
6523 {
6524 bytes.truncate (start);
6525 return false;
6526 }
6527 elt += 1;
6528 first_byte = 0;
6529 num_bytes -= chunk_bytes;
6530 }
6531 return true;
6532 }
6533
6534 /* All subsequent cases are limited to scalars. */
6535 scalar_mode smode;
6536 if (!is_a <scalar_mode> (mode, &smode))
6537 return false;
6538
6539 /* Make sure that the region is in range. */
6540 unsigned int end_byte = first_byte + num_bytes;
6541 unsigned int mode_bytes = GET_MODE_SIZE (smode);
6542 gcc_assert (end_byte <= mode_bytes);
6543
6544 if (CONST_SCALAR_INT_P (x))
6545 {
6546 /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6547 and WORDS_BIG_ENDIAN. Use the subreg machinery to get the lsb
6548 position of each byte. */
6549 rtx_mode_t value (x, smode);
6550 wide_int_ref value_wi (value);
6551 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6552 {
6553 /* Always constant because the inputs are. */
6554 unsigned int lsb
6555 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6556 /* Operate directly on the encoding rather than using
6557 wi::extract_uhwi, so that we preserve the sign or zero
6558 extension for modes that are not a whole number of bits in
6559 size. (Zero extension is only used for the combination of
6560 innermode == BImode && STORE_FLAG_VALUE == 1). */
6561 unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
6562 unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
6563 unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
6564 bytes.quick_push (uhwi >> shift);
6565 }
6566 return true;
6567 }
6568
6569 if (CONST_DOUBLE_P (x))
6570 {
6571 /* real_to_target produces an array of integers in target memory order.
6572 All integers before the last one have 32 bits; the last one may
6573 have 32 bits or fewer, depending on whether the mode bitsize
6574 is divisible by 32. Each of these integers is then laid out
6575 in target memory as any other integer would be. */
6576 long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
6577 real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);
6578
6579 /* The (maximum) number of target bytes per element of el32. */
6580 unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
6581 gcc_assert (bytes_per_el32 != 0);
6582
6583 /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6584 handling above. */
6585 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6586 {
6587 unsigned int index = byte / bytes_per_el32;
6588 unsigned int subbyte = byte % bytes_per_el32;
6589 unsigned int int_bytes = MIN (bytes_per_el32,
6590 mode_bytes - index * bytes_per_el32);
6591 /* Always constant because the inputs are. */
6592 unsigned int lsb
6593 = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
6594 bytes.quick_push ((unsigned long) el32[index] >> lsb);
6595 }
6596 return true;
6597 }
6598
6599 if (GET_CODE (x) == CONST_FIXED)
6600 {
6601 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6602 {
6603 /* Always constant because the inputs are. */
6604 unsigned int lsb
6605 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6606 unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
6607 if (lsb >= HOST_BITS_PER_WIDE_INT)
6608 {
6609 lsb -= HOST_BITS_PER_WIDE_INT;
6610 piece = CONST_FIXED_VALUE_HIGH (x);
6611 }
6612 bytes.quick_push (piece >> lsb);
6613 }
6614 return true;
6615 }
6616
6617 return false;
6618 }
6619
6620 /* Read a vector of mode MODE from the target memory image given by BYTES,
6621 starting at byte FIRST_BYTE. The vector is known to be encodable using
6622 NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
6623 and BYTES is known to have enough bytes to supply NPATTERNS *
6624 NELTS_PER_PATTERN vector elements. Each element of BYTES contains
6625 BITS_PER_UNIT bits and the bytes are in target memory order.
6626
6627 Return the vector on success, otherwise return NULL_RTX. */
6628
6629 rtx
6630 native_decode_vector_rtx (machine_mode mode, vec<target_unit> bytes,
6631 unsigned int first_byte, unsigned int npatterns,
6632 unsigned int nelts_per_pattern)
6633 {
6634 rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);
6635
6636 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6637 GET_MODE_NUNITS (mode));
6638 if (elt_bits < BITS_PER_UNIT)
6639 {
6640 /* This is the only case in which elements can be smaller than a byte.
6641 Element 0 is always in the lsb of the containing byte. */
6642 gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
6643 for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
6644 {
6645 unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
6646 unsigned int byte_index = bit_index / BITS_PER_UNIT;
6647 unsigned int lsb = bit_index % BITS_PER_UNIT;
6648 builder.quick_push (bytes[byte_index] & (1 << lsb)
6649 ? CONST1_RTX (BImode)
6650 : CONST0_RTX (BImode));
6651 }
6652 }
6653 else
6654 {
6655 for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
6656 {
6657 rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
6658 if (!x)
6659 return NULL_RTX;
6660 builder.quick_push (x);
6661 first_byte += elt_bits / BITS_PER_UNIT;
6662 }
6663 }
6664 return builder.build ();
6665 }
6666
6667 /* Read an rtx of mode MODE from the target memory image given by BYTES,
6668 starting at byte FIRST_BYTE. Each element of BYTES contains BITS_PER_UNIT
6669 bits and the bytes are in target memory order. The image has enough
6670 values to specify all bytes of MODE.
6671
6672 Return the rtx on success, otherwise return NULL_RTX. */
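/* A round-trip sketch (illustrative, not part of GCC), assuming a 4-byte
   SImode and an IEEE single-precision SFmode:

     auto_vec<target_unit, 128> bytes (4);
     rtx f = NULL_RTX;
     if (native_encode_rtx (SImode, GEN_INT (0x3f800000), bytes, 0, 4))
       f = native_decode_rtx (SFmode, bytes, 0);

   Under those assumptions F reinterprets the bit pattern and would be
   (const_double:SF 1.0e+0).  */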
6673
6674 rtx
6675 native_decode_rtx (machine_mode mode, vec<target_unit> bytes,
6676 unsigned int first_byte)
6677 {
6678 if (VECTOR_MODE_P (mode))
6679 {
6680 /* If we know at compile time how many elements there are,
6681 pull each element directly from BYTES. */
6682 unsigned int nelts;
6683 if (GET_MODE_NUNITS (mode).is_constant (&nelts))
6684 return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
6685 return NULL_RTX;
6686 }
6687
6688 scalar_int_mode imode;
6689 if (is_a <scalar_int_mode> (mode, &imode)
6690 && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
6691 {
6692 /* Pull the bytes msb first, so that we can use simple
6693 shift-and-insert wide_int operations. */
6694 unsigned int size = GET_MODE_SIZE (imode);
6695 wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
6696 for (unsigned int i = 0; i < size; ++i)
6697 {
6698 unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
6699 /* Always constant because the inputs are. */
6700 unsigned int subbyte
6701 = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
6702 result <<= BITS_PER_UNIT;
6703 result |= bytes[first_byte + subbyte];
6704 }
6705 return immed_wide_int_const (result, imode);
6706 }
6707
6708 scalar_float_mode fmode;
6709 if (is_a <scalar_float_mode> (mode, &fmode))
6710 {
6711 /* We need to build an array of integers in target memory order.
6712 All integers before the last one have 32 bits; the last one may
6713 have 32 bits or fewer, depending on whether the mode bitsize
6714 is divisible by 32. */
6715 long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
6716 unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
6717 memset (el32, 0, num_el32 * sizeof (long));
6718
6719 /* The (maximum) number of target bytes per element of el32. */
6720 unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
6721 gcc_assert (bytes_per_el32 != 0);
6722
6723 unsigned int mode_bytes = GET_MODE_SIZE (fmode);
6724 for (unsigned int byte = 0; byte < mode_bytes; ++byte)
6725 {
6726 unsigned int index = byte / bytes_per_el32;
6727 unsigned int subbyte = byte % bytes_per_el32;
6728 unsigned int int_bytes = MIN (bytes_per_el32,
6729 mode_bytes - index * bytes_per_el32);
6730 /* Always constant because the inputs are. */
6731 unsigned int lsb
6732 = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
6733 el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
6734 }
6735 REAL_VALUE_TYPE r;
6736 real_from_target (&r, el32, fmode);
6737 return const_double_from_real_value (r, fmode);
6738 }
6739
6740 if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
6741 {
6742 scalar_mode smode = as_a <scalar_mode> (mode);
6743 FIXED_VALUE_TYPE f;
6744 f.data.low = 0;
6745 f.data.high = 0;
6746 f.mode = smode;
6747
6748 unsigned int mode_bytes = GET_MODE_SIZE (smode);
6749 for (unsigned int byte = 0; byte < mode_bytes; ++byte)
6750 {
6751 /* Always constant because the inputs are. */
6752 unsigned int lsb
6753 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6754 unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
6755 if (lsb >= HOST_BITS_PER_WIDE_INT)
6756 f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
6757 else
6758 f.data.low |= unit << lsb;
6759 }
6760 return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
6761 }
6762
6763 return NULL_RTX;
6764 }
6765
6766 /* Simplify a byte offset BYTE into CONST_VECTOR X. The main purpose
6767 is to convert a runtime BYTE value into a constant one. */
6768
6769 static poly_uint64
6770 simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
6771 {
6772 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
6773 machine_mode mode = GET_MODE (x);
6774 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6775 GET_MODE_NUNITS (mode));
6776 /* The number of bits needed to encode one element from each pattern. */
6777 unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;
6778
6779 /* Identify the start point in terms of a sequence number and a byte offset
6780 within that sequence. */
6781 poly_uint64 first_sequence;
6782 unsigned HOST_WIDE_INT subbit;
6783 if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
6784 &first_sequence, &subbit))
6785 {
6786 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
6787 if (nelts_per_pattern == 1)
6788 /* This is a duplicated vector, so the value of FIRST_SEQUENCE
6789 doesn't matter. */
6790 byte = subbit / BITS_PER_UNIT;
6791 else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
6792 {
6793 /* The subreg drops the first element from each pattern and
6794 only uses the second element. Find the first sequence
6795 that starts on a byte boundary. */
6796 subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
6797 byte = subbit / BITS_PER_UNIT;
6798 }
6799 }
6800 return byte;
6801 }
6802
6803 /* Subroutine of simplify_subreg in which:
6804
6805 - X is known to be a CONST_VECTOR
6806 - OUTERMODE is known to be a vector mode
6807
6808 Try to handle the subreg by operating on the CONST_VECTOR encoding
6809 rather than on each individual element of the CONST_VECTOR.
6810
6811 Return the simplified subreg on success, otherwise return NULL_RTX. */
6812
6813 static rtx
6814 simplify_const_vector_subreg (machine_mode outermode, rtx x,
6815 machine_mode innermode, unsigned int first_byte)
6816 {
6817 /* Paradoxical subregs of vectors have dubious semantics. */
6818 if (paradoxical_subreg_p (outermode, innermode))
6819 return NULL_RTX;
6820
6821 /* We can only preserve the semantics of a stepped pattern if the new
6822 vector element is the same as the original one. */
6823 if (CONST_VECTOR_STEPPED_P (x)
6824 && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
6825 return NULL_RTX;
6826
6827 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
6828 unsigned int x_elt_bits
6829 = vector_element_size (GET_MODE_BITSIZE (innermode),
6830 GET_MODE_NUNITS (innermode));
6831 unsigned int out_elt_bits
6832 = vector_element_size (GET_MODE_BITSIZE (outermode),
6833 GET_MODE_NUNITS (outermode));
6834
6835 /* The number of bits needed to encode one element from every pattern
6836 of the original vector. */
6837 unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;
6838
6839 /* The number of bits needed to encode one element from every pattern
6840 of the result. */
6841 unsigned int out_sequence_bits
6842 = least_common_multiple (x_sequence_bits, out_elt_bits);
6843
6844 /* Work out the number of interleaved patterns in the output vector
6845 and the number of encoded elements per pattern. */
6846 unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
6847 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
6848
6849 /* The encoding scheme requires the number of elements to be a multiple
6850 of the number of patterns, so that each pattern appears at least once
6851 and so that the same number of elements appear from each pattern. */
6852 bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
6853 unsigned int const_nunits;
6854 if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
6855 && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
6856 {
6857 /* Either the encoding is invalid, or applying it would give us
6858 more elements than we need. Just encode each element directly. */
6859 out_npatterns = const_nunits;
6860 nelts_per_pattern = 1;
6861 }
6862 else if (!ok_p)
6863 return NULL_RTX;
6864
6865 /* Get enough bytes of X to form the new encoding. */
6866 unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
6867 unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
6868 auto_vec<target_unit, 128> buffer (buffer_bytes);
6869 if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
6870 return NULL_RTX;
6871
6872 /* Reencode the bytes as OUTERMODE. */
6873 return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
6874 nelts_per_pattern);
6875 }
6876
6877 /* Try to simplify a subreg of a constant by encoding the subreg region
6878 as a sequence of target bytes and reading them back in the new mode.
6879 Return the new value on success, otherwise return null.
6880
6881 The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
6882 and byte offset FIRST_BYTE. */
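/* Illustrative example: with OUTERMODE == QImode, X == (const_int 0x1234),
   INNERMODE == HImode and FIRST_BYTE == 0, the result is (const_int 0x34)
   on a little-endian target and (const_int 0x12) on a big-endian one.  */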
6883
6884 static rtx
6885 simplify_immed_subreg (fixed_size_mode outermode, rtx x,
6886 machine_mode innermode, unsigned int first_byte)
6887 {
6888 unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
6889 auto_vec<target_unit, 128> buffer (buffer_bytes);
6890
6891 /* Some ports misuse CCmode. */
6892 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
6893 return x;
6894
6895 /* Paradoxical subregs read undefined values for bytes outside of the
6896 inner value. However, we have traditionally always sign-extended
6897 integer constants and zero-extended others. */
6898 unsigned int inner_bytes = buffer_bytes;
6899 if (paradoxical_subreg_p (outermode, innermode))
6900 {
6901 if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
6902 return NULL_RTX;
6903
6904 target_unit filler = 0;
6905 if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
6906 filler = -1;
6907
6908 /* Add any leading bytes due to big-endian layout. The number of
6909 bytes must be constant because both modes have constant size. */
6910 unsigned int leading_bytes
6911 = -byte_lowpart_offset (outermode, innermode).to_constant ();
6912 for (unsigned int i = 0; i < leading_bytes; ++i)
6913 buffer.quick_push (filler);
6914
6915 if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
6916 return NULL_RTX;
6917
6918 /* Add any trailing bytes due to little-endian layout. */
6919 while (buffer.length () < buffer_bytes)
6920 buffer.quick_push (filler);
6921 }
6922 else
6923 {
6924 if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
6925 return NULL_RTX;
6926 }
6927 return native_decode_rtx (outermode, buffer, 0);
6928 }
6929
6930 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6931 Return 0 if no simplifications are possible. */
6932 rtx
6933 simplify_subreg (machine_mode outermode, rtx op,
6934 machine_mode innermode, poly_uint64 byte)
6935 {
6936 /* Little bit of sanity checking. */
6937 gcc_assert (innermode != VOIDmode);
6938 gcc_assert (outermode != VOIDmode);
6939 gcc_assert (innermode != BLKmode);
6940 gcc_assert (outermode != BLKmode);
6941
6942 gcc_assert (GET_MODE (op) == innermode
6943 || GET_MODE (op) == VOIDmode);
6944
6945 poly_uint64 outersize = GET_MODE_SIZE (outermode);
6946 if (!multiple_p (byte, outersize))
6947 return NULL_RTX;
6948
6949 poly_uint64 innersize = GET_MODE_SIZE (innermode);
6950 if (maybe_ge (byte, innersize))
6951 return NULL_RTX;
6952
6953 if (outermode == innermode && known_eq (byte, 0U))
6954 return op;
6955
6956 if (GET_CODE (op) == CONST_VECTOR)
6957 byte = simplify_const_vector_byte_offset (op, byte);
6958
6959 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6960 {
6961 rtx elt;
6962
6963 if (VECTOR_MODE_P (outermode)
6964 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6965 && vec_duplicate_p (op, &elt))
6966 return gen_vec_duplicate (outermode, elt);
6967
6968 if (outermode == GET_MODE_INNER (innermode)
6969 && vec_duplicate_p (op, &elt))
6970 return elt;
6971 }
6972
6973 if (CONST_SCALAR_INT_P (op)
6974 || CONST_DOUBLE_AS_FLOAT_P (op)
6975 || CONST_FIXED_P (op)
6976 || GET_CODE (op) == CONST_VECTOR)
6977 {
6978 unsigned HOST_WIDE_INT cbyte;
6979 if (byte.is_constant (&cbyte))
6980 {
6981 if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
6982 {
6983 rtx tmp = simplify_const_vector_subreg (outermode, op,
6984 innermode, cbyte);
6985 if (tmp)
6986 return tmp;
6987 }
6988
6989 fixed_size_mode fs_outermode;
6990 if (is_a <fixed_size_mode> (outermode, &fs_outermode))
6991 return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
6992 }
6993 }
6994
6995 /* Changing mode twice with SUBREG => just change it once,
6996 or not at all if changing back to OP's starting mode. */
6997 if (GET_CODE (op) == SUBREG)
6998 {
6999 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
7000 poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
7001 rtx newx;
7002
7003 if (outermode == innermostmode
7004 && known_eq (byte, 0U)
7005 && known_eq (SUBREG_BYTE (op), 0))
7006 return SUBREG_REG (op);
7007
7008 /* Work out the memory offset of the final OUTERMODE value relative
7009 to the inner value of OP. */
7010 poly_int64 mem_offset = subreg_memory_offset (outermode,
7011 innermode, byte);
7012 poly_int64 op_mem_offset = subreg_memory_offset (op);
7013 poly_int64 final_offset = mem_offset + op_mem_offset;
7014
7015 /* See whether resulting subreg will be paradoxical. */
7016 if (!paradoxical_subreg_p (outermode, innermostmode))
7017 {
7018 /* Bail out in case resulting subreg would be incorrect. */
7019 if (maybe_lt (final_offset, 0)
7020 || maybe_ge (poly_uint64 (final_offset), innermostsize)
7021 || !multiple_p (final_offset, outersize))
7022 return NULL_RTX;
7023 }
7024 else
7025 {
7026 poly_int64 required_offset = subreg_memory_offset (outermode,
7027 innermostmode, 0);
7028 if (maybe_ne (final_offset, required_offset))
7029 return NULL_RTX;
7030 /* Paradoxical subregs always have byte offset 0. */
7031 final_offset = 0;
7032 }
7033
7034 /* Recurse for further possible simplifications. */
7035 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
7036 final_offset);
7037 if (newx)
7038 return newx;
7039 if (validate_subreg (outermode, innermostmode,
7040 SUBREG_REG (op), final_offset))
7041 {
7042 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
7043 if (SUBREG_PROMOTED_VAR_P (op)
7044 && SUBREG_PROMOTED_SIGN (op) >= 0
7045 && GET_MODE_CLASS (outermode) == MODE_INT
7046 && known_ge (outersize, innersize)
7047 && known_le (outersize, innermostsize)
7048 && subreg_lowpart_p (newx))
7049 {
7050 SUBREG_PROMOTED_VAR_P (newx) = 1;
7051 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
7052 }
7053 return newx;
7054 }
7055 return NULL_RTX;
7056 }
7057
7058 /* SUBREG of a hard register => just change the register number
7059 and/or mode. If the hard register is not valid in that mode,
7060 suppress this simplification. If the hard register is the stack,
7061 frame, or argument pointer, leave this as a SUBREG. */
7062
7063 if (REG_P (op) && HARD_REGISTER_P (op))
7064 {
7065 unsigned int regno, final_regno;
7066
7067 regno = REGNO (op);
7068 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
7069 if (HARD_REGISTER_NUM_P (final_regno))
7070 {
7071 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
7072 subreg_memory_offset (outermode,
7073 innermode, byte));
7074
7075 /* Propagate the original regno. We don't have any way to specify
7076 the offset inside the original regno, so do so only for the lowpart.
7077 The information is used only by alias analysis, which cannot
7078 handle partial registers anyway. */
7079
7080 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
7081 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
7082 return x;
7083 }
7084 }
7085
7086 /* If we have a SUBREG of a register that we are replacing and we are
7087 replacing it with a MEM, make a new MEM and try replacing the
7088 SUBREG with it. Don't do this if the MEM has a mode-dependent address
7089 or if we would be widening it. */
7090
7091 if (MEM_P (op)
7092 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
7093 /* Allow splitting of volatile memory references in case we don't
7094 have an instruction to move the whole thing. */
7095 && (! MEM_VOLATILE_P (op)
7096 || ! have_insn_for (SET, innermode))
7097 && known_le (outersize, innersize))
7098 return adjust_address_nv (op, outermode, byte);
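 /* A hedged illustration, assuming a 64-bit address and a non-volatile,
    non-mode-dependent memory reference:

      (subreg:SI (mem:DI (reg:DI A)) 4)

    would typically become (mem:SI (plus:DI (reg:DI A) (const_int 4)))
    via adjust_address_nv.  */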
7099
7100 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
7101 of two parts. */
7102 if (GET_CODE (op) == CONCAT
7103 || GET_CODE (op) == VEC_CONCAT)
7104 {
7105 poly_uint64 final_offset;
7106 rtx part, res;
7107
7108 machine_mode part_mode = GET_MODE (XEXP (op, 0));
7109 if (part_mode == VOIDmode)
7110 part_mode = GET_MODE_INNER (GET_MODE (op));
7111 poly_uint64 part_size = GET_MODE_SIZE (part_mode);
7112 if (known_lt (byte, part_size))
7113 {
7114 part = XEXP (op, 0);
7115 final_offset = byte;
7116 }
7117 else if (known_ge (byte, part_size))
7118 {
7119 part = XEXP (op, 1);
7120 final_offset = byte - part_size;
7121 }
7122 else
7123 return NULL_RTX;
7124
7125 if (maybe_gt (final_offset + outersize, part_size))
7126 return NULL_RTX;
7127
7128 part_mode = GET_MODE (part);
7129 if (part_mode == VOIDmode)
7130 part_mode = GET_MODE_INNER (GET_MODE (op));
7131 res = simplify_subreg (outermode, part, part_mode, final_offset);
7132 if (res)
7133 return res;
7134 if (validate_subreg (outermode, part_mode, part, final_offset))
7135 return gen_rtx_SUBREG (outermode, part, final_offset);
7136 return NULL_RTX;
7137 }
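 /* A hedged CONCAT example: taking the second half of a complex value,

      (subreg:SF (concat:SC (reg:SF A) (reg:SF B)) 4)

    selects the second part with a zero offset inside it, so the whole
    expression is expected to simplify to (reg:SF B).  */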
7138
7139 /* Simplify
7140 (subreg (vec_merge (X)
7141 (vector)
7142 (const_int ((1 << N) | M)))
7143 (N * sizeof (outermode)))
7144 to
7145 (subreg (X) (N * sizeof (outermode)))
7146 */
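 /* A hedged concrete instance of the rule above, with outermode SImode:

      (subreg:SI (vec_merge:V4SI X (reg:V4SI Y) (const_int 2)) 4)

    selects element 1, whose mask bit is set, so it is expected to
    become the corresponding subreg of X alone.  */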
7147 unsigned int idx;
7148 if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
7149 && idx < HOST_BITS_PER_WIDE_INT
7150 && GET_CODE (op) == VEC_MERGE
7151 && GET_MODE_INNER (innermode) == outermode
7152 && CONST_INT_P (XEXP (op, 2))
7153 && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
7154 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
7155
7156 /* A SUBREG resulting from a zero extension may fold to zero if
7157 it extracts higher bits than the ZERO_EXTEND's source provides. */
7158 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
7159 {
7160 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
7161 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
7162 return CONST0_RTX (outermode);
7163 }
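 /* A hedged example of the rule above, on a little-endian target:

      (subreg:SI (zero_extend:DI (reg:HI X)) 4)

    reads bits [32, 64) of the extension, all of which are zero, so the
    result is expected to be (const_int 0).  */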
7164
7165 scalar_int_mode int_outermode, int_innermode;
7166 if (is_a <scalar_int_mode> (outermode, &int_outermode)
7167 && is_a <scalar_int_mode> (innermode, &int_innermode)
7168 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
7169 {
7170 /* Handle polynomial integers. The upper bits of a paradoxical
7171 subreg are undefined, so this is safe regardless of whether
7172 we're truncating or extending. */
7173 if (CONST_POLY_INT_P (op))
7174 {
7175 poly_wide_int val
7176 = poly_wide_int::from (const_poly_int_value (op),
7177 GET_MODE_PRECISION (int_outermode),
7178 SIGNED);
7179 return immed_wide_int_const (val, int_outermode);
7180 }
7181
7182 if (GET_MODE_PRECISION (int_outermode)
7183 < GET_MODE_PRECISION (int_innermode))
7184 {
7185 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
7186 if (tem)
7187 return tem;
7188 }
7189 }
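 /* A hedged example of the truncation path: with byte offset 0 on a
    little-endian target,

      (subreg:QI (zero_extend:HI (reg:QI X)) 0)

    is a QImode truncation of a QImode-to-HImode extension, which
    simplify_truncation is expected to fold back to (reg:QI X).  */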
7190
7191 /* If OP is a vector comparison and the subreg is not changing the
7192 number of elements or the size of the elements, change the result
7193 of the comparison to the new mode. */
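 /* E.g. (a hedged sketch, assuming a target whose V4SF comparisons
    produce V4SF masks):

      (subreg:V4SI (eq:V4SF (reg:V4SF A) (reg:V4SF B)) 0)

    could be rewritten as (eq:V4SI (reg:V4SF A) (reg:V4SF B)), since
    both vector modes have four four-byte elements.  */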
7194 if (COMPARISON_P (op)
7195 && VECTOR_MODE_P (outermode)
7196 && VECTOR_MODE_P (innermode)
7197 && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
7198 && known_eq (GET_MODE_UNIT_SIZE (outermode),
7199 GET_MODE_UNIT_SIZE (innermode)))
7200 return simplify_gen_relational (GET_CODE (op), outermode, innermode,
7201 XEXP (op, 0), XEXP (op, 1));
7202 return NULL_RTX;
7203 }
7204
7205 /* Make a SUBREG operation or equivalent if it folds. */
7206
7207 rtx
7208 simplify_gen_subreg (machine_mode outermode, rtx op,
7209 machine_mode innermode, poly_uint64 byte)
7210 {
7211 rtx newx;
7212
7213 newx = simplify_subreg (outermode, op, innermode, byte);
7214 if (newx)
7215 return newx;
7216
7217 if (GET_CODE (op) == SUBREG
7218 || GET_CODE (op) == CONCAT
7219 || GET_MODE (op) == VOIDmode)
7220 return NULL_RTX;
7221
7222 if (validate_subreg (outermode, innermode, op, byte))
7223 return gen_rtx_SUBREG (outermode, op, byte);
7224
7225 return NULL_RTX;
7226 }
7227
7228 /* Generate a subreg that extracts the least significant part of EXPR
7229 (which has mode INNER_MODE) in mode OUTER_MODE. */
7230
7231 rtx
7232 lowpart_subreg (machine_mode outer_mode, rtx expr,
7233 machine_mode inner_mode)
7234 {
7235 return simplify_gen_subreg (outer_mode, expr, inner_mode,
7236 subreg_lowpart_offset (outer_mode, inner_mode));
7237 }
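 /* A hedged usage sketch: on a little-endian target,

      lowpart_subreg (SImode, x, DImode)

    returns (subreg:SI X 0) for a DImode pseudo X, returns X unchanged
    when INNER_MODE is already SImode, and returns NULL_RTX when no
    valid subreg can be formed.  */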
7238
7239 /* Simplify X, an rtx expression.
7240
7241 Return the simplified expression or NULL if no simplifications
7242 were possible.
7243
7244 This is the preferred entry point into the simplification routines;
7245 however, we still allow passes to call the more specific routines.
7246
7247 Right now GCC has three (yes, three) major bodies of RTL simplification
7248 code that need to be unified.
7249
7250 1. fold_rtx in cse.c. This code uses various CSE specific
7251 information to aid in RTL simplification.
7252
7253 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
7254 it uses combine specific information to aid in RTL
7255 simplification.
7256
7257 3. The routines in this file.
7258
7259
7260 Long term we want to only have one body of simplification code; to
7261 get to that state I recommend the following steps:
7262
7263 1. Pore over fold_rtx & simplify_rtx and move any simplifications
7264 which do not rely on pass-dependent state into these routines.
7265
7266 2. As code is moved by #1, change fold_rtx & simplify_rtx to
7267 use this routine whenever possible.
7268
7269 3. Allow for pass dependent state to be provided to these
7270 routines and add simplifications based on the pass dependent
7271 state. Remove code from cse.c & combine.c that becomes
7272 redundant/dead.
7273
7274 It will take time, but ultimately the compiler will be easier to
7275 maintain and improve. It's totally silly that when we add a
7276 simplification it needs to be added to 4 places (3 for RTL
7277 simplification and 1 for tree simplification). */
7278
7279 rtx
7280 simplify_rtx (const_rtx x)
7281 {
7282 const enum rtx_code code = GET_CODE (x);
7283 const machine_mode mode = GET_MODE (x);
7284
7285 switch (GET_RTX_CLASS (code))
7286 {
7287 case RTX_UNARY:
7288 return simplify_unary_operation (code, mode,
7289 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
7290 case RTX_COMM_ARITH:
7291 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7292 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
7293
7294 /* Fall through. */
7295
7296 case RTX_BIN_ARITH:
7297 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
7298
7299 case RTX_TERNARY:
7300 case RTX_BITFIELD_OPS:
7301 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
7302 XEXP (x, 0), XEXP (x, 1),
7303 XEXP (x, 2));
7304
7305 case RTX_COMPARE:
7306 case RTX_COMM_COMPARE:
7307 return simplify_relational_operation (code, mode,
7308 ((GET_MODE (XEXP (x, 0))
7309 != VOIDmode)
7310 ? GET_MODE (XEXP (x, 0))
7311 : GET_MODE (XEXP (x, 1))),
7312 XEXP (x, 0),
7313 XEXP (x, 1));
7314
7315 case RTX_EXTRA:
7316 if (code == SUBREG)
7317 return simplify_subreg (mode, SUBREG_REG (x),
7318 GET_MODE (SUBREG_REG (x)),
7319 SUBREG_BYTE (x));
7320 break;
7321
7322 case RTX_OBJ:
7323 if (code == LO_SUM)
7324 {
7325 /* Convert (lo_sum (high FOO) FOO) to FOO. */
7326 if (GET_CODE (XEXP (x, 0)) == HIGH
7327 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
7328 return XEXP (x, 1);
7329 }
7330 break;
7331
7332 default:
7333 break;
7334 }
7335 return NULL;
7336 }
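 /* A hedged usage sketch, for some SImode register REG:

      rtx x = gen_rtx_PLUS (SImode, reg, const0_rtx);
      rtx y = simplify_rtx (x);

    Y is expected to be REG itself, while expressions with no
    applicable simplification yield NULL.  */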
7337
7338 #if CHECKING_P
7339
7340 namespace selftest {
7341
7342 /* Make a unique pseudo REG of mode MODE for use by selftests. */
7343
7344 static rtx
7345 make_test_reg (machine_mode mode)
7346 {
7347 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
7348
7349 return gen_rtx_REG (mode, test_reg_num++);
7350 }
7351
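 /* Test scalar integer simplification identities in mode MODE, using
    fresh pseudo registers and small integer constants.  */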
7352 static void
7353 test_scalar_int_ops (machine_mode mode)
7354 {
7355 rtx op0 = make_test_reg (mode);
7356 rtx op1 = make_test_reg (mode);
7357 rtx six = GEN_INT (6);
7358
7359 rtx neg_op0 = simplify_gen_unary (NEG, mode, op0, mode);
7360 rtx not_op0 = simplify_gen_unary (NOT, mode, op0, mode);
7361 rtx bswap_op0 = simplify_gen_unary (BSWAP, mode, op0, mode);
7362
7363 rtx and_op0_op1 = simplify_gen_binary (AND, mode, op0, op1);
7364 rtx ior_op0_op1 = simplify_gen_binary (IOR, mode, op0, op1);
7365 rtx xor_op0_op1 = simplify_gen_binary (XOR, mode, op0, op1);
7366
7367 rtx and_op0_6 = simplify_gen_binary (AND, mode, op0, six);
7368 rtx and_op1_6 = simplify_gen_binary (AND, mode, op1, six);
7369
7370 /* Test some binary identities. */
7371 ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, op0, const0_rtx));
7372 ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, const0_rtx, op0));
7373 ASSERT_RTX_EQ (op0, simplify_gen_binary (MINUS, mode, op0, const0_rtx));
7374 ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, op0, const1_rtx));
7375 ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, const1_rtx, op0));
7376 ASSERT_RTX_EQ (op0, simplify_gen_binary (DIV, mode, op0, const1_rtx));
7377 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, constm1_rtx));
7378 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, constm1_rtx, op0));
7379 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, const0_rtx));
7380 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, const0_rtx, op0));
7381 ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, op0, const0_rtx));
7382 ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, const0_rtx, op0));
7383 ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFT, mode, op0, const0_rtx));
7384 ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATE, mode, op0, const0_rtx));
7385 ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFTRT, mode, op0, const0_rtx));
7386 ASSERT_RTX_EQ (op0, simplify_gen_binary (LSHIFTRT, mode, op0, const0_rtx));
7387 ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATERT, mode, op0, const0_rtx));
7388
7389 /* Test some self-inverse operations. */
7390 ASSERT_RTX_EQ (op0, simplify_gen_unary (NEG, mode, neg_op0, mode));
7391 ASSERT_RTX_EQ (op0, simplify_gen_unary (NOT, mode, not_op0, mode));
7392 ASSERT_RTX_EQ (op0, simplify_gen_unary (BSWAP, mode, bswap_op0, mode));
7393
7394 /* Test some reflexive operations. */
7395 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, op0));
7396 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, op0));
7397 ASSERT_RTX_EQ (op0, simplify_gen_binary (SMIN, mode, op0, op0));
7398 ASSERT_RTX_EQ (op0, simplify_gen_binary (SMAX, mode, op0, op0));
7399 ASSERT_RTX_EQ (op0, simplify_gen_binary (UMIN, mode, op0, op0));
7400 ASSERT_RTX_EQ (op0, simplify_gen_binary (UMAX, mode, op0, op0));
7401
7402 ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (MINUS, mode, op0, op0));
7403 ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (XOR, mode, op0, op0));
7404
7405 /* Test simplify_distributive_operation. */
7406 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, xor_op0_op1, six),
7407 simplify_gen_binary (XOR, mode, and_op0_6, and_op1_6));
7408 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, ior_op0_op1, six),
7409 simplify_gen_binary (IOR, mode, and_op0_6, and_op1_6));
7410 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, and_op0_op1, six),
7411 simplify_gen_binary (AND, mode, and_op0_6, and_op1_6));
7412 }
7413
7414 /* Verify some simplifications involving scalar expressions. */
7415
7416 static void
7417 test_scalar_ops ()
7418 {
7419 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7420 {
7421 machine_mode mode = (machine_mode) i;
7422 if (SCALAR_INT_MODE_P (mode) && mode != BImode)
7423 test_scalar_int_ops (mode);
7424 }
7425 }
7426
7427 /* Test vector simplifications involving VEC_DUPLICATE in which the
7428 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7429 register that holds one element of MODE. */
7430
7431 static void
7432 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
7433 {
7434 scalar_mode inner_mode = GET_MODE_INNER (mode);
7435 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7436 poly_uint64 nunits = GET_MODE_NUNITS (mode);
7437 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7438 {
7439 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
7440 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
7441 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
7442 ASSERT_RTX_EQ (duplicate,
7443 simplify_unary_operation (NOT, mode,
7444 duplicate_not, mode));
7445
7446 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7447 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
7448 ASSERT_RTX_EQ (duplicate,
7449 simplify_unary_operation (NEG, mode,
7450 duplicate_neg, mode));
7451
7452 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
7453 ASSERT_RTX_EQ (duplicate,
7454 simplify_binary_operation (PLUS, mode, duplicate,
7455 CONST0_RTX (mode)));
7456
7457 ASSERT_RTX_EQ (duplicate,
7458 simplify_binary_operation (MINUS, mode, duplicate,
7459 CONST0_RTX (mode)));
7460
7461 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
7462 simplify_binary_operation (MINUS, mode, duplicate,
7463 duplicate));
7464 }
7465
7466 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
7467 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
7468 ASSERT_RTX_PTR_EQ (scalar_reg,
7469 simplify_binary_operation (VEC_SELECT, inner_mode,
7470 duplicate, zero_par));
7471
7472 unsigned HOST_WIDE_INT const_nunits;
7473 if (nunits.is_constant (&const_nunits))
7474 {
7475 /* And again with the final element. */
7476 rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
7477 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
7478 ASSERT_RTX_PTR_EQ (scalar_reg,
7479 simplify_binary_operation (VEC_SELECT, inner_mode,
7480 duplicate, last_par));
7481
7482 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
7483 rtx vector_reg = make_test_reg (mode);
7484 for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
7485 {
7486 if (i >= HOST_BITS_PER_WIDE_INT)
7487 break;
7488 rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
7489 rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
7490 poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
7491 ASSERT_RTX_EQ (scalar_reg,
7492 simplify_gen_subreg (inner_mode, vm,
7493 mode, offset));
7494 }
7495 }
7496
7497 /* Test a scalar subreg of a VEC_DUPLICATE. */
7498 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
7499 ASSERT_RTX_EQ (scalar_reg,
7500 simplify_gen_subreg (inner_mode, duplicate,
7501 mode, offset));
7502
7503 machine_mode narrower_mode;
7504 if (maybe_ne (nunits, 2U)
7505 && multiple_p (nunits, 2)
7506 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
7507 && VECTOR_MODE_P (narrower_mode))
7508 {
7509 /* Test VEC_DUPLICATE of a vector. */
7510 rtx_vector_builder nbuilder (narrower_mode, 2, 1);
7511 nbuilder.quick_push (const0_rtx);
7512 nbuilder.quick_push (const1_rtx);
7513 rtx_vector_builder builder (mode, 2, 1);
7514 builder.quick_push (const0_rtx);
7515 builder.quick_push (const1_rtx);
7516 ASSERT_RTX_EQ (builder.build (),
7517 simplify_unary_operation (VEC_DUPLICATE, mode,
7518 nbuilder.build (),
7519 narrower_mode));
7520
7521 /* Test VEC_SELECT of a vector. */
7522 rtx vec_par
7523 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
7524 rtx narrower_duplicate
7525 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
7526 ASSERT_RTX_EQ (narrower_duplicate,
7527 simplify_binary_operation (VEC_SELECT, narrower_mode,
7528 duplicate, vec_par));
7529
7530 /* Test a vector subreg of a VEC_DUPLICATE. */
7531 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
7532 ASSERT_RTX_EQ (narrower_duplicate,
7533 simplify_gen_subreg (narrower_mode, duplicate,
7534 mode, offset));
7535 }
7536 }
7537
7538 /* Test vector simplifications involving VEC_SERIES in which the
7539 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7540 register that holds one element of MODE. */
7541
7542 static void
7543 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
7544 {
7545 /* Test unary cases with VEC_SERIES arguments. */
7546 scalar_mode inner_mode = GET_MODE_INNER (mode);
7547 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7548 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7549 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
7550 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
7551 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
7552 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
7553 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
7554 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
7555 neg_scalar_reg);
7556 ASSERT_RTX_EQ (series_0_r,
7557 simplify_unary_operation (NEG, mode, series_0_nr, mode));
7558 ASSERT_RTX_EQ (series_r_m1,
7559 simplify_unary_operation (NEG, mode, series_nr_1, mode));
7560 ASSERT_RTX_EQ (series_r_r,
7561 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
7562
7563 /* Test that a VEC_SERIES with a zero step is simplified away. */
7564 ASSERT_RTX_EQ (duplicate,
7565 simplify_binary_operation (VEC_SERIES, mode,
7566 scalar_reg, const0_rtx));
7567
7568 /* Test PLUS and MINUS with VEC_SERIES. */
7569 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
7570 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
7571 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
7572 ASSERT_RTX_EQ (series_r_r,
7573 simplify_binary_operation (PLUS, mode, series_0_r,
7574 duplicate));
7575 ASSERT_RTX_EQ (series_r_1,
7576 simplify_binary_operation (PLUS, mode, duplicate,
7577 series_0_1));
7578 ASSERT_RTX_EQ (series_r_m1,
7579 simplify_binary_operation (PLUS, mode, duplicate,
7580 series_0_m1));
7581 ASSERT_RTX_EQ (series_0_r,
7582 simplify_binary_operation (MINUS, mode, series_r_r,
7583 duplicate));
7584 ASSERT_RTX_EQ (series_r_m1,
7585 simplify_binary_operation (MINUS, mode, duplicate,
7586 series_0_1));
7587 ASSERT_RTX_EQ (series_r_1,
7588 simplify_binary_operation (MINUS, mode, duplicate,
7589 series_0_m1));
7590 ASSERT_RTX_EQ (series_0_m1,
7591 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
7592 constm1_rtx));
7593
7594 /* Test NEG on constant vector series. */
7595 ASSERT_RTX_EQ (series_0_m1,
7596 simplify_unary_operation (NEG, mode, series_0_1, mode));
7597 ASSERT_RTX_EQ (series_0_1,
7598 simplify_unary_operation (NEG, mode, series_0_m1, mode));
7599
7600 /* Test PLUS and MINUS on constant vector series. */
7601 rtx scalar2 = gen_int_mode (2, inner_mode);
7602 rtx scalar3 = gen_int_mode (3, inner_mode);
7603 rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
7604 rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
7605 rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
7606 ASSERT_RTX_EQ (series_1_1,
7607 simplify_binary_operation (PLUS, mode, series_0_1,
7608 CONST1_RTX (mode)));
7609 ASSERT_RTX_EQ (series_0_m1,
7610 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
7611 series_0_m1));
7612 ASSERT_RTX_EQ (series_1_3,
7613 simplify_binary_operation (PLUS, mode, series_1_1,
7614 series_0_2));
7615 ASSERT_RTX_EQ (series_0_1,
7616 simplify_binary_operation (MINUS, mode, series_1_1,
7617 CONST1_RTX (mode)));
7618 ASSERT_RTX_EQ (series_1_1,
7619 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
7620 series_0_m1));
7621 ASSERT_RTX_EQ (series_1_1,
7622 simplify_binary_operation (MINUS, mode, series_1_3,
7623 series_0_2));
7624
7625 /* Test MULT between constant vectors. */
7626 rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
7627 rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
7628 rtx scalar9 = gen_int_mode (9, inner_mode);
7629 rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
7630 ASSERT_RTX_EQ (series_0_2,
7631 simplify_binary_operation (MULT, mode, series_0_1, vec2));
7632 ASSERT_RTX_EQ (series_3_9,
7633 simplify_binary_operation (MULT, mode, vec3, series_1_3));
7634 if (!GET_MODE_NUNITS (mode).is_constant ())
7635 ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
7636 series_0_1));
7637
7638 /* Test ASHIFT between constant vectors. */
7639 ASSERT_RTX_EQ (series_0_2,
7640 simplify_binary_operation (ASHIFT, mode, series_0_1,
7641 CONST1_RTX (mode)));
7642 if (!GET_MODE_NUNITS (mode).is_constant ())
7643 ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
7644 series_0_1));
7645 }
7646
7647 /* Verify simplify_merge_mask works correctly. */
7648
7649 static void
7650 test_vec_merge (machine_mode mode)
7651 {
7652 rtx op0 = make_test_reg (mode);
7653 rtx op1 = make_test_reg (mode);
7654 rtx op2 = make_test_reg (mode);
7655 rtx op3 = make_test_reg (mode);
7656 rtx op4 = make_test_reg (mode);
7657 rtx op5 = make_test_reg (mode);
7658 rtx mask1 = make_test_reg (SImode);
7659 rtx mask2 = make_test_reg (SImode);
7660 rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
7661 rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
7662 rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
7663
7664 /* Simple vec_merge. */
7665 ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
7666 ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
7667 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
7668 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
7669
7670 /* Nested vec_merge.
7671 It's tempting to make this simplify right down to opN, but we don't
7672 because all the simplify_* functions assume that the operands have
7673 already been simplified. */
7674 rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
7675 ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
7676 ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
7677
7678 /* Intermediate unary op. */
7679 rtx unop = gen_rtx_NOT (mode, vm1);
7680 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
7681 simplify_merge_mask (unop, mask1, 0));
7682 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
7683 simplify_merge_mask (unop, mask1, 1));
7684
7685 /* Intermediate binary op. */
7686 rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
7687 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
7688 simplify_merge_mask (binop, mask1, 0));
7689 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
7690 simplify_merge_mask (binop, mask1, 1));
7691
7692 /* Intermediate ternary op. */
7693 rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
7694 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
7695 simplify_merge_mask (tenop, mask1, 0));
7696 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
7697 simplify_merge_mask (tenop, mask1, 1));
7698
7699 /* Side effects. */
7700 rtx badop0 = gen_rtx_PRE_INC (mode, op0);
7701 rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
7702 ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
7703 ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
7704
7705 /* Called indirectly. */
7706 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
7707 simplify_rtx (nvm));
7708 }
7709
7710 /* Test subregs of integer vector constant X, trying elements in
7711 the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
7712 where NELTS is the number of elements in X. Subregs involving
7713 elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail. */
7714
7715 static void
7716 test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
7717 unsigned int first_valid = 0)
7718 {
7719 machine_mode inner_mode = GET_MODE (x);
7720 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7721
7722 for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
7723 {
7724 machine_mode outer_mode = (machine_mode) modei;
7725 if (!VECTOR_MODE_P (outer_mode))
7726 continue;
7727
7728 unsigned int outer_nunits;
7729 if (GET_MODE_INNER (outer_mode) == int_mode
7730 && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
7731 && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
7732 {
7733 /* Test subregs in which the outer mode is a smaller,
7734 constant-sized vector of the same element type. */
7735 unsigned int limit
7736 = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
7737 for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
7738 {
7739 rtx expected = NULL_RTX;
7740 if (elt >= first_valid)
7741 {
7742 rtx_vector_builder builder (outer_mode, outer_nunits, 1);
7743 for (unsigned int i = 0; i < outer_nunits; ++i)
7744 builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
7745 expected = builder.build ();
7746 }
7747 poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
7748 ASSERT_RTX_EQ (expected,
7749 simplify_subreg (outer_mode, x,
7750 inner_mode, byte));
7751 }
7752 }
7753 else if (known_eq (GET_MODE_SIZE (outer_mode),
7754 GET_MODE_SIZE (inner_mode))
7755 && known_eq (elt_bias, 0U)
7756 && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
7757 || known_eq (GET_MODE_BITSIZE (outer_mode),
7758 GET_MODE_NUNITS (outer_mode)))
7759 && (!FLOAT_MODE_P (outer_mode)
7760 || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
7761 == GET_MODE_UNIT_PRECISION (outer_mode)))
7762 && (GET_MODE_SIZE (inner_mode).is_constant ()
7763 || !CONST_VECTOR_STEPPED_P (x)))
7764 {
7765 /* Try converting to OUTER_MODE and back. */
7766 rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
7767 ASSERT_TRUE (outer_x != NULL_RTX);
7768 ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
7769 outer_mode, 0));
7770 }
7771 }
7772
7773 if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
7774 {
7775 /* Test each byte in the element range. */
7776 unsigned int limit
7777 = constant_lower_bound (GET_MODE_SIZE (inner_mode));
7778 for (unsigned int i = 0; i < limit; ++i)
7779 {
7780 unsigned int elt = i / GET_MODE_SIZE (int_mode);
7781 rtx expected = NULL_RTX;
7782 if (elt >= first_valid)
7783 {
7784 unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
7785 if (BYTES_BIG_ENDIAN)
7786 byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
7787 rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
7788 wide_int shifted_elt
7789 = wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
7790 expected = immed_wide_int_const (shifted_elt, QImode);
7791 }
7792 poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
7793 ASSERT_RTX_EQ (expected,
7794 simplify_subreg (QImode, x, inner_mode, byte));
7795 }
7796 }
7797 }
7798
7799 /* Test constant subregs of integer vector mode INNER_MODE, using 1
7800 element per pattern. */
7801
7802 static void
7803 test_vector_subregs_repeating (machine_mode inner_mode)
7804 {
7805 poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
7806 unsigned int min_nunits = constant_lower_bound (nunits);
7807 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7808 unsigned int count = gcd (min_nunits, 8);
7809
7810 rtx_vector_builder builder (inner_mode, count, 1);
7811 for (unsigned int i = 0; i < count; ++i)
7812 builder.quick_push (gen_int_mode (8 - i, int_mode));
7813 rtx x = builder.build ();
7814
7815 test_vector_subregs_modes (x);
7816 if (!nunits.is_constant ())
7817 test_vector_subregs_modes (x, nunits - min_nunits);
7818 }
7819
7820 /* Test constant subregs of integer vector mode INNER_MODE, using 2
7821 elements per pattern. */
7822
7823 static void
7824 test_vector_subregs_fore_back (machine_mode inner_mode)
7825 {
7826 poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
7827 unsigned int min_nunits = constant_lower_bound (nunits);
7828 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7829 unsigned int count = gcd (min_nunits, 4);
7830
7831 rtx_vector_builder builder (inner_mode, count, 2);
7832 for (unsigned int i = 0; i < count; ++i)
7833 builder.quick_push (gen_int_mode (i, int_mode));
7834 for (unsigned int i = 0; i < count; ++i)
7835 builder.quick_push (gen_int_mode (-(int) i, int_mode));
7836 rtx x = builder.build ();
7837
7838 test_vector_subregs_modes (x);
7839 if (!nunits.is_constant ())
7840 test_vector_subregs_modes (x, nunits - min_nunits, count);
7841 }
7842
7843 /* Test constant subregs of integer vector mode INNER_MODE, using 3
7844 elements per pattern. */
7845
7846 static void
7847 test_vector_subregs_stepped (machine_mode inner_mode)
7848 {
7849 /* Build { 0, 1, 2, 3, ... }. */
7850 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7851 rtx_vector_builder builder (inner_mode, 1, 3);
7852 for (unsigned int i = 0; i < 3; ++i)
7853 builder.quick_push (gen_int_mode (i, int_mode));
7854 rtx x = builder.build ();
7855
7856 test_vector_subregs_modes (x);
7857 }
7858
7859 /* Test constant subregs of integer vector mode INNER_MODE. */
7860
7861 static void
7862 test_vector_subregs (machine_mode inner_mode)
7863 {
7864 test_vector_subregs_repeating (inner_mode);
7865 test_vector_subregs_fore_back (inner_mode);
7866 test_vector_subregs_stepped (inner_mode);
7867 }
7868
7869 /* Verify some simplifications involving vectors. */
7870
7871 static void
7872 test_vector_ops ()
7873 {
7874 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7875 {
7876 machine_mode mode = (machine_mode) i;
7877 if (VECTOR_MODE_P (mode))
7878 {
7879 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
7880 test_vector_ops_duplicate (mode, scalar_reg);
7881 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
7882 && maybe_gt (GET_MODE_NUNITS (mode), 2))
7883 {
7884 test_vector_ops_series (mode, scalar_reg);
7885 test_vector_subregs (mode);
7886 }
7887 test_vec_merge (mode);
7888 }
7889 }
7890 }
7891
7892 template<unsigned int N>
7893 struct simplify_const_poly_int_tests
7894 {
7895 static void run ();
7896 };
7897
7898 template<>
7899 struct simplify_const_poly_int_tests<1>
7900 {
7901 static void run () {}
7902 };
7903
7904 /* Test various CONST_POLY_INT properties. */
7905
7906 template<unsigned int N>
7907 void
7908 simplify_const_poly_int_tests<N>::run ()
7909 {
7910 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
7911 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
7912 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
7913 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
7914 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
7915 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
7916 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
7917 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
7918 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
7919 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
7920 rtx two = GEN_INT (2);
7921 rtx six = GEN_INT (6);
7922 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
7923
7924 /* These tests only try limited operation combinations. Fuller arithmetic
7925 testing is done directly on poly_ints. */
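 /* As a reading aid (a hedged note, not original text): poly_int64 (A, B)
    denotes A + B * X for the target's runtime length parameter X,
    truncated to the given mode, so e.g. X1 + X2 is expected to wrap
    to X3 in QImode arithmetic.  */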
7926 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
7927 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
7928 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
7929 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
7930 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
7931 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
7932 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
7933 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
7934 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
7935 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
7936 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
7937 }
7938
7939 /* Run all of the selftests within this file. */
7940
7941 void
7942 simplify_rtx_c_tests ()
7943 {
7944 test_scalar_ops ();
7945 test_vector_ops ();
7946 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
7947 }
7948
7949 } // namespace selftest
7950
7951 #endif /* CHECKING_P */