Implement more rtx vector folds on variable-length vectors
gcc/simplify-rtx.c (gcc.git)
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
39
40 /* Simplification and canonicalization of RTL. */
41
42 /* Much code operates on (low, high) pairs; the low value is an
43 unsigned wide int, the high value a signed wide int. We
44 occasionally need to sign extend from low to high as if low were a
45 signed wide int. */
46 #define HWI_SIGN_EXTEND(low) \
47 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
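/* For example, HWI_SIGN_EXTEND (5) is HOST_WIDE_INT_0, so the pair
   (5, HWI_SIGN_EXTEND (5)) represents the double-width value 5, while
   HWI_SIGN_EXTEND (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))
   is HOST_WIDE_INT_M1, the sign-extended high half of a value whose
   low word has its top bit set.  */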
48
49 static rtx neg_const_int (machine_mode, const_rtx);
50 static bool plus_minus_operand_p (const_rtx);
51 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
52 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
53 rtx, rtx);
54 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
55 machine_mode, rtx, rtx);
56 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
57 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
58 rtx, rtx, rtx, rtx);
59 \f
60 /* Negate a CONST_INT rtx. */
61 static rtx
62 neg_const_int (machine_mode mode, const_rtx i)
63 {
64 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65
66 if (!HWI_COMPUTABLE_MODE_P (mode)
67 && val == UINTVAL (i))
68 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
69 mode);
70 return gen_int_mode (val, mode);
71 }
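/* For instance, neg_const_int (SImode, GEN_INT (5)) yields (const_int -5).
   The HWI_COMPUTABLE_MODE_P test guards the one case a plain negation
   cannot express: in a mode wider than HOST_WIDE_INT, -UINTVAL (i) can
   wrap back to UINTVAL (i) (I being zero or the minimum HOST_WIDE_INT),
   so the wide-int path in simplify_const_unary_operation is used
   instead.  */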
72
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
75
76 bool
77 mode_signbit_p (machine_mode mode, const_rtx x)
78 {
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
81 scalar_int_mode int_mode;
82
83 if (!is_int_mode (mode, &int_mode))
84 return false;
85
86 width = GET_MODE_PRECISION (int_mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 #if TARGET_SUPPORTS_WIDE_INT
94 else if (CONST_WIDE_INT_P (x))
95 {
96 unsigned int i;
97 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
98 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
99 return false;
100 for (i = 0; i < elts - 1; i++)
101 if (CONST_WIDE_INT_ELT (x, i) != 0)
102 return false;
103 val = CONST_WIDE_INT_ELT (x, elts - 1);
104 width %= HOST_BITS_PER_WIDE_INT;
105 if (width == 0)
106 width = HOST_BITS_PER_WIDE_INT;
107 }
108 #else
109 else if (width <= HOST_BITS_PER_DOUBLE_INT
110 && CONST_DOUBLE_AS_INT_P (x)
111 && CONST_DOUBLE_LOW (x) == 0)
112 {
113 val = CONST_DOUBLE_HIGH (x);
114 width -= HOST_BITS_PER_WIDE_INT;
115 }
116 #endif
117 else
118 /* X is not an integer constant. */
119 return false;
120
121 if (width < HOST_BITS_PER_WIDE_INT)
122 val &= (HOST_WIDE_INT_1U << width) - 1;
123 return val == (HOST_WIDE_INT_1U << (width - 1));
124 }
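/* For example, with 32-bit SImode this returns true for
   gen_int_mode (HOST_WIDE_INT_1U << 31, SImode) and false for
   const1_rtx or constm1_rtx, since only the value 0x80000000
   (after masking to the mode's precision) qualifies.  */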
125
126 /* Test whether VAL is equal to the most significant bit of mode MODE
127 (after masking with the mode mask of MODE). Returns false if the
128 precision of MODE is too large to handle. */
129
130 bool
131 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 {
133 unsigned int width;
134 scalar_int_mode int_mode;
135
136 if (!is_int_mode (mode, &int_mode))
137 return false;
138
139 width = GET_MODE_PRECISION (int_mode);
140 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
141 return false;
142
143 val &= GET_MODE_MASK (int_mode);
144 return val == (HOST_WIDE_INT_1U << (width - 1));
145 }
146
147 /* Test whether the most significant bit of mode MODE is set in VAL.
148 Returns false if the precision of MODE is too large to handle. */
149 bool
150 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 {
152 unsigned int width;
153
154 scalar_int_mode int_mode;
155 if (!is_int_mode (mode, &int_mode))
156 return false;
157
158 width = GET_MODE_PRECISION (int_mode);
159 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
160 return false;
161
162 val &= HOST_WIDE_INT_1U << (width - 1);
163 return val != 0;
164 }
165
166 /* Test whether the most significant bit of mode MODE is clear in VAL.
167 Returns false if the precision of MODE is too large to handle. */
168 bool
169 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 {
171 unsigned int width;
172
173 scalar_int_mode int_mode;
174 if (!is_int_mode (mode, &int_mode))
175 return false;
176
177 width = GET_MODE_PRECISION (int_mode);
178 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
179 return false;
180
181 val &= HOST_WIDE_INT_1U << (width - 1);
182 return val == 0;
183 }
184 \f
185 /* Make a binary operation by properly ordering the operands and
186 seeing if the expression folds. */
187
188 rtx
189 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
190 rtx op1)
191 {
192 rtx tem;
193
194 /* If this simplifies, do it. */
195 tem = simplify_binary_operation (code, mode, op0, op1);
196 if (tem)
197 return tem;
198
199 /* Put complex operands first and constants second if commutative. */
200 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
201 && swap_commutative_operands_p (op0, op1))
202 std::swap (op0, op1);
203
204 return gen_rtx_fmt_ee (code, mode, op0, op1);
205 }
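/* As an illustration, simplify_gen_binary (PLUS, SImode, const1_rtx, x)
   for some non-constant register X first tries to fold; failing that,
   the commutative-operand swap places the constant second, producing
   (plus:SI x (const_int 1)) rather than (plus:SI (const_int 1) x).  */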
206 \f
207 /* If X is a MEM referencing the constant pool, return the real value.
208 Otherwise return X. */
209 rtx
210 avoid_constant_pool_reference (rtx x)
211 {
212 rtx c, tmp, addr;
213 machine_mode cmode;
214 poly_int64 offset = 0;
215
216 switch (GET_CODE (x))
217 {
218 case MEM:
219 break;
220
221 case FLOAT_EXTEND:
222 /* Handle float extensions of constant pool references. */
223 tmp = XEXP (x, 0);
224 c = avoid_constant_pool_reference (tmp);
225 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
226 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
227 GET_MODE (x));
228 return x;
229
230 default:
231 return x;
232 }
233
234 if (GET_MODE (x) == BLKmode)
235 return x;
236
237 addr = XEXP (x, 0);
238
239 /* Call target hook to avoid the effects of -fpic etc.... */
240 addr = targetm.delegitimize_address (addr);
241
242 /* Split the address into a base and integer offset. */
243 addr = strip_offset (addr, &offset);
244
245 if (GET_CODE (addr) == LO_SUM)
246 addr = XEXP (addr, 1);
247
248 /* If this is a constant pool reference, we can turn it into its
249 constant and hope that simplifications happen. */
250 if (GET_CODE (addr) == SYMBOL_REF
251 && CONSTANT_POOL_ADDRESS_P (addr))
252 {
253 c = get_pool_constant (addr);
254 cmode = get_pool_mode (addr);
255
256 /* If we're accessing the constant in a different mode than it was
257 originally stored, attempt to fix that up via subreg simplifications.
258 If that fails we have no choice but to return the original memory. */
259 if (known_eq (offset, 0) && cmode == GET_MODE (x))
260 return c;
261 else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
262 {
263 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
264 if (tem && CONSTANT_P (tem))
265 return tem;
266 }
267 }
268
269 return x;
270 }
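/* For example, if X is a (mem:DF ...) whose address resolves to a
   constant-pool entry holding a DFmode CONST_DOUBLE, a zero offset with
   matching modes returns the CONST_DOUBLE itself; a mismatched mode or a
   nonzero in-range offset instead goes through simplify_subreg, and the
   original MEM is returned if no constant results.  */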
271 \f
272 /* Simplify a MEM based on its attributes. This is the default
273 delegitimize_address target hook, and it's recommended that every
274 overrider call it. */
275
276 rtx
277 delegitimize_mem_from_attrs (rtx x)
278 {
279 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
280 use their base addresses as equivalent. */
281 if (MEM_P (x)
282 && MEM_EXPR (x)
283 && MEM_OFFSET_KNOWN_P (x))
284 {
285 tree decl = MEM_EXPR (x);
286 machine_mode mode = GET_MODE (x);
287 poly_int64 offset = 0;
288
289 switch (TREE_CODE (decl))
290 {
291 default:
292 decl = NULL;
293 break;
294
295 case VAR_DECL:
296 break;
297
298 case ARRAY_REF:
299 case ARRAY_RANGE_REF:
300 case COMPONENT_REF:
301 case BIT_FIELD_REF:
302 case REALPART_EXPR:
303 case IMAGPART_EXPR:
304 case VIEW_CONVERT_EXPR:
305 {
306 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
307 tree toffset;
308 int unsignedp, reversep, volatilep = 0;
309
310 decl
311 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
312 &unsignedp, &reversep, &volatilep);
313 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
314 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
315 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
316 decl = NULL;
317 else
318 offset += bytepos + toffset_val;
319 break;
320 }
321 }
322
323 if (decl
324 && mode == GET_MODE (x)
325 && VAR_P (decl)
326 && (TREE_STATIC (decl)
327 || DECL_THREAD_LOCAL_P (decl))
328 && DECL_RTL_SET_P (decl)
329 && MEM_P (DECL_RTL (decl)))
330 {
331 rtx newx;
332
333 offset += MEM_OFFSET (x);
334
335 newx = DECL_RTL (decl);
336
337 if (MEM_P (newx))
338 {
339 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
340 poly_int64 n_offset, o_offset;
341
342 /* Avoid creating a new MEM needlessly if we already had
343 the same address. We do if there's no OFFSET and the
344 old address X is identical to NEWX, or if X is of the
345 form (plus NEWX OFFSET), or the NEWX is of the form
346 (plus Y (const_int Z)) and X is that with the offset
347 added: (plus Y (const_int Z+OFFSET)). */
348 n = strip_offset (n, &n_offset);
349 o = strip_offset (o, &o_offset);
350 if (!(known_eq (o_offset, n_offset + offset)
351 && rtx_equal_p (o, n)))
352 x = adjust_address_nv (newx, mode, offset);
353 }
354 else if (GET_MODE (x) == GET_MODE (newx)
355 && known_eq (offset, 0))
356 x = newx;
357 }
358 }
359
360 return x;
361 }
362 \f
363 /* Make a unary operation by first seeing if it folds and otherwise making
364 the specified operation. */
365
366 rtx
367 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
368 machine_mode op_mode)
369 {
370 rtx tem;
371
372 /* If this simplifies, use it. */
373 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
374 return tem;
375
376 return gen_rtx_fmt_e (code, mode, op);
377 }
378
379 /* Likewise for ternary operations. */
380
381 rtx
382 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
383 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
384 {
385 rtx tem;
386
387 /* If this simplifies, use it. */
388 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
389 op0, op1, op2)) != 0)
390 return tem;
391
392 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
393 }
394
395 /* Likewise, for relational operations.
396 CMP_MODE specifies mode comparison is done in. */
397
398 rtx
399 simplify_gen_relational (enum rtx_code code, machine_mode mode,
400 machine_mode cmp_mode, rtx op0, rtx op1)
401 {
402 rtx tem;
403
404 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
405 op0, op1)) != 0)
406 return tem;
407
408 return gen_rtx_fmt_ee (code, mode, op0, op1);
409 }
410 \f
411 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
412 and simplify the result. If FN is non-NULL, call this callback on each
413 X, if it returns non-NULL, replace X with its return value and simplify the
414 result. */
415
416 rtx
417 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
418 rtx (*fn) (rtx, const_rtx, void *), void *data)
419 {
420 enum rtx_code code = GET_CODE (x);
421 machine_mode mode = GET_MODE (x);
422 machine_mode op_mode;
423 const char *fmt;
424 rtx op0, op1, op2, newx, op;
425 rtvec vec, newvec;
426 int i, j;
427
428 if (__builtin_expect (fn != NULL, 0))
429 {
430 newx = fn (x, old_rtx, data);
431 if (newx)
432 return newx;
433 }
434 else if (rtx_equal_p (x, old_rtx))
435 return copy_rtx ((rtx) data);
436
437 switch (GET_RTX_CLASS (code))
438 {
439 case RTX_UNARY:
440 op0 = XEXP (x, 0);
441 op_mode = GET_MODE (op0);
442 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
443 if (op0 == XEXP (x, 0))
444 return x;
445 return simplify_gen_unary (code, mode, op0, op_mode);
446
447 case RTX_BIN_ARITH:
448 case RTX_COMM_ARITH:
449 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
450 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
451 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
452 return x;
453 return simplify_gen_binary (code, mode, op0, op1);
454
455 case RTX_COMPARE:
456 case RTX_COMM_COMPARE:
457 op0 = XEXP (x, 0);
458 op1 = XEXP (x, 1);
459 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
460 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
461 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
462 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
463 return x;
464 return simplify_gen_relational (code, mode, op_mode, op0, op1);
465
466 case RTX_TERNARY:
467 case RTX_BITFIELD_OPS:
468 op0 = XEXP (x, 0);
469 op_mode = GET_MODE (op0);
470 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
471 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
472 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
473 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
474 return x;
475 if (op_mode == VOIDmode)
476 op_mode = GET_MODE (op0);
477 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
478
479 case RTX_EXTRA:
480 if (code == SUBREG)
481 {
482 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
483 if (op0 == SUBREG_REG (x))
484 return x;
485 op0 = simplify_gen_subreg (GET_MODE (x), op0,
486 GET_MODE (SUBREG_REG (x)),
487 SUBREG_BYTE (x));
488 return op0 ? op0 : x;
489 }
490 break;
491
492 case RTX_OBJ:
493 if (code == MEM)
494 {
495 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
496 if (op0 == XEXP (x, 0))
497 return x;
498 return replace_equiv_address_nv (x, op0);
499 }
500 else if (code == LO_SUM)
501 {
502 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
503 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
504
505 /* (lo_sum (high x) y) -> y where x and y have the same base. */
506 if (GET_CODE (op0) == HIGH)
507 {
508 rtx base0, base1, offset0, offset1;
509 split_const (XEXP (op0, 0), &base0, &offset0);
510 split_const (op1, &base1, &offset1);
511 if (rtx_equal_p (base0, base1))
512 return op1;
513 }
514
515 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
516 return x;
517 return gen_rtx_LO_SUM (mode, op0, op1);
518 }
519 break;
520
521 default:
522 break;
523 }
524
525 newx = x;
526 fmt = GET_RTX_FORMAT (code);
527 for (i = 0; fmt[i]; i++)
528 switch (fmt[i])
529 {
530 case 'E':
531 vec = XVEC (x, i);
532 newvec = XVEC (newx, i);
533 for (j = 0; j < GET_NUM_ELEM (vec); j++)
534 {
535 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
536 old_rtx, fn, data);
537 if (op != RTVEC_ELT (vec, j))
538 {
539 if (newvec == vec)
540 {
541 newvec = shallow_copy_rtvec (vec);
542 if (x == newx)
543 newx = shallow_copy_rtx (x);
544 XVEC (newx, i) = newvec;
545 }
546 RTVEC_ELT (newvec, j) = op;
547 }
548 }
549 break;
550
551 case 'e':
552 if (XEXP (x, i))
553 {
554 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
555 if (op != XEXP (x, i))
556 {
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XEXP (newx, i) = op;
560 }
561 }
562 break;
563 }
564 return newx;
565 }
566
567 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
568 resulting RTX. Return a new RTX which is as simplified as possible. */
569
570 rtx
571 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
572 {
573 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
574 }
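/* A typical use: simplify_replace_rtx (y, reg, const0_rtx) on
   Y = (plus:SI (reg:SI 100) (const_int 4)) with REG = (reg:SI 100)
   substitutes the constant and lets simplify_gen_binary fold the
   result down to (const_int 4).  */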
575 \f
576 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
577 Only handle cases where the truncated value is inherently an rvalue.
578
579 RTL provides two ways of truncating a value:
580
581 1. a lowpart subreg. This form is only a truncation when both
582 the outer and inner modes (here MODE and OP_MODE respectively)
583 are scalar integers, and only then when the subreg is used as
584 an rvalue.
585
586 It is only valid to form such truncating subregs if the
587 truncation requires no action by the target. The onus for
588 proving this is on the creator of the subreg -- e.g. the
589 caller to simplify_subreg or simplify_gen_subreg -- and typically
590 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
591
592 2. a TRUNCATE. This form handles both scalar and compound integers.
593
594 The first form is preferred where valid. However, the TRUNCATE
595 handling in simplify_unary_operation turns the second form into the
596 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
597 so it is generally safe to form rvalue truncations using:
598
599 simplify_gen_unary (TRUNCATE, ...)
600
601 and leave simplify_unary_operation to work out which representation
602 should be used.
603
604 Because of the proof requirements on (1), simplify_truncation must
605 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
606 regardless of whether the outer truncation came from a SUBREG or a
607 TRUNCATE. For example, if the caller has proven that an SImode
608 truncation of:
609
610 (and:DI X Y)
611
612 is a no-op and can be represented as a subreg, it does not follow
613 that SImode truncations of X and Y are also no-ops. On a target
614 like 64-bit MIPS that requires SImode values to be stored in
615 sign-extended form, an SImode truncation of:
616
617 (and:DI (reg:DI X) (const_int 63))
618
619 is trivially a no-op because only the lower 6 bits can be set.
620 However, X is still an arbitrary 64-bit number and so we cannot
621 assume that truncating it too is a no-op. */
622
623 static rtx
624 simplify_truncation (machine_mode mode, rtx op,
625 machine_mode op_mode)
626 {
627 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
628 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
629 scalar_int_mode int_mode, int_op_mode, subreg_mode;
630
631 gcc_assert (precision <= op_precision);
632
633 /* Optimize truncations of zero and sign extended values. */
634 if (GET_CODE (op) == ZERO_EXTEND
635 || GET_CODE (op) == SIGN_EXTEND)
636 {
637 /* There are three possibilities. If MODE is the same as the
638 origmode, we can omit both the extension and the subreg.
639 If MODE is not larger than the origmode, we can apply the
640 truncation without the extension. Finally, if the outermode
641 is larger than the origmode, we can just extend to the appropriate
642 mode. */
643 machine_mode origmode = GET_MODE (XEXP (op, 0));
644 if (mode == origmode)
645 return XEXP (op, 0);
646 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
647 return simplify_gen_unary (TRUNCATE, mode,
648 XEXP (op, 0), origmode);
649 else
650 return simplify_gen_unary (GET_CODE (op), mode,
651 XEXP (op, 0), origmode);
652 }
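      /* Concretely: (truncate:SI (sign_extend:DI (x:SI))) is X itself,
	 (truncate:HI (sign_extend:DI (x:SI))) becomes (truncate:HI (x:SI)),
	 and (truncate:SI (zero_extend:DI (x:HI))) becomes
	 (zero_extend:SI (x:HI)).  */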
653
654 /* If the machine can perform operations in the truncated mode, distribute
655 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
656 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
657 if (1
658 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
659 && (GET_CODE (op) == PLUS
660 || GET_CODE (op) == MINUS
661 || GET_CODE (op) == MULT))
662 {
663 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
664 if (op0)
665 {
666 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
667 if (op1)
668 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
669 }
670 }
671
672 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
673 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
674 the outer subreg is effectively a truncation to the original mode. */
675 if ((GET_CODE (op) == LSHIFTRT
676 || GET_CODE (op) == ASHIFTRT)
677 /* Ensure that OP_MODE is at least twice as wide as MODE
678 to avoid the possibility that an outer LSHIFTRT shifts by more
679 than the sign extension's sign_bit_copies and introduces zeros
680 into the high bits of the result. */
681 && 2 * precision <= op_precision
682 && CONST_INT_P (XEXP (op, 1))
683 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
684 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
685 && UINTVAL (XEXP (op, 1)) < precision)
686 return simplify_gen_binary (ASHIFTRT, mode,
687 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
688
689 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
690 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
691 the outer subreg is effectively a truncation to the original mode. */
692 if ((GET_CODE (op) == LSHIFTRT
693 || GET_CODE (op) == ASHIFTRT)
694 && CONST_INT_P (XEXP (op, 1))
695 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
696 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
697 && UINTVAL (XEXP (op, 1)) < precision)
698 return simplify_gen_binary (LSHIFTRT, mode,
699 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
700
701 /* Likewise (truncate:QI (ashift:SI ({zero,sign}_extend:SI (x:QI)) C)) into
702 (ashift:QI (x:QI) C), where C is a suitable small constant and
703 the outer subreg is effectively a truncation to the original mode. */
704 if (GET_CODE (op) == ASHIFT
705 && CONST_INT_P (XEXP (op, 1))
706 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
707 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
708 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
709 && UINTVAL (XEXP (op, 1)) < precision)
710 return simplify_gen_binary (ASHIFT, mode,
711 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
712
713 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
714 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
715 and C2. */
716 if (GET_CODE (op) == AND
717 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
718 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
719 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
720 && CONST_INT_P (XEXP (op, 1)))
721 {
722 rtx op0 = (XEXP (XEXP (op, 0), 0));
723 rtx shift_op = XEXP (XEXP (op, 0), 1);
724 rtx mask_op = XEXP (op, 1);
725 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
726 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
727
728 if (shift < precision
729 /* If doing this transform works for an X with all bits set,
730 it works for any X. */
731 && ((GET_MODE_MASK (mode) >> shift) & mask)
732 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
733 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
734 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
735 {
736 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
737 return simplify_gen_binary (AND, mode, op0, mask_op);
738 }
739 }
740
741 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
742 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
743 changing len. */
744 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
745 && REG_P (XEXP (op, 0))
746 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
747 && CONST_INT_P (XEXP (op, 1))
748 && CONST_INT_P (XEXP (op, 2)))
749 {
750 rtx op0 = XEXP (op, 0);
751 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
752 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
753 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
754 {
755 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
756 if (op0)
757 {
758 pos -= op_precision - precision;
759 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
760 XEXP (op, 1), GEN_INT (pos));
761 }
762 }
763 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
764 {
765 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
766 if (op0)
767 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
768 XEXP (op, 1), XEXP (op, 2));
769 }
770 }
771
772 /* Recognize a word extraction from a multi-word subreg. */
773 if ((GET_CODE (op) == LSHIFTRT
774 || GET_CODE (op) == ASHIFTRT)
775 && SCALAR_INT_MODE_P (mode)
776 && SCALAR_INT_MODE_P (op_mode)
777 && precision >= BITS_PER_WORD
778 && 2 * precision <= op_precision
779 && CONST_INT_P (XEXP (op, 1))
780 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
781 && UINTVAL (XEXP (op, 1)) < op_precision)
782 {
783 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
784 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
785 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
786 (WORDS_BIG_ENDIAN
787 ? byte - shifted_bytes
788 : byte + shifted_bytes));
789 }
790
791 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
792 and try replacing the TRUNCATE and shift with it. Don't do this
793 if the MEM has a mode-dependent address. */
794 if ((GET_CODE (op) == LSHIFTRT
795 || GET_CODE (op) == ASHIFTRT)
796 && is_a <scalar_int_mode> (mode, &int_mode)
797 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
798 && MEM_P (XEXP (op, 0))
799 && CONST_INT_P (XEXP (op, 1))
800 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
801 && INTVAL (XEXP (op, 1)) > 0
802 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
803 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
804 MEM_ADDR_SPACE (XEXP (op, 0)))
805 && ! MEM_VOLATILE_P (XEXP (op, 0))
806 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
807 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
808 {
809 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
810 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
811 return adjust_address_nv (XEXP (op, 0), int_mode,
812 (WORDS_BIG_ENDIAN
813 ? byte - shifted_bytes
814 : byte + shifted_bytes));
815 }
816
817 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
818 (OP:SI foo:SI) if OP is NEG or ABS. */
819 if ((GET_CODE (op) == ABS
820 || GET_CODE (op) == NEG)
821 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
822 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
823 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
824 return simplify_gen_unary (GET_CODE (op), mode,
825 XEXP (XEXP (op, 0), 0), mode);
826
827 /* (truncate:A (subreg:B (truncate:C X) 0)) is
828 (truncate:A X). */
829 if (GET_CODE (op) == SUBREG
830 && is_a <scalar_int_mode> (mode, &int_mode)
831 && SCALAR_INT_MODE_P (op_mode)
832 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
833 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
834 && subreg_lowpart_p (op))
835 {
836 rtx inner = XEXP (SUBREG_REG (op), 0);
837 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
838 return simplify_gen_unary (TRUNCATE, int_mode, inner,
839 GET_MODE (inner));
840 else
841 /* If subreg above is paradoxical and C is narrower
842 than A, return (subreg:A (truncate:C X) 0). */
843 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
844 }
845
846 /* (truncate:A (truncate:B X)) is (truncate:A X). */
847 if (GET_CODE (op) == TRUNCATE)
848 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
849 GET_MODE (XEXP (op, 0)));
850
851 /* (truncate:A (ior X C)) is (const_int -1) if C truncates to -1
852 in mode A. */
853 if (GET_CODE (op) == IOR
854 && SCALAR_INT_MODE_P (mode)
855 && SCALAR_INT_MODE_P (op_mode)
856 && CONST_INT_P (XEXP (op, 1))
857 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
858 return constm1_rtx;
859
860 return NULL_RTX;
861 }
862 \f
863 /* Try to simplify a unary operation CODE whose output mode is to be
864 MODE with input operand OP whose mode was originally OP_MODE.
865 Return zero if no simplification can be made. */
866 rtx
867 simplify_unary_operation (enum rtx_code code, machine_mode mode,
868 rtx op, machine_mode op_mode)
869 {
870 rtx trueop, tem;
871
872 trueop = avoid_constant_pool_reference (op);
873
874 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
875 if (tem)
876 return tem;
877
878 return simplify_unary_operation_1 (code, mode, op);
879 }
880
881 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
882 to be exact. */
883
884 static bool
885 exact_int_to_float_conversion_p (const_rtx op)
886 {
887 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
888 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
889 /* Constants shouldn't reach here. */
890 gcc_assert (op0_mode != VOIDmode);
891 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
892 int in_bits = in_prec;
893 if (HWI_COMPUTABLE_MODE_P (op0_mode))
894 {
895 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
896 if (GET_CODE (op) == FLOAT)
897 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
898 else if (GET_CODE (op) == UNSIGNED_FLOAT)
899 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
900 else
901 gcc_unreachable ();
902 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
903 }
904 return in_bits <= out_bits;
905 }
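/* For example, with IEEE SFmode (24-bit significand), a conversion such
   as (unsigned_float:SF (and:SI X (const_int 0xffffff))) is exact:
   nonzero_bits limits the input to 24 significant bits, which the
   SFmode significand can represent without rounding.  */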
906
907 /* Perform some simplifications we can do even if the operands
908 aren't constant. */
909 static rtx
910 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
911 {
912 enum rtx_code reversed;
913 rtx temp, elt, base, step;
914 scalar_int_mode inner, int_mode, op_mode, op0_mode;
915
916 switch (code)
917 {
918 case NOT:
919 /* (not (not X)) == X. */
920 if (GET_CODE (op) == NOT)
921 return XEXP (op, 0);
922
923 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
924 comparison is all ones. */
925 if (COMPARISON_P (op)
926 && (mode == BImode || STORE_FLAG_VALUE == -1)
927 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
928 return simplify_gen_relational (reversed, mode, VOIDmode,
929 XEXP (op, 0), XEXP (op, 1));
930
931 /* (not (plus X -1)) can become (neg X). */
932 if (GET_CODE (op) == PLUS
933 && XEXP (op, 1) == constm1_rtx)
934 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
935
936 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
937 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
938 and MODE_VECTOR_INT. */
939 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
940 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
941 CONSTM1_RTX (mode));
942
943 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
944 if (GET_CODE (op) == XOR
945 && CONST_INT_P (XEXP (op, 1))
946 && (temp = simplify_unary_operation (NOT, mode,
947 XEXP (op, 1), mode)) != 0)
948 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
949
950 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
951 if (GET_CODE (op) == PLUS
952 && CONST_INT_P (XEXP (op, 1))
953 && mode_signbit_p (mode, XEXP (op, 1))
954 && (temp = simplify_unary_operation (NOT, mode,
955 XEXP (op, 1), mode)) != 0)
956 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
957
958
959 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
960 operands other than 1, but that is not valid. We could do a
961 similar simplification for (not (lshiftrt C X)) where C is
962 just the sign bit, but this doesn't seem common enough to
963 bother with. */
964 if (GET_CODE (op) == ASHIFT
965 && XEXP (op, 0) == const1_rtx)
966 {
967 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
968 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
969 }
970
971 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
972 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
973 so we can perform the above simplification. */
974 if (STORE_FLAG_VALUE == -1
975 && is_a <scalar_int_mode> (mode, &int_mode)
976 && GET_CODE (op) == ASHIFTRT
977 && CONST_INT_P (XEXP (op, 1))
978 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
979 return simplify_gen_relational (GE, int_mode, VOIDmode,
980 XEXP (op, 0), const0_rtx);
981
982
983 if (partial_subreg_p (op)
984 && subreg_lowpart_p (op)
985 && GET_CODE (SUBREG_REG (op)) == ASHIFT
986 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
987 {
988 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
989 rtx x;
990
991 x = gen_rtx_ROTATE (inner_mode,
992 simplify_gen_unary (NOT, inner_mode, const1_rtx,
993 inner_mode),
994 XEXP (SUBREG_REG (op), 1));
995 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
996 if (temp)
997 return temp;
998 }
999
1000 /* Apply De Morgan's laws to reduce number of patterns for machines
1001 with negating logical insns (and-not, nand, etc.). If result has
1002 only one NOT, put it first, since that is how the patterns are
1003 coded. */
1004 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1005 {
1006 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1007 machine_mode op_mode;
1008
1009 op_mode = GET_MODE (in1);
1010 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1011
1012 op_mode = GET_MODE (in2);
1013 if (op_mode == VOIDmode)
1014 op_mode = mode;
1015 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1016
1017 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1018 std::swap (in1, in2);
1019
1020 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1021 mode, in1, in2);
1022 }
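      /* So (not (ior A B)) becomes (and (not A) (not B)) and
	 (not (and A B)) becomes (ior (not A) (not B)), with any surviving
	 NOT placed in the first operand.  */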
1023
1024 /* (not (bswap x)) -> (bswap (not x)). */
1025 if (GET_CODE (op) == BSWAP)
1026 {
1027 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1028 return simplify_gen_unary (BSWAP, mode, x, mode);
1029 }
1030 break;
1031
1032 case NEG:
1033 /* (neg (neg X)) == X. */
1034 if (GET_CODE (op) == NEG)
1035 return XEXP (op, 0);
1036
1037 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1038 If comparison is not reversible use
1039 x ? y : (neg y). */
1040 if (GET_CODE (op) == IF_THEN_ELSE)
1041 {
1042 rtx cond = XEXP (op, 0);
1043 rtx true_rtx = XEXP (op, 1);
1044 rtx false_rtx = XEXP (op, 2);
1045
1046 if ((GET_CODE (true_rtx) == NEG
1047 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1048 || (GET_CODE (false_rtx) == NEG
1049 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1050 {
1051 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1052 temp = reversed_comparison (cond, mode);
1053 else
1054 {
1055 temp = cond;
1056 std::swap (true_rtx, false_rtx);
1057 }
1058 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1059 mode, temp, true_rtx, false_rtx);
1060 }
1061 }
1062
1063 /* (neg (plus X 1)) can become (not X). */
1064 if (GET_CODE (op) == PLUS
1065 && XEXP (op, 1) == const1_rtx)
1066 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1067
1068 /* Similarly, (neg (not X)) is (plus X 1). */
1069 if (GET_CODE (op) == NOT)
1070 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1071 CONST1_RTX (mode));
1072
1073 /* (neg (minus X Y)) can become (minus Y X). This transformation
1074 isn't safe for modes with signed zeros, since if X and Y are
1075 both +0, (minus Y X) is the same as (minus X Y). If the
1076 rounding mode is towards +infinity (or -infinity) then the two
1077 expressions will be rounded differently. */
1078 if (GET_CODE (op) == MINUS
1079 && !HONOR_SIGNED_ZEROS (mode)
1080 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1081 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1082
1083 if (GET_CODE (op) == PLUS
1084 && !HONOR_SIGNED_ZEROS (mode)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1086 {
1087 /* (neg (plus A C)) is simplified to (minus -C A). */
1088 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1089 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1090 {
1091 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1092 if (temp)
1093 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1094 }
1095
1096 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1097 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1098 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1099 }
1100
1101 /* (neg (mult A B)) becomes (mult A (neg B)).
1102 This works even for floating-point values. */
1103 if (GET_CODE (op) == MULT
1104 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1105 {
1106 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1107 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1108 }
1109
1110 /* NEG commutes with ASHIFT since it is multiplication. Only do
1111 this if we can then eliminate the NEG (e.g., if the operand
1112 is a constant). */
1113 if (GET_CODE (op) == ASHIFT)
1114 {
1115 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1116 if (temp)
1117 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1118 }
1119
1120 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1121 C is equal to the width of MODE minus 1. */
1122 if (GET_CODE (op) == ASHIFTRT
1123 && CONST_INT_P (XEXP (op, 1))
1124 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1125 return simplify_gen_binary (LSHIFTRT, mode,
1126 XEXP (op, 0), XEXP (op, 1));
1127
1128 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1129 C is equal to the width of MODE minus 1. */
1130 if (GET_CODE (op) == LSHIFTRT
1131 && CONST_INT_P (XEXP (op, 1))
1132 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1133 return simplify_gen_binary (ASHIFTRT, mode,
1134 XEXP (op, 0), XEXP (op, 1));
1135
1136 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1137 if (GET_CODE (op) == XOR
1138 && XEXP (op, 1) == const1_rtx
1139 && nonzero_bits (XEXP (op, 0), mode) == 1)
1140 return plus_constant (mode, XEXP (op, 0), -1);
1141
1142 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1143 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1144 if (GET_CODE (op) == LT
1145 && XEXP (op, 1) == const0_rtx
1146 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1147 {
1148 int_mode = as_a <scalar_int_mode> (mode);
1149 int isize = GET_MODE_PRECISION (inner);
1150 if (STORE_FLAG_VALUE == 1)
1151 {
1152 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1153 gen_int_shift_amount (inner,
1154 isize - 1));
1155 if (int_mode == inner)
1156 return temp;
1157 if (GET_MODE_PRECISION (int_mode) > isize)
1158 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1159 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1160 }
1161 else if (STORE_FLAG_VALUE == -1)
1162 {
1163 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1164 gen_int_shift_amount (inner,
1165 isize - 1));
1166 if (int_mode == inner)
1167 return temp;
1168 if (GET_MODE_PRECISION (int_mode) > isize)
1169 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1170 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1171 }
1172 }
1173
1174 if (vec_series_p (op, &base, &step))
1175 {
1176 /* Only create a new series if we can simplify both parts. In other
1177 cases this isn't really a simplification, and it's not necessarily
1178 a win to replace a vector operation with a scalar operation. */
1179 scalar_mode inner_mode = GET_MODE_INNER (mode);
1180 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1181 if (base)
1182 {
1183 step = simplify_unary_operation (NEG, inner_mode,
1184 step, inner_mode);
1185 if (step)
1186 return gen_vec_series (mode, base, step);
1187 }
1188 }
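      /* This covers variable-length vectors too: negating
	 (vec_series:M (const_int 1) (const_int 2)), with M standing for the
	 vector mode, gives the series with base (const_int -1) and step
	 (const_int -2), since both scalar negations fold to constants.  */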
1189 break;
1190
1191 case TRUNCATE:
1192 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1193 with the umulXi3_highpart patterns. */
1194 if (GET_CODE (op) == LSHIFTRT
1195 && GET_CODE (XEXP (op, 0)) == MULT)
1196 break;
1197
1198 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1199 {
1200 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1201 {
1202 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1203 if (temp)
1204 return temp;
1205 }
1206 /* We can't handle truncation to a partial integer mode here
1207 because we don't know the real bitsize of the partial
1208 integer mode. */
1209 break;
1210 }
1211
1212 if (GET_MODE (op) != VOIDmode)
1213 {
1214 temp = simplify_truncation (mode, op, GET_MODE (op));
1215 if (temp)
1216 return temp;
1217 }
1218
1219 /* If we know that the value is already truncated, we can
1220 replace the TRUNCATE with a SUBREG. */
1221 if (known_eq (GET_MODE_NUNITS (mode), 1)
1222 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1223 || truncated_to_mode (mode, op)))
1224 {
1225 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1226 if (temp)
1227 return temp;
1228 }
1229
1230 /* A truncate of a comparison can be replaced with a subreg if
1231 STORE_FLAG_VALUE permits. This is like the previous test,
1232 but it works even if the comparison is done in a mode larger
1233 than HOST_BITS_PER_WIDE_INT. */
1234 if (HWI_COMPUTABLE_MODE_P (mode)
1235 && COMPARISON_P (op)
1236 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1237 {
1238 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1239 if (temp)
1240 return temp;
1241 }
1242
1243 /* A truncate of a memory is just loading the low part of the memory
1244 if we are not changing the meaning of the address. */
1245 if (GET_CODE (op) == MEM
1246 && !VECTOR_MODE_P (mode)
1247 && !MEM_VOLATILE_P (op)
1248 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1249 {
1250 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1251 if (temp)
1252 return temp;
1253 }
1254
1255 break;
1256
1257 case FLOAT_TRUNCATE:
1258 if (DECIMAL_FLOAT_MODE_P (mode))
1259 break;
1260
1261 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1262 if (GET_CODE (op) == FLOAT_EXTEND
1263 && GET_MODE (XEXP (op, 0)) == mode)
1264 return XEXP (op, 0);
1265
1266 /* (float_truncate:SF (float_truncate:DF foo:XF))
1267 = (float_truncate:SF foo:XF).
1268 This may eliminate double rounding, so it is unsafe.
1269
1270 (float_truncate:SF (float_extend:XF foo:DF))
1271 = (float_truncate:SF foo:DF).
1272
1273 (float_truncate:DF (float_extend:XF foo:SF))
1274 = (float_extend:DF foo:SF). */
1275 if ((GET_CODE (op) == FLOAT_TRUNCATE
1276 && flag_unsafe_math_optimizations)
1277 || GET_CODE (op) == FLOAT_EXTEND)
1278 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1279 > GET_MODE_UNIT_SIZE (mode)
1280 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1281 mode,
1282 XEXP (op, 0), mode);
1283
1284 /* (float_truncate (float x)) is (float x) */
1285 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1286 && (flag_unsafe_math_optimizations
1287 || exact_int_to_float_conversion_p (op)))
1288 return simplify_gen_unary (GET_CODE (op), mode,
1289 XEXP (op, 0),
1290 GET_MODE (XEXP (op, 0)));
1291
1292 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1293 (OP:SF foo:SF) if OP is NEG or ABS. */
1294 if ((GET_CODE (op) == ABS
1295 || GET_CODE (op) == NEG)
1296 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1297 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1298 return simplify_gen_unary (GET_CODE (op), mode,
1299 XEXP (XEXP (op, 0), 0), mode);
1300
1301 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1302 is (float_truncate:SF x). */
1303 if (GET_CODE (op) == SUBREG
1304 && subreg_lowpart_p (op)
1305 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1306 return SUBREG_REG (op);
1307 break;
1308
1309 case FLOAT_EXTEND:
1310 if (DECIMAL_FLOAT_MODE_P (mode))
1311 break;
1312
1313 /* (float_extend (float_extend x)) is (float_extend x)
1314
1315 (float_extend (float x)) is (float x) assuming that double
1316 rounding can't happen.
1317 */
1318 if (GET_CODE (op) == FLOAT_EXTEND
1319 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1320 && exact_int_to_float_conversion_p (op)))
1321 return simplify_gen_unary (GET_CODE (op), mode,
1322 XEXP (op, 0),
1323 GET_MODE (XEXP (op, 0)));
1324
1325 break;
1326
1327 case ABS:
1328 /* (abs (neg <foo>)) -> (abs <foo>) */
1329 if (GET_CODE (op) == NEG)
1330 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1331 GET_MODE (XEXP (op, 0)));
1332
1333 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1334 do nothing. */
1335 if (GET_MODE (op) == VOIDmode)
1336 break;
1337
1338 /* If operand is something known to be positive, ignore the ABS. */
1339 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1340 || val_signbit_known_clear_p (GET_MODE (op),
1341 nonzero_bits (op, GET_MODE (op))))
1342 return op;
1343
1344 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1345 if (is_a <scalar_int_mode> (mode, &int_mode)
1346 && (num_sign_bit_copies (op, int_mode)
1347 == GET_MODE_PRECISION (int_mode)))
1348 return gen_rtx_NEG (int_mode, op);
1349
1350 break;
1351
1352 case FFS:
1353 /* (ffs (*_extend <X>)) = (ffs <X>) */
1354 if (GET_CODE (op) == SIGN_EXTEND
1355 || GET_CODE (op) == ZERO_EXTEND)
1356 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1357 GET_MODE (XEXP (op, 0)));
1358 break;
1359
1360 case POPCOUNT:
1361 switch (GET_CODE (op))
1362 {
1363 case BSWAP:
1364 case ZERO_EXTEND:
1365 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1366 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1367 GET_MODE (XEXP (op, 0)));
1368
1369 case ROTATE:
1370 case ROTATERT:
1371 /* Rotations don't affect popcount. */
1372 if (!side_effects_p (XEXP (op, 1)))
1373 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1374 GET_MODE (XEXP (op, 0)));
1375 break;
1376
1377 default:
1378 break;
1379 }
1380 break;
1381
1382 case PARITY:
1383 switch (GET_CODE (op))
1384 {
1385 case NOT:
1386 case BSWAP:
1387 case ZERO_EXTEND:
1388 case SIGN_EXTEND:
1389 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1390 GET_MODE (XEXP (op, 0)));
1391
1392 case ROTATE:
1393 case ROTATERT:
1394 /* Rotations don't affect parity. */
1395 if (!side_effects_p (XEXP (op, 1)))
1396 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1397 GET_MODE (XEXP (op, 0)));
1398 break;
1399
1400 default:
1401 break;
1402 }
1403 break;
1404
1405 case BSWAP:
1406 /* (bswap (bswap x)) -> x. */
1407 if (GET_CODE (op) == BSWAP)
1408 return XEXP (op, 0);
1409 break;
1410
1411 case FLOAT:
1412 /* (float (sign_extend <X>)) = (float <X>). */
1413 if (GET_CODE (op) == SIGN_EXTEND)
1414 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1415 GET_MODE (XEXP (op, 0)));
1416 break;
1417
1418 case SIGN_EXTEND:
1419 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1420 becomes just the MINUS if its mode is MODE. This allows
1421 folding switch statements on machines using casesi (such as
1422 the VAX). */
1423 if (GET_CODE (op) == TRUNCATE
1424 && GET_MODE (XEXP (op, 0)) == mode
1425 && GET_CODE (XEXP (op, 0)) == MINUS
1426 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1427 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1428 return XEXP (op, 0);
1429
1430 /* Extending a widening multiplication should be canonicalized to
1431 a wider widening multiplication. */
1432 if (GET_CODE (op) == MULT)
1433 {
1434 rtx lhs = XEXP (op, 0);
1435 rtx rhs = XEXP (op, 1);
1436 enum rtx_code lcode = GET_CODE (lhs);
1437 enum rtx_code rcode = GET_CODE (rhs);
1438
1439 /* Widening multiplies usually extend both operands, but sometimes
1440 they use a shift to extract a portion of a register. */
1441 if ((lcode == SIGN_EXTEND
1442 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1443 && (rcode == SIGN_EXTEND
1444 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1445 {
1446 machine_mode lmode = GET_MODE (lhs);
1447 machine_mode rmode = GET_MODE (rhs);
1448 int bits;
1449
1450 if (lcode == ASHIFTRT)
1451 /* Number of bits not shifted off the end. */
1452 bits = (GET_MODE_UNIT_PRECISION (lmode)
1453 - INTVAL (XEXP (lhs, 1)));
1454 else /* lcode == SIGN_EXTEND */
1455 /* Size of inner mode. */
1456 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1457
1458 if (rcode == ASHIFTRT)
1459 bits += (GET_MODE_UNIT_PRECISION (rmode)
1460 - INTVAL (XEXP (rhs, 1)));
1461 else /* rcode == SIGN_EXTEND */
1462 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1463
1464 /* We can only widen multiplies if the result is mathematically
1465 equivalent. I.e. if overflow was impossible. */
1466 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1467 return simplify_gen_binary
1468 (MULT, mode,
1469 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1470 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1471 }
1472 }
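      /* For example, 16 + 16 source bits fit in the 32-bit product, so
	 (sign_extend:DI (mult:SI (sign_extend:SI (x:HI))
				  (sign_extend:SI (y:HI))))
	 is rewritten as the wider widening multiply
	 (mult:DI (sign_extend:DI (x:HI)) (sign_extend:DI (y:HI))).  */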
1473
1474 /* Check for a sign extension of a subreg of a promoted
1475 variable, where the promotion is sign-extended, and the
1476 target mode is the same as the variable's promotion. */
1477 if (GET_CODE (op) == SUBREG
1478 && SUBREG_PROMOTED_VAR_P (op)
1479 && SUBREG_PROMOTED_SIGNED_P (op)
1480 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1481 {
1482 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1483 if (temp)
1484 return temp;
1485 }
1486
1487 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1488 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1489 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1490 {
1491 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1492 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1493 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1494 GET_MODE (XEXP (op, 0)));
1495 }
1496
1497 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1498 is (sign_extend:M (subreg:O <X>)) if there is mode with
1499 GET_MODE_BITSIZE (N) - I bits.
1500 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1501 is similarly (zero_extend:M (subreg:O <X>)). */
1502 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1503 && GET_CODE (XEXP (op, 0)) == ASHIFT
1504 && is_a <scalar_int_mode> (mode, &int_mode)
1505 && CONST_INT_P (XEXP (op, 1))
1506 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1507 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1508 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1509 {
1510 scalar_int_mode tmode;
1511 gcc_assert (GET_MODE_PRECISION (int_mode)
1512 > GET_MODE_PRECISION (op_mode));
1513 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1514 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1515 {
1516 rtx inner =
1517 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1518 if (inner)
1519 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1520 ? SIGN_EXTEND : ZERO_EXTEND,
1521 int_mode, inner, tmode);
1522 }
1523 }
1524
1525 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1526 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1527 if (GET_CODE (op) == LSHIFTRT
1528 && CONST_INT_P (XEXP (op, 1))
1529 && XEXP (op, 1) != const0_rtx)
1530 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1531
1532 #if defined(POINTERS_EXTEND_UNSIGNED)
1533 /* As we do not know which address space the pointer is referring to,
1534 we can do this only if the target does not support different pointer
1535 or address modes depending on the address space. */
1536 if (target_default_pointer_address_modes_p ()
1537 && ! POINTERS_EXTEND_UNSIGNED
1538 && mode == Pmode && GET_MODE (op) == ptr_mode
1539 && (CONSTANT_P (op)
1540 || (GET_CODE (op) == SUBREG
1541 && REG_P (SUBREG_REG (op))
1542 && REG_POINTER (SUBREG_REG (op))
1543 && GET_MODE (SUBREG_REG (op)) == Pmode))
1544 && !targetm.have_ptr_extend ())
1545 {
1546 temp
1547 = convert_memory_address_addr_space_1 (Pmode, op,
1548 ADDR_SPACE_GENERIC, false,
1549 true);
1550 if (temp)
1551 return temp;
1552 }
1553 #endif
1554 break;
1555
1556 case ZERO_EXTEND:
1557 /* Check for a zero extension of a subreg of a promoted
1558 variable, where the promotion is zero-extended, and the
1559 target mode is the same as the variable's promotion. */
1560 if (GET_CODE (op) == SUBREG
1561 && SUBREG_PROMOTED_VAR_P (op)
1562 && SUBREG_PROMOTED_UNSIGNED_P (op)
1563 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1564 {
1565 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1566 if (temp)
1567 return temp;
1568 }
1569
1570 /* Extending a widening multiplication should be canonicalized to
1571 a wider widening multiplication. */
1572 if (GET_CODE (op) == MULT)
1573 {
1574 rtx lhs = XEXP (op, 0);
1575 rtx rhs = XEXP (op, 1);
1576 enum rtx_code lcode = GET_CODE (lhs);
1577 enum rtx_code rcode = GET_CODE (rhs);
1578
1579 /* Widening multiplies usually extend both operands, but sometimes
1580 they use a shift to extract a portion of a register. */
1581 if ((lcode == ZERO_EXTEND
1582 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1583 && (rcode == ZERO_EXTEND
1584 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1585 {
1586 machine_mode lmode = GET_MODE (lhs);
1587 machine_mode rmode = GET_MODE (rhs);
1588 int bits;
1589
1590 if (lcode == LSHIFTRT)
1591 /* Number of bits not shifted off the end. */
1592 bits = (GET_MODE_UNIT_PRECISION (lmode)
1593 - INTVAL (XEXP (lhs, 1)));
1594 else /* lcode == ZERO_EXTEND */
1595 /* Size of inner mode. */
1596 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1597
1598 if (rcode == LSHIFTRT)
1599 bits += (GET_MODE_UNIT_PRECISION (rmode)
1600 - INTVAL (XEXP (rhs, 1)));
1601 else /* rcode == ZERO_EXTEND */
1602 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1603
1604 /* We can only widen multiplies if the result is mathematically
1605 equivalent. I.e. if overflow was impossible. */
1606 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1607 return simplify_gen_binary
1608 (MULT, mode,
1609 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1610 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1611 }
1612 }
1613
1614 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1615 if (GET_CODE (op) == ZERO_EXTEND)
1616 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1617 GET_MODE (XEXP (op, 0)));
1618
1619 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1620 is (zero_extend:M (subreg:O <X>)) if there is mode with
1621 GET_MODE_PRECISION (N) - I bits. */
1622 if (GET_CODE (op) == LSHIFTRT
1623 && GET_CODE (XEXP (op, 0)) == ASHIFT
1624 && is_a <scalar_int_mode> (mode, &int_mode)
1625 && CONST_INT_P (XEXP (op, 1))
1626 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1627 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1628 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1629 {
1630 scalar_int_mode tmode;
1631 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1632 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1633 {
1634 rtx inner =
1635 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1636 if (inner)
1637 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1638 inner, tmode);
1639 }
1640 }
1641
1642 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1643 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1644 of mode N. E.g.
1645 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1646 (and:SI (reg:SI) (const_int 63)). */
1647 if (partial_subreg_p (op)
1648 && is_a <scalar_int_mode> (mode, &int_mode)
1649 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1650 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1651 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1652 && subreg_lowpart_p (op)
1653 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1654 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1655 {
1656 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1657 return SUBREG_REG (op);
1658 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1659 op0_mode);
1660 }
1661
1662 #if defined(POINTERS_EXTEND_UNSIGNED)
1663 /* As we do not know which address space the pointer is referring to,
1664 we can do this only if the target does not support different pointer
1665 or address modes depending on the address space. */
1666 if (target_default_pointer_address_modes_p ()
1667 && POINTERS_EXTEND_UNSIGNED > 0
1668 && mode == Pmode && GET_MODE (op) == ptr_mode
1669 && (CONSTANT_P (op)
1670 || (GET_CODE (op) == SUBREG
1671 && REG_P (SUBREG_REG (op))
1672 && REG_POINTER (SUBREG_REG (op))
1673 && GET_MODE (SUBREG_REG (op)) == Pmode))
1674 && !targetm.have_ptr_extend ())
1675 {
1676 temp
1677 = convert_memory_address_addr_space_1 (Pmode, op,
1678 ADDR_SPACE_GENERIC, false,
1679 true);
1680 if (temp)
1681 return temp;
1682 }
1683 #endif
1684 break;
1685
1686 default:
1687 break;
1688 }
1689
1690 if (VECTOR_MODE_P (mode)
1691 && vec_duplicate_p (op, &elt)
1692 && code != VEC_DUPLICATE)
1693 {
1694 /* Try applying the operator to ELT and see if that simplifies.
1695 We can duplicate the result if so.
1696
1697 The reason we don't use simplify_gen_unary is that it isn't
1698 necessarily a win to convert things like:
1699
1700 (neg:V (vec_duplicate:V (reg:S R)))
1701
1702 to:
1703
1704 (vec_duplicate:V (neg:S (reg:S R)))
1705
1706 The first might be done entirely in vector registers while the
1707 second might need a move between register files. */
1708 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1709 elt, GET_MODE_INNER (GET_MODE (op)));
1710 if (temp)
1711 return gen_vec_duplicate (mode, temp);
1712 }
1713
1714 return 0;
1715 }
1716
1717 /* Try to compute the value of a unary operation CODE whose output mode is to
1718 be MODE with input operand OP whose mode was originally OP_MODE.
1719 Return zero if the value cannot be computed. */
1720 rtx
1721 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1722 rtx op, machine_mode op_mode)
1723 {
1724 scalar_int_mode result_mode;
1725
1726 if (code == VEC_DUPLICATE)
1727 {
1728 gcc_assert (VECTOR_MODE_P (mode));
1729 if (GET_MODE (op) != VOIDmode)
1730 {
1731 if (!VECTOR_MODE_P (GET_MODE (op)))
1732 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1733 else
1734 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1735 (GET_MODE (op)));
1736 }
1737 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1738 return gen_const_vec_duplicate (mode, op);
1739 unsigned int n_elts;
1740 if (GET_CODE (op) == CONST_VECTOR
1741 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
1742 {
1743 /* This must be constant if we're duplicating it to a constant
1744 number of elements. */
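/* For example (illustrative only): duplicating (const_vector:V2SI [1 2])
   into V4SImode repeats the input elements, giving
   (const_vector:V4SI [1 2 1 2]).  */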
1745 unsigned int in_n_elts = CONST_VECTOR_NUNITS (op).to_constant ();
1746 gcc_assert (in_n_elts < n_elts);
1747 gcc_assert ((n_elts % in_n_elts) == 0);
1748 rtvec v = rtvec_alloc (n_elts);
1749 for (unsigned i = 0; i < n_elts; i++)
1750 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1751 return gen_rtx_CONST_VECTOR (mode, v);
1752 }
1753 }
1754
1755 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1756 {
1757 gcc_assert (GET_MODE (op) == op_mode);
1758
1759 rtx_vector_builder builder;
1760 if (!builder.new_unary_operation (mode, op, false))
1761 return 0;
1762
1763 unsigned int count = builder.encoded_nelts ();
1764 for (unsigned int i = 0; i < count; i++)
1765 {
1766 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1767 CONST_VECTOR_ELT (op, i),
1768 GET_MODE_INNER (op_mode));
1769 if (!x || !valid_for_const_vector_p (mode, x))
1770 return 0;
1771 builder.quick_push (x);
1772 }
1773 return builder.build ();
1774 }
1775
1776 /* The order of these tests is critical so that, for example, we don't
1777 check the wrong mode (input vs. output) for a conversion operation,
1778 such as FIX. At some point, this should be simplified. */
1779
1780 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1781 {
1782 REAL_VALUE_TYPE d;
1783
1784 if (op_mode == VOIDmode)
1785 {
1786 /* A CONST_INT has VOIDmode as its mode. We assume that all
1787 the bits of the constant are significant, although this is
1788 a dangerous assumption: CONST_INTs are often created and
1789 used with garbage in the bits outside of the precision of
1790 the implied mode of the const_int. */
1791 op_mode = MAX_MODE_INT;
1792 }
1793
1794 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1795
1796 /* Avoid the folding if flag_signaling_nans is on and
1797 operand is a signaling NaN. */
1798 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1799 return 0;
1800
1801 d = real_value_truncate (mode, d);
1802 return const_double_from_real_value (d, mode);
1803 }
1804 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1805 {
1806 REAL_VALUE_TYPE d;
1807
1808 if (op_mode == VOIDmode)
1809 {
1810 /* A CONST_INT has VOIDmode as its mode. We assume that all
1811 the bits of the constant are significant, although this is
1812 a dangerous assumption: CONST_INTs are often created and
1813 used with garbage in the bits outside of the precision of
1814 the implied mode of the const_int. */
1815 op_mode = MAX_MODE_INT;
1816 }
1817
1818 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1819
1820 /* Avoid the folding if flag_signaling_nans is on and
1821 operand is a signaling NaN. */
1822 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1823 return 0;
1824
1825 d = real_value_truncate (mode, d);
1826 return const_double_from_real_value (d, mode);
1827 }
1828
1829 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1830 {
1831 unsigned int width = GET_MODE_PRECISION (result_mode);
1832 wide_int result;
1833 scalar_int_mode imode = (op_mode == VOIDmode
1834 ? result_mode
1835 : as_a <scalar_int_mode> (op_mode));
1836 rtx_mode_t op0 = rtx_mode_t (op, imode);
1837 int int_value;
1838
1839 #if TARGET_SUPPORTS_WIDE_INT == 0
1840 /* This assert keeps the simplification from producing a result
1841 that cannot be represented in a CONST_DOUBLE. A lot of
1842 upstream callers expect that this function never fails to
1843 simplify something, so if this check were added to the test
1844 above, the code would die later anyway. If this assert
1845 fires, you just need to make the port support wide int. */
1846 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1847 #endif
1848
1849 switch (code)
1850 {
1851 case NOT:
1852 result = wi::bit_not (op0);
1853 break;
1854
1855 case NEG:
1856 result = wi::neg (op0);
1857 break;
1858
1859 case ABS:
1860 result = wi::abs (op0);
1861 break;
1862
1863 case FFS:
1864 result = wi::shwi (wi::ffs (op0), result_mode);
1865 break;
1866
1867 case CLZ:
1868 if (wi::ne_p (op0, 0))
1869 int_value = wi::clz (op0);
1870 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1871 return NULL_RTX;
1872 result = wi::shwi (int_value, result_mode);
1873 break;
1874
1875 case CLRSB:
1876 result = wi::shwi (wi::clrsb (op0), result_mode);
1877 break;
1878
1879 case CTZ:
1880 if (wi::ne_p (op0, 0))
1881 int_value = wi::ctz (op0);
1882 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1883 return NULL_RTX;
1884 result = wi::shwi (int_value, result_mode);
1885 break;
1886
1887 case POPCOUNT:
1888 result = wi::shwi (wi::popcount (op0), result_mode);
1889 break;
1890
1891 case PARITY:
1892 result = wi::shwi (wi::parity (op0), result_mode);
1893 break;
1894
1895 case BSWAP:
1896 result = wide_int (op0).bswap ();
1897 break;
1898
1899 case TRUNCATE:
1900 case ZERO_EXTEND:
1901 result = wide_int::from (op0, width, UNSIGNED);
1902 break;
1903
1904 case SIGN_EXTEND:
1905 result = wide_int::from (op0, width, SIGNED);
1906 break;
1907
1908 case SQRT:
1909 default:
1910 return 0;
1911 }
1912
1913 return immed_wide_int_const (result, result_mode);
1914 }
1915
1916 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1917 && SCALAR_FLOAT_MODE_P (mode)
1918 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1919 {
1920 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1921 switch (code)
1922 {
1923 case SQRT:
1924 return 0;
1925 case ABS:
1926 d = real_value_abs (&d);
1927 break;
1928 case NEG:
1929 d = real_value_negate (&d);
1930 break;
1931 case FLOAT_TRUNCATE:
1932 /* Don't perform the operation if flag_signaling_nans is on
1933 and the operand is a signaling NaN. */
1934 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1935 return NULL_RTX;
1936 d = real_value_truncate (mode, d);
1937 break;
1938 case FLOAT_EXTEND:
1939 /* Don't perform the operation if flag_signaling_nans is on
1940 and the operand is a signaling NaN. */
1941 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1942 return NULL_RTX;
1943 /* All this does is change the mode, unless the conversion
1944 also changes the mode class. */
1945 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1946 real_convert (&d, mode, &d);
1947 break;
1948 case FIX:
1949 /* Don't perform the operation if flag_signaling_nans is on
1950 and the operand is a signaling NaN. */
1951 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1952 return NULL_RTX;
1953 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1954 break;
1955 case NOT:
1956 {
1957 long tmp[4];
1958 int i;
1959
1960 real_to_target (tmp, &d, GET_MODE (op));
1961 for (i = 0; i < 4; i++)
1962 tmp[i] = ~tmp[i];
1963 real_from_target (&d, tmp, mode);
1964 break;
1965 }
1966 default:
1967 gcc_unreachable ();
1968 }
1969 return const_double_from_real_value (d, mode);
1970 }
1971 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1972 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1973 && is_int_mode (mode, &result_mode))
1974 {
1975 unsigned int width = GET_MODE_PRECISION (result_mode);
1976 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1977 operators are intentionally left unspecified (to ease implementation
1978 by target backends), for consistency, this routine implements the
1979 same semantics for constant folding as used by the middle-end. */
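/* For example (illustrative only): folding (fix:QI (const_double:SF 300.0))
   yields (const_int 127) because 300.0 exceeds the signed QImode maximum,
   and folding (unsigned_fix:QI (const_double:SF -1.0)) yields (const_int 0);
   NaN inputs fold to 0 as well.  */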
1980
1981 /* This was formerly used only for non-IEEE float.
1982 eggert@twinsun.com says it is safe for IEEE also. */
1983 REAL_VALUE_TYPE t;
1984 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1985 wide_int wmax, wmin;
1986 /* This is part of the ABI of real_to_integer, but we check
1987 things before making this call. */
1988 bool fail;
1989
1990 switch (code)
1991 {
1992 case FIX:
1993 if (REAL_VALUE_ISNAN (*x))
1994 return const0_rtx;
1995
1996 /* Test against the signed upper bound. */
1997 wmax = wi::max_value (width, SIGNED);
1998 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1999 if (real_less (&t, x))
2000 return immed_wide_int_const (wmax, mode);
2001
2002 /* Test against the signed lower bound. */
2003 wmin = wi::min_value (width, SIGNED);
2004 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2005 if (real_less (x, &t))
2006 return immed_wide_int_const (wmin, mode);
2007
2008 return immed_wide_int_const (real_to_integer (x, &fail, width),
2009 mode);
2010
2011 case UNSIGNED_FIX:
2012 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2013 return const0_rtx;
2014
2015 /* Test against the unsigned upper bound. */
2016 wmax = wi::max_value (width, UNSIGNED);
2017 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2018 if (real_less (&t, x))
2019 return immed_wide_int_const (wmax, mode);
2020
2021 return immed_wide_int_const (real_to_integer (x, &fail, width),
2022 mode);
2023
2024 default:
2025 gcc_unreachable ();
2026 }
2027 }
2028
2029 /* Handle polynomial integers. */
2030 else if (CONST_POLY_INT_P (op))
2031 {
2032 poly_wide_int result;
2033 switch (code)
2034 {
2035 case NEG:
2036 result = -const_poly_int_value (op);
2037 break;
2038
2039 case NOT:
2040 result = ~const_poly_int_value (op);
2041 break;
2042
2043 default:
2044 return NULL_RTX;
2045 }
2046 return immed_wide_int_const (result, mode);
2047 }
2048
2049 return NULL_RTX;
2050 }
2051 \f
2052 /* Subroutine of simplify_binary_operation to simplify a binary operation
2053 CODE that can commute with byte swapping, with result mode MODE and
2054 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2055 Return zero if no simplification or canonicalization is possible. */
2056
2057 static rtx
2058 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2059 rtx op0, rtx op1)
2060 {
2061 rtx tem;
2062
2063 /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is C1 byte-swapped. */
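/* For example (illustrative only, SImode): (and (bswap:SI x) (const_int 0xff))
   becomes (bswap:SI (and:SI x (const_int 0xff000000))), byte-swapping the
   constant instead of the variable operand.  */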
2064 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2065 {
2066 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2067 simplify_gen_unary (BSWAP, mode, op1, mode));
2068 return simplify_gen_unary (BSWAP, mode, tem, mode);
2069 }
2070
2071 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2072 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2073 {
2074 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2075 return simplify_gen_unary (BSWAP, mode, tem, mode);
2076 }
2077
2078 return NULL_RTX;
2079 }
2080
2081 /* Subroutine of simplify_binary_operation to simplify a commutative,
2082 associative binary operation CODE with result mode MODE, operating
2083 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2084 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2085 canonicalization is possible. */
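/* For example (illustrative only): (plus:SI (plus:SI x (const_int 4)) y)
   is canonicalized to (plus:SI (plus:SI x y) (const_int 4)), moving the
   constant outermost so that later folds can combine it with other
   constants.  */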
2086
2087 static rtx
2088 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2089 rtx op0, rtx op1)
2090 {
2091 rtx tem;
2092
2093 /* Linearize the operator to the left. */
2094 if (GET_CODE (op1) == code)
2095 {
2096 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2097 if (GET_CODE (op0) == code)
2098 {
2099 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2100 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2101 }
2102
2103 /* "a op (b op c)" becomes "(b op c) op a". */
2104 if (! swap_commutative_operands_p (op1, op0))
2105 return simplify_gen_binary (code, mode, op1, op0);
2106
2107 std::swap (op0, op1);
2108 }
2109
2110 if (GET_CODE (op0) == code)
2111 {
2112 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2113 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2114 {
2115 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2116 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2117 }
2118
2119 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2120 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2121 if (tem != 0)
2122 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2123
2124 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2125 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2126 if (tem != 0)
2127 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2128 }
2129
2130 return 0;
2131 }
2132
2133
2134 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2135 and OP1. Return 0 if no simplification is possible.
2136
2137 Don't use this for relational operations such as EQ or LT.
2138 Use simplify_relational_operation instead. */
2139 rtx
2140 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2141 rtx op0, rtx op1)
2142 {
2143 rtx trueop0, trueop1;
2144 rtx tem;
2145
2146 /* Relational operations don't work here. We must know the mode
2147 of the operands in order to do the comparison correctly.
2148 Assuming a full word can give incorrect results.
2149 Consider comparing 128 with -128 in QImode. */
2150 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2151 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2152
2153 /* Make sure the constant is second. */
2154 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2155 && swap_commutative_operands_p (op0, op1))
2156 std::swap (op0, op1);
2157
2158 trueop0 = avoid_constant_pool_reference (op0);
2159 trueop1 = avoid_constant_pool_reference (op1);
2160
2161 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2162 if (tem)
2163 return tem;
2164 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2165
2166 if (tem)
2167 return tem;
2168
2169 /* If the above steps did not result in a simplification and op0 or op1
2170 were constant pool references, use the referenced constants directly. */
2171 if (trueop0 != op0 || trueop1 != op1)
2172 return simplify_gen_binary (code, mode, trueop0, trueop1);
2173
2174 return NULL_RTX;
2175 }
2176
2177 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2178 which OP0 and OP1 are both vector series or vector duplicates
2179 (which are really just series with a step of 0). If so, try to
2180 form a new series by applying CODE to the bases and to the steps.
2181 Return null if no simplification is possible.
2182
2183 MODE is the mode of the operation and is known to be a vector
2184 integer mode. */
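/* For example (illustrative only): adding
   (vec_series:V4SI (const_int 0) (const_int 1)), i.e. {0, 1, 2, 3}, to
   (vec_duplicate:V4SI (const_int 10)), i.e. {10, 10, 10, 10}, gives a
   series with base 10 and step 1, i.e. {10, 11, 12, 13}: the bases and
   the steps are combined separately.  */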
2185
2186 static rtx
2187 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2188 rtx op0, rtx op1)
2189 {
2190 rtx base0, step0;
2191 if (vec_duplicate_p (op0, &base0))
2192 step0 = const0_rtx;
2193 else if (!vec_series_p (op0, &base0, &step0))
2194 return NULL_RTX;
2195
2196 rtx base1, step1;
2197 if (vec_duplicate_p (op1, &base1))
2198 step1 = const0_rtx;
2199 else if (!vec_series_p (op1, &base1, &step1))
2200 return NULL_RTX;
2201
2202 /* Only create a new series if we can simplify both parts. In other
2203 cases this isn't really a simplification, and it's not necessarily
2204 a win to replace a vector operation with a scalar operation. */
2205 scalar_mode inner_mode = GET_MODE_INNER (mode);
2206 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2207 if (!new_base)
2208 return NULL_RTX;
2209
2210 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2211 if (!new_step)
2212 return NULL_RTX;
2213
2214 return gen_vec_series (mode, new_base, new_step);
2215 }
2216
2217 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2218 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2219 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2220 actual constants. */
2221
2222 static rtx
2223 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2224 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2225 {
2226 rtx tem, reversed, opleft, opright, elt0, elt1;
2227 HOST_WIDE_INT val;
2228 scalar_int_mode int_mode, inner_mode;
2229 poly_int64 offset;
2230
2231 /* Even if we can't compute a constant result,
2232 there are some cases worth simplifying. */
2233
2234 switch (code)
2235 {
2236 case PLUS:
2237 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2238 when x is NaN, infinite, or finite and nonzero. They aren't
2239 when x is -0 and the rounding mode is not towards -infinity,
2240 since (-0) + 0 is then 0. */
2241 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2242 return op0;
2243
2244 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2245 transformations are safe even for IEEE. */
2246 if (GET_CODE (op0) == NEG)
2247 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2248 else if (GET_CODE (op1) == NEG)
2249 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2250
2251 /* (~a) + 1 -> -a */
2252 if (INTEGRAL_MODE_P (mode)
2253 && GET_CODE (op0) == NOT
2254 && trueop1 == const1_rtx)
2255 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2256
2257 /* Handle both-operands-constant cases. We can only add
2258 CONST_INTs to constants since the sum of relocatable symbols
2259 can't be handled by most assemblers. Don't add CONST_INT
2260 to CONST_INT since overflow won't be computed properly if wider
2261 than HOST_BITS_PER_WIDE_INT. */
2262
2263 if ((GET_CODE (op0) == CONST
2264 || GET_CODE (op0) == SYMBOL_REF
2265 || GET_CODE (op0) == LABEL_REF)
2266 && poly_int_rtx_p (op1, &offset))
2267 return plus_constant (mode, op0, offset);
2268 else if ((GET_CODE (op1) == CONST
2269 || GET_CODE (op1) == SYMBOL_REF
2270 || GET_CODE (op1) == LABEL_REF)
2271 && poly_int_rtx_p (op0, &offset))
2272 return plus_constant (mode, op1, offset);
2273
2274 /* See if this is something like X * C - X or vice versa or
2275 if the multiplication is written as a shift. If so, we can
2276 distribute and make a new multiply, shift, or maybe just
2277 have X (if C is 2 in the example above). But don't make
2278 something more expensive than we had before. */
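/* For example (illustrative only): (plus (mult x (const_int 3)) x) can
   become (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x)
   can become (mult x (const_int 5)), provided the new form is no more
   expensive than the old one.  */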
2279
2280 if (is_a <scalar_int_mode> (mode, &int_mode))
2281 {
2282 rtx lhs = op0, rhs = op1;
2283
2284 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2285 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2286
2287 if (GET_CODE (lhs) == NEG)
2288 {
2289 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2290 lhs = XEXP (lhs, 0);
2291 }
2292 else if (GET_CODE (lhs) == MULT
2293 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2294 {
2295 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2296 lhs = XEXP (lhs, 0);
2297 }
2298 else if (GET_CODE (lhs) == ASHIFT
2299 && CONST_INT_P (XEXP (lhs, 1))
2300 && INTVAL (XEXP (lhs, 1)) >= 0
2301 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2302 {
2303 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2304 GET_MODE_PRECISION (int_mode));
2305 lhs = XEXP (lhs, 0);
2306 }
2307
2308 if (GET_CODE (rhs) == NEG)
2309 {
2310 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2311 rhs = XEXP (rhs, 0);
2312 }
2313 else if (GET_CODE (rhs) == MULT
2314 && CONST_INT_P (XEXP (rhs, 1)))
2315 {
2316 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2317 rhs = XEXP (rhs, 0);
2318 }
2319 else if (GET_CODE (rhs) == ASHIFT
2320 && CONST_INT_P (XEXP (rhs, 1))
2321 && INTVAL (XEXP (rhs, 1)) >= 0
2322 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2323 {
2324 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2325 GET_MODE_PRECISION (int_mode));
2326 rhs = XEXP (rhs, 0);
2327 }
2328
2329 if (rtx_equal_p (lhs, rhs))
2330 {
2331 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2332 rtx coeff;
2333 bool speed = optimize_function_for_speed_p (cfun);
2334
2335 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2336
2337 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2338 return (set_src_cost (tem, int_mode, speed)
2339 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2340 }
2341 }
2342
2343 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
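/* For example (illustrative only, QImode): adding -128 only toggles the
   sign bit (the carry out is discarded), so
   (plus (xor x (const_int 3)) (const_int -128)) can become
   (xor x (const_int -125)), since -128 ^ 3 == -125.  */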
2344 if (CONST_SCALAR_INT_P (op1)
2345 && GET_CODE (op0) == XOR
2346 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2347 && mode_signbit_p (mode, op1))
2348 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2349 simplify_gen_binary (XOR, mode, op1,
2350 XEXP (op0, 1)));
2351
2352 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2353 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2354 && GET_CODE (op0) == MULT
2355 && GET_CODE (XEXP (op0, 0)) == NEG)
2356 {
2357 rtx in1, in2;
2358
2359 in1 = XEXP (XEXP (op0, 0), 0);
2360 in2 = XEXP (op0, 1);
2361 return simplify_gen_binary (MINUS, mode, op1,
2362 simplify_gen_binary (MULT, mode,
2363 in1, in2));
2364 }
2365
2366 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2367 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2368 is 1. */
2369 if (COMPARISON_P (op0)
2370 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2371 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2372 && (reversed = reversed_comparison (op0, mode)))
2373 return
2374 simplify_gen_unary (NEG, mode, reversed, mode);
2375
2376 /* If one of the operands is a PLUS or a MINUS, see if we can
2377 simplify this by the associative law.
2378 Don't use the associative law for floating point.
2379 The inaccuracy makes it nonassociative,
2380 and subtle programs can break if operations are associated. */
2381
2382 if (INTEGRAL_MODE_P (mode)
2383 && (plus_minus_operand_p (op0)
2384 || plus_minus_operand_p (op1))
2385 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2386 return tem;
2387
2388 /* Reassociate floating point addition only when the user
2389 specifies associative math operations. */
2390 if (FLOAT_MODE_P (mode)
2391 && flag_associative_math)
2392 {
2393 tem = simplify_associative_operation (code, mode, op0, op1);
2394 if (tem)
2395 return tem;
2396 }
2397
2398 /* Handle vector series. */
2399 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2400 {
2401 tem = simplify_binary_operation_series (code, mode, op0, op1);
2402 if (tem)
2403 return tem;
2404 }
2405 break;
2406
2407 case COMPARE:
2408 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2409 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2410 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2411 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2412 {
2413 rtx xop00 = XEXP (op0, 0);
2414 rtx xop10 = XEXP (op1, 0);
2415
2416 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2417 return xop00;
2418
2419 if (REG_P (xop00) && REG_P (xop10)
2420 && REGNO (xop00) == REGNO (xop10)
2421 && GET_MODE (xop00) == mode
2422 && GET_MODE (xop10) == mode
2423 && GET_MODE_CLASS (mode) == MODE_CC)
2424 return xop00;
2425 }
2426 break;
2427
2428 case MINUS:
2429 /* We can't assume x-x is 0 even with non-IEEE floating point,
2430 but since it is zero except in very strange circumstances, we
2431 will treat it as zero with -ffinite-math-only. */
2432 if (rtx_equal_p (trueop0, trueop1)
2433 && ! side_effects_p (op0)
2434 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2435 return CONST0_RTX (mode);
2436
2437 /* Change subtraction from zero into negation. (0 - x) is the
2438 same as -x when x is NaN, infinite, or finite and nonzero.
2439 But if the mode has signed zeros, and does not round towards
2440 -infinity, then 0 - 0 is 0, not -0. */
2441 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2442 return simplify_gen_unary (NEG, mode, op1, mode);
2443
2444 /* (-1 - a) is ~a, unless the expression contains symbolic
2445 constants, in which case not retaining additions and
2446 subtractions could cause invalid assembly to be produced. */
2447 if (trueop0 == constm1_rtx
2448 && !contains_symbolic_reference_p (op1))
2449 return simplify_gen_unary (NOT, mode, op1, mode);
2450
2451 /* Subtracting 0 has no effect unless the mode has signed zeros
2452 and supports rounding towards -infinity. In such a case,
2453 0 - 0 is -0. */
2454 if (!(HONOR_SIGNED_ZEROS (mode)
2455 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2456 && trueop1 == CONST0_RTX (mode))
2457 return op0;
2458
2459 /* See if this is something like X * C - X or vice versa or
2460 if the multiplication is written as a shift. If so, we can
2461 distribute and make a new multiply, shift, or maybe just
2462 have X (if C is 2 in the example above). But don't make
2463 something more expensive than we had before. */
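/* For example (illustrative only): (minus (mult x (const_int 3)) x) can
   become (mult x (const_int 2)), again only if the new form is not more
   expensive.  */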
2464
2465 if (is_a <scalar_int_mode> (mode, &int_mode))
2466 {
2467 rtx lhs = op0, rhs = op1;
2468
2469 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2470 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2471
2472 if (GET_CODE (lhs) == NEG)
2473 {
2474 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2475 lhs = XEXP (lhs, 0);
2476 }
2477 else if (GET_CODE (lhs) == MULT
2478 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2479 {
2480 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2481 lhs = XEXP (lhs, 0);
2482 }
2483 else if (GET_CODE (lhs) == ASHIFT
2484 && CONST_INT_P (XEXP (lhs, 1))
2485 && INTVAL (XEXP (lhs, 1)) >= 0
2486 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2487 {
2488 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2489 GET_MODE_PRECISION (int_mode));
2490 lhs = XEXP (lhs, 0);
2491 }
2492
2493 if (GET_CODE (rhs) == NEG)
2494 {
2495 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2496 rhs = XEXP (rhs, 0);
2497 }
2498 else if (GET_CODE (rhs) == MULT
2499 && CONST_INT_P (XEXP (rhs, 1)))
2500 {
2501 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2502 rhs = XEXP (rhs, 0);
2503 }
2504 else if (GET_CODE (rhs) == ASHIFT
2505 && CONST_INT_P (XEXP (rhs, 1))
2506 && INTVAL (XEXP (rhs, 1)) >= 0
2507 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2508 {
2509 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2510 GET_MODE_PRECISION (int_mode));
2511 negcoeff1 = -negcoeff1;
2512 rhs = XEXP (rhs, 0);
2513 }
2514
2515 if (rtx_equal_p (lhs, rhs))
2516 {
2517 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2518 rtx coeff;
2519 bool speed = optimize_function_for_speed_p (cfun);
2520
2521 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2522
2523 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2524 return (set_src_cost (tem, int_mode, speed)
2525 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2526 }
2527 }
2528
2529 /* (a - (-b)) -> (a + b). True even for IEEE. */
2530 if (GET_CODE (op1) == NEG)
2531 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2532
2533 /* (-x - c) may be simplified as (-c - x). */
2534 if (GET_CODE (op0) == NEG
2535 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2536 {
2537 tem = simplify_unary_operation (NEG, mode, op1, mode);
2538 if (tem)
2539 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2540 }
2541
2542 if ((GET_CODE (op0) == CONST
2543 || GET_CODE (op0) == SYMBOL_REF
2544 || GET_CODE (op0) == LABEL_REF)
2545 && poly_int_rtx_p (op1, &offset))
2546 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2547
2548 /* Don't let a relocatable value get a negative coeff. */
2549 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2550 return simplify_gen_binary (PLUS, mode,
2551 op0,
2552 neg_const_int (mode, op1));
2553
2554 /* (x - (x & y)) -> (x & ~y) */
2555 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2556 {
2557 if (rtx_equal_p (op0, XEXP (op1, 0)))
2558 {
2559 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2560 GET_MODE (XEXP (op1, 1)));
2561 return simplify_gen_binary (AND, mode, op0, tem);
2562 }
2563 if (rtx_equal_p (op0, XEXP (op1, 1)))
2564 {
2565 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2566 GET_MODE (XEXP (op1, 0)));
2567 return simplify_gen_binary (AND, mode, op0, tem);
2568 }
2569 }
2570
2571 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2572 by reversing the comparison code if valid. */
2573 if (STORE_FLAG_VALUE == 1
2574 && trueop0 == const1_rtx
2575 && COMPARISON_P (op1)
2576 && (reversed = reversed_comparison (op1, mode)))
2577 return reversed;
2578
2579 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2580 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2581 && GET_CODE (op1) == MULT
2582 && GET_CODE (XEXP (op1, 0)) == NEG)
2583 {
2584 rtx in1, in2;
2585
2586 in1 = XEXP (XEXP (op1, 0), 0);
2587 in2 = XEXP (op1, 1);
2588 return simplify_gen_binary (PLUS, mode,
2589 simplify_gen_binary (MULT, mode,
2590 in1, in2),
2591 op0);
2592 }
2593
2594 /* Canonicalize (minus (neg A) (mult B C)) to
2595 (minus (mult (neg B) C) A). */
2596 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2597 && GET_CODE (op1) == MULT
2598 && GET_CODE (op0) == NEG)
2599 {
2600 rtx in1, in2;
2601
2602 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2603 in2 = XEXP (op1, 1);
2604 return simplify_gen_binary (MINUS, mode,
2605 simplify_gen_binary (MULT, mode,
2606 in1, in2),
2607 XEXP (op0, 0));
2608 }
2609
2610 /* If one of the operands is a PLUS or a MINUS, see if we can
2611 simplify this by the associative law. This will, for example,
2612 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2613 Don't use the associative law for floating point.
2614 The inaccuracy makes it nonassociative,
2615 and subtle programs can break if operations are associated. */
2616
2617 if (INTEGRAL_MODE_P (mode)
2618 && (plus_minus_operand_p (op0)
2619 || plus_minus_operand_p (op1))
2620 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2621 return tem;
2622
2623 /* Handle vector series. */
2624 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2625 {
2626 tem = simplify_binary_operation_series (code, mode, op0, op1);
2627 if (tem)
2628 return tem;
2629 }
2630 break;
2631
2632 case MULT:
2633 if (trueop1 == constm1_rtx)
2634 return simplify_gen_unary (NEG, mode, op0, mode);
2635
2636 if (GET_CODE (op0) == NEG)
2637 {
2638 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2639 /* If op1 is a MULT as well and simplify_unary_operation
2640 just moved the NEG to the second operand, simplify_gen_binary
2641 below could, through simplify_associative_operation, move
2642 the NEG around again and recurse endlessly. */
2643 if (temp
2644 && GET_CODE (op1) == MULT
2645 && GET_CODE (temp) == MULT
2646 && XEXP (op1, 0) == XEXP (temp, 0)
2647 && GET_CODE (XEXP (temp, 1)) == NEG
2648 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2649 temp = NULL_RTX;
2650 if (temp)
2651 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2652 }
2653 if (GET_CODE (op1) == NEG)
2654 {
2655 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2656 /* If op0 is a MULT as well and simplify_unary_operation
2657 just moved the NEG to the second operand, simplify_gen_binary
2658 below could, through simplify_associative_operation, move
2659 the NEG around again and recurse endlessly. */
2660 if (temp
2661 && GET_CODE (op0) == MULT
2662 && GET_CODE (temp) == MULT
2663 && XEXP (op0, 0) == XEXP (temp, 0)
2664 && GET_CODE (XEXP (temp, 1)) == NEG
2665 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2666 temp = NULL_RTX;
2667 if (temp)
2668 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2669 }
2670
2671 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2672 x is NaN, since x * 0 is then also NaN. Nor is it valid
2673 when the mode has signed zeros, since multiplying a negative
2674 number by 0 will give -0, not 0. */
2675 if (!HONOR_NANS (mode)
2676 && !HONOR_SIGNED_ZEROS (mode)
2677 && trueop1 == CONST0_RTX (mode)
2678 && ! side_effects_p (op0))
2679 return op1;
2680
2681 /* In IEEE floating point, x*1 is not equivalent to x for
2682 signalling NaNs. */
2683 if (!HONOR_SNANS (mode)
2684 && trueop1 == CONST1_RTX (mode))
2685 return op0;
2686
2687 /* Convert multiply by constant power of two into shift. */
2688 if (CONST_SCALAR_INT_P (trueop1))
2689 {
2690 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2691 if (val >= 0)
2692 return simplify_gen_binary (ASHIFT, mode, op0,
2693 gen_int_shift_amount (mode, val));
2694 }
2695
2696 /* x*2 is x+x and x*(-1) is -x */
2697 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2698 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2699 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2700 && GET_MODE (op0) == mode)
2701 {
2702 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2703
2704 if (real_equal (d1, &dconst2))
2705 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2706
2707 if (!HONOR_SNANS (mode)
2708 && real_equal (d1, &dconstm1))
2709 return simplify_gen_unary (NEG, mode, op0, mode);
2710 }
2711
2712 /* Optimize -x * -x as x * x. */
2713 if (FLOAT_MODE_P (mode)
2714 && GET_CODE (op0) == NEG
2715 && GET_CODE (op1) == NEG
2716 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2717 && !side_effects_p (XEXP (op0, 0)))
2718 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2719
2720 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2721 if (SCALAR_FLOAT_MODE_P (mode)
2722 && GET_CODE (op0) == ABS
2723 && GET_CODE (op1) == ABS
2724 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2725 && !side_effects_p (XEXP (op0, 0)))
2726 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2727
2728 /* Reassociate multiplication, but for floating point MULTs
2729 only when the user specifies unsafe math optimizations. */
2730 if (! FLOAT_MODE_P (mode)
2731 || flag_unsafe_math_optimizations)
2732 {
2733 tem = simplify_associative_operation (code, mode, op0, op1);
2734 if (tem)
2735 return tem;
2736 }
2737 break;
2738
2739 case IOR:
2740 if (trueop1 == CONST0_RTX (mode))
2741 return op0;
2742 if (INTEGRAL_MODE_P (mode)
2743 && trueop1 == CONSTM1_RTX (mode)
2744 && !side_effects_p (op0))
2745 return op1;
2746 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2747 return op0;
2748 /* A | (~A) -> -1 */
2749 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2750 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2751 && ! side_effects_p (op0)
2752 && SCALAR_INT_MODE_P (mode))
2753 return constm1_rtx;
2754
2755 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2756 if (CONST_INT_P (op1)
2757 && HWI_COMPUTABLE_MODE_P (mode)
2758 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2759 && !side_effects_p (op0))
2760 return op1;
2761
2762 /* Canonicalize (X & C1) | C2. */
2763 if (GET_CODE (op0) == AND
2764 && CONST_INT_P (trueop1)
2765 && CONST_INT_P (XEXP (op0, 1)))
2766 {
2767 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2768 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2769 HOST_WIDE_INT c2 = INTVAL (trueop1);
2770
2771 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2772 if ((c1 & c2) == c1
2773 && !side_effects_p (XEXP (op0, 0)))
2774 return trueop1;
2775
2776 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2777 if (((c1|c2) & mask) == mask)
2778 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2779 }
2780
2781 /* Convert (A & B) | A to A. */
2782 if (GET_CODE (op0) == AND
2783 && (rtx_equal_p (XEXP (op0, 0), op1)
2784 || rtx_equal_p (XEXP (op0, 1), op1))
2785 && ! side_effects_p (XEXP (op0, 0))
2786 && ! side_effects_p (XEXP (op0, 1)))
2787 return op1;
2788
2789 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2790 mode size to (rotate A CX). */
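/* For example (illustrative only, SImode):
   (ior (ashift:SI x (const_int 8)) (lshiftrt:SI x (const_int 24)))
   becomes (rotate:SI x (const_int 8)), since 8 + 24 equals the 32-bit
   precision of the mode.  */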
2791
2792 if (GET_CODE (op1) == ASHIFT
2793 || GET_CODE (op1) == SUBREG)
2794 {
2795 opleft = op1;
2796 opright = op0;
2797 }
2798 else
2799 {
2800 opright = op1;
2801 opleft = op0;
2802 }
2803
2804 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2805 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2806 && CONST_INT_P (XEXP (opleft, 1))
2807 && CONST_INT_P (XEXP (opright, 1))
2808 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2809 == GET_MODE_UNIT_PRECISION (mode)))
2810 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2811
2812 /* Same, but for ashift that has been "simplified" to a wider mode
2813 by simplify_shift_const. */
2814
2815 if (GET_CODE (opleft) == SUBREG
2816 && is_a <scalar_int_mode> (mode, &int_mode)
2817 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2818 &inner_mode)
2819 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2820 && GET_CODE (opright) == LSHIFTRT
2821 && GET_CODE (XEXP (opright, 0)) == SUBREG
2822 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
2823 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2824 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2825 SUBREG_REG (XEXP (opright, 0)))
2826 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2827 && CONST_INT_P (XEXP (opright, 1))
2828 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2829 + INTVAL (XEXP (opright, 1))
2830 == GET_MODE_PRECISION (int_mode)))
2831 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2832 XEXP (SUBREG_REG (opleft), 1));
2833
2834 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2835 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the
2836 PLUS does not affect any of the bits in OP1, we can do the
2837 IOR as a PLUS and we can associate. This is valid if OP1
2838 can be safely shifted left C bits. */
2839 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2840 && GET_CODE (XEXP (op0, 0)) == PLUS
2841 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2842 && CONST_INT_P (XEXP (op0, 1))
2843 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2844 {
2845 int count = INTVAL (XEXP (op0, 1));
2846 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2847
2848 if (mask >> count == INTVAL (trueop1)
2849 && trunc_int_for_mode (mask, mode) == mask
2850 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2851 return simplify_gen_binary (ASHIFTRT, mode,
2852 plus_constant (mode, XEXP (op0, 0),
2853 mask),
2854 XEXP (op0, 1));
2855 }
2856
2857 /* The following happens with bitfield merging.
2858 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
2859 if (GET_CODE (op0) == AND
2860 && GET_CODE (op1) == AND
2861 && CONST_INT_P (XEXP (op0, 1))
2862 && CONST_INT_P (XEXP (op1, 1))
2863 && (INTVAL (XEXP (op0, 1))
2864 == ~INTVAL (XEXP (op1, 1))))
2865 {
2866 /* The IOR may be on both sides. */
2867 rtx top0 = NULL_RTX, top1 = NULL_RTX;
2868 if (GET_CODE (XEXP (op1, 0)) == IOR)
2869 top0 = op0, top1 = op1;
2870 else if (GET_CODE (XEXP (op0, 0)) == IOR)
2871 top0 = op1, top1 = op0;
2872 if (top0 && top1)
2873 {
2874 /* X may be on either side of the inner IOR. */
2875 rtx tem = NULL_RTX;
2876 if (rtx_equal_p (XEXP (top0, 0),
2877 XEXP (XEXP (top1, 0), 0)))
2878 tem = XEXP (XEXP (top1, 0), 1);
2879 else if (rtx_equal_p (XEXP (top0, 0),
2880 XEXP (XEXP (top1, 0), 1)))
2881 tem = XEXP (XEXP (top1, 0), 0);
2882 if (tem)
2883 return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
2884 simplify_gen_binary
2885 (AND, mode, tem, XEXP (top1, 1)));
2886 }
2887 }
2888
2889 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2890 if (tem)
2891 return tem;
2892
2893 tem = simplify_associative_operation (code, mode, op0, op1);
2894 if (tem)
2895 return tem;
2896 break;
2897
2898 case XOR:
2899 if (trueop1 == CONST0_RTX (mode))
2900 return op0;
2901 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2902 return simplify_gen_unary (NOT, mode, op0, mode);
2903 if (rtx_equal_p (trueop0, trueop1)
2904 && ! side_effects_p (op0)
2905 && GET_MODE_CLASS (mode) != MODE_CC)
2906 return CONST0_RTX (mode);
2907
2908 /* Canonicalize XOR of the most significant bit to PLUS. */
2909 if (CONST_SCALAR_INT_P (op1)
2910 && mode_signbit_p (mode, op1))
2911 return simplify_gen_binary (PLUS, mode, op0, op1);
2912 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2913 if (CONST_SCALAR_INT_P (op1)
2914 && GET_CODE (op0) == PLUS
2915 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2916 && mode_signbit_p (mode, XEXP (op0, 1)))
2917 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2918 simplify_gen_binary (XOR, mode, op1,
2919 XEXP (op0, 1)));
2920
2921 /* If we are XORing two things that have no bits in common,
2922 convert them into an IOR. This helps to detect rotation encoded
2923 using those methods and possibly other simplifications. */
2924
2925 if (HWI_COMPUTABLE_MODE_P (mode)
2926 && (nonzero_bits (op0, mode)
2927 & nonzero_bits (op1, mode)) == 0)
2928 return (simplify_gen_binary (IOR, mode, op0, op1));
2929
2930 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2931 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2932 (NOT y). */
2933 {
2934 int num_negated = 0;
2935
2936 if (GET_CODE (op0) == NOT)
2937 num_negated++, op0 = XEXP (op0, 0);
2938 if (GET_CODE (op1) == NOT)
2939 num_negated++, op1 = XEXP (op1, 0);
2940
2941 if (num_negated == 2)
2942 return simplify_gen_binary (XOR, mode, op0, op1);
2943 else if (num_negated == 1)
2944 return simplify_gen_unary (NOT, mode,
2945 simplify_gen_binary (XOR, mode, op0, op1),
2946 mode);
2947 }
2948
2949 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2950 correspond to a machine insn or result in further simplifications
2951 if B is a constant. */
2952
2953 if (GET_CODE (op0) == AND
2954 && rtx_equal_p (XEXP (op0, 1), op1)
2955 && ! side_effects_p (op1))
2956 return simplify_gen_binary (AND, mode,
2957 simplify_gen_unary (NOT, mode,
2958 XEXP (op0, 0), mode),
2959 op1);
2960
2961 else if (GET_CODE (op0) == AND
2962 && rtx_equal_p (XEXP (op0, 0), op1)
2963 && ! side_effects_p (op1))
2964 return simplify_gen_binary (AND, mode,
2965 simplify_gen_unary (NOT, mode,
2966 XEXP (op0, 1), mode),
2967 op1);
2968
2969 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2970 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2971 out bits inverted twice and not set by C. Similarly, given
2972 (xor (and (xor A B) C) D), simplify without inverting C in
2973 the xor operand: (xor (and A C) (B&C)^D).
2974 */
2975 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2976 && GET_CODE (XEXP (op0, 0)) == XOR
2977 && CONST_INT_P (op1)
2978 && CONST_INT_P (XEXP (op0, 1))
2979 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2980 {
2981 enum rtx_code op = GET_CODE (op0);
2982 rtx a = XEXP (XEXP (op0, 0), 0);
2983 rtx b = XEXP (XEXP (op0, 0), 1);
2984 rtx c = XEXP (op0, 1);
2985 rtx d = op1;
2986 HOST_WIDE_INT bval = INTVAL (b);
2987 HOST_WIDE_INT cval = INTVAL (c);
2988 HOST_WIDE_INT dval = INTVAL (d);
2989 HOST_WIDE_INT xcval;
2990
2991 if (op == IOR)
2992 xcval = ~cval;
2993 else
2994 xcval = cval;
2995
2996 return simplify_gen_binary (XOR, mode,
2997 simplify_gen_binary (op, mode, a, c),
2998 gen_int_mode ((bval & xcval) ^ dval,
2999 mode));
3000 }
3001
3002 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3003 we can transform like this:
3004 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3005 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3006 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3007 Attempt a few simplifications when B and C are both constants. */
3008 if (GET_CODE (op0) == AND
3009 && CONST_INT_P (op1)
3010 && CONST_INT_P (XEXP (op0, 1)))
3011 {
3012 rtx a = XEXP (op0, 0);
3013 rtx b = XEXP (op0, 1);
3014 rtx c = op1;
3015 HOST_WIDE_INT bval = INTVAL (b);
3016 HOST_WIDE_INT cval = INTVAL (c);
3017
3018 /* Instead of computing ~A&C, we compute its complement, A|~C.
3019 If that simplifies to -1, ~A&C is zero, so we can
3020 optimize for sure. If it does not simplify, we still try
3021 to compute ~A&C below, but since that always allocates
3022 RTL, we don't try that before committing to returning a
3023 simplified expression. */
3024 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3025 GEN_INT (~cval));
3026
3027 if ((~cval & bval) == 0)
3028 {
3029 rtx na_c = NULL_RTX;
3030 if (n_na_c)
3031 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3032 else
3033 {
3034 /* If ~A does not simplify, don't bother: we don't
3035 want to simplify 2 operations into 3, and if na_c
3036 were to simplify with na, n_na_c would have
3037 simplified as well. */
3038 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3039 if (na)
3040 na_c = simplify_gen_binary (AND, mode, na, c);
3041 }
3042
3043 /* Try to simplify ~A&C | ~B&C. */
3044 if (na_c != NULL_RTX)
3045 return simplify_gen_binary (IOR, mode, na_c,
3046 gen_int_mode (~bval & cval, mode));
3047 }
3048 else
3049 {
3050 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3051 if (n_na_c == CONSTM1_RTX (mode))
3052 {
3053 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3054 gen_int_mode (~cval & bval,
3055 mode));
3056 return simplify_gen_binary (IOR, mode, a_nc_b,
3057 gen_int_mode (~bval & cval,
3058 mode));
3059 }
3060 }
3061 }
3062
3063 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3064 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3065 machines, and also has shorter instruction path length. */
3066 if (GET_CODE (op0) == AND
3067 && GET_CODE (XEXP (op0, 0)) == XOR
3068 && CONST_INT_P (XEXP (op0, 1))
3069 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3070 {
3071 rtx a = trueop1;
3072 rtx b = XEXP (XEXP (op0, 0), 1);
3073 rtx c = XEXP (op0, 1);
3074 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3075 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3076 rtx bc = simplify_gen_binary (AND, mode, b, c);
3077 return simplify_gen_binary (IOR, mode, a_nc, bc);
3078 }
3079 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3080 else if (GET_CODE (op0) == AND
3081 && GET_CODE (XEXP (op0, 0)) == XOR
3082 && CONST_INT_P (XEXP (op0, 1))
3083 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3084 {
3085 rtx a = XEXP (XEXP (op0, 0), 0);
3086 rtx b = trueop1;
3087 rtx c = XEXP (op0, 1);
3088 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3089 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3090 rtx ac = simplify_gen_binary (AND, mode, a, c);
3091 return simplify_gen_binary (IOR, mode, ac, b_nc);
3092 }
3093
3094 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3095 comparison if STORE_FLAG_VALUE is 1. */
3096 if (STORE_FLAG_VALUE == 1
3097 && trueop1 == const1_rtx
3098 && COMPARISON_P (op0)
3099 && (reversed = reversed_comparison (op0, mode)))
3100 return reversed;
3101
3102 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3103 is (lt foo (const_int 0)), so we can perform the above
3104 simplification if STORE_FLAG_VALUE is 1. */
3105
3106 if (is_a <scalar_int_mode> (mode, &int_mode)
3107 && STORE_FLAG_VALUE == 1
3108 && trueop1 == const1_rtx
3109 && GET_CODE (op0) == LSHIFTRT
3110 && CONST_INT_P (XEXP (op0, 1))
3111 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3112 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3113
3114 /* (xor (comparison foo bar) (const_int sign-bit))
3115 when STORE_FLAG_VALUE is the sign bit. */
3116 if (is_a <scalar_int_mode> (mode, &int_mode)
3117 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3118 && trueop1 == const_true_rtx
3119 && COMPARISON_P (op0)
3120 && (reversed = reversed_comparison (op0, int_mode)))
3121 return reversed;
3122
3123 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3124 if (tem)
3125 return tem;
3126
3127 tem = simplify_associative_operation (code, mode, op0, op1);
3128 if (tem)
3129 return tem;
3130 break;
3131
3132 case AND:
3133 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3134 return trueop1;
3135 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3136 return op0;
3137 if (HWI_COMPUTABLE_MODE_P (mode))
3138 {
3139 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3140 HOST_WIDE_INT nzop1;
3141 if (CONST_INT_P (trueop1))
3142 {
3143 HOST_WIDE_INT val1 = INTVAL (trueop1);
3144 /* If we are turning off bits already known off in OP0, we need
3145 not do an AND. */
3146 if ((nzop0 & ~val1) == 0)
3147 return op0;
3148 }
3149 nzop1 = nonzero_bits (trueop1, mode);
3150 /* If we are clearing all the nonzero bits, the result is zero. */
3151 if ((nzop1 & nzop0) == 0
3152 && !side_effects_p (op0) && !side_effects_p (op1))
3153 return CONST0_RTX (mode);
3154 }
3155 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3156 && GET_MODE_CLASS (mode) != MODE_CC)
3157 return op0;
3158 /* A & (~A) -> 0 */
3159 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3160 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3161 && ! side_effects_p (op0)
3162 && GET_MODE_CLASS (mode) != MODE_CC)
3163 return CONST0_RTX (mode);
3164
3165 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3166 there are no nonzero bits of C outside of X's mode. */
3167 if ((GET_CODE (op0) == SIGN_EXTEND
3168 || GET_CODE (op0) == ZERO_EXTEND)
3169 && CONST_INT_P (trueop1)
3170 && HWI_COMPUTABLE_MODE_P (mode)
3171 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3172 & UINTVAL (trueop1)) == 0)
3173 {
3174 machine_mode imode = GET_MODE (XEXP (op0, 0));
3175 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3176 gen_int_mode (INTVAL (trueop1),
3177 imode));
3178 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3179 }
3180
3181 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3182 we might be able to further simplify the AND with X and potentially
3183 remove the truncation altogether. */
3184 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3185 {
3186 rtx x = XEXP (op0, 0);
3187 machine_mode xmode = GET_MODE (x);
3188 tem = simplify_gen_binary (AND, xmode, x,
3189 gen_int_mode (INTVAL (trueop1), xmode));
3190 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3191 }
3192
3193 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3194 if (GET_CODE (op0) == IOR
3195 && CONST_INT_P (trueop1)
3196 && CONST_INT_P (XEXP (op0, 1)))
3197 {
3198 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3199 return simplify_gen_binary (IOR, mode,
3200 simplify_gen_binary (AND, mode,
3201 XEXP (op0, 0), op1),
3202 gen_int_mode (tmp, mode));
3203 }
3204
3205 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3206 insn (and may simplify more). */
3207 if (GET_CODE (op0) == XOR
3208 && rtx_equal_p (XEXP (op0, 0), op1)
3209 && ! side_effects_p (op1))
3210 return simplify_gen_binary (AND, mode,
3211 simplify_gen_unary (NOT, mode,
3212 XEXP (op0, 1), mode),
3213 op1);
3214
3215 if (GET_CODE (op0) == XOR
3216 && rtx_equal_p (XEXP (op0, 1), op1)
3217 && ! side_effects_p (op1))
3218 return simplify_gen_binary (AND, mode,
3219 simplify_gen_unary (NOT, mode,
3220 XEXP (op0, 0), mode),
3221 op1);
3222
3223 /* Similarly for (~(A ^ B)) & A. */
3224 if (GET_CODE (op0) == NOT
3225 && GET_CODE (XEXP (op0, 0)) == XOR
3226 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3227 && ! side_effects_p (op1))
3228 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3229
3230 if (GET_CODE (op0) == NOT
3231 && GET_CODE (XEXP (op0, 0)) == XOR
3232 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3233 && ! side_effects_p (op1))
3234 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3235
3236 /* Convert (A | B) & A to A. */
3237 if (GET_CODE (op0) == IOR
3238 && (rtx_equal_p (XEXP (op0, 0), op1)
3239 || rtx_equal_p (XEXP (op0, 1), op1))
3240 && ! side_effects_p (XEXP (op0, 0))
3241 && ! side_effects_p (XEXP (op0, 1)))
3242 return op1;
3243
3244 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3245 ((A & N) + B) & M -> (A + B) & M
3246 Similarly if (N & M) == 0,
3247 ((A | N) + B) & M -> (A + B) & M
3248 and for - instead of + and/or ^ instead of |.
3249 Also, if (N & M) == 0, then
3250 (A +- N) & M -> A & M. */
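/* For example (illustrative only): with M == 0xff and N == 0xffff,
   (and (plus (and a (const_int 0xffff)) b) (const_int 0xff)) can become
   (and (plus a b) (const_int 0xff)), because bits above the mask cannot
   influence the masked low bits of the sum.  Likewise
   (and (plus a (const_int 0x100)) (const_int 0xff)) can become
   (and a (const_int 0xff)).  */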
3251 if (CONST_INT_P (trueop1)
3252 && HWI_COMPUTABLE_MODE_P (mode)
3253 && ~UINTVAL (trueop1)
3254 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3255 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3256 {
3257 rtx pmop[2];
3258 int which;
3259
3260 pmop[0] = XEXP (op0, 0);
3261 pmop[1] = XEXP (op0, 1);
3262
3263 if (CONST_INT_P (pmop[1])
3264 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3265 return simplify_gen_binary (AND, mode, pmop[0], op1);
3266
3267 for (which = 0; which < 2; which++)
3268 {
3269 tem = pmop[which];
3270 switch (GET_CODE (tem))
3271 {
3272 case AND:
3273 if (CONST_INT_P (XEXP (tem, 1))
3274 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3275 == UINTVAL (trueop1))
3276 pmop[which] = XEXP (tem, 0);
3277 break;
3278 case IOR:
3279 case XOR:
3280 if (CONST_INT_P (XEXP (tem, 1))
3281 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3282 pmop[which] = XEXP (tem, 0);
3283 break;
3284 default:
3285 break;
3286 }
3287 }
3288
3289 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3290 {
3291 tem = simplify_gen_binary (GET_CODE (op0), mode,
3292 pmop[0], pmop[1]);
3293 return simplify_gen_binary (code, mode, tem, op1);
3294 }
3295 }
3296
3297 /* (and X (ior (not X) Y)) -> (and X Y) */
3298 if (GET_CODE (op1) == IOR
3299 && GET_CODE (XEXP (op1, 0)) == NOT
3300 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3301 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3302
3303 /* (and (ior (not X) Y) X) -> (and X Y) */
3304 if (GET_CODE (op0) == IOR
3305 && GET_CODE (XEXP (op0, 0)) == NOT
3306 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3307 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3308
3309 /* (and X (ior Y (not X))) -> (and X Y) */
3310 if (GET_CODE (op1) == IOR
3311 && GET_CODE (XEXP (op1, 1)) == NOT
3312 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3313 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3314
3315 /* (and (ior Y (not X)) X) -> (and X Y) */
3316 if (GET_CODE (op0) == IOR
3317 && GET_CODE (XEXP (op0, 1)) == NOT
3318 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3319 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3320
3321 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3322 if (tem)
3323 return tem;
3324
3325 tem = simplify_associative_operation (code, mode, op0, op1);
3326 if (tem)
3327 return tem;
3328 break;
3329
3330 case UDIV:
3331 /* 0/x is 0 (or x&0 if x has side-effects). */
3332 if (trueop0 == CONST0_RTX (mode)
3333 && !cfun->can_throw_non_call_exceptions)
3334 {
3335 if (side_effects_p (op1))
3336 return simplify_gen_binary (AND, mode, op1, trueop0);
3337 return trueop0;
3338 }
3339 /* x/1 is x. */
3340 if (trueop1 == CONST1_RTX (mode))
3341 {
3342 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3343 if (tem)
3344 return tem;
3345 }
3346 /* Convert divide by power of two into shift. */
3347 if (CONST_INT_P (trueop1)
3348 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3349 return simplify_gen_binary (LSHIFTRT, mode, op0,
3350 gen_int_shift_amount (mode, val));
3351 break;
3352
3353 case DIV:
3354 /* Handle floating point and integers separately. */
3355 if (SCALAR_FLOAT_MODE_P (mode))
3356 {
3357 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3358 safe for modes with NaNs, since 0.0 / 0.0 will then be
3359 NaN rather than 0.0. Nor is it safe for modes with signed
3360 zeros, since dividing 0 by a negative number gives -0.0 */
3361 if (trueop0 == CONST0_RTX (mode)
3362 && !HONOR_NANS (mode)
3363 && !HONOR_SIGNED_ZEROS (mode)
3364 && ! side_effects_p (op1))
3365 return op0;
3366 /* x/1.0 is x. */
3367 if (trueop1 == CONST1_RTX (mode)
3368 && !HONOR_SNANS (mode))
3369 return op0;
3370
3371 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3372 && trueop1 != CONST0_RTX (mode))
3373 {
3374 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3375
3376 /* x/-1.0 is -x. */
3377 if (real_equal (d1, &dconstm1)
3378 && !HONOR_SNANS (mode))
3379 return simplify_gen_unary (NEG, mode, op0, mode);
3380
3381 /* Change FP division by a constant into multiplication.
3382 Only do this with -freciprocal-math. */
3383 if (flag_reciprocal_math
3384 && !real_equal (d1, &dconst0))
3385 {
3386 REAL_VALUE_TYPE d;
3387 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3388 tem = const_double_from_real_value (d, mode);
3389 return simplify_gen_binary (MULT, mode, op0, tem);
3390 }
3391 }
3392 }
3393 else if (SCALAR_INT_MODE_P (mode))
3394 {
3395 /* 0/x is 0 (or x&0 if x has side-effects). */
3396 if (trueop0 == CONST0_RTX (mode)
3397 && !cfun->can_throw_non_call_exceptions)
3398 {
3399 if (side_effects_p (op1))
3400 return simplify_gen_binary (AND, mode, op1, trueop0);
3401 return trueop0;
3402 }
3403 /* x/1 is x. */
3404 if (trueop1 == CONST1_RTX (mode))
3405 {
3406 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3407 if (tem)
3408 return tem;
3409 }
3410 /* x/-1 is -x. */
3411 if (trueop1 == constm1_rtx)
3412 {
3413 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3414 if (x)
3415 return simplify_gen_unary (NEG, mode, x, mode);
3416 }
3417 }
3418 break;
3419
3420 case UMOD:
3421 /* 0%x is 0 (or x&0 if x has side-effects). */
3422 if (trueop0 == CONST0_RTX (mode))
3423 {
3424 if (side_effects_p (op1))
3425 return simplify_gen_binary (AND, mode, op1, trueop0);
3426 return trueop0;
3427 }
3428 /* x%1 is 0 (or x&0 if x has side-effects). */
3429 if (trueop1 == CONST1_RTX (mode))
3430 {
3431 if (side_effects_p (op0))
3432 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3433 return CONST0_RTX (mode);
3434 }
3435 /* Implement modulus by power of two as AND. */
3436 if (CONST_INT_P (trueop1)
3437 && exact_log2 (UINTVAL (trueop1)) > 0)
3438 return simplify_gen_binary (AND, mode, op0,
3439 gen_int_mode (UINTVAL (trueop1) - 1,
3440 mode));
3441 break;
3442
3443 case MOD:
3444 /* 0%x is 0 (or x&0 if x has side-effects). */
3445 if (trueop0 == CONST0_RTX (mode))
3446 {
3447 if (side_effects_p (op1))
3448 return simplify_gen_binary (AND, mode, op1, trueop0);
3449 return trueop0;
3450 }
3451 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3452 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3453 {
3454 if (side_effects_p (op0))
3455 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3456 return CONST0_RTX (mode);
3457 }
3458 break;
3459
3460 case ROTATERT:
3461 case ROTATE:
3462 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3463 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3464 bitsize - 1, use the other direction of rotate with an amount of
3465 1 .. bitsize / 2 - 1 instead. */
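/* For a 32-bit mode this turns, for example, (rotate X 20) into
   (rotatert X 12) and (rotatert X 16) into (rotate X 16), while
   (rotate X 16) is left alone.  */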
3466 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3467 if (CONST_INT_P (trueop1)
3468 && IN_RANGE (INTVAL (trueop1),
3469 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3470 GET_MODE_UNIT_PRECISION (mode) - 1))
3471 {
3472 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3473 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3474 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3475 mode, op0, new_amount_rtx);
3476 }
3477 #endif
3478 /* FALLTHRU */
3479 case ASHIFTRT:
3480 if (trueop1 == CONST0_RTX (mode))
3481 return op0;
3482 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3483 return op0;
3484 /* Rotating ~0 always results in ~0. */
3485 if (CONST_INT_P (trueop0)
3486 && HWI_COMPUTABLE_MODE_P (mode)
3487 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3488 && ! side_effects_p (op1))
3489 return op0;
3490
3491 canonicalize_shift:
3492 /* Given:
3493 scalar modes M1, M2
3494 scalar constants c1, c2
3495 size (M2) > size (M1)
3496 c1 == size (M2) - size (M1)
3497 optimize:
3498 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3499 <low_part>)
3500 (const_int <c2>))
3501 to:
3502 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3503 <low_part>). */
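/* For example, with M2 = DImode, M1 = SImode and c1 = 32,
   (ashiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI) (const_int 32)) <low_part>)
   (const_int 3))
   becomes
   (subreg:SI (ashiftrt:DI (reg:DI) (const_int 35)) <low_part>).  */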
3504 if ((code == ASHIFTRT || code == LSHIFTRT)
3505 && is_a <scalar_int_mode> (mode, &int_mode)
3506 && SUBREG_P (op0)
3507 && CONST_INT_P (op1)
3508 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3509 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3510 &inner_mode)
3511 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3512 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3513 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3514 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3515 && subreg_lowpart_p (op0))
3516 {
3517 rtx tmp = gen_int_shift_amount
3518 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3519 tmp = simplify_gen_binary (code, inner_mode,
3520 XEXP (SUBREG_REG (op0), 0),
3521 tmp);
3522 return lowpart_subreg (int_mode, tmp, inner_mode);
3523 }
3524
3525 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3526 {
3527 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3528 if (val != INTVAL (op1))
3529 return simplify_gen_binary (code, mode, op0,
3530 gen_int_shift_amount (mode, val));
3531 }
3532 break;
3533
3534 case ASHIFT:
3535 case SS_ASHIFT:
3536 case US_ASHIFT:
3537 if (trueop1 == CONST0_RTX (mode))
3538 return op0;
3539 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3540 return op0;
3541 goto canonicalize_shift;
3542
3543 case LSHIFTRT:
3544 if (trueop1 == CONST0_RTX (mode))
3545 return op0;
3546 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3547 return op0;
3548 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
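/* This holds when CLZ of zero is defined to be the mode precision P,
   P is a power of two and C is log2 (P): (clz X) is then P, i.e.
   1 << C, only for X == 0 and smaller than P otherwise, so shifting
   it right by C yields exactly (eq X 0) when STORE_FLAG_VALUE is 1.  */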
3549 if (GET_CODE (op0) == CLZ
3550 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3551 && CONST_INT_P (trueop1)
3552 && STORE_FLAG_VALUE == 1
3553 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3554 {
3555 unsigned HOST_WIDE_INT zero_val = 0;
3556
3557 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3558 && zero_val == GET_MODE_PRECISION (inner_mode)
3559 && INTVAL (trueop1) == exact_log2 (zero_val))
3560 return simplify_gen_relational (EQ, mode, inner_mode,
3561 XEXP (op0, 0), const0_rtx);
3562 }
3563 goto canonicalize_shift;
3564
3565 case SMIN:
3566 if (HWI_COMPUTABLE_MODE_P (mode)
3567 && mode_signbit_p (mode, trueop1)
3568 && ! side_effects_p (op0))
3569 return op1;
3570 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3571 return op0;
3572 tem = simplify_associative_operation (code, mode, op0, op1);
3573 if (tem)
3574 return tem;
3575 break;
3576
3577 case SMAX:
3578 if (HWI_COMPUTABLE_MODE_P (mode)
3579 && CONST_INT_P (trueop1)
3580 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3581 && ! side_effects_p (op0))
3582 return op1;
3583 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3584 return op0;
3585 tem = simplify_associative_operation (code, mode, op0, op1);
3586 if (tem)
3587 return tem;
3588 break;
3589
3590 case UMIN:
3591 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3592 return op1;
3593 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3594 return op0;
3595 tem = simplify_associative_operation (code, mode, op0, op1);
3596 if (tem)
3597 return tem;
3598 break;
3599
3600 case UMAX:
3601 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3602 return op1;
3603 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3604 return op0;
3605 tem = simplify_associative_operation (code, mode, op0, op1);
3606 if (tem)
3607 return tem;
3608 break;
3609
3610 case SS_PLUS:
3611 case US_PLUS:
3612 case SS_MINUS:
3613 case US_MINUS:
3614 case SS_MULT:
3615 case US_MULT:
3616 case SS_DIV:
3617 case US_DIV:
3618 /* ??? There are simplifications that can be done. */
3619 return 0;
3620
3621 case VEC_SERIES:
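/* (vec_series A B) is the vector { A, A + B, A + 2*B, ... }.  A zero
   step is just a duplicate of A, and constant operands fold to a
   constant vector.  */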
3622 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3623 return gen_vec_duplicate (mode, op0);
3624 if (valid_for_const_vector_p (mode, op0)
3625 && valid_for_const_vector_p (mode, op1))
3626 return gen_const_vec_series (mode, op0, op1);
3627 return 0;
3628
3629 case VEC_SELECT:
3630 if (!VECTOR_MODE_P (mode))
3631 {
3632 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3633 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3634 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3635 gcc_assert (XVECLEN (trueop1, 0) == 1);
3636
3637 /* We can't reason about selections made at runtime. */
3638 if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3639 return 0;
3640
3641 if (vec_duplicate_p (trueop0, &elt0))
3642 return elt0;
3643
3644 if (GET_CODE (trueop0) == CONST_VECTOR)
3645 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3646 (trueop1, 0, 0)));
3647
3648 /* Extract a scalar element from a nested VEC_SELECT expression
3649 (with an optional nested VEC_CONCAT expression). Some targets
3650 (i386) extract a scalar element from a vector using a chain of
3651 nested VEC_SELECT expressions. When the input operand is a memory
3652 operand, this operation can be simplified to a simple scalar
3653 load from an offset memory address. */
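/* For example, (vec_select:SF (vec_select:V2SF (mem:V4SF X)
   (parallel [2 3])) (parallel [1])) refers to element 3 of X and
   simplifies to (vec_select:SF (mem:V4SF X) (parallel [3])).  */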
3654 int n_elts;
3655 if (GET_CODE (trueop0) == VEC_SELECT
3656 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3657 .is_constant (&n_elts)))
3658 {
3659 rtx op0 = XEXP (trueop0, 0);
3660 rtx op1 = XEXP (trueop0, 1);
3661
3662 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3663 int elem;
3664
3665 rtvec vec;
3666 rtx tmp_op, tmp;
3667
3668 gcc_assert (GET_CODE (op1) == PARALLEL);
3669 gcc_assert (i < n_elts);
3670
3671 /* Select the element pointed to by the nested selector. */
3672 elem = INTVAL (XVECEXP (op1, 0, i));
3673
3674 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3675 if (GET_CODE (op0) == VEC_CONCAT)
3676 {
3677 rtx op00 = XEXP (op0, 0);
3678 rtx op01 = XEXP (op0, 1);
3679
3680 machine_mode mode00, mode01;
3681 int n_elts00, n_elts01;
3682
3683 mode00 = GET_MODE (op00);
3684 mode01 = GET_MODE (op01);
3685
3686 /* Find out the number of elements of each operand.
3687 Since the concatenated result has a constant number
3688 of elements, the operands must too. */
3689 n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
3690 n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
3691
3692 gcc_assert (n_elts == n_elts00 + n_elts01);
3693
3694 /* Select correct operand of VEC_CONCAT
3695 and adjust selector. */
3696 if (elem < n_elts01)
3697 tmp_op = op00;
3698 else
3699 {
3700 tmp_op = op01;
3701 elem -= n_elts00;
3702 }
3703 }
3704 else
3705 tmp_op = op0;
3706
3707 vec = rtvec_alloc (1);
3708 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3709
3710 tmp = gen_rtx_fmt_ee (code, mode,
3711 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3712 return tmp;
3713 }
3714 }
3715 else
3716 {
3717 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3718 gcc_assert (GET_MODE_INNER (mode)
3719 == GET_MODE_INNER (GET_MODE (trueop0)));
3720 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3721
3722 if (vec_duplicate_p (trueop0, &elt0))
3723 /* It doesn't matter which elements are selected by trueop1,
3724 because they are all the same. */
3725 return gen_vec_duplicate (mode, elt0);
3726
3727 if (GET_CODE (trueop0) == CONST_VECTOR)
3728 {
3729 unsigned n_elts = XVECLEN (trueop1, 0);
3730 rtvec v = rtvec_alloc (n_elts);
3731 unsigned int i;
3732
3733 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
3734 for (i = 0; i < n_elts; i++)
3735 {
3736 rtx x = XVECEXP (trueop1, 0, i);
3737
3738 if (!CONST_INT_P (x))
3739 return 0;
3740
3741 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3742 INTVAL (x));
3743 }
3744
3745 return gen_rtx_CONST_VECTOR (mode, v);
3746 }
3747
3748 /* Recognize the identity. */
3749 if (GET_MODE (trueop0) == mode)
3750 {
3751 bool maybe_ident = true;
3752 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3753 {
3754 rtx j = XVECEXP (trueop1, 0, i);
3755 if (!CONST_INT_P (j) || INTVAL (j) != i)
3756 {
3757 maybe_ident = false;
3758 break;
3759 }
3760 }
3761 if (maybe_ident)
3762 return trueop0;
3763 }
3764
3765 /* If we build {a,b} then permute it, build the result directly. */
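/* For example, (vec_select:V2SI (vec_concat:V4SI (vec_concat:V2SI A B)
   (vec_concat:V2SI C D)) (parallel [2 0])) becomes
   (vec_concat:V2SI C A).  */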
3766 if (XVECLEN (trueop1, 0) == 2
3767 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3768 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3769 && GET_CODE (trueop0) == VEC_CONCAT
3770 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3771 && GET_MODE (XEXP (trueop0, 0)) == mode
3772 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3773 && GET_MODE (XEXP (trueop0, 1)) == mode)
3774 {
3775 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3776 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3777 rtx subop0, subop1;
3778
3779 gcc_assert (i0 < 4 && i1 < 4);
3780 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3781 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3782
3783 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3784 }
3785
3786 if (XVECLEN (trueop1, 0) == 2
3787 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3788 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3789 && GET_CODE (trueop0) == VEC_CONCAT
3790 && GET_MODE (trueop0) == mode)
3791 {
3792 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3793 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3794 rtx subop0, subop1;
3795
3796 gcc_assert (i0 < 2 && i1 < 2);
3797 subop0 = XEXP (trueop0, i0);
3798 subop1 = XEXP (trueop0, i1);
3799
3800 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3801 }
3802
3803 /* If we select one half of a vec_concat, return that. */
3804 int l0, l1;
3805 if (GET_CODE (trueop0) == VEC_CONCAT
3806 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3807 .is_constant (&l0))
3808 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
3809 .is_constant (&l1))
3810 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3811 {
3812 rtx subop0 = XEXP (trueop0, 0);
3813 rtx subop1 = XEXP (trueop0, 1);
3814 machine_mode mode0 = GET_MODE (subop0);
3815 machine_mode mode1 = GET_MODE (subop1);
3816 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3817 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3818 {
3819 bool success = true;
3820 for (int i = 1; i < l0; ++i)
3821 {
3822 rtx j = XVECEXP (trueop1, 0, i);
3823 if (!CONST_INT_P (j) || INTVAL (j) != i)
3824 {
3825 success = false;
3826 break;
3827 }
3828 }
3829 if (success)
3830 return subop0;
3831 }
3832 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3833 {
3834 bool success = true;
3835 for (int i = 1; i < l1; ++i)
3836 {
3837 rtx j = XVECEXP (trueop1, 0, i);
3838 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3839 {
3840 success = false;
3841 break;
3842 }
3843 }
3844 if (success)
3845 return subop1;
3846 }
3847 }
3848 }
3849
3850 if (XVECLEN (trueop1, 0) == 1
3851 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3852 && GET_CODE (trueop0) == VEC_CONCAT)
3853 {
3854 rtx vec = trueop0;
3855 offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3856
3857 /* Try to find the element in the VEC_CONCAT. */
3858 while (GET_MODE (vec) != mode
3859 && GET_CODE (vec) == VEC_CONCAT)
3860 {
3861 poly_int64 vec_size;
3862
3863 if (CONST_INT_P (XEXP (vec, 0)))
3864 {
3865 /* vec_concat of two const_ints doesn't make sense with
3866 respect to modes. */
3867 if (CONST_INT_P (XEXP (vec, 1)))
3868 return 0;
3869
3870 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3871 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3872 }
3873 else
3874 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3875
3876 if (known_lt (offset, vec_size))
3877 vec = XEXP (vec, 0);
3878 else if (known_ge (offset, vec_size))
3879 {
3880 offset -= vec_size;
3881 vec = XEXP (vec, 1);
3882 }
3883 else
3884 break;
3885 vec = avoid_constant_pool_reference (vec);
3886 }
3887
3888 if (GET_MODE (vec) == mode)
3889 return vec;
3890 }
3891
3892 /* If we select elements in a vec_merge that all come from the same
3893 operand, select from that operand directly. */
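/* In (vec_merge X Y SEL), a set bit I in the constant SEL means that
   element I of the result comes from X, a clear bit that it comes
   from Y.  */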
3894 if (GET_CODE (op0) == VEC_MERGE)
3895 {
3896 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3897 if (CONST_INT_P (trueop02))
3898 {
3899 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3900 bool all_operand0 = true;
3901 bool all_operand1 = true;
3902 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3903 {
3904 rtx j = XVECEXP (trueop1, 0, i);
3905 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3906 all_operand1 = false;
3907 else
3908 all_operand0 = false;
3909 }
3910 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3911 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3912 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3913 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3914 }
3915 }
3916
3917 /* If we have two nested selects that are inverses of each
3918 other, replace them with the source operand. */
3919 if (GET_CODE (trueop0) == VEC_SELECT
3920 && GET_MODE (XEXP (trueop0, 0)) == mode)
3921 {
3922 rtx op0_subop1 = XEXP (trueop0, 1);
3923 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3924 gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
3925
3926 /* Apply the outer ordering vector to the inner one. (The inner
3927 ordering vector is expressly permitted to be of a different
3928 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3929 then the two VEC_SELECTs cancel. */
3930 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3931 {
3932 rtx x = XVECEXP (trueop1, 0, i);
3933 if (!CONST_INT_P (x))
3934 return 0;
3935 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3936 if (!CONST_INT_P (y) || i != INTVAL (y))
3937 return 0;
3938 }
3939 return XEXP (trueop0, 0);
3940 }
3941
3942 return 0;
3943 case VEC_CONCAT:
3944 {
3945 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3946 ? GET_MODE (trueop0)
3947 : GET_MODE_INNER (mode));
3948 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3949 ? GET_MODE (trueop1)
3950 : GET_MODE_INNER (mode));
3951
3952 gcc_assert (VECTOR_MODE_P (mode));
3953 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
3954 + GET_MODE_SIZE (op1_mode),
3955 GET_MODE_SIZE (mode)));
3956
3957 if (VECTOR_MODE_P (op0_mode))
3958 gcc_assert (GET_MODE_INNER (mode)
3959 == GET_MODE_INNER (op0_mode));
3960 else
3961 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3962
3963 if (VECTOR_MODE_P (op1_mode))
3964 gcc_assert (GET_MODE_INNER (mode)
3965 == GET_MODE_INNER (op1_mode));
3966 else
3967 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3968
3969 unsigned int n_elts, in_n_elts;
3970 if ((GET_CODE (trueop0) == CONST_VECTOR
3971 || CONST_SCALAR_INT_P (trueop0)
3972 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3973 && (GET_CODE (trueop1) == CONST_VECTOR
3974 || CONST_SCALAR_INT_P (trueop1)
3975 || CONST_DOUBLE_AS_FLOAT_P (trueop1))
3976 && GET_MODE_NUNITS (mode).is_constant (&n_elts)
3977 && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
3978 {
3979 rtvec v = rtvec_alloc (n_elts);
3980 unsigned int i;
3981 for (i = 0; i < n_elts; i++)
3982 {
3983 if (i < in_n_elts)
3984 {
3985 if (!VECTOR_MODE_P (op0_mode))
3986 RTVEC_ELT (v, i) = trueop0;
3987 else
3988 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3989 }
3990 else
3991 {
3992 if (!VECTOR_MODE_P (op1_mode))
3993 RTVEC_ELT (v, i) = trueop1;
3994 else
3995 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3996 i - in_n_elts);
3997 }
3998 }
3999
4000 return gen_rtx_CONST_VECTOR (mode, v);
4001 }
4002
4003 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4004 Restrict the transformation to avoid generating a VEC_SELECT with a
4005 mode unrelated to its operand. */
4006 if (GET_CODE (trueop0) == VEC_SELECT
4007 && GET_CODE (trueop1) == VEC_SELECT
4008 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
4009 && GET_MODE (XEXP (trueop0, 0)) == mode)
4010 {
4011 rtx par0 = XEXP (trueop0, 1);
4012 rtx par1 = XEXP (trueop1, 1);
4013 int len0 = XVECLEN (par0, 0);
4014 int len1 = XVECLEN (par1, 0);
4015 rtvec vec = rtvec_alloc (len0 + len1);
4016 for (int i = 0; i < len0; i++)
4017 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
4018 for (int i = 0; i < len1; i++)
4019 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
4020 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
4021 gen_rtx_PARALLEL (VOIDmode, vec));
4022 }
4023 }
4024 return 0;
4025
4026 default:
4027 gcc_unreachable ();
4028 }
4029
4030 if (mode == GET_MODE (op0)
4031 && mode == GET_MODE (op1)
4032 && vec_duplicate_p (op0, &elt0)
4033 && vec_duplicate_p (op1, &elt1))
4034 {
4035 /* Try applying the operator to ELT and see if that simplifies.
4036 We can duplicate the result if so.
4037
4038 The reason we don't use simplify_gen_binary is that it isn't
4039 necessarily a win to convert things like:
4040
4041 (plus:V (vec_duplicate:V (reg:S R1))
4042 (vec_duplicate:V (reg:S R2)))
4043
4044 to:
4045
4046 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4047
4048 The first might be done entirely in vector registers while the
4049 second might need a move between register files. */
4050 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4051 elt0, elt1);
4052 if (tem)
4053 return gen_vec_duplicate (mode, tem);
4054 }
4055
4056 return 0;
4057 }
4058
4059 /* Return true if binary operation OP distributes over addition in operand
4060 OPNO, with the other operand being held constant. OPNO counts from 1. */
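/* For example, ASHIFT distributes over addition in operand 1 but not
   in operand 2: (x + y) << c equals (x << c) + (y << c) in modular
   arithmetic, whereas x << (c1 + c2) does not equal
   (x << c1) + (x << c2) in general.  */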
4061
4062 static bool
4063 distributes_over_addition_p (rtx_code op, int opno)
4064 {
4065 switch (op)
4066 {
4067 case PLUS:
4068 case MINUS:
4069 case MULT:
4070 return true;
4071
4072 case ASHIFT:
4073 return opno == 1;
4074
4075 default:
4076 return false;
4077 }
4078 }
4079
4080 rtx
4081 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4082 rtx op0, rtx op1)
4083 {
4084 if (VECTOR_MODE_P (mode)
4085 && code != VEC_CONCAT
4086 && GET_CODE (op0) == CONST_VECTOR
4087 && GET_CODE (op1) == CONST_VECTOR)
4088 {
4089 bool step_ok_p;
4090 if (CONST_VECTOR_STEPPED_P (op0)
4091 && CONST_VECTOR_STEPPED_P (op1))
4092 /* We can operate directly on the encoding if:
4093
4094 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4095 implies
4096 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4097
4098 Addition and subtraction are the supported operators
4099 for which this is true. */
4100 step_ok_p = (code == PLUS || code == MINUS);
4101 else if (CONST_VECTOR_STEPPED_P (op0))
4102 /* We can operate directly on stepped encodings if:
4103
4104 a3 - a2 == a2 - a1
4105 implies:
4106 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4107
4108 which is true if (x -> x op c) distributes over addition. */
4109 step_ok_p = distributes_over_addition_p (code, 1);
4110 else
4111 /* Similarly in reverse. */
4112 step_ok_p = distributes_over_addition_p (code, 2);
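/* For example, multiplying the stepped vector {1, 2, 3, ...} by the
   duplicated vector {2, 2, 2, ...} distributes over the step, so the
   stepped result {2, 4, 6, ...} can be computed from the encoded
   elements alone.  */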
4113 rtx_vector_builder builder;
4114 if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
4115 return 0;
4116
4117 unsigned int count = builder.encoded_nelts ();
4118 for (unsigned int i = 0; i < count; i++)
4119 {
4120 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4121 CONST_VECTOR_ELT (op0, i),
4122 CONST_VECTOR_ELT (op1, i));
4123 if (!x || !valid_for_const_vector_p (mode, x))
4124 return 0;
4125 builder.quick_push (x);
4126 }
4127 return builder.build ();
4128 }
4129
4130 if (VECTOR_MODE_P (mode)
4131 && code == VEC_CONCAT
4132 && (CONST_SCALAR_INT_P (op0)
4133 || CONST_FIXED_P (op0)
4134 || CONST_DOUBLE_AS_FLOAT_P (op0))
4135 && (CONST_SCALAR_INT_P (op1)
4136 || CONST_DOUBLE_AS_FLOAT_P (op1)
4137 || CONST_FIXED_P (op1)))
4138 {
4139 /* Both inputs have a constant number of elements, so the result
4140 must too. */
4141 unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4142 rtvec v = rtvec_alloc (n_elts);
4143
4144 gcc_assert (n_elts >= 2);
4145 if (n_elts == 2)
4146 {
4147 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4148 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4149
4150 RTVEC_ELT (v, 0) = op0;
4151 RTVEC_ELT (v, 1) = op1;
4152 }
4153 else
4154 {
4155 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4156 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4157 unsigned i;
4158
4159 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4160 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4161 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4162
4163 for (i = 0; i < op0_n_elts; ++i)
4164 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4165 for (i = 0; i < op1_n_elts; ++i)
4166 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4167 }
4168
4169 return gen_rtx_CONST_VECTOR (mode, v);
4170 }
4171
4172 if (SCALAR_FLOAT_MODE_P (mode)
4173 && CONST_DOUBLE_AS_FLOAT_P (op0)
4174 && CONST_DOUBLE_AS_FLOAT_P (op1)
4175 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4176 {
4177 if (code == AND
4178 || code == IOR
4179 || code == XOR)
4180 {
4181 long tmp0[4];
4182 long tmp1[4];
4183 REAL_VALUE_TYPE r;
4184 int i;
4185
4186 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4187 GET_MODE (op0));
4188 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4189 GET_MODE (op1));
4190 for (i = 0; i < 4; i++)
4191 {
4192 switch (code)
4193 {
4194 case AND:
4195 tmp0[i] &= tmp1[i];
4196 break;
4197 case IOR:
4198 tmp0[i] |= tmp1[i];
4199 break;
4200 case XOR:
4201 tmp0[i] ^= tmp1[i];
4202 break;
4203 default:
4204 gcc_unreachable ();
4205 }
4206 }
4207 real_from_target (&r, tmp0, mode);
4208 return const_double_from_real_value (r, mode);
4209 }
4210 else
4211 {
4212 REAL_VALUE_TYPE f0, f1, value, result;
4213 const REAL_VALUE_TYPE *opr0, *opr1;
4214 bool inexact;
4215
4216 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4217 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4218
4219 if (HONOR_SNANS (mode)
4220 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4221 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4222 return 0;
4223
4224 real_convert (&f0, mode, opr0);
4225 real_convert (&f1, mode, opr1);
4226
4227 if (code == DIV
4228 && real_equal (&f1, &dconst0)
4229 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4230 return 0;
4231
4232 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4233 && flag_trapping_math
4234 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4235 {
4236 int s0 = REAL_VALUE_NEGATIVE (f0);
4237 int s1 = REAL_VALUE_NEGATIVE (f1);
4238
4239 switch (code)
4240 {
4241 case PLUS:
4242 /* Inf + -Inf = NaN plus exception. */
4243 if (s0 != s1)
4244 return 0;
4245 break;
4246 case MINUS:
4247 /* Inf - Inf = NaN plus exception. */
4248 if (s0 == s1)
4249 return 0;
4250 break;
4251 case DIV:
4252 /* Inf / Inf = NaN plus exception. */
4253 return 0;
4254 default:
4255 break;
4256 }
4257 }
4258
4259 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4260 && flag_trapping_math
4261 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4262 || (REAL_VALUE_ISINF (f1)
4263 && real_equal (&f0, &dconst0))))
4264 /* Inf * 0 = NaN plus exception. */
4265 return 0;
4266
4267 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4268 &f0, &f1);
4269 real_convert (&result, mode, &value);
4270
4271 /* Don't constant fold this floating point operation if
4272 the result has overflowed and flag_trapping_math is set. */
4273
4274 if (flag_trapping_math
4275 && MODE_HAS_INFINITIES (mode)
4276 && REAL_VALUE_ISINF (result)
4277 && !REAL_VALUE_ISINF (f0)
4278 && !REAL_VALUE_ISINF (f1))
4279 /* Overflow plus exception. */
4280 return 0;
4281
4282 /* Don't constant fold this floating point operation if the
4283 result may depend upon the run-time rounding mode and
4284 flag_rounding_math is set, or if GCC's software emulation
4285 is unable to accurately represent the result. */
4286
4287 if ((flag_rounding_math
4288 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4289 && (inexact || !real_identical (&result, &value)))
4290 return NULL_RTX;
4291
4292 return const_double_from_real_value (result, mode);
4293 }
4294 }
4295
4296 /* We can fold some multi-word operations. */
4297 scalar_int_mode int_mode;
4298 if (is_a <scalar_int_mode> (mode, &int_mode)
4299 && CONST_SCALAR_INT_P (op0)
4300 && CONST_SCALAR_INT_P (op1))
4301 {
4302 wide_int result;
4303 wi::overflow_type overflow;
4304 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4305 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4306
4307 #if TARGET_SUPPORTS_WIDE_INT == 0
4308 /* This assert keeps the simplification from producing a result
4309 that cannot be represented in a CONST_DOUBLE but a lot of
4310 upstream callers expect that this function never fails to
4311 simplify something, and so if you added this to the test
4312 above the code would die later anyway. If this assert
4313 happens, you just need to make the port support wide int. */
4314 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4315 #endif
4316 switch (code)
4317 {
4318 case MINUS:
4319 result = wi::sub (pop0, pop1);
4320 break;
4321
4322 case PLUS:
4323 result = wi::add (pop0, pop1);
4324 break;
4325
4326 case MULT:
4327 result = wi::mul (pop0, pop1);
4328 break;
4329
4330 case DIV:
4331 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4332 if (overflow)
4333 return NULL_RTX;
4334 break;
4335
4336 case MOD:
4337 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4338 if (overflow)
4339 return NULL_RTX;
4340 break;
4341
4342 case UDIV:
4343 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4344 if (overflow)
4345 return NULL_RTX;
4346 break;
4347
4348 case UMOD:
4349 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4350 if (overflow)
4351 return NULL_RTX;
4352 break;
4353
4354 case AND:
4355 result = wi::bit_and (pop0, pop1);
4356 break;
4357
4358 case IOR:
4359 result = wi::bit_or (pop0, pop1);
4360 break;
4361
4362 case XOR:
4363 result = wi::bit_xor (pop0, pop1);
4364 break;
4365
4366 case SMIN:
4367 result = wi::smin (pop0, pop1);
4368 break;
4369
4370 case SMAX:
4371 result = wi::smax (pop0, pop1);
4372 break;
4373
4374 case UMIN:
4375 result = wi::umin (pop0, pop1);
4376 break;
4377
4378 case UMAX:
4379 result = wi::umax (pop0, pop1);
4380 break;
4381
4382 case LSHIFTRT:
4383 case ASHIFTRT:
4384 case ASHIFT:
4385 {
4386 wide_int wop1 = pop1;
4387 if (SHIFT_COUNT_TRUNCATED)
4388 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4389 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4390 return NULL_RTX;
4391
4392 switch (code)
4393 {
4394 case LSHIFTRT:
4395 result = wi::lrshift (pop0, wop1);
4396 break;
4397
4398 case ASHIFTRT:
4399 result = wi::arshift (pop0, wop1);
4400 break;
4401
4402 case ASHIFT:
4403 result = wi::lshift (pop0, wop1);
4404 break;
4405
4406 default:
4407 gcc_unreachable ();
4408 }
4409 break;
4410 }
4411 case ROTATE:
4412 case ROTATERT:
4413 {
4414 if (wi::neg_p (pop1))
4415 return NULL_RTX;
4416
4417 switch (code)
4418 {
4419 case ROTATE:
4420 result = wi::lrotate (pop0, pop1);
4421 break;
4422
4423 case ROTATERT:
4424 result = wi::rrotate (pop0, pop1);
4425 break;
4426
4427 default:
4428 gcc_unreachable ();
4429 }
4430 break;
4431 }
4432 default:
4433 return NULL_RTX;
4434 }
4435 return immed_wide_int_const (result, int_mode);
4436 }
4437
4438 /* Handle polynomial integers. */
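/* These are values of the form C0 + C1 * X, where X is a runtime
   invariant such as the vector length on variable-length vector
   targets.  PLUS and MINUS work coefficient-wise; MULT, ASHIFT and
   IOR are only handled when the second operand is an ordinary
   constant.  */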
4439 if (NUM_POLY_INT_COEFFS > 1
4440 && is_a <scalar_int_mode> (mode, &int_mode)
4441 && poly_int_rtx_p (op0)
4442 && poly_int_rtx_p (op1))
4443 {
4444 poly_wide_int result;
4445 switch (code)
4446 {
4447 case PLUS:
4448 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4449 break;
4450
4451 case MINUS:
4452 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4453 break;
4454
4455 case MULT:
4456 if (CONST_SCALAR_INT_P (op1))
4457 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4458 else
4459 return NULL_RTX;
4460 break;
4461
4462 case ASHIFT:
4463 if (CONST_SCALAR_INT_P (op1))
4464 {
4465 wide_int shift = rtx_mode_t (op1, mode);
4466 if (SHIFT_COUNT_TRUNCATED)
4467 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4468 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4469 return NULL_RTX;
4470 result = wi::to_poly_wide (op0, mode) << shift;
4471 }
4472 else
4473 return NULL_RTX;
4474 break;
4475
4476 case IOR:
4477 if (!CONST_SCALAR_INT_P (op1)
4478 || !can_ior_p (wi::to_poly_wide (op0, mode),
4479 rtx_mode_t (op1, mode), &result))
4480 return NULL_RTX;
4481 break;
4482
4483 default:
4484 return NULL_RTX;
4485 }
4486 return immed_wide_int_const (result, int_mode);
4487 }
4488
4489 return NULL_RTX;
4490 }
4491
4492
4493 \f
4494 /* Return a positive integer if X should sort after Y. The value
4495 returned is 1 if and only if X and Y are both regs. */
4496
4497 static int
4498 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4499 {
4500 int result;
4501
4502 result = (commutative_operand_precedence (y)
4503 - commutative_operand_precedence (x));
4504 if (result)
4505 return result + result;
4506
4507 /* Group together equal REGs to do more simplification. */
4508 if (REG_P (x) && REG_P (y))
4509 return REGNO (x) > REGNO (y);
4510
4511 return 0;
4512 }
4513
4514 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4515 operands may be another PLUS or MINUS.
4516
4517 Rather than test for specific cases, we do this by a brute-force method
4518 and do all possible simplifications until no more changes occur. Then
4519 we rebuild the operation.
4520
4521 May return NULL_RTX when no changes were made. */
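/* For example, (plus (plus (reg B) (reg A)) (minus (reg C) (reg A)))
   is expanded into the operand list {B, A, C, -A}; the A and -A
   entries cancel in the pairwise simplification loop and the result
   is rebuilt as the sum of B and C.  */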
4522
4523 static rtx
4524 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4525 rtx op1)
4526 {
4527 struct simplify_plus_minus_op_data
4528 {
4529 rtx op;
4530 short neg;
4531 } ops[16];
4532 rtx result, tem;
4533 int n_ops = 2;
4534 int changed, n_constants, canonicalized = 0;
4535 int i, j;
4536
4537 memset (ops, 0, sizeof ops);
4538
4539 /* Set up the two operands and then expand them until nothing has been
4540 changed. If we run out of room in our array, give up; this should
4541 almost never happen. */
4542
4543 ops[0].op = op0;
4544 ops[0].neg = 0;
4545 ops[1].op = op1;
4546 ops[1].neg = (code == MINUS);
4547
4548 do
4549 {
4550 changed = 0;
4551 n_constants = 0;
4552
4553 for (i = 0; i < n_ops; i++)
4554 {
4555 rtx this_op = ops[i].op;
4556 int this_neg = ops[i].neg;
4557 enum rtx_code this_code = GET_CODE (this_op);
4558
4559 switch (this_code)
4560 {
4561 case PLUS:
4562 case MINUS:
4563 if (n_ops == ARRAY_SIZE (ops))
4564 return NULL_RTX;
4565
4566 ops[n_ops].op = XEXP (this_op, 1);
4567 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4568 n_ops++;
4569
4570 ops[i].op = XEXP (this_op, 0);
4571 changed = 1;
4572 /* If this operand was negated then we will potentially
4573 canonicalize the expression. Similarly if we don't
4574 place the operands adjacent we're re-ordering the
4575 expression and thus might be performing a
4576 canonicalization. Ignore register re-ordering.
4577 ??? It might be better to shuffle the ops array here,
4578 but then (plus (plus (A, B), plus (C, D))) wouldn't
4579 be seen as non-canonical. */
4580 if (this_neg
4581 || (i != n_ops - 2
4582 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4583 canonicalized = 1;
4584 break;
4585
4586 case NEG:
4587 ops[i].op = XEXP (this_op, 0);
4588 ops[i].neg = ! this_neg;
4589 changed = 1;
4590 canonicalized = 1;
4591 break;
4592
4593 case CONST:
4594 if (n_ops != ARRAY_SIZE (ops)
4595 && GET_CODE (XEXP (this_op, 0)) == PLUS
4596 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4597 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4598 {
4599 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4600 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4601 ops[n_ops].neg = this_neg;
4602 n_ops++;
4603 changed = 1;
4604 canonicalized = 1;
4605 }
4606 break;
4607
4608 case NOT:
4609 /* ~a -> (-a - 1) */
4610 if (n_ops != ARRAY_SIZE (ops))
4611 {
4612 ops[n_ops].op = CONSTM1_RTX (mode);
4613 ops[n_ops++].neg = this_neg;
4614 ops[i].op = XEXP (this_op, 0);
4615 ops[i].neg = !this_neg;
4616 changed = 1;
4617 canonicalized = 1;
4618 }
4619 break;
4620
4621 case CONST_INT:
4622 n_constants++;
4623 if (this_neg)
4624 {
4625 ops[i].op = neg_const_int (mode, this_op);
4626 ops[i].neg = 0;
4627 changed = 1;
4628 canonicalized = 1;
4629 }
4630 break;
4631
4632 default:
4633 break;
4634 }
4635 }
4636 }
4637 while (changed);
4638
4639 if (n_constants > 1)
4640 canonicalized = 1;
4641
4642 gcc_assert (n_ops >= 2);
4643
4644 /* If we only have two operands, we can avoid the loops. */
4645 if (n_ops == 2)
4646 {
4647 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4648 rtx lhs, rhs;
4649
4650 /* Get the two operands. Be careful with the order, especially for
4651 the cases where code == MINUS. */
4652 if (ops[0].neg && ops[1].neg)
4653 {
4654 lhs = gen_rtx_NEG (mode, ops[0].op);
4655 rhs = ops[1].op;
4656 }
4657 else if (ops[0].neg)
4658 {
4659 lhs = ops[1].op;
4660 rhs = ops[0].op;
4661 }
4662 else
4663 {
4664 lhs = ops[0].op;
4665 rhs = ops[1].op;
4666 }
4667
4668 return simplify_const_binary_operation (code, mode, lhs, rhs);
4669 }
4670
4671 /* Now simplify each pair of operands until nothing changes. */
4672 while (1)
4673 {
4674 /* Insertion sort is good enough for a small array. */
4675 for (i = 1; i < n_ops; i++)
4676 {
4677 struct simplify_plus_minus_op_data save;
4678 int cmp;
4679
4680 j = i - 1;
4681 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4682 if (cmp <= 0)
4683 continue;
4684 /* Just swapping registers doesn't count as canonicalization. */
4685 if (cmp != 1)
4686 canonicalized = 1;
4687
4688 save = ops[i];
4689 do
4690 ops[j + 1] = ops[j];
4691 while (j--
4692 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4693 ops[j + 1] = save;
4694 }
4695
4696 changed = 0;
4697 for (i = n_ops - 1; i > 0; i--)
4698 for (j = i - 1; j >= 0; j--)
4699 {
4700 rtx lhs = ops[j].op, rhs = ops[i].op;
4701 int lneg = ops[j].neg, rneg = ops[i].neg;
4702
4703 if (lhs != 0 && rhs != 0)
4704 {
4705 enum rtx_code ncode = PLUS;
4706
4707 if (lneg != rneg)
4708 {
4709 ncode = MINUS;
4710 if (lneg)
4711 std::swap (lhs, rhs);
4712 }
4713 else if (swap_commutative_operands_p (lhs, rhs))
4714 std::swap (lhs, rhs);
4715
4716 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4717 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4718 {
4719 rtx tem_lhs, tem_rhs;
4720
4721 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4722 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4723 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4724 tem_rhs);
4725
4726 if (tem && !CONSTANT_P (tem))
4727 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4728 }
4729 else
4730 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4731
4732 if (tem)
4733 {
4734 /* Reject "simplifications" that just wrap the two
4735 arguments in a CONST. Failure to do so can result
4736 in infinite recursion with simplify_binary_operation
4737 when it calls us to simplify CONST operations.
4738 Also, if we find such a simplification, don't try
4739 any more combinations with this rhs: We must have
4740 something like symbol+offset, i.e. one of the
4741 trivial CONST expressions we handle later. */
4742 if (GET_CODE (tem) == CONST
4743 && GET_CODE (XEXP (tem, 0)) == ncode
4744 && XEXP (XEXP (tem, 0), 0) == lhs
4745 && XEXP (XEXP (tem, 0), 1) == rhs)
4746 break;
4747 lneg &= rneg;
4748 if (GET_CODE (tem) == NEG)
4749 tem = XEXP (tem, 0), lneg = !lneg;
4750 if (CONST_INT_P (tem) && lneg)
4751 tem = neg_const_int (mode, tem), lneg = 0;
4752
4753 ops[i].op = tem;
4754 ops[i].neg = lneg;
4755 ops[j].op = NULL_RTX;
4756 changed = 1;
4757 canonicalized = 1;
4758 }
4759 }
4760 }
4761
4762 if (!changed)
4763 break;
4764
4765 /* Pack all the operands to the lower-numbered entries. */
4766 for (i = 0, j = 0; j < n_ops; j++)
4767 if (ops[j].op)
4768 {
4769 ops[i] = ops[j];
4770 i++;
4771 }
4772 n_ops = i;
4773 }
4774
4775 /* If nothing changed, check that rematerialization of rtl instructions
4776 is still required. */
4777 if (!canonicalized)
4778 {
4779 /* Perform rematerialization only if all operands are registers and
4780 all operations are PLUS. */
4781 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4782 around rs6000 and how it uses the CA register. See PR67145. */
4783 for (i = 0; i < n_ops; i++)
4784 if (ops[i].neg
4785 || !REG_P (ops[i].op)
4786 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4787 && fixed_regs[REGNO (ops[i].op)]
4788 && !global_regs[REGNO (ops[i].op)]
4789 && ops[i].op != frame_pointer_rtx
4790 && ops[i].op != arg_pointer_rtx
4791 && ops[i].op != stack_pointer_rtx))
4792 return NULL_RTX;
4793 goto gen_result;
4794 }
4795
4796 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4797 if (n_ops == 2
4798 && CONST_INT_P (ops[1].op)
4799 && CONSTANT_P (ops[0].op)
4800 && ops[0].neg)
4801 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4802
4803 /* We suppressed creation of trivial CONST expressions in the
4804 combination loop to avoid recursion. Create one manually now.
4805 The combination loop should have ensured that there is exactly
4806 one CONST_INT, and the sort will have ensured that it is last
4807 in the array and that any other constant will be next-to-last. */
4808
4809 if (n_ops > 1
4810 && CONST_INT_P (ops[n_ops - 1].op)
4811 && CONSTANT_P (ops[n_ops - 2].op))
4812 {
4813 rtx value = ops[n_ops - 1].op;
4814 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4815 value = neg_const_int (mode, value);
4816 if (CONST_INT_P (value))
4817 {
4818 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4819 INTVAL (value));
4820 n_ops--;
4821 }
4822 }
4823
4824 /* Put a non-negated operand first, if possible. */
4825
4826 for (i = 0; i < n_ops && ops[i].neg; i++)
4827 continue;
4828 if (i == n_ops)
4829 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4830 else if (i != 0)
4831 {
4832 tem = ops[0].op;
4833 ops[0] = ops[i];
4834 ops[i].op = tem;
4835 ops[i].neg = 1;
4836 }
4837
4838 /* Now make the result by performing the requested operations. */
4839 gen_result:
4840 result = ops[0].op;
4841 for (i = 1; i < n_ops; i++)
4842 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4843 mode, result, ops[i].op);
4844
4845 return result;
4846 }
4847
4848 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4849 static bool
4850 plus_minus_operand_p (const_rtx x)
4851 {
4852 return GET_CODE (x) == PLUS
4853 || GET_CODE (x) == MINUS
4854 || (GET_CODE (x) == CONST
4855 && GET_CODE (XEXP (x, 0)) == PLUS
4856 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4857 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4858 }
4859
4860 /* Like simplify_binary_operation except used for relational operators.
4861 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4862 not both be VOIDmode as well.
4863
4864 CMP_MODE specifies the mode in which the comparison is done, so it is
4865 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4866 the operands or, if both are VOIDmode, the operands are compared in
4867 "infinite precision". */
4868 rtx
4869 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4870 machine_mode cmp_mode, rtx op0, rtx op1)
4871 {
4872 rtx tem, trueop0, trueop1;
4873
4874 if (cmp_mode == VOIDmode)
4875 cmp_mode = GET_MODE (op0);
4876 if (cmp_mode == VOIDmode)
4877 cmp_mode = GET_MODE (op1);
4878
4879 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4880 if (tem)
4881 {
4882 if (SCALAR_FLOAT_MODE_P (mode))
4883 {
4884 if (tem == const0_rtx)
4885 return CONST0_RTX (mode);
4886 #ifdef FLOAT_STORE_FLAG_VALUE
4887 {
4888 REAL_VALUE_TYPE val;
4889 val = FLOAT_STORE_FLAG_VALUE (mode);
4890 return const_double_from_real_value (val, mode);
4891 }
4892 #else
4893 return NULL_RTX;
4894 #endif
4895 }
4896 if (VECTOR_MODE_P (mode))
4897 {
4898 if (tem == const0_rtx)
4899 return CONST0_RTX (mode);
4900 #ifdef VECTOR_STORE_FLAG_VALUE
4901 {
4902 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4903 if (val == NULL_RTX)
4904 return NULL_RTX;
4905 if (val == const1_rtx)
4906 return CONST1_RTX (mode);
4907
4908 return gen_const_vec_duplicate (mode, val);
4909 }
4910 #else
4911 return NULL_RTX;
4912 #endif
4913 }
4914
4915 return tem;
4916 }
4917
4918 /* For the following tests, ensure const0_rtx is op1. */
4919 if (swap_commutative_operands_p (op0, op1)
4920 || (op0 == const0_rtx && op1 != const0_rtx))
4921 std::swap (op0, op1), code = swap_condition (code);
4922
4923 /* If op0 is a compare, extract the comparison arguments from it. */
4924 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4925 return simplify_gen_relational (code, mode, VOIDmode,
4926 XEXP (op0, 0), XEXP (op0, 1));
4927
4928 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4929 || CC0_P (op0))
4930 return NULL_RTX;
4931
4932 trueop0 = avoid_constant_pool_reference (op0);
4933 trueop1 = avoid_constant_pool_reference (op1);
4934 return simplify_relational_operation_1 (code, mode, cmp_mode,
4935 trueop0, trueop1);
4936 }
4937
4938 /* This part of simplify_relational_operation is only used when CMP_MODE
4939 is not in class MODE_CC (i.e. it is a real comparison).
4940
4941 MODE is the mode of the result, while CMP_MODE specifies the mode
4942 in which the comparison is done, so it is the mode of the operands. */
4943
4944 static rtx
4945 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4946 machine_mode cmp_mode, rtx op0, rtx op1)
4947 {
4948 enum rtx_code op0code = GET_CODE (op0);
4949
4950 if (op1 == const0_rtx && COMPARISON_P (op0))
4951 {
4952 /* If op0 is a comparison, extract the comparison arguments
4953 from it. */
4954 if (code == NE)
4955 {
4956 if (GET_MODE (op0) == mode)
4957 return simplify_rtx (op0);
4958 else
4959 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4960 XEXP (op0, 0), XEXP (op0, 1));
4961 }
4962 else if (code == EQ)
4963 {
4964 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4965 if (new_code != UNKNOWN)
4966 return simplify_gen_relational (new_code, mode, VOIDmode,
4967 XEXP (op0, 0), XEXP (op0, 1));
4968 }
4969 }
4970
4971 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4972 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
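/* Both comparisons test whether the addition wrapped around: with C
   a nonzero constant, a + C wraps exactly when a is at least -C in
   the unsigned sense.  */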
4973 if ((code == LTU || code == GEU)
4974 && GET_CODE (op0) == PLUS
4975 && CONST_INT_P (XEXP (op0, 1))
4976 && (rtx_equal_p (op1, XEXP (op0, 0))
4977 || rtx_equal_p (op1, XEXP (op0, 1)))
4978 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4979 && XEXP (op0, 1) != const0_rtx)
4980 {
4981 rtx new_cmp
4982 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4983 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4984 cmp_mode, XEXP (op0, 0), new_cmp);
4985 }
4986
4987 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4988 transformed into (LTU a -C). */
4989 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4990 && CONST_INT_P (XEXP (op0, 1))
4991 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4992 && XEXP (op0, 1) != const0_rtx)
4993 {
4994 rtx new_cmp
4995 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4996 return simplify_gen_relational (LTU, mode, cmp_mode,
4997 XEXP (op0, 0), new_cmp);
4998 }
4999
5000 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5001 if ((code == LTU || code == GEU)
5002 && GET_CODE (op0) == PLUS
5003 && rtx_equal_p (op1, XEXP (op0, 1))
5004 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5005 && !rtx_equal_p (op1, XEXP (op0, 0)))
5006 return simplify_gen_relational (code, mode, cmp_mode, op0,
5007 copy_rtx (XEXP (op0, 0)));
5008
5009 if (op1 == const0_rtx)
5010 {
5011 /* Canonicalize (GTU x 0) as (NE x 0). */
5012 if (code == GTU)
5013 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
5014 /* Canonicalize (LEU x 0) as (EQ x 0). */
5015 if (code == LEU)
5016 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
5017 }
5018 else if (op1 == const1_rtx)
5019 {
5020 switch (code)
5021 {
5022 case GE:
5023 /* Canonicalize (GE x 1) as (GT x 0). */
5024 return simplify_gen_relational (GT, mode, cmp_mode,
5025 op0, const0_rtx);
5026 case GEU:
5027 /* Canonicalize (GEU x 1) as (NE x 0). */
5028 return simplify_gen_relational (NE, mode, cmp_mode,
5029 op0, const0_rtx);
5030 case LT:
5031 /* Canonicalize (LT x 1) as (LE x 0). */
5032 return simplify_gen_relational (LE, mode, cmp_mode,
5033 op0, const0_rtx);
5034 case LTU:
5035 /* Canonicalize (LTU x 1) as (EQ x 0). */
5036 return simplify_gen_relational (EQ, mode, cmp_mode,
5037 op0, const0_rtx);
5038 default:
5039 break;
5040 }
5041 }
5042 else if (op1 == constm1_rtx)
5043 {
5044 /* Canonicalize (LE x -1) as (LT x 0). */
5045 if (code == LE)
5046 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5047 /* Canonicalize (GT x -1) as (GE x 0). */
5048 if (code == GT)
5049 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
5050 }
5051
5052 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5053 if ((code == EQ || code == NE)
5054 && (op0code == PLUS || op0code == MINUS)
5055 && CONSTANT_P (op1)
5056 && CONSTANT_P (XEXP (op0, 1))
5057 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5058 {
5059 rtx x = XEXP (op0, 0);
5060 rtx c = XEXP (op0, 1);
5061 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5062 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5063
5064 /* Detect an infinite recursive condition, where we oscillate at this
5065 simplification case between:
5066 A + B == C <---> C - B == A,
5067 where A, B, and C are all constants with non-simplifiable expressions,
5068 usually SYMBOL_REFs. */
5069 if (GET_CODE (tem) == invcode
5070 && CONSTANT_P (x)
5071 && rtx_equal_p (c, XEXP (tem, 1)))
5072 return NULL_RTX;
5073
5074 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5075 }
5076
5077 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5078 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5079 scalar_int_mode int_mode, int_cmp_mode;
5080 if (code == NE
5081 && op1 == const0_rtx
5082 && is_int_mode (mode, &int_mode)
5083 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5084 /* ??? Work-around BImode bugs in the ia64 backend. */
5085 && int_mode != BImode
5086 && int_cmp_mode != BImode
5087 && nonzero_bits (op0, int_cmp_mode) == 1
5088 && STORE_FLAG_VALUE == 1)
5089 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5090 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5091 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5092
5093 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5094 if ((code == EQ || code == NE)
5095 && op1 == const0_rtx
5096 && op0code == XOR)
5097 return simplify_gen_relational (code, mode, cmp_mode,
5098 XEXP (op0, 0), XEXP (op0, 1));
5099
5100 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5101 if ((code == EQ || code == NE)
5102 && op0code == XOR
5103 && rtx_equal_p (XEXP (op0, 0), op1)
5104 && !side_effects_p (XEXP (op0, 0)))
5105 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5106 CONST0_RTX (mode));
5107
5108 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5109 if ((code == EQ || code == NE)
5110 && op0code == XOR
5111 && rtx_equal_p (XEXP (op0, 1), op1)
5112 && !side_effects_p (XEXP (op0, 1)))
5113 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5114 CONST0_RTX (mode));
5115
5116 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5117 if ((code == EQ || code == NE)
5118 && op0code == XOR
5119 && CONST_SCALAR_INT_P (op1)
5120 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5121 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5122 simplify_gen_binary (XOR, cmp_mode,
5123 XEXP (op0, 1), op1));
5124
5125 /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5126 constant folding if x/y is a constant. */
5127 if ((code == EQ || code == NE)
5128 && (op0code == AND || op0code == IOR)
5129 && !side_effects_p (op1)
5130 && op1 != CONST0_RTX (cmp_mode))
5131 {
5132 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5133 (eq/ne (and (not y) x) 0). */
5134 if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5135 || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5136 {
5137 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5138 cmp_mode);
5139 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5140
5141 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5142 CONST0_RTX (cmp_mode));
5143 }
5144
5145 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5146 (eq/ne (and (not x) y) 0). */
5147 if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5148 || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5149 {
5150 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5151 cmp_mode);
5152 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5153
5154 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5155 CONST0_RTX (cmp_mode));
5156 }
5157 }
5158
5159 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5160 if ((code == EQ || code == NE)
5161 && GET_CODE (op0) == BSWAP
5162 && CONST_SCALAR_INT_P (op1))
5163 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5164 simplify_gen_unary (BSWAP, cmp_mode,
5165 op1, cmp_mode));
5166
5167 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5168 if ((code == EQ || code == NE)
5169 && GET_CODE (op0) == BSWAP
5170 && GET_CODE (op1) == BSWAP)
5171 return simplify_gen_relational (code, mode, cmp_mode,
5172 XEXP (op0, 0), XEXP (op1, 0));
5173
5174 if (op0code == POPCOUNT && op1 == const0_rtx)
5175 switch (code)
5176 {
5177 case EQ:
5178 case LE:
5179 case LEU:
5180 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5181 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5182 XEXP (op0, 0), const0_rtx);
5183
5184 case NE:
5185 case GT:
5186 case GTU:
5187 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5188 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5189 XEXP (op0, 0), const0_rtx);
5190
5191 default:
5192 break;
5193 }
5194
5195 return NULL_RTX;
5196 }
5197
5198 enum
5199 {
5200 CMP_EQ = 1,
5201 CMP_LT = 2,
5202 CMP_GT = 4,
5203 CMP_LTU = 8,
5204 CMP_GTU = 16
5205 };
5206
5207
5208 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5209 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5210 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5211 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5212 For floating-point comparisons, assume that the operands were ordered. */
5213
5214 static rtx
5215 comparison_result (enum rtx_code code, int known_results)
5216 {
5217 switch (code)
5218 {
5219 case EQ:
5220 case UNEQ:
5221 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5222 case NE:
5223 case LTGT:
5224 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5225
5226 case LT:
5227 case UNLT:
5228 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5229 case GE:
5230 case UNGE:
5231 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5232
5233 case GT:
5234 case UNGT:
5235 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5236 case LE:
5237 case UNLE:
5238 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5239
5240 case LTU:
5241 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5242 case GEU:
5243 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5244
5245 case GTU:
5246 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5247 case LEU:
5248 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5249
5250 case ORDERED:
5251 return const_true_rtx;
5252 case UNORDERED:
5253 return const0_rtx;
5254 default:
5255 gcc_unreachable ();
5256 }
5257 }
5258
5259 /* Check if the given comparison (done in the given MODE) is actually
5260 a tautology or a contradiction. If the mode is VOIDmode, the
5261 comparison is done in "infinite precision". If no simplification
5262 is possible, this function returns zero. Otherwise, it returns
5263 either const_true_rtx or const0_rtx. */
5264
5265 rtx
5266 simplify_const_relational_operation (enum rtx_code code,
5267 machine_mode mode,
5268 rtx op0, rtx op1)
5269 {
5270 rtx tem;
5271 rtx trueop0;
5272 rtx trueop1;
5273
5274 gcc_assert (mode != VOIDmode
5275 || (GET_MODE (op0) == VOIDmode
5276 && GET_MODE (op1) == VOIDmode));
5277
5278 /* If op0 is a compare, extract the comparison arguments from it. */
5279 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5280 {
5281 op1 = XEXP (op0, 1);
5282 op0 = XEXP (op0, 0);
5283
5284 if (GET_MODE (op0) != VOIDmode)
5285 mode = GET_MODE (op0);
5286 else if (GET_MODE (op1) != VOIDmode)
5287 mode = GET_MODE (op1);
5288 else
5289 return 0;
5290 }
5291
5292 /* We can't simplify MODE_CC values since we don't know what the
5293 actual comparison is. */
5294 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5295 return 0;
5296
5297 /* Make sure the constant is second. */
5298 if (swap_commutative_operands_p (op0, op1))
5299 {
5300 std::swap (op0, op1);
5301 code = swap_condition (code);
5302 }
5303
5304 trueop0 = avoid_constant_pool_reference (op0);
5305 trueop1 = avoid_constant_pool_reference (op1);
5306
5307 /* For integer comparisons of A and B maybe we can simplify A - B and can
5308 then simplify a comparison of that with zero. If A and B are both either
5309 a register or a CONST_INT, this can't help; testing for these cases will
5310 prevent infinite recursion here and speed things up.
5311
5312 We can only do this for EQ and NE comparisons as otherwise we may
5313 lose or introduce overflow which we cannot disregard as undefined as
5314 we do not know the signedness of the operation on either the left or
5315 the right hand side of the comparison. */
5316
5317 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5318 && (code == EQ || code == NE)
5319 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5320 && (REG_P (op1) || CONST_INT_P (trueop1)))
5321 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5322 /* We cannot do this if tem is a nonzero address. */
5323 && ! nonzero_address_p (tem))
5324 return simplify_const_relational_operation (signed_condition (code),
5325 mode, tem, const0_rtx);
5326
5327 if (! HONOR_NANS (mode) && code == ORDERED)
5328 return const_true_rtx;
5329
5330 if (! HONOR_NANS (mode) && code == UNORDERED)
5331 return const0_rtx;
5332
5333 /* For modes without NaNs, if the two operands are equal, we know the
5334 result except if they have side-effects. Even with NaNs we know
5335 the result of unordered comparisons and, if signaling NaNs are
5336 irrelevant, also the result of LT/GT/LTGT. */
5337 if ((! HONOR_NANS (trueop0)
5338 || code == UNEQ || code == UNLE || code == UNGE
5339 || ((code == LT || code == GT || code == LTGT)
5340 && ! HONOR_SNANS (trueop0)))
5341 && rtx_equal_p (trueop0, trueop1)
5342 && ! side_effects_p (trueop0))
5343 return comparison_result (code, CMP_EQ);
5344
5345 /* If the operands are floating-point constants, see if we can fold
5346 the result. */
5347 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5348 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5349 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5350 {
5351 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5352 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5353
5354 /* Comparisons are unordered iff at least one of the values is NaN. */
5355 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5356 switch (code)
5357 {
5358 case UNEQ:
5359 case UNLT:
5360 case UNGT:
5361 case UNLE:
5362 case UNGE:
5363 case NE:
5364 case UNORDERED:
5365 return const_true_rtx;
5366 case EQ:
5367 case LT:
5368 case GT:
5369 case LE:
5370 case GE:
5371 case LTGT:
5372 case ORDERED:
5373 return const0_rtx;
5374 default:
5375 return 0;
5376 }
5377
5378 return comparison_result (code,
5379 (real_equal (d0, d1) ? CMP_EQ :
5380 real_less (d0, d1) ? CMP_LT : CMP_GT));
5381 }
5382
5383 /* Otherwise, see if the operands are both integers. */
5384 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5385 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5386 {
5387 /* It would be nice if we really had a mode here. However, the
5388 largest int representable on the target is as good as
5389 infinite. */
5390 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5391 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5392 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5393
5394 if (wi::eq_p (ptrueop0, ptrueop1))
5395 return comparison_result (code, CMP_EQ);
5396 else
5397 {
5398 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5399 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5400 return comparison_result (code, cr);
5401 }
5402 }
5403
5404 /* Optimize comparisons with upper and lower bounds. */
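/* For instance, if nonzero_bits shows that op0 has no bits set above bit 7
   (so 0 <= op0 <= 255), then (gtu op0 (const_int 255)) folds to const0_rtx
   and (leu op0 (const_int 255)) folds to const_true_rtx. */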
5405 scalar_int_mode int_mode;
5406 if (CONST_INT_P (trueop1)
5407 && is_a <scalar_int_mode> (mode, &int_mode)
5408 && HWI_COMPUTABLE_MODE_P (int_mode)
5409 && !side_effects_p (trueop0))
5410 {
5411 int sign;
5412 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5413 HOST_WIDE_INT val = INTVAL (trueop1);
5414 HOST_WIDE_INT mmin, mmax;
5415
5416 if (code == GEU
5417 || code == LEU
5418 || code == GTU
5419 || code == LTU)
5420 sign = 0;
5421 else
5422 sign = 1;
5423
5424 /* Get a reduced range if the sign bit is zero. */
5425 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5426 {
5427 mmin = 0;
5428 mmax = nonzero;
5429 }
5430 else
5431 {
5432 rtx mmin_rtx, mmax_rtx;
5433 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5434
5435 mmin = INTVAL (mmin_rtx);
5436 mmax = INTVAL (mmax_rtx);
5437 if (sign)
5438 {
5439 unsigned int sign_copies
5440 = num_sign_bit_copies (trueop0, int_mode);
5441
5442 mmin >>= (sign_copies - 1);
5443 mmax >>= (sign_copies - 1);
5444 }
5445 }
5446
5447 switch (code)
5448 {
5449 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5450 case GEU:
5451 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5452 return const_true_rtx;
5453 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5454 return const0_rtx;
5455 break;
5456 case GE:
5457 if (val <= mmin)
5458 return const_true_rtx;
5459 if (val > mmax)
5460 return const0_rtx;
5461 break;
5462
5463 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5464 case LEU:
5465 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5466 return const_true_rtx;
5467 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5468 return const0_rtx;
5469 break;
5470 case LE:
5471 if (val >= mmax)
5472 return const_true_rtx;
5473 if (val < mmin)
5474 return const0_rtx;
5475 break;
5476
5477 case EQ:
5478 /* x == y is always false for y out of range. */
5479 if (val < mmin || val > mmax)
5480 return const0_rtx;
5481 break;
5482
5483 /* x > y is always false for y >= mmax, always true for y < mmin. */
5484 case GTU:
5485 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5486 return const0_rtx;
5487 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5488 return const_true_rtx;
5489 break;
5490 case GT:
5491 if (val >= mmax)
5492 return const0_rtx;
5493 if (val < mmin)
5494 return const_true_rtx;
5495 break;
5496
5497 /* x < y is always false for y <= mmin, always true for y > mmax. */
5498 case LTU:
5499 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5500 return const0_rtx;
5501 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5502 return const_true_rtx;
5503 break;
5504 case LT:
5505 if (val <= mmin)
5506 return const0_rtx;
5507 if (val > mmax)
5508 return const_true_rtx;
5509 break;
5510
5511 case NE:
5512 /* x != y is always true for y out of range. */
5513 if (val < mmin || val > mmax)
5514 return const_true_rtx;
5515 break;
5516
5517 default:
5518 break;
5519 }
5520 }
5521
5522 /* Optimize integer comparisons with zero. */
5523 if (is_a <scalar_int_mode> (mode, &int_mode)
5524 && trueop1 == const0_rtx
5525 && !side_effects_p (trueop0))
5526 {
5527 /* Some addresses are known to be nonzero. We don't know
5528 their sign, but equality comparisons are known. */
5529 if (nonzero_address_p (trueop0))
5530 {
5531 if (code == EQ || code == LEU)
5532 return const0_rtx;
5533 if (code == NE || code == GTU)
5534 return const_true_rtx;
5535 }
5536
5537 /* See if the first operand is an IOR with a constant. If so, we
5538 may be able to determine the result of this comparison. */
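/* For example, (ior X (const_int 4)) is known to be nonzero, so EQ and LEU
   against zero fold to const0_rtx while NE and GTU fold to const_true_rtx.
   If the constant also has the sign bit set, LT/LE and GT/GE against zero
   are known as well. */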
5539 if (GET_CODE (op0) == IOR)
5540 {
5541 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5542 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5543 {
5544 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5545 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5546 && (UINTVAL (inner_const)
5547 & (HOST_WIDE_INT_1U
5548 << sign_bitnum)));
5549
5550 switch (code)
5551 {
5552 case EQ:
5553 case LEU:
5554 return const0_rtx;
5555 case NE:
5556 case GTU:
5557 return const_true_rtx;
5558 case LT:
5559 case LE:
5560 if (has_sign)
5561 return const_true_rtx;
5562 break;
5563 case GT:
5564 case GE:
5565 if (has_sign)
5566 return const0_rtx;
5567 break;
5568 default:
5569 break;
5570 }
5571 }
5572 }
5573 }
5574
5575 /* Optimize comparison of ABS with zero. */
5576 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5577 && (GET_CODE (trueop0) == ABS
5578 || (GET_CODE (trueop0) == FLOAT_EXTEND
5579 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5580 {
5581 switch (code)
5582 {
5583 case LT:
5584 /* Optimize abs(x) < 0.0. */
5585 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5586 return const0_rtx;
5587 break;
5588
5589 case GE:
5590 /* Optimize abs(x) >= 0.0. */
5591 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5592 return const_true_rtx;
5593 break;
5594
5595 case UNGE:
5596 /* Optimize ! (abs(x) < 0.0). */
5597 return const_true_rtx;
5598
5599 default:
5600 break;
5601 }
5602 }
5603
5604 return 0;
5605 }
5606
5607 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5608 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5609 or CTZ_DEFINED_VALUE_AT_ZERO respectively. Return OP (X) if the expression
5610 can be simplified to that, or NULL_RTX if not.
5611 Assume X is compared against zero with CMP_CODE and the true
5612 arm is TRUE_VAL and the false arm is FALSE_VAL. */
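/* For example, on a target whose CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
   SImode, (x == 0 ? 32 : (clz:SI x)) simplifies to (clz:SI x). */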
5613
5614 static rtx
5615 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5616 {
5617 if (cmp_code != EQ && cmp_code != NE)
5618 return NULL_RTX;
5619
5620 /* Result on X == 0 and X != 0 respectively. */
5621 rtx on_zero, on_nonzero;
5622 if (cmp_code == EQ)
5623 {
5624 on_zero = true_val;
5625 on_nonzero = false_val;
5626 }
5627 else
5628 {
5629 on_zero = false_val;
5630 on_nonzero = true_val;
5631 }
5632
5633 rtx_code op_code = GET_CODE (on_nonzero);
5634 if ((op_code != CLZ && op_code != CTZ)
5635 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5636 || !CONST_INT_P (on_zero))
5637 return NULL_RTX;
5638
5639 HOST_WIDE_INT op_val;
5640 scalar_int_mode mode ATTRIBUTE_UNUSED
5641 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5642 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5643 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5644 && op_val == INTVAL (on_zero))
5645 return on_nonzero;
5646
5647 return NULL_RTX;
5648 }
5649
5650 /* Try to simplify X given that it appears within operand OP of a
5651 VEC_MERGE operation whose mask is MASK. X need not use the same
5652 vector mode as the VEC_MERGE, but it must have the same number of
5653 elements.
5654
5655 Return the simplified X on success, otherwise return NULL_RTX. */
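/* For example, with OP == 0, (plus:V4SI (vec_merge:V4SI A B MASK) C)
   simplifies to (plus:V4SI A C), since only elements taken from the first
   operand of the inner VEC_MERGE can reach the result. */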
5656
5657 rtx
5658 simplify_merge_mask (rtx x, rtx mask, int op)
5659 {
5660 gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
5661 poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
5662 if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
5663 {
5664 if (side_effects_p (XEXP (x, 1 - op)))
5665 return NULL_RTX;
5666
5667 return XEXP (x, op);
5668 }
5669 if (UNARY_P (x)
5670 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5671 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
5672 {
5673 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5674 if (top0)
5675 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
5676 GET_MODE (XEXP (x, 0)));
5677 }
5678 if (BINARY_P (x)
5679 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5680 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
5681 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
5682 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
5683 {
5684 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5685 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
5686 if (top0 || top1)
5687 {
5688 if (COMPARISON_P (x))
5689 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
5690 GET_MODE (XEXP (x, 0)) != VOIDmode
5691 ? GET_MODE (XEXP (x, 0))
5692 : GET_MODE (XEXP (x, 1)),
5693 top0 ? top0 : XEXP (x, 0),
5694 top1 ? top1 : XEXP (x, 1));
5695 else
5696 return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
5697 top0 ? top0 : XEXP (x, 0),
5698 top1 ? top1 : XEXP (x, 1));
5699 }
5700 }
5701 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
5702 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5703 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
5704 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
5705 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
5706 && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
5707 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
5708 {
5709 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5710 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
5711 rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
5712 if (top0 || top1 || top2)
5713 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
5714 GET_MODE (XEXP (x, 0)),
5715 top0 ? top0 : XEXP (x, 0),
5716 top1 ? top1 : XEXP (x, 1),
5717 top2 ? top2 : XEXP (x, 2));
5718 }
5719 return NULL_RTX;
5720 }
5721
5722 \f
5723 /* Simplify CODE, an operation with result mode MODE and three operands,
5724 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5725 a constant. Return 0 if no simplification is possible. */
5726
5727 rtx
5728 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5729 machine_mode op0_mode, rtx op0, rtx op1,
5730 rtx op2)
5731 {
5732 bool any_change = false;
5733 rtx tem, trueop2;
5734 scalar_int_mode int_mode, int_op0_mode;
5735 unsigned int n_elts;
5736
5737 switch (code)
5738 {
5739 case FMA:
5740 /* Simplify negations around the multiplication. */
5741 /* -a * -b + c => a * b + c. */
5742 if (GET_CODE (op0) == NEG)
5743 {
5744 tem = simplify_unary_operation (NEG, mode, op1, mode);
5745 if (tem)
5746 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5747 }
5748 else if (GET_CODE (op1) == NEG)
5749 {
5750 tem = simplify_unary_operation (NEG, mode, op0, mode);
5751 if (tem)
5752 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5753 }
5754
5755 /* Canonicalize the two multiplication operands. */
5756 /* a * -b + c => -b * a + c. */
5757 if (swap_commutative_operands_p (op0, op1))
5758 std::swap (op0, op1), any_change = true;
5759
5760 if (any_change)
5761 return gen_rtx_FMA (mode, op0, op1, op2);
5762 return NULL_RTX;
5763
5764 case SIGN_EXTRACT:
5765 case ZERO_EXTRACT:
5766 if (CONST_INT_P (op0)
5767 && CONST_INT_P (op1)
5768 && CONST_INT_P (op2)
5769 && is_a <scalar_int_mode> (mode, &int_mode)
5770 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5771 && HWI_COMPUTABLE_MODE_P (int_mode))
5772 {
5773 /* Extracting a bit-field from a constant */
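/* E.g. with !BITS_BIG_ENDIAN, (zero_extract (const_int 0xf0) (const_int 4)
   (const_int 4)) folds to (const_int 15). */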
5774 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5775 HOST_WIDE_INT op1val = INTVAL (op1);
5776 HOST_WIDE_INT op2val = INTVAL (op2);
5777 if (!BITS_BIG_ENDIAN)
5778 val >>= op2val;
5779 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5780 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5781 else
5782 /* Not enough information to calculate the bit position. */
5783 break;
5784
5785 if (HOST_BITS_PER_WIDE_INT != op1val)
5786 {
5787 /* First zero-extend. */
5788 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5789 /* If desired, propagate sign bit. */
5790 if (code == SIGN_EXTRACT
5791 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5792 != 0)
5793 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5794 }
5795
5796 return gen_int_mode (val, int_mode);
5797 }
5798 break;
5799
5800 case IF_THEN_ELSE:
5801 if (CONST_INT_P (op0))
5802 return op0 != const0_rtx ? op1 : op2;
5803
5804 /* Convert c ? a : a into "a". */
5805 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5806 return op1;
5807
5808 /* Convert a != b ? a : b into "a". */
5809 if (GET_CODE (op0) == NE
5810 && ! side_effects_p (op0)
5811 && ! HONOR_NANS (mode)
5812 && ! HONOR_SIGNED_ZEROS (mode)
5813 && ((rtx_equal_p (XEXP (op0, 0), op1)
5814 && rtx_equal_p (XEXP (op0, 1), op2))
5815 || (rtx_equal_p (XEXP (op0, 0), op2)
5816 && rtx_equal_p (XEXP (op0, 1), op1))))
5817 return op1;
5818
5819 /* Convert a == b ? a : b into "b". */
5820 if (GET_CODE (op0) == EQ
5821 && ! side_effects_p (op0)
5822 && ! HONOR_NANS (mode)
5823 && ! HONOR_SIGNED_ZEROS (mode)
5824 && ((rtx_equal_p (XEXP (op0, 0), op1)
5825 && rtx_equal_p (XEXP (op0, 1), op2))
5826 || (rtx_equal_p (XEXP (op0, 0), op2)
5827 && rtx_equal_p (XEXP (op0, 1), op1))))
5828 return op2;
5829
5830 /* Convert (!c) != {0,...,0} ? a : b into
5831 c != {0,...,0} ? b : a for vector modes. */
5832 if (VECTOR_MODE_P (GET_MODE (op1))
5833 && GET_CODE (op0) == NE
5834 && GET_CODE (XEXP (op0, 0)) == NOT
5835 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5836 {
5837 rtx cv = XEXP (op0, 1);
5838 int nunits;
5839 bool ok = true;
5840 if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
5841 ok = false;
5842 else
5843 for (int i = 0; i < nunits; ++i)
5844 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5845 {
5846 ok = false;
5847 break;
5848 }
5849 if (ok)
5850 {
5851 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5852 XEXP (XEXP (op0, 0), 0),
5853 XEXP (op0, 1));
5854 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5855 return retval;
5856 }
5857 }
5858
5859 /* Convert x == 0 ? N : clz (x) into clz (x) when
5860 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5861 Similarly for ctz (x). */
5862 if (COMPARISON_P (op0) && !side_effects_p (op0)
5863 && XEXP (op0, 1) == const0_rtx)
5864 {
5865 rtx simplified
5866 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5867 op1, op2);
5868 if (simplified)
5869 return simplified;
5870 }
5871
5872 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5873 {
5874 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5875 ? GET_MODE (XEXP (op0, 1))
5876 : GET_MODE (XEXP (op0, 0)));
5877 rtx temp;
5878
5879 /* Look for happy constants in op1 and op2. */
5880 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5881 {
5882 HOST_WIDE_INT t = INTVAL (op1);
5883 HOST_WIDE_INT f = INTVAL (op2);
5884
5885 if (t == STORE_FLAG_VALUE && f == 0)
5886 code = GET_CODE (op0);
5887 else if (t == 0 && f == STORE_FLAG_VALUE)
5888 {
5889 enum rtx_code tmp;
5890 tmp = reversed_comparison_code (op0, NULL);
5891 if (tmp == UNKNOWN)
5892 break;
5893 code = tmp;
5894 }
5895 else
5896 break;
5897
5898 return simplify_gen_relational (code, mode, cmp_mode,
5899 XEXP (op0, 0), XEXP (op0, 1));
5900 }
5901
5902 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5903 cmp_mode, XEXP (op0, 0),
5904 XEXP (op0, 1));
5905
5906 /* See if any simplifications were possible. */
5907 if (temp)
5908 {
5909 if (CONST_INT_P (temp))
5910 return temp == const0_rtx ? op2 : op1;
5911 else
5912 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5913 }
5914 }
5915 break;
5916
5917 case VEC_MERGE:
5918 gcc_assert (GET_MODE (op0) == mode);
5919 gcc_assert (GET_MODE (op1) == mode);
5920 gcc_assert (VECTOR_MODE_P (mode));
5921 trueop2 = avoid_constant_pool_reference (op2);
5922 if (CONST_INT_P (trueop2)
5923 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
5924 {
5925 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5926 unsigned HOST_WIDE_INT mask;
5927 if (n_elts == HOST_BITS_PER_WIDE_INT)
5928 mask = -1;
5929 else
5930 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5931
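/* A selector with none of the MASK bits set takes every element from op1;
   one with all of them set takes every element from op0. */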
5932 if (!(sel & mask) && !side_effects_p (op0))
5933 return op1;
5934 if ((sel & mask) == mask && !side_effects_p (op1))
5935 return op0;
5936
5937 rtx trueop0 = avoid_constant_pool_reference (op0);
5938 rtx trueop1 = avoid_constant_pool_reference (op1);
5939 if (GET_CODE (trueop0) == CONST_VECTOR
5940 && GET_CODE (trueop1) == CONST_VECTOR)
5941 {
5942 rtvec v = rtvec_alloc (n_elts);
5943 unsigned int i;
5944
5945 for (i = 0; i < n_elts; i++)
5946 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5947 ? CONST_VECTOR_ELT (trueop0, i)
5948 : CONST_VECTOR_ELT (trueop1, i));
5949 return gen_rtx_CONST_VECTOR (mode, v);
5950 }
5951
5952 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5953 if no element from a appears in the result. */
5954 if (GET_CODE (op0) == VEC_MERGE)
5955 {
5956 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5957 if (CONST_INT_P (tem))
5958 {
5959 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5960 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5961 return simplify_gen_ternary (code, mode, mode,
5962 XEXP (op0, 1), op1, op2);
5963 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5964 return simplify_gen_ternary (code, mode, mode,
5965 XEXP (op0, 0), op1, op2);
5966 }
5967 }
5968 if (GET_CODE (op1) == VEC_MERGE)
5969 {
5970 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5971 if (CONST_INT_P (tem))
5972 {
5973 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5974 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5975 return simplify_gen_ternary (code, mode, mode,
5976 op0, XEXP (op1, 1), op2);
5977 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5978 return simplify_gen_ternary (code, mode, mode,
5979 op0, XEXP (op1, 0), op2);
5980 }
5981 }
5982
5983 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5984 with a. */
5985 if (GET_CODE (op0) == VEC_DUPLICATE
5986 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5987 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5988 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
5989 {
5990 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5991 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5992 {
5993 if (XEXP (XEXP (op0, 0), 0) == op1
5994 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5995 return op1;
5996 }
5997 }
5998 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5999 (const_int N))
6000 with (vec_concat (X) (B)) if N == 1 or
6001 (vec_concat (A) (X)) if N == 2. */
6002 if (GET_CODE (op0) == VEC_DUPLICATE
6003 && GET_CODE (op1) == CONST_VECTOR
6004 && known_eq (CONST_VECTOR_NUNITS (op1), 2)
6005 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6006 && IN_RANGE (sel, 1, 2))
6007 {
6008 rtx newop0 = XEXP (op0, 0);
6009 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
6010 if (sel == 2)
6011 std::swap (newop0, newop1);
6012 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6013 }
6014 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6015 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6016 Only applies for vectors of two elements. */
6017 if (GET_CODE (op0) == VEC_DUPLICATE
6018 && GET_CODE (op1) == VEC_CONCAT
6019 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6020 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6021 && IN_RANGE (sel, 1, 2))
6022 {
6023 rtx newop0 = XEXP (op0, 0);
6024 rtx newop1 = XEXP (op1, 2 - sel);
6025 rtx otherop = XEXP (op1, sel - 1);
6026 if (sel == 2)
6027 std::swap (newop0, newop1);
6028 /* Don't want to throw away the other part of the vec_concat if
6029 it has side-effects. */
6030 if (!side_effects_p (otherop))
6031 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6032 }
6033
6034 /* Replace:
6035
6036 (vec_merge:outer (vec_duplicate:outer x:inner)
6037 (subreg:outer y:inner 0)
6038 (const_int N))
6039
6040 with (vec_concat:outer x:inner y:inner) if N == 1,
6041 or (vec_concat:outer y:inner x:inner) if N == 2.
6042
6043 Implicitly, this means we have a paradoxical subreg, but such
6044 a check is cheap, so make it anyway.
6045
6046 Only applies for vectors of two elements. */
6047 if (GET_CODE (op0) == VEC_DUPLICATE
6048 && GET_CODE (op1) == SUBREG
6049 && GET_MODE (op1) == GET_MODE (op0)
6050 && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
6051 && paradoxical_subreg_p (op1)
6052 && subreg_lowpart_p (op1)
6053 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6054 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6055 && IN_RANGE (sel, 1, 2))
6056 {
6057 rtx newop0 = XEXP (op0, 0);
6058 rtx newop1 = SUBREG_REG (op1);
6059 if (sel == 2)
6060 std::swap (newop0, newop1);
6061 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6062 }
6063
6064 /* Same as above but with switched operands:
6065 Replace (vec_merge:outer (subreg:outer x:inner 0)
6066 (vec_duplicate:outer y:inner)
6067 (const_int N))
6068
6069 with (vec_concat:outer x:inner y:inner) if N == 1,
6070 or (vec_concat:outer y:inner x:inner) if N == 2. */
6071 if (GET_CODE (op1) == VEC_DUPLICATE
6072 && GET_CODE (op0) == SUBREG
6073 && GET_MODE (op0) == GET_MODE (op1)
6074 && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
6075 && paradoxical_subreg_p (op0)
6076 && subreg_lowpart_p (op0)
6077 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6078 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6079 && IN_RANGE (sel, 1, 2))
6080 {
6081 rtx newop0 = SUBREG_REG (op0);
6082 rtx newop1 = XEXP (op1, 0);
6083 if (sel == 2)
6084 std::swap (newop0, newop1);
6085 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6086 }
6087
6088 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6089 (const_int n))
6090 with (vec_concat x y) or (vec_concat y x) depending on value
6091 of N. */
6092 if (GET_CODE (op0) == VEC_DUPLICATE
6093 && GET_CODE (op1) == VEC_DUPLICATE
6094 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6095 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6096 && IN_RANGE (sel, 1, 2))
6097 {
6098 rtx newop0 = XEXP (op0, 0);
6099 rtx newop1 = XEXP (op1, 0);
6100 if (sel == 2)
6101 std::swap (newop0, newop1);
6102
6103 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6104 }
6105 }
6106
6107 if (rtx_equal_p (op0, op1)
6108 && !side_effects_p (op2) && !side_effects_p (op1))
6109 return op0;
6110
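/* Try to drop VEC_MERGEs inside op0 and op1 that use the same mask as this
   one; see simplify_merge_mask. */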
6111 if (!side_effects_p (op2))
6112 {
6113 rtx top0
6114 = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
6115 rtx top1
6116 = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
6117 if (top0 || top1)
6118 return simplify_gen_ternary (code, mode, mode,
6119 top0 ? top0 : op0,
6120 top1 ? top1 : op1, op2);
6121 }
6122
6123 break;
6124
6125 default:
6126 gcc_unreachable ();
6127 }
6128
6129 return 0;
6130 }
6131
6132 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
6133 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
6134 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
6135
6136 Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit values
6137 represented as a little-endian array of 'unsigned char', selecting by BYTE,
6138 and then repacking them again for OUTERMODE. If OP is a CONST_VECTOR,
6139 FIRST_ELEM is the number of the first element to extract, otherwise
6140 FIRST_ELEM is ignored. */
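/* For example, on a little-endian target, taking the QImode lowpart of an
   HImode (const_int 0x1234) through this routine yields (const_int 0x34). */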
6141
6142 static rtx
6143 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
6144 machine_mode innermode, unsigned int byte,
6145 unsigned int first_elem, unsigned int inner_bytes)
6146 {
6147 enum {
6148 value_bit = 8,
6149 value_mask = (1 << value_bit) - 1
6150 };
6151 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
6152 int value_start;
6153 int i;
6154 int elem;
6155
6156 int num_elem;
6157 rtx * elems;
6158 int elem_bitsize;
6159 rtx result_s = NULL;
6160 rtvec result_v = NULL;
6161 enum mode_class outer_class;
6162 scalar_mode outer_submode;
6163 int max_bitsize;
6164
6165 /* Some ports misuse CCmode. */
6166 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
6167 return op;
6168
6169 /* We have no way to represent a complex constant at the rtl level. */
6170 if (COMPLEX_MODE_P (outermode))
6171 return NULL_RTX;
6172
6173 /* We support any size mode. */
6174 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
6175 inner_bytes * BITS_PER_UNIT);
6176
6177 /* Unpack the value. */
6178
6179 if (GET_CODE (op) == CONST_VECTOR)
6180 {
6181 num_elem = CEIL (inner_bytes, GET_MODE_UNIT_SIZE (innermode));
6182 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
6183 }
6184 else
6185 {
6186 num_elem = 1;
6187 elem_bitsize = max_bitsize;
6188 }
6189 /* If this asserts, it is too complicated; reducing value_bit may help. */
6190 gcc_assert (BITS_PER_UNIT % value_bit == 0);
6191 /* I don't know how to handle endianness of sub-units. */
6192 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
6193
6194 for (elem = 0; elem < num_elem; elem++)
6195 {
6196 unsigned char * vp;
6197 rtx el = (GET_CODE (op) == CONST_VECTOR
6198 ? CONST_VECTOR_ELT (op, first_elem + elem)
6199 : op);
6200
6201 /* Vectors are kept in target memory order. (This is probably
6202 a mistake.) */
6203 {
6204 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6205 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6206 / BITS_PER_UNIT);
6207 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6208 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6209 unsigned bytele = (subword_byte % UNITS_PER_WORD
6210 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6211 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
6212 }
6213
6214 switch (GET_CODE (el))
6215 {
6216 case CONST_INT:
6217 for (i = 0;
6218 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6219 i += value_bit)
6220 *vp++ = INTVAL (el) >> i;
6221 /* CONST_INTs are always logically sign-extended. */
6222 for (; i < elem_bitsize; i += value_bit)
6223 *vp++ = INTVAL (el) < 0 ? -1 : 0;
6224 break;
6225
6226 case CONST_WIDE_INT:
6227 {
6228 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
6229 unsigned char extend = wi::sign_mask (val);
6230 int prec = wi::get_precision (val);
6231
6232 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
6233 *vp++ = wi::extract_uhwi (val, i, value_bit);
6234 for (; i < elem_bitsize; i += value_bit)
6235 *vp++ = extend;
6236 }
6237 break;
6238
6239 case CONST_DOUBLE:
6240 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
6241 {
6242 unsigned char extend = 0;
6243 /* If this triggers, someone should have generated a
6244 CONST_INT instead. */
6245 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
6246
6247 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6248 *vp++ = CONST_DOUBLE_LOW (el) >> i;
6249 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
6250 {
6251 *vp++
6252 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
6253 i += value_bit;
6254 }
6255
6256 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
6257 extend = -1;
6258 for (; i < elem_bitsize; i += value_bit)
6259 *vp++ = extend;
6260 }
6261 else
6262 {
6263 /* This is big enough for anything on the platform. */
6264 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
6265 scalar_float_mode el_mode;
6266
6267 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
6268 int bitsize = GET_MODE_BITSIZE (el_mode);
6269
6270 gcc_assert (bitsize <= elem_bitsize);
6271 gcc_assert (bitsize % value_bit == 0);
6272
6273 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
6274 GET_MODE (el));
6275
6276 /* real_to_target produces its result in words affected by
6277 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6278 and use WORDS_BIG_ENDIAN instead; see the documentation
6279 of SUBREG in rtl.texi. */
6280 for (i = 0; i < bitsize; i += value_bit)
6281 {
6282 int ibase;
6283 if (WORDS_BIG_ENDIAN)
6284 ibase = bitsize - 1 - i;
6285 else
6286 ibase = i;
6287 *vp++ = tmp[ibase / 32] >> i % 32;
6288 }
6289
6290 /* It shouldn't matter what's done here, so fill it with
6291 zero. */
6292 for (; i < elem_bitsize; i += value_bit)
6293 *vp++ = 0;
6294 }
6295 break;
6296
6297 case CONST_FIXED:
6298 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
6299 {
6300 for (i = 0; i < elem_bitsize; i += value_bit)
6301 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6302 }
6303 else
6304 {
6305 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6306 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6307 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
6308 i += value_bit)
6309 *vp++ = CONST_FIXED_VALUE_HIGH (el)
6310 >> (i - HOST_BITS_PER_WIDE_INT);
6311 for (; i < elem_bitsize; i += value_bit)
6312 *vp++ = 0;
6313 }
6314 break;
6315
6316 default:
6317 gcc_unreachable ();
6318 }
6319 }
6320
6321 /* Now, pick the right byte to start with. */
6322 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6323 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6324 will already have offset 0. */
6325 if (inner_bytes >= GET_MODE_SIZE (outermode))
6326 {
6327 unsigned ibyte = inner_bytes - GET_MODE_SIZE (outermode) - byte;
6328 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6329 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6330 byte = (subword_byte % UNITS_PER_WORD
6331 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6332 }
6333
6334 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6335 so if it's become negative it will instead be very large.) */
6336 gcc_assert (byte < inner_bytes);
6337
6338 /* Convert from bytes to chunks of size value_bit. */
6339 value_start = byte * (BITS_PER_UNIT / value_bit);
6340
6341 /* Re-pack the value. */
6342 num_elem = GET_MODE_NUNITS (outermode);
6343
6344 if (VECTOR_MODE_P (outermode))
6345 {
6346 result_v = rtvec_alloc (num_elem);
6347 elems = &RTVEC_ELT (result_v, 0);
6348 }
6349 else
6350 elems = &result_s;
6351
6352 outer_submode = GET_MODE_INNER (outermode);
6353 outer_class = GET_MODE_CLASS (outer_submode);
6354 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6355
6356 gcc_assert (elem_bitsize % value_bit == 0);
6357 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6358
6359 for (elem = 0; elem < num_elem; elem++)
6360 {
6361 unsigned char *vp;
6362
6363 /* Vectors are stored in target memory order. (This is probably
6364 a mistake.) */
6365 {
6366 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6367 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6368 / BITS_PER_UNIT);
6369 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6370 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6371 unsigned bytele = (subword_byte % UNITS_PER_WORD
6372 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6373 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6374 }
6375
6376 switch (outer_class)
6377 {
6378 case MODE_INT:
6379 case MODE_PARTIAL_INT:
6380 {
6381 int u;
6382 int base = 0;
6383 int units
6384 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6385 / HOST_BITS_PER_WIDE_INT;
6386 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6387 wide_int r;
6388
6389 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6390 return NULL_RTX;
6391 for (u = 0; u < units; u++)
6392 {
6393 unsigned HOST_WIDE_INT buf = 0;
6394 for (i = 0;
6395 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6396 i += value_bit)
6397 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6398
6399 tmp[u] = buf;
6400 base += HOST_BITS_PER_WIDE_INT;
6401 }
6402 r = wide_int::from_array (tmp, units,
6403 GET_MODE_PRECISION (outer_submode));
6404 #if TARGET_SUPPORTS_WIDE_INT == 0
6405 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6406 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6407 return NULL_RTX;
6408 #endif
6409 elems[elem] = immed_wide_int_const (r, outer_submode);
6410 }
6411 break;
6412
6413 case MODE_FLOAT:
6414 case MODE_DECIMAL_FLOAT:
6415 {
6416 REAL_VALUE_TYPE r;
6417 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6418
6419 /* real_from_target wants its input in words affected by
6420 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6421 and use WORDS_BIG_ENDIAN instead; see the documentation
6422 of SUBREG in rtl.texi. */
6423 for (i = 0; i < elem_bitsize; i += value_bit)
6424 {
6425 int ibase;
6426 if (WORDS_BIG_ENDIAN)
6427 ibase = elem_bitsize - 1 - i;
6428 else
6429 ibase = i;
6430 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6431 }
6432
6433 real_from_target (&r, tmp, outer_submode);
6434 elems[elem] = const_double_from_real_value (r, outer_submode);
6435 }
6436 break;
6437
6438 case MODE_FRACT:
6439 case MODE_UFRACT:
6440 case MODE_ACCUM:
6441 case MODE_UACCUM:
6442 {
6443 FIXED_VALUE_TYPE f;
6444 f.data.low = 0;
6445 f.data.high = 0;
6446 f.mode = outer_submode;
6447
6448 for (i = 0;
6449 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6450 i += value_bit)
6451 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6452 for (; i < elem_bitsize; i += value_bit)
6453 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6454 << (i - HOST_BITS_PER_WIDE_INT));
6455
6456 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6457 }
6458 break;
6459
6460 default:
6461 gcc_unreachable ();
6462 }
6463 }
6464 if (VECTOR_MODE_P (outermode))
6465 return gen_rtx_CONST_VECTOR (outermode, result_v);
6466 else
6467 return result_s;
6468 }
6469
6470 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6471 Return 0 if no simplifications are possible. */
6472 rtx
6473 simplify_subreg (machine_mode outermode, rtx op,
6474 machine_mode innermode, poly_uint64 byte)
6475 {
6476 /* Little bit of sanity checking. */
6477 gcc_assert (innermode != VOIDmode);
6478 gcc_assert (outermode != VOIDmode);
6479 gcc_assert (innermode != BLKmode);
6480 gcc_assert (outermode != BLKmode);
6481
6482 gcc_assert (GET_MODE (op) == innermode
6483 || GET_MODE (op) == VOIDmode);
6484
6485 poly_uint64 outersize = GET_MODE_SIZE (outermode);
6486 if (!multiple_p (byte, outersize))
6487 return NULL_RTX;
6488
6489 poly_uint64 innersize = GET_MODE_SIZE (innermode);
6490 if (maybe_ge (byte, innersize))
6491 return NULL_RTX;
6492
6493 if (outermode == innermode && known_eq (byte, 0U))
6494 return op;
6495
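/* A subreg at an element boundary of a VEC_DUPLICATE is either the
   duplicated scalar itself or a narrower duplicate of it. */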
6496 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6497 {
6498 rtx elt;
6499
6500 if (VECTOR_MODE_P (outermode)
6501 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6502 && vec_duplicate_p (op, &elt))
6503 return gen_vec_duplicate (outermode, elt);
6504
6505 if (outermode == GET_MODE_INNER (innermode)
6506 && vec_duplicate_p (op, &elt))
6507 return elt;
6508 }
6509
6510 if (CONST_SCALAR_INT_P (op)
6511 || CONST_DOUBLE_AS_FLOAT_P (op)
6512 || CONST_FIXED_P (op)
6513 || GET_CODE (op) == CONST_VECTOR)
6514 {
6515 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6516 the result from bytes, so it only works if the sizes of the modes
6517 and the value of the offset are known at compile time. Cases
6518 that apply to general modes and offsets should be handled here
6519 before calling simplify_immed_subreg. */
6520 fixed_size_mode fs_outermode, fs_innermode;
6521 unsigned HOST_WIDE_INT cbyte;
6522 if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6523 && is_a <fixed_size_mode> (innermode, &fs_innermode)
6524 && byte.is_constant (&cbyte))
6525 return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte,
6526 0, GET_MODE_SIZE (fs_innermode));
6527
6528 /* Handle constant-sized outer modes and variable-sized inner modes. */
6529 unsigned HOST_WIDE_INT first_elem;
6530 if (GET_CODE (op) == CONST_VECTOR
6531 && is_a <fixed_size_mode> (outermode, &fs_outermode)
6532 && constant_multiple_p (byte, GET_MODE_UNIT_SIZE (innermode),
6533 &first_elem))
6534 return simplify_immed_subreg (fs_outermode, op, innermode, 0,
6535 first_elem,
6536 GET_MODE_SIZE (fs_outermode));
6537
6538 return NULL_RTX;
6539 }
6540
6541 /* Changing mode twice with SUBREG => just change it once,
6542 or not at all if changing back to the starting mode. */
6543 if (GET_CODE (op) == SUBREG)
6544 {
6545 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6546 poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
6547 rtx newx;
6548
6549 if (outermode == innermostmode
6550 && known_eq (byte, 0U)
6551 && known_eq (SUBREG_BYTE (op), 0))
6552 return SUBREG_REG (op);
6553
6554 /* Work out the memory offset of the final OUTERMODE value relative
6555 to the inner value of OP. */
6556 poly_int64 mem_offset = subreg_memory_offset (outermode,
6557 innermode, byte);
6558 poly_int64 op_mem_offset = subreg_memory_offset (op);
6559 poly_int64 final_offset = mem_offset + op_mem_offset;
6560
6561 /* See whether resulting subreg will be paradoxical. */
6562 if (!paradoxical_subreg_p (outermode, innermostmode))
6563 {
6564 /* Bail out in case resulting subreg would be incorrect. */
6565 if (maybe_lt (final_offset, 0)
6566 || maybe_ge (poly_uint64 (final_offset), innermostsize)
6567 || !multiple_p (final_offset, outersize))
6568 return NULL_RTX;
6569 }
6570 else
6571 {
6572 poly_int64 required_offset = subreg_memory_offset (outermode,
6573 innermostmode, 0);
6574 if (maybe_ne (final_offset, required_offset))
6575 return NULL_RTX;
6576 /* Paradoxical subregs always have byte offset 0. */
6577 final_offset = 0;
6578 }
6579
6580 /* Recurse for further possible simplifications. */
6581 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6582 final_offset);
6583 if (newx)
6584 return newx;
6585 if (validate_subreg (outermode, innermostmode,
6586 SUBREG_REG (op), final_offset))
6587 {
6588 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
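/* Preserve the promoted-register information if the new subreg is still
   a lowpart that is at least as wide as the original subreg and no wider
   than the promoted inner register. */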
6589 if (SUBREG_PROMOTED_VAR_P (op)
6590 && SUBREG_PROMOTED_SIGN (op) >= 0
6591 && GET_MODE_CLASS (outermode) == MODE_INT
6592 && known_ge (outersize, innersize)
6593 && known_le (outersize, innermostsize)
6594 && subreg_lowpart_p (newx))
6595 {
6596 SUBREG_PROMOTED_VAR_P (newx) = 1;
6597 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6598 }
6599 return newx;
6600 }
6601 return NULL_RTX;
6602 }
6603
6604 /* SUBREG of a hard register => just change the register number
6605 and/or mode. If the hard register is not valid in that mode,
6606 suppress this simplification. If the hard register is the stack,
6607 frame, or argument pointer, leave this as a SUBREG. */
6608
6609 if (REG_P (op) && HARD_REGISTER_P (op))
6610 {
6611 unsigned int regno, final_regno;
6612
6613 regno = REGNO (op);
6614 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6615 if (HARD_REGISTER_NUM_P (final_regno))
6616 {
6617 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6618 subreg_memory_offset (outermode,
6619 innermode, byte));
6620
6621 /* Propagate original regno. We don't have any way to specify
6622 the offset inside original regno, so do so only for lowpart.
6623 The information is used only by alias analysis that cannot
6624 grok partial registers anyway. */
6625
6626 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
6627 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6628 return x;
6629 }
6630 }
6631
6632 /* If we have a SUBREG of a register that we are replacing and we are
6633 replacing it with a MEM, make a new MEM and try replacing the
6634 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6635 or if we would be widening it. */
6636
6637 if (MEM_P (op)
6638 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6639 /* Allow splitting of volatile memory references in case we don't
6640 have instruction to move the whole thing. */
6641 && (! MEM_VOLATILE_P (op)
6642 || ! have_insn_for (SET, innermode))
6643 && known_le (outersize, innersize))
6644 return adjust_address_nv (op, outermode, byte);
6645
6646 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6647 of two parts. */
6648 if (GET_CODE (op) == CONCAT
6649 || GET_CODE (op) == VEC_CONCAT)
6650 {
6651 poly_uint64 final_offset;
6652 rtx part, res;
6653
6654 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6655 if (part_mode == VOIDmode)
6656 part_mode = GET_MODE_INNER (GET_MODE (op));
6657 poly_uint64 part_size = GET_MODE_SIZE (part_mode);
6658 if (known_lt (byte, part_size))
6659 {
6660 part = XEXP (op, 0);
6661 final_offset = byte;
6662 }
6663 else if (known_ge (byte, part_size))
6664 {
6665 part = XEXP (op, 1);
6666 final_offset = byte - part_size;
6667 }
6668 else
6669 return NULL_RTX;
6670
6671 if (maybe_gt (final_offset + outersize, part_size))
6672 return NULL_RTX;
6673
6674 part_mode = GET_MODE (part);
6675 if (part_mode == VOIDmode)
6676 part_mode = GET_MODE_INNER (GET_MODE (op));
6677 res = simplify_subreg (outermode, part, part_mode, final_offset);
6678 if (res)
6679 return res;
6680 if (validate_subreg (outermode, part_mode, part, final_offset))
6681 return gen_rtx_SUBREG (outermode, part, final_offset);
6682 return NULL_RTX;
6683 }
6684
6685 /* Simplify
6686 (subreg (vec_merge (X)
6687 (vector)
6688 (const_int ((1 << N) | M)))
6689 (N * sizeof (outermode)))
6690 to
6691 (subreg (X) (N * sizeof (outermode)))
6692 */
6693 unsigned int idx;
6694 if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
6695 && idx < HOST_BITS_PER_WIDE_INT
6696 && GET_CODE (op) == VEC_MERGE
6697 && GET_MODE_INNER (innermode) == outermode
6698 && CONST_INT_P (XEXP (op, 2))
6699 && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
6700 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
6701
6702 /* A SUBREG resulting from a zero extension may fold to zero if
6703 it extracts higher bits than the ZERO_EXTEND's source bits. */
6704 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6705 {
6706 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
6707 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
6708 return CONST0_RTX (outermode);
6709 }
6710
6711 scalar_int_mode int_outermode, int_innermode;
6712 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6713 && is_a <scalar_int_mode> (innermode, &int_innermode)
6714 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
6715 {
6716 /* Handle polynomial integers. The upper bits of a paradoxical
6717 subreg are undefined, so this is safe regardless of whether
6718 we're truncating or extending. */
6719 if (CONST_POLY_INT_P (op))
6720 {
6721 poly_wide_int val
6722 = poly_wide_int::from (const_poly_int_value (op),
6723 GET_MODE_PRECISION (int_outermode),
6724 SIGNED);
6725 return immed_wide_int_const (val, int_outermode);
6726 }
6727
6728 if (GET_MODE_PRECISION (int_outermode)
6729 < GET_MODE_PRECISION (int_innermode))
6730 {
6731 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6732 if (tem)
6733 return tem;
6734 }
6735 }
6736
6737 /* If OP is a vector comparison and the subreg is not changing the
6738 number of elements or the size of the elements, change the result
6739 of the comparison to the new mode. */
6740 if (COMPARISON_P (op)
6741 && VECTOR_MODE_P (outermode)
6742 && VECTOR_MODE_P (innermode)
6743 && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
6744 && known_eq (GET_MODE_UNIT_SIZE (outermode),
6745 GET_MODE_UNIT_SIZE (innermode)))
6746 return simplify_gen_relational (GET_CODE (op), outermode, innermode,
6747 XEXP (op, 0), XEXP (op, 1));
6748 return NULL_RTX;
6749 }
6750
6751 /* Make a SUBREG operation or equivalent if it folds. */
6752
6753 rtx
6754 simplify_gen_subreg (machine_mode outermode, rtx op,
6755 machine_mode innermode, poly_uint64 byte)
6756 {
6757 rtx newx;
6758
6759 newx = simplify_subreg (outermode, op, innermode, byte);
6760 if (newx)
6761 return newx;
6762
6763 if (GET_CODE (op) == SUBREG
6764 || GET_CODE (op) == CONCAT
6765 || GET_MODE (op) == VOIDmode)
6766 return NULL_RTX;
6767
6768 if (validate_subreg (outermode, innermode, op, byte))
6769 return gen_rtx_SUBREG (outermode, op, byte);
6770
6771 return NULL_RTX;
6772 }
6773
6774 /* Generate a subreg that extracts the least significant part of EXPR (which
6775 is in mode INNER_MODE) as OUTER_MODE. */
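/* For example, lowpart_subreg (SImode, x, DImode) uses byte offset 0 on a
   little-endian target and byte offset 4 on a big-endian one. */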
6776
6777 rtx
6778 lowpart_subreg (machine_mode outer_mode, rtx expr,
6779 machine_mode inner_mode)
6780 {
6781 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6782 subreg_lowpart_offset (outer_mode, inner_mode));
6783 }
6784
6785 /* Simplify X, an rtx expression.
6786
6787 Return the simplified expression or NULL if no simplifications
6788 were possible.
6789
6790 This is the preferred entry point into the simplification routines;
6791 however, we still allow passes to call the more specific routines.
6792
6793 Right now GCC has three (yes, three) major bodies of RTL simplification
6794 code that need to be unified.
6795
6796 1. fold_rtx in cse.c. This code uses various CSE specific
6797 information to aid in RTL simplification.
6798
6799 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6800 it uses combine specific information to aid in RTL
6801 simplification.
6802
6803 3. The routines in this file.
6804
6805
6806 Long term we want to only have one body of simplification code; to
6807 get to that state I recommend the following steps:
6808
6809 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6810 which are not pass dependent state into these routines.
6811
6812 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6813 use this routine whenever possible.
6814
6815 3. Allow for pass dependent state to be provided to these
6816 routines and add simplifications based on the pass dependent
6817 state. Remove code from cse.c & combine.c that becomes
6818 redundant/dead.
6819
6820 It will take time, but ultimately the compiler will be easier to
6821 maintain and improve. It's totally silly that when we add a
6822 simplification it needs to be added to 4 places (3 for RTL
6823 simplification and 1 for tree simplification). */
6824
6825 rtx
6826 simplify_rtx (const_rtx x)
6827 {
6828 const enum rtx_code code = GET_CODE (x);
6829 const machine_mode mode = GET_MODE (x);
6830
6831 switch (GET_RTX_CLASS (code))
6832 {
6833 case RTX_UNARY:
6834 return simplify_unary_operation (code, mode,
6835 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6836 case RTX_COMM_ARITH:
6837 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6838 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6839
6840 /* Fall through. */
6841
6842 case RTX_BIN_ARITH:
6843 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6844
6845 case RTX_TERNARY:
6846 case RTX_BITFIELD_OPS:
6847 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6848 XEXP (x, 0), XEXP (x, 1),
6849 XEXP (x, 2));
6850
6851 case RTX_COMPARE:
6852 case RTX_COMM_COMPARE:
6853 return simplify_relational_operation (code, mode,
6854 ((GET_MODE (XEXP (x, 0))
6855 != VOIDmode)
6856 ? GET_MODE (XEXP (x, 0))
6857 : GET_MODE (XEXP (x, 1))),
6858 XEXP (x, 0),
6859 XEXP (x, 1));
6860
6861 case RTX_EXTRA:
6862 if (code == SUBREG)
6863 return simplify_subreg (mode, SUBREG_REG (x),
6864 GET_MODE (SUBREG_REG (x)),
6865 SUBREG_BYTE (x));
6866 break;
6867
6868 case RTX_OBJ:
6869 if (code == LO_SUM)
6870 {
6871 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6872 if (GET_CODE (XEXP (x, 0)) == HIGH
6873 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6874 return XEXP (x, 1);
6875 }
6876 break;
6877
6878 default:
6879 break;
6880 }
6881 return NULL;
6882 }
6883
6884 #if CHECKING_P
6885
6886 namespace selftest {
6887
6888 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6889
6890 static rtx
6891 make_test_reg (machine_mode mode)
6892 {
6893 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6894
6895 return gen_rtx_REG (mode, test_reg_num++);
6896 }
6897
6898 /* Test vector simplifications involving VEC_DUPLICATE in which the
6899 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6900 register that holds one element of MODE. */
6901
6902 static void
6903 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6904 {
6905 scalar_mode inner_mode = GET_MODE_INNER (mode);
6906 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6907 poly_uint64 nunits = GET_MODE_NUNITS (mode);
6908 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6909 {
6910 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6911 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6912 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6913 ASSERT_RTX_EQ (duplicate,
6914 simplify_unary_operation (NOT, mode,
6915 duplicate_not, mode));
6916
6917 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6918 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6919 ASSERT_RTX_EQ (duplicate,
6920 simplify_unary_operation (NEG, mode,
6921 duplicate_neg, mode));
6922
6923 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6924 ASSERT_RTX_EQ (duplicate,
6925 simplify_binary_operation (PLUS, mode, duplicate,
6926 CONST0_RTX (mode)));
6927
6928 ASSERT_RTX_EQ (duplicate,
6929 simplify_binary_operation (MINUS, mode, duplicate,
6930 CONST0_RTX (mode)));
6931
6932 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6933 simplify_binary_operation (MINUS, mode, duplicate,
6934 duplicate));
6935 }
6936
6937 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6938 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6939 ASSERT_RTX_PTR_EQ (scalar_reg,
6940 simplify_binary_operation (VEC_SELECT, inner_mode,
6941 duplicate, zero_par));
6942
6943 unsigned HOST_WIDE_INT const_nunits;
6944 if (nunits.is_constant (&const_nunits))
6945 {
6946 /* And again with the final element. */
6947 rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
6948 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6949 ASSERT_RTX_PTR_EQ (scalar_reg,
6950 simplify_binary_operation (VEC_SELECT, inner_mode,
6951 duplicate, last_par));
6952
6953 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
6954 rtx vector_reg = make_test_reg (mode);
6955 for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
6956 {
6957 if (i >= HOST_BITS_PER_WIDE_INT)
6958 break;
6959 rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
6960 rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
6961 poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
6962 ASSERT_RTX_EQ (scalar_reg,
6963 simplify_gen_subreg (inner_mode, vm,
6964 mode, offset));
6965 }
6966 }
6967
6968 /* Test a scalar subreg of a VEC_DUPLICATE. */
6969 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
6970 ASSERT_RTX_EQ (scalar_reg,
6971 simplify_gen_subreg (inner_mode, duplicate,
6972 mode, offset));
6973
6974 machine_mode narrower_mode;
6975 if (maybe_ne (nunits, 2U)
6976 && multiple_p (nunits, 2)
6977 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6978 && VECTOR_MODE_P (narrower_mode))
6979 {
6980 /* Test VEC_SELECT of a vector. */
6981 rtx vec_par
6982 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6983 rtx narrower_duplicate
6984 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6985 ASSERT_RTX_EQ (narrower_duplicate,
6986 simplify_binary_operation (VEC_SELECT, narrower_mode,
6987 duplicate, vec_par));
6988
6989 /* Test a vector subreg of a VEC_DUPLICATE. */
6990 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
6991 ASSERT_RTX_EQ (narrower_duplicate,
6992 simplify_gen_subreg (narrower_mode, duplicate,
6993 mode, offset));
6994 }
6995 }
6996
6997 /* Test vector simplifications involving VEC_SERIES in which the
6998 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6999 register that holds one element of MODE. */
7000
7001 static void
7002 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
7003 {
7004 /* Test unary cases with VEC_SERIES arguments. */
7005 scalar_mode inner_mode = GET_MODE_INNER (mode);
7006 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7007 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7008 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
7009 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
7010 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
7011 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
7012 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
7013 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
7014 neg_scalar_reg);
7015 ASSERT_RTX_EQ (series_0_r,
7016 simplify_unary_operation (NEG, mode, series_0_nr, mode));
7017 ASSERT_RTX_EQ (series_r_m1,
7018 simplify_unary_operation (NEG, mode, series_nr_1, mode));
7019 ASSERT_RTX_EQ (series_r_r,
7020 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
7021
7022 /* Test that a VEC_SERIES with a zero step is simplified away. */
7023 ASSERT_RTX_EQ (duplicate,
7024 simplify_binary_operation (VEC_SERIES, mode,
7025 scalar_reg, const0_rtx));
7026
7027 /* Test PLUS and MINUS with VEC_SERIES. */
7028 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
7029 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
7030 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
7031 ASSERT_RTX_EQ (series_r_r,
7032 simplify_binary_operation (PLUS, mode, series_0_r,
7033 duplicate));
7034 ASSERT_RTX_EQ (series_r_1,
7035 simplify_binary_operation (PLUS, mode, duplicate,
7036 series_0_1));
7037 ASSERT_RTX_EQ (series_r_m1,
7038 simplify_binary_operation (PLUS, mode, duplicate,
7039 series_0_m1));
7040 ASSERT_RTX_EQ (series_0_r,
7041 simplify_binary_operation (MINUS, mode, series_r_r,
7042 duplicate));
7043 ASSERT_RTX_EQ (series_r_m1,
7044 simplify_binary_operation (MINUS, mode, duplicate,
7045 series_0_1));
7046 ASSERT_RTX_EQ (series_r_1,
7047 simplify_binary_operation (MINUS, mode, duplicate,
7048 series_0_m1));
7049 ASSERT_RTX_EQ (series_0_m1,
7050 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
7051 constm1_rtx));
7052
7053 /* Test NEG on constant vector series. */
7054 ASSERT_RTX_EQ (series_0_m1,
7055 simplify_unary_operation (NEG, mode, series_0_1, mode));
7056 ASSERT_RTX_EQ (series_0_1,
7057 simplify_unary_operation (NEG, mode, series_0_m1, mode));
7058
7059 /* Test PLUS and MINUS on constant vector series. */
7060 rtx scalar2 = gen_int_mode (2, inner_mode);
7061 rtx scalar3 = gen_int_mode (3, inner_mode);
7062 rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
7063 rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
7064 rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
7065 ASSERT_RTX_EQ (series_1_1,
7066 simplify_binary_operation (PLUS, mode, series_0_1,
7067 CONST1_RTX (mode)));
7068 ASSERT_RTX_EQ (series_0_m1,
7069 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
7070 series_0_m1));
7071 ASSERT_RTX_EQ (series_1_3,
7072 simplify_binary_operation (PLUS, mode, series_1_1,
7073 series_0_2));
7074 ASSERT_RTX_EQ (series_0_1,
7075 simplify_binary_operation (MINUS, mode, series_1_1,
7076 CONST1_RTX (mode)));
7077 ASSERT_RTX_EQ (series_1_1,
7078 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
7079 series_0_m1));
7080 ASSERT_RTX_EQ (series_1_1,
7081 simplify_binary_operation (MINUS, mode, series_1_3,
7082 series_0_2));
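  /* Worked values: { 1, 2, 3, ... } + { 0, 2, 4, ... } = { 1, 4, 7, ... },
     i.e. a series with base 1 and step 3; the MINUS checks invert those
     sums.  */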
7083
7084 /* Test MULT between constant vectors. */
7085 rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
7086 rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
7087 rtx scalar9 = gen_int_mode (9, inner_mode);
7088 rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
7089 ASSERT_RTX_EQ (series_0_2,
7090 simplify_binary_operation (MULT, mode, series_0_1, vec2));
7091 ASSERT_RTX_EQ (series_3_9,
7092 simplify_binary_operation (MULT, mode, vec3, series_1_3));
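  /* The element-wise product of two series, e.g.
     { 0, 1, 2, ... } * { 0, 1, 2, ... } = { 0, 1, 4, 9, ... }, is not
     itself a series, so when the number of elements is not known at
     compile time there is no constant rtx that can represent the result
     and no fold is expected.  */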
7093 if (!GET_MODE_NUNITS (mode).is_constant ())
7094 ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
7095 series_0_1));
7096
7097 /* Test ASHIFT between constant vectors. */
7098 ASSERT_RTX_EQ (series_0_2,
7099 simplify_binary_operation (ASHIFT, mode, series_0_1,
7100 CONST1_RTX (mode)));
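  /* Similarly, { 1, 1, 1, ... } << { 0, 1, 2, ... } = { 1, 2, 4, ... } is
     geometric rather than a series, so it cannot be folded for
     variable-length vectors.  */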
7101 if (!GET_MODE_NUNITS (mode).is_constant ())
7102 ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
7103 series_0_1));
7104 }
7105
7106 /* Verify simplify_merge_mask works correctly. */
7107
7108 static void
7109 test_vec_merge (machine_mode mode)
7110 {
7111 rtx op0 = make_test_reg (mode);
7112 rtx op1 = make_test_reg (mode);
7113 rtx op2 = make_test_reg (mode);
7114 rtx op3 = make_test_reg (mode);
7115 rtx op4 = make_test_reg (mode);
7116 rtx op5 = make_test_reg (mode);
7117 rtx mask1 = make_test_reg (SImode);
7118 rtx mask2 = make_test_reg (SImode);
7119 rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
7120 rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
7121 rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
7122
7123 /* Simple vec_merge. */
7124 ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
7125 ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
7126 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
7127 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
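  /* As exercised above, simplify_merge_mask (X, MASK, SIDE) is expected
     to reduce a (vec_merge A B MASK) inside X to A for SIDE 0 and to B
     for SIDE 1, and to return NULL_RTX when the merge uses a different
     mask, since then nothing is known about which elements are
     selected.  */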
7128
7129 /* Nested vec_merge.
7130 It's tempting to make this simplify right down to opN, but we don't
7131 because all the simplify_* functions assume that the operands have
7132 already been simplified. */
7133 rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
7134 ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
7135 ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
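  /* A caller that applies the mask knowledge to each operand in turn
     does reach the reduced form; the "Called indirectly" check at the
     end of this function shows simplify_rtx producing
     (vec_merge op0 op3 mask1) from this same expression.  */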
7136
7137 /* Intermediate unary op. */
7138 rtx unop = gen_rtx_NOT (mode, vm1);
7139 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
7140 simplify_merge_mask (unop, mask1, 0));
7141 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
7142 simplify_merge_mask (unop, mask1, 1));
7143
7144 /* Intermediate binary op. */
7145 rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
7146 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
7147 simplify_merge_mask (binop, mask1, 0));
7148 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
7149 simplify_merge_mask (binop, mask1, 1));
7150
7151 /* Intermediate ternary op. */
7152 rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
7153 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
7154 simplify_merge_mask (tenop, mask1, 0));
7155 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
7156 simplify_merge_mask (tenop, mask1, 1));
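  /* In each case above the mask resolution is expected to distribute
     through the operation, rebuilding it on the operands that MASK
     selects.  */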
7157
7158 /* Side effects. */
7159 rtx badop0 = gen_rtx_PRE_INC (mode, op0);
7160 rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
7161 ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
7162 ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
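  /* Presumably the PRE_INC operand may be returned as the value selected
     for side 0, but simplifying for side 1 would discard it, and with it
     its side effect, so no simplification is expected there.  */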
7163
7164 /* Called indirectly. */
7165 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
7166 simplify_rtx (nvm));
7167 }
7168
7169 /* Verify some simplifications involving vectors. */
7170
7171 static void
7172 test_vector_ops ()
7173 {
7174 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7175 {
7176 machine_mode mode = (machine_mode) i;
7177 if (VECTOR_MODE_P (mode))
7178 {
7179 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
7180 test_vector_ops_duplicate (mode, scalar_reg);
7181 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
7182 && maybe_gt (GET_MODE_NUNITS (mode), 2))
7183 test_vector_ops_series (mode, scalar_reg);
7184 test_vec_merge (mode);
7185 }
7186 }
7187 }
7188
7189 template<unsigned int N>
7190 struct simplify_const_poly_int_tests
7191 {
7192 static void run ();
7193 };
7194
7195 template<>
7196 struct simplify_const_poly_int_tests<1>
7197 {
7198 static void run () {}
7199 };
7200
7201 /* Test various CONST_POLY_INT properties. */
7202
7203 template<unsigned int N>
7204 void
7205 simplify_const_poly_int_tests<N>::run ()
7206 {
7207 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
7208 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
7209 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
7210 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
7211 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
7212 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
7213 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
7214 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
7215 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
7216 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
7217 rtx two = GEN_INT (2);
7218 rtx six = GEN_INT (6);
7219 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
7220
7221 /* These tests only try limited operation combinations. Fuller arithmetic
7222 testing is done directly on poly_ints. */
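  /* A CONST_POLY_INT built from poly_int64 (A, B) represents A + B * X,
     where X is a runtime indeterminate (for variable-length vectors, a
     function of the vector length).  Arithmetic acts coefficient-wise
     where that is well defined and is truncated to the mode, so
     (1, 1) + (-80, 127) in QImode gives (-79, -128), with 128 wrapping
     to -128, which is why the PLUS check below expects x3.  */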
7223 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
7224 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
7225 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
7226 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
7227 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
7228 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
7229 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
7230 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
7231 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
7232 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
7233 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
7234 }
7235
7236 /* Run all of the selftests within this file. */
7237
7238 void
7239 simplify_rtx_c_tests ()
7240 {
7241 test_vector_ops ();
7242 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
7243 }
7244
7245 } // namespace selftest
7246
7247 #endif /* CHECKING_P */