Make mode_for_vector return an opt_mode
gcc/optabs.c
/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "predict.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "except.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
\f
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUAL note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
	  {
	    note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
	    if (GET_MODE_SIZE (GET_MODE (op0))
		> GET_MODE_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, GET_MODE (op0));
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, GET_MODE (op0));
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
\f
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
\f
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
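/* For instance, the low 8 bits of a QImode IOR widened to SImode do not
   depend on the high bits of the widened inputs, so NO_EXTEND can be
   nonzero there; for LSHIFTRT the high bits are shifted into the result,
   so the inputs must be genuinely extended.  */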
192
193 static rtx
194 widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
195 int unsignedp, int no_extend)
196 {
197 rtx result;
198 scalar_int_mode int_mode;
199
200 /* If we don't have to extend and this is a constant, return it. */
201 if (no_extend && GET_MODE (op) == VOIDmode)
202 return op;
203
204 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
205 extend since it will be more efficient to do so unless the signedness of
206 a promoted object differs from our extension. */
207 if (! no_extend
208 || !is_a <scalar_int_mode> (mode, &int_mode)
209 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
210 && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
211 return convert_modes (mode, oldmode, op, unsignedp);
212
213 /* If MODE is no wider than a single word, we return a lowpart or paradoxical
214 SUBREG. */
215 if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
216 return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));
217
218 /* Otherwise, get an object of MODE, clobber it, and set the low-order
219 part to OP. */
220
221 result = gen_reg_rtx (int_mode);
222 emit_clobber (result);
223 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
224 return result;
225 }
226 \f
227 /* Expand vector widening operations.
228
229 There are two different classes of operations handled here:
230 1) Operations whose result is wider than all the arguments to the operation.
231 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
232 In this case OP0 and optionally OP1 would be initialized,
233 but WIDE_OP wouldn't (not relevant for this case).
234 2) Operations whose result is of the same size as the last argument to the
235 operation, but wider than all the other arguments to the operation.
236 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
237 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
238
239 E.g, when called to expand the following operations, this is how
240 the arguments will be initialized:
241 nops OP0 OP1 WIDE_OP
242 widening-sum 2 oprnd0 - oprnd1
243 widening-dot-product 3 oprnd0 oprnd1 oprnd2
244 widening-mult 2 oprnd0 oprnd1 -
245 type-promotion (vec-unpack) 1 oprnd0 - - */
246
247 rtx
248 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
249 rtx target, int unsignedp)
250 {
251 struct expand_operand eops[4];
252 tree oprnd0, oprnd1, oprnd2;
253 machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
254 optab widen_pattern_optab;
255 enum insn_code icode;
256 int nops = TREE_CODE_LENGTH (ops->code);
257 int op;
258
259 oprnd0 = ops->op0;
260 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
261 widen_pattern_optab =
262 optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
263 if (ops->code == WIDEN_MULT_PLUS_EXPR
264 || ops->code == WIDEN_MULT_MINUS_EXPR)
265 icode = find_widening_optab_handler (widen_pattern_optab,
266 TYPE_MODE (TREE_TYPE (ops->op2)),
267 tmode0, 0);
268 else
269 icode = optab_handler (widen_pattern_optab, tmode0);
270 gcc_assert (icode != CODE_FOR_nothing);
271
272 if (nops >= 2)
273 {
274 oprnd1 = ops->op1;
275 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
276 }
277
278 /* The last operand is of a wider mode than the rest of the operands. */
279 if (nops == 2)
280 wmode = tmode1;
281 else if (nops == 3)
282 {
283 gcc_assert (tmode1 == tmode0);
284 gcc_assert (op1);
285 oprnd2 = ops->op2;
286 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
287 }
288
289 op = 0;
290 create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
291 create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
292 if (op1)
293 create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
294 if (wide_op)
295 create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
296 expand_insn (icode, op, eops);
297 return eops[0].value;
298 }
299
300 /* Generate code to perform an operation specified by TERNARY_OPTAB
301 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
302
303 UNSIGNEDP is for the case where we have to widen the operands
304 to perform the operation. It says to use zero-extension.
305
306 If TARGET is nonzero, the value
307 is generated there, if it is convenient to do so.
308 In all cases an rtx is returned for the locus of the value;
309 this may or may not be TARGET. */
310
311 rtx
312 expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
313 rtx op1, rtx op2, rtx target, int unsignedp)
314 {
315 struct expand_operand ops[4];
316 enum insn_code icode = optab_handler (ternary_optab, mode);
317
318 gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
319
320 create_output_operand (&ops[0], target, mode);
321 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
322 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
323 create_convert_operand_from (&ops[3], op2, mode, unsignedp);
324 expand_insn (icode, 4, ops);
325 return ops[0].value;
326 }
327
328
329 /* Like expand_binop, but return a constant rtx if the result can be
330 calculated at compile time. The arguments and return value are
331 otherwise the same as for expand_binop. */
332
333 rtx
334 simplify_expand_binop (machine_mode mode, optab binoptab,
335 rtx op0, rtx op1, rtx target, int unsignedp,
336 enum optab_methods methods)
337 {
338 if (CONSTANT_P (op0) && CONSTANT_P (op1))
339 {
340 rtx x = simplify_binary_operation (optab_to_code (binoptab),
341 mode, op0, op1);
342 if (x)
343 return x;
344 }
345
346 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
347 }
348
349 /* Like simplify_expand_binop, but always put the result in TARGET.
350 Return true if the expansion succeeded. */
351
352 bool
353 force_expand_binop (machine_mode mode, optab binoptab,
354 rtx op0, rtx op1, rtx target, int unsignedp,
355 enum optab_methods methods)
356 {
357 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
358 target, unsignedp, methods);
359 if (x == 0)
360 return false;
361 if (x != target)
362 emit_move_insn (target, x);
363 return true;
364 }
365
366 /* Create a new vector value in VMODE with all elements set to OP. The
367 mode of OP must be the element mode of VMODE. If OP is a constant,
368 then the return value will be a constant. */
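/* For example, broadcasting (const_int 7) into V4SImode yields the
   CONST_VECTOR [7, 7, 7, 7] directly; a non-constant OP instead goes
   through the target's vec_init pattern, if any.  */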
369
370 static rtx
371 expand_vector_broadcast (machine_mode vmode, rtx op)
372 {
373 enum insn_code icode;
374 rtvec vec;
375 rtx ret;
376 int i, n;
377
378 gcc_checking_assert (VECTOR_MODE_P (vmode));
379
380 n = GET_MODE_NUNITS (vmode);
381 vec = rtvec_alloc (n);
382 for (i = 0; i < n; ++i)
383 RTVEC_ELT (vec, i) = op;
384
385 if (CONSTANT_P (op))
386 return gen_rtx_CONST_VECTOR (vmode, vec);
387
388 /* ??? If the target doesn't have a vec_init, then we have no easy way
389 of performing this operation. Most of this sort of generic support
390 is hidden away in the vector lowering support in gimple. */
391 icode = convert_optab_handler (vec_init_optab, vmode,
392 GET_MODE_INNER (vmode));
393 if (icode == CODE_FOR_nothing)
394 return NULL;
395
396 ret = gen_reg_rtx (vmode);
397 emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));
398
399 return ret;
400 }
401
402 /* This subroutine of expand_doubleword_shift handles the cases in which
403 the effective shift value is >= BITS_PER_WORD. The arguments and return
404 value are the same as for the parent routine, except that SUPERWORD_OP1
405 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
406 INTO_TARGET may be null if the caller has decided to calculate it. */
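/* For example, a 64-bit logical right shift by 40 on a 32-bit machine
   shifts OUTOF_INPUT (the high word) right by SUPERWORD_OP1 = 40 - 32 = 8
   bits into INTO_TARGET (the low word), while OUTOF_TARGET becomes all
   zeros (or all copies of the sign bit for an arithmetic shift).  */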
407
408 static bool
409 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
410 rtx outof_target, rtx into_target,
411 int unsignedp, enum optab_methods methods)
412 {
413 if (into_target != 0)
414 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
415 into_target, unsignedp, methods))
416 return false;
417
418 if (outof_target != 0)
419 {
420 /* For a signed right shift, we must fill OUTOF_TARGET with copies
421 of the sign bit, otherwise we must fill it with zeros. */
422 if (binoptab != ashr_optab)
423 emit_move_insn (outof_target, CONST0_RTX (word_mode));
424 else
425 if (!force_expand_binop (word_mode, binoptab,
426 outof_input, GEN_INT (BITS_PER_WORD - 1),
427 outof_target, unsignedp, methods))
428 return false;
429 }
430 return true;
431 }
432
433 /* This subroutine of expand_doubleword_shift handles the cases in which
434 the effective shift value is < BITS_PER_WORD. The arguments and return
435 value are the same as for the parent routine. */
436
437 static bool
438 expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
439 rtx outof_input, rtx into_input, rtx op1,
440 rtx outof_target, rtx into_target,
441 int unsignedp, enum optab_methods methods,
442 unsigned HOST_WIDE_INT shift_mask)
443 {
444 optab reverse_unsigned_shift, unsigned_shift;
445 rtx tmp, carries;
446
447 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
448 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
449
450 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
451 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
452 the opposite direction to BINOPTAB. */
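  /* For example, in a 64-bit left shift by 10 using 32-bit words, the new
     high word is (INTO_INPUT << 10) | (OUTOF_INPUT >> 22): the logical
     right shift by 22 = BITS_PER_WORD - OP1 runs opposite to the ASHIFT
     being expanded.  */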
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
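  /* In both branches of the else above, TMP holds the effect of
     BITS_PER_WORD - 1 - OP1, so the single-bit shift already applied to
     CARRIES plus the TMP-bit shift below total the required
     BITS_PER_WORD - OP1 bits.  */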
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}


/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
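/* For instance, with SHIFT_MASK == 31 on a 32-bit target, word shifts
   truncate their counts modulo 32 and the synthesized doubleword shift
   behaves as if its count were truncated modulo 64.  */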
602
603 static bool
604 expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
605 rtx outof_input, rtx into_input, rtx op1,
606 rtx outof_target, rtx into_target,
607 int unsignedp, enum optab_methods methods,
608 unsigned HOST_WIDE_INT shift_mask)
609 {
610 rtx superword_op1, tmp, cmp1, cmp2;
611 enum rtx_code cmp_code;
612
613 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
614 fill the result with sign or zero bits as appropriate. If so, the value
615 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
616 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
617 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
618
619 This isn't worthwhile for constant shifts since the optimizers will
620 cope better with in-range shift counts. */
621 if (shift_mask >= BITS_PER_WORD
622 && outof_target != 0
623 && !CONSTANT_P (op1))
624 {
625 if (!expand_doubleword_shift (op1_mode, binoptab,
626 outof_input, into_input, op1,
627 0, into_target,
628 unsignedp, methods, shift_mask))
629 return false;
630 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
631 outof_target, unsignedp, methods))
632 return false;
633 return true;
634 }
635
636 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
637 is true when the effective shift value is less than BITS_PER_WORD.
638 Set SUPERWORD_OP1 to the shift count that should be used to shift
639 OUTOF_INPUT into INTO_TARGET when the condition is false. */
640 tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
641 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
642 {
643 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
644 is a subword shift count. */
645 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
646 0, true, methods);
647 cmp2 = CONST0_RTX (op1_mode);
648 cmp_code = EQ;
649 superword_op1 = op1;
650 }
651 else
652 {
653 /* Set CMP1 to OP1 - BITS_PER_WORD. */
654 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
655 0, true, methods);
656 cmp2 = CONST0_RTX (op1_mode);
657 cmp_code = LT;
658 superword_op1 = cmp1;
659 }
660 if (cmp1 == 0)
661 return false;
662
663 /* If we can compute the condition at compile time, pick the
664 appropriate subroutine. */
665 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
666 if (tmp != 0 && CONST_INT_P (tmp))
667 {
668 if (tmp == const0_rtx)
669 return expand_superword_shift (binoptab, outof_input, superword_op1,
670 outof_target, into_target,
671 unsignedp, methods);
672 else
673 return expand_subword_shift (op1_mode, binoptab,
674 outof_input, into_input, op1,
675 outof_target, into_target,
676 unsignedp, methods, shift_mask);
677 }
678
679 /* Try using conditional moves to generate straight-line code. */
680 if (HAVE_conditional_move)
681 {
682 rtx_insn *start = get_last_insn ();
683 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
684 cmp_code, cmp1, cmp2,
685 outof_input, into_input,
686 op1, superword_op1,
687 outof_target, into_target,
688 unsignedp, methods, shift_mask))
689 return true;
690 delete_insns_since (start);
691 }
692
693 /* As a last resort, use branches to select the correct alternative. */
694 rtx_code_label *subword_label = gen_label_rtx ();
695 rtx_code_label *done_label = gen_label_rtx ();
696
697 NO_DEFER_POP;
698 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
699 0, 0, subword_label,
700 profile_probability::uninitialized ());
701 OK_DEFER_POP;
702
703 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
704 outof_target, into_target,
705 unsignedp, methods))
706 return false;
707
708 emit_jump_insn (targetm.gen_jump (done_label));
709 emit_barrier ();
710 emit_label (subword_label);
711
712 if (!expand_subword_shift (op1_mode, binoptab,
713 outof_input, into_input, op1,
714 outof_target, into_target,
715 unsignedp, methods, shift_mask))
716 return false;
717
718 emit_label (done_label);
719 return true;
720 }
721 \f
722 /* Subroutine of expand_binop. Perform a double word multiplication of
723 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
724 as the target's word_mode. This function return NULL_RTX if anything
725 goes wrong, in which case it may have already emitted instructions
726 which need to be deleted.
727
728 If we want to multiply two two-word values and have normal and widening
729 multiplies of single-word values, we can do this with three smaller
730 multiplications.
731
732 The multiplication proceeds as follows:
733 _______________________
734 [__op0_high_|__op0_low__]
735 _______________________
736 * [__op1_high_|__op1_low__]
737 _______________________________________________
738 _______________________
739 (1) [__op0_low__*__op1_low__]
740 _______________________
741 (2a) [__op0_low__*__op1_high_]
742 _______________________
743 (2b) [__op0_high_*__op1_low__]
744 _______________________
745 (3) [__op0_high_*__op1_high_]
746
747
748 This gives a 4-word result. Since we are only interested in the
749 lower 2 words, partial result (3) and the upper words of (2a) and
750 (2b) don't need to be calculated. Hence (2a) and (2b) can be
751 calculated using non-widening multiplication.
752
753 (1), however, needs to be calculated with an unsigned widening
754 multiplication. If this operation is not directly supported we
755 try using a signed widening multiplication and adjust the result.
756 This adjustment works as follows:
757
758 If both operands are positive then no adjustment is needed.
759
760 If the operands have different signs, for example op0_low < 0 and
761 op1_low >= 0, the instruction treats the most significant bit of
762 op0_low as a sign bit instead of a bit with significance
763 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
764 with 2**BITS_PER_WORD - op0_low, and two's complements the
765 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
766 the result.
767
768 Similarly, if both operands are negative, we need to add
769 (op0_low + op1_low) * 2**BITS_PER_WORD.
770
771 We use a trick to adjust quickly. We logically shift op0_low right
772 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
773 op0_high (op1_high) before it is used to calculate 2b (2a). If no
774 logical shift exists, we do an arithmetic right shift and subtract
775 the 0 or -1. */
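/* A small worked example of the adjustment, with BITS_PER_WORD = 4:
   op0_low = 0b1110 is 14 unsigned but -2 signed, and op1_low = 0b0011
   is 3 either way.  The desired unsigned product of the low words is
   14 * 3 = 42, whereas a signed widening multiply computes -2 * 3 = -6.
   Adding op1_low * 2**BITS_PER_WORD = 3 * 16 = 48 gives -6 + 48 = 42 as
   required.  The shift trick performs the same correction by adding
   (op0_low >> 3) = 1 to op0_high before partial product (2b), which
   contributes with weight 2**BITS_PER_WORD.  */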
776
777 static rtx
778 expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
779 bool umulp, enum optab_methods methods)
780 {
781 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
782 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
783 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
784 rtx product, adjust, product_high, temp;
785
786 rtx op0_high = operand_subword_force (op0, high, mode);
787 rtx op0_low = operand_subword_force (op0, low, mode);
788 rtx op1_high = operand_subword_force (op1, high, mode);
789 rtx op1_low = operand_subword_force (op1, low, mode);
790
791 /* If we're using an unsigned multiply to directly compute the product
792 of the low-order words of the operands and perform any required
793 adjustments of the operands, we begin by trying two more multiplications
794 and then computing the appropriate sum.
795
796 We have checked above that the required addition is provided.
797 Full-word addition will normally always succeed, especially if
798 it is provided at all, so we don't worry about its failure. The
799 multiplication may well fail, however, so we do handle that. */
800
801 if (!umulp)
802 {
803 /* ??? This could be done with emit_store_flag where available. */
804 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
805 NULL_RTX, 1, methods);
806 if (temp)
807 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
808 NULL_RTX, 0, OPTAB_DIRECT);
809 else
810 {
811 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
812 NULL_RTX, 0, methods);
813 if (!temp)
814 return NULL_RTX;
815 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
816 NULL_RTX, 0, OPTAB_DIRECT);
817 }
818
819 if (!op0_high)
820 return NULL_RTX;
821 }
822
823 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
824 NULL_RTX, 0, OPTAB_DIRECT);
825 if (!adjust)
826 return NULL_RTX;
827
828 /* OP0_HIGH should now be dead. */
829
830 if (!umulp)
831 {
832 /* ??? This could be done with emit_store_flag where available. */
833 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
834 NULL_RTX, 1, methods);
835 if (temp)
836 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
837 NULL_RTX, 0, OPTAB_DIRECT);
838 else
839 {
840 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
841 NULL_RTX, 0, methods);
842 if (!temp)
843 return NULL_RTX;
844 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
845 NULL_RTX, 0, OPTAB_DIRECT);
846 }
847
848 if (!op1_high)
849 return NULL_RTX;
850 }
851
852 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
853 NULL_RTX, 0, OPTAB_DIRECT);
854 if (!temp)
855 return NULL_RTX;
856
857 /* OP1_HIGH should now be dead. */
858
859 adjust = expand_binop (word_mode, add_optab, adjust, temp,
860 NULL_RTX, 0, OPTAB_DIRECT);
861
862 if (target && !REG_P (target))
863 target = NULL_RTX;
864
865 if (umulp)
866 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
867 target, 1, OPTAB_DIRECT);
868 else
869 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
870 target, 1, OPTAB_DIRECT);
871
872 if (!product)
873 return NULL_RTX;
874
875 product_high = operand_subword (product, high, 1, mode);
876 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
877 NULL_RTX, 0, OPTAB_DIRECT);
878 emit_move_insn (product_high, adjust);
879 return product;
880 }
881 \f
882 /* Wrapper around expand_binop which takes an rtx code to specify
883 the operation to perform, not an optab pointer. All other
884 arguments are the same. */
885 rtx
886 expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
887 rtx op1, rtx target, int unsignedp,
888 enum optab_methods methods)
889 {
890 optab binop = code_to_optab (code);
891 gcc_assert (binop);
892
893 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
894 }
895
896 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
897 binop. Order them according to commutative_operand_precedence and, if
898 possible, try to put TARGET or a pseudo first. */
899 static bool
900 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
901 {
902 int op0_prec = commutative_operand_precedence (op0);
903 int op1_prec = commutative_operand_precedence (op1);
904
905 if (op0_prec < op1_prec)
906 return true;
907
908 if (op0_prec > op1_prec)
909 return false;
910
911 /* With equal precedence, both orders are ok, but it is better if the
912 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
913 if (target == 0 || REG_P (target))
914 return (REG_P (op1) && !REG_P (op0)) || target == op1;
915 else
916 return rtx_equal_p (op1, target);
917 }
918
919 /* Return true if BINOPTAB implements a shift operation. */
920
921 static bool
922 shift_optab_p (optab binoptab)
923 {
924 switch (optab_to_code (binoptab))
925 {
926 case ASHIFT:
927 case SS_ASHIFT:
928 case US_ASHIFT:
929 case ASHIFTRT:
930 case LSHIFTRT:
931 case ROTATE:
932 case ROTATERT:
933 return true;
934
935 default:
936 return false;
937 }
938 }
939
940 /* Return true if BINOPTAB implements a commutative binary operation. */
941
942 static bool
943 commutative_optab_p (optab binoptab)
944 {
945 return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
946 || binoptab == smul_widen_optab
947 || binoptab == umul_widen_optab
948 || binoptab == smul_highpart_optab
949 || binoptab == umul_highpart_optab);
950 }
951
952 /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're
953 optimizing, and if the operand is a constant that costs more than
954 1 instruction, force the constant into a register and return that
955 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
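/* For example, a constant that takes several instructions to materialize
   is often cheaper to use from a register, which also lets CSE share the
   expensive materialization between uses.  */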
956
957 static rtx
958 avoid_expensive_constant (machine_mode mode, optab binoptab,
959 int opn, rtx x, bool unsignedp)
960 {
961 bool speed = optimize_insn_for_speed_p ();
962
963 if (mode != VOIDmode
964 && optimize
965 && CONSTANT_P (x)
966 && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
967 > set_src_cost (x, mode, speed)))
968 {
969 if (CONST_INT_P (x))
970 {
971 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
972 if (intval != INTVAL (x))
973 x = GEN_INT (intval);
974 }
975 else
976 x = convert_modes (mode, VOIDmode, x, unsignedp);
977 x = force_reg (mode, x);
978 }
979 return x;
980 }
981
982 /* Helper function for expand_binop: handle the case where there
983 is an insn that directly implements the indicated operation.
984 Returns null if this is not possible. */
985 static rtx
986 expand_binop_directly (machine_mode mode, optab binoptab,
987 rtx op0, rtx op1,
988 rtx target, int unsignedp, enum optab_methods methods,
989 rtx_insn *last)
990 {
991 machine_mode from_mode = widened_mode (mode, op0, op1);
992 enum insn_code icode = find_widening_optab_handler (binoptab, mode,
993 from_mode, 1);
994 machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
995 machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
996 machine_mode mode0, mode1, tmp_mode;
997 struct expand_operand ops[3];
998 bool commutative_p;
999 rtx_insn *pat;
1000 rtx xop0 = op0, xop1 = op1;
1001 bool canonicalize_op1 = false;
1002
1003 /* If it is a commutative operator and the modes would match
1004 if we would swap the operands, we can save the conversions. */
1005 commutative_p = commutative_optab_p (binoptab);
1006 if (commutative_p
1007 && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
1008 && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode1)
1009 std::swap (xop0, xop1);
1010
1011 /* If we are optimizing, force expensive constants into a register. */
1012 xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
1013 if (!shift_optab_p (binoptab))
1014 xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
1015 else
1016 /* Shifts and rotates often use a different mode for op1 from op0;
1017 for VOIDmode constants we don't know the mode, so force it
1018 to be canonicalized using convert_modes. */
1019 canonicalize_op1 = true;
1020
1021 /* In case the insn wants input operands in modes different from
1022 those of the actual operands, convert the operands. It would
1023 seem that we don't need to convert CONST_INTs, but we do, so
1024 that they're properly zero-extended, sign-extended or truncated
1025 for their mode. */
1026
1027 mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
1028 if (xmode0 != VOIDmode && xmode0 != mode0)
1029 {
1030 xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
1031 mode0 = xmode0;
1032 }
1033
1034 mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
1035 ? GET_MODE (xop1) : mode);
1036 if (xmode1 != VOIDmode && xmode1 != mode1)
1037 {
1038 xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
1039 mode1 = xmode1;
1040 }
1041
1042 /* If operation is commutative,
1043 try to make the first operand a register.
1044 Even better, try to make it the same as the target.
1045 Also try to make the last operand a constant. */
1046 if (commutative_p
1047 && swap_commutative_operands_with_target (target, xop0, xop1))
1048 std::swap (xop0, xop1);
1049
1050 /* Now, if insn's predicates don't allow our operands, put them into
1051 pseudo regs. */
1052
1053 if (binoptab == vec_pack_trunc_optab
1054 || binoptab == vec_pack_usat_optab
1055 || binoptab == vec_pack_ssat_optab
1056 || binoptab == vec_pack_ufix_trunc_optab
1057 || binoptab == vec_pack_sfix_trunc_optab)
1058 {
1059 /* The mode of the result is different then the mode of the
1060 arguments. */
1061 tmp_mode = insn_data[(int) icode].operand[0].mode;
1062 if (VECTOR_MODE_P (mode)
1063 && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1064 {
1065 delete_insns_since (last);
1066 return NULL_RTX;
1067 }
1068 }
1069 else
1070 tmp_mode = mode;
1071
1072 create_output_operand (&ops[0], target, tmp_mode);
1073 create_input_operand (&ops[1], xop0, mode0);
1074 create_input_operand (&ops[2], xop1, mode1);
1075 pat = maybe_gen_insn (icode, 3, ops);
1076 if (pat)
1077 {
1078 /* If PAT is composed of more than one insn, try to add an appropriate
1079 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1080 operand, call expand_binop again, this time without a target. */
1081 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1082 && ! add_equal_note (pat, ops[0].value,
1083 optab_to_code (binoptab),
1084 ops[1].value, ops[2].value))
1085 {
1086 delete_insns_since (last);
1087 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1088 unsignedp, methods);
1089 }
1090
1091 emit_insn (pat);
1092 return ops[0].value;
1093 }
1094 delete_insns_since (last);
1095 return NULL_RTX;
1096 }
1097
1098 /* Generate code to perform an operation specified by BINOPTAB
1099 on operands OP0 and OP1, with result having machine-mode MODE.
1100
1101 UNSIGNEDP is for the case where we have to widen the operands
1102 to perform the operation. It says to use zero-extension.
1103
1104 If TARGET is nonzero, the value
1105 is generated there, if it is convenient to do so.
1106 In all cases an rtx is returned for the locus of the value;
1107 this may or may not be TARGET. */
1108
1109 rtx
1110 expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
1111 rtx target, int unsignedp, enum optab_methods methods)
1112 {
1113 enum optab_methods next_methods
1114 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1115 ? OPTAB_WIDEN : methods);
1116 enum mode_class mclass;
1117 machine_mode wider_mode;
1118 scalar_int_mode int_mode;
1119 rtx libfunc;
1120 rtx temp;
1121 rtx_insn *entry_last = get_last_insn ();
1122 rtx_insn *last;
1123
1124 mclass = GET_MODE_CLASS (mode);
1125
1126 /* If subtracting an integer constant, convert this into an addition of
1127 the negated constant. */
1128
1129 if (binoptab == sub_optab && CONST_INT_P (op1))
1130 {
1131 op1 = negate_rtx (mode, op1);
1132 binoptab = add_optab;
1133 }
1134 /* For shifts, constant invalid op1 might be expanded from different
1135 mode than MODE. As those are invalid, force them to a register
1136 to avoid further problems during expansion. */
1137 else if (CONST_INT_P (op1)
1138 && shift_optab_p (binoptab)
1139 && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
1140 {
1141 op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
1142 op1 = force_reg (GET_MODE_INNER (mode), op1);
1143 }
1144
1145 /* Record where to delete back to if we backtrack. */
1146 last = get_last_insn ();
1147
1148 /* If we can do it with a three-operand insn, do so. */
1149
1150 if (methods != OPTAB_MUST_WIDEN
1151 && find_widening_optab_handler (binoptab, mode,
1152 widened_mode (mode, op0, op1), 1)
1153 != CODE_FOR_nothing)
1154 {
1155 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1156 unsignedp, methods, last);
1157 if (temp)
1158 return temp;
1159 }
1160
1161 /* If we were trying to rotate, and that didn't work, try rotating
1162 the other direction before falling back to shifts and bitwise-or. */
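  /* For example, a rotate left by C bits gives the same result as a
     rotate right by PRECISION - C bits, so a target that only provides
     one direction can still handle both.  */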
  if (((binoptab == rotl_optab
	&& optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
	newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
	newop1 = negate_rtx (GET_MODE (op1), op1);
      else
	newop1 = expand_binop (GET_MODE (op1), sub_optab,
			       gen_int_mode (bits, GET_MODE (op1)), op1,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (int_mode, otheroptab, op0, newop1,
				    target, unsignedp, methods, last);
      if (temp)
	return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
				  ? umul_widen_optab
				  : smul_widen_optab),
				 wider_mode, mode) != CODE_FOR_nothing))
    {
      temp = expand_binop (wider_mode,
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
	otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
	otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
	otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
	otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
	otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
	{
	  /* The scalar may have been extended to be too wide.  Truncate
	     it back to the proper size to fit in the broadcast vector.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  if (!CONST_INT_P (op1)
	      && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
		  > GET_MODE_BITSIZE (inner_mode)))
	    op1 = force_reg (inner_mode,
			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
						 GET_MODE (op1)));
	  rtx vop1 = expand_vector_broadcast (mode, op1);
	  if (vop1)
	    {
	      temp = expand_binop_directly (mode, otheroptab, op0, vop1,
					    target, unsignedp, methods, last);
	      if (temp)
		return temp;
	    }
	}
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	machine_mode next_mode;
	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
		&& (find_widening_optab_handler ((unsignedp
						  ? umul_widen_optab
						  : smul_widen_optab),
						 next_mode, mode, 0)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& mclass == MODE_INT)
	      {
		no_extend = 1;
		xop0 = avoid_expensive_constant (mode, binoptab, 0,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
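  /* For example, a DImode AND on a 32-bit target becomes two independent
     SImode ANDs, one on each word; there are no carries between words for
     these bitwise operations.  */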
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, int_mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, int_mode),
				operand_subword_force (op1, i, int_mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
		  ? as_a <scalar_int_mode> (GET_MODE (op1))
		  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
	op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx_insn *insns;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0
	      || target == op0
	      || target == op1
	      || !valid_multiword_target_p (target))
	    target = gen_reg_rtx (int_mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, int_mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

	  outof_input = operand_subword_force (op0, outof_word, int_mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	    {
	      insns = get_insns ();
	      end_sequence ();

	      emit_insn (insns);
	      return target;
	    }
	  end_sequence ();
	}
    }

  /* Synthesize double word rotates from single word shifts.  */
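  /* For example, a 64-bit rotate left by 10 with 32-bit words produces
     high' = (high << 10) | (low >> 22) and low' = (low << 10) | (high >> 22):
     four single word shifts plus two IORs.  */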
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && is_int_mode (mode, &int_mode)
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  Do this also if target is not
	 a REG, first because having a register instead may open optimization
	 opportunities, and second because if target and op0 happen to be MEMs
	 designating the same location, we would risk clobbering it too early
	 in the code sequence we generate below.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !REG_P (target)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, int_mode);
      into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

      outof_input = operand_subword_force (op0, outof_word, int_mode);
      into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

      if (shift_count == BITS_PER_WORD)
	{
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);
	  inter = const0_rtx;
	}
      else
	{
	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  rtx first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	    {
	      first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
	      second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
	    }
	  else
	    {
	      first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
	      second_shift_count = GEN_INT (shift_count);
	    }

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count,
				     NULL_RTX, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);
	  else
	    inter = 0;

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count,
				      NULL_RTX, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* These can be done a word at a time by propagating carries.  */
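  /* For example, in a DImode addition on a 32-bit target the low words
     are added first; an unsigned carry occurred iff the low-word result
     is smaller than either addend, which is what the LT store-flag test
     below detects (GT detects the borrow for subtraction).  */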
1564 if ((binoptab == add_optab || binoptab == sub_optab)
1565 && is_int_mode (mode, &int_mode)
1566 && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
1567 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1568 {
1569 unsigned int i;
1570 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1571 const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
1572 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1573 rtx xop0, xop1, xtarget;
1574
1575 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1576 value is one of those, use it. Otherwise, use 1 since it is the
1577 one easiest to get. */
1578 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1579 int normalizep = STORE_FLAG_VALUE;
1580 #else
1581 int normalizep = 1;
1582 #endif
1583
1584 /* Prepare the operands. */
1585 xop0 = force_reg (int_mode, op0);
1586 xop1 = force_reg (int_mode, op1);
1587
1588 xtarget = gen_reg_rtx (int_mode);
1589
1590 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1591 target = xtarget;
1592
1593 /* Indicate for flow that the entire target reg is being set. */
1594 if (REG_P (target))
1595 emit_clobber (xtarget);
1596
1597 /* Do the actual arithmetic. */
1598 for (i = 0; i < nwords; i++)
1599 {
1600 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1601 rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
1602 rtx op0_piece = operand_subword_force (xop0, index, int_mode);
1603 rtx op1_piece = operand_subword_force (xop1, index, int_mode);
1604 rtx x;
1605
1606 /* Main add/subtract of the input operands. */
1607 x = expand_binop (word_mode, binoptab,
1608 op0_piece, op1_piece,
1609 target_piece, unsignedp, next_methods);
1610 if (x == 0)
1611 break;
1612
1613 if (i + 1 < nwords)
1614 {
1615 /* Store carry from main add/subtract. */
1616 carry_out = gen_reg_rtx (word_mode);
1617 carry_out = emit_store_flag_force (carry_out,
1618 (binoptab == add_optab
1619 ? LT : GT),
1620 x, op0_piece,
1621 word_mode, 1, normalizep);
1622 }
1623
1624 if (i > 0)
1625 {
1626 rtx newx;
1627
1628 /* Add/subtract previous carry to main result. */
1629 newx = expand_binop (word_mode,
1630 normalizep == 1 ? binoptab : otheroptab,
1631 x, carry_in,
1632 NULL_RTX, 1, next_methods);
1633
1634 if (i + 1 < nwords)
1635 {
1636 /* Get out carry from adding/subtracting carry in. */
1637 rtx carry_tmp = gen_reg_rtx (word_mode);
1638 carry_tmp = emit_store_flag_force (carry_tmp,
1639 (binoptab == add_optab
1640 ? LT : GT),
1641 newx, x,
1642 word_mode, 1, normalizep);
1643
1644 /* Logical-ior the two possible carries together.  */
1645 carry_out = expand_binop (word_mode, ior_optab,
1646 carry_out, carry_tmp,
1647 carry_out, 0, next_methods);
1648 if (carry_out == 0)
1649 break;
1650 }
1651 emit_move_insn (target_piece, newx);
1652 }
1653 else
1654 {
1655 if (x != target_piece)
1656 emit_move_insn (target_piece, x);
1657 }
1658
1659 carry_in = carry_out;
1660 }
1661
1662 if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
1663 {
1664 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
1665 || ! rtx_equal_p (target, xtarget))
1666 {
1667 rtx_insn *temp = emit_move_insn (target, xtarget);
1668
1669 set_dst_reg_note (temp, REG_EQUAL,
1670 gen_rtx_fmt_ee (optab_to_code (binoptab),
1671 int_mode, copy_rtx (xop0),
1672 copy_rtx (xop1)),
1673 target);
1674 }
1675 else
1676 target = xtarget;
1677
1678 return target;
1679 }
1680
1681 else
1682 delete_insns_since (last);
1683 }
1684
1685 /* Attempt to synthesize double word multiplies using a sequence of word
1686 mode multiplications. We first attempt to generate a sequence using a
1687 more efficient unsigned widening multiply, and if that fails we then
1688 try using a signed widening multiply. */
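/* Writing each operand as hi * 2^W + lo with W = BITS_PER_WORD, the
   low 2*W bits of the product satisfy

     (hi0 * 2^W + lo0) * (hi1 * 2^W + lo1)
       == umul_widen (lo0, lo1)
          + ((lo0 * hi1 + hi0 * lo1) << W)   (mod 2^(2*W)),

   so a single widening multiply plus two word-mode multiplies and
   additions into the high word suffice.  */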
1689
1690 if (binoptab == smul_optab
1691 && is_int_mode (mode, &int_mode)
1692 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1693 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
1694 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
1695 {
1696 rtx product = NULL_RTX;
1697 if (widening_optab_handler (umul_widen_optab, int_mode, word_mode)
1698 != CODE_FOR_nothing)
1699 {
1700 product = expand_doubleword_mult (int_mode, op0, op1, target,
1701 true, methods);
1702 if (!product)
1703 delete_insns_since (last);
1704 }
1705
1706 if (product == NULL_RTX
1707 && (widening_optab_handler (smul_widen_optab, int_mode, word_mode)
1708 != CODE_FOR_nothing))
1709 {
1710 product = expand_doubleword_mult (int_mode, op0, op1, target,
1711 false, methods);
1712 if (!product)
1713 delete_insns_since (last);
1714 }
1715
1716 if (product != NULL_RTX)
1717 {
1718 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
1719 {
1720 rtx_insn *move = emit_move_insn (target ? target : product,
1721 product);
1722 set_dst_reg_note (move,
1723 REG_EQUAL,
1724 gen_rtx_fmt_ee (MULT, int_mode,
1725 copy_rtx (op0),
1726 copy_rtx (op1)),
1727 target ? target : product);
1728 }
1729 return product;
1730 }
1731 }
1732
1733 /* It can't be open-coded in this mode.
1734 Use a library call if one is available and caller says that's ok. */
1735
1736 libfunc = optab_libfunc (binoptab, mode);
1737 if (libfunc
1738 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1739 {
1740 rtx_insn *insns;
1741 rtx op1x = op1;
1742 machine_mode op1_mode = mode;
1743 rtx value;
1744
1745 start_sequence ();
1746
1747 if (shift_optab_p (binoptab))
1748 {
1749 op1_mode = targetm.libgcc_shift_count_mode ();
1750 /* Specify unsigned here,
1751 since negative shift counts are meaningless. */
1752 op1x = convert_to_mode (op1_mode, op1, 1);
1753 }
1754
1755 if (GET_MODE (op0) != VOIDmode
1756 && GET_MODE (op0) != mode)
1757 op0 = convert_to_mode (mode, op0, unsignedp);
1758
1759 /* Pass 1 for NO_QUEUE so we don't lose any increments
1760 if the libcall is cse'd or moved. */
1761 value = emit_library_call_value (libfunc,
1762 NULL_RTX, LCT_CONST, mode,
1763 op0, mode, op1x, op1_mode);
1764
1765 insns = get_insns ();
1766 end_sequence ();
1767
1768 bool trapv = trapv_binoptab_p (binoptab);
1769 target = gen_reg_rtx (mode);
1770 emit_libcall_block_1 (insns, target, value,
1771 trapv ? NULL_RTX
1772 : gen_rtx_fmt_ee (optab_to_code (binoptab),
1773 mode, op0, op1), trapv);
1774
1775 return target;
1776 }
1777
1778 delete_insns_since (last);
1779
1780 /* It can't be done in this mode. Can we do it in a wider mode? */
1781
1782 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1783 || methods == OPTAB_MUST_WIDEN))
1784 {
1785 /* Caller says don't even try.  */
1786 delete_insns_since (entry_last);
1787 return 0;
1788 }
1789
1790 /* Compute the value of METHODS to pass to recursive calls.
1791 Don't allow widening to be tried recursively. */
1792
1793 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1794
1795 /* Look for a wider mode of the same class for which it appears we can do
1796 the operation. */
1797
1798 if (CLASS_HAS_WIDER_MODES_P (mclass))
1799 {
1800 FOR_EACH_WIDER_MODE (wider_mode, mode)
1801 {
1802 if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
1803 != CODE_FOR_nothing
1804 || (methods == OPTAB_LIB
1805 && optab_libfunc (binoptab, wider_mode)))
1806 {
1807 rtx xop0 = op0, xop1 = op1;
1808 int no_extend = 0;
1809
1810 /* For certain integer operations, we need not actually extend
1811 the narrow operands, as long as we will truncate
1812 the results to the same narrowness. */
1813
1814 if ((binoptab == ior_optab || binoptab == and_optab
1815 || binoptab == xor_optab
1816 || binoptab == add_optab || binoptab == sub_optab
1817 || binoptab == smul_optab || binoptab == ashl_optab)
1818 && mclass == MODE_INT)
1819 no_extend = 1;
1820
1821 xop0 = widen_operand (xop0, wider_mode, mode,
1822 unsignedp, no_extend);
1823
1824 /* The second operand of a shift must always be extended. */
1825 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1826 no_extend && binoptab != ashl_optab);
1827
1828 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1829 unsignedp, methods);
1830 if (temp)
1831 {
1832 if (mclass != MODE_INT
1833 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1834 {
1835 if (target == 0)
1836 target = gen_reg_rtx (mode);
1837 convert_move (target, temp, 0);
1838 return target;
1839 }
1840 else
1841 return gen_lowpart (mode, temp);
1842 }
1843 else
1844 delete_insns_since (last);
1845 }
1846 }
1847 }
1848
1849 delete_insns_since (entry_last);
1850 return 0;
1851 }
1852 \f
1853 /* Expand a binary operator which has both signed and unsigned forms.
1854 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1855 signed operations.
1856
1857 If we widen unsigned operands, we may use a signed wider operation instead
1858 of an unsigned wider operation, since the result would be the same. */
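/* The reason the signed instruction is usable: after zero-extension
   both operands are non-negative, and on such inputs the signed and
   unsigned flavors of these operations agree.  For example, an
   unsigned QImode division can be carried out as a signed SImode
   division of the zero-extended operands.  */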
1859
1860 rtx
1861 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
1862 rtx op0, rtx op1, rtx target, int unsignedp,
1863 enum optab_methods methods)
1864 {
1865 rtx temp;
1866 optab direct_optab = unsignedp ? uoptab : soptab;
1867 bool save_enable;
1868
1869 /* Do it without widening, if possible. */
1870 temp = expand_binop (mode, direct_optab, op0, op1, target,
1871 unsignedp, OPTAB_DIRECT);
1872 if (temp || methods == OPTAB_DIRECT)
1873 return temp;
1874
1875 /* Try widening to a signed int. Disable any direct use of any
1876 signed insn in the current mode. */
1877 save_enable = swap_optab_enable (soptab, mode, false);
1878
1879 temp = expand_binop (mode, soptab, op0, op1, target,
1880 unsignedp, OPTAB_WIDEN);
1881
1882 /* For unsigned operands, try widening to an unsigned int. */
1883 if (!temp && unsignedp)
1884 temp = expand_binop (mode, uoptab, op0, op1, target,
1885 unsignedp, OPTAB_WIDEN);
1886 if (temp || methods == OPTAB_WIDEN)
1887 goto egress;
1888
1889 /* Use the right width libcall if that exists. */
1890 temp = expand_binop (mode, direct_optab, op0, op1, target,
1891 unsignedp, OPTAB_LIB);
1892 if (temp || methods == OPTAB_LIB)
1893 goto egress;
1894
1895 /* Must widen and use a libcall; try either signed or unsigned.  */
1896 temp = expand_binop (mode, soptab, op0, op1, target,
1897 unsignedp, methods);
1898 if (!temp && unsignedp)
1899 temp = expand_binop (mode, uoptab, op0, op1, target,
1900 unsignedp, methods);
1901
1902 egress:
1903 /* Undo the fiddling above. */
1904 if (save_enable)
1905 swap_optab_enable (soptab, mode, true);
1906 return temp;
1907 }
1908 \f
1909 /* Generate code to perform an operation specified by UNOPPTAB
1910 on operand OP0, with two results to TARG0 and TARG1.
1911 We assume that the order of the operands for the instruction
1912 is TARG0, TARG1, OP0.
1913
1914 Either TARG0 or TARG1 may be zero, but what that means is that
1915 the result is not actually wanted. We will generate it into
1916 a dummy pseudo-reg and discard it. They may not both be zero.
1917
1918 Returns 1 if this operation can be performed; 0 if not. */
1919
1920 int
1921 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1922 int unsignedp)
1923 {
1924 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1925 enum mode_class mclass;
1926 machine_mode wider_mode;
1927 rtx_insn *entry_last = get_last_insn ();
1928 rtx_insn *last;
1929
1930 mclass = GET_MODE_CLASS (mode);
1931
1932 if (!targ0)
1933 targ0 = gen_reg_rtx (mode);
1934 if (!targ1)
1935 targ1 = gen_reg_rtx (mode);
1936
1937 /* Record where to go back to if we fail. */
1938 last = get_last_insn ();
1939
1940 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
1941 {
1942 struct expand_operand ops[3];
1943 enum insn_code icode = optab_handler (unoptab, mode);
1944
1945 create_fixed_operand (&ops[0], targ0);
1946 create_fixed_operand (&ops[1], targ1);
1947 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
1948 if (maybe_expand_insn (icode, 3, ops))
1949 return 1;
1950 }
1951
1952 /* It can't be done in this mode. Can we do it in a wider mode? */
1953
1954 if (CLASS_HAS_WIDER_MODES_P (mclass))
1955 {
1956 FOR_EACH_WIDER_MODE (wider_mode, mode)
1957 {
1958 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
1959 {
1960 rtx t0 = gen_reg_rtx (wider_mode);
1961 rtx t1 = gen_reg_rtx (wider_mode);
1962 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1963
1964 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1965 {
1966 convert_move (targ0, t0, unsignedp);
1967 convert_move (targ1, t1, unsignedp);
1968 return 1;
1969 }
1970 else
1971 delete_insns_since (last);
1972 }
1973 }
1974 }
1975
1976 delete_insns_since (entry_last);
1977 return 0;
1978 }
1979 \f
1980 /* Generate code to perform an operation specified by BINOPTAB
1981 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1982 We assume that the order of the operands for the instruction
1983 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1984 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1985
1986 Either TARG0 or TARG1 may be zero, but what that means is that
1987 the result is not actually wanted. We will generate it into
1988 a dummy pseudo-reg and discard it. They may not both be zero.
1989
1990 Returns 1 if this operation can be performed; 0 if not. */
1991
1992 int
1993 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1994 int unsignedp)
1995 {
1996 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1997 enum mode_class mclass;
1998 machine_mode wider_mode;
1999 rtx_insn *entry_last = get_last_insn ();
2000 rtx_insn *last;
2001
2002 mclass = GET_MODE_CLASS (mode);
2003
2004 if (!targ0)
2005 targ0 = gen_reg_rtx (mode);
2006 if (!targ1)
2007 targ1 = gen_reg_rtx (mode);
2008
2009 /* Record where to go back to if we fail. */
2010 last = get_last_insn ();
2011
2012 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2013 {
2014 struct expand_operand ops[4];
2015 enum insn_code icode = optab_handler (binoptab, mode);
2016 machine_mode mode0 = insn_data[icode].operand[1].mode;
2017 machine_mode mode1 = insn_data[icode].operand[2].mode;
2018 rtx xop0 = op0, xop1 = op1;
2019
2020 /* If we are optimizing, force expensive constants into a register. */
2021 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2022 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2023
2024 create_fixed_operand (&ops[0], targ0);
2025 create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
2026 create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
2027 create_fixed_operand (&ops[3], targ1);
2028 if (maybe_expand_insn (icode, 4, ops))
2029 return 1;
2030 delete_insns_since (last);
2031 }
2032
2033 /* It can't be done in this mode. Can we do it in a wider mode? */
2034
2035 if (CLASS_HAS_WIDER_MODES_P (mclass))
2036 {
2037 FOR_EACH_WIDER_MODE (wider_mode, mode)
2038 {
2039 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2040 {
2041 rtx t0 = gen_reg_rtx (wider_mode);
2042 rtx t1 = gen_reg_rtx (wider_mode);
2043 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2044 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2045
2046 if (expand_twoval_binop (binoptab, cop0, cop1,
2047 t0, t1, unsignedp))
2048 {
2049 convert_move (targ0, t0, unsignedp);
2050 convert_move (targ1, t1, unsignedp);
2051 return 1;
2052 }
2053 else
2054 delete_insns_since (last);
2055 }
2056 }
2057 }
2058
2059 delete_insns_since (entry_last);
2060 return 0;
2061 }
2062
2063 /* Expand the two-valued library call indicated by BINOPTAB, but
2064 preserve only one of the values. If TARG0 is non-NULL, the first
2065 value is placed into TARG0; otherwise the second value is placed
2066 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2067 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2068 This routine assumes that the value returned by the library call is
2069 as if the return value were of an integral mode twice as wide as the
2070 mode of OP0.  Returns true if the call was successful.  */
2071
2072 bool
2073 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2074 rtx targ0, rtx targ1, enum rtx_code code)
2075 {
2076 machine_mode mode;
2077 machine_mode libval_mode;
2078 rtx libval;
2079 rtx_insn *insns;
2080 rtx libfunc;
2081
2082 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2083 gcc_assert (!targ0 != !targ1);
2084
2085 mode = GET_MODE (op0);
2086 libfunc = optab_libfunc (binoptab, mode);
2087 if (!libfunc)
2088 return false;
2089
2090 /* The value returned by the library function will have twice as
2091 many bits as the nominal MODE. */
2092 libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
2093 start_sequence ();
2094 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2095 libval_mode,
2096 op0, mode,
2097 op1, mode);
2098 /* Get the part of LIBVAL containing the value that we want.  */
2099 libval = simplify_gen_subreg (mode, libval, libval_mode,
2100 targ0 ? 0 : GET_MODE_SIZE (mode));
2101 insns = get_insns ();
2102 end_sequence ();
2103 /* Move the result into the desired location.  */
2104 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2105 gen_rtx_fmt_ee (code, mode, op0, op1));
2106
2107 return true;
2108 }
2109
2110 \f
2111 /* Wrapper around expand_unop which takes an rtx code to specify
2112 the operation to perform, not an optab pointer. All other
2113 arguments are the same. */
2114 rtx
2115 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2116 rtx target, int unsignedp)
2117 {
2118 optab unop = code_to_optab (code);
2119 gcc_assert (unop);
2120
2121 return expand_unop (mode, unop, op0, target, unsignedp);
2122 }
2123
2124 /* Try calculating
2125 (clz:narrow x)
2126 as
2127 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2128
2129 A similar operation can be used for clrsb. UNOPTAB says which operation
2130 we are trying to expand. */
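/* Concretely, assuming QImode widened to SImode:
   clz8 (x) == clz32 (zext (x)) - (32 - 8), since the zero-extension
   contributes exactly 32 - 8 leading zeros of its own.  */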
2131 static rtx
2132 widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
2133 {
2134 opt_scalar_int_mode wider_mode_iter;
2135 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2136 {
2137 scalar_int_mode wider_mode = wider_mode_iter.require ();
2138 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2139 {
2140 rtx xop0, temp;
2141 rtx_insn *last;
2142
2143 last = get_last_insn ();
2144
2145 if (target == 0)
2146 target = gen_reg_rtx (mode);
2147 xop0 = widen_operand (op0, wider_mode, mode,
2148 unoptab != clrsb_optab, false);
2149 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2150 unoptab != clrsb_optab);
2151 if (temp != 0)
2152 temp = expand_binop
2153 (wider_mode, sub_optab, temp,
2154 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2155 - GET_MODE_PRECISION (mode),
2156 wider_mode),
2157 target, true, OPTAB_DIRECT);
2158 if (temp == 0)
2159 delete_insns_since (last);
2160
2161 return temp;
2162 }
2163 }
2164 return 0;
2165 }
2166
2167 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2168 quantities, choosing which based on whether the high word is nonzero. */
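/* In C terms the expansion is (a sketch, assuming 32-bit word_mode):

     clz64 (x) = (hi != 0) ? clz32 (hi) : 32 + clz32 (lo);

   which is exactly the two-armed conditional emitted below.  */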
2169 static rtx
2170 expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
2171 {
2172 rtx xop0 = force_reg (mode, op0);
2173 rtx subhi = gen_highpart (word_mode, xop0);
2174 rtx sublo = gen_lowpart (word_mode, xop0);
2175 rtx_code_label *hi0_label = gen_label_rtx ();
2176 rtx_code_label *after_label = gen_label_rtx ();
2177 rtx_insn *seq;
2178 rtx temp, result;
2179
2180 /* If we were not given a target, use a word_mode register, not a
2181 'mode' register. The result will fit, and nobody is expecting
2182 anything bigger (the return type of __builtin_clz* is int). */
2183 if (!target)
2184 target = gen_reg_rtx (word_mode);
2185
2186 /* In any case, write to a word_mode scratch in both branches of the
2187 conditional, so we can ensure there is a single move insn setting
2188 'target' to tag a REG_EQUAL note on. */
2189 result = gen_reg_rtx (word_mode);
2190
2191 start_sequence ();
2192
2193 /* If the high word is not equal to zero,
2194 then clz of the full value is clz of the high word. */
2195 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2196 word_mode, true, hi0_label);
2197
2198 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2199 if (!temp)
2200 goto fail;
2201
2202 if (temp != result)
2203 convert_move (result, temp, true);
2204
2205 emit_jump_insn (targetm.gen_jump (after_label));
2206 emit_barrier ();
2207
2208 /* Else clz of the full value is clz of the low word plus the number
2209 of bits in the high word. */
2210 emit_label (hi0_label);
2211
2212 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2213 if (!temp)
2214 goto fail;
2215 temp = expand_binop (word_mode, add_optab, temp,
2216 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2217 result, true, OPTAB_DIRECT);
2218 if (!temp)
2219 goto fail;
2220 if (temp != result)
2221 convert_move (result, temp, true);
2222
2223 emit_label (after_label);
2224 convert_move (target, result, true);
2225
2226 seq = get_insns ();
2227 end_sequence ();
2228
2229 add_equal_note (seq, target, CLZ, xop0, 0);
2230 emit_insn (seq);
2231 return target;
2232
2233 fail:
2234 end_sequence ();
2235 return 0;
2236 }
2237
2238 /* Try calculating popcount of a double-word quantity as two popcount's of
2239 word-sized quantities and summing up the results. */
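/* That is, assuming 32-bit words:

     popcount64 (x) == popcount32 (lo) + popcount32 (hi),

   since every bit of the input is counted by exactly one of the two
   word-mode popcounts.  */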
2240 static rtx
2241 expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
2242 {
2243 rtx t0, t1, t;
2244 rtx_insn *seq;
2245
2246 start_sequence ();
2247
2248 t0 = expand_unop_direct (word_mode, popcount_optab,
2249 operand_subword_force (op0, 0, mode), NULL_RTX,
2250 true);
2251 t1 = expand_unop_direct (word_mode, popcount_optab,
2252 operand_subword_force (op0, 1, mode), NULL_RTX,
2253 true);
2254 if (!t0 || !t1)
2255 {
2256 end_sequence ();
2257 return NULL_RTX;
2258 }
2259
2260 /* If we were not given a target, use a word_mode register, not a
2261 'mode' register. The result will fit, and nobody is expecting
2262 anything bigger (the return type of __builtin_popcount* is int). */
2263 if (!target)
2264 target = gen_reg_rtx (word_mode);
2265
2266 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2267
2268 seq = get_insns ();
2269 end_sequence ();
2270
2271 add_equal_note (seq, t, POPCOUNT, op0, 0);
2272 emit_insn (seq);
2273 return t;
2274 }
2275
2276 /* Try calculating
2277 (parity:wide x)
2278 as
2279 (parity:narrow (low (x) ^ high (x))) */
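/* Parity is the xor of all the bits of the input, and xor is
   associative and commutative, so folding the two halves together
   first preserves it: assuming 32-bit words,
   parity64 (x) == parity32 (lo ^ hi).  */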
2280 static rtx
2281 expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
2282 {
2283 rtx t = expand_binop (word_mode, xor_optab,
2284 operand_subword_force (op0, 0, mode),
2285 operand_subword_force (op0, 1, mode),
2286 NULL_RTX, 0, OPTAB_DIRECT);
2287 return expand_unop (word_mode, parity_optab, t, target, true);
2288 }
2289
2290 /* Try calculating
2291 (bswap:narrow x)
2292 as
2293 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
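/* For example, assuming HImode widened to SImode:
   bswap32 (0x00001234) == 0x34120000, and a logical shift right by
   32 - 16 bits leaves 0x3412, the HImode byte swap of 0x1234.  */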
2294 static rtx
2295 widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
2296 {
2297 rtx x;
2298 rtx_insn *last;
2299 opt_scalar_int_mode wider_mode_iter;
2300
2301 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2302 if (optab_handler (bswap_optab, wider_mode_iter.require ())
2303 != CODE_FOR_nothing)
2304 break;
2305
2306 if (!wider_mode_iter.exists ())
2307 return NULL_RTX;
2308
2309 scalar_int_mode wider_mode = wider_mode_iter.require ();
2310 last = get_last_insn ();
2311
2312 x = widen_operand (op0, wider_mode, mode, true, true);
2313 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2314
2315 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2316 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2317 if (x != 0)
2318 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2319 GET_MODE_BITSIZE (wider_mode)
2320 - GET_MODE_BITSIZE (mode),
2321 NULL_RTX, true);
2322
2323 if (x != 0)
2324 {
2325 if (target == 0)
2326 target = gen_reg_rtx (mode);
2327 emit_move_insn (target, gen_lowpart (mode, x));
2328 }
2329 else
2330 delete_insns_since (last);
2331
2332 return target;
2333 }
2334
2335 /* Try calculating bswap as two bswaps of two word-sized operands. */
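/* Assuming 32-bit words, the byte swap of hi:lo is
   bswap32 (lo) : bswap32 (hi) -- each word is swapped internally and
   the two words exchange places, which is what the subword moves
   below implement.  */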
2336
2337 static rtx
2338 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2339 {
2340 rtx t0, t1;
2341
2342 t1 = expand_unop (word_mode, bswap_optab,
2343 operand_subword_force (op, 0, mode), NULL_RTX, true);
2344 t0 = expand_unop (word_mode, bswap_optab,
2345 operand_subword_force (op, 1, mode), NULL_RTX, true);
2346
2347 if (target == 0 || !valid_multiword_target_p (target))
2348 target = gen_reg_rtx (mode);
2349 if (REG_P (target))
2350 emit_clobber (target);
2351 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2352 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2353
2354 return target;
2355 }
2356
2357 /* Try calculating (parity x) as (and (popcount x) 1), where
2358 popcount can also be done in a wider mode. */
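/* For example, popcount (0b1011) == 3 and 3 & 1 == 1, the parity of
   three set bits; anding the population count with 1 extracts its low
   bit, which is the xor of all the input bits.  */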
2359 static rtx
2360 expand_parity (scalar_int_mode mode, rtx op0, rtx target)
2361 {
2362 enum mode_class mclass = GET_MODE_CLASS (mode);
2363 opt_scalar_int_mode wider_mode_iter;
2364 FOR_EACH_MODE_FROM (wider_mode_iter, mode)
2365 {
2366 scalar_int_mode wider_mode = wider_mode_iter.require ();
2367 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2368 {
2369 rtx xop0, temp;
2370 rtx_insn *last;
2371
2372 last = get_last_insn ();
2373
2374 if (target == 0 || GET_MODE (target) != wider_mode)
2375 target = gen_reg_rtx (wider_mode);
2376
2377 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2378 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2379 true);
2380 if (temp != 0)
2381 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2382 target, true, OPTAB_DIRECT);
2383
2384 if (temp)
2385 {
2386 if (mclass != MODE_INT
2387 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2388 return convert_to_mode (mode, temp, 0);
2389 else
2390 return gen_lowpart (mode, temp);
2391 }
2392 else
2393 delete_insns_since (last);
2394 }
2395 }
2396 return 0;
2397 }
2398
2399 /* Try calculating ctz(x) as K - clz(x & -x),
2400 where K is GET_MODE_PRECISION(mode) - 1.
2401
2402 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2403 don't have to worry about what the hardware does in that case. (If
2404 the clz instruction produces the usual value at 0, which is K, the
2405 result of this code sequence will be -1; expand_ffs, below, relies
2406 on this. It might be nice to have it be K instead, for consistency
2407 with the (very few) processors that provide a ctz with a defined
2408 value, but that would take one more instruction, and it would be
2409 less convenient for expand_ffs anyway.)  */
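/* Worked example, assuming a 32-bit mode: for x == 8 (binary 1000),
   x & -x == 8 isolates the lowest set bit, clz (8) == 28, and
   K - 28 == 31 - 28 == 3 == ctz (8).  */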
2410
2411 static rtx
2412 expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
2413 {
2414 rtx_insn *seq;
2415 rtx temp;
2416
2417 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2418 return 0;
2419
2420 start_sequence ();
2421
2422 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2423 if (temp)
2424 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2425 true, OPTAB_DIRECT);
2426 if (temp)
2427 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2428 if (temp)
2429 temp = expand_binop (mode, sub_optab,
2430 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2431 temp, target,
2432 true, OPTAB_DIRECT);
2433 if (temp == 0)
2434 {
2435 end_sequence ();
2436 return 0;
2437 }
2438
2439 seq = get_insns ();
2440 end_sequence ();
2441
2442 add_equal_note (seq, temp, CTZ, op0, 0);
2443 emit_insn (seq);
2444 return temp;
2445 }
2446
2447
2448 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2449 else with the sequence used by expand_ctz.
2450
2451 The ffs builtin promises to return zero for a zero value and ctz/clz
2452 may have an undefined value in that case. If they do not give us a
2453 convenient value, we have to generate a test and branch. */
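/* The identity used is ffs (x) == ctz (x) + 1 for x != 0, with
   ffs (0) == 0 by definition.  When ctz at zero does not already
   yield -1, the test-and-branch below stores -1 explicitly, so that
   the final increment produces 0 for a zero input.  */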
2454 static rtx
2455 expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
2456 {
2457 HOST_WIDE_INT val = 0;
2458 bool defined_at_zero = false;
2459 rtx temp;
2460 rtx_insn *seq;
2461
2462 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2463 {
2464 start_sequence ();
2465
2466 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2467 if (!temp)
2468 goto fail;
2469
2470 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2471 }
2472 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2473 {
2474 start_sequence ();
2475 temp = expand_ctz (mode, op0, 0);
2476 if (!temp)
2477 goto fail;
2478
2479 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2480 {
2481 defined_at_zero = true;
2482 val = (GET_MODE_PRECISION (mode) - 1) - val;
2483 }
2484 }
2485 else
2486 return 0;
2487
2488 if (defined_at_zero && val == -1)
2489 /* No correction needed at zero. */;
2490 else
2491 {
2492 /* We don't try to do anything clever with the situation found
2493 on some processors (e.g. Alpha) where ctz(0:mode) ==
2494 bitsize(mode). If someone can think of a way to send N to -1
2495 and leave alone all values in the range 0..N-1 (where N is a
2496 power of two), cheaper than this test-and-branch, please add it.
2497
2498 The test-and-branch is done after the operation itself, in case
2499 the operation sets condition codes that can be recycled for this.
2500 (This is true on i386, for instance.) */
2501
2502 rtx_code_label *nonzero_label = gen_label_rtx ();
2503 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2504 mode, true, nonzero_label);
2505
2506 convert_move (temp, GEN_INT (-1), false);
2507 emit_label (nonzero_label);
2508 }
2509
2510 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2511 to produce a value in the range 0..bitsize. */
2512 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2513 target, false, OPTAB_DIRECT);
2514 if (!temp)
2515 goto fail;
2516
2517 seq = get_insns ();
2518 end_sequence ();
2519
2520 add_equal_note (seq, temp, FFS, op0, 0);
2521 emit_insn (seq);
2522 return temp;
2523
2524 fail:
2525 end_sequence ();
2526 return 0;
2527 }
2528
2529 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2530 conditions, VAL may already be a SUBREG against which we cannot generate
2531 a further SUBREG. In this case, we expect forcing the value into a
2532 register will work around the situation. */
2533
2534 static rtx
2535 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2536 machine_mode imode)
2537 {
2538 rtx ret;
2539 ret = lowpart_subreg (omode, val, imode);
2540 if (ret == NULL)
2541 {
2542 val = force_reg (imode, val);
2543 ret = lowpart_subreg (omode, val, imode);
2544 gcc_assert (ret != NULL);
2545 }
2546 return ret;
2547 }
2548
2549 /* Expand a floating point absolute value or negation operation via a
2550 logical operation on the sign bit. */
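/* For IEEE single precision this is the familiar bit trick (a sketch,
   assuming the sign occupies bit 31 of the 32-bit image):

     abs (x):  bits & 0x7fffffff    clear the sign bit with AND
     neg (x):  bits ^ 0x80000000    flip the sign bit with XOR

   The code below builds the corresponding mask for whatever position
   fmt->signbit_rw reports.  */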
2551
2552 static rtx
2553 expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
2554 rtx op0, rtx target)
2555 {
2556 const struct real_format *fmt;
2557 int bitpos, word, nwords, i;
2558 scalar_int_mode imode;
2559 rtx temp;
2560 rtx_insn *insns;
2561
2562 /* The format has to have a simple sign bit. */
2563 fmt = REAL_MODE_FORMAT (mode);
2564 if (fmt == NULL)
2565 return NULL_RTX;
2566
2567 bitpos = fmt->signbit_rw;
2568 if (bitpos < 0)
2569 return NULL_RTX;
2570
2571 /* Don't create negative zeros if the format doesn't support them. */
2572 if (code == NEG && !fmt->has_signed_zero)
2573 return NULL_RTX;
2574
2575 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2576 {
2577 if (!int_mode_for_mode (mode).exists (&imode))
2578 return NULL_RTX;
2579 word = 0;
2580 nwords = 1;
2581 }
2582 else
2583 {
2584 imode = word_mode;
2585
2586 if (FLOAT_WORDS_BIG_ENDIAN)
2587 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2588 else
2589 word = bitpos / BITS_PER_WORD;
2590 bitpos = bitpos % BITS_PER_WORD;
2591 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2592 }
2593
2594 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2595 if (code == ABS)
2596 mask = ~mask;
2597
2598 if (target == 0
2599 || target == op0
2600 || (nwords > 1 && !valid_multiword_target_p (target)))
2601 target = gen_reg_rtx (mode);
2602
2603 if (nwords > 1)
2604 {
2605 start_sequence ();
2606
2607 for (i = 0; i < nwords; ++i)
2608 {
2609 rtx targ_piece = operand_subword (target, i, 1, mode);
2610 rtx op0_piece = operand_subword_force (op0, i, mode);
2611
2612 if (i == word)
2613 {
2614 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2615 op0_piece,
2616 immed_wide_int_const (mask, imode),
2617 targ_piece, 1, OPTAB_LIB_WIDEN);
2618 if (temp != targ_piece)
2619 emit_move_insn (targ_piece, temp);
2620 }
2621 else
2622 emit_move_insn (targ_piece, op0_piece);
2623 }
2624
2625 insns = get_insns ();
2626 end_sequence ();
2627
2628 emit_insn (insns);
2629 }
2630 else
2631 {
2632 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2633 gen_lowpart (imode, op0),
2634 immed_wide_int_const (mask, imode),
2635 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2636 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2637
2638 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2639 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2640 target);
2641 }
2642
2643 return target;
2644 }
2645
2646 /* As expand_unop, but will fail rather than attempt the operation in a
2647 different mode or with a libcall. */
2648 static rtx
2649 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2650 int unsignedp)
2651 {
2652 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2653 {
2654 struct expand_operand ops[2];
2655 enum insn_code icode = optab_handler (unoptab, mode);
2656 rtx_insn *last = get_last_insn ();
2657 rtx_insn *pat;
2658
2659 create_output_operand (&ops[0], target, mode);
2660 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2661 pat = maybe_gen_insn (icode, 2, ops);
2662 if (pat)
2663 {
2664 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2665 && ! add_equal_note (pat, ops[0].value,
2666 optab_to_code (unoptab),
2667 ops[1].value, NULL_RTX))
2668 {
2669 delete_insns_since (last);
2670 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2671 }
2672
2673 emit_insn (pat);
2674
2675 return ops[0].value;
2676 }
2677 }
2678 return 0;
2679 }
2680
2681 /* Generate code to perform an operation specified by UNOPTAB
2682 on operand OP0, with result having machine-mode MODE.
2683
2684 UNSIGNEDP is for the case where we have to widen the operands
2685 to perform the operation. It says to use zero-extension.
2686
2687 If TARGET is nonzero, the value
2688 is generated there, if it is convenient to do so.
2689 In all cases an rtx is returned for the locus of the value;
2690 this may or may not be TARGET. */
2691
2692 rtx
2693 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
2694 int unsignedp)
2695 {
2696 enum mode_class mclass = GET_MODE_CLASS (mode);
2697 machine_mode wider_mode;
2698 scalar_int_mode int_mode;
2699 scalar_float_mode float_mode;
2700 rtx temp;
2701 rtx libfunc;
2702
2703 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2704 if (temp)
2705 return temp;
2706
2707 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2708
2709 /* Widening (or narrowing) clz needs special treatment. */
2710 if (unoptab == clz_optab)
2711 {
2712 if (is_a <scalar_int_mode> (mode, &int_mode))
2713 {
2714 temp = widen_leading (int_mode, op0, target, unoptab);
2715 if (temp)
2716 return temp;
2717
2718 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2719 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2720 {
2721 temp = expand_doubleword_clz (int_mode, op0, target);
2722 if (temp)
2723 return temp;
2724 }
2725 }
2726
2727 goto try_libcall;
2728 }
2729
2730 if (unoptab == clrsb_optab)
2731 {
2732 if (is_a <scalar_int_mode> (mode, &int_mode))
2733 {
2734 temp = widen_leading (int_mode, op0, target, unoptab);
2735 if (temp)
2736 return temp;
2737 }
2738 goto try_libcall;
2739 }
2740
2741 if (unoptab == popcount_optab
2742 && is_a <scalar_int_mode> (mode, &int_mode)
2743 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2744 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2745 && optimize_insn_for_speed_p ())
2746 {
2747 temp = expand_doubleword_popcount (int_mode, op0, target);
2748 if (temp)
2749 return temp;
2750 }
2751
2752 if (unoptab == parity_optab
2753 && is_a <scalar_int_mode> (mode, &int_mode)
2754 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2755 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2756 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2757 && optimize_insn_for_speed_p ())
2758 {
2759 temp = expand_doubleword_parity (int_mode, op0, target);
2760 if (temp)
2761 return temp;
2762 }
2763
2764 /* Widening (or narrowing) bswap needs special treatment. */
2765 if (unoptab == bswap_optab)
2766 {
2767 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2768 or ROTATERT. First try these directly; if this fails, then try the
2769 obvious pair of shifts with allowed widening, as this will probably
2770 always be more efficient than the other fallback methods.  */
2771 if (mode == HImode)
2772 {
2773 rtx_insn *last;
2774 rtx temp1, temp2;
2775
2776 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2777 {
2778 temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
2779 unsignedp, OPTAB_DIRECT);
2780 if (temp)
2781 return temp;
2782 }
2783
2784 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2785 {
2786 temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
2787 unsignedp, OPTAB_DIRECT);
2788 if (temp)
2789 return temp;
2790 }
2791
2792 last = get_last_insn ();
2793
2794 temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
2795 unsignedp, OPTAB_WIDEN);
2796 temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
2797 unsignedp, OPTAB_WIDEN);
2798 if (temp1 && temp2)
2799 {
2800 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2801 unsignedp, OPTAB_WIDEN);
2802 if (temp)
2803 return temp;
2804 }
2805
2806 delete_insns_since (last);
2807 }
2808
2809 if (is_a <scalar_int_mode> (mode, &int_mode))
2810 {
2811 temp = widen_bswap (int_mode, op0, target);
2812 if (temp)
2813 return temp;
2814
2815 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2816 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2817 {
2818 temp = expand_doubleword_bswap (mode, op0, target);
2819 if (temp)
2820 return temp;
2821 }
2822 }
2823
2824 goto try_libcall;
2825 }
2826
2827 if (CLASS_HAS_WIDER_MODES_P (mclass))
2828 FOR_EACH_WIDER_MODE (wider_mode, mode)
2829 {
2830 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2831 {
2832 rtx xop0 = op0;
2833 rtx_insn *last = get_last_insn ();
2834
2835 /* For certain operations, we need not actually extend
2836 the narrow operand, as long as we will truncate the
2837 results to the same narrowness. */
2838
2839 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2840 (unoptab == neg_optab
2841 || unoptab == one_cmpl_optab)
2842 && mclass == MODE_INT);
2843
2844 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2845 unsignedp);
2846
2847 if (temp)
2848 {
2849 if (mclass != MODE_INT
2850 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2851 {
2852 if (target == 0)
2853 target = gen_reg_rtx (mode);
2854 convert_move (target, temp, 0);
2855 return target;
2856 }
2857 else
2858 return gen_lowpart (mode, temp);
2859 }
2860 else
2861 delete_insns_since (last);
2862 }
2863 }
2864
2865 /* These can be done a word at a time. */
2866 if (unoptab == one_cmpl_optab
2867 && is_int_mode (mode, &int_mode)
2868 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
2869 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2870 {
2871 int i;
2872 rtx_insn *insns;
2873
2874 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
2875 target = gen_reg_rtx (int_mode);
2876
2877 start_sequence ();
2878
2879 /* Do the actual arithmetic. */
2880 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
2881 {
2882 rtx target_piece = operand_subword (target, i, 1, int_mode);
2883 rtx x = expand_unop (word_mode, unoptab,
2884 operand_subword_force (op0, i, int_mode),
2885 target_piece, unsignedp);
2886
2887 if (target_piece != x)
2888 emit_move_insn (target_piece, x);
2889 }
2890
2891 insns = get_insns ();
2892 end_sequence ();
2893
2894 emit_insn (insns);
2895 return target;
2896 }
2897
2898 if (optab_to_code (unoptab) == NEG)
2899 {
2900 /* Try negating floating point values by flipping the sign bit. */
2901 if (is_a <scalar_float_mode> (mode, &float_mode))
2902 {
2903 temp = expand_absneg_bit (NEG, float_mode, op0, target);
2904 if (temp)
2905 return temp;
2906 }
2907
2908 /* If there is no negation pattern, and we have no negative zero,
2909 try subtracting from zero. */
2910 if (!HONOR_SIGNED_ZEROS (mode))
2911 {
2912 temp = expand_binop (mode, (unoptab == negv_optab
2913 ? subv_optab : sub_optab),
2914 CONST0_RTX (mode), op0, target,
2915 unsignedp, OPTAB_DIRECT);
2916 if (temp)
2917 return temp;
2918 }
2919 }
2920
2921 /* Try calculating parity (x) as popcount (x) % 2. */
2922 if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
2923 {
2924 temp = expand_parity (int_mode, op0, target);
2925 if (temp)
2926 return temp;
2927 }
2928
2929 /* Try implementing ffs (x) in terms of clz (x). */
2930 if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
2931 {
2932 temp = expand_ffs (int_mode, op0, target);
2933 if (temp)
2934 return temp;
2935 }
2936
2937 /* Try implementing ctz (x) in terms of clz (x). */
2938 if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
2939 {
2940 temp = expand_ctz (int_mode, op0, target);
2941 if (temp)
2942 return temp;
2943 }
2944
2945 try_libcall:
2946 /* Now try a library call in this mode. */
2947 libfunc = optab_libfunc (unoptab, mode);
2948 if (libfunc)
2949 {
2950 rtx_insn *insns;
2951 rtx value;
2952 rtx eq_value;
2953 machine_mode outmode = mode;
2954
2955 /* All of these functions return small values. Thus we choose to
2956 have them return something that isn't a double-word. */
2957 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2958 || unoptab == clrsb_optab || unoptab == popcount_optab
2959 || unoptab == parity_optab)
2960 outmode
2961 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
2962 optab_libfunc (unoptab, mode)));
2963
2964 start_sequence ();
2965
2966 /* Pass 1 for NO_QUEUE so we don't lose any increments
2967 if the libcall is cse'd or moved. */
2968 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
2969 op0, mode);
2970 insns = get_insns ();
2971 end_sequence ();
2972
2973 target = gen_reg_rtx (outmode);
2974 bool trapv = trapv_unoptab_p (unoptab);
2975 if (trapv)
2976 eq_value = NULL_RTX;
2977 else
2978 {
2979 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
2980 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
2981 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
2982 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
2983 eq_value = simplify_gen_unary (ZERO_EXTEND,
2984 outmode, eq_value, mode);
2985 }
2986 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
2987
2988 return target;
2989 }
2990
2991 /* It can't be done in this mode. Can we do it in a wider mode? */
2992
2993 if (CLASS_HAS_WIDER_MODES_P (mclass))
2994 {
2995 FOR_EACH_WIDER_MODE (wider_mode, mode)
2996 {
2997 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
2998 || optab_libfunc (unoptab, wider_mode))
2999 {
3000 rtx xop0 = op0;
3001 rtx_insn *last = get_last_insn ();
3002
3003 /* For certain operations, we need not actually extend
3004 the narrow operand, as long as we will truncate the
3005 results to the same narrowness. */
3006 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3007 (unoptab == neg_optab
3008 || unoptab == one_cmpl_optab
3009 || unoptab == bswap_optab)
3010 && mclass == MODE_INT);
3011
3012 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3013 unsignedp);
3014
3015 /* If we are generating clz using a wider mode, adjust the
3016 result.  Similarly for clrsb.  */
3017 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3018 && temp != 0)
3019 {
3020 scalar_int_mode wider_int_mode
3021 = as_a <scalar_int_mode> (wider_mode);
3022 int_mode = as_a <scalar_int_mode> (mode);
3023 temp = expand_binop
3024 (wider_mode, sub_optab, temp,
3025 gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
3026 - GET_MODE_PRECISION (int_mode),
3027 wider_int_mode),
3028 target, true, OPTAB_DIRECT);
3029 }
3030
3031 /* Likewise for bswap. */
3032 if (unoptab == bswap_optab && temp != 0)
3033 {
3034 scalar_int_mode wider_int_mode
3035 = as_a <scalar_int_mode> (wider_mode);
3036 int_mode = as_a <scalar_int_mode> (mode);
3037 gcc_assert (GET_MODE_PRECISION (wider_int_mode)
3038 == GET_MODE_BITSIZE (wider_int_mode)
3039 && GET_MODE_PRECISION (int_mode)
3040 == GET_MODE_BITSIZE (int_mode));
3041
3042 temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
3043 GET_MODE_BITSIZE (wider_int_mode)
3044 - GET_MODE_BITSIZE (int_mode),
3045 NULL_RTX, true);
3046 }
3047
3048 if (temp)
3049 {
3050 if (mclass != MODE_INT)
3051 {
3052 if (target == 0)
3053 target = gen_reg_rtx (mode);
3054 convert_move (target, temp, 0);
3055 return target;
3056 }
3057 else
3058 return gen_lowpart (mode, temp);
3059 }
3060 else
3061 delete_insns_since (last);
3062 }
3063 }
3064 }
3065
3066 /* One final attempt at implementing negation via subtraction,
3067 this time allowing widening of the operand. */
3068 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3069 {
3070 rtx temp;
3071 temp = expand_binop (mode,
3072 unoptab == negv_optab ? subv_optab : sub_optab,
3073 CONST0_RTX (mode), op0,
3074 target, unsignedp, OPTAB_LIB_WIDEN);
3075 if (temp)
3076 return temp;
3077 }
3078
3079 return 0;
3080 }
3081 \f
3082 /* Emit code to compute the absolute value of OP0, with result to
3083 TARGET if convenient. (TARGET may be 0.) The return value says
3084 where the result actually is to be found.
3085
3086 MODE is the mode of the operand; the mode of the result is
3087 different but can be deduced from MODE.  */
3090
3091 rtx
3092 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3093 int result_unsignedp)
3094 {
3095 rtx temp;
3096
3097 if (GET_MODE_CLASS (mode) != MODE_INT
3098 || ! flag_trapv)
3099 result_unsignedp = 1;
3100
3101 /* First try to do it with a special abs instruction. */
3102 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3103 op0, target, 0);
3104 if (temp != 0)
3105 return temp;
3106
3107 /* For floating point modes, try clearing the sign bit. */
3108 scalar_float_mode float_mode;
3109 if (is_a <scalar_float_mode> (mode, &float_mode))
3110 {
3111 temp = expand_absneg_bit (ABS, float_mode, op0, target);
3112 if (temp)
3113 return temp;
3114 }
3115
3116 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3117 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3118 && !HONOR_SIGNED_ZEROS (mode))
3119 {
3120 rtx_insn *last = get_last_insn ();
3121
3122 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3123 op0, NULL_RTX, 0);
3124 if (temp != 0)
3125 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3126 OPTAB_WIDEN);
3127
3128 if (temp != 0)
3129 return temp;
3130
3131 delete_insns_since (last);
3132 }
3133
3134 /* If this machine has expensive jumps, we can do integer absolute
3135 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3136 where W is the width of MODE. */
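/* For negative X the arithmetic shift yields an all-ones mask M == -1,
   and (X ^ M) - M == ~X + 1 == -X; for non-negative X the mask is 0
   and the expression leaves X unchanged.  Three cheap instructions
   and no branch.  */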
3137
3138 scalar_int_mode int_mode;
3139 if (is_int_mode (mode, &int_mode)
3140 && BRANCH_COST (optimize_insn_for_speed_p (),
3141 false) >= 2)
3142 {
3143 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3144 GET_MODE_PRECISION (int_mode) - 1,
3145 NULL_RTX, 0);
3146
3147 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3148 OPTAB_LIB_WIDEN);
3149 if (temp != 0)
3150 temp = expand_binop (int_mode,
3151 result_unsignedp ? sub_optab : subv_optab,
3152 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3153
3154 if (temp != 0)
3155 return temp;
3156 }
3157
3158 return NULL_RTX;
3159 }
3160
3161 rtx
3162 expand_abs (machine_mode mode, rtx op0, rtx target,
3163 int result_unsignedp, int safe)
3164 {
3165 rtx temp;
3166 rtx_code_label *op1;
3167
3168 if (GET_MODE_CLASS (mode) != MODE_INT
3169 || ! flag_trapv)
3170 result_unsignedp = 1;
3171
3172 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3173 if (temp != 0)
3174 return temp;
3175
3176 /* If that does not win, use conditional jump and negate. */
3177
3178 /* It is safe to use the target if it is the same
3179 as the source and is a pseudo register.  */
3180 if (op0 == target && REG_P (op0)
3181 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3182 safe = 1;
3183
3184 op1 = gen_label_rtx ();
3185 if (target == 0 || ! safe
3186 || GET_MODE (target) != mode
3187 || (MEM_P (target) && MEM_VOLATILE_P (target))
3188 || (REG_P (target)
3189 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3190 target = gen_reg_rtx (mode);
3191
3192 emit_move_insn (target, op0);
3193 NO_DEFER_POP;
3194
3195 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3196 NULL_RTX, NULL, op1,
3197 profile_probability::uninitialized ());
3198
3199 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3200 target, target, 0);
3201 if (op0 != target)
3202 emit_move_insn (target, op0);
3203 emit_label (op1);
3204 OK_DEFER_POP;
3205 return target;
3206 }
3207
3208 /* Emit code to compute the one's complement absolute value of OP0
3209 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3210 (TARGET may be NULL_RTX.) The return value says where the result
3211 actually is to be found.
3212
3213 MODE is the mode of the operand; the mode of the result is
3214 different but can be deduced from MODE. */
3215
3216 rtx
3217 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3218 {
3219 rtx temp;
3220
3221 /* Not applicable for floating point modes. */
3222 if (FLOAT_MODE_P (mode))
3223 return NULL_RTX;
3224
3225 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3226 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3227 {
3228 rtx_insn *last = get_last_insn ();
3229
3230 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3231 if (temp != 0)
3232 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3233 OPTAB_WIDEN);
3234
3235 if (temp != 0)
3236 return temp;
3237
3238 delete_insns_since (last);
3239 }
3240
3241 /* If this machine has expensive jumps, we can do one's complement
3242 absolute value of X as (((signed) x >> (W-1)) ^ x). */
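/* This is the mask trick above without the final subtraction: for
   negative X the mask is -1 and X ^ -1 == ~X, while a non-negative X
   passes through unchanged.  */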
3243
3244 scalar_int_mode int_mode;
3245 if (is_int_mode (mode, &int_mode)
3246 && BRANCH_COST (optimize_insn_for_speed_p (),
3247 false) >= 2)
3248 {
3249 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3250 GET_MODE_PRECISION (int_mode) - 1,
3251 NULL_RTX, 0);
3252
3253 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3254 OPTAB_LIB_WIDEN);
3255
3256 if (temp != 0)
3257 return temp;
3258 }
3259
3260 return NULL_RTX;
3261 }
3262
3263 /* A subroutine of expand_copysign, perform the copysign operation using the
3264 abs and neg primitives advertised to exist on the target.  The assumption
3265 is that we have a split register file, and that leaving op0 in fp registers
3266 rather than playing with subregs will help the register allocator.  */
3267
3268 static rtx
3269 expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3270 int bitpos, bool op0_is_abs)
3271 {
3272 scalar_int_mode imode;
3273 enum insn_code icode;
3274 rtx sign;
3275 rtx_code_label *label;
3276
3277 if (target == op1)
3278 target = NULL_RTX;
3279
3280 /* Check if the back end provides an insn that handles signbit for the
3281 argument's mode. */
3282 icode = optab_handler (signbit_optab, mode);
3283 if (icode != CODE_FOR_nothing)
3284 {
3285 imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
3286 sign = gen_reg_rtx (imode);
3287 emit_unop_insn (icode, sign, op1, UNKNOWN);
3288 }
3289 else
3290 {
3291 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3292 {
3293 if (!int_mode_for_mode (mode).exists (&imode))
3294 return NULL_RTX;
3295 op1 = gen_lowpart (imode, op1);
3296 }
3297 else
3298 {
3299 int word;
3300
3301 imode = word_mode;
3302 if (FLOAT_WORDS_BIG_ENDIAN)
3303 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3304 else
3305 word = bitpos / BITS_PER_WORD;
3306 bitpos = bitpos % BITS_PER_WORD;
3307 op1 = operand_subword_force (op1, word, mode);
3308 }
3309
3310 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3311 sign = expand_binop (imode, and_optab, op1,
3312 immed_wide_int_const (mask, imode),
3313 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3314 }
3315
3316 if (!op0_is_abs)
3317 {
3318 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3319 if (op0 == NULL)
3320 return NULL_RTX;
3321 target = op0;
3322 }
3323 else
3324 {
3325 if (target == NULL_RTX)
3326 target = copy_to_reg (op0);
3327 else
3328 emit_move_insn (target, op0);
3329 }
3330
3331 label = gen_label_rtx ();
3332 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3333
3334 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3335 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3336 else
3337 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3338 if (op0 != target)
3339 emit_move_insn (target, op0);
3340
3341 emit_label (label);
3342
3343 return target;
3344 }
3345
3346
3347 /* A subroutine of expand_copysign, perform the entire copysign operation
3348 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3349 is true if op0 is known to have its sign bit clear. */
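/* The underlying identity, assuming a mask S with only the sign bit
   set:

     copysign (x, y) == (bits (x) & ~S) | (bits (y) & S)

   e.g. S == 0x80000000 for IEEE single precision.  When OP0_IS_ABS
   the AND with ~S on op0 is unnecessary and is skipped below.  */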
3350
3351 static rtx
3352 expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3353 int bitpos, bool op0_is_abs)
3354 {
3355 scalar_int_mode imode;
3356 int word, nwords, i;
3357 rtx temp;
3358 rtx_insn *insns;
3359
3360 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3361 {
3362 if (!int_mode_for_mode (mode).exists (&imode))
3363 return NULL_RTX;
3364 word = 0;
3365 nwords = 1;
3366 }
3367 else
3368 {
3369 imode = word_mode;
3370
3371 if (FLOAT_WORDS_BIG_ENDIAN)
3372 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3373 else
3374 word = bitpos / BITS_PER_WORD;
3375 bitpos = bitpos % BITS_PER_WORD;
3376 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3377 }
3378
3379 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3380
3381 if (target == 0
3382 || target == op0
3383 || target == op1
3384 || (nwords > 1 && !valid_multiword_target_p (target)))
3385 target = gen_reg_rtx (mode);
3386
3387 if (nwords > 1)
3388 {
3389 start_sequence ();
3390
3391 for (i = 0; i < nwords; ++i)
3392 {
3393 rtx targ_piece = operand_subword (target, i, 1, mode);
3394 rtx op0_piece = operand_subword_force (op0, i, mode);
3395
3396 if (i == word)
3397 {
3398 if (!op0_is_abs)
3399 op0_piece
3400 = expand_binop (imode, and_optab, op0_piece,
3401 immed_wide_int_const (~mask, imode),
3402 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3403 op1 = expand_binop (imode, and_optab,
3404 operand_subword_force (op1, i, mode),
3405 immed_wide_int_const (mask, imode),
3406 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3407
3408 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3409 targ_piece, 1, OPTAB_LIB_WIDEN);
3410 if (temp != targ_piece)
3411 emit_move_insn (targ_piece, temp);
3412 }
3413 else
3414 emit_move_insn (targ_piece, op0_piece);
3415 }
3416
3417 insns = get_insns ();
3418 end_sequence ();
3419
3420 emit_insn (insns);
3421 }
3422 else
3423 {
3424 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3425 immed_wide_int_const (mask, imode),
3426 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3427
3428 op0 = gen_lowpart (imode, op0);
3429 if (!op0_is_abs)
3430 op0 = expand_binop (imode, and_optab, op0,
3431 immed_wide_int_const (~mask, imode),
3432 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3433
3434 temp = expand_binop (imode, ior_optab, op0, op1,
3435 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3436 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3437 }
3438
3439 return target;
3440 }
3441
3442 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3443 scalar floating point mode. Return NULL if we do not know how to
3444 expand the operation inline. */
3445
3446 rtx
3447 expand_copysign (rtx op0, rtx op1, rtx target)
3448 {
3449 scalar_float_mode mode;
3450 const struct real_format *fmt;
3451 bool op0_is_abs;
3452 rtx temp;
3453
3454 mode = as_a <scalar_float_mode> (GET_MODE (op0));
3455 gcc_assert (GET_MODE (op1) == mode);
3456
3457 /* First try to do it with a special instruction. */
3458 temp = expand_binop (mode, copysign_optab, op0, op1,
3459 target, 0, OPTAB_DIRECT);
3460 if (temp)
3461 return temp;
3462
3463 fmt = REAL_MODE_FORMAT (mode);
3464 if (fmt == NULL || !fmt->has_signed_zero)
3465 return NULL_RTX;
3466
3467 op0_is_abs = false;
3468 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3469 {
3470 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3471 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3472 op0_is_abs = true;
3473 }
3474
3475 if (fmt->signbit_ro >= 0
3476 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3477 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3478 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3479 {
3480 temp = expand_copysign_absneg (mode, op0, op1, target,
3481 fmt->signbit_ro, op0_is_abs);
3482 if (temp)
3483 return temp;
3484 }
3485
3486 if (fmt->signbit_rw < 0)
3487 return NULL_RTX;
3488 return expand_copysign_bit (mode, op0, op1, target,
3489 fmt->signbit_rw, op0_is_abs);
3490 }
3491 \f
3492 /* Generate an instruction whose insn-code is INSN_CODE,
3493 with two operands: an output TARGET and an input OP0.
3494 TARGET *must* be nonzero, and the output is always stored there.
3495 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3496 the value that is stored into TARGET.
3497
3498 Return false if expansion failed. */
3499
3500 bool
3501 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3502 enum rtx_code code)
3503 {
3504 struct expand_operand ops[2];
3505 rtx_insn *pat;
3506
3507 create_output_operand (&ops[0], target, GET_MODE (target));
3508 create_input_operand (&ops[1], op0, GET_MODE (op0));
3509 pat = maybe_gen_insn (icode, 2, ops);
3510 if (!pat)
3511 return false;
3512
3513 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3514 && code != UNKNOWN)
3515 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3516
3517 emit_insn (pat);
3518
3519 if (ops[0].value != target)
3520 emit_move_insn (target, ops[0].value);
3521 return true;
3522 }
3523 /* Generate an instruction whose insn-code is INSN_CODE,
3524 with two operands: an output TARGET and an input OP0.
3525 TARGET *must* be nonzero, and the output is always stored there.
3526 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3527 the value that is stored into TARGET. */
3528
3529 void
3530 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3531 {
3532 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3533 gcc_assert (ok);
3534 }
3535 \f
3536 struct no_conflict_data
3537 {
3538 rtx target;
3539 rtx_insn *first, *insn;
3540 bool must_stay;
3541 };
3542
3543 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3544 the currently examined clobber / store has to stay in the list of
3545 insns that constitute the actual libcall block. */
3546 static void
3547 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3548 {
3549 struct no_conflict_data *p = (struct no_conflict_data *) p0;
3550
3551 /* If this insn directly contributes to setting the target, it must stay. */
3552 if (reg_overlap_mentioned_p (p->target, dest))
3553 p->must_stay = true;
3554 /* If we haven't committed to keeping any other insns in the list yet,
3555 there is nothing more to check. */
3556 else if (p->insn == p->first)
3557 return;
3558 /* If this insn sets / clobbers a register that feeds one of the insns
3559 already in the list, this insn has to stay too. */
3560 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3561 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3562 || reg_used_between_p (dest, p->first, p->insn)
3563 /* Likewise if this insn depends on a register set by a previous
3564 insn in the list, or if it sets a result (presumably a hard
3565 register) that is set or clobbered by a previous insn.
3566 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3567 SET_DEST perform the former check on the address, and the latter
3568 check on the MEM. */
3569 || (GET_CODE (set) == SET
3570 && (modified_in_p (SET_SRC (set), p->first)
3571 || modified_in_p (SET_DEST (set), p->first)
3572 || modified_between_p (SET_SRC (set), p->first, p->insn)
3573 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3574 p->must_stay = true;
3575 }
3576
3577 \f
3578 /* Emit code to make a call to a constant function or a library call.
3579
3580 INSNS is a list containing all insns emitted in the call.
3581 These insns leave the result in RESULT. Our job is to copy RESULT
3582 to TARGET, which is logically equivalent to EQUIV.
3583
3584 We first emit any insns that set a pseudo on the assumption that these are
3585 loading constants into registers; doing so allows them to be safely cse'ed
3586 between blocks. Then we emit all the other insns in the block, followed by
3587 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3588 note with an operand of EQUIV. */
3589
3590 static void
3591 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3592 bool equiv_may_trap)
3593 {
3594 rtx final_dest = target;
3595 rtx_insn *next, *last, *insn;
3596
3597 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3598 into a MEM later. Protect the libcall block from this change. */
3599 if (! REG_P (target) || REG_USERVAR_P (target))
3600 target = gen_reg_rtx (GET_MODE (target));
3601
3602 /* If we're using non-call exceptions, a libcall corresponding to an
3603 operation that may trap may also trap. */
3604 /* ??? See the comment in front of make_reg_eh_region_note. */
3605 if (cfun->can_throw_non_call_exceptions
3606 && (equiv_may_trap || may_trap_p (equiv)))
3607 {
3608 for (insn = insns; insn; insn = NEXT_INSN (insn))
3609 if (CALL_P (insn))
3610 {
3611 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3612 if (note)
3613 {
3614 int lp_nr = INTVAL (XEXP (note, 0));
3615 if (lp_nr == 0 || lp_nr == INT_MIN)
3616 remove_note (insn, note);
3617 }
3618 }
3619 }
3620 else
3621 {
3622 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3623 reg note to indicate that this call cannot throw or execute a nonlocal
3624 goto (unless there is already a REG_EH_REGION note, in which case
3625 we update it). */
3626 for (insn = insns; insn; insn = NEXT_INSN (insn))
3627 if (CALL_P (insn))
3628 make_reg_eh_region_note_nothrow_nononlocal (insn);
3629 }
3630
3631 /* First emit all insns that set pseudos. Remove them from the list as
3632 we go. Avoid insns that set pseudos which were referenced in previous
3633 insns. These can be generated by move_by_pieces, for example,
3634 to update an address. Similarly, avoid insns that reference things
3635 set in previous insns. */
3636
3637 for (insn = insns; insn; insn = next)
3638 {
3639 rtx set = single_set (insn);
3640
3641 next = NEXT_INSN (insn);
3642
3643 if (set != 0 && REG_P (SET_DEST (set))
3644 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3645 {
3646 struct no_conflict_data data;
3647
3648 data.target = const0_rtx;
3649 data.first = insns;
3650 data.insn = insn;
3651 data.must_stay = 0;
3652 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3653 if (! data.must_stay)
3654 {
3655 if (PREV_INSN (insn))
3656 SET_NEXT_INSN (PREV_INSN (insn)) = next;
3657 else
3658 insns = next;
3659
3660 if (next)
3661 SET_PREV_INSN (next) = PREV_INSN (insn);
3662
3663 add_insn (insn);
3664 }
3665 }
3666
3667 /* Some ports use a loop to copy large arguments onto the stack.
3668 Don't move anything outside such a loop. */
3669 if (LABEL_P (insn))
3670 break;
3671 }
3672
3673 /* Write the remaining insns followed by the final copy. */
3674 for (insn = insns; insn; insn = next)
3675 {
3676 next = NEXT_INSN (insn);
3677
3678 add_insn (insn);
3679 }
3680
3681 last = emit_move_insn (target, result);
3682 if (equiv)
3683 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3684
3685 if (final_dest != target)
3686 emit_move_insn (final_dest, target);
3687 }
3688
3689 void
3690 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
3691 {
3692 emit_libcall_block_1 (insns, target, result, equiv, false);
3693 }
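
/* Editorial sketch (not part of GCC): the insn-list surgery in
   emit_libcall_block_1 is ordinary doubly-linked-list unlinking.  A
   minimal standalone version with a hypothetical node type, fenced off
   with #if 0:  */
#if 0
struct node { struct node *prev, *next; };

/* Unlink N from the list whose head is *HEAD, mirroring the
   SET_NEXT_INSN / SET_PREV_INSN updates above.  */
static void
unlink_node (struct node **head, struct node *n)
{
  if (n->prev)
    n->prev->next = n->next;
  else
    *head = n->next;
  if (n->next)
    n->next->prev = n->prev;
}
#endif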
3694 \f
3695 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3696 PURPOSE describes how this comparison will be used. CODE is the rtx
3697 comparison code we will be using.
3698
3699 ??? Actually, CODE is slightly weaker than that. A target is still
3700 required to implement all of the normal bcc operations, but not
3701 required to implement all (or any) of the unordered bcc operations. */
3702
3703 int
3704 can_compare_p (enum rtx_code code, machine_mode mode,
3705 enum can_compare_purpose purpose)
3706 {
3707 rtx test;
3708 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3709 do
3710 {
3711 enum insn_code icode;
3712
3713 if (purpose == ccp_jump
3714 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3715 && insn_operand_matches (icode, 0, test))
3716 return 1;
3717 if (purpose == ccp_store_flag
3718 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3719 && insn_operand_matches (icode, 1, test))
3720 return 1;
3721 if (purpose == ccp_cmov
3722 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3723 return 1;
3724
3725 mode = GET_MODE_WIDER_MODE (mode).else_void ();
3726 PUT_MODE (test, mode);
3727 }
3728 while (mode != VOIDmode);
3729
3730 return 0;
3731 }
3732
3733 /* This function is called when we are going to emit a compare instruction that
3734 compares the values found in X and Y, using the rtl operator COMPARISON.
3735
3736 If they have mode BLKmode, then SIZE specifies the size of both operands.
3737
3738 UNSIGNEDP nonzero says that the operands are unsigned;
3739 this matters if they need to be widened (as given by METHODS).
3740
3741 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3742 if we failed to produce one.
3743
3744 *PMODE is the mode of the inputs (in case they are const_int).
3745
3746 This function performs all the setup necessary so that the caller only has
3747 to emit a single comparison insn. This setup can involve doing a BLKmode
3748 comparison or emitting a library call to perform the comparison if no insn
3749 is available to handle it.
3750 The values which are passed in through pointers can be modified; the caller
3751 should perform the comparison on the modified values. Constant
3752 comparisons must have already been folded. */
3753
3754 static void
3755 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3756 int unsignedp, enum optab_methods methods,
3757 rtx *ptest, machine_mode *pmode)
3758 {
3759 machine_mode mode = *pmode;
3760 rtx libfunc, test;
3761 machine_mode cmp_mode;
3762 enum mode_class mclass;
3763
3764 /* The other methods are not needed. */
3765 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3766 || methods == OPTAB_LIB_WIDEN);
3767
3768 /* If we are optimizing, force expensive constants into a register. */
3769 if (CONSTANT_P (x) && optimize
3770 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
3771 > COSTS_N_INSNS (1)))
3772 x = force_reg (mode, x);
3773
3774 if (CONSTANT_P (y) && optimize
3775 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
3776 > COSTS_N_INSNS (1)))
3777 y = force_reg (mode, y);
3778
3779 #if HAVE_cc0
3780 /* Make sure we have a canonical comparison. The RTL
3781 documentation states that canonical comparisons are required only
3782 for targets which have cc0. */
3783 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3784 #endif
3785
3786 /* Don't let both operands fail to indicate the mode. */
3787 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3788 x = force_reg (mode, x);
3789 if (mode == VOIDmode)
3790 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3791
3792 /* Handle all BLKmode compares. */
3793
3794 if (mode == BLKmode)
3795 {
3796 machine_mode result_mode;
3797 enum insn_code cmp_code;
3798 rtx result;
3799 rtx opalign
3800 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3801
3802 gcc_assert (size);
3803
3804 /* Try to use a memory block compare insn - either cmpstr
3805 or cmpmem will do. */
3806 opt_scalar_int_mode cmp_mode_iter;
3807 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
3808 {
3809 scalar_int_mode cmp_mode = cmp_mode_iter.require ();
3810 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3811 if (cmp_code == CODE_FOR_nothing)
3812 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3813 if (cmp_code == CODE_FOR_nothing)
3814 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3815 if (cmp_code == CODE_FOR_nothing)
3816 continue;
3817
3818 /* Must make sure the size fits the insn's mode. */
3819 if (CONST_INT_P (size)
3820 ? INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))
3821 : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
3822 > GET_MODE_BITSIZE (cmp_mode)))
3823 continue;
3824
3825 result_mode = insn_data[cmp_code].operand[0].mode;
3826 result = gen_reg_rtx (result_mode);
3827 size = convert_to_mode (cmp_mode, size, 1);
3828 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3829
3830 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
3831 *pmode = result_mode;
3832 return;
3833 }
3834
3835 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
3836 goto fail;
3837
3838 /* Otherwise call a library function. */
3839 result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
3840
3841 x = result;
3842 y = const0_rtx;
3843 mode = TYPE_MODE (integer_type_node);
3844 methods = OPTAB_LIB_WIDEN;
3845 unsignedp = false;
3846 }
3847
3848 /* Don't allow operands to the compare to trap, as that can put the
3849 compare and branch in different basic blocks. */
3850 if (cfun->can_throw_non_call_exceptions)
3851 {
3852 if (may_trap_p (x))
3853 x = copy_to_reg (x);
3854 if (may_trap_p (y))
3855 y = copy_to_reg (y);
3856 }
3857
3858 if (GET_MODE_CLASS (mode) == MODE_CC)
3859 {
3860 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
3861 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3862 gcc_assert (icode != CODE_FOR_nothing
3863 && insn_operand_matches (icode, 0, test));
3864 *ptest = test;
3865 return;
3866 }
3867
3868 mclass = GET_MODE_CLASS (mode);
3869 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3870 FOR_EACH_MODE_FROM (cmp_mode, mode)
3871 {
3872 enum insn_code icode;
3873 icode = optab_handler (cbranch_optab, cmp_mode);
3874 if (icode != CODE_FOR_nothing
3875 && insn_operand_matches (icode, 0, test))
3876 {
3877 rtx_insn *last = get_last_insn ();
3878 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
3879 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
3880 if (op0 && op1
3881 && insn_operand_matches (icode, 1, op0)
3882 && insn_operand_matches (icode, 2, op1))
3883 {
3884 XEXP (test, 0) = op0;
3885 XEXP (test, 1) = op1;
3886 *ptest = test;
3887 *pmode = cmp_mode;
3888 return;
3889 }
3890 delete_insns_since (last);
3891 }
3892
3893 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
3894 break;
3895 }
3896
3897 if (methods != OPTAB_LIB_WIDEN)
3898 goto fail;
3899
3900 if (!SCALAR_FLOAT_MODE_P (mode))
3901 {
3902 rtx result;
3903 machine_mode ret_mode;
3904
3905 /* Handle a libcall just for the mode we are using. */
3906 libfunc = optab_libfunc (cmp_optab, mode);
3907 gcc_assert (libfunc);
3908
3909 /* If we want unsigned, and this mode has a distinct unsigned
3910 comparison routine, use that. */
3911 if (unsignedp)
3912 {
3913 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
3914 if (ulibfunc)
3915 libfunc = ulibfunc;
3916 }
3917
3918 ret_mode = targetm.libgcc_cmp_return_mode ();
3919 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3920 ret_mode, x, mode, y, mode);
3921
3922 /* There are two kinds of comparison routines. Biased routines
3923 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3924 of gcc expect that the comparison operation is equivalent
3925 to the modified comparison. For signed comparisons compare the
3926 result against 1 in the biased case, and zero in the unbiased
3927 case. For unsigned comparisons always compare against 1 after
3928 biasing the unbiased result by adding 1. This gives us a way to
3929 represent LTU.
3930 The comparisons in the fixed-point helper library are always
3931 biased. */
3932 x = result;
3933 y = const1_rtx;
3934
3935 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
3936 {
3937 if (unsignedp)
3938 x = plus_constant (ret_mode, result, 1);
3939 else
3940 y = const0_rtx;
3941 }
3942
3943 *pmode = ret_mode;
3944 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
3945 ptest, pmode);
3946 }
3947 else
3948 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
3949
3950 return;
3951
3952 fail:
3953 *ptest = NULL_RTX;
3954 }
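
/* Editorial sketch (not part of GCC): the biased/unbiased libcall return
   conventions described above, modelled with hypothetical helpers.  A
   biased routine returns 0/1/2 and an unbiased one -1/0/1 for <, ==, >.
   Fenced off with #if 0.  */
#if 0
static int cmp_biased (long a, long b)   { return a < b ? 0 : a == b ? 1 : 2; }
static int cmp_unbiased (long a, long b) { return a < b ? -1 : a == b ? 0 : 1; }

static int
lt_via_biased (long a, long b)
{
  return cmp_biased (a, b) < 1;		/* X = result, Y = const1_rtx.  */
}

static int
lt_via_unbiased (long a, long b)
{
  return cmp_unbiased (a, b) < 0;	/* Y becomes const0_rtx.  */
}

/* Unsigned case: bias the result by adding 1, then compare LTU against 1,
   so only the "less than" result (-1 + 1 == 0) passes.  */
static int
ltu_via_unbiased (unsigned long a, unsigned long b)
{
  int r = a < b ? -1 : a == b ? 0 : 1;
  return (unsigned) (r + 1) < 1u;
}
#endif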
3955
3956 /* Before emitting an insn with code ICODE, make sure that X, which is going
3957 to be used for operand OPNUM of the insn, is converted from mode MODE to
3958 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3959 that it is accepted by the operand predicate. Return the new value. */
3960
3961 rtx
3962 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
3963 machine_mode wider_mode, int unsignedp)
3964 {
3965 if (mode != wider_mode)
3966 x = convert_modes (wider_mode, mode, x, unsignedp);
3967
3968 if (!insn_operand_matches (icode, opnum, x))
3969 {
3970 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
3971 if (reload_completed)
3972 return NULL_RTX;
3973 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
3974 return NULL_RTX;
3975 x = copy_to_mode_reg (op_mode, x);
3976 }
3977
3978 return x;
3979 }
3980
3981 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3982 we can do the branch. */
3983
3984 static void
3985 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
3986 profile_probability prob)
3987 {
3988 machine_mode optab_mode;
3989 enum mode_class mclass;
3990 enum insn_code icode;
3991 rtx_insn *insn;
3992
3993 mclass = GET_MODE_CLASS (mode);
3994 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
3995 icode = optab_handler (cbranch_optab, optab_mode);
3996
3997 gcc_assert (icode != CODE_FOR_nothing);
3998 gcc_assert (insn_operand_matches (icode, 0, test));
3999 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
4000 XEXP (test, 1), label));
4001 if (prob.initialized_p ()
4002 && profile_status_for_fn (cfun) != PROFILE_ABSENT
4003 && insn
4004 && JUMP_P (insn)
4005 && any_condjump_p (insn)
4006 && !find_reg_note (insn, REG_BR_PROB, 0))
4007 add_reg_br_prob_note (insn, prob);
4008 }
4009
4010 /* Generate code to compare X with Y so that the condition codes are
4011 set and to jump to LABEL if the condition is true. If X is a
4012 constant and Y is not a constant, then the comparison is swapped to
4013 ensure that the comparison RTL has the canonical form.
4014
4015 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4016 need to be widened. UNSIGNEDP is also used to select the proper
4017 branch condition code.
4018
4019 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4020
4021 MODE is the mode of the inputs (in case they are const_int).
4022
4023 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4024 It will be potentially converted into an unsigned variant based on
4025 UNSIGNEDP to select a proper jump instruction.
4026
4027 PROB is the probability of jumping to LABEL. */
4028
4029 void
4030 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4031 machine_mode mode, int unsignedp, rtx label,
4032 profile_probability prob)
4033 {
4034 rtx op0 = x, op1 = y;
4035 rtx test;
4036
4037 /* Swap operands and condition to ensure canonical RTL. */
4038 if (swap_commutative_operands_p (x, y)
4039 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4040 {
4041 op0 = y, op1 = x;
4042 comparison = swap_condition (comparison);
4043 }
4044
4045 /* If OP0 is still a constant, then both X and Y must be constants
4046 or the opposite comparison is not supported. Force X into a register
4047 to create canonical RTL. */
4048 if (CONSTANT_P (op0))
4049 op0 = force_reg (mode, op0);
4050
4051 if (unsignedp)
4052 comparison = unsigned_condition (comparison);
4053
4054 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4055 &test, &mode);
4056 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4057 }
4058
4059 \f
4060 /* Emit a library call comparison between floating point X and Y.
4061 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4062
4063 static void
4064 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4065 rtx *ptest, machine_mode *pmode)
4066 {
4067 enum rtx_code swapped = swap_condition (comparison);
4068 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4069 machine_mode orig_mode = GET_MODE (x);
4070 machine_mode mode;
4071 rtx true_rtx, false_rtx;
4072 rtx value, target, equiv;
4073 rtx_insn *insns;
4074 rtx libfunc = 0;
4075 bool reversed_p = false;
4076 scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();
4077
4078 FOR_EACH_MODE_FROM (mode, orig_mode)
4079 {
4080 if (code_to_optab (comparison)
4081 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4082 break;
4083
4084 if (code_to_optab (swapped)
4085 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4086 {
4087 std::swap (x, y);
4088 comparison = swapped;
4089 break;
4090 }
4091
4092 if (code_to_optab (reversed)
4093 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4094 {
4095 comparison = reversed;
4096 reversed_p = true;
4097 break;
4098 }
4099 }
4100
4101 gcc_assert (mode != VOIDmode);
4102
4103 if (mode != orig_mode)
4104 {
4105 x = convert_to_mode (mode, x, 0);
4106 y = convert_to_mode (mode, y, 0);
4107 }
4108
4109 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4110 the RTL. This allows the RTL optimizers to delete the libcall if the
4111 condition can be determined at compile-time. */
4112 if (comparison == UNORDERED
4113 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4114 {
4115 true_rtx = const_true_rtx;
4116 false_rtx = const0_rtx;
4117 }
4118 else
4119 {
4120 switch (comparison)
4121 {
4122 case EQ:
4123 true_rtx = const0_rtx;
4124 false_rtx = const_true_rtx;
4125 break;
4126
4127 case NE:
4128 true_rtx = const_true_rtx;
4129 false_rtx = const0_rtx;
4130 break;
4131
4132 case GT:
4133 true_rtx = const1_rtx;
4134 false_rtx = const0_rtx;
4135 break;
4136
4137 case GE:
4138 true_rtx = const0_rtx;
4139 false_rtx = constm1_rtx;
4140 break;
4141
4142 case LT:
4143 true_rtx = constm1_rtx;
4144 false_rtx = const0_rtx;
4145 break;
4146
4147 case LE:
4148 true_rtx = const0_rtx;
4149 false_rtx = const1_rtx;
4150 break;
4151
4152 default:
4153 gcc_unreachable ();
4154 }
4155 }
4156
4157 if (comparison == UNORDERED)
4158 {
4159 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4160 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4161 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4162 temp, const_true_rtx, equiv);
4163 }
4164 else
4165 {
4166 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4167 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4168 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4169 equiv, true_rtx, false_rtx);
4170 }
4171
4172 start_sequence ();
4173 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4174 cmp_mode, x, mode, y, mode);
4175 insns = get_insns ();
4176 end_sequence ();
4177
4178 target = gen_reg_rtx (cmp_mode);
4179 emit_libcall_block (insns, target, value, equiv);
4180
4181 if (comparison == UNORDERED
4182 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4183 || reversed_p)
4184 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4185 else
4186 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4187
4188 *pmode = cmp_mode;
4189 }
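
/* Editorial sketch (not part of GCC): the true_rtx/false_rtx table above
   reflects the usual soft-float convention of returning a three-way value
   that the caller retests against a constant.  With a hypothetical
   cmpdf() returning -1/0/1 (NaN handling deliberately omitted):  */
#if 0
static int cmpdf (double a, double b) { return a < b ? -1 : a > b ? 1 : 0; }

static int lt (double a, double b) { return cmpdf (a, b) < 0; }  /* true: -1 */
static int le (double a, double b) { return cmpdf (a, b) <= 0; } /* false: 1 */
static int eq (double a, double b) { return cmpdf (a, b) == 0; } /* true: 0 */
#endif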
4190 \f
4191 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4192
4193 void
4194 emit_indirect_jump (rtx loc)
4195 {
4196 if (!targetm.have_indirect_jump ())
4197 sorry ("indirect jumps are not available on this target");
4198 else
4199 {
4200 struct expand_operand ops[1];
4201 create_address_operand (&ops[0], loc);
4202 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4203 emit_barrier ();
4204 }
4205 }
4206 \f
4207
4208 /* Emit a conditional move instruction if the machine supports one for that
4209 condition and machine mode.
4210
4211 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4212 the mode to use should they be constants. If it is VOIDmode, they cannot
4213 both be constants.
4214
4215 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4216 should be stored there. MODE is the mode to use should they be constants.
4217 If it is VOIDmode, they cannot both be constants.
4218
4219 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4220 is not supported. */
4221
4222 rtx
4223 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4224 machine_mode cmode, rtx op2, rtx op3,
4225 machine_mode mode, int unsignedp)
4226 {
4227 rtx comparison;
4228 rtx_insn *last;
4229 enum insn_code icode;
4230 enum rtx_code reversed;
4231
4232 /* If the two source operands are identical, that's just a move. */
4233
4234 if (rtx_equal_p (op2, op3))
4235 {
4236 if (!target)
4237 target = gen_reg_rtx (mode);
4238
4239 emit_move_insn (target, op3);
4240 return target;
4241 }
4242
4243 /* If one operand is constant, make it the second one. Only do this
4244 if the other operand is not constant as well. */
4245
4246 if (swap_commutative_operands_p (op0, op1))
4247 {
4248 std::swap (op0, op1);
4249 code = swap_condition (code);
4250 }
4251
4252 /* get_condition will prefer to generate LT and GT even if the old
4253 comparison was against zero, so undo that canonicalization here since
4254 comparisons against zero are cheaper. */
4255 if (code == LT && op1 == const1_rtx)
4256 code = LE, op1 = const0_rtx;
4257 else if (code == GT && op1 == constm1_rtx)
4258 code = GE, op1 = const0_rtx;
4259
4260 if (cmode == VOIDmode)
4261 cmode = GET_MODE (op0);
4262
4263 enum rtx_code orig_code = code;
4264 bool swapped = false;
4265 if (swap_commutative_operands_p (op2, op3)
4266 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4267 != UNKNOWN))
4268 {
4269 std::swap (op2, op3);
4270 code = reversed;
4271 swapped = true;
4272 }
4273
4274 if (mode == VOIDmode)
4275 mode = GET_MODE (op2);
4276
4277 icode = direct_optab_handler (movcc_optab, mode);
4278
4279 if (icode == CODE_FOR_nothing)
4280 return NULL_RTX;
4281
4282 if (!target)
4283 target = gen_reg_rtx (mode);
4284
4285 for (int pass = 0; ; pass++)
4286 {
4287 code = unsignedp ? unsigned_condition (code) : code;
4288 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4289
4290 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4291 punt and let the caller figure out how best to deal with this
4292 situation. */
4293 if (COMPARISON_P (comparison))
4294 {
4295 saved_pending_stack_adjust save;
4296 save_pending_stack_adjust (&save);
4297 last = get_last_insn ();
4298 do_pending_stack_adjust ();
4299 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4300 GET_CODE (comparison), NULL_RTX, unsignedp,
4301 OPTAB_WIDEN, &comparison, &cmode);
4302 if (comparison)
4303 {
4304 struct expand_operand ops[4];
4305
4306 create_output_operand (&ops[0], target, mode);
4307 create_fixed_operand (&ops[1], comparison);
4308 create_input_operand (&ops[2], op2, mode);
4309 create_input_operand (&ops[3], op3, mode);
4310 if (maybe_expand_insn (icode, 4, ops))
4311 {
4312 if (ops[0].value != target)
4313 convert_move (target, ops[0].value, false);
4314 return target;
4315 }
4316 }
4317 delete_insns_since (last);
4318 restore_pending_stack_adjust (&save);
4319 }
4320
4321 if (pass == 1)
4322 return NULL_RTX;
4323
4324 /* If the preferred op2/op3 order is not usable, retry with other
4325 operand order, perhaps it will expand successfully. */
4326 if (swapped)
4327 code = orig_code;
4328 else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
4329 NULL))
4330 != UNKNOWN)
4331 code = reversed;
4332 else
4333 return NULL_RTX;
4334 std::swap (op2, op3);
4335 }
4336 }
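
/* Editorial sketch (not part of GCC): the two passes above rely on the
   source-level identity
       (a OP b) ? x : y  ==  (a !OP b) ? y : x
   whenever the comparison is safely reversible, which is what
   reversed_comparison_code_parts checks (it can fail for floating point
   because of NaNs).  Fenced off with #if 0:  */
#if 0
static long
cmove (long a, long b, long x, long y)
{
  return a < b ? x : y;		/* Preferred operand order.  */
}

static long
cmove_reversed (long a, long b, long x, long y)
{
  return a >= b ? y : x;	/* Reversed condition, operands swapped.  */
}
#endif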
4337
4338
4339 /* Emit a conditional negate or bitwise complement using the
4340 negcc or notcc optabs if available. Return NULL_RTX if such operations
4341 are not available. Otherwise return the RTX holding the result.
4342 TARGET is the desired destination of the result. COND is the comparison
4343 on which to negate. If COND is true move into TARGET the negation
4344 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4345 CODE is either NEG or NOT. MODE is the machine mode in which the
4346 operation is performed. */
4347
4348 rtx
4349 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4350 machine_mode mode, rtx cond, rtx op1,
4351 rtx op2)
4352 {
4353 optab op = unknown_optab;
4354 if (code == NEG)
4355 op = negcc_optab;
4356 else if (code == NOT)
4357 op = notcc_optab;
4358 else
4359 gcc_unreachable ();
4360
4361 insn_code icode = direct_optab_handler (op, mode);
4362
4363 if (icode == CODE_FOR_nothing)
4364 return NULL_RTX;
4365
4366 if (!target)
4367 target = gen_reg_rtx (mode);
4368
4369 rtx_insn *last = get_last_insn ();
4370 struct expand_operand ops[4];
4371
4372 create_output_operand (&ops[0], target, mode);
4373 create_fixed_operand (&ops[1], cond);
4374 create_input_operand (&ops[2], op1, mode);
4375 create_input_operand (&ops[3], op2, mode);
4376
4377 if (maybe_expand_insn (icode, 4, ops))
4378 {
4379 if (ops[0].value != target)
4380 convert_move (target, ops[0].value, false);
4381
4382 return target;
4383 }
4384 delete_insns_since (last);
4385 return NULL_RTX;
4386 }
4387
4388 /* Emit a conditional addition instruction if the machine supports one for that
4389 condition and machine mode.
4390
4391 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4392 the mode to use should they be constants. If it is VOIDmode, they cannot
4393 both be constants.
4394
4395 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4396 should be stored there. MODE is the mode to use should they be constants.
4397 If it is VOIDmode, they cannot both be constants.
4398
4399 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4400 is not supported. */
4401
4402 rtx
4403 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4404 machine_mode cmode, rtx op2, rtx op3,
4405 machine_mode mode, int unsignedp)
4406 {
4407 rtx comparison;
4408 rtx_insn *last;
4409 enum insn_code icode;
4410
4411 /* If one operand is constant, make it the second one. Only do this
4412 if the other operand is not constant as well. */
4413
4414 if (swap_commutative_operands_p (op0, op1))
4415 {
4416 std::swap (op0, op1);
4417 code = swap_condition (code);
4418 }
4419
4420 /* get_condition will prefer to generate LT and GT even if the old
4421 comparison was against zero, so undo that canonicalization here since
4422 comparisons against zero are cheaper. */
4423 if (code == LT && op1 == const1_rtx)
4424 code = LE, op1 = const0_rtx;
4425 else if (code == GT && op1 == constm1_rtx)
4426 code = GE, op1 = const0_rtx;
4427
4428 if (cmode == VOIDmode)
4429 cmode = GET_MODE (op0);
4430
4431 if (mode == VOIDmode)
4432 mode = GET_MODE (op2);
4433
4434 icode = optab_handler (addcc_optab, mode);
4435
4436 if (icode == CODE_FOR_nothing)
4437 return 0;
4438
4439 if (!target)
4440 target = gen_reg_rtx (mode);
4441
4442 code = unsignedp ? unsigned_condition (code) : code;
4443 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4444
4445 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4446 return NULL and let the caller figure out how best to deal with this
4447 situation. */
4448 if (!COMPARISON_P (comparison))
4449 return NULL_RTX;
4450
4451 do_pending_stack_adjust ();
4452 last = get_last_insn ();
4453 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4454 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4455 &comparison, &cmode);
4456 if (comparison)
4457 {
4458 struct expand_operand ops[4];
4459
4460 create_output_operand (&ops[0], target, mode);
4461 create_fixed_operand (&ops[1], comparison);
4462 create_input_operand (&ops[2], op2, mode);
4463 create_input_operand (&ops[3], op3, mode);
4464 if (maybe_expand_insn (icode, 4, ops))
4465 {
4466 if (ops[0].value != target)
4467 convert_move (target, ops[0].value, false);
4468 return target;
4469 }
4470 }
4471 delete_insns_since (last);
4472 return NULL_RTX;
4473 }
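
/* Editorial sketch (not part of GCC): the operation emit_conditional_add
   expands is, at the source level,
       target = (op0 CODE op1) ? op2 + op3 : op2;
   for example, with CODE == LT and a hypothetical helper:  */
#if 0
static long
cond_add (long op0, long op1, long op2, long op3)
{
  return op0 < op1 ? op2 + op3 : op2;
}
#endif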
4474 \f
4475 /* These functions attempt to generate an insn body, rather than
4476 emitting the insn, but if the gen function already emits them, we
4477 make no attempt to turn them back into naked patterns. */
4478
4479 /* Generate and return an insn body to add Y to X. */
4480
4481 rtx_insn *
4482 gen_add2_insn (rtx x, rtx y)
4483 {
4484 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4485
4486 gcc_assert (insn_operand_matches (icode, 0, x));
4487 gcc_assert (insn_operand_matches (icode, 1, x));
4488 gcc_assert (insn_operand_matches (icode, 2, y));
4489
4490 return GEN_FCN (icode) (x, x, y);
4491 }
4492
4493 /* Generate and return an insn body to add r1 and c,
4494 storing the result in r0. */
4495
4496 rtx_insn *
4497 gen_add3_insn (rtx r0, rtx r1, rtx c)
4498 {
4499 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4500
4501 if (icode == CODE_FOR_nothing
4502 || !insn_operand_matches (icode, 0, r0)
4503 || !insn_operand_matches (icode, 1, r1)
4504 || !insn_operand_matches (icode, 2, c))
4505 return NULL;
4506
4507 return GEN_FCN (icode) (r0, r1, c);
4508 }
4509
4510 int
4511 have_add2_insn (rtx x, rtx y)
4512 {
4513 enum insn_code icode;
4514
4515 gcc_assert (GET_MODE (x) != VOIDmode);
4516
4517 icode = optab_handler (add_optab, GET_MODE (x));
4518
4519 if (icode == CODE_FOR_nothing)
4520 return 0;
4521
4522 if (!insn_operand_matches (icode, 0, x)
4523 || !insn_operand_matches (icode, 1, x)
4524 || !insn_operand_matches (icode, 2, y))
4525 return 0;
4526
4527 return 1;
4528 }
4529
4530 /* Generate and return an insn body to add Y and Z, storing the result in X. */
4531
4532 rtx_insn *
4533 gen_addptr3_insn (rtx x, rtx y, rtx z)
4534 {
4535 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4536
4537 gcc_assert (insn_operand_matches (icode, 0, x));
4538 gcc_assert (insn_operand_matches (icode, 1, y));
4539 gcc_assert (insn_operand_matches (icode, 2, z));
4540
4541 return GEN_FCN (icode) (x, y, z);
4542 }
4543
4544 /* Return true if the target implements an addptr pattern and X, Y,
4545 and Z are valid for the pattern predicates. */
4546
4547 int
4548 have_addptr3_insn (rtx x, rtx y, rtx z)
4549 {
4550 enum insn_code icode;
4551
4552 gcc_assert (GET_MODE (x) != VOIDmode);
4553
4554 icode = optab_handler (addptr3_optab, GET_MODE (x));
4555
4556 if (icode == CODE_FOR_nothing)
4557 return 0;
4558
4559 if (!insn_operand_matches (icode, 0, x)
4560 || !insn_operand_matches (icode, 1, y)
4561 || !insn_operand_matches (icode, 2, z))
4562 return 0;
4563
4564 return 1;
4565 }
4566
4567 /* Generate and return an insn body to subtract Y from X. */
4568
4569 rtx_insn *
4570 gen_sub2_insn (rtx x, rtx y)
4571 {
4572 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4573
4574 gcc_assert (insn_operand_matches (icode, 0, x));
4575 gcc_assert (insn_operand_matches (icode, 1, x));
4576 gcc_assert (insn_operand_matches (icode, 2, y));
4577
4578 return GEN_FCN (icode) (x, x, y);
4579 }
4580
4581 /* Generate and return an insn body to subtract c from r1,
4582 storing the result in r0. */
4583
4584 rtx_insn *
4585 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4586 {
4587 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4588
4589 if (icode == CODE_FOR_nothing
4590 || !insn_operand_matches (icode, 0, r0)
4591 || !insn_operand_matches (icode, 1, r1)
4592 || !insn_operand_matches (icode, 2, c))
4593 return NULL;
4594
4595 return GEN_FCN (icode) (r0, r1, c);
4596 }
4597
4598 int
4599 have_sub2_insn (rtx x, rtx y)
4600 {
4601 enum insn_code icode;
4602
4603 gcc_assert (GET_MODE (x) != VOIDmode);
4604
4605 icode = optab_handler (sub_optab, GET_MODE (x));
4606
4607 if (icode == CODE_FOR_nothing)
4608 return 0;
4609
4610 if (!insn_operand_matches (icode, 0, x)
4611 || !insn_operand_matches (icode, 1, x)
4612 || !insn_operand_matches (icode, 2, y))
4613 return 0;
4614
4615 return 1;
4616 }
4617 \f
4618 /* Generate the body of an insn to extend Y (with mode MFROM)
4619 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4620
4621 rtx_insn *
4622 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4623 machine_mode mfrom, int unsignedp)
4624 {
4625 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4626 return GEN_FCN (icode) (x, y);
4627 }
4628 \f
4629 /* Generate code to convert FROM to floating point
4630 and store in TO. FROM must be fixed point and not VOIDmode.
4631 UNSIGNEDP nonzero means regard FROM as unsigned.
4632 Normally this is done by correcting the final value
4633 if it is negative. */
4634
4635 void
4636 expand_float (rtx to, rtx from, int unsignedp)
4637 {
4638 enum insn_code icode;
4639 rtx target = to;
4640 scalar_mode from_mode, to_mode;
4641 machine_mode fmode, imode;
4642 bool can_do_signed = false;
4643
4644 /* Crash now, because we won't be able to decide which mode to use. */
4645 gcc_assert (GET_MODE (from) != VOIDmode);
4646
4647 /* Look for an insn to do the conversion. Do it in the specified
4648 modes if possible; otherwise convert either input, output or both to
4649 wider mode. If the integer mode is wider than the mode of FROM,
4650 we can do the conversion signed even if the input is unsigned. */
4651
4652 FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
4653 FOR_EACH_MODE_FROM (imode, GET_MODE (from))
4654 {
4655 int doing_unsigned = unsignedp;
4656
4657 if (fmode != GET_MODE (to)
4658 && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
4659 continue;
4660
4661 icode = can_float_p (fmode, imode, unsignedp);
4662 if (icode == CODE_FOR_nothing && unsignedp)
4663 {
4664 enum insn_code scode = can_float_p (fmode, imode, 0);
4665 if (scode != CODE_FOR_nothing)
4666 can_do_signed = true;
4667 if (imode != GET_MODE (from))
4668 icode = scode, doing_unsigned = 0;
4669 }
4670
4671 if (icode != CODE_FOR_nothing)
4672 {
4673 if (imode != GET_MODE (from))
4674 from = convert_to_mode (imode, from, unsignedp);
4675
4676 if (fmode != GET_MODE (to))
4677 target = gen_reg_rtx (fmode);
4678
4679 emit_unop_insn (icode, target, from,
4680 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4681
4682 if (target != to)
4683 convert_move (to, target, 0);
4684 return;
4685 }
4686 }
4687
4688 /* Unsigned integer, and no way to convert directly. Convert as signed,
4689 then unconditionally adjust the result. */
4690 if (unsignedp
4691 && can_do_signed
4692 && is_a <scalar_mode> (GET_MODE (to), &to_mode)
4693 && is_a <scalar_mode> (GET_MODE (from), &from_mode))
4694 {
4695 opt_scalar_mode fmode_iter;
4696 rtx_code_label *label = gen_label_rtx ();
4697 rtx temp;
4698 REAL_VALUE_TYPE offset;
4699
4700 /* Look for a usable floating mode FMODE wider than the source and at
4701 least as wide as the target. Using FMODE will avoid rounding woes
4702 with unsigned values greater than the signed maximum value. */
4703
4704 FOR_EACH_MODE_FROM (fmode_iter, to_mode)
4705 {
4706 scalar_mode fmode = fmode_iter.require ();
4707 if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
4708 && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
4709 break;
4710 }
4711
4712 if (!fmode_iter.exists (&fmode))
4713 {
4714 /* There is no such mode. Pretend the target is wide enough. */
4715 fmode = to_mode;
4716
4717 /* Avoid double-rounding when TO is narrower than FROM. */
4718 if ((significand_size (fmode) + 1)
4719 < GET_MODE_PRECISION (from_mode))
4720 {
4721 rtx temp1;
4722 rtx_code_label *neglabel = gen_label_rtx ();
4723
4724 /* Don't use TARGET if it isn't a register, is a hard register,
4725 or is the wrong mode. */
4726 if (!REG_P (target)
4727 || REGNO (target) < FIRST_PSEUDO_REGISTER
4728 || GET_MODE (target) != fmode)
4729 target = gen_reg_rtx (fmode);
4730
4731 imode = from_mode;
4732 do_pending_stack_adjust ();
4733
4734 /* Test whether the sign bit is set. */
4735 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4736 0, neglabel);
4737
4738 /* The sign bit is not set. Convert as signed. */
4739 expand_float (target, from, 0);
4740 emit_jump_insn (targetm.gen_jump (label));
4741 emit_barrier ();
4742
4743 /* The sign bit is set.
4744 Convert to a usable (positive signed) value by shifting right
4745 one bit, while remembering if a nonzero bit was shifted
4746 out; i.e., compute (from & 1) | (from >> 1). */
4747
4748 emit_label (neglabel);
4749 temp = expand_binop (imode, and_optab, from, const1_rtx,
4750 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4751 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4752 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4753 OPTAB_LIB_WIDEN);
4754 expand_float (target, temp, 0);
4755
4756 /* Multiply by 2 to undo the shift above. */
4757 temp = expand_binop (fmode, add_optab, target, target,
4758 target, 0, OPTAB_LIB_WIDEN);
4759 if (temp != target)
4760 emit_move_insn (target, temp);
4761
4762 do_pending_stack_adjust ();
4763 emit_label (label);
4764 goto done;
4765 }
4766 }
4767
4768 /* If we are about to do some arithmetic to correct for an
4769 unsigned operand, do it in a pseudo-register. */
4770
4771 if (to_mode != fmode
4772 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4773 target = gen_reg_rtx (fmode);
4774
4775 /* Convert as signed integer to floating. */
4776 expand_float (target, from, 0);
4777
4778 /* If FROM is negative (and therefore TO is negative),
4779 correct its value by 2**bitwidth. */
4780
4781 do_pending_stack_adjust ();
4782 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
4783 0, label);
4784
4785
4786 real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
4787 temp = expand_binop (fmode, add_optab, target,
4788 const_double_from_real_value (offset, fmode),
4789 target, 0, OPTAB_LIB_WIDEN);
4790 if (temp != target)
4791 emit_move_insn (target, temp);
4792
4793 do_pending_stack_adjust ();
4794 emit_label (label);
4795 goto done;
4796 }
4797
4798 /* No hardware instruction available; call a library routine. */
4799 {
4800 rtx libfunc;
4801 rtx_insn *insns;
4802 rtx value;
4803 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4804
4805 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
4806 from = convert_to_mode (SImode, from, unsignedp);
4807
4808 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4809 gcc_assert (libfunc);
4810
4811 start_sequence ();
4812
4813 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4814 GET_MODE (to), from, GET_MODE (from));
4815 insns = get_insns ();
4816 end_sequence ();
4817
4818 emit_libcall_block (insns, target, value,
4819 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4820 GET_MODE (to), from));
4821 }
4822
4823 done:
4824
4825 /* Copy result to requested destination
4826 if we have been computing in a temp location. */
4827
4828 if (target != to)
4829 {
4830 if (GET_MODE (target) == GET_MODE (to))
4831 emit_move_insn (to, target);
4832 else
4833 convert_move (to, target, 0);
4834 }
4835 }
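
/* Editorial sketch (not part of GCC): the unsigned fixup path above,
   written as standalone C for a 64-bit input.  Assumes IEEE binary64 and
   two's complement; the helper name is hypothetical.  Fenced off with
   #if 0.  */
#if 0
#include <stdint.h>

static double
u64_to_double (uint64_t u)
{
  if ((int64_t) u >= 0)
    /* Sign bit clear: the plain signed conversion is already right.  */
    return (double) (int64_t) u;
  /* Sign bit set: halve while folding the shifted-out bit back in --
     the (from & 1) | (from >> 1) step above -- convert as signed, then
     double the result.  The OR keeps round-to-nearest correct.  */
  double d = (double) (int64_t) ((u >> 1) | (u & 1));
  return d + d;
}
#endif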
4836 \f
4837 /* Generate code to convert FROM to fixed point and store in TO. FROM
4838 must be floating point. */
4839
4840 void
4841 expand_fix (rtx to, rtx from, int unsignedp)
4842 {
4843 enum insn_code icode;
4844 rtx target = to;
4845 machine_mode fmode, imode;
4846 opt_scalar_mode fmode_iter;
4847 bool must_trunc = false;
4848
4849 /* We first try to find a pair of modes, one real and one integer, at
4850 least as wide as FROM and TO, respectively, in which we can open-code
4851 this conversion. If the integer mode is wider than the mode of TO,
4852 we can do the conversion either signed or unsigned. */
4853
4854 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
4855 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
4856 {
4857 int doing_unsigned = unsignedp;
4858
4859 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4860 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4861 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4862
4863 if (icode != CODE_FOR_nothing)
4864 {
4865 rtx_insn *last = get_last_insn ();
4866 if (fmode != GET_MODE (from))
4867 from = convert_to_mode (fmode, from, 0);
4868
4869 if (must_trunc)
4870 {
4871 rtx temp = gen_reg_rtx (GET_MODE (from));
4872 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4873 temp, 0);
4874 }
4875
4876 if (imode != GET_MODE (to))
4877 target = gen_reg_rtx (imode);
4878
4879 if (maybe_emit_unop_insn (icode, target, from,
4880 doing_unsigned ? UNSIGNED_FIX : FIX))
4881 {
4882 if (target != to)
4883 convert_move (to, target, unsignedp);
4884 return;
4885 }
4886 delete_insns_since (last);
4887 }
4888 }
4889
4890 /* For an unsigned conversion, there is one more way to do it.
4891 If we have a signed conversion available, we generate code that compares
4892 the real value to the largest representable positive number. If it
4893 is smaller, the conversion is done normally. Otherwise, subtract
4894 one plus the highest signed number, convert, and add it back.
4895
4896 We only need to check all real modes, since we know we didn't find
4897 anything with a wider integer mode.
4898
4899 This code used to extend FP value into mode wider than the destination.
4900 This is needed for decimal float modes which cannot accurately
4901 represent one plus the highest signed number of the same size, but
4902 not for binary modes. Consider, for instance, conversion from SFmode
4903 into DImode.
4904
4905 The hot path through the code is dealing with inputs smaller than 2^63
4906 and doing just the conversion, so there are no bits to lose.
4907 
4908 In the other path we know the value is positive in the range 2^63..2^64-1
4909 inclusive (for any other input, overflow happens and the result is
4910 undefined). So we know that the most significant bit set in the mantissa
4911 corresponds to 2^63. The subtraction of 2^63 should not generate any
4912 rounding as it simply clears out that bit. The rest is trivial. */
4913
4914 scalar_int_mode to_mode;
4915 if (unsignedp
4916 && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
4917 && HWI_COMPUTABLE_MODE_P (to_mode))
4918 FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
4919 {
4920 scalar_mode fmode = fmode_iter.require ();
4921 if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
4922 0, &must_trunc)
4923 && (!DECIMAL_FLOAT_MODE_P (fmode)
4924 || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
4925 {
4926 int bitsize;
4927 REAL_VALUE_TYPE offset;
4928 rtx limit;
4929 rtx_code_label *lab1, *lab2;
4930 rtx_insn *insn;
4931
4932 bitsize = GET_MODE_PRECISION (to_mode);
4933 real_2expN (&offset, bitsize - 1, fmode);
4934 limit = const_double_from_real_value (offset, fmode);
4935 lab1 = gen_label_rtx ();
4936 lab2 = gen_label_rtx ();
4937
4938 if (fmode != GET_MODE (from))
4939 from = convert_to_mode (fmode, from, 0);
4940
4941 /* See if we need to do the subtraction. */
4942 do_pending_stack_adjust ();
4943 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
4944 GET_MODE (from), 0, lab1);
4945
4946 /* If not, do the signed "fix" and branch around fixup code. */
4947 expand_fix (to, from, 0);
4948 emit_jump_insn (targetm.gen_jump (lab2));
4949 emit_barrier ();
4950
4951 /* Otherwise, subtract 2**(N-1), convert to signed number,
4952 then add 2**(N-1). Do the addition using XOR since this
4953 will often generate better code. */
4954 emit_label (lab1);
4955 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4956 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4957 expand_fix (to, target, 0);
4958 target = expand_binop (to_mode, xor_optab, to,
4959 gen_int_mode
4960 (HOST_WIDE_INT_1 << (bitsize - 1),
4961 to_mode),
4962 to, 1, OPTAB_LIB_WIDEN);
4963
4964 if (target != to)
4965 emit_move_insn (to, target);
4966
4967 emit_label (lab2);
4968
4969 if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
4970 {
4971 /* Make a place for a REG_NOTE and add it. */
4972 insn = emit_move_insn (to, to);
4973 set_dst_reg_note (insn, REG_EQUAL,
4974 gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
4975 copy_rtx (from)),
4976 to);
4977 }
4978
4979 return;
4980 }
4981 }
4982
4983 /* We can't do it with an insn, so use a library call. But first ensure
4984 that the mode of TO is at least as wide as SImode, since those are the
4985 only library calls we know about. */
4986
4987 if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
4988 {
4989 target = gen_reg_rtx (SImode);
4990
4991 expand_fix (target, from, unsignedp);
4992 }
4993 else
4994 {
4995 rtx_insn *insns;
4996 rtx value;
4997 rtx libfunc;
4998
4999 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5000 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5001 gcc_assert (libfunc);
5002
5003 start_sequence ();
5004
5005 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5006 GET_MODE (to), from, GET_MODE (from));
5007 insns = get_insns ();
5008 end_sequence ();
5009
5010 emit_libcall_block (insns, target, value,
5011 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5012 GET_MODE (to), from));
5013 }
5014
5015 if (target != to)
5016 {
5017 if (GET_MODE (to) == GET_MODE (target))
5018 emit_move_insn (to, target);
5019 else
5020 convert_move (to, target, 0);
5021 }
5022 }
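
/* Editorial sketch (not part of GCC): the unsigned fix sequence above as
   standalone C for double -> uint64_t.  As noted above, inputs outside
   [0, 2^64) invoke undefined behaviour.  Assumes IEEE binary64; the
   helper name is hypothetical.  Fenced off with #if 0.  */
#if 0
#include <stdint.h>

static uint64_t
double_to_u64 (double d)
{
  if (d < 0x1p63)
    /* Below 2^63: the signed conversion just works.  */
    return (uint64_t) (int64_t) d;
  /* Subtract 2^63 (exact: it merely clears that bit of the value),
     convert as signed, then put the bit back -- done with XOR above
     because that often generates better code than addition.  */
  return (uint64_t) (int64_t) (d - 0x1p63) ^ (UINT64_C (1) << 63);
}
#endif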
5023
5024
5025 /* Promote integer arguments for a libcall if necessary.
5026 emit_library_call_value cannot do the promotion because it does not
5027 know if it should do a signed or unsigned promotion. This is because
5028 there are no tree types defined for libcalls. */
5029
5030 static rtx
5031 prepare_libcall_arg (rtx arg, int uintp)
5032 {
5033 scalar_int_mode mode;
5034 machine_mode arg_mode;
5035 if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
5036 {
5037 /* If we need to promote the integer function argument we need to do
5038 it here instead of inside emit_library_call_value because in
5039 emit_library_call_value we don't know if we should do a signed or
5040 unsigned promotion. */
5041
5042 int unsigned_p = 0;
5043 arg_mode = promote_function_mode (NULL_TREE, mode,
5044 &unsigned_p, NULL_TREE, 0);
5045 if (arg_mode != mode)
5046 return convert_to_mode (arg_mode, arg, uintp);
5047 }
5048 return arg;
5049 }
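
/* Editorial sketch (not part of GCC): the promotion applied above is the
   target's normal argument promotion, analogous to widening a narrow C
   integer before a call; the signedness of the widening is the whole
   point.  use_int() is a hypothetical callee.  */
#if 0
extern int use_int (int);

static int
call_with_short (short s, int treat_as_unsigned)
{
  /* Widen before the call, choosing zero- or sign-extension the way
     prepare_libcall_arg chooses via UINTP.  */
  return use_int (treat_as_unsigned ? (int) (unsigned short) s : (int) s);
}
#endif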
5050
5051 /* Generate code to convert FROM or TO a fixed-point.
5052 If UINTP is true, either TO or FROM is an unsigned integer.
5053 If SATP is true, we need to saturate the result. */
5054
5055 void
5056 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5057 {
5058 machine_mode to_mode = GET_MODE (to);
5059 machine_mode from_mode = GET_MODE (from);
5060 convert_optab tab;
5061 enum rtx_code this_code;
5062 enum insn_code code;
5063 rtx_insn *insns;
5064 rtx value;
5065 rtx libfunc;
5066
5067 if (to_mode == from_mode)
5068 {
5069 emit_move_insn (to, from);
5070 return;
5071 }
5072
5073 if (uintp)
5074 {
5075 tab = satp ? satfractuns_optab : fractuns_optab;
5076 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5077 }
5078 else
5079 {
5080 tab = satp ? satfract_optab : fract_optab;
5081 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5082 }
5083 code = convert_optab_handler (tab, to_mode, from_mode);
5084 if (code != CODE_FOR_nothing)
5085 {
5086 emit_unop_insn (code, to, from, this_code);
5087 return;
5088 }
5089
5090 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5091 gcc_assert (libfunc);
5092
5093 from = prepare_libcall_arg (from, uintp);
5094 from_mode = GET_MODE (from);
5095
5096 start_sequence ();
5097 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5098 from, from_mode);
5099 insns = get_insns ();
5100 end_sequence ();
5101
5102 emit_libcall_block (insns, to, value,
5103 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5104 }
5105
5106 /* Generate code to convert FROM to fixed point and store in TO. FROM
5107 must be floating point, TO must be signed. Use the conversion optab
5108 TAB to do the conversion. */
5109
5110 bool
5111 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5112 {
5113 enum insn_code icode;
5114 rtx target = to;
5115 machine_mode fmode, imode;
5116
5117 /* We first try to find a pair of modes, one real and one integer, at
5118 least as wide as FROM and TO, respectively, in which we can open-code
5119 this conversion. If the integer mode is wider than the mode of TO,
5120 we can do the conversion either signed or unsigned. */
5121
5122 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5123 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5124 {
5125 icode = convert_optab_handler (tab, imode, fmode);
5126 if (icode != CODE_FOR_nothing)
5127 {
5128 rtx_insn *last = get_last_insn ();
5129 if (fmode != GET_MODE (from))
5130 from = convert_to_mode (fmode, from, 0);
5131
5132 if (imode != GET_MODE (to))
5133 target = gen_reg_rtx (imode);
5134
5135 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5136 {
5137 delete_insns_since (last);
5138 continue;
5139 }
5140 if (target != to)
5141 convert_move (to, target, 0);
5142 return true;
5143 }
5144 }
5145
5146 return false;
5147 }
5148 \f
5149 /* Report whether we have an instruction to perform the operation
5150 specified by CODE on operands of mode MODE. */
5151 int
5152 have_insn_for (enum rtx_code code, machine_mode mode)
5153 {
5154 return (code_to_optab (code)
5155 && (optab_handler (code_to_optab (code), mode)
5156 != CODE_FOR_nothing));
5157 }
5158
5159 /* Print information about the current contents of the optabs on
5160 STDERR. */
5161
5162 DEBUG_FUNCTION void
5163 debug_optab_libfuncs (void)
5164 {
5165 int i, j, k;
5166
5167 /* Dump the arithmetic optabs. */
5168 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5169 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5170 {
5171 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5172 if (l)
5173 {
5174 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5175 fprintf (stderr, "%s\t%s:\t%s\n",
5176 GET_RTX_NAME (optab_to_code ((optab) i)),
5177 GET_MODE_NAME (j),
5178 XSTR (l, 0));
5179 }
5180 }
5181
5182 /* Dump the conversion optabs. */
5183 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5184 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5185 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5186 {
5187 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5188 (machine_mode) k);
5189 if (l)
5190 {
5191 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5192 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5193 GET_RTX_NAME (optab_to_code ((optab) i)),
5194 GET_MODE_NAME (j),
5195 GET_MODE_NAME (k),
5196 XSTR (l, 0));
5197 }
5198 }
5199 }
5200
5201 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5202 CODE. Return 0 on failure. */
5203
5204 rtx_insn *
5205 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5206 {
5207 machine_mode mode = GET_MODE (op1);
5208 enum insn_code icode;
5209 rtx_insn *insn;
5210 rtx trap_rtx;
5211
5212 if (mode == VOIDmode)
5213 return 0;
5214
5215 icode = optab_handler (ctrap_optab, mode);
5216 if (icode == CODE_FOR_nothing)
5217 return 0;
5218
5219 /* Some targets only accept a zero trap code. */
5220 if (!insn_operand_matches (icode, 3, tcode))
5221 return 0;
5222
5223 do_pending_stack_adjust ();
5224 start_sequence ();
5225 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5226 &trap_rtx, &mode);
5227 if (!trap_rtx)
5228 insn = NULL;
5229 else
5230 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5231 tcode);
5232
5233 /* If that failed, then give up. */
5234 if (insn == 0)
5235 {
5236 end_sequence ();
5237 return 0;
5238 }
5239
5240 emit_insn (insn);
5241 insn = get_insns ();
5242 end_sequence ();
5243 return insn;
5244 }
5245
5246 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5247 or unsigned operation code. */
5248
5249 enum rtx_code
5250 get_rtx_code (enum tree_code tcode, bool unsignedp)
5251 {
5252 enum rtx_code code;
5253 switch (tcode)
5254 {
5255 case EQ_EXPR:
5256 code = EQ;
5257 break;
5258 case NE_EXPR:
5259 code = NE;
5260 break;
5261 case LT_EXPR:
5262 code = unsignedp ? LTU : LT;
5263 break;
5264 case LE_EXPR:
5265 code = unsignedp ? LEU : LE;
5266 break;
5267 case GT_EXPR:
5268 code = unsignedp ? GTU : GT;
5269 break;
5270 case GE_EXPR:
5271 code = unsignedp ? GEU : GE;
5272 break;
5273
5274 case UNORDERED_EXPR:
5275 code = UNORDERED;
5276 break;
5277 case ORDERED_EXPR:
5278 code = ORDERED;
5279 break;
5280 case UNLT_EXPR:
5281 code = UNLT;
5282 break;
5283 case UNLE_EXPR:
5284 code = UNLE;
5285 break;
5286 case UNGT_EXPR:
5287 code = UNGT;
5288 break;
5289 case UNGE_EXPR:
5290 code = UNGE;
5291 break;
5292 case UNEQ_EXPR:
5293 code = UNEQ;
5294 break;
5295 case LTGT_EXPR:
5296 code = LTGT;
5297 break;
5298
5299 case BIT_AND_EXPR:
5300 code = AND;
5301 break;
5302
5303 case BIT_IOR_EXPR:
5304 code = IOR;
5305 break;
5306
5307 default:
5308 gcc_unreachable ();
5309 }
5310 return code;
5311 }
5312
5313 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
5314 select signed or unsigned operators. OPNO holds the index of the
5315 first comparison operand for insn ICODE. Do not generate the
5316 compare instruction itself. */
5317
5318 static rtx
5319 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
5320 tree t_op0, tree t_op1, bool unsignedp,
5321 enum insn_code icode, unsigned int opno)
5322 {
5323 struct expand_operand ops[2];
5324 rtx rtx_op0, rtx_op1;
5325 machine_mode m0, m1;
5326 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5327
5328 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5329
5330 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5331 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5332 cases, use the original mode. */
5333 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5334 EXPAND_STACK_PARM);
5335 m0 = GET_MODE (rtx_op0);
5336 if (m0 == VOIDmode)
5337 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5338
5339 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5340 EXPAND_STACK_PARM);
5341 m1 = GET_MODE (rtx_op1);
5342 if (m1 == VOIDmode)
5343 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5344
5345 create_input_operand (&ops[0], rtx_op0, m0);
5346 create_input_operand (&ops[1], rtx_op1, m1);
5347 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5348 gcc_unreachable ();
5349 return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
5350 }
5351
5352 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
5353 vec_perm operand, assuming the second operand is a constant vector of zeroes.
5354 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
5355 shift. */
5356 static rtx
5357 shift_amt_for_vec_perm_mask (rtx sel)
5358 {
5359 unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
5360 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));
5361
5362 if (GET_CODE (sel) != CONST_VECTOR)
5363 return NULL_RTX;
5364
5365 first = INTVAL (CONST_VECTOR_ELT (sel, 0));
5366 if (first >= nelt)
5367 return NULL_RTX;
5368 for (i = 1; i < nelt; i++)
5369 {
5370 int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
5371 unsigned int expected = i + first;
5372 /* Indices into the second vector are all equivalent. */
5373 if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
5374 return NULL_RTX;
5375 }
5376
5377 return GEN_INT (first * bitsize);
5378 }
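
/* Worked example (illustrative): for a V4SImode permutation with
   SEL = { 1, 2, 3, 4 } and an all-zero second operand, element I of the
   result is element I + 1 in memory order, so the mask is a whole-vector
   shift by 1 * 32 bits and GEN_INT (32) is returned.  Indices of 4 and
   above read the zero vector, which matches the zeroes a vec_shr
   pattern shifts in.  */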
5379
5380 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5381
5382 static rtx
5383 expand_vec_perm_1 (enum insn_code icode, rtx target,
5384 rtx v0, rtx v1, rtx sel)
5385 {
5386 machine_mode tmode = GET_MODE (target);
5387 machine_mode smode = GET_MODE (sel);
5388 struct expand_operand ops[4];
5389
5390 create_output_operand (&ops[0], target, tmode);
5391 create_input_operand (&ops[3], sel, smode);
5392
5393 /* Make an effort to preserve v0 == v1. The target expander is able to
5394 rely on this to determine if we're permuting a single input operand. */
5395 if (rtx_equal_p (v0, v1))
5396 {
5397 if (!insn_operand_matches (icode, 1, v0))
5398 v0 = force_reg (tmode, v0);
5399 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5400 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5401
5402 create_fixed_operand (&ops[1], v0);
5403 create_fixed_operand (&ops[2], v0);
5404 }
5405 else
5406 {
5407 create_input_operand (&ops[1], v0, tmode);
5408 create_input_operand (&ops[2], v1, tmode);
5409 }
5410
5411 if (maybe_expand_insn (icode, 4, ops))
5412 return ops[0].value;
5413 return NULL_RTX;
5414 }
5415
5416 /* Generate instructions for vec_perm optab given its mode
5417 and three operands. */
5418
5419 rtx
5420 expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5421 {
5422 enum insn_code icode;
5423 machine_mode qimode;
5424 unsigned int i, w, e, u;
5425 rtx tmp, sel_qi = NULL;
5426 rtvec vec;
5427
5428 if (!target || GET_MODE (target) != mode)
5429 target = gen_reg_rtx (mode);
5430
5431 w = GET_MODE_SIZE (mode);
5432 e = GET_MODE_NUNITS (mode);
5433 u = GET_MODE_UNIT_SIZE (mode);
5434
5435 /* Set QIMODE to a different vector mode with byte elements.
5436 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5437 if (GET_MODE_INNER (mode) == QImode
5438 || !mode_for_vector (QImode, w).exists (&qimode)
5439 || !VECTOR_MODE_P (qimode))
5440 qimode = VOIDmode;
5441
5442 /* If the input is a constant, expand it specially. */
5443 gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
5444 if (GET_CODE (sel) == CONST_VECTOR)
5445 {
5446 /* See if this can be handled with a vec_shr. We only do this if the
5447 second vector is all zeroes. */
5448 enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
5449 enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5450 ? optab_handler (vec_shr_optab, qimode)
5451 : CODE_FOR_nothing);
5452 rtx shift_amt = NULL_RTX;
5453 if (v1 == CONST0_RTX (GET_MODE (v1))
5454 && (shift_code != CODE_FOR_nothing
5455 || shift_code_qi != CODE_FOR_nothing))
5456 {
5457 shift_amt = shift_amt_for_vec_perm_mask (sel);
5458 if (shift_amt)
5459 {
5460 struct expand_operand ops[3];
5461 if (shift_code != CODE_FOR_nothing)
5462 {
5463 create_output_operand (&ops[0], target, mode);
5464 create_input_operand (&ops[1], v0, mode);
5465 create_convert_operand_from_type (&ops[2], shift_amt,
5466 sizetype);
5467 if (maybe_expand_insn (shift_code, 3, ops))
5468 return ops[0].value;
5469 }
5470 if (shift_code_qi != CODE_FOR_nothing)
5471 {
5472 tmp = gen_reg_rtx (qimode);
5473 create_output_operand (&ops[0], tmp, qimode);
5474 create_input_operand (&ops[1], gen_lowpart (qimode, v0),
5475 qimode);
5476 create_convert_operand_from_type (&ops[2], shift_amt,
5477 sizetype);
5478 if (maybe_expand_insn (shift_code_qi, 3, ops))
5479 return gen_lowpart (mode, ops[0].value);
5480 }
5481 }
5482 }
5483
5484 icode = direct_optab_handler (vec_perm_const_optab, mode);
5485 if (icode != CODE_FOR_nothing)
5486 {
5487 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5488 if (tmp)
5489 return tmp;
5490 }
5491
5492 /* Fall back to a constant byte-based permutation. */
5493 if (qimode != VOIDmode)
5494 {
5495 vec = rtvec_alloc (w);
5496 for (i = 0; i < e; ++i)
5497 {
5498 unsigned int j, this_e;
5499
5500 this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
5501 this_e &= 2 * e - 1;
5502 this_e *= u;
5503
5504 for (j = 0; j < u; ++j)
5505 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
5506 }
5507 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
5508
5509 icode = direct_optab_handler (vec_perm_const_optab, qimode);
5510 if (icode != CODE_FOR_nothing)
5511 {
5512 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5513 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5514 gen_lowpart (qimode, v1), sel_qi);
5515 if (tmp)
5516 return gen_lowpart (mode, tmp);
5517 }
5518 }
5519 }
5520
5521 /* Otherwise expand as a fully variable permutation. */
5522 icode = direct_optab_handler (vec_perm_optab, mode);
5523 if (icode != CODE_FOR_nothing)
5524 {
5525 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5526 if (tmp)
5527 return tmp;
5528 }
5529
5530 /* As a special case to aid several targets, lower the element-based
5531 permutation to a byte-based permutation and try again. */
5532 if (qimode == VOIDmode)
5533 return NULL_RTX;
5534 icode = direct_optab_handler (vec_perm_optab, qimode);
5535 if (icode == CODE_FOR_nothing)
5536 return NULL_RTX;
5537
5538 if (sel_qi == NULL)
5539 {
5540 /* Multiply each element by its byte size. */
5541 machine_mode selmode = GET_MODE (sel);
5542 if (u == 2)
5543 sel = expand_simple_binop (selmode, PLUS, sel, sel,
5544 NULL, 0, OPTAB_DIRECT);
5545 else
5546 sel = expand_simple_binop (selmode, ASHIFT, sel,
5547 GEN_INT (exact_log2 (u)),
5548 NULL, 0, OPTAB_DIRECT);
5549 gcc_assert (sel != NULL);
5550
5551 /* Broadcast the low byte of each element into each of its bytes. */
5552 vec = rtvec_alloc (w);
5553 for (i = 0; i < w; ++i)
5554 {
5555 int this_e = i / u * u;
5556 if (BYTES_BIG_ENDIAN)
5557 this_e += u - 1;
5558 RTVEC_ELT (vec, i) = GEN_INT (this_e);
5559 }
5560 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5561 sel = gen_lowpart (qimode, sel);
5562 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
5563 gcc_assert (sel != NULL);
5564
5565 /* Add the byte offset to each byte element. */
5566 /* Note that the definition of the indices here is memory ordering,
5567 so there should be no difference between big and little endian. */
5568 vec = rtvec_alloc (w);
5569 for (i = 0; i < w; ++i)
5570 RTVEC_ELT (vec, i) = GEN_INT (i % u);
5571 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5572 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5573 sel, 0, OPTAB_DIRECT);
5574 gcc_assert (sel_qi != NULL);
5575 }
5576
5577 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5578 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5579 gen_lowpart (qimode, v1), sel_qi);
5580 if (tmp)
5581 tmp = gen_lowpart (mode, tmp);
5582 return tmp;
5583 }
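
/* Worked example of the byte-based lowering above (illustrative): for
   V4SImode (w == 16, e == 4, u == 4) and the element selector
   { 1, 0, 3, 2 }, the constant path builds the QImode selector
   { 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 } in memory order.  The
   variable path reaches the same per-byte indices by computing sel * 4,
   broadcasting the low-order byte of each element into all of its
   bytes, and then adding { 0,1,2,3, 0,1,2,3, ... }.  */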
5584
5585 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5586 three operands. */
5587
5588 rtx
5589 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5590 rtx target)
5591 {
5592 struct expand_operand ops[4];
5593 machine_mode mode = TYPE_MODE (vec_cond_type);
5594 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5595 enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5596 rtx mask, rtx_op1, rtx_op2;
5597
5598 if (icode == CODE_FOR_nothing)
5599 return 0;
5600
5601 mask = expand_normal (op0);
5602 rtx_op1 = expand_normal (op1);
5603 rtx_op2 = expand_normal (op2);
5604
5605 mask = force_reg (mask_mode, mask);
5606 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5607
5608 create_output_operand (&ops[0], target, mode);
5609 create_input_operand (&ops[1], rtx_op1, mode);
5610 create_input_operand (&ops[2], rtx_op2, mode);
5611 create_input_operand (&ops[3], mask, mask_mode);
5612 expand_insn (icode, 4, ops);
5613
5614 return ops[0].value;
5615 }
5616
5617 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5618 three operands. */
5619
5620 rtx
5621 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5622 rtx target)
5623 {
5624 struct expand_operand ops[6];
5625 enum insn_code icode;
5626 rtx comparison, rtx_op1, rtx_op2;
5627 machine_mode mode = TYPE_MODE (vec_cond_type);
5628 machine_mode cmp_op_mode;
5629 bool unsignedp;
5630 tree op0a, op0b;
5631 enum tree_code tcode;
5632
5633 if (COMPARISON_CLASS_P (op0))
5634 {
5635 op0a = TREE_OPERAND (op0, 0);
5636 op0b = TREE_OPERAND (op0, 1);
5637 tcode = TREE_CODE (op0);
5638 }
5639 else
5640 {
5641 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5642 if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5643 != CODE_FOR_nothing)
5644 return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5645 op2, target);
5646 /* Fake op0 < 0. */
5647 else
5648 {
5649 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5650 == MODE_VECTOR_INT);
5651 op0a = op0;
5652 op0b = build_zero_cst (TREE_TYPE (op0));
5653 tcode = LT_EXPR;
5654 }
5655 }
5656 cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5657 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5658
5659
5660 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
5661 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
5662
5663 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
5664 if (icode == CODE_FOR_nothing)
5665 {
5666 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5667 icode = get_vcond_eq_icode (mode, cmp_op_mode);
5668 if (icode == CODE_FOR_nothing)
5669 return 0;
5670 }
5671
5672 comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
5673 icode, 4);
5674 rtx_op1 = expand_normal (op1);
5675 rtx_op2 = expand_normal (op2);
5676
5677 create_output_operand (&ops[0], target, mode);
5678 create_input_operand (&ops[1], rtx_op1, mode);
5679 create_input_operand (&ops[2], rtx_op2, mode);
5680 create_fixed_operand (&ops[3], comparison);
5681 create_fixed_operand (&ops[4], XEXP (comparison, 0));
5682 create_fixed_operand (&ops[5], XEXP (comparison, 1));
5683 expand_insn (icode, 6, ops);
5684 return ops[0].value;
5685 }
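
/* The six operands above follow the standard vcond pattern layout:
   operand 0 is the destination, operands 1 and 2 the two value operands,
   operand 3 the comparison operator, and operands 4 and 5 its arguments;
   conceptually (set 0 (if_then_else (3 4 5) 1 2)).  */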
5686
5687 /* Generate insns for a vector comparison into a mask. */
5688
5689 rtx
5690 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5691 {
5692 struct expand_operand ops[4];
5693 enum insn_code icode;
5694 rtx comparison;
5695 machine_mode mask_mode = TYPE_MODE (type);
5696 machine_mode vmode;
5697 bool unsignedp;
5698 tree op0a, op0b;
5699 enum tree_code tcode;
5700
5701 op0a = TREE_OPERAND (exp, 0);
5702 op0b = TREE_OPERAND (exp, 1);
5703 tcode = TREE_CODE (exp);
5704
5705 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5706 vmode = TYPE_MODE (TREE_TYPE (op0a));
5707
5708 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5709 if (icode == CODE_FOR_nothing)
5710 {
5711 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5712 icode = get_vec_cmp_eq_icode (vmode, mask_mode);
5713 if (icode == CODE_FOR_nothing)
5714 return 0;
5715 }
5716
5717 comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
5718 unsignedp, icode, 2);
5719 create_output_operand (&ops[0], target, mask_mode);
5720 create_fixed_operand (&ops[1], comparison);
5721 create_fixed_operand (&ops[2], XEXP (comparison, 0));
5722 create_fixed_operand (&ops[3], XEXP (comparison, 1));
5723 expand_insn (icode, 4, ops);
5724 return ops[0].value;
5725 }
5726
5727 /* Expand a highpart multiply. */
5728
5729 rtx
5730 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5731 rtx target, bool uns_p)
5732 {
5733 struct expand_operand eops[3];
5734 enum insn_code icode;
5735 int method, i, nunits;
5736 machine_mode wmode;
5737 rtx m1, m2, perm;
5738 optab tab1, tab2;
5739 rtvec v;
5740
5741 method = can_mult_highpart_p (mode, uns_p);
5742 switch (method)
5743 {
5744 case 0:
5745 return NULL_RTX;
5746 case 1:
5747 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5748 return expand_binop (mode, tab1, op0, op1, target, uns_p,
5749 OPTAB_LIB_WIDEN);
5750 case 2:
5751 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5752 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5753 break;
5754 case 3:
5755 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5756 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5757 if (BYTES_BIG_ENDIAN)
5758 std::swap (tab1, tab2);
5759 break;
5760 default:
5761 gcc_unreachable ();
5762 }
5763
5764 icode = optab_handler (tab1, mode);
5765 nunits = GET_MODE_NUNITS (mode);
5766 wmode = insn_data[icode].operand[0].mode;
5767 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
5768 gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));
5769
5770 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5771 create_input_operand (&eops[1], op0, mode);
5772 create_input_operand (&eops[2], op1, mode);
5773 expand_insn (icode, 3, eops);
5774 m1 = gen_lowpart (mode, eops[0].value);
5775
5776 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5777 create_input_operand (&eops[1], op0, mode);
5778 create_input_operand (&eops[2], op1, mode);
5779 expand_insn (optab_handler (tab2, mode), 3, eops);
5780 m2 = gen_lowpart (mode, eops[0].value);
5781
5782 v = rtvec_alloc (nunits);
5783 if (method == 2)
5784 {
5785 for (i = 0; i < nunits; ++i)
5786 RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
5787 + ((i & 1) ? nunits : 0));
5788 }
5789 else
5790 {
5791 for (i = 0; i < nunits; ++i)
5792 RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
5793 }
5794 perm = gen_rtx_CONST_VECTOR (mode, v);
5795
5796 return expand_vec_perm (mode, m1, m2, perm, target);
5797 }
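
/* Worked selector example (illustrative): for V8HImode on a little-endian
   target, method 2 (even/odd widening multiplies, wmode == V4SImode)
   builds the permutation { 1, 9, 3, 11, 5, 13, 7, 15 }, interleaving the
   high halfword of each V4SI product taken from M1 (even input elements)
   and M2 (odd input elements); method 3 (lo/hi widening multiplies)
   instead builds { 1, 3, 5, 7, 9, 11, 13, 15 }.  */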
5798 \f
5799 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5800 pattern. */
5801
5802 static void
5803 find_cc_set (rtx x, const_rtx pat, void *data)
5804 {
5805 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
5806 && GET_CODE (pat) == SET)
5807 {
5808 rtx *p_cc_reg = (rtx *) data;
5809 gcc_assert (!*p_cc_reg);
5810 *p_cc_reg = x;
5811 }
5812 }
5813
5814 /* This is a helper function for the other atomic operations. This function
5815 emits a loop that contains SEQ that iterates until a compare-and-swap
5816 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5817 a set of instructions that takes a value from OLD_REG as an input and
5818 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5819 set to the current contents of MEM. After SEQ, a compare-and-swap will
5820 attempt to update MEM with NEW_REG. The function returns true when the
5821 loop was generated successfully. */
5822
5823 static bool
5824 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5825 {
5826 machine_mode mode = GET_MODE (mem);
5827 rtx_code_label *label;
5828 rtx cmp_reg, success, oldval;
5829
5830 /* The loop we want to generate looks like
5831
5832 cmp_reg = mem;
5833 label:
5834 old_reg = cmp_reg;
5835 seq;
5836 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5837 if (!success)
5838 goto label;
5839
5840 Note that we only do the plain load from memory once. Subsequent
5841 iterations use the value loaded by the compare-and-swap pattern. */
5842
5843 label = gen_label_rtx ();
5844 cmp_reg = gen_reg_rtx (mode);
5845
5846 emit_move_insn (cmp_reg, mem);
5847 emit_label (label);
5848 emit_move_insn (old_reg, cmp_reg);
5849 if (seq)
5850 emit_insn (seq);
5851
5852 success = NULL_RTX;
5853 oldval = cmp_reg;
5854 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
5855 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
5856 MEMMODEL_RELAXED))
5857 return false;
5858
5859 if (oldval != cmp_reg)
5860 emit_move_insn (cmp_reg, oldval);
5861
5862 /* Mark this jump predicted not taken. */
5863 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
5864 GET_MODE (success), 1, label,
5865 profile_probability::guessed_never ());
5866 return true;
5867 }
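
/* C-level analogue of the emitted loop (illustrative sketch only):

     cmp_reg = *mem;
     do
       {
         old_reg = cmp_reg;
         ... SEQ: compute new_reg from old_reg ...
         success = CAS (mem, old_reg, new_reg, &cmp_reg);
       }
     while (!success);

   where the CAS is seq-cst, stores the value it actually found in memory
   back into cmp_reg, and the backward branch is predicted not taken.  */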
5868
5869
5870 /* This function tries to emit an atomic_exchange instruction.  VAL is written
5871 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5872 using TARGET if possible. */
5873
5874 static rtx
5875 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
5876 {
5877 machine_mode mode = GET_MODE (mem);
5878 enum insn_code icode;
5879
5880 /* If the target supports the exchange directly, great. */
5881 icode = direct_optab_handler (atomic_exchange_optab, mode);
5882 if (icode != CODE_FOR_nothing)
5883 {
5884 struct expand_operand ops[4];
5885
5886 create_output_operand (&ops[0], target, mode);
5887 create_fixed_operand (&ops[1], mem);
5888 create_input_operand (&ops[2], val, mode);
5889 create_integer_operand (&ops[3], model);
5890 if (maybe_expand_insn (icode, 4, ops))
5891 return ops[0].value;
5892 }
5893
5894 return NULL_RTX;
5895 }
5896
5897 /* This function tries to implement an atomic exchange operation using
5898 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5899 The previous contents of *MEM are returned, using TARGET if possible.
5900 Since this instruction is an acquire barrier only, stronger memory
5901 models may require additional barriers to be emitted. */
5902
5903 static rtx
5904 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
5905 enum memmodel model)
5906 {
5907 machine_mode mode = GET_MODE (mem);
5908 enum insn_code icode;
5909 rtx_insn *last_insn = get_last_insn ();
5910
5911 icode = optab_handler (sync_lock_test_and_set_optab, mode);
5912
5913 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
5914 exists, and the memory model is stronger than acquire, add a release
5915 barrier before the instruction. */
5916
5917 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
5918 expand_mem_thread_fence (model);
5919
5920 if (icode != CODE_FOR_nothing)
5921 {
5922 struct expand_operand ops[3];
5923 create_output_operand (&ops[0], target, mode);
5924 create_fixed_operand (&ops[1], mem);
5925 create_input_operand (&ops[2], val, mode);
5926 if (maybe_expand_insn (icode, 3, ops))
5927 return ops[0].value;
5928 }
5929
5930 /* If an external test-and-set libcall is provided, use that instead of
5931 any external compare-and-swap that we might get from the compare-and-
5932 swap-loop expansion later. */
5933 if (!can_compare_and_swap_p (mode, false))
5934 {
5935 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
5936 if (libfunc != NULL)
5937 {
5938 rtx addr;
5939
5940 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
5941 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
5942 mode, addr, ptr_mode,
5943 val, mode);
5944 }
5945 }
5946
5947 /* If the test_and_set can't be emitted, eliminate any barrier that might
5948 have been emitted. */
5949 delete_insns_since (last_insn);
5950 return NULL_RTX;
5951 }
5952
5953 /* This function tries to implement an atomic exchange operation using a
5954 compare_and_swap loop. VAL is written to *MEM. The previous contents of
5955 *MEM are returned, using TARGET if possible. No memory model is required
5956 since a compare_and_swap loop is seq-cst. */
5957
5958 static rtx
5959 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
5960 {
5961 machine_mode mode = GET_MODE (mem);
5962
5963 if (can_compare_and_swap_p (mode, true))
5964 {
5965 if (!target || !register_operand (target, mode))
5966 target = gen_reg_rtx (mode);
5967 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
5968 return target;
5969 }
5970
5971 return NULL_RTX;
5972 }
5973
5974 /* This function tries to implement an atomic test-and-set operation
5975 using the atomic_test_and_set instruction pattern. A boolean value
5976 is returned from the operation, using TARGET if possible. */
5977
5978 static rtx
5979 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
5980 {
5981 machine_mode pat_bool_mode;
5982 struct expand_operand ops[3];
5983
5984 if (!targetm.have_atomic_test_and_set ())
5985 return NULL_RTX;
5986
5987 /* While we always get QImode from __atomic_test_and_set, we get
5988 other memory modes from __sync_lock_test_and_set. Note that we
5989 use no endian adjustment here. This matches the 4.6 behavior
5990 in the Sparc backend. */
5991 enum insn_code icode = targetm.code_for_atomic_test_and_set;
5992 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
5993 if (GET_MODE (mem) != QImode)
5994 mem = adjust_address_nv (mem, QImode, 0);
5995
5996 pat_bool_mode = insn_data[icode].operand[0].mode;
5997 create_output_operand (&ops[0], target, pat_bool_mode);
5998 create_fixed_operand (&ops[1], mem);
5999 create_integer_operand (&ops[2], model);
6000
6001 if (maybe_expand_insn (icode, 3, ops))
6002 return ops[0].value;
6003 return NULL_RTX;
6004 }
6005
6006 /* This function expands the legacy __sync_lock_test_and_set operation, which is
6007 generally an atomic exchange. Some limited targets only allow the
6008 constant 1 to be stored. This is an ACQUIRE operation.
6009
6010 TARGET is an optional place to stick the return value.
6011 MEM is where VAL is stored. */
6012
6013 rtx
6014 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6015 {
6016 rtx ret;
6017
6018 /* Try an atomic_exchange first. */
6019 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6020 if (ret)
6021 return ret;
6022
6023 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6024 MEMMODEL_SYNC_ACQUIRE);
6025 if (ret)
6026 return ret;
6027
6028 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6029 if (ret)
6030 return ret;
6031
6032 /* If there are no other options, try atomic_test_and_set if the value
6033 being stored is 1. */
6034 if (val == const1_rtx)
6035 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6036
6037 return ret;
6038 }
6039
6040 /* This function expands the atomic test_and_set operation:
6041 atomically store a boolean TRUE into MEM and return the previous value.
6042
6043 MEMMODEL is the memory model variant to use.
6044 TARGET is an optional place to stick the return value. */
6045
6046 rtx
6047 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6048 {
6049 machine_mode mode = GET_MODE (mem);
6050 rtx ret, trueval, subtarget;
6051
6052 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6053 if (ret)
6054 return ret;
6055
6056 /* Be binary compatible with non-default settings of trueval, and different
6057 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6058 another only has atomic-exchange. */
6059 if (targetm.atomic_test_and_set_trueval == 1)
6060 {
6061 trueval = const1_rtx;
6062 subtarget = target ? target : gen_reg_rtx (mode);
6063 }
6064 else
6065 {
6066 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6067 subtarget = gen_reg_rtx (mode);
6068 }
6069
6070 /* Try the atomic-exchange optab... */
6071 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6072
6073 /* ... then an atomic-compare-and-swap loop ... */
6074 if (!ret)
6075 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6076
6077 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6078 if (!ret)
6079 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6080
6081 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6082 things with the value 1. Thus we try again without trueval. */
6083 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6084 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6085
6086 /* Failing all else, assume a single-threaded environment and simply
6087 perform the operation. */
6088 if (!ret)
6089 {
6090 /* If the result is ignored, skip the move to target. */
6091 if (subtarget != const0_rtx)
6092 emit_move_insn (subtarget, mem);
6093
6094 emit_move_insn (mem, trueval);
6095 ret = subtarget;
6096 }
6097
6098 /* Recall that we have to return a boolean value; rectify if trueval
6099 is not exactly one. */
6100 if (targetm.atomic_test_and_set_trueval != 1)
6101 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6102
6103 return ret;
6104 }
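
/* Example of the trueval fixup above (illustrative; the exact value is
   target-specific): on a target whose test-and-set primitive stores 0xff
   rather than 1, i.e. targetm.atomic_test_and_set_trueval == 0xff, the
   exchange writes 0xff, and the final emit_store_flag_force reduces the
   fetched byte to the required 0/1 boolean via a NE-against-zero test.  */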
6105
6106 /* This function expands the atomic exchange operation:
6107 atomically store VAL in MEM and return the previous value in MEM.
6108
6109 MEMMODEL is the memory model variant to use.
6110 TARGET is an optional place to stick the return value. */
6111
6112 rtx
6113 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6114 {
6115 machine_mode mode = GET_MODE (mem);
6116 rtx ret;
6117
6118 /* If loads are not atomic for the required size and we are not called to
6119 provide a __sync builtin, do not do anything so that we stay consistent
6120 with atomic loads of the same size. */
6121 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6122 return NULL_RTX;
6123
6124 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6125
6126 /* Next try a compare-and-swap loop for the exchange. */
6127 if (!ret)
6128 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6129
6130 return ret;
6131 }
6132
6133 /* This function expands the atomic compare exchange operation:
6134
6135 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6136 *PTARGET_OVAL is an optional place to store the old value from memory.
6137 Both target parameters may be NULL or const0_rtx to indicate that we do
6138 not care about that return value. Both target parameters are updated on
6139 success to the actual location of the corresponding result.
6140
6141 MEMMODEL is the memory model variant to use.
6142
6143 The return value of the function is true for success. */
6144
6145 bool
6146 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6147 rtx mem, rtx expected, rtx desired,
6148 bool is_weak, enum memmodel succ_model,
6149 enum memmodel fail_model)
6150 {
6151 machine_mode mode = GET_MODE (mem);
6152 struct expand_operand ops[8];
6153 enum insn_code icode;
6154 rtx target_oval, target_bool = NULL_RTX;
6155 rtx libfunc;
6156
6157 /* If loads are not atomic for the required size and we are not called to
6158 provide a __sync builtin, do not do anything so that we stay consistent
6159 with atomic loads of the same size. */
6160 if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
6161 return false;
6162
6163 /* Load expected into a register for the compare and swap. */
6164 if (MEM_P (expected))
6165 expected = copy_to_reg (expected);
6166
6167 /* Make sure we always have some place to put the return oldval.
6168 Further, make sure that place is distinct from the input expected,
6169 just in case we need that path down below. */
6170 if (ptarget_oval && *ptarget_oval == const0_rtx)
6171 ptarget_oval = NULL;
6172
6173 if (ptarget_oval == NULL
6174 || (target_oval = *ptarget_oval) == NULL
6175 || reg_overlap_mentioned_p (expected, target_oval))
6176 target_oval = gen_reg_rtx (mode);
6177
6178 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6179 if (icode != CODE_FOR_nothing)
6180 {
6181 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6182
6183 if (ptarget_bool && *ptarget_bool == const0_rtx)
6184 ptarget_bool = NULL;
6185
6186 /* Make sure we always have a place for the bool operand. */
6187 if (ptarget_bool == NULL
6188 || (target_bool = *ptarget_bool) == NULL
6189 || GET_MODE (target_bool) != bool_mode)
6190 target_bool = gen_reg_rtx (bool_mode);
6191
6192 /* Emit the compare_and_swap. */
6193 create_output_operand (&ops[0], target_bool, bool_mode);
6194 create_output_operand (&ops[1], target_oval, mode);
6195 create_fixed_operand (&ops[2], mem);
6196 create_input_operand (&ops[3], expected, mode);
6197 create_input_operand (&ops[4], desired, mode);
6198 create_integer_operand (&ops[5], is_weak);
6199 create_integer_operand (&ops[6], succ_model);
6200 create_integer_operand (&ops[7], fail_model);
6201 if (maybe_expand_insn (icode, 8, ops))
6202 {
6203 /* Return success/failure. */
6204 target_bool = ops[0].value;
6205 target_oval = ops[1].value;
6206 goto success;
6207 }
6208 }
6209
6210 /* Otherwise fall back to the original __sync_val_compare_and_swap
6211 which is always seq-cst. */
6212 icode = optab_handler (sync_compare_and_swap_optab, mode);
6213 if (icode != CODE_FOR_nothing)
6214 {
6215 rtx cc_reg;
6216
6217 create_output_operand (&ops[0], target_oval, mode);
6218 create_fixed_operand (&ops[1], mem);
6219 create_input_operand (&ops[2], expected, mode);
6220 create_input_operand (&ops[3], desired, mode);
6221 if (!maybe_expand_insn (icode, 4, ops))
6222 return false;
6223
6224 target_oval = ops[0].value;
6225
6226 /* If the caller isn't interested in the boolean return value,
6227 skip the computation of it. */
6228 if (ptarget_bool == NULL)
6229 goto success;
6230
6231 /* Otherwise, work out if the compare-and-swap succeeded. */
6232 cc_reg = NULL_RTX;
6233 if (have_insn_for (COMPARE, CCmode))
6234 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6235 if (cc_reg)
6236 {
6237 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6238 const0_rtx, VOIDmode, 0, 1);
6239 goto success;
6240 }
6241 goto success_bool_from_val;
6242 }
6243
6244 /* Also check for library support for __sync_val_compare_and_swap. */
6245 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6246 if (libfunc != NULL)
6247 {
6248 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6249 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6250 mode, addr, ptr_mode,
6251 expected, mode, desired, mode);
6252 emit_move_insn (target_oval, target);
6253
6254 /* Compute the boolean return value only if requested. */
6255 if (ptarget_bool)
6256 goto success_bool_from_val;
6257 else
6258 goto success;
6259 }
6260
6261 /* Failure. */
6262 return false;
6263
6264 success_bool_from_val:
6265 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6266 expected, VOIDmode, 1, 1);
6267 success:
6268 /* Make sure that the oval output winds up where the caller asked. */
6269 if (ptarget_oval)
6270 *ptarget_oval = target_oval;
6271 if (ptarget_bool)
6272 *ptarget_bool = target_bool;
6273 return true;
6274 }
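
/* Usage sketch (illustrative): a call expanding

     ok = __atomic_compare_exchange_n (mem, &expected, desired, 0,
                                       __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);

   arrives here with is_weak == false, succ_model == MEMMODEL_SEQ_CST and
   fail_model == MEMMODEL_RELAXED; on success *ptarget_bool is updated to
   the rtx holding OK and *ptarget_oval to the rtx holding the old value
   read from memory.  */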
6275
6276 /* Generate asm volatile("" : : : "memory") as the memory barrier. */
6277
6278 static void
6279 expand_asm_memory_barrier (void)
6280 {
6281 rtx asm_op, clob;
6282
6283 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6284 rtvec_alloc (0), rtvec_alloc (0),
6285 rtvec_alloc (0), UNKNOWN_LOCATION);
6286 MEM_VOLATILE_P (asm_op) = 1;
6287
6288 clob = gen_rtx_SCRATCH (VOIDmode);
6289 clob = gen_rtx_MEM (BLKmode, clob);
6290 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6291
6292 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6293 }
6294
6295 /* This routine will either emit the mem_thread_fence pattern or issue a
6296 sync_synchronize to generate a fence for memory model MEMMODEL. */
6297
6298 void
6299 expand_mem_thread_fence (enum memmodel model)
6300 {
6301 if (is_mm_relaxed (model))
6302 return;
6303 if (targetm.have_mem_thread_fence ())
6304 {
6305 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6306 expand_asm_memory_barrier ();
6307 }
6308 else if (targetm.have_memory_barrier ())
6309 emit_insn (targetm.gen_memory_barrier ());
6310 else if (synchronize_libfunc != NULL_RTX)
6311 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
6312 else
6313 expand_asm_memory_barrier ();
6314 }
6315
6316 /* Emit a signal fence with given memory model. */
6317
6318 void
6319 expand_mem_signal_fence (enum memmodel model)
6320 {
6321 /* No machine barrier is required to implement a signal fence, but
6322 a compiler memory barrier must be issued, except for relaxed MM. */
6323 if (!is_mm_relaxed (model))
6324 expand_asm_memory_barrier ();
6325 }
6326
6327 /* This function expands the atomic load operation:
6328 return the atomically loaded value in MEM.
6329
6330 MEMMODEL is the memory model variant to use.
6331 TARGET is an optional place to stick the return value.  */
6332
6333 rtx
6334 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6335 {
6336 machine_mode mode = GET_MODE (mem);
6337 enum insn_code icode;
6338
6339 /* If the target supports the load directly, great. */
6340 icode = direct_optab_handler (atomic_load_optab, mode);
6341 if (icode != CODE_FOR_nothing)
6342 {
6343 struct expand_operand ops[3];
6344 rtx_insn *last = get_last_insn ();
6345 if (is_mm_seq_cst (model))
6346 expand_asm_memory_barrier ();
6347
6348 create_output_operand (&ops[0], target, mode);
6349 create_fixed_operand (&ops[1], mem);
6350 create_integer_operand (&ops[2], model);
6351 if (maybe_expand_insn (icode, 3, ops))
6352 {
6353 if (!is_mm_relaxed (model))
6354 expand_asm_memory_barrier ();
6355 return ops[0].value;
6356 }
6357 delete_insns_since (last);
6358 }
6359
6360 /* If the size of the object is greater than word size on this target,
6361 then we assume that a load will not be atomic. We could try to
6362 emulate a load with a compare-and-swap operation, but the store such
6363 an operation performs would be incorrect if this is a volatile
6364 atomic load or if we are targeting read-only-mapped memory. */
6365 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6366 /* If there is no atomic load, leave the library call. */
6367 return NULL_RTX;
6368
6369 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6370 if (!target || target == const0_rtx)
6371 target = gen_reg_rtx (mode);
6372
6373 /* For SEQ_CST, emit a barrier before the load. */
6374 if (is_mm_seq_cst (model))
6375 expand_mem_thread_fence (model);
6376
6377 emit_move_insn (target, mem);
6378
6379 /* Emit the appropriate barrier after the load. */
6380 expand_mem_thread_fence (model);
6381
6382 return target;
6383 }
6384
6385 /* This function expands the atomic store operation:
6386 Atomically store VAL in MEM.
6387 MEMMODEL is the memory model variant to use.
6388 USE_RELEASE is true if __sync_lock_release can be used as a fallback.
6389 The function returns const0_rtx if a pattern was emitted. */
6390
6391 rtx
6392 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
6393 {
6394 machine_mode mode = GET_MODE (mem);
6395 enum insn_code icode;
6396 struct expand_operand ops[3];
6397
6398 /* If the target supports the store directly, great. */
6399 icode = direct_optab_handler (atomic_store_optab, mode);
6400 if (icode != CODE_FOR_nothing)
6401 {
6402 rtx_insn *last = get_last_insn ();
6403 if (!is_mm_relaxed (model))
6404 expand_asm_memory_barrier ();
6405 create_fixed_operand (&ops[0], mem);
6406 create_input_operand (&ops[1], val, mode);
6407 create_integer_operand (&ops[2], model);
6408 if (maybe_expand_insn (icode, 3, ops))
6409 {
6410 if (is_mm_seq_cst (model))
6411 expand_asm_memory_barrier ();
6412 return const0_rtx;
6413 }
6414 delete_insns_since (last);
6415 }
6416
6417 /* If using __sync_lock_release is a viable alternative, try it.
6418 Note that USE_RELEASE will not be true if we are expanding a generic
6419 __atomic_store_n. */
6420 if (use_release)
6421 {
6422 icode = direct_optab_handler (sync_lock_release_optab, mode);
6423 if (icode != CODE_FOR_nothing)
6424 {
6425 create_fixed_operand (&ops[0], mem);
6426 create_input_operand (&ops[1], const0_rtx, mode);
6427 if (maybe_expand_insn (icode, 2, ops))
6428 {
6429 /* lock_release is only a release barrier. */
6430 if (is_mm_seq_cst (model))
6431 expand_mem_thread_fence (model);
6432 return const0_rtx;
6433 }
6434 }
6435 }
6436
6437 /* If the size of the object is greater than word size on this target,
6438 a default store will not be atomic. */
6439 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6440 {
6441 /* If loads are atomic or we are called to provide a __sync builtin,
6442 we can try an atomic_exchange and throw away the result.  Otherwise,
6443 don't do anything so that we do not create an inconsistency between
6444 loads and stores. */
6445 if (can_atomic_load_p (mode) || is_mm_sync (model))
6446 {
6447 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
6448 if (!target)
6449 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
6450 val);
6451 if (target)
6452 return const0_rtx;
6453 }
6454 return NULL_RTX;
6455 }
6456
6457 /* Otherwise assume stores are atomic, and emit the proper barriers. */
6458 expand_mem_thread_fence (model);
6459
6460 emit_move_insn (mem, val);
6461
6462 /* For SEQ_CST, also emit a barrier after the store. */
6463 if (is_mm_seq_cst (model))
6464 expand_mem_thread_fence (model);
6465
6466 return const0_rtx;
6467 }
6468
6469
6470 /* Structure containing the pointers and values required to process the
6471 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
6472
6473 struct atomic_op_functions
6474 {
6475 direct_optab mem_fetch_before;
6476 direct_optab mem_fetch_after;
6477 direct_optab mem_no_result;
6478 optab fetch_before;
6479 optab fetch_after;
6480 direct_optab no_result;
6481 enum rtx_code reverse_code;
6482 };
6483
6484
6485 /* Fill in structure pointed to by OP with the various optab entries for an
6486 operation of type CODE. */
6487
6488 static void
6489 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
6490 {
6491 gcc_assert (op != NULL);
6492
6493 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
6494 in the source code during compilation, and the optab entries are not
6495 computable until runtime. Fill in the values at runtime. */
6496 switch (code)
6497 {
6498 case PLUS:
6499 op->mem_fetch_before = atomic_fetch_add_optab;
6500 op->mem_fetch_after = atomic_add_fetch_optab;
6501 op->mem_no_result = atomic_add_optab;
6502 op->fetch_before = sync_old_add_optab;
6503 op->fetch_after = sync_new_add_optab;
6504 op->no_result = sync_add_optab;
6505 op->reverse_code = MINUS;
6506 break;
6507 case MINUS:
6508 op->mem_fetch_before = atomic_fetch_sub_optab;
6509 op->mem_fetch_after = atomic_sub_fetch_optab;
6510 op->mem_no_result = atomic_sub_optab;
6511 op->fetch_before = sync_old_sub_optab;
6512 op->fetch_after = sync_new_sub_optab;
6513 op->no_result = sync_sub_optab;
6514 op->reverse_code = PLUS;
6515 break;
6516 case XOR:
6517 op->mem_fetch_before = atomic_fetch_xor_optab;
6518 op->mem_fetch_after = atomic_xor_fetch_optab;
6519 op->mem_no_result = atomic_xor_optab;
6520 op->fetch_before = sync_old_xor_optab;
6521 op->fetch_after = sync_new_xor_optab;
6522 op->no_result = sync_xor_optab;
6523 op->reverse_code = XOR;
6524 break;
6525 case AND:
6526 op->mem_fetch_before = atomic_fetch_and_optab;
6527 op->mem_fetch_after = atomic_and_fetch_optab;
6528 op->mem_no_result = atomic_and_optab;
6529 op->fetch_before = sync_old_and_optab;
6530 op->fetch_after = sync_new_and_optab;
6531 op->no_result = sync_and_optab;
6532 op->reverse_code = UNKNOWN;
6533 break;
6534 case IOR:
6535 op->mem_fetch_before = atomic_fetch_or_optab;
6536 op->mem_fetch_after = atomic_or_fetch_optab;
6537 op->mem_no_result = atomic_or_optab;
6538 op->fetch_before = sync_old_ior_optab;
6539 op->fetch_after = sync_new_ior_optab;
6540 op->no_result = sync_ior_optab;
6541 op->reverse_code = UNKNOWN;
6542 break;
6543 case NOT:
6544 op->mem_fetch_before = atomic_fetch_nand_optab;
6545 op->mem_fetch_after = atomic_nand_fetch_optab;
6546 op->mem_no_result = atomic_nand_optab;
6547 op->fetch_before = sync_old_nand_optab;
6548 op->fetch_after = sync_new_nand_optab;
6549 op->no_result = sync_nand_optab;
6550 op->reverse_code = UNKNOWN;
6551 break;
6552 default:
6553 gcc_unreachable ();
6554 }
6555 }
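
/* Example of REVERSE_CODE (illustrative): if a target only provides
   atomic_add_fetch but the caller asked for atomic_fetch_add, the
   expander emits the OP_fetch form and compensates with MINUS:
   fetch_before == fetch_after - val.  AND and IOR are not invertible,
   hence UNKNOWN; NOT (nand) is handled by dedicated compensation code
   further below.  */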
6556
6557 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
6558 using memory order MODEL. If AFTER is true the operation needs to return
6559 the value of *MEM after the operation, otherwise the previous value.
6560 TARGET is an optional place to place the result. The result is unused if
6561 it is const0_rtx.
6562 Return the result if there is a better sequence, otherwise NULL_RTX. */
6563
6564 static rtx
6565 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6566 enum memmodel model, bool after)
6567 {
6568 /* If the value is prefetched, or not used, it may be possible to replace
6569 the sequence with a native exchange operation. */
6570 if (!after || target == const0_rtx)
6571 {
6572 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
6573 if (code == AND && val == const0_rtx)
6574 {
6575 if (target == const0_rtx)
6576 target = gen_reg_rtx (GET_MODE (mem));
6577 return maybe_emit_atomic_exchange (target, mem, val, model);
6578 }
6579
6580 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
6581 if (code == IOR && val == constm1_rtx)
6582 {
6583 if (target == const0_rtx)
6584 target = gen_reg_rtx (GET_MODE (mem));
6585 return maybe_emit_atomic_exchange (target, mem, val, model);
6586 }
6587 }
6588
6589 return NULL_RTX;
6590 }
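
/* Concretely (illustrative): __atomic_fetch_and (&x, 0, model) and
   __atomic_fetch_or (&x, -1, model) store values independent of the old
   contents of x, so when only the fetched value (or nothing) is wanted
   they degenerate to __atomic_exchange_n with 0 and -1 respectively.  */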
6591
6592 /* Try to emit an instruction for a specific operation variation.
6593 OPTAB contains the OP functions.
6594 TARGET is an optional place to return the result. const0_rtx means unused.
6595 MEM is the memory location to operate on.
6596 VAL is the value to use in the operation.
6597 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6598 MODEL is the memory model, if used.
6599 AFTER is true if the returned result is the value after the operation. */
6600
6601 static rtx
6602 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
6603 rtx val, bool use_memmodel, enum memmodel model, bool after)
6604 {
6605 machine_mode mode = GET_MODE (mem);
6606 struct expand_operand ops[4];
6607 enum insn_code icode;
6608 int op_counter = 0;
6609 int num_ops;
6610
6611 /* Check to see if there is a result returned. */
6612 if (target == const0_rtx)
6613 {
6614 if (use_memmodel)
6615 {
6616 icode = direct_optab_handler (optab->mem_no_result, mode);
6617 create_integer_operand (&ops[2], model);
6618 num_ops = 3;
6619 }
6620 else
6621 {
6622 icode = direct_optab_handler (optab->no_result, mode);
6623 num_ops = 2;
6624 }
6625 }
6626 /* Otherwise, we need to generate a result. */
6627 else
6628 {
6629 if (use_memmodel)
6630 {
6631 icode = direct_optab_handler (after ? optab->mem_fetch_after
6632 : optab->mem_fetch_before, mode);
6633 create_integer_operand (&ops[3], model);
6634 num_ops = 4;
6635 }
6636 else
6637 {
6638 icode = optab_handler (after ? optab->fetch_after
6639 : optab->fetch_before, mode);
6640 num_ops = 3;
6641 }
6642 create_output_operand (&ops[op_counter++], target, mode);
6643 }
6644 if (icode == CODE_FOR_nothing)
6645 return NULL_RTX;
6646
6647 create_fixed_operand (&ops[op_counter++], mem);
6648 /* VAL may have been promoted to a wider mode. Shrink it if so. */
6649 create_convert_operand_to (&ops[op_counter++], val, mode, true);
6650
6651 if (maybe_expand_insn (icode, num_ops, ops))
6652 return (target == const0_rtx ? const0_rtx : ops[0].value);
6653
6654 return NULL_RTX;
6655 }
6656
6657
6658 /* This function expands an atomic fetch_OP or OP_fetch operation:
6659 TARGET is an optional place to stick the return value.  const0_rtx indicates
6660 the result is unused.
6661 Atomically fetch MEM, perform the operation with VAL and store the result back to MEM.
6662 CODE is the operation being performed (OP).
6663 MEMMODEL is the memory model variant to use.
6664 AFTER is true to return the result of the operation (OP_fetch).
6665 AFTER is false to return the value before the operation (fetch_OP).
6666
6667 This function will *only* generate instructions if there is a direct
6668 optab. No compare and swap loops or libcalls will be generated. */
6669
6670 static rtx
6671 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6672 enum rtx_code code, enum memmodel model,
6673 bool after)
6674 {
6675 machine_mode mode = GET_MODE (mem);
6676 struct atomic_op_functions optab;
6677 rtx result;
6678 bool unused_result = (target == const0_rtx);
6679
6680 get_atomic_op_for_code (&optab, code);
6681
6682 /* Check to see if there are any better instructions. */
6683 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6684 if (result)
6685 return result;
6686
6687 /* Check for the case where the result isn't used and try those patterns. */
6688 if (unused_result)
6689 {
6690 /* Try the memory model variant first. */
6691 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6692 if (result)
6693 return result;
6694
6695 /* Next try the old style without a memory model. */
6696 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6697 if (result)
6698 return result;
6699
6700 /* There is no no-result pattern, so try patterns with a result. */
6701 target = NULL_RTX;
6702 }
6703
6704 /* Try the __atomic version. */
6705 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6706 if (result)
6707 return result;
6708
6709 /* Try the older __sync version. */
6710 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6711 if (result)
6712 return result;
6713
6714 /* If the fetch value can be calculated from the other variation of fetch,
6715 try that operation. */
6716 if (after || unused_result || optab.reverse_code != UNKNOWN)
6717 {
6718 /* Try the __atomic version, then the older __sync version. */
6719 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6720 if (!result)
6721 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6722
6723 if (result)
6724 {
6725 /* If the result isn't used, no need to do compensation code. */
6726 if (unused_result)
6727 return result;
6728
6729 /* Issue compensation code. Fetch_after == fetch_before OP val.
6730 Fetch_before == fetch_after REVERSE_OP val. */
6731 if (!after)
6732 code = optab.reverse_code;
6733 if (code == NOT)
6734 {
6735 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6736 true, OPTAB_LIB_WIDEN);
6737 result = expand_simple_unop (mode, NOT, result, target, true);
6738 }
6739 else
6740 result = expand_simple_binop (mode, code, result, val, target,
6741 true, OPTAB_LIB_WIDEN);
6742 return result;
6743 }
6744 }
6745
6746 /* No direct opcode can be generated. */
6747 return NULL_RTX;
6748 }
6749
6750
6751
6752 /* This function expands an atomic fetch_OP or OP_fetch operation:
6753 TARGET is an optional place to stick the return value.  const0_rtx indicates
6754 the result is unused.
6755 Atomically fetch MEM, perform the operation with VAL and store the result back to MEM.
6756 CODE is the operation being performed (OP).
6757 MEMMODEL is the memory model variant to use.
6758 AFTER is true to return the result of the operation (OP_fetch).
6759 AFTER is false to return the value before the operation (fetch_OP). */
6760 rtx
6761 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6762 enum memmodel model, bool after)
6763 {
6764 machine_mode mode = GET_MODE (mem);
6765 rtx result;
6766 bool unused_result = (target == const0_rtx);
6767
6768 /* If loads are not atomic for the required size and we are not called to
6769 provide a __sync builtin, do not do anything so that we stay consistent
6770 with atomic loads of the same size. */
6771 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6772 return NULL_RTX;
6773
6774 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
6775 after);
6776
6777 if (result)
6778 return result;
6779
6780 /* Add/sub can be implemented by doing the reverse operation with -(val). */
6781 if (code == PLUS || code == MINUS)
6782 {
6783 rtx tmp;
6784 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
6785
6786 start_sequence ();
6787 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
6788 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
6789 model, after);
6790 if (result)
6791 {
6792 /* The reversed operation worked, so emit the insns and return. */
6793 tmp = get_insns ();
6794 end_sequence ();
6795 emit_insn (tmp);
6796 return result;
6797 }
6798
6799 /* The reversed operation did not work, so throw away the negation code and continue. */
6800 end_sequence ();
6801 }
6802
6803 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
6804 if (!can_compare_and_swap_p (mode, false))
6805 {
6806 rtx libfunc;
6807 bool fixup = false;
6808 enum rtx_code orig_code = code;
6809 struct atomic_op_functions optab;
6810
6811 get_atomic_op_for_code (&optab, code);
6812 libfunc = optab_libfunc (after ? optab.fetch_after
6813 : optab.fetch_before, mode);
6814 if (libfunc == NULL
6815 && (after || unused_result || optab.reverse_code != UNKNOWN))
6816 {
6817 fixup = true;
6818 if (!after)
6819 code = optab.reverse_code;
6820 libfunc = optab_libfunc (after ? optab.fetch_before
6821 : optab.fetch_after, mode);
6822 }
6823 if (libfunc != NULL)
6824 {
6825 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6826 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
6827 addr, ptr_mode, val, mode);
6828
6829 if (!unused_result && fixup)
6830 result = expand_simple_binop (mode, code, result, val, target,
6831 true, OPTAB_LIB_WIDEN);
6832 return result;
6833 }
6834
6835 /* We need the original code for any further attempts. */
6836 code = orig_code;
6837 }
6838
6839 /* If nothing else has succeeded, default to a compare and swap loop. */
6840 if (can_compare_and_swap_p (mode, true))
6841 {
6842 rtx_insn *insn;
6843 rtx t0 = gen_reg_rtx (mode), t1;
6844
6845 start_sequence ();
6846
6847 /* If the result is used, get a register for it. */
6848 if (!unused_result)
6849 {
6850 if (!target || !register_operand (target, mode))
6851 target = gen_reg_rtx (mode);
6852 /* If fetch_before, copy the value now. */
6853 if (!after)
6854 emit_move_insn (target, t0);
6855 }
6856 else
6857 target = const0_rtx;
6858
6859 t1 = t0;
6860 if (code == NOT)
6861 {
6862 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
6863 true, OPTAB_LIB_WIDEN);
6864 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
6865 }
6866 else
6867 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
6868 OPTAB_LIB_WIDEN);
6869
6870 /* For after, copy the value now. */
6871 if (!unused_result && after)
6872 emit_move_insn (target, t1);
6873 insn = get_insns ();
6874 end_sequence ();
6875
6876 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6877 return target;
6878 }
6879
6880 return NULL_RTX;
6881 }
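
/* Example of the add/sub fallback above (illustrative): if a target has
   an atomic_fetch_add pattern but no atomic_fetch_sub, then
   __atomic_fetch_sub (&x, v, model) is expanded by negating v inside a
   scratch sequence and retrying as __atomic_fetch_add (&x, -v, model);
   if even the reversed form fails, the negation insns are discarded.  */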
6882 \f
6883 /* Return true if OPERAND is suitable for operand number OPNO of
6884 instruction ICODE. */
6885
6886 bool
6887 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
6888 {
6889 return (!insn_data[(int) icode].operand[opno].predicate
6890 || (insn_data[(int) icode].operand[opno].predicate
6891 (operand, insn_data[(int) icode].operand[opno].mode)));
6892 }
6893 \f
6894 /* TARGET is a target of a multiword operation that we are going to
6895 implement as a series of word-mode operations. Return true if
6896 TARGET is suitable for this purpose. */
6897
6898 bool
6899 valid_multiword_target_p (rtx target)
6900 {
6901 machine_mode mode;
6902 int i;
6903
6904 mode = GET_MODE (target);
6905 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
6906 if (!validate_subreg (word_mode, mode, target, i))
6907 return false;
6908 return true;
6909 }
6910
6911 /* Like maybe_legitimize_operand, but do not change the code of the
6912 current rtx value. */
6913
6914 static bool
6915 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
6916 struct expand_operand *op)
6917 {
6918 /* See if the operand matches in its current form. */
6919 if (insn_operand_matches (icode, opno, op->value))
6920 return true;
6921
6922 /* If the operand is a memory whose address has no side effects,
6923 try forcing the address into a non-virtual pseudo register.
6924 The check for side effects is important because copy_to_mode_reg
6925 cannot handle things like auto-modified addresses. */
6926 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
6927 {
6928 rtx addr, mem;
6929
6930 mem = op->value;
6931 addr = XEXP (mem, 0);
6932 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
6933 && !side_effects_p (addr))
6934 {
6935 rtx_insn *last;
6936 machine_mode mode;
6937
6938 last = get_last_insn ();
6939 mode = get_address_mode (mem);
6940 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
6941 if (insn_operand_matches (icode, opno, mem))
6942 {
6943 op->value = mem;
6944 return true;
6945 }
6946 delete_insns_since (last);
6947 }
6948 }
6949
6950 return false;
6951 }
6952
6953 /* Try to make OP match operand OPNO of instruction ICODE. Return true
6954 on success, storing the new operand value back in OP. */
6955
6956 static bool
6957 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
6958 struct expand_operand *op)
6959 {
6960 machine_mode mode, imode;
6961 bool old_volatile_ok, result;
6962
6963 mode = op->mode;
6964 switch (op->type)
6965 {
6966 case EXPAND_FIXED:
6967 old_volatile_ok = volatile_ok;
6968 volatile_ok = true;
6969 result = maybe_legitimize_operand_same_code (icode, opno, op);
6970 volatile_ok = old_volatile_ok;
6971 return result;
6972
6973 case EXPAND_OUTPUT:
6974 gcc_assert (mode != VOIDmode);
6975 if (op->value
6976 && op->value != const0_rtx
6977 && GET_MODE (op->value) == mode
6978 && maybe_legitimize_operand_same_code (icode, opno, op))
6979 return true;
6980
6981 op->value = gen_reg_rtx (mode);
6982 op->target = 0;
6983 break;
6984
6985 case EXPAND_INPUT:
6986 input:
6987 gcc_assert (mode != VOIDmode);
6988 gcc_assert (GET_MODE (op->value) == VOIDmode
6989 || GET_MODE (op->value) == mode);
6990 if (maybe_legitimize_operand_same_code (icode, opno, op))
6991 return true;
6992
6993 op->value = copy_to_mode_reg (mode, op->value);
6994 break;
6995
6996 case EXPAND_CONVERT_TO:
6997 gcc_assert (mode != VOIDmode);
6998 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
6999 goto input;
7000
7001 case EXPAND_CONVERT_FROM:
7002 if (GET_MODE (op->value) != VOIDmode)
7003 mode = GET_MODE (op->value);
7004 else
7005 /* The caller must tell us what mode this value has. */
7006 gcc_assert (mode != VOIDmode);
7007
7008 imode = insn_data[(int) icode].operand[opno].mode;
7009 if (imode != VOIDmode && imode != mode)
7010 {
7011 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
7012 mode = imode;
7013 }
7014 goto input;
7015
7016 case EXPAND_ADDRESS:
7017 op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
7018 op->value);
7019 goto input;
7020
7021 case EXPAND_INTEGER:
7022 mode = insn_data[(int) icode].operand[opno].mode;
7023 if (mode != VOIDmode && const_int_operand (op->value, mode))
7024 goto input;
7025 break;
7026 }
7027 return insn_operand_matches (icode, opno, op->value);
7028 }
7029
7030 /* Make OP describe an input operand that should have the same value
7031 as VALUE, after any mode conversion that the target might request.
7032 TYPE is the type of VALUE. */
7033
7034 void
7035 create_convert_operand_from_type (struct expand_operand *op,
7036 rtx value, tree type)
7037 {
7038 create_convert_operand_from (op, value, TYPE_MODE (type),
7039 TYPE_UNSIGNED (type));
7040 }
7041
7042 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7043 of instruction ICODE. Return true on success, leaving the new operand
7044 values in the OPS themselves. Emit no code on failure. */
7045
7046 bool
7047 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7048 unsigned int nops, struct expand_operand *ops)
7049 {
7050 rtx_insn *last;
7051 unsigned int i;
7052
7053 last = get_last_insn ();
7054 for (i = 0; i < nops; i++)
7055 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
7056 {
7057 delete_insns_since (last);
7058 return false;
7059 }
7060 return true;
7061 }
7062
7063 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7064 as its operands. Return the instruction pattern on success,
7065 and emit any necessary set-up code. Return null and emit no
7066 code on failure. */
7067
7068 rtx_insn *
7069 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7070 struct expand_operand *ops)
7071 {
7072 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7073 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7074 return NULL;
7075
7076 switch (nops)
7077 {
7078 case 1:
7079 return GEN_FCN (icode) (ops[0].value);
7080 case 2:
7081 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7082 case 3:
7083 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7084 case 4:
7085 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7086 ops[3].value);
7087 case 5:
7088 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7089 ops[3].value, ops[4].value);
7090 case 6:
7091 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7092 ops[3].value, ops[4].value, ops[5].value);
7093 case 7:
7094 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7095 ops[3].value, ops[4].value, ops[5].value,
7096 ops[6].value);
7097 case 8:
7098 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7099 ops[3].value, ops[4].value, ops[5].value,
7100 ops[6].value, ops[7].value);
7101 case 9:
7102 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7103 ops[3].value, ops[4].value, ops[5].value,
7104 ops[6].value, ops[7].value, ops[8].value);
7105 }
7106 gcc_unreachable ();
7107 }
7108
7109 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7110 as its operands. Return true on success and emit no code on failure. */
7111
7112 bool
7113 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7114 struct expand_operand *ops)
7115 {
7116 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7117 if (pat)
7118 {
7119 emit_insn (pat);
7120 return true;
7121 }
7122 return false;
7123 }
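
/* Usage sketch (illustrative) of the expand_operand machinery, following
   the idiom used throughout this file:

     struct expand_operand ops[3];
     enum insn_code icode = optab_handler (add_optab, mode);
     create_output_operand (&ops[0], target, mode);
     create_input_operand (&ops[1], op0, mode);
     create_input_operand (&ops[2], op1, mode);
     if (maybe_expand_insn (icode, 3, ops))
       return ops[0].value;
     return NULL_RTX;

   maybe_expand_insn legitimizes the operands in place and emits nothing
   at all if any operand cannot be made to match.  */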
7124
7125 /* Like maybe_expand_insn, but for jumps. */
7126
7127 bool
7128 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7129 struct expand_operand *ops)
7130 {
7131 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7132 if (pat)
7133 {
7134 emit_jump_insn (pat);
7135 return true;
7136 }
7137 return false;
7138 }
7139
7140 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7141 as its operands. */
7142
7143 void
7144 expand_insn (enum insn_code icode, unsigned int nops,
7145 struct expand_operand *ops)
7146 {
7147 if (!maybe_expand_insn (icode, nops, ops))
7148 gcc_unreachable ();
7149 }
7150
7151 /* Like expand_insn, but for jumps. */
7152
7153 void
7154 expand_jump_insn (enum insn_code icode, unsigned int nops,
7155 struct expand_operand *ops)
7156 {
7157 if (!maybe_expand_jump_insn (icode, nops, ops))
7158 gcc_unreachable ();
7159 }