/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "except.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
\f
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
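  /* For instance (illustrative): if the last insn is
     (set (reg 60) (plus (reg 60) (reg 61))), a REG_EQUAL note of
     (plus (reg 60) (reg 61)) would describe the new value of reg 60
     in terms of its old value, which CSE would misread.  */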
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
          && (rtx_equal_p (target, op0)
              || (op1 && rtx_equal_p (target, op1))))
        {
          /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
             over expanding it as temp = MEM op X, MEM = temp.  If the target
             supports MEM = MEM op X instructions, it is sometimes too hard
             to reconstruct that form later, especially if X is also a memory,
             and due to multiple occurrences of addresses the address might
             be forced into register unnecessarily.
             Note that not emitting the REG_EQUAL note might inhibit
             CSE in some cases.  */
          set = single_set (last_insn);
          if (set
              && GET_CODE (SET_SRC (set)) == code
              && MEM_P (SET_DEST (set))
              && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
                  || (op1 && rtx_equal_p (SET_DEST (set),
                                          XEXP (SET_SRC (set), 1)))))
            return 1;
        }
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
          {
            note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
            if (GET_MODE_SIZE (GET_MODE (op0))
                > GET_MODE_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, GET_MODE (op0));
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, GET_MODE (op0));
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
\f
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this is the mode of OP0,
   but if that is a constant its mode will be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
\f
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
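/* For example (illustrative): an SImode AND widened to DImode can use a
   paradoxical lowpart SUBREG, because the extra high-order bits of the
   result are discarded when the result is truncated back to SImode.  An
   arithmetic right shift, by contrast, reads those high-order bits, so a
   real sign or zero extension is required.  */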

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend, do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     the promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_lowpart (mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
\f
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In this case WIDE_OP, OP0, and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}


/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
                                         mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}

/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

static rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = optab_handler (vec_init_optab, vmode);
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
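  /* Illustrative example: for a 64-bit left shift by OP1 == 10 on a 32-bit
     machine, the low 10 bits of the high result word are the high 10 bits
     of the low input word, obtained by shifting OUTOF_INPUT right by
     32 - 10 == 22 bits.  */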
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
                                            op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
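      /* A sketch of why ~OP1 works when shift counts are truncated:
         ~OP1 == -OP1 - 1, which the mask reduces to BITS_PER_WORD - 1 - OP1.
         Combined with the single-bit shift below, the carried bits move by
         1 + (BITS_PER_WORD - 1 - OP1) == BITS_PER_WORD - OP1 in total,
         exactly as required.  */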
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_wide_int_const
            (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
                                                op1_mode), op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}


/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
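/* For example (illustrative, little-endian, BITS_PER_WORD == 32, word shift
   counts truncated to 5 bits so SHIFT_MASK == 31): a 64-bit left shift by a
   constant count of 40 takes the superword path, so the high result word
   receives the low input word shifted left by 40 - 32 == 8, and the low
   result word is cleared.  */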

static bool
expand_doubleword_shift (machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                            cmp_code, cmp1, cmp2,
                                            outof_input, into_input,
                                            op1, superword_op1,
                                            outof_target, into_target,
                                            unsignedp, methods, shift_mask))
        return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label, -1);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
\f
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                                   _______________________
                                  [__op0_high_|__op0_low__]
                                   _______________________
        *                         [__op1_high_|__op1_low__]
        ___________________________________________________
                                   _______________________
    (1)                           [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

     If both operands are positive then no adjustment is needed.

     If the operands have different signs, for example op0_low < 0 and
     op1_low >= 0, the instruction treats the most significant bit of
     op0_low as a sign bit instead of a bit with significance
     2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
     with 2**BITS_PER_WORD - op0_low, and two's complements the
     result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
     the result.

     Similarly, if both operands are negative, we need to add
     (op0_low + op1_low) * 2**BITS_PER_WORD.

     We use a trick to adjust quickly.  We logically shift op0_low right
     (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
     op0_high (op1_high) before it is used to calculate 2b (2a).  If no
     logical shift exists, we do an arithmetic right shift and subtract
     the 0 or -1.  */
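/* A worked example of the adjustment (illustrative, using an 8-bit word
   for brevity): let op0_low == 0xff and op1_low == 0x03.  The true
   unsigned product is 255 * 3 == 765 == 0x02fd, but a signed widening
   multiply computes (-1) * 3 == 0xfffd.  Since op0_low's sign bit is set,
   op1_low * 2**8 == 0x0300 must be added back: 0xfffd + 0x0300 == 0x02fd
   modulo 2**16.  In the code below, this is achieved by adding the
   logically-shifted sign bit of op0_low (here 1) to op0_high before
   forming partial product (2b).  */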

static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
\f
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}

/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}

/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
          > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}

/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx_insn *last)
{
  machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
                                                      from_mode, 1);
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
           ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
          && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value,
                               optab_to_code (binoptab),
                               ops[1].value, ops[2].value))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}

/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, a constant invalid op1 might have been expanded from a
     different mode than MODE.  As those are invalid, force them into a
     register to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
           && shift_optab_p (binoptab)
           && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && find_widening_optab_handler (binoptab, mode,
                                      widened_mode (mode, op0, op1), 1)
         != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
                                    unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
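  /* E.g. (illustrative): in SImode, rotating left by N is equivalent to
     rotating right by 32 - N, so a rotl by 5 can be emitted as a rotr by
     27 when only the latter is supported.  */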
  if (((binoptab == rotl_optab
        && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
      && mclass == MODE_INT)
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (mode);

      if (CONST_INT_P (op1))
        newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
        newop1 = negate_rtx (GET_MODE (op1), op1);
      else
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
                               gen_int_mode (bits, GET_MODE (op1)), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
      && (widening_optab_handler ((unsignedp ? umul_widen_optab
                                             : smul_widen_optab),
                                  GET_MODE_2XWIDER_MODE (mode), mode)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
        otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
        otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
        otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
        otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
        otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
        {
          /* The scalar may have been extended to be too wide.  Truncate
             it back to the proper size to fit in the broadcast vector.  */
          machine_mode inner_mode = GET_MODE_INNER (mode);
          if (!CONST_INT_P (op1)
              && (GET_MODE_BITSIZE (inner_mode)
                  < GET_MODE_BITSIZE (GET_MODE (op1))))
            op1 = force_reg (inner_mode,
                             simplify_gen_unary (TRUNCATE, inner_mode, op1,
                                                 GET_MODE (op1)));
          rtx vop1 = expand_vector_broadcast (mode, op1);
          if (vop1)
            {
              temp = expand_binop_directly (mode, otheroptab, op0, vop1,
                                            target, unsignedp, methods, last);
              if (temp)
                return temp;
            }
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (find_widening_optab_handler ((unsignedp
                                                  ? umul_widen_optab
                                                  : smul_widen_optab),
                                                 GET_MODE_WIDER_MODE (wider_mode),
                                                 mode, 0)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && mclass == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0
          || target == op0
          || target == op1
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          emit_insn (insns);
          return target;
        }
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && mclass == MODE_INT
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx_insn *insns;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0
              || target == op0
              || target == op1
              || !valid_multiword_target_p (target))
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              emit_insn (insns);
              return target;
            }
          end_sequence ();
        }
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && mclass == MODE_INT
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }

  /* These can be done a word at a time by propagating carries.  */
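  /* (Illustrative note: an unsigned word addition wraps iff the sum is
     smaller than either operand, so the LT comparison against OP0_PIECE
     below recovers the carry; for subtraction, GT recovers the borrow.)  */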
  if ((binoptab == add_optab || binoptab == sub_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If
         STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1
         since it is the one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two possible carries together.  */
1639 carry_out = expand_binop (word_mode, ior_optab,
1640 carry_out, carry_tmp,
1641 carry_out, 0, next_methods);
1642 if (carry_out == 0)
1643 break;
1644 }
1645 emit_move_insn (target_piece, newx);
1646 }
1647 else
1648 {
1649 if (x != target_piece)
1650 emit_move_insn (target_piece, x);
1651 }
1652
1653 carry_in = carry_out;
1654 }
1655
1656 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1657 {
1658 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
1659 || ! rtx_equal_p (target, xtarget))
1660 {
1661 rtx_insn *temp = emit_move_insn (target, xtarget);
1662
1663 set_dst_reg_note (temp, REG_EQUAL,
1664 gen_rtx_fmt_ee (optab_to_code (binoptab),
1665 mode, copy_rtx (xop0),
1666 copy_rtx (xop1)),
1667 target);
1668 }
1669 else
1670 target = xtarget;
1671
1672 return target;
1673 }
1674
1675 else
1676 delete_insns_since (last);
1677 }
1678
1679 /* Attempt to synthesize double word multiplies using a sequence of word
1680 mode multiplications. We first attempt to generate a sequence using a
1681 more efficient unsigned widening multiply, and if that fails we then
1682 try using a signed widening multiply. */
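/* The identity behind this synthesis, writing W for BITS_PER_WORD and
   splitting each operand into high and low words:

     (hi0*2^W + lo0) * (hi1*2^W + lo1)
       == widen (lo0 * lo1) + ((lo0*hi1 + hi0*lo1) mod 2^W) * 2^W
          (mod 2^(2*W))

   so one widening multiply of the low words, plus truncating word
   multiplies and additions for the high word, suffices.  */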
1683
1684 if (binoptab == smul_optab
1685 && mclass == MODE_INT
1686 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1687 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
1688 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
1689 {
1690 rtx product = NULL_RTX;
1691 if (widening_optab_handler (umul_widen_optab, mode, word_mode)
1692 != CODE_FOR_nothing)
1693 {
1694 product = expand_doubleword_mult (mode, op0, op1, target,
1695 true, methods);
1696 if (!product)
1697 delete_insns_since (last);
1698 }
1699
1700 if (product == NULL_RTX
1701 && widening_optab_handler (smul_widen_optab, mode, word_mode)
1702 != CODE_FOR_nothing)
1703 {
1704 product = expand_doubleword_mult (mode, op0, op1, target,
1705 false, methods);
1706 if (!product)
1707 delete_insns_since (last);
1708 }
1709
1710 if (product != NULL_RTX)
1711 {
1712 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
1713 {
1714 temp = emit_move_insn (target ? target : product, product);
1715 set_dst_reg_note (temp,
1716 REG_EQUAL,
1717 gen_rtx_fmt_ee (MULT, mode,
1718 copy_rtx (op0),
1719 copy_rtx (op1)),
1720 target ? target : product);
1721 }
1722 return product;
1723 }
1724 }
1725
1726 /* It can't be open-coded in this mode.
1727 Use a library call if one is available and caller says that's ok. */
1728
1729 libfunc = optab_libfunc (binoptab, mode);
1730 if (libfunc
1731 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1732 {
1733 rtx_insn *insns;
1734 rtx op1x = op1;
1735 machine_mode op1_mode = mode;
1736 rtx value;
1737
1738 start_sequence ();
1739
1740 if (shift_optab_p (binoptab))
1741 {
1742 op1_mode = targetm.libgcc_shift_count_mode ();
1743 /* Specify unsigned here,
1744 since negative shift counts are meaningless. */
1745 op1x = convert_to_mode (op1_mode, op1, 1);
1746 }
1747
1748 if (GET_MODE (op0) != VOIDmode
1749 && GET_MODE (op0) != mode)
1750 op0 = convert_to_mode (mode, op0, unsignedp);
1751
1752 /* Pass 1 for NO_QUEUE so we don't lose any increments
1753 if the libcall is cse'd or moved. */
1754 value = emit_library_call_value (libfunc,
1755 NULL_RTX, LCT_CONST, mode, 2,
1756 op0, mode, op1x, op1_mode);
1757
1758 insns = get_insns ();
1759 end_sequence ();
1760
1761 bool trapv = trapv_binoptab_p (binoptab);
1762 target = gen_reg_rtx (mode);
1763 emit_libcall_block_1 (insns, target, value,
1764 trapv ? NULL_RTX
1765 : gen_rtx_fmt_ee (optab_to_code (binoptab),
1766 mode, op0, op1), trapv);
1767
1768 return target;
1769 }
1770
1771 delete_insns_since (last);
1772
1773 /* It can't be done in this mode. Can we do it in a wider mode? */
1774
1775 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1776 || methods == OPTAB_MUST_WIDEN))
1777 {
1778 /* Caller says, don't even try. */
1779 delete_insns_since (entry_last);
1780 return 0;
1781 }
1782
1783 /* Compute the value of METHODS to pass to recursive calls.
1784 Don't allow widening to be tried recursively. */
1785
1786 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1787
1788 /* Look for a wider mode of the same class for which it appears we can do
1789 the operation. */
1790
1791 if (CLASS_HAS_WIDER_MODES_P (mclass))
1792 {
1793 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1794 wider_mode != VOIDmode;
1795 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1796 {
1797 if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
1798 != CODE_FOR_nothing
1799 || (methods == OPTAB_LIB
1800 && optab_libfunc (binoptab, wider_mode)))
1801 {
1802 rtx xop0 = op0, xop1 = op1;
1803 int no_extend = 0;
1804
1805 /* For certain integer operations, we need not actually extend
1806 the narrow operands, as long as we will truncate
1807 the results to the same narrowness. */
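/* E.g. the low 8 bits of a 32-bit addition depend only on the low
   8 bits of its inputs, so for the operations listed below QImode
   operands may be widened with garbage in the upper bits.  */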
1808
1809 if ((binoptab == ior_optab || binoptab == and_optab
1810 || binoptab == xor_optab
1811 || binoptab == add_optab || binoptab == sub_optab
1812 || binoptab == smul_optab || binoptab == ashl_optab)
1813 && mclass == MODE_INT)
1814 no_extend = 1;
1815
1816 xop0 = widen_operand (xop0, wider_mode, mode,
1817 unsignedp, no_extend);
1818
1819 /* The second operand of a shift must always be extended. */
1820 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1821 no_extend && binoptab != ashl_optab);
1822
1823 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1824 unsignedp, methods);
1825 if (temp)
1826 {
1827 if (mclass != MODE_INT
1828 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1829 {
1830 if (target == 0)
1831 target = gen_reg_rtx (mode);
1832 convert_move (target, temp, 0);
1833 return target;
1834 }
1835 else
1836 return gen_lowpart (mode, temp);
1837 }
1838 else
1839 delete_insns_since (last);
1840 }
1841 }
1842 }
1843
1844 delete_insns_since (entry_last);
1845 return 0;
1846 }
1847 \f
1848 /* Expand a binary operator which has both signed and unsigned forms.
1849 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1850 signed operations.
1851
1852 If we widen unsigned operands, we may use a signed wider operation instead
1853 of an unsigned wider operation, since the result would be the same. */
1854
1855 rtx
1856 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
1857 rtx op0, rtx op1, rtx target, int unsignedp,
1858 enum optab_methods methods)
1859 {
1860 rtx temp;
1861 optab direct_optab = unsignedp ? uoptab : soptab;
1862 bool save_enable;
1863
1864 /* Do it without widening, if possible. */
1865 temp = expand_binop (mode, direct_optab, op0, op1, target,
1866 unsignedp, OPTAB_DIRECT);
1867 if (temp || methods == OPTAB_DIRECT)
1868 return temp;
1869
1870 /* Try widening to a signed int. Disable any direct use of any
1871 signed insn in the current mode. */
1872 save_enable = swap_optab_enable (soptab, mode, false);
1873
1874 temp = expand_binop (mode, soptab, op0, op1, target,
1875 unsignedp, OPTAB_WIDEN);
1876
1877 /* For unsigned operands, try widening to an unsigned int. */
1878 if (!temp && unsignedp)
1879 temp = expand_binop (mode, uoptab, op0, op1, target,
1880 unsignedp, OPTAB_WIDEN);
1881 if (temp || methods == OPTAB_WIDEN)
1882 goto egress;
1883
1884 /* Use the right width libcall if that exists. */
1885 temp = expand_binop (mode, direct_optab, op0, op1, target,
1886 unsignedp, OPTAB_LIB);
1887 if (temp || methods == OPTAB_LIB)
1888 goto egress;
1889
1890 /* Must widen and use a libcall; use either signed or unsigned. */
1891 temp = expand_binop (mode, soptab, op0, op1, target,
1892 unsignedp, methods);
1893 if (!temp && unsignedp)
1894 temp = expand_binop (mode, uoptab, op0, op1, target,
1895 unsignedp, methods);
1896
1897 egress:
1898 /* Undo the fiddling above. */
1899 if (save_enable)
1900 swap_optab_enable (soptab, mode, true);
1901 return temp;
1902 }
1903 \f
1904 /* Generate code to perform an operation specified by UNOPPTAB
1905 on operand OP0, with two results to TARG0 and TARG1.
1906 We assume that the order of the operands for the instruction
1907 is TARG0, TARG1, OP0.
1908
1909 Either TARG0 or TARG1 may be zero, but what that means is that
1910 the result is not actually wanted. We will generate it into
1911 a dummy pseudo-reg and discard it. They may not both be zero.
1912
1913 Returns 1 if this operation can be performed; 0 if not. */
1914
1915 int
1916 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1917 int unsignedp)
1918 {
1919 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1920 enum mode_class mclass;
1921 machine_mode wider_mode;
1922 rtx_insn *entry_last = get_last_insn ();
1923 rtx_insn *last;
1924
1925 mclass = GET_MODE_CLASS (mode);
1926
1927 if (!targ0)
1928 targ0 = gen_reg_rtx (mode);
1929 if (!targ1)
1930 targ1 = gen_reg_rtx (mode);
1931
1932 /* Record where to go back to if we fail. */
1933 last = get_last_insn ();
1934
1935 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
1936 {
1937 struct expand_operand ops[3];
1938 enum insn_code icode = optab_handler (unoptab, mode);
1939
1940 create_fixed_operand (&ops[0], targ0);
1941 create_fixed_operand (&ops[1], targ1);
1942 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
1943 if (maybe_expand_insn (icode, 3, ops))
1944 return 1;
1945 }
1946
1947 /* It can't be done in this mode. Can we do it in a wider mode? */
1948
1949 if (CLASS_HAS_WIDER_MODES_P (mclass))
1950 {
1951 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1952 wider_mode != VOIDmode;
1953 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1954 {
1955 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
1956 {
1957 rtx t0 = gen_reg_rtx (wider_mode);
1958 rtx t1 = gen_reg_rtx (wider_mode);
1959 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1960
1961 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1962 {
1963 convert_move (targ0, t0, unsignedp);
1964 convert_move (targ1, t1, unsignedp);
1965 return 1;
1966 }
1967 else
1968 delete_insns_since (last);
1969 }
1970 }
1971 }
1972
1973 delete_insns_since (entry_last);
1974 return 0;
1975 }
1976 \f
1977 /* Generate code to perform an operation specified by BINOPTAB
1978 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1979 We assume that the order of the operands for the instruction
1980 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1981 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1982
1983 Either TARG0 or TARG1 may be zero, but what that means is that
1984 the result is not actually wanted. We will generate it into
1985 a dummy pseudo-reg and discard it. They may not both be zero.
1986
1987 Returns 1 if this operation can be performed; 0 if not. */
1988
1989 int
1990 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1991 int unsignedp)
1992 {
1993 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1994 enum mode_class mclass;
1995 machine_mode wider_mode;
1996 rtx_insn *entry_last = get_last_insn ();
1997 rtx_insn *last;
1998
1999 mclass = GET_MODE_CLASS (mode);
2000
2001 if (!targ0)
2002 targ0 = gen_reg_rtx (mode);
2003 if (!targ1)
2004 targ1 = gen_reg_rtx (mode);
2005
2006 /* Record where to go back to if we fail. */
2007 last = get_last_insn ();
2008
2009 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2010 {
2011 struct expand_operand ops[4];
2012 enum insn_code icode = optab_handler (binoptab, mode);
2013 machine_mode mode0 = insn_data[icode].operand[1].mode;
2014 machine_mode mode1 = insn_data[icode].operand[2].mode;
2015 rtx xop0 = op0, xop1 = op1;
2016
2017 /* If we are optimizing, force expensive constants into a register. */
2018 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2019 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2020
2021 create_fixed_operand (&ops[0], targ0);
2022 create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
2023 create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
2024 create_fixed_operand (&ops[3], targ1);
2025 if (maybe_expand_insn (icode, 4, ops))
2026 return 1;
2027 delete_insns_since (last);
2028 }
2029
2030 /* It can't be done in this mode. Can we do it in a wider mode? */
2031
2032 if (CLASS_HAS_WIDER_MODES_P (mclass))
2033 {
2034 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2035 wider_mode != VOIDmode;
2036 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2037 {
2038 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2039 {
2040 rtx t0 = gen_reg_rtx (wider_mode);
2041 rtx t1 = gen_reg_rtx (wider_mode);
2042 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2043 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2044
2045 if (expand_twoval_binop (binoptab, cop0, cop1,
2046 t0, t1, unsignedp))
2047 {
2048 convert_move (targ0, t0, unsignedp);
2049 convert_move (targ1, t1, unsignedp);
2050 return 1;
2051 }
2052 else
2053 delete_insns_since (last);
2054 }
2055 }
2056 }
2057
2058 delete_insns_since (entry_last);
2059 return 0;
2060 }
2061
2062 /* Expand the two-valued library call indicated by BINOPTAB, but
2063 preserve only one of the values. If TARG0 is non-NULL, the first
2064 value is placed into TARG0; otherwise the second value is placed
2065 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2066 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2067 This routine assumes that the value returned by the library call is
2068 as if the return value was of an integral mode twice as wide as the
2069 mode of OP0. Returns 1 if the call was successful. */
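/* For instance (a hedged illustration; the actual libfuncs vary by
   target), a divmod-style libcall for SImode on a 32-bit target returns
   a DImode value with one SImode half holding the quotient and the
   other the remainder; the simplify_gen_subreg call below picks out
   whichever half corresponds to the TARG0/TARG1 that was requested.  */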
2070
2071 bool
2072 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2073 rtx targ0, rtx targ1, enum rtx_code code)
2074 {
2075 machine_mode mode;
2076 machine_mode libval_mode;
2077 rtx libval;
2078 rtx_insn *insns;
2079 rtx libfunc;
2080
2081 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2082 gcc_assert (!targ0 != !targ1);
2083
2084 mode = GET_MODE (op0);
2085 libfunc = optab_libfunc (binoptab, mode);
2086 if (!libfunc)
2087 return false;
2088
2089 /* The value returned by the library function will have twice as
2090 many bits as the nominal MODE. */
2091 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2092 MODE_INT);
2093 start_sequence ();
2094 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2095 libval_mode, 2,
2096 op0, mode,
2097 op1, mode);
2098 /* Get the part of VAL containing the value that we want. */
2099 libval = simplify_gen_subreg (mode, libval, libval_mode,
2100 targ0 ? 0 : GET_MODE_SIZE (mode));
2101 insns = get_insns ();
2102 end_sequence ();
2103 /* Move the result into the desired location. */
2104 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2105 gen_rtx_fmt_ee (code, mode, op0, op1));
2106
2107 return true;
2108 }
2109
2110 \f
2111 /* Wrapper around expand_unop which takes an rtx code to specify
2112 the operation to perform, not an optab pointer. All other
2113 arguments are the same. */
2114 rtx
2115 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2116 rtx target, int unsignedp)
2117 {
2118 optab unop = code_to_optab (code);
2119 gcc_assert (unop);
2120
2121 return expand_unop (mode, unop, op0, target, unsignedp);
2122 }
2123
2124 /* Try calculating
2125 (clz:narrow x)
2126 as
2127 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2128
2129 A similar operation can be used for clrsb. UNOPTAB says which operation
2130 we are trying to expand. */
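/* Worked example for clz, assuming QImode widened to SImode:
   for x = 0x10, (zero_extend:SI x) = 0x00000010, whose clz is 27;
   subtracting (32 - 8) = 24 yields 3, the correct QImode clz.  */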
2131 static rtx
2132 widen_leading (machine_mode mode, rtx op0, rtx target, optab unoptab)
2133 {
2134 enum mode_class mclass = GET_MODE_CLASS (mode);
2135 if (CLASS_HAS_WIDER_MODES_P (mclass))
2136 {
2137 machine_mode wider_mode;
2138 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2139 wider_mode != VOIDmode;
2140 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2141 {
2142 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2143 {
2144 rtx xop0, temp;
2145 rtx_insn *last;
2146
2147 last = get_last_insn ();
2148
2149 if (target == 0)
2150 target = gen_reg_rtx (mode);
2151 xop0 = widen_operand (op0, wider_mode, mode,
2152 unoptab != clrsb_optab, false);
2153 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2154 unoptab != clrsb_optab);
2155 if (temp != 0)
2156 temp = expand_binop
2157 (wider_mode, sub_optab, temp,
2158 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2159 - GET_MODE_PRECISION (mode),
2160 wider_mode),
2161 target, true, OPTAB_DIRECT);
2162 if (temp == 0)
2163 delete_insns_since (last);
2164
2165 return temp;
2166 }
2167 }
2168 }
2169 return 0;
2170 }
2171
2172 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2173 quantities, choosing which based on whether the high word is nonzero. */
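/* E.g. for the 64-bit value 0x0000000080000000 on a 32-bit target:
   the high word is zero, so the result is 32 + clz (0x80000000)
   = 32 + 0 = 32, the number of leading zeros of the full value.  */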
2174 static rtx
2175 expand_doubleword_clz (machine_mode mode, rtx op0, rtx target)
2176 {
2177 rtx xop0 = force_reg (mode, op0);
2178 rtx subhi = gen_highpart (word_mode, xop0);
2179 rtx sublo = gen_lowpart (word_mode, xop0);
2180 rtx_code_label *hi0_label = gen_label_rtx ();
2181 rtx_code_label *after_label = gen_label_rtx ();
2182 rtx_insn *seq;
2183 rtx temp, result;
2184
2185 /* If we were not given a target, use a word_mode register, not a
2186 'mode' register. The result will fit, and nobody is expecting
2187 anything bigger (the return type of __builtin_clz* is int). */
2188 if (!target)
2189 target = gen_reg_rtx (word_mode);
2190
2191 /* In any case, write to a word_mode scratch in both branches of the
2192 conditional, so we can ensure there is a single move insn setting
2193 'target' to tag a REG_EQUAL note on. */
2194 result = gen_reg_rtx (word_mode);
2195
2196 start_sequence ();
2197
2198 /* If the high word is not equal to zero,
2199 then clz of the full value is clz of the high word. */
2200 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2201 word_mode, true, hi0_label);
2202
2203 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2204 if (!temp)
2205 goto fail;
2206
2207 if (temp != result)
2208 convert_move (result, temp, true);
2209
2210 emit_jump_insn (targetm.gen_jump (after_label));
2211 emit_barrier ();
2212
2213 /* Else clz of the full value is clz of the low word plus the number
2214 of bits in the high word. */
2215 emit_label (hi0_label);
2216
2217 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2218 if (!temp)
2219 goto fail;
2220 temp = expand_binop (word_mode, add_optab, temp,
2221 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2222 result, true, OPTAB_DIRECT);
2223 if (!temp)
2224 goto fail;
2225 if (temp != result)
2226 convert_move (result, temp, true);
2227
2228 emit_label (after_label);
2229 convert_move (target, result, true);
2230
2231 seq = get_insns ();
2232 end_sequence ();
2233
2234 add_equal_note (seq, target, CLZ, xop0, 0);
2235 emit_insn (seq);
2236 return target;
2237
2238 fail:
2239 end_sequence ();
2240 return 0;
2241 }
2242
2243 /* Try calculating popcount of a double-word quantity as two popcounts of
2244 word-sized quantities and summing the results. */
2245 static rtx
2246 expand_doubleword_popcount (machine_mode mode, rtx op0, rtx target)
2247 {
2248 rtx t0, t1, t;
2249 rtx_insn *seq;
2250
2251 start_sequence ();
2252
2253 t0 = expand_unop_direct (word_mode, popcount_optab,
2254 operand_subword_force (op0, 0, mode), NULL_RTX,
2255 true);
2256 t1 = expand_unop_direct (word_mode, popcount_optab,
2257 operand_subword_force (op0, 1, mode), NULL_RTX,
2258 true);
2259 if (!t0 || !t1)
2260 {
2261 end_sequence ();
2262 return NULL_RTX;
2263 }
2264
2265 /* If we were not given a target, use a word_mode register, not a
2266 'mode' register. The result will fit, and nobody is expecting
2267 anything bigger (the return type of __builtin_popcount* is int). */
2268 if (!target)
2269 target = gen_reg_rtx (word_mode);
2270
2271 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2272
2273 seq = get_insns ();
2274 end_sequence ();
2275
2276 add_equal_note (seq, t, POPCOUNT, op0, 0);
2277 emit_insn (seq);
2278 return t;
2279 }
2280
2281 /* Try calculating
2282 (parity:wide x)
2283 as
2284 (parity:narrow (low (x) ^ high (x))) */
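/* This works because parity distributes over concatenation:
   parity (hi:lo) == parity (hi) ^ parity (lo) == parity (hi ^ lo).
   E.g. hi = 0x3 (even parity) and lo = 0x1 (odd) give hi ^ lo = 0x2,
   whose parity is odd, matching the three set bits overall.  */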
2285 static rtx
2286 expand_doubleword_parity (machine_mode mode, rtx op0, rtx target)
2287 {
2288 rtx t = expand_binop (word_mode, xor_optab,
2289 operand_subword_force (op0, 0, mode),
2290 operand_subword_force (op0, 1, mode),
2291 NULL_RTX, 0, OPTAB_DIRECT);
2292 return expand_unop (word_mode, parity_optab, t, target, true);
2293 }
2294
2295 /* Try calculating
2296 (bswap:narrow x)
2297 as
2298 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
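/* Worked example, assuming HImode widened to SImode: for x = 0x1234,
   (bswap:SI 0xGGGG1234) = 0x3412GGGG for any garbage bits G, and the
   logical shift right by 32 - 16 = 16 leaves 0x00003412, whose low
   half is 0x3412 = (bswap:HI 0x1234).  The garbage shifts out, which
   is why widen_operand need not really extend the operand here.  */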
2299 static rtx
2300 widen_bswap (machine_mode mode, rtx op0, rtx target)
2301 {
2302 enum mode_class mclass = GET_MODE_CLASS (mode);
2303 machine_mode wider_mode;
2304 rtx x;
2305 rtx_insn *last;
2306
2307 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2308 return NULL_RTX;
2309
2310 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2311 wider_mode != VOIDmode;
2312 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2313 if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2314 goto found;
2315 return NULL_RTX;
2316
2317 found:
2318 last = get_last_insn ();
2319
2320 x = widen_operand (op0, wider_mode, mode, true, true);
2321 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2322
2323 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2324 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2325 if (x != 0)
2326 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2327 GET_MODE_BITSIZE (wider_mode)
2328 - GET_MODE_BITSIZE (mode),
2329 NULL_RTX, true);
2330
2331 if (x != 0)
2332 {
2333 if (target == 0)
2334 target = gen_reg_rtx (mode);
2335 emit_move_insn (target, gen_lowpart (mode, x));
2336 }
2337 else
2338 delete_insns_since (last);
2339
2340 return target;
2341 }
2342
2343 /* Try calculating bswap as two bswaps of two word-sized operands. */
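/* E.g. on a 32-bit target, bswap of the 64-bit value 0x1122334455667788
   byte-swaps each 32-bit word and exchanges the two words, yielding
   0x8877665544332211.  */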
2344
2345 static rtx
2346 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2347 {
2348 rtx t0, t1;
2349
2350 t1 = expand_unop (word_mode, bswap_optab,
2351 operand_subword_force (op, 0, mode), NULL_RTX, true);
2352 t0 = expand_unop (word_mode, bswap_optab,
2353 operand_subword_force (op, 1, mode), NULL_RTX, true);
2354
2355 if (target == 0 || !valid_multiword_target_p (target))
2356 target = gen_reg_rtx (mode);
2357 if (REG_P (target))
2358 emit_clobber (target);
2359 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2360 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2361
2362 return target;
2363 }
2364
2365 /* Try calculating (parity x) as (and (popcount x) 1), where
2366 popcount can also be done in a wider mode. */
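/* E.g. popcount (0b1011) = 3, and 3 & 1 = 1, the parity of three set
   bits; masking with 1 extracts the low bit of the population count,
   which is exactly the parity.  */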
2367 static rtx
2368 expand_parity (machine_mode mode, rtx op0, rtx target)
2369 {
2370 enum mode_class mclass = GET_MODE_CLASS (mode);
2371 if (CLASS_HAS_WIDER_MODES_P (mclass))
2372 {
2373 machine_mode wider_mode;
2374 for (wider_mode = mode; wider_mode != VOIDmode;
2375 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2376 {
2377 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2378 {
2379 rtx xop0, temp;
2380 rtx_insn *last;
2381
2382 last = get_last_insn ();
2383
2384 if (target == 0)
2385 target = gen_reg_rtx (mode);
2386 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2387 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2388 true);
2389 if (temp != 0)
2390 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2391 target, true, OPTAB_DIRECT);
2392 if (temp == 0)
2393 delete_insns_since (last);
2394
2395 return temp;
2396 }
2397 }
2398 }
2399 return 0;
2400 }
2401
2402 /* Try calculating ctz(x) as K - clz(x & -x),
2403 where K is GET_MODE_PRECISION(mode) - 1.
2404
2405 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2406 don't have to worry about what the hardware does in that case. (If
2407 the clz instruction produces the usual value at 0, which is K, the
2408 result of this code sequence will be -1; expand_ffs, below, relies
2409 on this. It might be nice to have it be K instead, for consistency
2410 with the (very few) processors that provide a ctz with a defined
2411 value, but that would take one more instruction, and it would be
2412 less convenient for expand_ffs anyway.) */
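/* Worked example in an 8-bit mode, so K = 7: for x = 0b00101000,
   x & -x = 0b00001000 isolates the lowest set bit, clz of that is 4,
   and K - 4 = 3, the number of trailing zeros of x.  */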
2413
2414 static rtx
2415 expand_ctz (machine_mode mode, rtx op0, rtx target)
2416 {
2417 rtx_insn *seq;
2418 rtx temp;
2419
2420 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2421 return 0;
2422
2423 start_sequence ();
2424
2425 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2426 if (temp)
2427 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2428 true, OPTAB_DIRECT);
2429 if (temp)
2430 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2431 if (temp)
2432 temp = expand_binop (mode, sub_optab,
2433 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2434 temp, target,
2435 true, OPTAB_DIRECT);
2436 if (temp == 0)
2437 {
2438 end_sequence ();
2439 return 0;
2440 }
2441
2442 seq = get_insns ();
2443 end_sequence ();
2444
2445 add_equal_note (seq, temp, CTZ, op0, 0);
2446 emit_insn (seq);
2447 return temp;
2448 }
2449
2450
2451 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2452 else with the clz-based sequence used by expand_ctz.
2453
2454 The ffs builtin promises to return zero for a zero value and ctz/clz
2455 may have an undefined value in that case. If they do not give us a
2456 convenient value, we have to generate a test and branch. */
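/* Illustration: ffs (0b00101000) = ctz (0b00101000) + 1 = 4, the
   1-based index of the lowest set bit.  For ffs (0) = 0 we need the
   intermediate result to be -1 before the final add of 1; when the
   target happens to define ctz or clz at zero so that the sequence
   already gives -1, the test and branch is skipped.  */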
2457 static rtx
2458 expand_ffs (machine_mode mode, rtx op0, rtx target)
2459 {
2460 HOST_WIDE_INT val = 0;
2461 bool defined_at_zero = false;
2462 rtx temp;
2463 rtx_insn *seq;
2464
2465 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2466 {
2467 start_sequence ();
2468
2469 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2470 if (!temp)
2471 goto fail;
2472
2473 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2474 }
2475 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2476 {
2477 start_sequence ();
2478 temp = expand_ctz (mode, op0, 0);
2479 if (!temp)
2480 goto fail;
2481
2482 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2483 {
2484 defined_at_zero = true;
2485 val = (GET_MODE_PRECISION (mode) - 1) - val;
2486 }
2487 }
2488 else
2489 return 0;
2490
2491 if (defined_at_zero && val == -1)
2492 /* No correction needed at zero. */;
2493 else
2494 {
2495 /* We don't try to do anything clever with the situation found
2496 on some processors (e.g. Alpha) where ctz(0:mode) ==
2497 bitsize(mode). If someone can think of a way to send N to -1
2498 and leave alone all values in the range 0..N-1 (where N is a
2499 power of two), cheaper than this test-and-branch, please add it.
2500
2501 The test-and-branch is done after the operation itself, in case
2502 the operation sets condition codes that can be recycled for this.
2503 (This is true on i386, for instance.) */
2504
2505 rtx_code_label *nonzero_label = gen_label_rtx ();
2506 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2507 mode, true, nonzero_label);
2508
2509 convert_move (temp, GEN_INT (-1), false);
2510 emit_label (nonzero_label);
2511 }
2512
2513 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2514 to produce a value in the range 0..bitsize. */
2515 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2516 target, false, OPTAB_DIRECT);
2517 if (!temp)
2518 goto fail;
2519
2520 seq = get_insns ();
2521 end_sequence ();
2522
2523 add_equal_note (seq, temp, FFS, op0, 0);
2524 emit_insn (seq);
2525 return temp;
2526
2527 fail:
2528 end_sequence ();
2529 return 0;
2530 }
2531
2532 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2533 conditions, VAL may already be a SUBREG against which we cannot generate
2534 a further SUBREG. In this case, we expect forcing the value into a
2535 register will work around the situation. */
2536
2537 static rtx
2538 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2539 machine_mode imode)
2540 {
2541 rtx ret;
2542 ret = lowpart_subreg (omode, val, imode);
2543 if (ret == NULL)
2544 {
2545 val = force_reg (imode, val);
2546 ret = lowpart_subreg (omode, val, imode);
2547 gcc_assert (ret != NULL);
2548 }
2549 return ret;
2550 }
2551
2552 /* Expand a floating point absolute value or negation operation via a
2553 logical operation on the sign bit. */
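/* Sketch of the idea for IEEE single precision, where the sign is
   bit 31: abs is computed as x & 0x7fffffff (AND with ~mask) and
   negation as x ^ 0x80000000 (XOR with mask), operating on the bits
   in an integer mode of the same size.  */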
2554
2555 static rtx
2556 expand_absneg_bit (enum rtx_code code, machine_mode mode,
2557 rtx op0, rtx target)
2558 {
2559 const struct real_format *fmt;
2560 int bitpos, word, nwords, i;
2561 machine_mode imode;
2562 rtx temp;
2563 rtx_insn *insns;
2564
2565 /* The format has to have a simple sign bit. */
2566 fmt = REAL_MODE_FORMAT (mode);
2567 if (fmt == NULL)
2568 return NULL_RTX;
2569
2570 bitpos = fmt->signbit_rw;
2571 if (bitpos < 0)
2572 return NULL_RTX;
2573
2574 /* Don't create negative zeros if the format doesn't support them. */
2575 if (code == NEG && !fmt->has_signed_zero)
2576 return NULL_RTX;
2577
2578 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2579 {
2580 imode = int_mode_for_mode (mode);
2581 if (imode == BLKmode)
2582 return NULL_RTX;
2583 word = 0;
2584 nwords = 1;
2585 }
2586 else
2587 {
2588 imode = word_mode;
2589
2590 if (FLOAT_WORDS_BIG_ENDIAN)
2591 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2592 else
2593 word = bitpos / BITS_PER_WORD;
2594 bitpos = bitpos % BITS_PER_WORD;
2595 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2596 }
2597
2598 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2599 if (code == ABS)
2600 mask = ~mask;
2601
2602 if (target == 0
2603 || target == op0
2604 || (nwords > 1 && !valid_multiword_target_p (target)))
2605 target = gen_reg_rtx (mode);
2606
2607 if (nwords > 1)
2608 {
2609 start_sequence ();
2610
2611 for (i = 0; i < nwords; ++i)
2612 {
2613 rtx targ_piece = operand_subword (target, i, 1, mode);
2614 rtx op0_piece = operand_subword_force (op0, i, mode);
2615
2616 if (i == word)
2617 {
2618 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2619 op0_piece,
2620 immed_wide_int_const (mask, imode),
2621 targ_piece, 1, OPTAB_LIB_WIDEN);
2622 if (temp != targ_piece)
2623 emit_move_insn (targ_piece, temp);
2624 }
2625 else
2626 emit_move_insn (targ_piece, op0_piece);
2627 }
2628
2629 insns = get_insns ();
2630 end_sequence ();
2631
2632 emit_insn (insns);
2633 }
2634 else
2635 {
2636 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2637 gen_lowpart (imode, op0),
2638 immed_wide_int_const (mask, imode),
2639 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2640 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2641
2642 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2643 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2644 target);
2645 }
2646
2647 return target;
2648 }
2649
2650 /* As expand_unop, but will fail rather than attempt the operation in a
2651 different mode or with a libcall. */
2652 static rtx
2653 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2654 int unsignedp)
2655 {
2656 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2657 {
2658 struct expand_operand ops[2];
2659 enum insn_code icode = optab_handler (unoptab, mode);
2660 rtx_insn *last = get_last_insn ();
2661 rtx_insn *pat;
2662
2663 create_output_operand (&ops[0], target, mode);
2664 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2665 pat = maybe_gen_insn (icode, 2, ops);
2666 if (pat)
2667 {
2668 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2669 && ! add_equal_note (pat, ops[0].value,
2670 optab_to_code (unoptab),
2671 ops[1].value, NULL_RTX))
2672 {
2673 delete_insns_since (last);
2674 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2675 }
2676
2677 emit_insn (pat);
2678
2679 return ops[0].value;
2680 }
2681 }
2682 return 0;
2683 }
2684
2685 /* Generate code to perform an operation specified by UNOPTAB
2686 on operand OP0, with result having machine-mode MODE.
2687
2688 UNSIGNEDP is for the case where we have to widen the operands
2689 to perform the operation. It says to use zero-extension.
2690
2691 If TARGET is nonzero, the value
2692 is generated there, if it is convenient to do so.
2693 In all cases an rtx is returned for the locus of the value;
2694 this may or may not be TARGET. */
2695
2696 rtx
2697 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
2698 int unsignedp)
2699 {
2700 enum mode_class mclass = GET_MODE_CLASS (mode);
2701 machine_mode wider_mode;
2702 rtx temp;
2703 rtx libfunc;
2704
2705 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2706 if (temp)
2707 return temp;
2708
2709 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2710
2711 /* Widening (or narrowing) clz needs special treatment. */
2712 if (unoptab == clz_optab)
2713 {
2714 temp = widen_leading (mode, op0, target, unoptab);
2715 if (temp)
2716 return temp;
2717
2718 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2719 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2720 {
2721 temp = expand_doubleword_clz (mode, op0, target);
2722 if (temp)
2723 return temp;
2724 }
2725
2726 goto try_libcall;
2727 }
2728
2729 if (unoptab == clrsb_optab)
2730 {
2731 temp = widen_leading (mode, op0, target, unoptab);
2732 if (temp)
2733 return temp;
2734 goto try_libcall;
2735 }
2736
2737 if (unoptab == popcount_optab
2738 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2739 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2740 && optimize_insn_for_speed_p ())
2741 {
2742 temp = expand_doubleword_popcount (mode, op0, target);
2743 if (temp)
2744 return temp;
2745 }
2746
2747 if (unoptab == parity_optab
2748 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2749 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2750 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2751 && optimize_insn_for_speed_p ())
2752 {
2753 temp = expand_doubleword_parity (mode, op0, target);
2754 if (temp)
2755 return temp;
2756 }
2757
2758 /* Widening (or narrowing) bswap needs special treatment. */
2759 if (unoptab == bswap_optab)
2760 {
2761 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2762 or ROTATERT. First try these directly; if this fails, then try the
2763 obvious pair of shifts with allowed widening, as this will probably
2764 always be more efficient than the other fallback methods. */
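/* E.g. rotating the 16-bit value 0x1234 by 8 in either direction
   yields 0x3412, which is exactly its byte swap.  */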
2765 if (mode == HImode)
2766 {
2767 rtx_insn *last;
2768 rtx temp1, temp2;
2769
2770 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2771 {
2772 temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
2773 unsignedp, OPTAB_DIRECT);
2774 if (temp)
2775 return temp;
2776 }
2777
2778 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2779 {
2780 temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
2781 unsignedp, OPTAB_DIRECT);
2782 if (temp)
2783 return temp;
2784 }
2785
2786 last = get_last_insn ();
2787
2788 temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
2789 unsignedp, OPTAB_WIDEN);
2790 temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
2791 unsignedp, OPTAB_WIDEN);
2792 if (temp1 && temp2)
2793 {
2794 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2795 unsignedp, OPTAB_WIDEN);
2796 if (temp)
2797 return temp;
2798 }
2799
2800 delete_insns_since (last);
2801 }
2802
2803 temp = widen_bswap (mode, op0, target);
2804 if (temp)
2805 return temp;
2806
2807 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2808 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2809 {
2810 temp = expand_doubleword_bswap (mode, op0, target);
2811 if (temp)
2812 return temp;
2813 }
2814
2815 goto try_libcall;
2816 }
2817
2818 if (CLASS_HAS_WIDER_MODES_P (mclass))
2819 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2820 wider_mode != VOIDmode;
2821 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2822 {
2823 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2824 {
2825 rtx xop0 = op0;
2826 rtx_insn *last = get_last_insn ();
2827
2828 /* For certain operations, we need not actually extend
2829 the narrow operand, as long as we will truncate the
2830 results to the same narrowness. */
2831
2832 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2833 (unoptab == neg_optab
2834 || unoptab == one_cmpl_optab)
2835 && mclass == MODE_INT);
2836
2837 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2838 unsignedp);
2839
2840 if (temp)
2841 {
2842 if (mclass != MODE_INT
2843 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2844 {
2845 if (target == 0)
2846 target = gen_reg_rtx (mode);
2847 convert_move (target, temp, 0);
2848 return target;
2849 }
2850 else
2851 return gen_lowpart (mode, temp);
2852 }
2853 else
2854 delete_insns_since (last);
2855 }
2856 }
2857
2858 /* These can be done a word at a time. */
2859 if (unoptab == one_cmpl_optab
2860 && mclass == MODE_INT
2861 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2862 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2863 {
2864 int i;
2865 rtx_insn *insns;
2866
2867 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
2868 target = gen_reg_rtx (mode);
2869
2870 start_sequence ();
2871
2872 /* Do the actual arithmetic. */
2873 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2874 {
2875 rtx target_piece = operand_subword (target, i, 1, mode);
2876 rtx x = expand_unop (word_mode, unoptab,
2877 operand_subword_force (op0, i, mode),
2878 target_piece, unsignedp);
2879
2880 if (target_piece != x)
2881 emit_move_insn (target_piece, x);
2882 }
2883
2884 insns = get_insns ();
2885 end_sequence ();
2886
2887 emit_insn (insns);
2888 return target;
2889 }
2890
2891 if (optab_to_code (unoptab) == NEG)
2892 {
2893 /* Try negating floating point values by flipping the sign bit. */
2894 if (SCALAR_FLOAT_MODE_P (mode))
2895 {
2896 temp = expand_absneg_bit (NEG, mode, op0, target);
2897 if (temp)
2898 return temp;
2899 }
2900
2901 /* If there is no negation pattern, and we have no negative zero,
2902 try subtracting from zero. */
2903 if (!HONOR_SIGNED_ZEROS (mode))
2904 {
2905 temp = expand_binop (mode, (unoptab == negv_optab
2906 ? subv_optab : sub_optab),
2907 CONST0_RTX (mode), op0, target,
2908 unsignedp, OPTAB_DIRECT);
2909 if (temp)
2910 return temp;
2911 }
2912 }
2913
2914 /* Try calculating parity (x) as popcount (x) % 2. */
2915 if (unoptab == parity_optab)
2916 {
2917 temp = expand_parity (mode, op0, target);
2918 if (temp)
2919 return temp;
2920 }
2921
2922 /* Try implementing ffs (x) in terms of clz (x). */
2923 if (unoptab == ffs_optab)
2924 {
2925 temp = expand_ffs (mode, op0, target);
2926 if (temp)
2927 return temp;
2928 }
2929
2930 /* Try implementing ctz (x) in terms of clz (x). */
2931 if (unoptab == ctz_optab)
2932 {
2933 temp = expand_ctz (mode, op0, target);
2934 if (temp)
2935 return temp;
2936 }
2937
2938 try_libcall:
2939 /* Now try a library call in this mode. */
2940 libfunc = optab_libfunc (unoptab, mode);
2941 if (libfunc)
2942 {
2943 rtx_insn *insns;
2944 rtx value;
2945 rtx eq_value;
2946 machine_mode outmode = mode;
2947
2948 /* All of these functions return small values. Thus we choose to
2949 have them return something that isn't a double-word. */
2950 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2951 || unoptab == clrsb_optab || unoptab == popcount_optab
2952 || unoptab == parity_optab)
2953 outmode
2954 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
2955 optab_libfunc (unoptab, mode)));
2956
2957 start_sequence ();
2958
2959 /* Pass 1 for NO_QUEUE so we don't lose any increments
2960 if the libcall is cse'd or moved. */
2961 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
2962 1, op0, mode);
2963 insns = get_insns ();
2964 end_sequence ();
2965
2966 target = gen_reg_rtx (outmode);
2967 bool trapv = trapv_unoptab_p (unoptab);
2968 if (trapv)
2969 eq_value = NULL_RTX;
2970 else
2971 {
2972 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
2973 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
2974 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
2975 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
2976 eq_value = simplify_gen_unary (ZERO_EXTEND,
2977 outmode, eq_value, mode);
2978 }
2979 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
2980
2981 return target;
2982 }
2983
2984 /* It can't be done in this mode. Can we do it in a wider mode? */
2985
2986 if (CLASS_HAS_WIDER_MODES_P (mclass))
2987 {
2988 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2989 wider_mode != VOIDmode;
2990 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2991 {
2992 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
2993 || optab_libfunc (unoptab, wider_mode))
2994 {
2995 rtx xop0 = op0;
2996 rtx_insn *last = get_last_insn ();
2997
2998 /* For certain operations, we need not actually extend
2999 the narrow operand, as long as we will truncate the
3000 results to the same narrowness. */
3001 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3002 (unoptab == neg_optab
3003 || unoptab == one_cmpl_optab
3004 || unoptab == bswap_optab)
3005 && mclass == MODE_INT);
3006
3007 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3008 unsignedp);
3009
3010 /* If we are generating clz using wider mode, adjust the
3011 result. Similarly for clrsb. */
3012 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3013 && temp != 0)
3014 temp = expand_binop
3015 (wider_mode, sub_optab, temp,
3016 gen_int_mode (GET_MODE_PRECISION (wider_mode)
3017 - GET_MODE_PRECISION (mode),
3018 wider_mode),
3019 target, true, OPTAB_DIRECT);
3020
3021 /* Likewise for bswap. */
3022 if (unoptab == bswap_optab && temp != 0)
3023 {
3024 gcc_assert (GET_MODE_PRECISION (wider_mode)
3025 == GET_MODE_BITSIZE (wider_mode)
3026 && GET_MODE_PRECISION (mode)
3027 == GET_MODE_BITSIZE (mode));
3028
3029 temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
3030 GET_MODE_BITSIZE (wider_mode)
3031 - GET_MODE_BITSIZE (mode),
3032 NULL_RTX, true);
3033 }
3034
3035 if (temp)
3036 {
3037 if (mclass != MODE_INT)
3038 {
3039 if (target == 0)
3040 target = gen_reg_rtx (mode);
3041 convert_move (target, temp, 0);
3042 return target;
3043 }
3044 else
3045 return gen_lowpart (mode, temp);
3046 }
3047 else
3048 delete_insns_since (last);
3049 }
3050 }
3051 }
3052
3053 /* One final attempt at implementing negation via subtraction,
3054 this time allowing widening of the operand. */
3055 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3056 {
3057 rtx temp;
3058 temp = expand_binop (mode,
3059 unoptab == negv_optab ? subv_optab : sub_optab,
3060 CONST0_RTX (mode), op0,
3061 target, unsignedp, OPTAB_LIB_WIDEN);
3062 if (temp)
3063 return temp;
3064 }
3065
3066 return 0;
3067 }
3068 \f
3069 /* Emit code to compute the absolute value of OP0, with result to
3070 TARGET if convenient. (TARGET may be 0.) The return value says
3071 where the result actually is to be found.
3072
3073 MODE is the mode of the operand; the mode of the result is
3074 different but can be deduced from MODE. */
3077
3078 rtx
3079 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3080 int result_unsignedp)
3081 {
3082 rtx temp;
3083
3084 if (GET_MODE_CLASS (mode) != MODE_INT
3085 || ! flag_trapv)
3086 result_unsignedp = 1;
3087
3088 /* First try to do it with a special abs instruction. */
3089 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3090 op0, target, 0);
3091 if (temp != 0)
3092 return temp;
3093
3094 /* For floating point modes, try clearing the sign bit. */
3095 if (SCALAR_FLOAT_MODE_P (mode))
3096 {
3097 temp = expand_absneg_bit (ABS, mode, op0, target);
3098 if (temp)
3099 return temp;
3100 }
3101
3102 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3103 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3104 && !HONOR_SIGNED_ZEROS (mode))
3105 {
3106 rtx_insn *last = get_last_insn ();
3107
3108 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3109 op0, NULL_RTX, 0);
3110 if (temp != 0)
3111 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3112 OPTAB_WIDEN);
3113
3114 if (temp != 0)
3115 return temp;
3116
3117 delete_insns_since (last);
3118 }
3119
3120 /* If this machine has expensive jumps, we can do integer absolute
3121 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3122 where W is the width of MODE. */
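/* Worked example with W = 32 and x = -5: the arithmetic shift gives
   s = x >> 31 = -1 (all ones); s ^ x = ~x = 4; and 4 - s = 4 + 1 = 5.
   For non-negative x, s = 0 and the identity reduces to x.  */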
3123
3124 if (GET_MODE_CLASS (mode) == MODE_INT
3125 && BRANCH_COST (optimize_insn_for_speed_p (),
3126 false) >= 2)
3127 {
3128 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3129 GET_MODE_PRECISION (mode) - 1,
3130 NULL_RTX, 0);
3131
3132 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3133 OPTAB_LIB_WIDEN);
3134 if (temp != 0)
3135 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3136 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3137
3138 if (temp != 0)
3139 return temp;
3140 }
3141
3142 return NULL_RTX;
3143 }
3144
3145 rtx
3146 expand_abs (machine_mode mode, rtx op0, rtx target,
3147 int result_unsignedp, int safe)
3148 {
3149 rtx temp;
3150 rtx_code_label *op1;
3151
3152 if (GET_MODE_CLASS (mode) != MODE_INT
3153 || ! flag_trapv)
3154 result_unsignedp = 1;
3155
3156 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3157 if (temp != 0)
3158 return temp;
3159
3160 /* If that does not win, use conditional jump and negate. */
3161
3162 /* It is safe to use the target if it is the same as the source,
3163 provided it is also a pseudo register. */
3164 if (op0 == target && REG_P (op0)
3165 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3166 safe = 1;
3167
3168 op1 = gen_label_rtx ();
3169 if (target == 0 || ! safe
3170 || GET_MODE (target) != mode
3171 || (MEM_P (target) && MEM_VOLATILE_P (target))
3172 || (REG_P (target)
3173 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3174 target = gen_reg_rtx (mode);
3175
3176 emit_move_insn (target, op0);
3177 NO_DEFER_POP;
3178
3179 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3180 NULL_RTX, NULL, op1, -1);
3181
3182 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3183 target, target, 0);
3184 if (op0 != target)
3185 emit_move_insn (target, op0);
3186 emit_label (op1);
3187 OK_DEFER_POP;
3188 return target;
3189 }
3190
3191 /* Emit code to compute the one's complement absolute value of OP0
3192 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3193 (TARGET may be NULL_RTX.) The return value says where the result
3194 actually is to be found.
3195
3196 MODE is the mode of the operand; the mode of the result is
3197 different but can be deduced from MODE. */
3198
3199 rtx
3200 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3201 {
3202 rtx temp;
3203
3204 /* Not applicable for floating point modes. */
3205 if (FLOAT_MODE_P (mode))
3206 return NULL_RTX;
3207
3208 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3209 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3210 {
3211 rtx_insn *last = get_last_insn ();
3212
3213 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3214 if (temp != 0)
3215 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3216 OPTAB_WIDEN);
3217
3218 if (temp != 0)
3219 return temp;
3220
3221 delete_insns_since (last);
3222 }
3223
3224 /* If this machine has expensive jumps, we can do one's complement
3225 absolute value of X as (((signed) x >> (W-1)) ^ x). */
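/* E.g. with W = 32 and x = -5: (x >> 31) = -1, and -1 ^ -5 = ~(-5) = 4,
   the one's complement; for x >= 0 the shift gives 0 and x is
   returned unchanged.  */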
3226
3227 if (GET_MODE_CLASS (mode) == MODE_INT
3228 && BRANCH_COST (optimize_insn_for_speed_p (),
3229 false) >= 2)
3230 {
3231 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3232 GET_MODE_PRECISION (mode) - 1,
3233 NULL_RTX, 0);
3234
3235 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3236 OPTAB_LIB_WIDEN);
3237
3238 if (temp != 0)
3239 return temp;
3240 }
3241
3242 return NULL_RTX;
3243 }
3244
3245 /* A subroutine of expand_copysign, perform the copysign operation using the
3246 abs and neg primitives advertised to exist on the target. The assumption
3247 is that we have a split register file, and leaving op0 in fp registers,
3248 and not playing with subregs so much, will help the register allocator. */
3249
3250 static rtx
3251 expand_copysign_absneg (machine_mode mode, rtx op0, rtx op1, rtx target,
3252 int bitpos, bool op0_is_abs)
3253 {
3254 machine_mode imode;
3255 enum insn_code icode;
3256 rtx sign;
3257 rtx_code_label *label;
3258
3259 if (target == op1)
3260 target = NULL_RTX;
3261
3262 /* Check if the back end provides an insn that handles signbit for the
3263 argument's mode. */
3264 icode = optab_handler (signbit_optab, mode);
3265 if (icode != CODE_FOR_nothing)
3266 {
3267 imode = insn_data[(int) icode].operand[0].mode;
3268 sign = gen_reg_rtx (imode);
3269 emit_unop_insn (icode, sign, op1, UNKNOWN);
3270 }
3271 else
3272 {
3273 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3274 {
3275 imode = int_mode_for_mode (mode);
3276 if (imode == BLKmode)
3277 return NULL_RTX;
3278 op1 = gen_lowpart (imode, op1);
3279 }
3280 else
3281 {
3282 int word;
3283
3284 imode = word_mode;
3285 if (FLOAT_WORDS_BIG_ENDIAN)
3286 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3287 else
3288 word = bitpos / BITS_PER_WORD;
3289 bitpos = bitpos % BITS_PER_WORD;
3290 op1 = operand_subword_force (op1, word, mode);
3291 }
3292
3293 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3294 sign = expand_binop (imode, and_optab, op1,
3295 immed_wide_int_const (mask, imode),
3296 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3297 }
3298
3299 if (!op0_is_abs)
3300 {
3301 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3302 if (op0 == NULL)
3303 return NULL_RTX;
3304 target = op0;
3305 }
3306 else
3307 {
3308 if (target == NULL_RTX)
3309 target = copy_to_reg (op0);
3310 else
3311 emit_move_insn (target, op0);
3312 }
3313
3314 label = gen_label_rtx ();
3315 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3316
3317 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3318 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3319 else
3320 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3321 if (op0 != target)
3322 emit_move_insn (target, op0);
3323
3324 emit_label (label);
3325
3326 return target;
3327 }
3328
3329
3330 /* A subroutine of expand_copysign, perform the entire copysign operation
3331 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3332 is true if op0 is known to have its sign bit clear. */
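/* Sketch for IEEE single precision, where the mask below is
   0x80000000: the result is (op0 & 0x7fffffff) | (op1 & 0x80000000),
   i.e. the magnitude bits of op0 combined with the sign bit of op1.  */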
3333
3334 static rtx
3335 expand_copysign_bit (machine_mode mode, rtx op0, rtx op1, rtx target,
3336 int bitpos, bool op0_is_abs)
3337 {
3338 machine_mode imode;
3339 int word, nwords, i;
3340 rtx temp;
3341 rtx_insn *insns;
3342
3343 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3344 {
3345 imode = int_mode_for_mode (mode);
3346 if (imode == BLKmode)
3347 return NULL_RTX;
3348 word = 0;
3349 nwords = 1;
3350 }
3351 else
3352 {
3353 imode = word_mode;
3354
3355 if (FLOAT_WORDS_BIG_ENDIAN)
3356 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3357 else
3358 word = bitpos / BITS_PER_WORD;
3359 bitpos = bitpos % BITS_PER_WORD;
3360 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3361 }
3362
3363 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3364
3365 if (target == 0
3366 || target == op0
3367 || target == op1
3368 || (nwords > 1 && !valid_multiword_target_p (target)))
3369 target = gen_reg_rtx (mode);
3370
3371 if (nwords > 1)
3372 {
3373 start_sequence ();
3374
3375 for (i = 0; i < nwords; ++i)
3376 {
3377 rtx targ_piece = operand_subword (target, i, 1, mode);
3378 rtx op0_piece = operand_subword_force (op0, i, mode);
3379
3380 if (i == word)
3381 {
3382 if (!op0_is_abs)
3383 op0_piece
3384 = expand_binop (imode, and_optab, op0_piece,
3385 immed_wide_int_const (~mask, imode),
3386 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3387 op1 = expand_binop (imode, and_optab,
3388 operand_subword_force (op1, i, mode),
3389 immed_wide_int_const (mask, imode),
3390 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3391
3392 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3393 targ_piece, 1, OPTAB_LIB_WIDEN);
3394 if (temp != targ_piece)
3395 emit_move_insn (targ_piece, temp);
3396 }
3397 else
3398 emit_move_insn (targ_piece, op0_piece);
3399 }
3400
3401 insns = get_insns ();
3402 end_sequence ();
3403
3404 emit_insn (insns);
3405 }
3406 else
3407 {
3408 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3409 immed_wide_int_const (mask, imode),
3410 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3411
3412 op0 = gen_lowpart (imode, op0);
3413 if (!op0_is_abs)
3414 op0 = expand_binop (imode, and_optab, op0,
3415 immed_wide_int_const (~mask, imode),
3416 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3417
3418 temp = expand_binop (imode, ior_optab, op0, op1,
3419 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3420 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3421 }
3422
3423 return target;
3424 }
3425
3426 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3427 scalar floating point mode. Return NULL if we do not know how to
3428 expand the operation inline. */
3429
3430 rtx
3431 expand_copysign (rtx op0, rtx op1, rtx target)
3432 {
3433 machine_mode mode = GET_MODE (op0);
3434 const struct real_format *fmt;
3435 bool op0_is_abs;
3436 rtx temp;
3437
3438 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3439 gcc_assert (GET_MODE (op1) == mode);
3440
3441 /* First try to do it with a special instruction. */
3442 temp = expand_binop (mode, copysign_optab, op0, op1,
3443 target, 0, OPTAB_DIRECT);
3444 if (temp)
3445 return temp;
3446
3447 fmt = REAL_MODE_FORMAT (mode);
3448 if (fmt == NULL || !fmt->has_signed_zero)
3449 return NULL_RTX;
3450
3451 op0_is_abs = false;
3452 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3453 {
3454 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3455 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3456 op0_is_abs = true;
3457 }
3458
3459 if (fmt->signbit_ro >= 0
3460 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3461 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3462 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3463 {
3464 temp = expand_copysign_absneg (mode, op0, op1, target,
3465 fmt->signbit_ro, op0_is_abs);
3466 if (temp)
3467 return temp;
3468 }
3469
3470 if (fmt->signbit_rw < 0)
3471 return NULL_RTX;
3472 return expand_copysign_bit (mode, op0, op1, target,
3473 fmt->signbit_rw, op0_is_abs);
3474 }
3475 \f
3476 /* Generate an instruction whose insn-code is INSN_CODE,
3477 with two operands: an output TARGET and an input OP0.
3478 TARGET *must* be nonzero, and the output is always stored there.
3479 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3480 the value that is stored into TARGET.
3481
3482 Return false if expansion failed. */
3483
3484 bool
3485 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3486 enum rtx_code code)
3487 {
3488 struct expand_operand ops[2];
3489 rtx_insn *pat;
3490
3491 create_output_operand (&ops[0], target, GET_MODE (target));
3492 create_input_operand (&ops[1], op0, GET_MODE (op0));
3493 pat = maybe_gen_insn (icode, 2, ops);
3494 if (!pat)
3495 return false;
3496
3497 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3498 && code != UNKNOWN)
3499 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3500
3501 emit_insn (pat);
3502
3503 if (ops[0].value != target)
3504 emit_move_insn (target, ops[0].value);
3505 return true;
3506 }
3507 /* Generate an instruction whose insn-code is INSN_CODE,
3508 with two operands: an output TARGET and an input OP0.
3509 TARGET *must* be nonzero, and the output is always stored there.
3510 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3511 the value that is stored into TARGET. */
3512
3513 void
3514 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3515 {
3516 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3517 gcc_assert (ok);
3518 }
3519 \f
3520 struct no_conflict_data
3521 {
3522 rtx target;
3523 rtx_insn *first, *insn;
3524 bool must_stay;
3525 };
3526
3527 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3528 the currently examined clobber / store has to stay in the list of
3529 insns that constitute the actual libcall block. */
3530 static void
3531 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3532 {
3533 struct no_conflict_data *p= (struct no_conflict_data *) p0;
3534
3535 /* If this insn directly contributes to setting the target, it must stay. */
3536 if (reg_overlap_mentioned_p (p->target, dest))
3537 p->must_stay = true;
3538 /* If we haven't committed to keeping any other insns in the list yet,
3539 there is nothing more to check. */
3540 else if (p->insn == p->first)
3541 return;
3542 /* If this insn sets / clobbers a register that feeds one of the insns
3543 already in the list, this insn has to stay too. */
3544 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3545 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3546 || reg_used_between_p (dest, p->first, p->insn)
3547 /* Likewise if this insn depends on a register set by a previous
3548 insn in the list, or if it sets a result (presumably a hard
3549 register) that is set or clobbered by a previous insn.
3550 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3551 SET_DEST perform the former check on the address, and the latter
3552 check on the MEM. */
3553 || (GET_CODE (set) == SET
3554 && (modified_in_p (SET_SRC (set), p->first)
3555 || modified_in_p (SET_DEST (set), p->first)
3556 || modified_between_p (SET_SRC (set), p->first, p->insn)
3557 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3558 p->must_stay = true;
3559 }
3560
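/* Illustrative example (an added note, not in the original source): if
   the libcall block computes its result in (reg 100) and the store
   under test is (set (reg 100) ...), reg_overlap_mentioned_p fires on
   the first check and MUST_STAY is set at once.  Conversely, an insn
   that merely loads an unrelated constant into a fresh pseudo fails
   every test above and may be moved out of the block by the caller.  */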
3561 \f
3562 /* Emit code to make a call to a constant function or a library call.
3563
3564 INSNS is a list containing all insns emitted in the call.
3565 These insns leave the result in RESULT. Our job is to copy RESULT
3566 to TARGET, which is logically equivalent to EQUIV.
3567
3568 We first emit any insns that set a pseudo on the assumption that these are
3569 loading constants into registers; doing so allows them to be safely cse'ed
3570 between blocks. Then we emit all the other insns in the block, followed by
3571 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3572 note with an operand of EQUIV. */
3573
3574 static void
3575 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3576 bool equiv_may_trap)
3577 {
3578 rtx final_dest = target;
3579 rtx_insn *next, *last, *insn;
3580
3581 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3582 into a MEM later. Protect the libcall block from this change. */
3583 if (! REG_P (target) || REG_USERVAR_P (target))
3584 target = gen_reg_rtx (GET_MODE (target));
3585
3586 /* If we're using non-call exceptions, a libcall corresponding to an
3587 operation that may trap may also trap. */
3588 /* ??? See the comment in front of make_reg_eh_region_note. */
3589 if (cfun->can_throw_non_call_exceptions
3590 && (equiv_may_trap || may_trap_p (equiv)))
3591 {
3592 for (insn = insns; insn; insn = NEXT_INSN (insn))
3593 if (CALL_P (insn))
3594 {
3595 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3596 if (note)
3597 {
3598 int lp_nr = INTVAL (XEXP (note, 0));
3599 if (lp_nr == 0 || lp_nr == INT_MIN)
3600 remove_note (insn, note);
3601 }
3602 }
3603 }
3604 else
3605 {
3606 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3607 reg note to indicate that this call cannot throw or execute a nonlocal
3608 goto (unless there is already a REG_EH_REGION note, in which case
3609 we update it). */
3610 for (insn = insns; insn; insn = NEXT_INSN (insn))
3611 if (CALL_P (insn))
3612 make_reg_eh_region_note_nothrow_nononlocal (insn);
3613 }
3614
3615 /* First emit all insns that set pseudos. Remove them from the list as
3616 we go. Avoid insns that set pseudos which were referenced in previous
3617 insns. These can be generated by move_by_pieces, for example,
3618 to update an address. Similarly, avoid insns that reference things
3619 set in previous insns. */
3620
3621 for (insn = insns; insn; insn = next)
3622 {
3623 rtx set = single_set (insn);
3624
3625 next = NEXT_INSN (insn);
3626
3627 if (set != 0 && REG_P (SET_DEST (set))
3628 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3629 {
3630 struct no_conflict_data data;
3631
3632 data.target = const0_rtx;
3633 data.first = insns;
3634 data.insn = insn;
3635 data.must_stay = 0;
3636 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3637 if (! data.must_stay)
3638 {
3639 if (PREV_INSN (insn))
3640 SET_NEXT_INSN (PREV_INSN (insn)) = next;
3641 else
3642 insns = next;
3643
3644 if (next)
3645 SET_PREV_INSN (next) = PREV_INSN (insn);
3646
3647 add_insn (insn);
3648 }
3649 }
3650
3651 /* Some ports use a loop to copy large arguments onto the stack.
3652 Don't move anything outside such a loop. */
3653 if (LABEL_P (insn))
3654 break;
3655 }
3656
3657 /* Write the remaining insns followed by the final copy. */
3658 for (insn = insns; insn; insn = next)
3659 {
3660 next = NEXT_INSN (insn);
3661
3662 add_insn (insn);
3663 }
3664
3665 last = emit_move_insn (target, result);
3666 if (equiv)
3667 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3668
3669 if (final_dest != target)
3670 emit_move_insn (final_dest, target);
3671 }
3672
3673 void
3674 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3675 {
3676 emit_libcall_block_1 (safe_as_a <rtx_insn *> (insns),
3677 target, result, equiv, false);
3678 }
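
/* A minimal usage sketch for emit_libcall_block (illustrative only;
   LIBFUNC, OP0, OP1, MODE and EQUIV are placeholders).  The same
   pattern appears in prepare_float_lib_cmp and expand_float below:

       start_sequence ();
       value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                        mode, 2, op0, mode, op1, mode);
       insns = get_insns ();
       end_sequence ();

       target = gen_reg_rtx (mode);
       emit_libcall_block (insns, target, value, equiv);  */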
3679 \f
3680 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3681 PURPOSE describes how this comparison will be used. CODE is the rtx
3682 comparison code we will be using.
3683
3684 ??? Actually, CODE is slightly weaker than that. A target is still
3685 required to implement all of the normal bcc operations, but not
3686 required to implement all (or any) of the unordered bcc operations. */
3687
3688 int
3689 can_compare_p (enum rtx_code code, machine_mode mode,
3690 enum can_compare_purpose purpose)
3691 {
3692 rtx test;
3693 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3694 do
3695 {
3696 enum insn_code icode;
3697
3698 if (purpose == ccp_jump
3699 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3700 && insn_operand_matches (icode, 0, test))
3701 return 1;
3702 if (purpose == ccp_store_flag
3703 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3704 && insn_operand_matches (icode, 1, test))
3705 return 1;
3706 if (purpose == ccp_cmov
3707 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3708 return 1;
3709
3710 mode = GET_MODE_WIDER_MODE (mode);
3711 PUT_MODE (test, mode);
3712 }
3713 while (mode != VOIDmode);
3714
3715 return 0;
3716 }
3717
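/* For example (an illustrative note): on a target whose only cbranch
   pattern is for SImode, can_compare_p (EQ, QImode, ccp_jump) still
   returns nonzero, because the loop above walks QImode to HImode and
   then to SImode before giving up at VOIDmode.  */
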
3718 /* This function is called when we are going to emit a compare instruction that
3719 compares the values found in X and Y, using the rtl operator COMPARISON.
3720
3721 *PMODE is the mode of the inputs (in case they are const_int).
3722 UNSIGNEDP nonzero says that the operands are unsigned;
3723 this matters if they need to be widened (as given by METHODS).
3724
3725 If they have mode BLKmode, then SIZE specifies the size of both operands.
3726
3727 This function performs all the setup necessary so that the caller only has
3728 to emit a single comparison insn. This setup can involve doing a BLKmode
3729 comparison or emitting a library call to perform the comparison if no insn
3730 is available to handle it.
3731 The values which are passed in through pointers can be modified; the caller
3732 should perform the comparison on the modified values. Constant
3733 comparisons must have already been folded. */
3734
3735 static void
3736 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3737 int unsignedp, enum optab_methods methods,
3738 rtx *ptest, machine_mode *pmode)
3739 {
3740 machine_mode mode = *pmode;
3741 rtx libfunc, test;
3742 machine_mode cmp_mode;
3743 enum mode_class mclass;
3744
3745 /* The other methods are not needed. */
3746 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3747 || methods == OPTAB_LIB_WIDEN);
3748
3749 /* If we are optimizing, force expensive constants into a register. */
3750 if (CONSTANT_P (x) && optimize
3751 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
3752 > COSTS_N_INSNS (1)))
3753 x = force_reg (mode, x);
3754
3755 if (CONSTANT_P (y) && optimize
3756 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
3757 > COSTS_N_INSNS (1)))
3758 y = force_reg (mode, y);
3759
3760 #if HAVE_cc0
3761 /* Make sure we have a canonical comparison. The RTL
3762 documentation states that canonical comparisons are required only
3763 for targets which have cc0. */
3764 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3765 #endif
3766
3767 /* Don't let both operands fail to indicate the mode. */
3768 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3769 x = force_reg (mode, x);
3770 if (mode == VOIDmode)
3771 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3772
3773 /* Handle all BLKmode compares. */
3774
3775 if (mode == BLKmode)
3776 {
3777 machine_mode result_mode;
3778 enum insn_code cmp_code;
3779 tree length_type;
3780 rtx libfunc;
3781 rtx result;
3782 rtx opalign
3783 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3784
3785 gcc_assert (size);
3786
3787 /* Try to use a memory block compare insn - either cmpstr
3788 or cmpmem will do. */
3789 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3790 cmp_mode != VOIDmode;
3791 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3792 {
3793 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3794 if (cmp_code == CODE_FOR_nothing)
3795 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3796 if (cmp_code == CODE_FOR_nothing)
3797 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3798 if (cmp_code == CODE_FOR_nothing)
3799 continue;
3800
3801 /* Must make sure the size fits the insn's mode. */
3802 if ((CONST_INT_P (size)
3803 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3804 || (GET_MODE_BITSIZE (GET_MODE (size))
3805 > GET_MODE_BITSIZE (cmp_mode)))
3806 continue;
3807
3808 result_mode = insn_data[cmp_code].operand[0].mode;
3809 result = gen_reg_rtx (result_mode);
3810 size = convert_to_mode (cmp_mode, size, 1);
3811 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3812
3813 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
3814 *pmode = result_mode;
3815 return;
3816 }
3817
3818 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
3819 goto fail;
3820
3821 /* Otherwise call a library function, memcmp. */
3822 libfunc = memcmp_libfunc;
3823 length_type = sizetype;
3824 result_mode = TYPE_MODE (integer_type_node);
3825 cmp_mode = TYPE_MODE (length_type);
3826 size = convert_to_mode (TYPE_MODE (length_type), size,
3827 TYPE_UNSIGNED (length_type));
3828
3829 result = emit_library_call_value (libfunc, 0, LCT_PURE,
3830 result_mode, 3,
3831 XEXP (x, 0), Pmode,
3832 XEXP (y, 0), Pmode,
3833 size, cmp_mode);
3834 x = result;
3835 y = const0_rtx;
3836 mode = result_mode;
3837 methods = OPTAB_LIB_WIDEN;
3838 unsignedp = false;
3839 }
3840
3841 /* Don't allow operands to the compare to trap, as that can put the
3842 compare and branch in different basic blocks. */
3843 if (cfun->can_throw_non_call_exceptions)
3844 {
3845 if (may_trap_p (x))
3846 x = force_reg (mode, x);
3847 if (may_trap_p (y))
3848 y = force_reg (mode, y);
3849 }
3850
3851 if (GET_MODE_CLASS (mode) == MODE_CC)
3852 {
3853 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
3854 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3855 gcc_assert (icode != CODE_FOR_nothing
3856 && insn_operand_matches (icode, 0, test));
3857 *ptest = test;
3858 return;
3859 }
3860
3861 mclass = GET_MODE_CLASS (mode);
3862 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3863 cmp_mode = mode;
3864 do
3865 {
3866 enum insn_code icode;
3867 icode = optab_handler (cbranch_optab, cmp_mode);
3868 if (icode != CODE_FOR_nothing
3869 && insn_operand_matches (icode, 0, test))
3870 {
3871 rtx_insn *last = get_last_insn ();
3872 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
3873 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
3874 if (op0 && op1
3875 && insn_operand_matches (icode, 1, op0)
3876 && insn_operand_matches (icode, 2, op1))
3877 {
3878 XEXP (test, 0) = op0;
3879 XEXP (test, 1) = op1;
3880 *ptest = test;
3881 *pmode = cmp_mode;
3882 return;
3883 }
3884 delete_insns_since (last);
3885 }
3886
3887 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
3888 break;
3889 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
3890 }
3891 while (cmp_mode != VOIDmode);
3892
3893 if (methods != OPTAB_LIB_WIDEN)
3894 goto fail;
3895
3896 if (!SCALAR_FLOAT_MODE_P (mode))
3897 {
3898 rtx result;
3899 machine_mode ret_mode;
3900
3901 /* Handle a libcall just for the mode we are using. */
3902 libfunc = optab_libfunc (cmp_optab, mode);
3903 gcc_assert (libfunc);
3904
3905 /* If we want unsigned, and this mode has a distinct unsigned
3906 comparison routine, use that. */
3907 if (unsignedp)
3908 {
3909 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
3910 if (ulibfunc)
3911 libfunc = ulibfunc;
3912 }
3913
3914 ret_mode = targetm.libgcc_cmp_return_mode ();
3915 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3916 ret_mode, 2, x, mode, y, mode);
3917
3918 /* There are two kinds of comparison routines. Biased routines
3919 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3920 of gcc expect that the comparison operation is equivalent
3921 to the modified comparison. For signed comparisons compare the
3922 result against 1 in the biased case, and zero in the unbiased
3923 case. For unsigned comparisons always compare against 1 after
3924 biasing the unbiased result by adding 1. This gives us a way to
3925 represent LTU.
3926 The comparisons in the fixed-point helper library are always
3927 biased. */
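/* Worked example (an added illustration): for X < Y an unbiased
   routine returns -1; adding 1 biases it to 0, and 0 LTU 1 is true.
   For X == Y it returns 0, biased to 1, and 1 LTU 1 is false.  A
   biased routine already returns 0/1/2 and is compared against 1
   directly.  */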
3928 x = result;
3929 y = const1_rtx;
3930
3931 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
3932 {
3933 if (unsignedp)
3934 x = plus_constant (ret_mode, result, 1);
3935 else
3936 y = const0_rtx;
3937 }
3938
3939 *pmode = ret_mode;
3940 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
3941 ptest, pmode);
3942 }
3943 else
3944 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
3945
3946 return;
3947
3948 fail:
3949 *ptest = NULL_RTX;
3950 }
3951
3952 /* Before emitting an insn with code ICODE, make sure that X, which is going
3953 to be used for operand OPNUM of the insn, is converted from mode MODE to
3954 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3955 that it is accepted by the operand predicate. Return the new value. */
3956
3957 rtx
3958 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
3959 machine_mode wider_mode, int unsignedp)
3960 {
3961 if (mode != wider_mode)
3962 x = convert_modes (wider_mode, mode, x, unsignedp);
3963
3964 if (!insn_operand_matches (icode, opnum, x))
3965 {
3966 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
3967 if (reload_completed)
3968 return NULL_RTX;
3969 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
3970 return NULL_RTX;
3971 x = copy_to_mode_reg (op_mode, x);
3972 }
3973
3974 return x;
3975 }
3976
3977 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3978 we can do the branch. */
3979
3980 static void
3981 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label, int prob)
3982 {
3983 machine_mode optab_mode;
3984 enum mode_class mclass;
3985 enum insn_code icode;
3986 rtx_insn *insn;
3987
3988 mclass = GET_MODE_CLASS (mode);
3989 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
3990 icode = optab_handler (cbranch_optab, optab_mode);
3991
3992 gcc_assert (icode != CODE_FOR_nothing);
3993 gcc_assert (insn_operand_matches (icode, 0, test));
3994 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
3995 XEXP (test, 1), label));
3996 if (prob != -1
3997 && profile_status_for_fn (cfun) != PROFILE_ABSENT
3998 && insn
3999 && JUMP_P (insn)
4000 && any_condjump_p (insn)
4001 && !find_reg_note (insn, REG_BR_PROB, 0))
4002 add_int_reg_note (insn, REG_BR_PROB, prob);
4003 }
4004
4005 /* Generate code to compare X with Y so that the condition codes are
4006 set and to jump to LABEL if the condition is true. If X is a
4007 constant and Y is not a constant, then the comparison is swapped to
4008 ensure that the comparison RTL has the canonical form.
4009
4010 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4011 need to be widened. UNSIGNEDP is also used to select the proper
4012 branch condition code.
4013
4014 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4015
4016 MODE is the mode of the inputs (in case they are const_int).
4017
4018 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4019 It will be potentially converted into an unsigned variant based on
4020 UNSIGNEDP to select a proper jump instruction.
4021
4022 PROB is the probability of jumping to LABEL. */
4023
4024 void
4025 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4026 machine_mode mode, int unsignedp, rtx label,
4027 int prob)
4028 {
4029 rtx op0 = x, op1 = y;
4030 rtx test;
4031
4032 /* Swap operands and condition to ensure canonical RTL. */
4033 if (swap_commutative_operands_p (x, y)
4034 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4035 {
4036 op0 = y, op1 = x;
4037 comparison = swap_condition (comparison);
4038 }
4039
4040 /* If OP0 is still a constant, then both X and Y must be constants
4041 or the opposite comparison is not supported. Force X into a register
4042 to create canonical RTL. */
4043 if (CONSTANT_P (op0))
4044 op0 = force_reg (mode, op0);
4045
4046 if (unsignedp)
4047 comparison = unsigned_condition (comparison);
4048
4049 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4050 &test, &mode);
4051 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4052 }
4053
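/* A usage sketch (illustrative; X and LABEL are placeholders), emitting
   "if (x < 0) goto label" for a signed SImode X, in the same way
   expand_float uses this function below:

       emit_cmp_and_jump_insns (x, const0_rtx, LT, NULL_RTX, SImode,
                                0, label, -1);  */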
4054 \f
4055 /* Emit a library call comparison between floating point X and Y.
4056 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4057
4058 static void
4059 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4060 rtx *ptest, machine_mode *pmode)
4061 {
4062 enum rtx_code swapped = swap_condition (comparison);
4063 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4064 machine_mode orig_mode = GET_MODE (x);
4065 machine_mode mode, cmp_mode;
4066 rtx true_rtx, false_rtx;
4067 rtx value, target, equiv;
4068 rtx_insn *insns;
4069 rtx libfunc = 0;
4070 bool reversed_p = false;
4071 cmp_mode = targetm.libgcc_cmp_return_mode ();
4072
4073 for (mode = orig_mode;
4074 mode != VOIDmode;
4075 mode = GET_MODE_WIDER_MODE (mode))
4076 {
4077 if (code_to_optab (comparison)
4078 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4079 break;
4080
4081 if (code_to_optab (swapped)
4082 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4083 {
4084 std::swap (x, y);
4085 comparison = swapped;
4086 break;
4087 }
4088
4089 if (code_to_optab (reversed)
4090 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4091 {
4092 comparison = reversed;
4093 reversed_p = true;
4094 break;
4095 }
4096 }
4097
4098 gcc_assert (mode != VOIDmode);
4099
4100 if (mode != orig_mode)
4101 {
4102 x = convert_to_mode (mode, x, 0);
4103 y = convert_to_mode (mode, y, 0);
4104 }
4105
4106 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4107 the RTL. This allows the RTL optimizers to delete the libcall if the
4108 condition can be determined at compile-time. */
4109 if (comparison == UNORDERED
4110 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4111 {
4112 true_rtx = const_true_rtx;
4113 false_rtx = const0_rtx;
4114 }
4115 else
4116 {
4117 switch (comparison)
4118 {
4119 case EQ:
4120 true_rtx = const0_rtx;
4121 false_rtx = const_true_rtx;
4122 break;
4123
4124 case NE:
4125 true_rtx = const_true_rtx;
4126 false_rtx = const0_rtx;
4127 break;
4128
4129 case GT:
4130 true_rtx = const1_rtx;
4131 false_rtx = const0_rtx;
4132 break;
4133
4134 case GE:
4135 true_rtx = const0_rtx;
4136 false_rtx = constm1_rtx;
4137 break;
4138
4139 case LT:
4140 true_rtx = constm1_rtx;
4141 false_rtx = const0_rtx;
4142 break;
4143
4144 case LE:
4145 true_rtx = const0_rtx;
4146 false_rtx = const1_rtx;
4147 break;
4148
4149 default:
4150 gcc_unreachable ();
4151 }
4152 }
4153
4154 if (comparison == UNORDERED)
4155 {
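/* UNORDERED (X, Y) holds iff either operand is a NaN; build the
   equivalent value without an unordered primitive as
   (X != X) ? true : (Y != Y).  */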
4156 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4157 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4158 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4159 temp, const_true_rtx, equiv);
4160 }
4161 else
4162 {
4163 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4164 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4165 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4166 equiv, true_rtx, false_rtx);
4167 }
4168
4169 start_sequence ();
4170 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4171 cmp_mode, 2, x, mode, y, mode);
4172 insns = get_insns ();
4173 end_sequence ();
4174
4175 target = gen_reg_rtx (cmp_mode);
4176 emit_libcall_block (insns, target, value, equiv);
4177
4178 if (comparison == UNORDERED
4179 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4180 || reversed_p)
4181 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4182 else
4183 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4184
4185 *pmode = cmp_mode;
4186 }
4187 \f
4188 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4189
4190 void
4191 emit_indirect_jump (rtx loc)
4192 {
4193 if (!targetm.have_indirect_jump ())
4194 sorry ("indirect jumps are not available on this target");
4195 else
4196 {
4197 struct expand_operand ops[1];
4198 create_address_operand (&ops[0], loc);
4199 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4200 emit_barrier ();
4201 }
4202 }
4203 \f
4204
4205 /* Emit a conditional move instruction if the machine supports one for that
4206 condition and machine mode.
4207
4208 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4209 the mode to use should they be constants. If it is VOIDmode, they cannot
4210 both be constants.
4211
4212 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4213 should be stored there. MODE is the mode to use should they be constants.
4214 If it is VOIDmode, they cannot both be constants.
4215
4216 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4217 is not supported. */
4218
4219 rtx
4220 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4221 machine_mode cmode, rtx op2, rtx op3,
4222 machine_mode mode, int unsignedp)
4223 {
4224 rtx comparison;
4225 rtx_insn *last;
4226 enum insn_code icode;
4227 enum rtx_code reversed;
4228
4229 /* If one operand is constant, make it the second one. Only do this
4230 if the other operand is not constant as well. */
4231
4232 if (swap_commutative_operands_p (op0, op1))
4233 {
4234 std::swap (op0, op1);
4235 code = swap_condition (code);
4236 }
4237
4238 /* get_condition will prefer to generate LT and GT even if the old
4239 comparison was against zero, so undo that canonicalization here since
4240 comparisons against zero are cheaper. */
4241 if (code == LT && op1 == const1_rtx)
4242 code = LE, op1 = const0_rtx;
4243 else if (code == GT && op1 == constm1_rtx)
4244 code = GE, op1 = const0_rtx;
4245
4246 if (cmode == VOIDmode)
4247 cmode = GET_MODE (op0);
4248
4249 if (swap_commutative_operands_p (op2, op3)
4250 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4251 != UNKNOWN))
4252 {
4253 std::swap (op2, op3);
4254 code = reversed;
4255 }
4256
4257 if (mode == VOIDmode)
4258 mode = GET_MODE (op2);
4259
4260 icode = direct_optab_handler (movcc_optab, mode);
4261
4262 if (icode == CODE_FOR_nothing)
4263 return 0;
4264
4265 if (!target)
4266 target = gen_reg_rtx (mode);
4267
4268 code = unsignedp ? unsigned_condition (code) : code;
4269 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4270
4271 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4272 return NULL and let the caller figure out how best to deal with this
4273 situation. */
4274 if (!COMPARISON_P (comparison))
4275 return NULL_RTX;
4276
4277 saved_pending_stack_adjust save;
4278 save_pending_stack_adjust (&save);
4279 last = get_last_insn ();
4280 do_pending_stack_adjust ();
4281 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4282 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4283 &comparison, &cmode);
4284 if (comparison)
4285 {
4286 struct expand_operand ops[4];
4287
4288 create_output_operand (&ops[0], target, mode);
4289 create_fixed_operand (&ops[1], comparison);
4290 create_input_operand (&ops[2], op2, mode);
4291 create_input_operand (&ops[3], op3, mode);
4292 if (maybe_expand_insn (icode, 4, ops))
4293 {
4294 if (ops[0].value != target)
4295 convert_move (target, ops[0].value, false);
4296 return target;
4297 }
4298 }
4299 delete_insns_since (last);
4300 restore_pending_stack_adjust (&save);
4301 return NULL_RTX;
4302 }
4303
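/* A usage sketch (illustrative; A, B and TARGET are placeholders):
   expanding MAX (a, b) for signed SImode operands as a conditional
   move, with a caller-provided fallback when movcc is missing:

       rtx res = emit_conditional_move (target, GT, a, b, SImode,
                                        a, b, SImode, 0);
       if (!res)
         ... fall back to compare-and-branch ...  */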
4304
4305 /* Emit a conditional negate or bitwise complement using the
4306 negcc or notcc optabs if available. Return NULL_RTX if such operations
4307 are not available. Otherwise return the RTX holding the result.
4308 TARGET is the desired destination of the result. COND is the comparison
4309 on which to negate. If COND is true, move into TARGET the negation
4310 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4311 CODE is either NEG or NOT. MODE is the machine mode in which the
4312 operation is performed. */
4313
4314 rtx
4315 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4316 machine_mode mode, rtx cond, rtx op1,
4317 rtx op2)
4318 {
4319 optab op = unknown_optab;
4320 if (code == NEG)
4321 op = negcc_optab;
4322 else if (code == NOT)
4323 op = notcc_optab;
4324 else
4325 gcc_unreachable ();
4326
4327 insn_code icode = direct_optab_handler (op, mode);
4328
4329 if (icode == CODE_FOR_nothing)
4330 return NULL_RTX;
4331
4332 if (!target)
4333 target = gen_reg_rtx (mode);
4334
4335 rtx_insn *last = get_last_insn ();
4336 struct expand_operand ops[4];
4337
4338 create_output_operand (&ops[0], target, mode);
4339 create_fixed_operand (&ops[1], cond);
4340 create_input_operand (&ops[2], op1, mode);
4341 create_input_operand (&ops[3], op2, mode);
4342
4343 if (maybe_expand_insn (icode, 4, ops))
4344 {
4345 if (ops[0].value != target)
4346 convert_move (target, ops[0].value, false);
4347
4348 return target;
4349 }
4350 delete_insns_since (last);
4351 return NULL_RTX;
4352 }
4353
4354 /* Emit a conditional addition instruction if the machine supports one for that
4355 condition and machine mode.
4356
4357 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4358 the mode to use should they be constants. If it is VOIDmode, they cannot
4359 both be constants.
4360
4361 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4362 should be stored there. MODE is the mode to use should they be constants.
4363 If it is VOIDmode, they cannot both be constants.
4364
4365 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4366 is not supported. */
4367
4368 rtx
4369 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4370 machine_mode cmode, rtx op2, rtx op3,
4371 machine_mode mode, int unsignedp)
4372 {
4373 rtx comparison;
4374 rtx_insn *last;
4375 enum insn_code icode;
4376
4377 /* If one operand is constant, make it the second one. Only do this
4378 if the other operand is not constant as well. */
4379
4380 if (swap_commutative_operands_p (op0, op1))
4381 {
4382 std::swap (op0, op1);
4383 code = swap_condition (code);
4384 }
4385
4386 /* get_condition will prefer to generate LT and GT even if the old
4387 comparison was against zero, so undo that canonicalization here since
4388 comparisons against zero are cheaper. */
4389 if (code == LT && op1 == const1_rtx)
4390 code = LE, op1 = const0_rtx;
4391 else if (code == GT && op1 == constm1_rtx)
4392 code = GE, op1 = const0_rtx;
4393
4394 if (cmode == VOIDmode)
4395 cmode = GET_MODE (op0);
4396
4397 if (mode == VOIDmode)
4398 mode = GET_MODE (op2);
4399
4400 icode = optab_handler (addcc_optab, mode);
4401
4402 if (icode == CODE_FOR_nothing)
4403 return 0;
4404
4405 if (!target)
4406 target = gen_reg_rtx (mode);
4407
4408 code = unsignedp ? unsigned_condition (code) : code;
4409 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4410
4411 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4412 return NULL and let the caller figure out how best to deal with this
4413 situation. */
4414 if (!COMPARISON_P (comparison))
4415 return NULL_RTX;
4416
4417 do_pending_stack_adjust ();
4418 last = get_last_insn ();
4419 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4420 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4421 &comparison, &cmode);
4422 if (comparison)
4423 {
4424 struct expand_operand ops[4];
4425
4426 create_output_operand (&ops[0], target, mode);
4427 create_fixed_operand (&ops[1], comparison);
4428 create_input_operand (&ops[2], op2, mode);
4429 create_input_operand (&ops[3], op3, mode);
4430 if (maybe_expand_insn (icode, 4, ops))
4431 {
4432 if (ops[0].value != target)
4433 convert_move (target, ops[0].value, false);
4434 return target;
4435 }
4436 }
4437 delete_insns_since (last);
4438 return NULL_RTX;
4439 }
4440 \f
4441 /* These functions attempt to generate an insn body, rather than
4442 emitting the insn, but if the gen function already emits them, we
4443 make no attempt to turn them back into naked patterns. */
4444
4445 /* Generate and return an insn body to add Y to X. */
4446
4447 rtx_insn *
4448 gen_add2_insn (rtx x, rtx y)
4449 {
4450 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4451
4452 gcc_assert (insn_operand_matches (icode, 0, x));
4453 gcc_assert (insn_operand_matches (icode, 1, x));
4454 gcc_assert (insn_operand_matches (icode, 2, y));
4455
4456 return GEN_FCN (icode) (x, x, y);
4457 }
4458
4459 /* Generate and return an insn body to add r1 and c,
4460 storing the result in r0. */
4461
4462 rtx_insn *
4463 gen_add3_insn (rtx r0, rtx r1, rtx c)
4464 {
4465 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4466
4467 if (icode == CODE_FOR_nothing
4468 || !insn_operand_matches (icode, 0, r0)
4469 || !insn_operand_matches (icode, 1, r1)
4470 || !insn_operand_matches (icode, 2, c))
4471 return NULL;
4472
4473 return GEN_FCN (icode) (r0, r1, c);
4474 }
4475
4476 int
4477 have_add2_insn (rtx x, rtx y)
4478 {
4479 enum insn_code icode;
4480
4481 gcc_assert (GET_MODE (x) != VOIDmode);
4482
4483 icode = optab_handler (add_optab, GET_MODE (x));
4484
4485 if (icode == CODE_FOR_nothing)
4486 return 0;
4487
4488 if (!insn_operand_matches (icode, 0, x)
4489 || !insn_operand_matches (icode, 1, x)
4490 || !insn_operand_matches (icode, 2, y))
4491 return 0;
4492
4493 return 1;
4494 }
4495
4496 /* Generate and return an insn body to add Y and Z, storing the result in X. */
4497
4498 rtx_insn *
4499 gen_addptr3_insn (rtx x, rtx y, rtx z)
4500 {
4501 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4502
4503 gcc_assert (insn_operand_matches (icode, 0, x));
4504 gcc_assert (insn_operand_matches (icode, 1, y));
4505 gcc_assert (insn_operand_matches (icode, 2, z));
4506
4507 return GEN_FCN (icode) (x, y, z);
4508 }
4509
4510 /* Return true if the target implements an addptr pattern and X, Y,
4511 and Z are valid for the pattern predicates. */
4512
4513 int
4514 have_addptr3_insn (rtx x, rtx y, rtx z)
4515 {
4516 enum insn_code icode;
4517
4518 gcc_assert (GET_MODE (x) != VOIDmode);
4519
4520 icode = optab_handler (addptr3_optab, GET_MODE (x));
4521
4522 if (icode == CODE_FOR_nothing)
4523 return 0;
4524
4525 if (!insn_operand_matches (icode, 0, x)
4526 || !insn_operand_matches (icode, 1, y)
4527 || !insn_operand_matches (icode, 2, z))
4528 return 0;
4529
4530 return 1;
4531 }
4532
4533 /* Generate and return an insn body to subtract Y from X. */
4534
4535 rtx_insn *
4536 gen_sub2_insn (rtx x, rtx y)
4537 {
4538 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4539
4540 gcc_assert (insn_operand_matches (icode, 0, x));
4541 gcc_assert (insn_operand_matches (icode, 1, x));
4542 gcc_assert (insn_operand_matches (icode, 2, y));
4543
4544 return GEN_FCN (icode) (x, x, y);
4545 }
4546
4547 /* Generate and return an insn body to subtract c from r1,
4548 storing the result in r0. */
4549
4550 rtx_insn *
4551 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4552 {
4553 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4554
4555 if (icode == CODE_FOR_nothing
4556 || !insn_operand_matches (icode, 0, r0)
4557 || !insn_operand_matches (icode, 1, r1)
4558 || !insn_operand_matches (icode, 2, c))
4559 return NULL;
4560
4561 return GEN_FCN (icode) (r0, r1, c);
4562 }
4563
4564 int
4565 have_sub2_insn (rtx x, rtx y)
4566 {
4567 enum insn_code icode;
4568
4569 gcc_assert (GET_MODE (x) != VOIDmode);
4570
4571 icode = optab_handler (sub_optab, GET_MODE (x));
4572
4573 if (icode == CODE_FOR_nothing)
4574 return 0;
4575
4576 if (!insn_operand_matches (icode, 0, x)
4577 || !insn_operand_matches (icode, 1, x)
4578 || !insn_operand_matches (icode, 2, y))
4579 return 0;
4580
4581 return 1;
4582 }
4583 \f
4584 /* Generate the body of an insn to extend Y (with mode MFROM)
4585 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4586
4587 rtx_insn *
4588 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4589 machine_mode mfrom, int unsignedp)
4590 {
4591 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4592 return GEN_FCN (icode) (x, y);
4593 }
4594 \f
4595 /* Generate code to convert FROM to floating point
4596 and store in TO. FROM must be fixed point and not VOIDmode.
4597 UNSIGNEDP nonzero means regard FROM as unsigned.
4598 Normally this is done by correcting the final value
4599 if it is negative. */
4600
4601 void
4602 expand_float (rtx to, rtx from, int unsignedp)
4603 {
4604 enum insn_code icode;
4605 rtx target = to;
4606 machine_mode fmode, imode;
4607 bool can_do_signed = false;
4608
4609 /* Crash now, because we won't be able to decide which mode to use. */
4610 gcc_assert (GET_MODE (from) != VOIDmode);
4611
4612 /* Look for an insn to do the conversion. Do it in the specified
4613 modes if possible; otherwise convert either input, output or both to
4614 wider mode. If the integer mode is wider than the mode of FROM,
4615 we can do the conversion signed even if the input is unsigned. */
4616
4617 for (fmode = GET_MODE (to); fmode != VOIDmode;
4618 fmode = GET_MODE_WIDER_MODE (fmode))
4619 for (imode = GET_MODE (from); imode != VOIDmode;
4620 imode = GET_MODE_WIDER_MODE (imode))
4621 {
4622 int doing_unsigned = unsignedp;
4623
4624 if (fmode != GET_MODE (to)
4625 && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
4626 continue;
4627
4628 icode = can_float_p (fmode, imode, unsignedp);
4629 if (icode == CODE_FOR_nothing && unsignedp)
4630 {
4631 enum insn_code scode = can_float_p (fmode, imode, 0);
4632 if (scode != CODE_FOR_nothing)
4633 can_do_signed = true;
4634 if (imode != GET_MODE (from))
4635 icode = scode, doing_unsigned = 0;
4636 }
4637
4638 if (icode != CODE_FOR_nothing)
4639 {
4640 if (imode != GET_MODE (from))
4641 from = convert_to_mode (imode, from, unsignedp);
4642
4643 if (fmode != GET_MODE (to))
4644 target = gen_reg_rtx (fmode);
4645
4646 emit_unop_insn (icode, target, from,
4647 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4648
4649 if (target != to)
4650 convert_move (to, target, 0);
4651 return;
4652 }
4653 }
4654
4655 /* Unsigned integer, and no way to convert directly. Convert as signed,
4656 then adjust the result if the input's sign bit was set. */
4657 if (unsignedp && can_do_signed)
4658 {
4659 rtx_code_label *label = gen_label_rtx ();
4660 rtx temp;
4661 REAL_VALUE_TYPE offset;
4662
4663 /* Look for a usable floating mode FMODE wider than the source and at
4664 least as wide as the target. Using FMODE will avoid rounding woes
4665 with unsigned values greater than the signed maximum value. */
4666
4667 for (fmode = GET_MODE (to); fmode != VOIDmode;
4668 fmode = GET_MODE_WIDER_MODE (fmode))
4669 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4670 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4671 break;
4672
4673 if (fmode == VOIDmode)
4674 {
4675 /* There is no such mode. Pretend the target is wide enough. */
4676 fmode = GET_MODE (to);
4677
4678 /* Avoid double-rounding when TO is narrower than FROM. */
4679 if ((significand_size (fmode) + 1)
4680 < GET_MODE_PRECISION (GET_MODE (from)))
4681 {
4682 rtx temp1;
4683 rtx_code_label *neglabel = gen_label_rtx ();
4684
4685 /* Don't use TARGET if it isn't a register, is a hard register,
4686 or is the wrong mode. */
4687 if (!REG_P (target)
4688 || REGNO (target) < FIRST_PSEUDO_REGISTER
4689 || GET_MODE (target) != fmode)
4690 target = gen_reg_rtx (fmode);
4691
4692 imode = GET_MODE (from);
4693 do_pending_stack_adjust ();
4694
4695 /* Test whether the sign bit is set. */
4696 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4697 0, neglabel);
4698
4699 /* The sign bit is not set. Convert as signed. */
4700 expand_float (target, from, 0);
4701 emit_jump_insn (targetm.gen_jump (label));
4702 emit_barrier ();
4703
4704 /* The sign bit is set.
4705 Convert to a usable (positive signed) value by shifting right
4706 one bit, while remembering if a nonzero bit was shifted
4707 out; i.e., compute (from & 1) | (from >> 1). */
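/* Note (added): the IOR merges the bit shifted out of FROM into bit 0
   of TEMP, so TEMP is odd whenever FROM is odd; the discarded half is
   remembered as a sticky bit before the conversion rounds and the
   result is doubled below.  */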
4708
4709 emit_label (neglabel);
4710 temp = expand_binop (imode, and_optab, from, const1_rtx,
4711 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4712 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4713 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4714 OPTAB_LIB_WIDEN);
4715 expand_float (target, temp, 0);
4716
4717 /* Multiply by 2 to undo the shift above. */
4718 temp = expand_binop (fmode, add_optab, target, target,
4719 target, 0, OPTAB_LIB_WIDEN);
4720 if (temp != target)
4721 emit_move_insn (target, temp);
4722
4723 do_pending_stack_adjust ();
4724 emit_label (label);
4725 goto done;
4726 }
4727 }
4728
4729 /* If we are about to do some arithmetic to correct for an
4730 unsigned operand, do it in a pseudo-register. */
4731
4732 if (GET_MODE (to) != fmode
4733 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4734 target = gen_reg_rtx (fmode);
4735
4736 /* Convert as signed integer to floating. */
4737 expand_float (target, from, 0);
4738
4739 /* If FROM is negative (and therefore TO is negative),
4740 correct its value by 2**bitwidth. */
4741
4742 do_pending_stack_adjust ();
4743 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4744 0, label);
4745
4746
4747 real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
4748 temp = expand_binop (fmode, add_optab, target,
4749 const_double_from_real_value (offset, fmode),
4750 target, 0, OPTAB_LIB_WIDEN);
4751 if (temp != target)
4752 emit_move_insn (target, temp);
4753
4754 do_pending_stack_adjust ();
4755 emit_label (label);
4756 goto done;
4757 }
4758
4759 /* No hardware instruction available; call a library routine. */
4760 {
4761 rtx libfunc;
4762 rtx_insn *insns;
4763 rtx value;
4764 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4765
4766 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
4767 from = convert_to_mode (SImode, from, unsignedp);
4768
4769 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4770 gcc_assert (libfunc);
4771
4772 start_sequence ();
4773
4774 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4775 GET_MODE (to), 1, from,
4776 GET_MODE (from));
4777 insns = get_insns ();
4778 end_sequence ();
4779
4780 emit_libcall_block (insns, target, value,
4781 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4782 GET_MODE (to), from));
4783 }
4784
4785 done:
4786
4787 /* Copy result to requested destination
4788 if we have been computing in a temp location. */
4789
4790 if (target != to)
4791 {
4792 if (GET_MODE (target) == GET_MODE (to))
4793 emit_move_insn (to, target);
4794 else
4795 convert_move (to, target, 0);
4796 }
4797 }
4798 \f
4799 /* Generate code to convert FROM to fixed point and store in TO. FROM
4800 must be floating point. */
4801
4802 void
4803 expand_fix (rtx to, rtx from, int unsignedp)
4804 {
4805 enum insn_code icode;
4806 rtx target = to;
4807 machine_mode fmode, imode;
4808 bool must_trunc = false;
4809
4810 /* We first try to find a pair of modes, one real and one integer, at
4811 least as wide as FROM and TO, respectively, in which we can open-code
4812 this conversion. If the integer mode is wider than the mode of TO,
4813 we can do the conversion either signed or unsigned. */
4814
4815 for (fmode = GET_MODE (from); fmode != VOIDmode;
4816 fmode = GET_MODE_WIDER_MODE (fmode))
4817 for (imode = GET_MODE (to); imode != VOIDmode;
4818 imode = GET_MODE_WIDER_MODE (imode))
4819 {
4820 int doing_unsigned = unsignedp;
4821
4822 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4823 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4824 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4825
4826 if (icode != CODE_FOR_nothing)
4827 {
4828 rtx_insn *last = get_last_insn ();
4829 if (fmode != GET_MODE (from))
4830 from = convert_to_mode (fmode, from, 0);
4831
4832 if (must_trunc)
4833 {
4834 rtx temp = gen_reg_rtx (GET_MODE (from));
4835 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4836 temp, 0);
4837 }
4838
4839 if (imode != GET_MODE (to))
4840 target = gen_reg_rtx (imode);
4841
4842 if (maybe_emit_unop_insn (icode, target, from,
4843 doing_unsigned ? UNSIGNED_FIX : FIX))
4844 {
4845 if (target != to)
4846 convert_move (to, target, unsignedp);
4847 return;
4848 }
4849 delete_insns_since (last);
4850 }
4851 }
4852
4853 /* For an unsigned conversion, there is one more way to do it.
4854 If we have a signed conversion, we generate code that compares
4855 the real value to the largest representable positive number. If it
4856 is smaller, the conversion is done normally. Otherwise, subtract
4857 one plus the highest signed number, convert, and add it back.
4858
4859 We only need to check all real modes, since we know we didn't find
4860 anything with a wider integer mode.
4861
4862 This code used to extend the FP value into a mode wider than the destination.
4863 This is needed for decimal float modes which cannot accurately
4864 represent one plus the highest signed number of the same size, but
4865 not for binary modes. Consider, for instance, conversion from SFmode
4866 into DImode.
4867
4868 The hot path through the code is dealing with inputs smaller than 2^63
4869 and doing just the conversion, so there are no bits to lose.
4870
4871 In the other path we know the value is positive in the range 2^63..2^64-1
4872 inclusive (for any other input, overflow happens and the result is
4873 undefined). So we know that the most significant bit set in the mantissa
4874 corresponds to 2^63. The subtraction of 2^63 should not generate any
4875 rounding as it simply clears out that bit. The rest is trivial. */
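/* Worked example (an added illustration): converting FROM = 2^63 + 2^32
   from DFmode to unsigned DImode when only the signed fix exists:
   FROM >= LIMIT = 2^63 routes control to LAB1; FROM - 2^63 = 2^32 is
   computed exactly, the signed fix produces 2^32, and XORing with
   1 << 63 puts the high bit back, giving 2^63 + 2^32.  */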
4876
4877 if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4878 for (fmode = GET_MODE (from); fmode != VOIDmode;
4879 fmode = GET_MODE_WIDER_MODE (fmode))
4880 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
4881 && (!DECIMAL_FLOAT_MODE_P (fmode)
4882 || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
4883 {
4884 int bitsize;
4885 REAL_VALUE_TYPE offset;
4886 rtx limit;
4887 rtx_code_label *lab1, *lab2;
4888 rtx_insn *insn;
4889
4890 bitsize = GET_MODE_PRECISION (GET_MODE (to));
4891 real_2expN (&offset, bitsize - 1, fmode);
4892 limit = const_double_from_real_value (offset, fmode);
4893 lab1 = gen_label_rtx ();
4894 lab2 = gen_label_rtx ();
4895
4896 if (fmode != GET_MODE (from))
4897 from = convert_to_mode (fmode, from, 0);
4898
4899 /* See if we need to do the subtraction. */
4900 do_pending_stack_adjust ();
4901 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4902 0, lab1);
4903
4904 /* If not, do the signed "fix" and branch around fixup code. */
4905 expand_fix (to, from, 0);
4906 emit_jump_insn (targetm.gen_jump (lab2));
4907 emit_barrier ();
4908
4909 /* Otherwise, subtract 2**(N-1), convert to signed number,
4910 then add 2**(N-1). Do the addition using XOR since this
4911 will often generate better code. */
4912 emit_label (lab1);
4913 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4914 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4915 expand_fix (to, target, 0);
4916 target = expand_binop (GET_MODE (to), xor_optab, to,
4917 gen_int_mode
4918 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4919 GET_MODE (to)),
4920 to, 1, OPTAB_LIB_WIDEN);
4921
4922 if (target != to)
4923 emit_move_insn (to, target);
4924
4925 emit_label (lab2);
4926
4927 if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
4928 {
4929 /* Make a place for a REG_NOTE and add it. */
4930 insn = emit_move_insn (to, to);
4931 set_dst_reg_note (insn, REG_EQUAL,
4932 gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
4933 copy_rtx (from)),
4934 to);
4935 }
4936
4937 return;
4938 }
4939
4940 /* We can't do it with an insn, so use a library call. But first ensure
4941 that the mode of TO is at least as wide as SImode, since those are the
4942 only library calls we know about. */
4943
4944 if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
4945 {
4946 target = gen_reg_rtx (SImode);
4947
4948 expand_fix (target, from, unsignedp);
4949 }
4950 else
4951 {
4952 rtx_insn *insns;
4953 rtx value;
4954 rtx libfunc;
4955
4956 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4957 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4958 gcc_assert (libfunc);
4959
4960 start_sequence ();
4961
4962 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4963 GET_MODE (to), 1, from,
4964 GET_MODE (from));
4965 insns = get_insns ();
4966 end_sequence ();
4967
4968 emit_libcall_block (insns, target, value,
4969 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4970 GET_MODE (to), from));
4971 }
4972
4973 if (target != to)
4974 {
4975 if (GET_MODE (to) == GET_MODE (target))
4976 emit_move_insn (to, target);
4977 else
4978 convert_move (to, target, 0);
4979 }
4980 }
4981
4982
4983 /* Promote integer arguments for a libcall if necessary.
4984 emit_library_call_value cannot do the promotion because it does not
4985 know if it should do a signed or unsigned promotion. This is because
4986 there are no tree types defined for libcalls. */
4987
4988 static rtx
4989 prepare_libcall_arg (rtx arg, int uintp)
4990 {
4991 machine_mode mode = GET_MODE (arg);
4992 machine_mode arg_mode;
4993 if (SCALAR_INT_MODE_P (mode))
4994 {
4995 /* If we need to promote the integer function argument we need to do
4996 it here instead of inside emit_library_call_value because in
4997 emit_library_call_value we don't know if we should do a signed or
4998 unsigned promotion. */
4999
5000 int unsigned_p = 0;
5001 arg_mode = promote_function_mode (NULL_TREE, mode,
5002 &unsigned_p, NULL_TREE, 0);
5003 if (arg_mode != mode)
5004 return convert_to_mode (arg_mode, arg, uintp);
5005 }
5006 return arg;
5007 }
5008
5009 /* Generate code to convert FROM to TO, where at least one of them has a fixed-point mode.
5010 If UINTP is true, either TO or FROM is an unsigned integer.
5011 If SATP is true, we need to saturate the result. */
5012
5013 void
5014 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5015 {
5016 machine_mode to_mode = GET_MODE (to);
5017 machine_mode from_mode = GET_MODE (from);
5018 convert_optab tab;
5019 enum rtx_code this_code;
5020 enum insn_code code;
5021 rtx_insn *insns;
5022 rtx value;
5023 rtx libfunc;
5024
5025 if (to_mode == from_mode)
5026 {
5027 emit_move_insn (to, from);
5028 return;
5029 }
5030
5031 if (uintp)
5032 {
5033 tab = satp ? satfractuns_optab : fractuns_optab;
5034 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5035 }
5036 else
5037 {
5038 tab = satp ? satfract_optab : fract_optab;
5039 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5040 }
5041 code = convert_optab_handler (tab, to_mode, from_mode);
5042 if (code != CODE_FOR_nothing)
5043 {
5044 emit_unop_insn (code, to, from, this_code);
5045 return;
5046 }
5047
5048 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5049 gcc_assert (libfunc);
5050
5051 from = prepare_libcall_arg (from, uintp);
5052 from_mode = GET_MODE (from);
5053
5054 start_sequence ();
5055 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5056 1, from, from_mode);
5057 insns = get_insns ();
5058 end_sequence ();
5059
5060 emit_libcall_block (insns, to, value,
5061 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5062 }
5063
5064 /* Generate code to convert FROM to fixed point and store in TO. FROM
5065 must be floating point, TO must be signed. Use the conversion optab
5066 TAB to do the conversion. */
5067
5068 bool
5069 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5070 {
5071 enum insn_code icode;
5072 rtx target = to;
5073 machine_mode fmode, imode;
5074
5075 /* We first try to find a pair of modes, one real and one integer, at
5076 least as wide as FROM and TO, respectively, in which we can open-code
5077 this conversion. If the integer mode is wider than the mode of TO,
5078 we can do the conversion either signed or unsigned. */
5079
5080 for (fmode = GET_MODE (from); fmode != VOIDmode;
5081 fmode = GET_MODE_WIDER_MODE (fmode))
5082 for (imode = GET_MODE (to); imode != VOIDmode;
5083 imode = GET_MODE_WIDER_MODE (imode))
5084 {
5085 icode = convert_optab_handler (tab, imode, fmode);
5086 if (icode != CODE_FOR_nothing)
5087 {
5088 rtx_insn *last = get_last_insn ();
5089 if (fmode != GET_MODE (from))
5090 from = convert_to_mode (fmode, from, 0);
5091
5092 if (imode != GET_MODE (to))
5093 target = gen_reg_rtx (imode);
5094
5095 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5096 {
5097 delete_insns_since (last);
5098 continue;
5099 }
5100 if (target != to)
5101 convert_move (to, target, 0);
5102 return true;
5103 }
5104 }
5105
5106 return false;
5107 }
5108 \f
5109 /* Report whether we have an instruction to perform the operation
5110 specified by CODE on operands of mode MODE. */
5111 int
5112 have_insn_for (enum rtx_code code, machine_mode mode)
5113 {
5114 return (code_to_optab (code)
5115 && (optab_handler (code_to_optab (code), mode)
5116 != CODE_FOR_nothing));
5117 }
5118
5119 /* Print information about the current contents of the optabs on
5120 STDERR. */
5121
5122 DEBUG_FUNCTION void
5123 debug_optab_libfuncs (void)
5124 {
5125 int i, j, k;
5126
5127 /* Dump the arithmetic optabs. */
5128 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5129 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5130 {
5131 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5132 if (l)
5133 {
5134 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5135 fprintf (stderr, "%s\t%s:\t%s\n",
5136 GET_RTX_NAME (optab_to_code ((optab) i)),
5137 GET_MODE_NAME (j),
5138 XSTR (l, 0));
5139 }
5140 }
5141
5142 /* Dump the conversion optabs. */
5143 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5144 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5145 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5146 {
5147 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5148 (machine_mode) k);
5149 if (l)
5150 {
5151 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5152 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5153 GET_RTX_NAME (optab_to_code ((optab) i)),
5154 GET_MODE_NAME (j),
5155 GET_MODE_NAME (k),
5156 XSTR (l, 0));
5157 }
5158 }
5159 }
5160
5161 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5162 CODE. Return 0 on failure. */
5163
5164 rtx_insn *
5165 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5166 {
5167 machine_mode mode = GET_MODE (op1);
5168 enum insn_code icode;
5169 rtx_insn *insn;
5170 rtx trap_rtx;
5171
5172 if (mode == VOIDmode)
5173 return 0;
5174
5175 icode = optab_handler (ctrap_optab, mode);
5176 if (icode == CODE_FOR_nothing)
5177 return 0;
5178
5179 /* Some targets only accept a zero trap code. */
5180 if (!insn_operand_matches (icode, 3, tcode))
5181 return 0;
5182
5183 do_pending_stack_adjust ();
5184 start_sequence ();
5185 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5186 &trap_rtx, &mode);
5187 if (!trap_rtx)
5188 insn = NULL;
5189 else
5190 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5191 tcode);
5192
5193 /* If that failed, then give up. */
5194 if (insn == 0)
5195 {
5196 end_sequence ();
5197 return 0;
5198 }
5199
5200 emit_insn (insn);
5201 insn = get_insns ();
5202 end_sequence ();
5203 return insn;
5204 }
5205
5206 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5207 or unsigned operation code. */
5208
5209 enum rtx_code
5210 get_rtx_code (enum tree_code tcode, bool unsignedp)
5211 {
5212 enum rtx_code code;
5213 switch (tcode)
5214 {
5215 case EQ_EXPR:
5216 code = EQ;
5217 break;
5218 case NE_EXPR:
5219 code = NE;
5220 break;
5221 case LT_EXPR:
5222 code = unsignedp ? LTU : LT;
5223 break;
5224 case LE_EXPR:
5225 code = unsignedp ? LEU : LE;
5226 break;
5227 case GT_EXPR:
5228 code = unsignedp ? GTU : GT;
5229 break;
5230 case GE_EXPR:
5231 code = unsignedp ? GEU : GE;
5232 break;
5233
5234 case UNORDERED_EXPR:
5235 code = UNORDERED;
5236 break;
5237 case ORDERED_EXPR:
5238 code = ORDERED;
5239 break;
5240 case UNLT_EXPR:
5241 code = UNLT;
5242 break;
5243 case UNLE_EXPR:
5244 code = UNLE;
5245 break;
5246 case UNGT_EXPR:
5247 code = UNGT;
5248 break;
5249 case UNGE_EXPR:
5250 code = UNGE;
5251 break;
5252 case UNEQ_EXPR:
5253 code = UNEQ;
5254 break;
5255 case LTGT_EXPR:
5256 code = LTGT;
5257 break;
5258
5259 case BIT_AND_EXPR:
5260 code = AND;
5261 break;
5262
5263 case BIT_IOR_EXPR:
5264 code = IOR;
5265 break;
5266
5267 default:
5268 gcc_unreachable ();
5269 }
5270 return code;
5271 }
5272
5273 /* Return comparison rtx for TCODE. Use UNSIGNEDP to select signed or
5274 unsigned operators. OPNO holds an index of the first comparison
5275 operand in insn with code ICODE. Do not generate compare instruction. */
5276
5277 static rtx
5278 vector_compare_rtx (enum tree_code tcode, tree t_op0, tree t_op1,
5279 bool unsignedp, enum insn_code icode,
5280 unsigned int opno)
5281 {
5282 struct expand_operand ops[2];
5283 rtx rtx_op0, rtx_op1;
5284 machine_mode m0, m1;
5285 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5286
5287 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5288
5289 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5290 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5291 cases, use the original mode. */
5292 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5293 EXPAND_STACK_PARM);
5294 m0 = GET_MODE (rtx_op0);
5295 if (m0 == VOIDmode)
5296 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5297
5298 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5299 EXPAND_STACK_PARM);
5300 m1 = GET_MODE (rtx_op1);
5301 if (m1 == VOIDmode)
5302 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5303
5304 create_input_operand (&ops[0], rtx_op0, m0);
5305 create_input_operand (&ops[1], rtx_op1, m1);
5306 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5307 gcc_unreachable ();
5308 return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
5309 }
5310
5311 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
5312 vec_perm operand, assuming the second operand is a constant vector of zeroes.
5313 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
5314 shift. */
5315 static rtx
5316 shift_amt_for_vec_perm_mask (rtx sel)
5317 {
5318 unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
5319 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));
5320
5321 if (GET_CODE (sel) != CONST_VECTOR)
5322 return NULL_RTX;
5323
5324 first = INTVAL (CONST_VECTOR_ELT (sel, 0));
5325 if (first >= nelt)
5326 return NULL_RTX;
5327 for (i = 1; i < nelt; i++)
5328 {
5329 int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
5330 unsigned int expected = i + first;
5331 /* Indices into the second vector are all equivalent. */
5332 if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
5333 return NULL_RTX;
5334 }
5335
5336 return GEN_INT (first * bitsize);
5337 }
5338
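/* For instance (an illustrative note): in V4SImode, NELT is 4 and
   BITSIZE is 32; the constant mask {1, 2, 3, 4} selects elements 1..3
   of the first operand followed by element 0 of the all-zero second
   operand, so the function returns GEN_INT (32), i.e. a whole-vector
   shift by one element.  */
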
5339 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5340
5341 static rtx
5342 expand_vec_perm_1 (enum insn_code icode, rtx target,
5343 rtx v0, rtx v1, rtx sel)
5344 {
5345 machine_mode tmode = GET_MODE (target);
5346 machine_mode smode = GET_MODE (sel);
5347 struct expand_operand ops[4];
5348
5349 create_output_operand (&ops[0], target, tmode);
5350 create_input_operand (&ops[3], sel, smode);
5351
5352 /* Make an effort to preserve v0 == v1. The target expander is able to
5353 rely on this to determine if we're permuting a single input operand. */
5354 if (rtx_equal_p (v0, v1))
5355 {
5356 if (!insn_operand_matches (icode, 1, v0))
5357 v0 = force_reg (tmode, v0);
5358 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5359 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5360
5361 create_fixed_operand (&ops[1], v0);
5362 create_fixed_operand (&ops[2], v0);
5363 }
5364 else
5365 {
5366 create_input_operand (&ops[1], v0, tmode);
5367 create_input_operand (&ops[2], v1, tmode);
5368 }
5369
5370 if (maybe_expand_insn (icode, 4, ops))
5371 return ops[0].value;
5372 return NULL_RTX;
5373 }
5374
5375 /* Generate instructions for vec_perm optab given its mode
5376 and three operands. */
5377
5378 rtx
5379 expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5380 {
5381 enum insn_code icode;
5382 machine_mode qimode;
5383 unsigned int i, w, e, u;
5384 rtx tmp, sel_qi = NULL;
5385 rtvec vec;
5386
5387 if (!target || GET_MODE (target) != mode)
5388 target = gen_reg_rtx (mode);
5389
5390 w = GET_MODE_SIZE (mode);
5391 e = GET_MODE_NUNITS (mode);
5392 u = GET_MODE_UNIT_SIZE (mode);
5393
5394 /* Set QIMODE to a different vector mode with byte elements.
5395 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5396 qimode = VOIDmode;
5397 if (GET_MODE_INNER (mode) != QImode)
5398 {
5399 qimode = mode_for_vector (QImode, w);
5400 if (!VECTOR_MODE_P (qimode))
5401 qimode = VOIDmode;
5402 }
5403
5404 /* If the input is a constant, expand it specially. */
5405 gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
5406 if (GET_CODE (sel) == CONST_VECTOR)
5407 {
5408 /* See if this can be handled with a vec_shr. We only do this if the
5409 second vector is all zeroes. */
5410 enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
5411 enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5412 ? optab_handler (vec_shr_optab, qimode)
5413 : CODE_FOR_nothing);
5414 rtx shift_amt = NULL_RTX;
5415 if (v1 == CONST0_RTX (GET_MODE (v1))
5416 && (shift_code != CODE_FOR_nothing
5417 || shift_code_qi != CODE_FOR_nothing))
5418 {
5419 shift_amt = shift_amt_for_vec_perm_mask (sel);
5420 if (shift_amt)
5421 {
5422 struct expand_operand ops[3];
5423 if (shift_code != CODE_FOR_nothing)
5424 {
5425 create_output_operand (&ops[0], target, mode);
5426 create_input_operand (&ops[1], v0, mode);
5427 create_convert_operand_from_type (&ops[2], shift_amt,
5428 sizetype);
5429 if (maybe_expand_insn (shift_code, 3, ops))
5430 return ops[0].value;
5431 }
5432 if (shift_code_qi != CODE_FOR_nothing)
5433 {
5434 tmp = gen_reg_rtx (qimode);
5435 create_output_operand (&ops[0], tmp, qimode);
5436 create_input_operand (&ops[1], gen_lowpart (qimode, v0),
5437 qimode);
5438 create_convert_operand_from_type (&ops[2], shift_amt,
5439 sizetype);
5440 if (maybe_expand_insn (shift_code_qi, 3, ops))
5441 return gen_lowpart (mode, ops[0].value);
5442 }
5443 }
5444 }
5445
5446 icode = direct_optab_handler (vec_perm_const_optab, mode);
5447 if (icode != CODE_FOR_nothing)
5448 {
5449 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5450 if (tmp)
5451 return tmp;
5452 }
5453
5454 /* Fall back to a constant byte-based permutation. */
5455 if (qimode != VOIDmode)
5456 {
5457 vec = rtvec_alloc (w);
5458 for (i = 0; i < e; ++i)
5459 {
5460 unsigned int j, this_e;
5461
5462 this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
5463 this_e &= 2 * e - 1;
5464 this_e *= u;
5465
5466 for (j = 0; j < u; ++j)
5467 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
5468 }
5469 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
5470
5471 icode = direct_optab_handler (vec_perm_const_optab, qimode);
5472 if (icode != CODE_FOR_nothing)
5473 {
5474 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5475 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5476 gen_lowpart (qimode, v1), sel_qi);
5477 if (tmp)
5478 return gen_lowpart (mode, tmp);
5479 }
5480 }
5481 }
5482
5483 /* Otherwise expand as a fully variable permutation. */
5484 icode = direct_optab_handler (vec_perm_optab, mode);
5485 if (icode != CODE_FOR_nothing)
5486 {
5487 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5488 if (tmp)
5489 return tmp;
5490 }
5491
5492 /* As a special case to aid several targets, lower the element-based
5493 permutation to a byte-based permutation and try again. */
5494 if (qimode == VOIDmode)
5495 return NULL_RTX;
5496 icode = direct_optab_handler (vec_perm_optab, qimode);
5497 if (icode == CODE_FOR_nothing)
5498 return NULL_RTX;
5499
5500 if (sel_qi == NULL)
5501 {
5502 /* Multiply each element by its byte size. */
5503 machine_mode selmode = GET_MODE (sel);
5504 if (u == 2)
5505 sel = expand_simple_binop (selmode, PLUS, sel, sel,
5506 NULL, 0, OPTAB_DIRECT);
5507 else
5508 sel = expand_simple_binop (selmode, ASHIFT, sel,
5509 GEN_INT (exact_log2 (u)),
5510 NULL, 0, OPTAB_DIRECT);
5511 gcc_assert (sel != NULL);
5512
5513 /* Broadcast the low byte of each element into each of its bytes. */
5514 vec = rtvec_alloc (w);
5515 for (i = 0; i < w; ++i)
5516 {
5517 int this_e = i / u * u;
5518 if (BYTES_BIG_ENDIAN)
5519 this_e += u - 1;
5520 RTVEC_ELT (vec, i) = GEN_INT (this_e);
5521 }
5522 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5523 sel = gen_lowpart (qimode, sel);
5524 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
5525 gcc_assert (sel != NULL);
5526
5527 /* Add the byte offset to each byte element. */
5528 /* Note that the definition of the indices here is memory ordering,
5529 so there should be no difference between big and little endian. */
5530 vec = rtvec_alloc (w);
5531 for (i = 0; i < w; ++i)
5532 RTVEC_ELT (vec, i) = GEN_INT (i % u);
5533 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5534 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5535 sel, 0, OPTAB_DIRECT);
5536 gcc_assert (sel_qi != NULL);
5537 }
5538
5539 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5540 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5541 gen_lowpart (qimode, v1), sel_qi);
5542 if (tmp)
5543 tmp = gen_lowpart (mode, tmp);
5544 return tmp;
5545 }
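/* An illustration of the QImode lowering performed above, for V4SImode
   (E == 4 elements of U == 4 bytes, W == 16): each element index in the
   selector is reduced modulo 2 * E, scaled by U, and expanded into U
   consecutive byte indices, so the element selector {0, 4, 1, 5} becomes
   the V16QImode selector {0,1,2,3, 16,17,18,19, 4,5,6,7, 20,21,22,23}.  */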
5546
5547 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5548 three operands. */
5549
5550 rtx
5551 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5552 rtx target)
5553 {
5554 struct expand_operand ops[4];
5555 machine_mode mode = TYPE_MODE (vec_cond_type);
5556 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5557 enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5558 rtx mask, rtx_op1, rtx_op2;
5559
5560 if (icode == CODE_FOR_nothing)
5561 return 0;
5562
5563 mask = expand_normal (op0);
5564 rtx_op1 = expand_normal (op1);
5565 rtx_op2 = expand_normal (op2);
5566
5567 mask = force_reg (mask_mode, mask);
5568 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5569
5570 create_output_operand (&ops[0], target, mode);
5571 create_input_operand (&ops[1], rtx_op1, mode);
5572 create_input_operand (&ops[2], rtx_op2, mode);
5573 create_input_operand (&ops[3], mask, mask_mode);
5574 expand_insn (icode, 4, ops);
5575
5576 return ops[0].value;
5577 }
5578
5579 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5580 three operands. */
5581
5582 rtx
5583 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5584 rtx target)
5585 {
5586 struct expand_operand ops[6];
5587 enum insn_code icode;
5588 rtx comparison, rtx_op1, rtx_op2;
5589 machine_mode mode = TYPE_MODE (vec_cond_type);
5590 machine_mode cmp_op_mode;
5591 bool unsignedp;
5592 tree op0a, op0b;
5593 enum tree_code tcode;
5594
5595 if (COMPARISON_CLASS_P (op0))
5596 {
5597 op0a = TREE_OPERAND (op0, 0);
5598 op0b = TREE_OPERAND (op0, 1);
5599 tcode = TREE_CODE (op0);
5600 }
5601 else
5602 {
5603 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5604 if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5605 != CODE_FOR_nothing)
5606 return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5607 op2, target);
5608 /* Fake op0 < 0. */
5609 else
5610 {
5611 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5612 == MODE_VECTOR_INT);
5613 op0a = op0;
5614 op0b = build_zero_cst (TREE_TYPE (op0));
5615 tcode = LT_EXPR;
5616 }
5617 }
5618 cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5619 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5620
5621
5622 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
5623 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
5624
5625 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
5626 if (icode == CODE_FOR_nothing)
5627 return 0;
5628
5629 comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode, 4);
5630 rtx_op1 = expand_normal (op1);
5631 rtx_op2 = expand_normal (op2);
5632
5633 create_output_operand (&ops[0], target, mode);
5634 create_input_operand (&ops[1], rtx_op1, mode);
5635 create_input_operand (&ops[2], rtx_op2, mode);
5636 create_fixed_operand (&ops[3], comparison);
5637 create_fixed_operand (&ops[4], XEXP (comparison, 0));
5638 create_fixed_operand (&ops[5], XEXP (comparison, 1));
5639 expand_insn (icode, 6, ops);
5640 return ops[0].value;
5641 }
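/* At the source level this corresponds to GCC's generic vector extensions,
   where a vector comparison may appear directly as the condition of a
   ternary expression.  A minimal sketch with hypothetical names; the body
   below is represented as VEC_COND_EXPR <a > b, c, d>:

     typedef int v4si __attribute__ ((vector_size (16)));

     v4si
     vcond (v4si a, v4si b, v4si c, v4si d)
     {
       return a > b ? c : d;
     }  */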
5642
5643 /* Generate insns for a vector comparison into a mask. */
5644
5645 rtx
5646 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5647 {
5648 struct expand_operand ops[4];
5649 enum insn_code icode;
5650 rtx comparison;
5651 machine_mode mask_mode = TYPE_MODE (type);
5652 machine_mode vmode;
5653 bool unsignedp;
5654 tree op0a, op0b;
5655 enum tree_code tcode;
5656
5657 op0a = TREE_OPERAND (exp, 0);
5658 op0b = TREE_OPERAND (exp, 1);
5659 tcode = TREE_CODE (exp);
5660
5661 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5662 vmode = TYPE_MODE (TREE_TYPE (op0a));
5663
5664 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5665 if (icode == CODE_FOR_nothing)
5666 return 0;
5667
5668 comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode, 2);
5669 create_output_operand (&ops[0], target, mask_mode);
5670 create_fixed_operand (&ops[1], comparison);
5671 create_fixed_operand (&ops[2], XEXP (comparison, 0));
5672 create_fixed_operand (&ops[3], XEXP (comparison, 1));
5673 expand_insn (icode, 4, ops);
5674 return ops[0].value;
5675 }
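/* With the same vector extensions, the comparison alone produces the mask:
   each lane of the result is -1 where the comparison holds and 0 where it
   does not.  A minimal sketch with hypothetical names:

     typedef int v4si __attribute__ ((vector_size (16)));

     v4si
     vcmp (v4si a, v4si b)
     {
       return a > b;
     }  */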
5676
5677 /* Expand a highpart multiply. */
5678
5679 rtx
5680 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5681 rtx target, bool uns_p)
5682 {
5683 struct expand_operand eops[3];
5684 enum insn_code icode;
5685 int method, i, nunits;
5686 machine_mode wmode;
5687 rtx m1, m2, perm;
5688 optab tab1, tab2;
5689 rtvec v;
5690
5691 method = can_mult_highpart_p (mode, uns_p);
5692 switch (method)
5693 {
5694 case 0:
5695 return NULL_RTX;
5696 case 1:
5697 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5698 return expand_binop (mode, tab1, op0, op1, target, uns_p,
5699 OPTAB_LIB_WIDEN);
5700 case 2:
5701 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5702 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5703 break;
5704 case 3:
5705 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5706 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5707 if (BYTES_BIG_ENDIAN)
5708 std::swap (tab1, tab2);
5709 break;
5710 default:
5711 gcc_unreachable ();
5712 }
5713
5714 icode = optab_handler (tab1, mode);
5715 nunits = GET_MODE_NUNITS (mode);
5716 wmode = insn_data[icode].operand[0].mode;
5717 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
5718 gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));
5719
5720 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5721 create_input_operand (&eops[1], op0, mode);
5722 create_input_operand (&eops[2], op1, mode);
5723 expand_insn (icode, 3, eops);
5724 m1 = gen_lowpart (mode, eops[0].value);
5725
5726 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5727 create_input_operand (&eops[1], op0, mode);
5728 create_input_operand (&eops[2], op1, mode);
5729 expand_insn (optab_handler (tab2, mode), 3, eops);
5730 m2 = gen_lowpart (mode, eops[0].value);
5731
5732 v = rtvec_alloc (nunits);
5733 if (method == 2)
5734 {
5735 for (i = 0; i < nunits; ++i)
5736 RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
5737 + ((i & 1) ? nunits : 0));
5738 }
5739 else
5740 {
5741 for (i = 0; i < nunits; ++i)
5742 RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
5743 }
5744 perm = gen_rtx_CONST_VECTOR (mode, v);
5745
5746 return expand_vec_perm (mode, m1, m2, perm, target);
5747 }
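/* Worked example for method 2 on a little-endian V4SImode target: the
   even/odd widening multiplies leave M1 == {p0, p2} and M2 == {p1, p3} as
   double-width products viewed in V4SImode, so the high half of each
   product sits in the odd SImode lane of its slot.  The loop above then
   builds PERM == {1, 5, 3, 7}, which picks exactly those high lanes out
   of the concatenation of M1 and M2, giving {hi(p0), hi(p1), hi(p2),
   hi(p3)}.  */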
5748 \f
5749 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5750 pattern. */
5751
5752 static void
5753 find_cc_set (rtx x, const_rtx pat, void *data)
5754 {
5755 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
5756 && GET_CODE (pat) == SET)
5757 {
5758 rtx *p_cc_reg = (rtx *) data;
5759 gcc_assert (!*p_cc_reg);
5760 *p_cc_reg = x;
5761 }
5762 }
5763
5764 /* This is a helper function for the other atomic operations. This function
5765 emits a loop that contains SEQ that iterates until a compare-and-swap
5766 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5767 a set of instructions that takes a value from OLD_REG as an input and
5768 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5769 set to the current contents of MEM. After SEQ, a compare-and-swap will
5770 attempt to update MEM with NEW_REG. The function returns true when the
5771 loop was generated successfully. */
5772
5773 static bool
5774 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5775 {
5776 machine_mode mode = GET_MODE (mem);
5777 rtx_code_label *label;
5778 rtx cmp_reg, success, oldval;
5779
5780 /* The loop we want to generate looks like
5781
5782 cmp_reg = mem;
5783 label:
5784 old_reg = cmp_reg;
5785 seq;
5786 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5787 if (!success)
5788 goto label;
5789
5790 Note that we only do the plain load from memory once. Subsequent
5791 iterations use the value loaded by the compare-and-swap pattern. */
5792
5793 label = gen_label_rtx ();
5794 cmp_reg = gen_reg_rtx (mode);
5795
5796 emit_move_insn (cmp_reg, mem);
5797 emit_label (label);
5798 emit_move_insn (old_reg, cmp_reg);
5799 if (seq)
5800 emit_insn (seq);
5801
5802 success = NULL_RTX;
5803 oldval = cmp_reg;
5804 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
5805 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
5806 MEMMODEL_RELAXED))
5807 return false;
5808
5809 if (oldval != cmp_reg)
5810 emit_move_insn (cmp_reg, oldval);
5811
5812 /* Mark this jump predicted not taken. */
5813 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
5814 GET_MODE (success), 1, label, 0);
5815 return true;
5816 }
5817
5818
5819 /* This function tries to emit an atomic_exchange instruction. VAL is written
5820 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5821 using TARGET if possible. */
5822
5823 static rtx
5824 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
5825 {
5826 machine_mode mode = GET_MODE (mem);
5827 enum insn_code icode;
5828
5829 /* If the target supports the exchange directly, great. */
5830 icode = direct_optab_handler (atomic_exchange_optab, mode);
5831 if (icode != CODE_FOR_nothing)
5832 {
5833 struct expand_operand ops[4];
5834
5835 create_output_operand (&ops[0], target, mode);
5836 create_fixed_operand (&ops[1], mem);
5837 create_input_operand (&ops[2], val, mode);
5838 create_integer_operand (&ops[3], model);
5839 if (maybe_expand_insn (icode, 4, ops))
5840 return ops[0].value;
5841 }
5842
5843 return NULL_RTX;
5844 }
5845
5846 /* This function tries to implement an atomic exchange operation using
5847 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5848 The previous contents of *MEM are returned, using TARGET if possible.
5849 Since this instruction is an acquire barrier only, stronger memory
5850 models may require additional barriers to be emitted. */
5851
5852 static rtx
5853 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
5854 enum memmodel model)
5855 {
5856 machine_mode mode = GET_MODE (mem);
5857 enum insn_code icode;
5858 rtx_insn *last_insn = get_last_insn ();
5859
5860 icode = optab_handler (sync_lock_test_and_set_optab, mode);
5861
5862 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
5863 exists, and the memory model is stronger than acquire, add a release
5864 barrier before the instruction. */
5865
5866 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
5867 expand_mem_thread_fence (model);
5868
5869 if (icode != CODE_FOR_nothing)
5870 {
5871 struct expand_operand ops[3];
5872 create_output_operand (&ops[0], target, mode);
5873 create_fixed_operand (&ops[1], mem);
5874 create_input_operand (&ops[2], val, mode);
5875 if (maybe_expand_insn (icode, 3, ops))
5876 return ops[0].value;
5877 }
5878
5879 /* If an external test-and-set libcall is provided, use that instead of
5880 any external compare-and-swap that we might get from the compare-and-
5881 swap-loop expansion later. */
5882 if (!can_compare_and_swap_p (mode, false))
5883 {
5884 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
5885 if (libfunc != NULL)
5886 {
5887 rtx addr;
5888
5889 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
5890 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
5891 mode, 2, addr, ptr_mode,
5892 val, mode);
5893 }
5894 }
5895
5896 /* If the test_and_set can't be emitted, eliminate any barrier that might
5897 have been emitted. */
5898 delete_insns_since (last_insn);
5899 return NULL_RTX;
5900 }
5901
5902 /* This function tries to implement an atomic exchange operation using a
5903 compare_and_swap loop. VAL is written to *MEM. The previous contents of
5904 *MEM are returned, using TARGET if possible. No memory model is required
5905 since a compare_and_swap loop is seq-cst. */
5906
5907 static rtx
5908 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
5909 {
5910 machine_mode mode = GET_MODE (mem);
5911
5912 if (can_compare_and_swap_p (mode, true))
5913 {
5914 if (!target || !register_operand (target, mode))
5915 target = gen_reg_rtx (mode);
5916 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
5917 return target;
5918 }
5919
5920 return NULL_RTX;
5921 }
5922
5923 /* This function tries to implement an atomic test-and-set operation
5924 using the atomic_test_and_set instruction pattern. A boolean value
5925 is returned from the operation, using TARGET if possible. */
5926
5927 static rtx
5928 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
5929 {
5930 machine_mode pat_bool_mode;
5931 struct expand_operand ops[3];
5932
5933 if (!targetm.have_atomic_test_and_set ())
5934 return NULL_RTX;
5935
5936 /* While we always get QImode from __atomic_test_and_set, we get
5937 other memory modes from __sync_lock_test_and_set. Note that we
5938 use no endian adjustment here. This matches the 4.6 behavior
5939 in the Sparc backend. */
5940 enum insn_code icode = targetm.code_for_atomic_test_and_set;
5941 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
5942 if (GET_MODE (mem) != QImode)
5943 mem = adjust_address_nv (mem, QImode, 0);
5944
5945 pat_bool_mode = insn_data[icode].operand[0].mode;
5946 create_output_operand (&ops[0], target, pat_bool_mode);
5947 create_fixed_operand (&ops[1], mem);
5948 create_integer_operand (&ops[2], model);
5949
5950 if (maybe_expand_insn (icode, 3, ops))
5951 return ops[0].value;
5952 return NULL_RTX;
5953 }
5954
5955 /* This function expands the legacy __sync_lock_test_and_set operation, which
5956 is generally an atomic exchange. Some limited targets only allow the
5957 constant 1 to be stored. This is an ACQUIRE operation.
5958
5959 TARGET is an optional place to stick the return value.
5960 MEM is where VAL is stored. */
5961
5962 rtx
5963 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
5964 {
5965 rtx ret;
5966
5967 /* Try an atomic_exchange first. */
5968 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
5969 if (ret)
5970 return ret;
5971
5972 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
5973 MEMMODEL_SYNC_ACQUIRE);
5974 if (ret)
5975 return ret;
5976
5977 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
5978 if (ret)
5979 return ret;
5980
5981 /* If there are no other options, try atomic_test_and_set if the value
5982 being stored is 1. */
5983 if (val == const1_rtx)
5984 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
5985
5986 return ret;
5987 }
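/* For example, __sync_lock_test_and_set (&lock, 1) reaches this entry
   point; because the value stored is the constant 1, even targets that
   provide nothing but atomic_test_and_set are covered by the final
   fallback above.  */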
5988
5989 /* This function expands the atomic test_and_set operation:
5990 atomically store a boolean TRUE into MEM and return the previous value.
5991
5992 MEMMODEL is the memory model variant to use.
5993 TARGET is an optional place to stick the return value. */
5994
5995 rtx
5996 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
5997 {
5998 machine_mode mode = GET_MODE (mem);
5999 rtx ret, trueval, subtarget;
6000
6001 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6002 if (ret)
6003 return ret;
6004
6005 /* Be binary compatible with non-default settings of trueval, and different
6006 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6007 another only has atomic-exchange. */
6008 if (targetm.atomic_test_and_set_trueval == 1)
6009 {
6010 trueval = const1_rtx;
6011 subtarget = target ? target : gen_reg_rtx (mode);
6012 }
6013 else
6014 {
6015 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6016 subtarget = gen_reg_rtx (mode);
6017 }
6018
6019 /* Try the atomic-exchange optab... */
6020 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6021
6022 /* ... then an atomic-compare-and-swap loop ... */
6023 if (!ret)
6024 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6025
6026 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6027 if (!ret)
6028 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6029
6030 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6031 things with the value 1. Thus we try again without trueval. */
6032 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6033 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6034
6035 /* Failing all else, assume a single threaded environment and simply
6036 perform the operation. */
6037 if (!ret)
6038 {
6039 /* If the result is ignored, skip the move to target. */
6040 if (subtarget != const0_rtx)
6041 emit_move_insn (subtarget, mem);
6042
6043 emit_move_insn (mem, trueval);
6044 ret = subtarget;
6045 }
6046
6047 /* Recall that we have to return a boolean value; rectify if trueval
6048 is not exactly one. */
6049 if (targetm.atomic_test_and_set_trueval != 1)
6050 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6051
6052 return ret;
6053 }
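/* This is the expansion path for the __atomic_test_and_set builtin.  A
   minimal sketch of a caller, with hypothetical names:

     static char lock;

     void
     spin_lock (void)
     {
       while (__atomic_test_and_set (&lock, __ATOMIC_ACQUIRE))
         ;
     }  */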
6054
6055 /* This function expands the atomic exchange operation:
6056 atomically store VAL in MEM and return the previous value in MEM.
6057
6058 MEMMODEL is the memory model variant to use.
6059 TARGET is an optional place to stick the return value. */
6060
6061 rtx
6062 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6063 {
6064 rtx ret;
6065
6066 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6067
6068 /* Next try a compare-and-swap loop for the exchange. */
6069 if (!ret)
6070 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6071
6072 return ret;
6073 }
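/* This is used when expanding the __atomic_exchange_n builtin, e.g.

     int
     swap_in (int *p, int v)
     {
       return __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST);
     }

   (a hypothetical wrapper); when no atomic_exchange pattern exists, the
   compare-and-swap loop above provides a seq-cst fallback.  */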
6074
6075 /* This function expands the atomic compare exchange operation:
6076
6077 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6078 *PTARGET_OVAL is an optional place to store the old value from memory.
6079 Both target parameters may be NULL or const0_rtx to indicate that we do
6080 not care about that return value. Both target parameters are updated on
6081 success to the actual location of the corresponding result.
6082
6083 MEMMODEL is the memory model variant to use.
6084
6085 The return value of the function is true for success. */
6086
6087 bool
6088 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6089 rtx mem, rtx expected, rtx desired,
6090 bool is_weak, enum memmodel succ_model,
6091 enum memmodel fail_model)
6092 {
6093 machine_mode mode = GET_MODE (mem);
6094 struct expand_operand ops[8];
6095 enum insn_code icode;
6096 rtx target_oval, target_bool = NULL_RTX;
6097 rtx libfunc;
6098
6099 /* Load expected into a register for the compare and swap. */
6100 if (MEM_P (expected))
6101 expected = copy_to_reg (expected);
6102
6103 /* Make sure we always have some place to put the return oldval.
6104 Further, make sure that place is distinct from the input expected,
6105 just in case we need that path down below. */
6106 if (ptarget_oval && *ptarget_oval == const0_rtx)
6107 ptarget_oval = NULL;
6108
6109 if (ptarget_oval == NULL
6110 || (target_oval = *ptarget_oval) == NULL
6111 || reg_overlap_mentioned_p (expected, target_oval))
6112 target_oval = gen_reg_rtx (mode);
6113
6114 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6115 if (icode != CODE_FOR_nothing)
6116 {
6117 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6118
6119 if (ptarget_bool && *ptarget_bool == const0_rtx)
6120 ptarget_bool = NULL;
6121
6122 /* Make sure we always have a place for the bool operand. */
6123 if (ptarget_bool == NULL
6124 || (target_bool = *ptarget_bool) == NULL
6125 || GET_MODE (target_bool) != bool_mode)
6126 target_bool = gen_reg_rtx (bool_mode);
6127
6128 /* Emit the compare_and_swap. */
6129 create_output_operand (&ops[0], target_bool, bool_mode);
6130 create_output_operand (&ops[1], target_oval, mode);
6131 create_fixed_operand (&ops[2], mem);
6132 create_input_operand (&ops[3], expected, mode);
6133 create_input_operand (&ops[4], desired, mode);
6134 create_integer_operand (&ops[5], is_weak);
6135 create_integer_operand (&ops[6], succ_model);
6136 create_integer_operand (&ops[7], fail_model);
6137 if (maybe_expand_insn (icode, 8, ops))
6138 {
6139 /* Return success/failure. */
6140 target_bool = ops[0].value;
6141 target_oval = ops[1].value;
6142 goto success;
6143 }
6144 }
6145
6146 /* Otherwise fall back to the original __sync_val_compare_and_swap
6147 which is always seq-cst. */
6148 icode = optab_handler (sync_compare_and_swap_optab, mode);
6149 if (icode != CODE_FOR_nothing)
6150 {
6151 rtx cc_reg;
6152
6153 create_output_operand (&ops[0], target_oval, mode);
6154 create_fixed_operand (&ops[1], mem);
6155 create_input_operand (&ops[2], expected, mode);
6156 create_input_operand (&ops[3], desired, mode);
6157 if (!maybe_expand_insn (icode, 4, ops))
6158 return false;
6159
6160 target_oval = ops[0].value;
6161
6162 /* If the caller isn't interested in the boolean return value,
6163 skip the computation of it. */
6164 if (ptarget_bool == NULL)
6165 goto success;
6166
6167 /* Otherwise, work out if the compare-and-swap succeeded. */
6168 cc_reg = NULL_RTX;
6169 if (have_insn_for (COMPARE, CCmode))
6170 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6171 if (cc_reg)
6172 {
6173 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6174 const0_rtx, VOIDmode, 0, 1);
6175 goto success;
6176 }
6177 goto success_bool_from_val;
6178 }
6179
6180 /* Also check for library support for __sync_val_compare_and_swap. */
6181 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6182 if (libfunc != NULL)
6183 {
6184 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6185 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6186 mode, 3, addr, ptr_mode,
6187 expected, mode, desired, mode);
6188 emit_move_insn (target_oval, target);
6189
6190 /* Compute the boolean return value only if requested. */
6191 if (ptarget_bool)
6192 goto success_bool_from_val;
6193 else
6194 goto success;
6195 }
6196
6197 /* Failure. */
6198 return false;
6199
6200 success_bool_from_val:
6201 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6202 expected, VOIDmode, 1, 1);
6203 success:
6204 /* Make sure that the oval output winds up where the caller asked. */
6205 if (ptarget_oval)
6206 *ptarget_oval = target_oval;
6207 if (ptarget_bool)
6208 *ptarget_bool = target_bool;
6209 return true;
6210 }
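/* This underlies the __atomic_compare_exchange_n builtin.  A minimal
   sketch with hypothetical names; note that on failure EXPECTED is
   updated with the value actually found in memory, matching the
   *PTARGET_OVAL output above:

     _Bool
     cas (int *p, int *expected, int desired)
     {
       return __atomic_compare_exchange_n (p, expected, desired, 0,
                                           __ATOMIC_SEQ_CST,
                                           __ATOMIC_RELAXED);
     }  */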
6211
6212 /* Generate asm volatile("" : : : "memory") as the memory barrier. */
6213
6214 static void
6215 expand_asm_memory_barrier (void)
6216 {
6217 rtx asm_op, clob;
6218
6219 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
6220 rtvec_alloc (0), rtvec_alloc (0),
6221 rtvec_alloc (0), UNKNOWN_LOCATION);
6222 MEM_VOLATILE_P (asm_op) = 1;
6223
6224 clob = gen_rtx_SCRATCH (VOIDmode);
6225 clob = gen_rtx_MEM (BLKmode, clob);
6226 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6227
6228 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6229 }
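/* The insn emitted here is what the source-level compiler barrier

     __asm__ __volatile__ ("" : : : "memory");

   expands to: no instructions are generated, but the volatile asm with a
   memory clobber keeps memory accesses from being moved across it.  */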
6230
6231 /* This routine will either emit the mem_thread_fence pattern or issue a
6232 sync_synchronize to generate a fence for memory model MEMMODEL. */
6233
6234 void
6235 expand_mem_thread_fence (enum memmodel model)
6236 {
6237 if (targetm.have_mem_thread_fence ())
6238 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6239 else if (!is_mm_relaxed (model))
6240 {
6241 if (targetm.have_memory_barrier ())
6242 emit_insn (targetm.gen_memory_barrier ());
6243 else if (synchronize_libfunc != NULL_RTX)
6244 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
6245 else
6246 expand_asm_memory_barrier ();
6247 }
6248 }
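/* This is the expansion path for the __atomic_thread_fence builtin, e.g.
   __atomic_thread_fence (__ATOMIC_ACQ_REL).  Per the logic above, a
   relaxed fence emits nothing unless the target provides an explicit
   mem_thread_fence pattern.  */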
6249
6250 /* This routine will either emit the mem_signal_fence pattern or issue a
6251 sync_synchronize to generate a fence for memory model MEMMODEL. */
6252
6253 void
6254 expand_mem_signal_fence (enum memmodel model)
6255 {
6256 if (targetm.have_mem_signal_fence ())
6257 emit_insn (targetm.gen_mem_signal_fence (GEN_INT (model)));
6258 else if (!is_mm_relaxed (model))
6259 {
6260 /* By default targets are coherent between a thread and the signal
6261 handler running on the same thread. Thus this really becomes a
6262 compiler barrier, in that stores must not be sunk past
6263 (or raised above) a given point. */
6264 expand_asm_memory_barrier ();
6265 }
6266 }
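/* Likewise for the __atomic_signal_fence builtin, e.g.
   __atomic_signal_fence (__ATOMIC_SEQ_CST); on most targets this reduces
   to the pure compiler barrier emitted by expand_asm_memory_barrier.  */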
6267
6268 /* This function expands the atomic load operation:
6269 return the atomically loaded value in MEM.
6270
6271 MEMMODEL is the memory model variant to use.
6272 TARGET is an optional place to stick the return value. */
6273
6274 rtx
6275 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6276 {
6277 machine_mode mode = GET_MODE (mem);
6278 enum insn_code icode;
6279
6280 /* If the target supports the load directly, great. */
6281 icode = direct_optab_handler (atomic_load_optab, mode);
6282 if (icode != CODE_FOR_nothing)
6283 {
6284 struct expand_operand ops[3];
6285
6286 create_output_operand (&ops[0], target, mode);
6287 create_fixed_operand (&ops[1], mem);
6288 create_integer_operand (&ops[2], model);
6289 if (maybe_expand_insn (icode, 3, ops))
6290 return ops[0].value;
6291 }
6292
6293 /* If the size of the object is greater than word size on this target,
6294 then we assume that a load will not be atomic. */
6295 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6296 {
6297 /* Issue val = compare_and_swap (mem, 0, 0).
6298 This may cause the occasional harmless store of 0 when the value is
6299 already 0, but it seems to be OK according to the standards guys. */
6300 if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
6301 const0_rtx, false, model, model))
6302 return target;
6303 else
6304 /* Otherwise there is no atomic load; leave it to the library call. */
6305 return NULL_RTX;
6306 }
6307
6308 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6309 if (!target || target == const0_rtx)
6310 target = gen_reg_rtx (mode);
6311
6312 /* For SEQ_CST, emit a barrier before the load. */
6313 if (is_mm_seq_cst (model))
6314 expand_mem_thread_fence (model);
6315
6316 emit_move_insn (target, mem);
6317
6318 /* Emit the appropriate barrier after the load. */
6319 expand_mem_thread_fence (model);
6320
6321 return target;
6322 }
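/* A minimal sketch of the builtin this expands, with hypothetical names:

     int
     load_acquire (int *p)
     {
       return __atomic_load_n (p, __ATOMIC_ACQUIRE);
     }

   For objects wider than a word, the VAL = CAS (MEM, 0, 0) trick above is
   used instead, provided a compare-and-swap pattern exists.  */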
6323
6324 /* This function expands the atomic store operation:
6325 Atomically store VAL in MEM.
6326 MEMMODEL is the memory model variant to use.
6327 USE_RELEASE is true if __sync_lock_release can be used as a fallback.
6328 The function returns const0_rtx if a pattern was emitted. */
6329
6330 rtx
6331 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
6332 {
6333 machine_mode mode = GET_MODE (mem);
6334 enum insn_code icode;
6335 struct expand_operand ops[3];
6336
6337 /* If the target supports the store directly, great. */
6338 icode = direct_optab_handler (atomic_store_optab, mode);
6339 if (icode != CODE_FOR_nothing)
6340 {
6341 create_fixed_operand (&ops[0], mem);
6342 create_input_operand (&ops[1], val, mode);
6343 create_integer_operand (&ops[2], model);
6344 if (maybe_expand_insn (icode, 3, ops))
6345 return const0_rtx;
6346 }
6347
6348 /* If using __sync_lock_release is a viable alternative, try it. */
6349 if (use_release)
6350 {
6351 icode = direct_optab_handler (sync_lock_release_optab, mode);
6352 if (icode != CODE_FOR_nothing)
6353 {
6354 create_fixed_operand (&ops[0], mem);
6355 create_input_operand (&ops[1], const0_rtx, mode);
6356 if (maybe_expand_insn (icode, 2, ops))
6357 {
6358 /* lock_release is only a release barrier. */
6359 if (is_mm_seq_cst (model))
6360 expand_mem_thread_fence (model);
6361 return const0_rtx;
6362 }
6363 }
6364 }
6365
6366 /* If the size of the object is greater than word size on this target,
6367 a default store will not be atomic. Try an atomic exchange and throw away
6368 the result. If that doesn't work, don't do anything. */
6369 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6370 {
6371 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
6372 if (!target)
6373 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val);
6374 if (target)
6375 return const0_rtx;
6376 else
6377 return NULL_RTX;
6378 }
6379
6380 /* Otherwise assume stores are atomic, and emit the proper barriers. */
6381 expand_mem_thread_fence (model);
6382
6383 emit_move_insn (mem, val);
6384
6385 /* For SEQ_CST, also emit a barrier after the store. */
6386 if (is_mm_seq_cst (model))
6387 expand_mem_thread_fence (model);
6388
6389 return const0_rtx;
6390 }
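/* A minimal sketch of the builtin this expands, with hypothetical names:

     void
     store_release (int *p, int v)
     {
       __atomic_store_n (p, v, __ATOMIC_RELEASE);
     }

   Storing zero with USE_RELEASE set matches the semantics of
   __sync_lock_release, which is why that fallback is permitted above.  */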
6391
6392
6393 /* Structure containing the pointers and values required to process the
6394 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
6395
6396 struct atomic_op_functions
6397 {
6398 direct_optab mem_fetch_before;
6399 direct_optab mem_fetch_after;
6400 direct_optab mem_no_result;
6401 optab fetch_before;
6402 optab fetch_after;
6403 direct_optab no_result;
6404 enum rtx_code reverse_code;
6405 };
6406
6407
6408 /* Fill in structure pointed to by OP with the various optab entries for an
6409 operation of type CODE. */
6410
6411 static void
6412 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
6413 {
6414 gcc_assert (op != NULL);
6415
6416 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
6417 in the source code during compilation, and the optab entries are not
6418 computable until runtime. Fill in the values at runtime. */
6419 switch (code)
6420 {
6421 case PLUS:
6422 op->mem_fetch_before = atomic_fetch_add_optab;
6423 op->mem_fetch_after = atomic_add_fetch_optab;
6424 op->mem_no_result = atomic_add_optab;
6425 op->fetch_before = sync_old_add_optab;
6426 op->fetch_after = sync_new_add_optab;
6427 op->no_result = sync_add_optab;
6428 op->reverse_code = MINUS;
6429 break;
6430 case MINUS:
6431 op->mem_fetch_before = atomic_fetch_sub_optab;
6432 op->mem_fetch_after = atomic_sub_fetch_optab;
6433 op->mem_no_result = atomic_sub_optab;
6434 op->fetch_before = sync_old_sub_optab;
6435 op->fetch_after = sync_new_sub_optab;
6436 op->no_result = sync_sub_optab;
6437 op->reverse_code = PLUS;
6438 break;
6439 case XOR:
6440 op->mem_fetch_before = atomic_fetch_xor_optab;
6441 op->mem_fetch_after = atomic_xor_fetch_optab;
6442 op->mem_no_result = atomic_xor_optab;
6443 op->fetch_before = sync_old_xor_optab;
6444 op->fetch_after = sync_new_xor_optab;
6445 op->no_result = sync_xor_optab;
6446 op->reverse_code = XOR;
6447 break;
6448 case AND:
6449 op->mem_fetch_before = atomic_fetch_and_optab;
6450 op->mem_fetch_after = atomic_and_fetch_optab;
6451 op->mem_no_result = atomic_and_optab;
6452 op->fetch_before = sync_old_and_optab;
6453 op->fetch_after = sync_new_and_optab;
6454 op->no_result = sync_and_optab;
6455 op->reverse_code = UNKNOWN;
6456 break;
6457 case IOR:
6458 op->mem_fetch_before = atomic_fetch_or_optab;
6459 op->mem_fetch_after = atomic_or_fetch_optab;
6460 op->mem_no_result = atomic_or_optab;
6461 op->fetch_before = sync_old_ior_optab;
6462 op->fetch_after = sync_new_ior_optab;
6463 op->no_result = sync_ior_optab;
6464 op->reverse_code = UNKNOWN;
6465 break;
6466 case NOT:
6467 op->mem_fetch_before = atomic_fetch_nand_optab;
6468 op->mem_fetch_after = atomic_nand_fetch_optab;
6469 op->mem_no_result = atomic_nand_optab;
6470 op->fetch_before = sync_old_nand_optab;
6471 op->fetch_after = sync_new_nand_optab;
6472 op->no_result = sync_nand_optab;
6473 op->reverse_code = UNKNOWN;
6474 break;
6475 default:
6476 gcc_unreachable ();
6477 }
6478 }
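/* The REVERSE_CODE field drives the compensation code used later: for
   PLUS it is MINUS, so a pre-operation value can be recovered from an
   add_fetch result as RESULT - VAL; XOR is its own inverse; AND, IOR and
   NOT have no inverse and are marked UNKNOWN (the nand compensation for
   CODE == NOT is handled specially where it is emitted).  */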
6479
6480 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
6481 using memory order MODEL. If AFTER is true the operation needs to return
6482 the value of *MEM after the operation, otherwise the previous value.
6483 TARGET is an optional place to place the result. The result is unused if
6484 it is const0_rtx.
6485 Return the result if there is a better sequence, otherwise NULL_RTX. */
6486
6487 static rtx
6488 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6489 enum memmodel model, bool after)
6490 {
6491 /* If the value is prefetched, or not used, it may be possible to replace
6492 the sequence with a native exchange operation. */
6493 if (!after || target == const0_rtx)
6494 {
6495 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
6496 if (code == AND && val == const0_rtx)
6497 {
6498 if (target == const0_rtx)
6499 target = gen_reg_rtx (GET_MODE (mem));
6500 return maybe_emit_atomic_exchange (target, mem, val, model);
6501 }
6502
6503 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
6504 if (code == IOR && val == constm1_rtx)
6505 {
6506 if (target == const0_rtx)
6507 target = gen_reg_rtx (GET_MODE (mem));
6508 return maybe_emit_atomic_exchange (target, mem, val, model);
6509 }
6510 }
6511
6512 return NULL_RTX;
6513 }
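/* In builtin terms, the two replacements above are

     __atomic_fetch_and (&x, 0, m)   -->  __atomic_exchange_n (&x, 0, m)
     __atomic_fetch_or (&x, -1, m)   -->  __atomic_exchange_n (&x, -1, m)

   both of which store an operand-independent value while still returning
   the previous contents of memory.  */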
6514
6515 /* Try to emit an instruction for a specific operation variation.
6516 OPTAB contains the OP functions.
6517 TARGET is an optional place to return the result. const0_rtx means unused.
6518 MEM is the memory location to operate on.
6519 VAL is the value to use in the operation.
6520 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6521 MODEL is the memory model, if used.
6522 AFTER is true if the returned result is the value after the operation. */
6523
6524 static rtx
6525 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
6526 rtx val, bool use_memmodel, enum memmodel model, bool after)
6527 {
6528 machine_mode mode = GET_MODE (mem);
6529 struct expand_operand ops[4];
6530 enum insn_code icode;
6531 int op_counter = 0;
6532 int num_ops;
6533
6534 /* Check to see if there is a result returned. */
6535 if (target == const0_rtx)
6536 {
6537 if (use_memmodel)
6538 {
6539 icode = direct_optab_handler (optab->mem_no_result, mode);
6540 create_integer_operand (&ops[2], model);
6541 num_ops = 3;
6542 }
6543 else
6544 {
6545 icode = direct_optab_handler (optab->no_result, mode);
6546 num_ops = 2;
6547 }
6548 }
6549 /* Otherwise, we need to generate a result. */
6550 else
6551 {
6552 if (use_memmodel)
6553 {
6554 icode = direct_optab_handler (after ? optab->mem_fetch_after
6555 : optab->mem_fetch_before, mode);
6556 create_integer_operand (&ops[3], model);
6557 num_ops = 4;
6558 }
6559 else
6560 {
6561 icode = optab_handler (after ? optab->fetch_after
6562 : optab->fetch_before, mode);
6563 num_ops = 3;
6564 }
6565 create_output_operand (&ops[op_counter++], target, mode);
6566 }
6567 if (icode == CODE_FOR_nothing)
6568 return NULL_RTX;
6569
6570 create_fixed_operand (&ops[op_counter++], mem);
6571 /* VAL may have been promoted to a wider mode. Shrink it if so. */
6572 create_convert_operand_to (&ops[op_counter++], val, mode, true);
6573
6574 if (maybe_expand_insn (icode, num_ops, ops))
6575 return (target == const0_rtx ? const0_rtx : ops[0].value);
6576
6577 return NULL_RTX;
6578 }
6579
6580
6581 /* This function expands an atomic fetch_OP or OP_fetch operation:
6582 TARGET is an optional place to stick the return value. const0_rtx indicates
6583 the result is unused.
6584 Atomically fetch MEM, perform the operation with VAL, and store the result back to MEM.
6585 CODE is the operation being performed (OP).
6586 MEMMODEL is the memory model variant to use.
6587 AFTER is true to return the result of the operation (OP_fetch).
6588 AFTER is false to return the value before the operation (fetch_OP).
6589
6590 This function will *only* generate instructions if there is a direct
6591 optab. No compare and swap loops or libcalls will be generated. */
6592
6593 static rtx
6594 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6595 enum rtx_code code, enum memmodel model,
6596 bool after)
6597 {
6598 machine_mode mode = GET_MODE (mem);
6599 struct atomic_op_functions optab;
6600 rtx result;
6601 bool unused_result = (target == const0_rtx);
6602
6603 get_atomic_op_for_code (&optab, code);
6604
6605 /* Check to see if there are any better instructions. */
6606 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6607 if (result)
6608 return result;
6609
6610 /* Check for the case where the result isn't used and try those patterns. */
6611 if (unused_result)
6612 {
6613 /* Try the memory model variant first. */
6614 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6615 if (result)
6616 return result;
6617
6618 /* Next try the old style without a memory model. */
6619 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6620 if (result)
6621 return result;
6622
6623 /* There is no no-result pattern, so try patterns with a result. */
6624 target = NULL_RTX;
6625 }
6626
6627 /* Try the __atomic version. */
6628 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6629 if (result)
6630 return result;
6631
6632 /* Try the older __sync version. */
6633 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6634 if (result)
6635 return result;
6636
6637 /* If the fetch value can be calculated from the other variation of fetch,
6638 try that operation. */
6639 if (after || unused_result || optab.reverse_code != UNKNOWN)
6640 {
6641 /* Try the __atomic version, then the older __sync version. */
6642 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6643 if (!result)
6644 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6645
6646 if (result)
6647 {
6648 /* If the result isn't used, no need to do compensation code. */
6649 if (unused_result)
6650 return result;
6651
6652 /* Issue compensation code. Fetch_after == fetch_before OP val.
6653 Fetch_before == after REVERSE_OP val. */
6654 if (!after)
6655 code = optab.reverse_code;
6656 if (code == NOT)
6657 {
6658 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6659 true, OPTAB_LIB_WIDEN);
6660 result = expand_simple_unop (mode, NOT, result, target, true);
6661 }
6662 else
6663 result = expand_simple_binop (mode, code, result, val, target,
6664 true, OPTAB_LIB_WIDEN);
6665 return result;
6666 }
6667 }
6668
6669 /* No direct opcode can be generated. */
6670 return NULL_RTX;
6671 }
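/* Example of the compensation code above: if only fetch_add patterns
   exist, __atomic_add_fetch (&x, n, m) is emitted as
   __atomic_fetch_add (&x, n, m) + n; for nand (CODE == NOT), whose
   OP_fetch result is ~(old & n), the fetched value is recomputed as
   ~(result & n).  */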
6672
6673
6674
6675 /* This function expands an atomic fetch_OP or OP_fetch operation:
6676 TARGET is an optional place to stick the return value. const0_rtx indicates
6677 the result is unused.
6678 Atomically fetch MEM, perform the operation with VAL, and store the result back to MEM.
6679 CODE is the operation being performed (OP).
6680 MEMMODEL is the memory model variant to use.
6681 AFTER is true to return the result of the operation (OP_fetch).
6682 AFTER is false to return the value before the operation (fetch_OP). */
6683 rtx
6684 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6685 enum memmodel model, bool after)
6686 {
6687 machine_mode mode = GET_MODE (mem);
6688 rtx result;
6689 bool unused_result = (target == const0_rtx);
6690
6691 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
6692 after);
6693
6694 if (result)
6695 return result;
6696
6697 /* Add/sub can be implemented by doing the reverse operation with -(val). */
6698 if (code == PLUS || code == MINUS)
6699 {
6700 rtx tmp;
6701 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
6702
6703 start_sequence ();
6704 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
6705 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
6706 model, after);
6707 if (result)
6708 {
6709 /* The reverse operation worked, so emit the insns and return. */
6710 tmp = get_insns ();
6711 end_sequence ();
6712 emit_insn (tmp);
6713 return result;
6714 }
6715
6716 /* The reverse operation did not work, so throw away the negation code and continue. */
6717 end_sequence ();
6718 }
6719
6720 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
6721 if (!can_compare_and_swap_p (mode, false))
6722 {
6723 rtx libfunc;
6724 bool fixup = false;
6725 enum rtx_code orig_code = code;
6726 struct atomic_op_functions optab;
6727
6728 get_atomic_op_for_code (&optab, code);
6729 libfunc = optab_libfunc (after ? optab.fetch_after
6730 : optab.fetch_before, mode);
6731 if (libfunc == NULL
6732 && (after || unused_result || optab.reverse_code != UNKNOWN))
6733 {
6734 fixup = true;
6735 if (!after)
6736 code = optab.reverse_code;
6737 libfunc = optab_libfunc (after ? optab.fetch_before
6738 : optab.fetch_after, mode);
6739 }
6740 if (libfunc != NULL)
6741 {
6742 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6743 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
6744 2, addr, ptr_mode, val, mode);
6745
6746 if (!unused_result && fixup)
6747 result = expand_simple_binop (mode, code, result, val, target,
6748 true, OPTAB_LIB_WIDEN);
6749 return result;
6750 }
6751
6752 /* We need the original code for any further attempts. */
6753 code = orig_code;
6754 }
6755
6756 /* If nothing else has succeeded, default to a compare and swap loop. */
6757 if (can_compare_and_swap_p (mode, true))
6758 {
6759 rtx_insn *insn;
6760 rtx t0 = gen_reg_rtx (mode), t1;
6761
6762 start_sequence ();
6763
6764 /* If the result is used, get a register for it. */
6765 if (!unused_result)
6766 {
6767 if (!target || !register_operand (target, mode))
6768 target = gen_reg_rtx (mode);
6769 /* If fetch_before, copy the value now. */
6770 if (!after)
6771 emit_move_insn (target, t0);
6772 }
6773 else
6774 target = const0_rtx;
6775
6776 t1 = t0;
6777 if (code == NOT)
6778 {
6779 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
6780 true, OPTAB_LIB_WIDEN);
6781 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
6782 }
6783 else
6784 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
6785 OPTAB_LIB_WIDEN);
6786
6787 /* For after, copy the value now. */
6788 if (!unused_result && after)
6789 emit_move_insn (target, t1);
6790 insn = get_insns ();
6791 end_sequence ();
6792
6793 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6794 return target;
6795 }
6796
6797 return NULL_RTX;
6798 }
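/* The PLUS/MINUS fallback above relies on the identity
   fetch_sub (mem, val) == fetch_add (mem, -val) (and vice versa), so a
   target that provides only one of atomic_fetch_add/atomic_fetch_sub
   still gets a direct expansion for both builtins.  */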
6799 \f
6800 /* Return true if OPERAND is suitable for operand number OPNO of
6801 instruction ICODE. */
6802
6803 bool
6804 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
6805 {
6806 return (!insn_data[(int) icode].operand[opno].predicate
6807 || (insn_data[(int) icode].operand[opno].predicate
6808 (operand, insn_data[(int) icode].operand[opno].mode)));
6809 }
6810 \f
6811 /* TARGET is a target of a multiword operation that we are going to
6812 implement as a series of word-mode operations. Return true if
6813 TARGET is suitable for this purpose. */
6814
6815 bool
6816 valid_multiword_target_p (rtx target)
6817 {
6818 machine_mode mode;
6819 int i;
6820
6821 mode = GET_MODE (target);
6822 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
6823 if (!validate_subreg (word_mode, mode, target, i))
6824 return false;
6825 return true;
6826 }
6827
6828 /* Like maybe_legitimize_operand, but do not change the code of the
6829 current rtx value. */
6830
6831 static bool
6832 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
6833 struct expand_operand *op)
6834 {
6835 /* See if the operand matches in its current form. */
6836 if (insn_operand_matches (icode, opno, op->value))
6837 return true;
6838
6839 /* If the operand is a memory whose address has no side effects,
6840 try forcing the address into a non-virtual pseudo register.
6841 The check for side effects is important because copy_to_mode_reg
6842 cannot handle things like auto-modified addresses. */
6843 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
6844 {
6845 rtx addr, mem;
6846
6847 mem = op->value;
6848 addr = XEXP (mem, 0);
6849 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
6850 && !side_effects_p (addr))
6851 {
6852 rtx_insn *last;
6853 machine_mode mode;
6854
6855 last = get_last_insn ();
6856 mode = get_address_mode (mem);
6857 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
6858 if (insn_operand_matches (icode, opno, mem))
6859 {
6860 op->value = mem;
6861 return true;
6862 }
6863 delete_insns_since (last);
6864 }
6865 }
6866
6867 return false;
6868 }
6869
6870 /* Try to make OP match operand OPNO of instruction ICODE. Return true
6871 on success, storing the new operand value back in OP. */
6872
6873 static bool
6874 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
6875 struct expand_operand *op)
6876 {
6877 machine_mode mode, imode;
6878 bool old_volatile_ok, result;
6879
6880 mode = op->mode;
6881 switch (op->type)
6882 {
6883 case EXPAND_FIXED:
6884 old_volatile_ok = volatile_ok;
6885 volatile_ok = true;
6886 result = maybe_legitimize_operand_same_code (icode, opno, op);
6887 volatile_ok = old_volatile_ok;
6888 return result;
6889
6890 case EXPAND_OUTPUT:
6891 gcc_assert (mode != VOIDmode);
6892 if (op->value
6893 && op->value != const0_rtx
6894 && GET_MODE (op->value) == mode
6895 && maybe_legitimize_operand_same_code (icode, opno, op))
6896 return true;
6897
6898 op->value = gen_reg_rtx (mode);
6899 break;
6900
6901 case EXPAND_INPUT:
6902 input:
6903 gcc_assert (mode != VOIDmode);
6904 gcc_assert (GET_MODE (op->value) == VOIDmode
6905 || GET_MODE (op->value) == mode);
6906 if (maybe_legitimize_operand_same_code (icode, opno, op))
6907 return true;
6908
6909 op->value = copy_to_mode_reg (mode, op->value);
6910 break;
6911
6912 case EXPAND_CONVERT_TO:
6913 gcc_assert (mode != VOIDmode);
6914 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
6915 goto input;
6916
6917 case EXPAND_CONVERT_FROM:
6918 if (GET_MODE (op->value) != VOIDmode)
6919 mode = GET_MODE (op->value);
6920 else
6921 /* The caller must tell us what mode this value has. */
6922 gcc_assert (mode != VOIDmode);
6923
6924 imode = insn_data[(int) icode].operand[opno].mode;
6925 if (imode != VOIDmode && imode != mode)
6926 {
6927 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
6928 mode = imode;
6929 }
6930 goto input;
6931
6932 case EXPAND_ADDRESS:
6933 gcc_assert (mode != VOIDmode);
6934 op->value = convert_memory_address (mode, op->value);
6935 goto input;
6936
6937 case EXPAND_INTEGER:
6938 mode = insn_data[(int) icode].operand[opno].mode;
6939 if (mode != VOIDmode && const_int_operand (op->value, mode))
6940 goto input;
6941 break;
6942 }
6943 return insn_operand_matches (icode, opno, op->value);
6944 }
6945
6946 /* Make OP describe an input operand that should have the same value
6947 as VALUE, after any mode conversion that the target might request.
6948 TYPE is the type of VALUE. */
6949
6950 void
6951 create_convert_operand_from_type (struct expand_operand *op,
6952 rtx value, tree type)
6953 {
6954 create_convert_operand_from (op, value, TYPE_MODE (type),
6955 TYPE_UNSIGNED (type));
6956 }
6957
6958 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
6959 of instruction ICODE. Return true on success, leaving the new operand
6960 values in the OPS themselves. Emit no code on failure. */
6961
6962 bool
6963 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
6964 unsigned int nops, struct expand_operand *ops)
6965 {
6966 rtx_insn *last;
6967 unsigned int i;
6968
6969 last = get_last_insn ();
6970 for (i = 0; i < nops; i++)
6971 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
6972 {
6973 delete_insns_since (last);
6974 return false;
6975 }
6976 return true;
6977 }
6978
6979 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
6980 as its operands. Return the instruction pattern on success,
6981 and emit any necessary set-up code. Return null and emit no
6982 code on failure. */
6983
6984 rtx_insn *
6985 maybe_gen_insn (enum insn_code icode, unsigned int nops,
6986 struct expand_operand *ops)
6987 {
6988 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
6989 if (!maybe_legitimize_operands (icode, 0, nops, ops))
6990 return NULL;
6991
6992 switch (nops)
6993 {
6994 case 1:
6995 return GEN_FCN (icode) (ops[0].value);
6996 case 2:
6997 return GEN_FCN (icode) (ops[0].value, ops[1].value);
6998 case 3:
6999 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7000 case 4:
7001 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7002 ops[3].value);
7003 case 5:
7004 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7005 ops[3].value, ops[4].value);
7006 case 6:
7007 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7008 ops[3].value, ops[4].value, ops[5].value);
7009 case 7:
7010 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7011 ops[3].value, ops[4].value, ops[5].value,
7012 ops[6].value);
7013 case 8:
7014 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7015 ops[3].value, ops[4].value, ops[5].value,
7016 ops[6].value, ops[7].value);
7017 case 9:
7018 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7019 ops[3].value, ops[4].value, ops[5].value,
7020 ops[6].value, ops[7].value, ops[8].value);
7021 }
7022 gcc_unreachable ();
7023 }
7024
7025 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7026 as its operands. Return true on success and emit no code on failure. */
7027
7028 bool
7029 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7030 struct expand_operand *ops)
7031 {
7032 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7033 if (pat)
7034 {
7035 emit_insn (pat);
7036 return true;
7037 }
7038 return false;
7039 }
7040
7041 /* Like maybe_expand_insn, but for jumps. */
7042
7043 bool
7044 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7045 struct expand_operand *ops)
7046 {
7047 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7048 if (pat)
7049 {
7050 emit_jump_insn (pat);
7051 return true;
7052 }
7053 return false;
7054 }
7055
7056 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7057 as its operands. */
7058
7059 void
7060 expand_insn (enum insn_code icode, unsigned int nops,
7061 struct expand_operand *ops)
7062 {
7063 if (!maybe_expand_insn (icode, nops, ops))
7064 gcc_unreachable ();
7065 }
7066
7067 /* Like expand_insn, but for jumps. */
7068
7069 void
7070 expand_jump_insn (enum insn_code icode, unsigned int nops,
7071 struct expand_operand *ops)
7072 {
7073 if (!maybe_expand_jump_insn (icode, nops, ops))
7074 gcc_unreachable ();
7075 }