/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "predict.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "except.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
\f
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

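/* For example (an illustrative note, not part of the original comment):
   after a multi-insn sequence computing TARGET = OP0 + OP1, the last
   insn that sets TARGET receives a REG_EQUAL note of the form
   (plus:M OP0 OP1), which later passes such as CSE can use to
   rediscover the value without rescanning the whole sequence.  */
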
static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
          && (rtx_equal_p (target, op0)
              || (op1 && rtx_equal_p (target, op1))))
        {
          /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
             over expanding it as temp = MEM op X, MEM = temp.  If the target
             supports MEM = MEM op X instructions, it is sometimes too hard
             to reconstruct that form later, especially if X is also a memory,
             and due to multiple occurrences of addresses the address might
             be forced into a register unnecessarily.
             Note that not emitting the REG_EQUAL note might inhibit
             CSE in some cases.  */
          set = single_set (last_insn);
          if (set
              && GET_CODE (SET_SRC (set)) == code
              && MEM_P (SET_DEST (set))
              && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
                  || (op1 && rtx_equal_p (SET_DEST (set),
                                          XEXP (SET_SRC (set), 1)))))
            return 1;
        }
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
          {
            note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
            if (GET_MODE_SIZE (GET_MODE (op0))
                > GET_MODE_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, GET_MODE (op0));
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, GET_MODE (op0));
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
\f
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
\f
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

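/* Illustrative example (not from the original comment): ANDing two
   QImode values that have been widened to SImode with NO_EXTEND set is
   safe because the caller only looks at the low 8 bits of the result;
   the paradoxical SUBREG's upper 24 bits may hold garbage.  */
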
static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_lowpart (mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
\f
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the
      operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In this case OP0, WIDE_OP, and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                 nops  OP0     OP1     WIDE_OP
   widening-sum                  2     oprnd0  -       oprnd1
   widening-dot-product          3     oprnd0  oprnd1  oprnd2
   widening-mult                 2     oprnd0  oprnd1  -
   type-promotion (vec-unpack)   1     oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value is generated there,
   if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

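/* Usage sketch (illustrative, not a claim about callers in this file):
   a fused multiply-add, for instance, can be expanded through here by
   passing fma_optab together with the three operands; each operand is
   converted to MODE and handed to the target's single fma insn.  */
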
rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}


/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
                                         mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}

/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

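/* Usage sketch (illustrative): broadcasting a SImode register R into
   V4SImode builds a PARALLEL of four copies of R and hands it to the
   target's vec_init pattern, while broadcasting (const_int 7) instead
   folds directly to a CONST_VECTOR with no insns emitted at all.  */
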
static rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = optab_handler (vec_init_optab, vmode);
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

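/* Illustrative example (assuming a 32-bit word and little-endian word
   order): a 64-bit left shift by a count C in [32, 63] reduces to
   INTO_TARGET = OUTOF_INPUT << (C - 32), with OUTOF_TARGET filled with
   zeros (or with copies of the sign bit for an arithmetic right
   shift).  */
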
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

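/* Illustrative example (left shift by C with 0 < C < BITS_PER_WORD):
     INTO_TARGET  = (INTO_INPUT << C) | (OUTOF_INPUT >> (BITS_PER_WORD - C))
     OUTOF_TARGET = OUTOF_INPUT << C
   The second term of INTO_TARGET is the "carries" value computed
   below.  */
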
static bool
expand_subword_shift (machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
                                            op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_wide_int_const
            (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
                                                op1_mode), op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}


/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

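/* A concrete illustration (hedged; assumes a 32-bit word): with
   SHIFT_MASK == 31 the synthesized 64-bit shift behaves as if the
   count were masked by 63, so a count of 40 shifts by 40 rather than
   by 40 & 31, matching SHIFT_COUNT_TRUNCATED semantics.  */
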
static bool
expand_doubleword_shift (machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
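  /* For instance (an illustrative note): when word shift counts are
     truncated to BITS_PER_WORD - 1 bits, (OP1 & BITS_PER_WORD) == 0
     exactly when the effective count is a subword count, and OP1 can
     then serve unchanged as SUPERWORD_OP1 because the hardware masks
     the count anyway.  */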
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                            cmp_code, cmp1, cmp2,
                                            outof_input, into_input,
                                            op1, superword_op1,
                                            outof_target, into_target,
                                            unsignedp, methods, shift_mask))
        return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label, -1);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
\f
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                                       _______________________
                                       [__op0_high_|__op0_low__]
                                       _______________________
                                    *  [__op1_high_|__op1_low__]
               _______________________________________________
                                       _______________________
     (1)                               [__op0_low__*__op1_low__]
                         _______________________
     (2a)                [__op0_low__*__op1_high_]
                         _______________________
     (2b)                [__op0_high_*__op1_low__]
           _______________________
     (3)   [__op0_high_*__op1_high_]


   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */

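/* Worked example of the adjustment (illustrative; BITS_PER_WORD == 8):
   with op0_low == 0x80 and op1_low == 0x03, a signed 8x8->16 multiply
   computes (-128) * 3 = -384 instead of 128 * 3 = 384, i.e. the low
   product is short by exactly op1_low * 2**8.  Logically shifting
   op0_low right by 7 yields 1; adding that 1 to op0_high before
   computing (2b) feeds the missing op1_low * 2**8 back into the final
   sum.  */
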
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* Unless we can use an unsigned widening multiply to compute the
     product of the low-order words directly, we need to adjust the
     operands as described above.  Either way, we begin by trying the
     two extra multiplications and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
\f
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}

/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}

/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
          > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}

/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx_insn *last)
{
  machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
                                                      from_mode, 1);
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
           ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
          && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value,
                               optab_to_code (binoptab),
                               ops[1].value, ops[2].value))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}

/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value is generated there,
   if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, a constant but invalid OP1 might have been expanded in a
     different mode than MODE.  As such values are invalid, force them
     into a register to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
           && shift_optab_p (binoptab)
           && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && find_widening_optab_handler (binoptab, mode,
                                      widened_mode (mode, op0, op1), 1)
         != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
                                    unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
      && mclass == MODE_INT)
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (mode);

      if (CONST_INT_P (op1))
        newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
        newop1 = negate_rtx (GET_MODE (op1), op1);
      else
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
                               gen_int_mode (bits, GET_MODE (op1)), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
      && (widening_optab_handler ((unsignedp ? umul_widen_optab
                                             : smul_widen_optab),
                                  GET_MODE_2XWIDER_MODE (mode), mode)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
        otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
        otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
        otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
        otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
        otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
        {
          /* The scalar may have been extended to be too wide.  Truncate
             it back to the proper size to fit in the broadcast vector.  */
          machine_mode inner_mode = GET_MODE_INNER (mode);
          if (!CONST_INT_P (op1)
              && (GET_MODE_BITSIZE (inner_mode)
                  < GET_MODE_BITSIZE (GET_MODE (op1))))
            op1 = force_reg (inner_mode,
                             simplify_gen_unary (TRUNCATE, inner_mode, op1,
                                                 GET_MODE (op1)));
          rtx vop1 = expand_vector_broadcast (mode, op1);
          if (vop1)
            {
              temp = expand_binop_directly (mode, otheroptab, op0, vop1,
                                            target, unsignedp, methods, last);
              if (temp)
                return temp;
            }
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (find_widening_optab_handler ((unsignedp
                                                  ? umul_widen_optab
                                                  : smul_widen_optab),
                                                 GET_MODE_WIDER_MODE (wider_mode),
                                                 mode, 0)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && mclass == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0
          || target == op0
          || target == op1
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          emit_insn (insns);
          return target;
        }
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && mclass == MODE_INT
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx_insn *insns;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0
              || target == op0
              || target == op1
              || !valid_multiword_target_p (target))
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              emit_insn (insns);
              return target;
            }
          end_sequence ();
        }
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && mclass == MODE_INT
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
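      /* Sketch of the decomposition (an illustrative note, for a rotate
         left by C with 0 < C < BITS_PER_WORD):
           high' = (high << C) | (low  >> (BITS_PER_WORD - C))
           low'  = (low  << C) | (high >> (BITS_PER_WORD - C))
         and a rotate by exactly BITS_PER_WORD is a plain word swap.  */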
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }

  /* These can be done a word at a time by propagating carries.  */
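  /* Sketch (illustrative): for each word i, compute
       sum_i = op0_i + op1_i (+ carry_in)
     where the carry out of word i is detected with an unsigned
     comparison: for addition, carry iff sum_i < op0_i; for
     subtraction, borrow iff sum_i > op0_i.  */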
  if ((binoptab == add_optab || binoptab == sub_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If
         STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1
         since it is the one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two possible carries together.  */
1640 carry_out = expand_binop (word_mode, ior_optab,
1641 carry_out, carry_tmp,
1642 carry_out, 0, next_methods);
1643 if (carry_out == 0)
1644 break;
1645 }
1646 emit_move_insn (target_piece, newx);
1647 }
1648 else
1649 {
1650 if (x != target_piece)
1651 emit_move_insn (target_piece, x);
1652 }
1653
1654 carry_in = carry_out;
1655 }
1656
1657 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1658 {
1659 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
1660 || ! rtx_equal_p (target, xtarget))
1661 {
1662 rtx_insn *temp = emit_move_insn (target, xtarget);
1663
1664 set_dst_reg_note (temp, REG_EQUAL,
1665 gen_rtx_fmt_ee (optab_to_code (binoptab),
1666 mode, copy_rtx (xop0),
1667 copy_rtx (xop1)),
1668 target);
1669 }
1670 else
1671 target = xtarget;
1672
1673 return target;
1674 }
1675
1676 else
1677 delete_insns_since (last);
1678 }
1679
1680 /* Attempt to synthesize double word multiplies using a sequence of word
1681 mode multiplications. We first attempt to generate a sequence using a
1682 more efficient unsigned widening multiply, and if that fails we then
1683 try using a signed widening multiply. */
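   /* Sketch of the decomposition (not compiler code): with W = BITS_PER_WORD
      and op0 = h0 * 2^W + l0, op1 = h1 * 2^W + l1,

	 op0 * op1 mod 2^(2*W) == (l0 * l1)                    <- widening multiply
				  + ((h0 * l1 + h1 * l0) << W) <- word multiplies

      which is the shape expand_doubleword_mult is assumed to emit.  */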
1684
1685 if (binoptab == smul_optab
1686 && mclass == MODE_INT
1687 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1688 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
1689 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
1690 {
1691 rtx product = NULL_RTX;
1692 if (widening_optab_handler (umul_widen_optab, mode, word_mode)
1693 != CODE_FOR_nothing)
1694 {
1695 product = expand_doubleword_mult (mode, op0, op1, target,
1696 true, methods);
1697 if (!product)
1698 delete_insns_since (last);
1699 }
1700
1701 if (product == NULL_RTX
1702 && widening_optab_handler (smul_widen_optab, mode, word_mode)
1703 != CODE_FOR_nothing)
1704 {
1705 product = expand_doubleword_mult (mode, op0, op1, target,
1706 false, methods);
1707 if (!product)
1708 delete_insns_since (last);
1709 }
1710
1711 if (product != NULL_RTX)
1712 {
1713 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
1714 {
1715 temp = emit_move_insn (target ? target : product, product);
1716 set_dst_reg_note (temp,
1717 REG_EQUAL,
1718 gen_rtx_fmt_ee (MULT, mode,
1719 copy_rtx (op0),
1720 copy_rtx (op1)),
1721 target ? target : product);
1722 }
1723 return product;
1724 }
1725 }
1726
1727 /* It can't be open-coded in this mode.
1728 Use a library call if one is available and caller says that's ok. */
1729
1730 libfunc = optab_libfunc (binoptab, mode);
1731 if (libfunc
1732 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1733 {
1734 rtx_insn *insns;
1735 rtx op1x = op1;
1736 machine_mode op1_mode = mode;
1737 rtx value;
1738
1739 start_sequence ();
1740
1741 if (shift_optab_p (binoptab))
1742 {
1743 op1_mode = targetm.libgcc_shift_count_mode ();
1744 /* Specify unsigned here,
1745 since negative shift counts are meaningless. */
1746 op1x = convert_to_mode (op1_mode, op1, 1);
1747 }
1748
1749 if (GET_MODE (op0) != VOIDmode
1750 && GET_MODE (op0) != mode)
1751 op0 = convert_to_mode (mode, op0, unsignedp);
1752
1753 /* Pass 1 for NO_QUEUE so we don't lose any increments
1754 if the libcall is cse'd or moved. */
1755 value = emit_library_call_value (libfunc,
1756 NULL_RTX, LCT_CONST, mode, 2,
1757 op0, mode, op1x, op1_mode);
1758
1759 insns = get_insns ();
1760 end_sequence ();
1761
1762 bool trapv = trapv_binoptab_p (binoptab);
1763 target = gen_reg_rtx (mode);
1764 emit_libcall_block_1 (insns, target, value,
1765 trapv ? NULL_RTX
1766 : gen_rtx_fmt_ee (optab_to_code (binoptab),
1767 mode, op0, op1), trapv);
1768
1769 return target;
1770 }
1771
1772 delete_insns_since (last);
1773
1774 /* It can't be done in this mode. Can we do it in a wider mode? */
1775
1776 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1777 || methods == OPTAB_MUST_WIDEN))
1778 {
1779 /* Caller says, don't even try. */
1780 delete_insns_since (entry_last);
1781 return 0;
1782 }
1783
1784 /* Compute the value of METHODS to pass to recursive calls.
1785 Don't allow widening to be tried recursively. */
1786
1787 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1788
1789 /* Look for a wider mode of the same class for which it appears we can do
1790 the operation. */
1791
1792 if (CLASS_HAS_WIDER_MODES_P (mclass))
1793 {
1794 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1795 wider_mode != VOIDmode;
1796 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1797 {
1798 if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
1799 != CODE_FOR_nothing
1800 || (methods == OPTAB_LIB
1801 && optab_libfunc (binoptab, wider_mode)))
1802 {
1803 rtx xop0 = op0, xop1 = op1;
1804 int no_extend = 0;
1805
1806 /* For certain integer operations, we need not actually extend
1807 the narrow operands, as long as we will truncate
1808 the results to the same narrowness. */
1809
1810 if ((binoptab == ior_optab || binoptab == and_optab
1811 || binoptab == xor_optab
1812 || binoptab == add_optab || binoptab == sub_optab
1813 || binoptab == smul_optab || binoptab == ashl_optab)
1814 && mclass == MODE_INT)
1815 no_extend = 1;
1816
1817 xop0 = widen_operand (xop0, wider_mode, mode,
1818 unsignedp, no_extend);
1819
1820 /* The second operand of a shift must always be extended. */
1821 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1822 no_extend && binoptab != ashl_optab);
1823
1824 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1825 unsignedp, methods);
1826 if (temp)
1827 {
1828 if (mclass != MODE_INT
1829 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1830 {
1831 if (target == 0)
1832 target = gen_reg_rtx (mode);
1833 convert_move (target, temp, 0);
1834 return target;
1835 }
1836 else
1837 return gen_lowpart (mode, temp);
1838 }
1839 else
1840 delete_insns_since (last);
1841 }
1842 }
1843 }
1844
1845 delete_insns_since (entry_last);
1846 return 0;
1847 }
1848 \f
1849 /* Expand a binary operator which has both signed and unsigned forms.
1850 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1851 signed operations.
1852
1853 If we widen unsigned operands, we may use a signed wider operation instead
1854 of an unsigned wider operation, since the result would be the same. */
1855
1856 rtx
1857 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
1858 rtx op0, rtx op1, rtx target, int unsignedp,
1859 enum optab_methods methods)
1860 {
1861 rtx temp;
1862 optab direct_optab = unsignedp ? uoptab : soptab;
1863 bool save_enable;
1864
1865 /* Do it without widening, if possible. */
1866 temp = expand_binop (mode, direct_optab, op0, op1, target,
1867 unsignedp, OPTAB_DIRECT);
1868 if (temp || methods == OPTAB_DIRECT)
1869 return temp;
1870
1871 /* Try widening to a signed int. Disable any direct use of any
1872 signed insn in the current mode. */
1873 save_enable = swap_optab_enable (soptab, mode, false);
1874
1875 temp = expand_binop (mode, soptab, op0, op1, target,
1876 unsignedp, OPTAB_WIDEN);
1877
1878 /* For unsigned operands, try widening to an unsigned int. */
1879 if (!temp && unsignedp)
1880 temp = expand_binop (mode, uoptab, op0, op1, target,
1881 unsignedp, OPTAB_WIDEN);
1882 if (temp || methods == OPTAB_WIDEN)
1883 goto egress;
1884
1885 /* Use the right width libcall if that exists. */
1886 temp = expand_binop (mode, direct_optab, op0, op1, target,
1887 unsignedp, OPTAB_LIB);
1888 if (temp || methods == OPTAB_LIB)
1889 goto egress;
1890
1891   /* Must widen and use a libcall; use either signed or unsigned.  */
1892 temp = expand_binop (mode, soptab, op0, op1, target,
1893 unsignedp, methods);
1894 if (!temp && unsignedp)
1895 temp = expand_binop (mode, uoptab, op0, op1, target,
1896 unsignedp, methods);
1897
1898 egress:
1899 /* Undo the fiddling above. */
1900 if (save_enable)
1901 swap_optab_enable (soptab, mode, true);
1902 return temp;
1903 }
1904 \f
1905 /* Generate code to perform an operation specified by UNOPPTAB
1906 on operand OP0, with two results to TARG0 and TARG1.
1907 We assume that the order of the operands for the instruction
1908 is TARG0, TARG1, OP0.
1909
1910 Either TARG0 or TARG1 may be zero, but what that means is that
1911 the result is not actually wanted. We will generate it into
1912 a dummy pseudo-reg and discard it. They may not both be zero.
1913
1914 Returns 1 if this operation can be performed; 0 if not. */
1915
1916 int
1917 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1918 int unsignedp)
1919 {
1920 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1921 enum mode_class mclass;
1922 machine_mode wider_mode;
1923 rtx_insn *entry_last = get_last_insn ();
1924 rtx_insn *last;
1925
1926 mclass = GET_MODE_CLASS (mode);
1927
1928 if (!targ0)
1929 targ0 = gen_reg_rtx (mode);
1930 if (!targ1)
1931 targ1 = gen_reg_rtx (mode);
1932
1933 /* Record where to go back to if we fail. */
1934 last = get_last_insn ();
1935
1936 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
1937 {
1938 struct expand_operand ops[3];
1939 enum insn_code icode = optab_handler (unoptab, mode);
1940
1941 create_fixed_operand (&ops[0], targ0);
1942 create_fixed_operand (&ops[1], targ1);
1943 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
1944 if (maybe_expand_insn (icode, 3, ops))
1945 return 1;
1946 }
1947
1948 /* It can't be done in this mode. Can we do it in a wider mode? */
1949
1950 if (CLASS_HAS_WIDER_MODES_P (mclass))
1951 {
1952 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1953 wider_mode != VOIDmode;
1954 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1955 {
1956 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
1957 {
1958 rtx t0 = gen_reg_rtx (wider_mode);
1959 rtx t1 = gen_reg_rtx (wider_mode);
1960 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1961
1962 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1963 {
1964 convert_move (targ0, t0, unsignedp);
1965 convert_move (targ1, t1, unsignedp);
1966 return 1;
1967 }
1968 else
1969 delete_insns_since (last);
1970 }
1971 }
1972 }
1973
1974 delete_insns_since (entry_last);
1975 return 0;
1976 }
1977 \f
1978 /* Generate code to perform an operation specified by BINOPTAB
1979    on operands OP0 and OP1, with two results to TARG0 and TARG1.
1980 We assume that the order of the operands for the instruction
1981 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1982 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1983
1984 Either TARG0 or TARG1 may be zero, but what that means is that
1985 the result is not actually wanted. We will generate it into
1986 a dummy pseudo-reg and discard it. They may not both be zero.
1987
1988 Returns 1 if this operation can be performed; 0 if not. */
1989
1990 int
1991 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1992 int unsignedp)
1993 {
1994 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1995 enum mode_class mclass;
1996 machine_mode wider_mode;
1997 rtx_insn *entry_last = get_last_insn ();
1998 rtx_insn *last;
1999
2000 mclass = GET_MODE_CLASS (mode);
2001
2002 if (!targ0)
2003 targ0 = gen_reg_rtx (mode);
2004 if (!targ1)
2005 targ1 = gen_reg_rtx (mode);
2006
2007 /* Record where to go back to if we fail. */
2008 last = get_last_insn ();
2009
2010 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2011 {
2012 struct expand_operand ops[4];
2013 enum insn_code icode = optab_handler (binoptab, mode);
2014 machine_mode mode0 = insn_data[icode].operand[1].mode;
2015 machine_mode mode1 = insn_data[icode].operand[2].mode;
2016 rtx xop0 = op0, xop1 = op1;
2017
2018 /* If we are optimizing, force expensive constants into a register. */
2019 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2020 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2021
2022 create_fixed_operand (&ops[0], targ0);
2023 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2024 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
2025 create_fixed_operand (&ops[3], targ1);
2026 if (maybe_expand_insn (icode, 4, ops))
2027 return 1;
2028 delete_insns_since (last);
2029 }
2030
2031 /* It can't be done in this mode. Can we do it in a wider mode? */
2032
2033 if (CLASS_HAS_WIDER_MODES_P (mclass))
2034 {
2035 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2036 wider_mode != VOIDmode;
2037 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2038 {
2039 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2040 {
2041 rtx t0 = gen_reg_rtx (wider_mode);
2042 rtx t1 = gen_reg_rtx (wider_mode);
2043 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2044 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2045
2046 if (expand_twoval_binop (binoptab, cop0, cop1,
2047 t0, t1, unsignedp))
2048 {
2049 convert_move (targ0, t0, unsignedp);
2050 convert_move (targ1, t1, unsignedp);
2051 return 1;
2052 }
2053 else
2054 delete_insns_since (last);
2055 }
2056 }
2057 }
2058
2059 delete_insns_since (entry_last);
2060 return 0;
2061 }
2062
2063 /* Expand the two-valued library call indicated by BINOPTAB, but
2064 preserve only one of the values. If TARG0 is non-NULL, the first
2065 value is placed into TARG0; otherwise the second value is placed
2066 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2067 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2068 This routine assumes that the value returned by the library call is
2069    as if the return value were of an integral mode twice as wide as the
2070 mode of OP0. Returns 1 if the call was successful. */
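/* For instance (an assumed divmod-style libcall, for illustration only):
   on SImode operands the call returns one DImode value whose two SImode
   halves hold the two results; the simplify_gen_subreg below picks out
   whichever half the caller asked for.  */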
2071
2072 bool
2073 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2074 rtx targ0, rtx targ1, enum rtx_code code)
2075 {
2076 machine_mode mode;
2077 machine_mode libval_mode;
2078 rtx libval;
2079 rtx_insn *insns;
2080 rtx libfunc;
2081
2082 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2083 gcc_assert (!targ0 != !targ1);
2084
2085 mode = GET_MODE (op0);
2086 libfunc = optab_libfunc (binoptab, mode);
2087 if (!libfunc)
2088 return false;
2089
2090 /* The value returned by the library function will have twice as
2091 many bits as the nominal MODE. */
2092 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2093 MODE_INT);
2094 start_sequence ();
2095 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2096 libval_mode, 2,
2097 op0, mode,
2098 op1, mode);
2099 /* Get the part of VAL containing the value that we want. */
2100 libval = simplify_gen_subreg (mode, libval, libval_mode,
2101 targ0 ? 0 : GET_MODE_SIZE (mode));
2102 insns = get_insns ();
2103 end_sequence ();
2104   /* Move the result into the desired location.  */
2105 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2106 gen_rtx_fmt_ee (code, mode, op0, op1));
2107
2108 return true;
2109 }
2110
2111 \f
2112 /* Wrapper around expand_unop which takes an rtx code to specify
2113 the operation to perform, not an optab pointer. All other
2114 arguments are the same. */
2115 rtx
2116 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2117 rtx target, int unsignedp)
2118 {
2119 optab unop = code_to_optab (code);
2120 gcc_assert (unop);
2121
2122 return expand_unop (mode, unop, op0, target, unsignedp);
2123 }
2124
2125 /* Try calculating
2126 (clz:narrow x)
2127 as
2128 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2129
2130 A similar operation can be used for clrsb. UNOPTAB says which operation
2131 we are trying to expand. */
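/* For example (illustrative only): with clz available only in SImode,
   clz of a QImode value X can be computed as

      clz32 ((uint32_t) X) - (32 - 8)

   since zero-extending X adds exactly 32 - 8 leading zero bits.  */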
2132 static rtx
2133 widen_leading (machine_mode mode, rtx op0, rtx target, optab unoptab)
2134 {
2135 enum mode_class mclass = GET_MODE_CLASS (mode);
2136 if (CLASS_HAS_WIDER_MODES_P (mclass))
2137 {
2138 machine_mode wider_mode;
2139 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2140 wider_mode != VOIDmode;
2141 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2142 {
2143 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2144 {
2145 rtx xop0, temp;
2146 rtx_insn *last;
2147
2148 last = get_last_insn ();
2149
2150 if (target == 0)
2151 target = gen_reg_rtx (mode);
2152 xop0 = widen_operand (op0, wider_mode, mode,
2153 unoptab != clrsb_optab, false);
2154 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2155 unoptab != clrsb_optab);
2156 if (temp != 0)
2157 temp = expand_binop
2158 (wider_mode, sub_optab, temp,
2159 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2160 - GET_MODE_PRECISION (mode),
2161 wider_mode),
2162 target, true, OPTAB_DIRECT);
2163 if (temp == 0)
2164 delete_insns_since (last);
2165
2166 return temp;
2167 }
2168 }
2169 }
2170 return 0;
2171 }
2172
2173 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2174 quantities, choosing which based on whether the high word is nonzero. */
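/* Roughly (illustrative C, assuming 32-bit words):

      hi = x >> 32;  lo = x & 0xffffffff;
      result = hi != 0 ? clz32 (hi) : 32 + clz32 (lo);  */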
2175 static rtx
2176 expand_doubleword_clz (machine_mode mode, rtx op0, rtx target)
2177 {
2178 rtx xop0 = force_reg (mode, op0);
2179 rtx subhi = gen_highpart (word_mode, xop0);
2180 rtx sublo = gen_lowpart (word_mode, xop0);
2181 rtx_code_label *hi0_label = gen_label_rtx ();
2182 rtx_code_label *after_label = gen_label_rtx ();
2183 rtx_insn *seq;
2184 rtx temp, result;
2185
2186 /* If we were not given a target, use a word_mode register, not a
2187 'mode' register. The result will fit, and nobody is expecting
2188 anything bigger (the return type of __builtin_clz* is int). */
2189 if (!target)
2190 target = gen_reg_rtx (word_mode);
2191
2192 /* In any case, write to a word_mode scratch in both branches of the
2193 conditional, so we can ensure there is a single move insn setting
2194 'target' to tag a REG_EQUAL note on. */
2195 result = gen_reg_rtx (word_mode);
2196
2197 start_sequence ();
2198
2199 /* If the high word is not equal to zero,
2200 then clz of the full value is clz of the high word. */
2201 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2202 word_mode, true, hi0_label);
2203
2204 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2205 if (!temp)
2206 goto fail;
2207
2208 if (temp != result)
2209 convert_move (result, temp, true);
2210
2211 emit_jump_insn (targetm.gen_jump (after_label));
2212 emit_barrier ();
2213
2214 /* Else clz of the full value is clz of the low word plus the number
2215 of bits in the high word. */
2216 emit_label (hi0_label);
2217
2218 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2219 if (!temp)
2220 goto fail;
2221 temp = expand_binop (word_mode, add_optab, temp,
2222 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2223 result, true, OPTAB_DIRECT);
2224 if (!temp)
2225 goto fail;
2226 if (temp != result)
2227 convert_move (result, temp, true);
2228
2229 emit_label (after_label);
2230 convert_move (target, result, true);
2231
2232 seq = get_insns ();
2233 end_sequence ();
2234
2235 add_equal_note (seq, target, CLZ, xop0, 0);
2236 emit_insn (seq);
2237 return target;
2238
2239 fail:
2240 end_sequence ();
2241 return 0;
2242 }
2243
2244 /* Try calculating popcount of a double-word quantity as two popcount's of
2245 word-sized quantities and summing up the results. */
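/* I.e. (illustrative): popcount64 (x) == popcount32 (x & 0xffffffff)
   + popcount32 (x >> 32); each bit is counted exactly once.  */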
2246 static rtx
2247 expand_doubleword_popcount (machine_mode mode, rtx op0, rtx target)
2248 {
2249 rtx t0, t1, t;
2250 rtx_insn *seq;
2251
2252 start_sequence ();
2253
2254 t0 = expand_unop_direct (word_mode, popcount_optab,
2255 operand_subword_force (op0, 0, mode), NULL_RTX,
2256 true);
2257 t1 = expand_unop_direct (word_mode, popcount_optab,
2258 operand_subword_force (op0, 1, mode), NULL_RTX,
2259 true);
2260 if (!t0 || !t1)
2261 {
2262 end_sequence ();
2263 return NULL_RTX;
2264 }
2265
2266 /* If we were not given a target, use a word_mode register, not a
2267 'mode' register. The result will fit, and nobody is expecting
2268 anything bigger (the return type of __builtin_popcount* is int). */
2269 if (!target)
2270 target = gen_reg_rtx (word_mode);
2271
2272 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2273
2274 seq = get_insns ();
2275 end_sequence ();
2276
2277 add_equal_note (seq, t, POPCOUNT, op0, 0);
2278 emit_insn (seq);
2279 return t;
2280 }
2281
2282 /* Try calculating
2283 (parity:wide x)
2284 as
2285 (parity:narrow (low (x) ^ high (x))) */
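/* XOR preserves parity, so e.g. parity64 (x) == parity32 (lo ^ hi):
   every bit of X feeds exactly one bit of the XOR.  */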
2286 static rtx
2287 expand_doubleword_parity (machine_mode mode, rtx op0, rtx target)
2288 {
2289 rtx t = expand_binop (word_mode, xor_optab,
2290 operand_subword_force (op0, 0, mode),
2291 operand_subword_force (op0, 1, mode),
2292 NULL_RTX, 0, OPTAB_DIRECT);
2293 return expand_unop (word_mode, parity_optab, t, target, true);
2294 }
2295
2296 /* Try calculating
2297 (bswap:narrow x)
2298 as
2299 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
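/* E.g. (illustrative): bswap16 (x) == (uint16_t) (bswap32 (x) >> 16);
   the two interesting bytes land in the top half of the wide result,
   and the logical shift discards whatever the undefined upper bytes
   turned into.  */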
2300 static rtx
2301 widen_bswap (machine_mode mode, rtx op0, rtx target)
2302 {
2303 enum mode_class mclass = GET_MODE_CLASS (mode);
2304 machine_mode wider_mode;
2305 rtx x;
2306 rtx_insn *last;
2307
2308 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2309 return NULL_RTX;
2310
2311 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2312 wider_mode != VOIDmode;
2313 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2314 if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2315 goto found;
2316 return NULL_RTX;
2317
2318 found:
2319 last = get_last_insn ();
2320
2321 x = widen_operand (op0, wider_mode, mode, true, true);
2322 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2323
2324 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2325 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2326 if (x != 0)
2327 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2328 GET_MODE_BITSIZE (wider_mode)
2329 - GET_MODE_BITSIZE (mode),
2330 NULL_RTX, true);
2331
2332 if (x != 0)
2333 {
2334 if (target == 0)
2335 target = gen_reg_rtx (mode);
2336 emit_move_insn (target, gen_lowpart (mode, x));
2337 }
2338 else
2339 delete_insns_since (last);
2340
2341 return target;
2342 }
2343
2344 /* Try calculating bswap as two bswaps of two word-sized operands. */
2345
2346 static rtx
2347 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2348 {
2349 rtx t0, t1;
2350
2351 t1 = expand_unop (word_mode, bswap_optab,
2352 operand_subword_force (op, 0, mode), NULL_RTX, true);
2353 t0 = expand_unop (word_mode, bswap_optab,
2354 operand_subword_force (op, 1, mode), NULL_RTX, true);
2355
2356 if (target == 0 || !valid_multiword_target_p (target))
2357 target = gen_reg_rtx (mode);
2358 if (REG_P (target))
2359 emit_clobber (target);
2360 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2361 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2362
2363 return target;
2364 }
2365
2366 /* Try calculating (parity x) as (and (popcount x) 1), where
2367 popcount can also be done in a wider mode. */
2368 static rtx
2369 expand_parity (machine_mode mode, rtx op0, rtx target)
2370 {
2371 enum mode_class mclass = GET_MODE_CLASS (mode);
2372 if (CLASS_HAS_WIDER_MODES_P (mclass))
2373 {
2374 machine_mode wider_mode;
2375 for (wider_mode = mode; wider_mode != VOIDmode;
2376 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2377 {
2378 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2379 {
2380 rtx xop0, temp;
2381 rtx_insn *last;
2382
2383 last = get_last_insn ();
2384
2385 if (target == 0 || GET_MODE (target) != wider_mode)
2386 target = gen_reg_rtx (wider_mode);
2387
2388 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2389 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2390 true);
2391 if (temp != 0)
2392 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2393 target, true, OPTAB_DIRECT);
2394
2395 if (temp)
2396 {
2397 if (mclass != MODE_INT
2398 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2399 return convert_to_mode (mode, temp, 0);
2400 else
2401 return gen_lowpart (mode, temp);
2402 }
2403 else
2404 delete_insns_since (last);
2405 }
2406 }
2407 }
2408 return 0;
2409 }
2410
2411 /* Try calculating ctz(x) as K - clz(x & -x),
2412 where K is GET_MODE_PRECISION(mode) - 1.
2413
2414 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2415 don't have to worry about what the hardware does in that case. (If
2416 the clz instruction produces the usual value at 0, which is K, the
2417 result of this code sequence will be -1; expand_ffs, below, relies
2418 on this. It might be nice to have it be K instead, for consistency
2419 with the (very few) processors that provide a ctz with a defined
2420 value, but that would take one more instruction, and it would be
2421    less convenient for expand_ffs anyway.)  */
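/* Worked example (illustrative), with a 32-bit mode so K = 31:
   for x = 0b01101000, x & -x isolates the lowest set bit, 0b00001000;
   clz32 of that is 28, and 31 - 28 == 3 == ctz (x).  */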
2422
2423 static rtx
2424 expand_ctz (machine_mode mode, rtx op0, rtx target)
2425 {
2426 rtx_insn *seq;
2427 rtx temp;
2428
2429 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2430 return 0;
2431
2432 start_sequence ();
2433
2434 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2435 if (temp)
2436 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2437 true, OPTAB_DIRECT);
2438 if (temp)
2439 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2440 if (temp)
2441 temp = expand_binop (mode, sub_optab,
2442 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2443 temp, target,
2444 true, OPTAB_DIRECT);
2445 if (temp == 0)
2446 {
2447 end_sequence ();
2448 return 0;
2449 }
2450
2451 seq = get_insns ();
2452 end_sequence ();
2453
2454 add_equal_note (seq, temp, CTZ, op0, 0);
2455 emit_insn (seq);
2456 return temp;
2457 }
2458
2459
2460 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2461 else with the sequence used by expand_clz.
2462
2463 The ffs builtin promises to return zero for a zero value and ctz/clz
2464 may have an undefined value in that case. If they do not give us a
2465 convenient value, we have to generate a test and branch. */
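/* I.e. ffs (x) == ctz (x) + 1 for nonzero X, and ffs (0) == 0; the
   test-and-branch emitted below forces the intermediate result to -1
   at zero whenever ctz does not already deliver that value.  */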
2466 static rtx
2467 expand_ffs (machine_mode mode, rtx op0, rtx target)
2468 {
2469 HOST_WIDE_INT val = 0;
2470 bool defined_at_zero = false;
2471 rtx temp;
2472 rtx_insn *seq;
2473
2474 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2475 {
2476 start_sequence ();
2477
2478 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2479 if (!temp)
2480 goto fail;
2481
2482 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2483 }
2484 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2485 {
2486 start_sequence ();
2487 temp = expand_ctz (mode, op0, 0);
2488 if (!temp)
2489 goto fail;
2490
2491 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2492 {
2493 defined_at_zero = true;
2494 val = (GET_MODE_PRECISION (mode) - 1) - val;
2495 }
2496 }
2497 else
2498 return 0;
2499
2500 if (defined_at_zero && val == -1)
2501 /* No correction needed at zero. */;
2502 else
2503 {
2504 /* We don't try to do anything clever with the situation found
2505      on some processors (e.g. Alpha) where ctz(0:mode) ==
2506 bitsize(mode). If someone can think of a way to send N to -1
2507 and leave alone all values in the range 0..N-1 (where N is a
2508 power of two), cheaper than this test-and-branch, please add it.
2509
2510 The test-and-branch is done after the operation itself, in case
2511 the operation sets condition codes that can be recycled for this.
2512 (This is true on i386, for instance.) */
2513
2514 rtx_code_label *nonzero_label = gen_label_rtx ();
2515 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2516 mode, true, nonzero_label);
2517
2518 convert_move (temp, GEN_INT (-1), false);
2519 emit_label (nonzero_label);
2520 }
2521
2522 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2523 to produce a value in the range 0..bitsize. */
2524 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2525 target, false, OPTAB_DIRECT);
2526 if (!temp)
2527 goto fail;
2528
2529 seq = get_insns ();
2530 end_sequence ();
2531
2532 add_equal_note (seq, temp, FFS, op0, 0);
2533 emit_insn (seq);
2534 return temp;
2535
2536 fail:
2537 end_sequence ();
2538 return 0;
2539 }
2540
2541 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2542 conditions, VAL may already be a SUBREG against which we cannot generate
2543 a further SUBREG. In this case, we expect forcing the value into a
2544 register will work around the situation. */
2545
2546 static rtx
2547 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2548 machine_mode imode)
2549 {
2550 rtx ret;
2551 ret = lowpart_subreg (omode, val, imode);
2552 if (ret == NULL)
2553 {
2554 val = force_reg (imode, val);
2555 ret = lowpart_subreg (omode, val, imode);
2556 gcc_assert (ret != NULL);
2557 }
2558 return ret;
2559 }
2560
2561 /* Expand a floating point absolute value or negation operation via a
2562 logical operation on the sign bit. */
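/* E.g. (illustrative) for IEEE single precision, operating on the
   32-bit integer image of the float:

      NEG:  x ^ 0x80000000
      ABS:  x & 0x7fffffff  */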
2563
2564 static rtx
2565 expand_absneg_bit (enum rtx_code code, machine_mode mode,
2566 rtx op0, rtx target)
2567 {
2568 const struct real_format *fmt;
2569 int bitpos, word, nwords, i;
2570 machine_mode imode;
2571 rtx temp;
2572 rtx_insn *insns;
2573
2574 /* The format has to have a simple sign bit. */
2575 fmt = REAL_MODE_FORMAT (mode);
2576 if (fmt == NULL)
2577 return NULL_RTX;
2578
2579 bitpos = fmt->signbit_rw;
2580 if (bitpos < 0)
2581 return NULL_RTX;
2582
2583 /* Don't create negative zeros if the format doesn't support them. */
2584 if (code == NEG && !fmt->has_signed_zero)
2585 return NULL_RTX;
2586
2587 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2588 {
2589 imode = int_mode_for_mode (mode);
2590 if (imode == BLKmode)
2591 return NULL_RTX;
2592 word = 0;
2593 nwords = 1;
2594 }
2595 else
2596 {
2597 imode = word_mode;
2598
2599 if (FLOAT_WORDS_BIG_ENDIAN)
2600 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2601 else
2602 word = bitpos / BITS_PER_WORD;
2603 bitpos = bitpos % BITS_PER_WORD;
2604 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2605 }
2606
2607 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2608 if (code == ABS)
2609 mask = ~mask;
2610
2611 if (target == 0
2612 || target == op0
2613 || (nwords > 1 && !valid_multiword_target_p (target)))
2614 target = gen_reg_rtx (mode);
2615
2616 if (nwords > 1)
2617 {
2618 start_sequence ();
2619
2620 for (i = 0; i < nwords; ++i)
2621 {
2622 rtx targ_piece = operand_subword (target, i, 1, mode);
2623 rtx op0_piece = operand_subword_force (op0, i, mode);
2624
2625 if (i == word)
2626 {
2627 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2628 op0_piece,
2629 immed_wide_int_const (mask, imode),
2630 targ_piece, 1, OPTAB_LIB_WIDEN);
2631 if (temp != targ_piece)
2632 emit_move_insn (targ_piece, temp);
2633 }
2634 else
2635 emit_move_insn (targ_piece, op0_piece);
2636 }
2637
2638 insns = get_insns ();
2639 end_sequence ();
2640
2641 emit_insn (insns);
2642 }
2643 else
2644 {
2645 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2646 gen_lowpart (imode, op0),
2647 immed_wide_int_const (mask, imode),
2648 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2649 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2650
2651 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2652 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2653 target);
2654 }
2655
2656 return target;
2657 }
2658
2659 /* As expand_unop, but will fail rather than attempt the operation in a
2660 different mode or with a libcall. */
2661 static rtx
2662 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2663 int unsignedp)
2664 {
2665 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2666 {
2667 struct expand_operand ops[2];
2668 enum insn_code icode = optab_handler (unoptab, mode);
2669 rtx_insn *last = get_last_insn ();
2670 rtx_insn *pat;
2671
2672 create_output_operand (&ops[0], target, mode);
2673 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2674 pat = maybe_gen_insn (icode, 2, ops);
2675 if (pat)
2676 {
2677 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2678 && ! add_equal_note (pat, ops[0].value,
2679 optab_to_code (unoptab),
2680 ops[1].value, NULL_RTX))
2681 {
2682 delete_insns_since (last);
2683 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2684 }
2685
2686 emit_insn (pat);
2687
2688 return ops[0].value;
2689 }
2690 }
2691 return 0;
2692 }
2693
2694 /* Generate code to perform an operation specified by UNOPTAB
2695 on operand OP0, with result having machine-mode MODE.
2696
2697 UNSIGNEDP is for the case where we have to widen the operands
2698 to perform the operation. It says to use zero-extension.
2699
2700 If TARGET is nonzero, the value
2701 is generated there, if it is convenient to do so.
2702 In all cases an rtx is returned for the locus of the value;
2703 this may or may not be TARGET. */
2704
2705 rtx
2706 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
2707 int unsignedp)
2708 {
2709 enum mode_class mclass = GET_MODE_CLASS (mode);
2710 machine_mode wider_mode;
2711 rtx temp;
2712 rtx libfunc;
2713
2714 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2715 if (temp)
2716 return temp;
2717
2718 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2719
2720 /* Widening (or narrowing) clz needs special treatment. */
2721 if (unoptab == clz_optab)
2722 {
2723 temp = widen_leading (mode, op0, target, unoptab);
2724 if (temp)
2725 return temp;
2726
2727 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2728 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2729 {
2730 temp = expand_doubleword_clz (mode, op0, target);
2731 if (temp)
2732 return temp;
2733 }
2734
2735 goto try_libcall;
2736 }
2737
2738 if (unoptab == clrsb_optab)
2739 {
2740 temp = widen_leading (mode, op0, target, unoptab);
2741 if (temp)
2742 return temp;
2743 goto try_libcall;
2744 }
2745
2746 if (unoptab == popcount_optab
2747 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2748 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2749 && optimize_insn_for_speed_p ())
2750 {
2751 temp = expand_doubleword_popcount (mode, op0, target);
2752 if (temp)
2753 return temp;
2754 }
2755
2756 if (unoptab == parity_optab
2757 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2758 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2759 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2760 && optimize_insn_for_speed_p ())
2761 {
2762 temp = expand_doubleword_parity (mode, op0, target);
2763 if (temp)
2764 return temp;
2765 }
2766
2767 /* Widening (or narrowing) bswap needs special treatment. */
2768 if (unoptab == bswap_optab)
2769 {
2770 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2771 or ROTATERT. First try these directly; if this fails, then try the
2772 obvious pair of shifts with allowed widening, as this will probably
2773 	 always be more efficient than the other fallback methods.  */
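      /* Both rotates compute the same thing here (illustrative):
	 rotl16 (x, 8) == rotr16 (x, 8) == ((x << 8) | (x >> 8)) & 0xffff,
	 which swaps the two bytes of a 16-bit value.  */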
2774 if (mode == HImode)
2775 {
2776 rtx_insn *last;
2777 rtx temp1, temp2;
2778
2779 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2780 {
2781 temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
2782 unsignedp, OPTAB_DIRECT);
2783 if (temp)
2784 return temp;
2785 }
2786
2787 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2788 {
2789 temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
2790 unsignedp, OPTAB_DIRECT);
2791 if (temp)
2792 return temp;
2793 }
2794
2795 last = get_last_insn ();
2796
2797 temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
2798 unsignedp, OPTAB_WIDEN);
2799 temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
2800 unsignedp, OPTAB_WIDEN);
2801 if (temp1 && temp2)
2802 {
2803 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2804 unsignedp, OPTAB_WIDEN);
2805 if (temp)
2806 return temp;
2807 }
2808
2809 delete_insns_since (last);
2810 }
2811
2812 temp = widen_bswap (mode, op0, target);
2813 if (temp)
2814 return temp;
2815
2816 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2817 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2818 {
2819 temp = expand_doubleword_bswap (mode, op0, target);
2820 if (temp)
2821 return temp;
2822 }
2823
2824 goto try_libcall;
2825 }
2826
2827 if (CLASS_HAS_WIDER_MODES_P (mclass))
2828 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2829 wider_mode != VOIDmode;
2830 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2831 {
2832 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2833 {
2834 rtx xop0 = op0;
2835 rtx_insn *last = get_last_insn ();
2836
2837 /* For certain operations, we need not actually extend
2838 the narrow operand, as long as we will truncate the
2839 results to the same narrowness. */
2840
2841 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2842 (unoptab == neg_optab
2843 || unoptab == one_cmpl_optab)
2844 && mclass == MODE_INT);
2845
2846 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2847 unsignedp);
2848
2849 if (temp)
2850 {
2851 if (mclass != MODE_INT
2852 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2853 {
2854 if (target == 0)
2855 target = gen_reg_rtx (mode);
2856 convert_move (target, temp, 0);
2857 return target;
2858 }
2859 else
2860 return gen_lowpart (mode, temp);
2861 }
2862 else
2863 delete_insns_since (last);
2864 }
2865 }
2866
2867 /* These can be done a word at a time. */
2868 if (unoptab == one_cmpl_optab
2869 && mclass == MODE_INT
2870 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2871 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2872 {
2873 int i;
2874 rtx_insn *insns;
2875
2876 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
2877 target = gen_reg_rtx (mode);
2878
2879 start_sequence ();
2880
2881 /* Do the actual arithmetic. */
2882 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2883 {
2884 rtx target_piece = operand_subword (target, i, 1, mode);
2885 rtx x = expand_unop (word_mode, unoptab,
2886 operand_subword_force (op0, i, mode),
2887 target_piece, unsignedp);
2888
2889 if (target_piece != x)
2890 emit_move_insn (target_piece, x);
2891 }
2892
2893 insns = get_insns ();
2894 end_sequence ();
2895
2896 emit_insn (insns);
2897 return target;
2898 }
2899
2900 if (optab_to_code (unoptab) == NEG)
2901 {
2902 /* Try negating floating point values by flipping the sign bit. */
2903 if (SCALAR_FLOAT_MODE_P (mode))
2904 {
2905 temp = expand_absneg_bit (NEG, mode, op0, target);
2906 if (temp)
2907 return temp;
2908 }
2909
2910 /* If there is no negation pattern, and we have no negative zero,
2911 try subtracting from zero. */
2912 if (!HONOR_SIGNED_ZEROS (mode))
2913 {
2914 temp = expand_binop (mode, (unoptab == negv_optab
2915 ? subv_optab : sub_optab),
2916 CONST0_RTX (mode), op0, target,
2917 unsignedp, OPTAB_DIRECT);
2918 if (temp)
2919 return temp;
2920 }
2921 }
2922
2923 /* Try calculating parity (x) as popcount (x) % 2. */
2924 if (unoptab == parity_optab)
2925 {
2926 temp = expand_parity (mode, op0, target);
2927 if (temp)
2928 return temp;
2929 }
2930
2931 /* Try implementing ffs (x) in terms of clz (x). */
2932 if (unoptab == ffs_optab)
2933 {
2934 temp = expand_ffs (mode, op0, target);
2935 if (temp)
2936 return temp;
2937 }
2938
2939 /* Try implementing ctz (x) in terms of clz (x). */
2940 if (unoptab == ctz_optab)
2941 {
2942 temp = expand_ctz (mode, op0, target);
2943 if (temp)
2944 return temp;
2945 }
2946
2947 try_libcall:
2948 /* Now try a library call in this mode. */
2949 libfunc = optab_libfunc (unoptab, mode);
2950 if (libfunc)
2951 {
2952 rtx_insn *insns;
2953 rtx value;
2954 rtx eq_value;
2955 machine_mode outmode = mode;
2956
2957 /* All of these functions return small values. Thus we choose to
2958 have them return something that isn't a double-word. */
2959 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2960 || unoptab == clrsb_optab || unoptab == popcount_optab
2961 || unoptab == parity_optab)
2962 outmode
2963 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
2964 optab_libfunc (unoptab, mode)));
2965
2966 start_sequence ();
2967
2968 /* Pass 1 for NO_QUEUE so we don't lose any increments
2969 if the libcall is cse'd or moved. */
2970 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
2971 1, op0, mode);
2972 insns = get_insns ();
2973 end_sequence ();
2974
2975 target = gen_reg_rtx (outmode);
2976 bool trapv = trapv_unoptab_p (unoptab);
2977 if (trapv)
2978 eq_value = NULL_RTX;
2979 else
2980 {
2981 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
2982 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
2983 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
2984 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
2985 eq_value = simplify_gen_unary (ZERO_EXTEND,
2986 outmode, eq_value, mode);
2987 }
2988 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
2989
2990 return target;
2991 }
2992
2993 /* It can't be done in this mode. Can we do it in a wider mode? */
2994
2995 if (CLASS_HAS_WIDER_MODES_P (mclass))
2996 {
2997 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2998 wider_mode != VOIDmode;
2999 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3000 {
3001 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3002 || optab_libfunc (unoptab, wider_mode))
3003 {
3004 rtx xop0 = op0;
3005 rtx_insn *last = get_last_insn ();
3006
3007 /* For certain operations, we need not actually extend
3008 the narrow operand, as long as we will truncate the
3009 results to the same narrowness. */
3010 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3011 (unoptab == neg_optab
3012 || unoptab == one_cmpl_optab
3013 || unoptab == bswap_optab)
3014 && mclass == MODE_INT);
3015
3016 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3017 unsignedp);
3018
3019 /* If we are generating clz using wider mode, adjust the
3020 result. Similarly for clrsb. */
3021 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3022 && temp != 0)
3023 temp = expand_binop
3024 (wider_mode, sub_optab, temp,
3025 gen_int_mode (GET_MODE_PRECISION (wider_mode)
3026 - GET_MODE_PRECISION (mode),
3027 wider_mode),
3028 target, true, OPTAB_DIRECT);
3029
3030 /* Likewise for bswap. */
3031 if (unoptab == bswap_optab && temp != 0)
3032 {
3033 gcc_assert (GET_MODE_PRECISION (wider_mode)
3034 == GET_MODE_BITSIZE (wider_mode)
3035 && GET_MODE_PRECISION (mode)
3036 == GET_MODE_BITSIZE (mode));
3037
3038 temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
3039 GET_MODE_BITSIZE (wider_mode)
3040 - GET_MODE_BITSIZE (mode),
3041 NULL_RTX, true);
3042 }
3043
3044 if (temp)
3045 {
3046 if (mclass != MODE_INT)
3047 {
3048 if (target == 0)
3049 target = gen_reg_rtx (mode);
3050 convert_move (target, temp, 0);
3051 return target;
3052 }
3053 else
3054 return gen_lowpart (mode, temp);
3055 }
3056 else
3057 delete_insns_since (last);
3058 }
3059 }
3060 }
3061
3062 /* One final attempt at implementing negation via subtraction,
3063 this time allowing widening of the operand. */
3064 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3065 {
3066 rtx temp;
3067 temp = expand_binop (mode,
3068 unoptab == negv_optab ? subv_optab : sub_optab,
3069 CONST0_RTX (mode), op0,
3070 target, unsignedp, OPTAB_LIB_WIDEN);
3071 if (temp)
3072 return temp;
3073 }
3074
3075 return 0;
3076 }
3077 \f
3078 /* Emit code to compute the absolute value of OP0, with result to
3079 TARGET if convenient. (TARGET may be 0.) The return value says
3080 where the result actually is to be found.
3081
3082 MODE is the mode of the operand; the mode of the result is
3083    different but can be deduced from MODE.  */
3086
3087 rtx
3088 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3089 int result_unsignedp)
3090 {
3091 rtx temp;
3092
3093 if (GET_MODE_CLASS (mode) != MODE_INT
3094 || ! flag_trapv)
3095 result_unsignedp = 1;
3096
3097 /* First try to do it with a special abs instruction. */
3098 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3099 op0, target, 0);
3100 if (temp != 0)
3101 return temp;
3102
3103 /* For floating point modes, try clearing the sign bit. */
3104 if (SCALAR_FLOAT_MODE_P (mode))
3105 {
3106 temp = expand_absneg_bit (ABS, mode, op0, target);
3107 if (temp)
3108 return temp;
3109 }
3110
3111 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3112 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3113 && !HONOR_SIGNED_ZEROS (mode))
3114 {
3115 rtx_insn *last = get_last_insn ();
3116
3117 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3118 op0, NULL_RTX, 0);
3119 if (temp != 0)
3120 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3121 OPTAB_WIDEN);
3122
3123 if (temp != 0)
3124 return temp;
3125
3126 delete_insns_since (last);
3127 }
3128
3129 /* If this machine has expensive jumps, we can do integer absolute
3130 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3131 where W is the width of MODE. */
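  /* Worked example (illustrative), W = 32, x = -5: the arithmetic shift
     gives x >> 31 == -1, and (x ^ -1) - (-1) == ~x + 1 == 5; for
     nonnegative X the shift gives 0 and the expression reduces to X.  */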
3132
3133 if (GET_MODE_CLASS (mode) == MODE_INT
3134 && BRANCH_COST (optimize_insn_for_speed_p (),
3135 false) >= 2)
3136 {
3137 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3138 GET_MODE_PRECISION (mode) - 1,
3139 NULL_RTX, 0);
3140
3141 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3142 OPTAB_LIB_WIDEN);
3143 if (temp != 0)
3144 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3145 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3146
3147 if (temp != 0)
3148 return temp;
3149 }
3150
3151 return NULL_RTX;
3152 }
3153
3154 rtx
3155 expand_abs (machine_mode mode, rtx op0, rtx target,
3156 int result_unsignedp, int safe)
3157 {
3158 rtx temp;
3159 rtx_code_label *op1;
3160
3161 if (GET_MODE_CLASS (mode) != MODE_INT
3162 || ! flag_trapv)
3163 result_unsignedp = 1;
3164
3165 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3166 if (temp != 0)
3167 return temp;
3168
3169 /* If that does not win, use conditional jump and negate. */
3170
3171   /* It is safe to use the target if it is the same as the source,
3172      provided it is also a pseudo register.  */
3173 if (op0 == target && REG_P (op0)
3174 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3175 safe = 1;
3176
3177 op1 = gen_label_rtx ();
3178 if (target == 0 || ! safe
3179 || GET_MODE (target) != mode
3180 || (MEM_P (target) && MEM_VOLATILE_P (target))
3181 || (REG_P (target)
3182 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3183 target = gen_reg_rtx (mode);
3184
3185 emit_move_insn (target, op0);
3186 NO_DEFER_POP;
3187
3188 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3189 NULL_RTX, NULL, op1, -1);
3190
3191 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3192 target, target, 0);
3193 if (op0 != target)
3194 emit_move_insn (target, op0);
3195 emit_label (op1);
3196 OK_DEFER_POP;
3197 return target;
3198 }
3199
3200 /* Emit code to compute the one's complement absolute value of OP0
3201 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3202 (TARGET may be NULL_RTX.) The return value says where the result
3203 actually is to be found.
3204
3205 MODE is the mode of the operand; the mode of the result is
3206 different but can be deduced from MODE. */
3207
3208 rtx
3209 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3210 {
3211 rtx temp;
3212
3213 /* Not applicable for floating point modes. */
3214 if (FLOAT_MODE_P (mode))
3215 return NULL_RTX;
3216
3217 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3218 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3219 {
3220 rtx_insn *last = get_last_insn ();
3221
3222 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3223 if (temp != 0)
3224 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3225 OPTAB_WIDEN);
3226
3227 if (temp != 0)
3228 return temp;
3229
3230 delete_insns_since (last);
3231 }
3232
3233 /* If this machine has expensive jumps, we can do one's complement
3234 absolute value of X as (((signed) x >> (W-1)) ^ x). */
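  /* E.g. (illustrative), W = 32: for negative X this computes
     x ^ -1 == ~x, and for nonnegative X it computes x ^ 0 == x.  */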
3235
3236 if (GET_MODE_CLASS (mode) == MODE_INT
3237 && BRANCH_COST (optimize_insn_for_speed_p (),
3238 false) >= 2)
3239 {
3240 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3241 GET_MODE_PRECISION (mode) - 1,
3242 NULL_RTX, 0);
3243
3244 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3245 OPTAB_LIB_WIDEN);
3246
3247 if (temp != 0)
3248 return temp;
3249 }
3250
3251 return NULL_RTX;
3252 }
3253
3254 /* A subroutine of expand_copysign, perform the copysign operation using the
3255 abs and neg primitives advertised to exist on the target. The assumption
3256 is that we have a split register file, and leaving op0 in fp registers,
3257 and not playing with subregs so much, will help the register allocator. */
3258
3259 static rtx
3260 expand_copysign_absneg (machine_mode mode, rtx op0, rtx op1, rtx target,
3261 int bitpos, bool op0_is_abs)
3262 {
3263 machine_mode imode;
3264 enum insn_code icode;
3265 rtx sign;
3266 rtx_code_label *label;
3267
3268 if (target == op1)
3269 target = NULL_RTX;
3270
3271 /* Check if the back end provides an insn that handles signbit for the
3272 argument's mode. */
3273 icode = optab_handler (signbit_optab, mode);
3274 if (icode != CODE_FOR_nothing)
3275 {
3276 imode = insn_data[(int) icode].operand[0].mode;
3277 sign = gen_reg_rtx (imode);
3278 emit_unop_insn (icode, sign, op1, UNKNOWN);
3279 }
3280 else
3281 {
3282 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3283 {
3284 imode = int_mode_for_mode (mode);
3285 if (imode == BLKmode)
3286 return NULL_RTX;
3287 op1 = gen_lowpart (imode, op1);
3288 }
3289 else
3290 {
3291 int word;
3292
3293 imode = word_mode;
3294 if (FLOAT_WORDS_BIG_ENDIAN)
3295 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3296 else
3297 word = bitpos / BITS_PER_WORD;
3298 bitpos = bitpos % BITS_PER_WORD;
3299 op1 = operand_subword_force (op1, word, mode);
3300 }
3301
3302 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3303 sign = expand_binop (imode, and_optab, op1,
3304 immed_wide_int_const (mask, imode),
3305 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3306 }
3307
3308 if (!op0_is_abs)
3309 {
3310 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3311 if (op0 == NULL)
3312 return NULL_RTX;
3313 target = op0;
3314 }
3315 else
3316 {
3317 if (target == NULL_RTX)
3318 target = copy_to_reg (op0);
3319 else
3320 emit_move_insn (target, op0);
3321 }
3322
3323 label = gen_label_rtx ();
3324 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3325
3326 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3327 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3328 else
3329 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3330 if (op0 != target)
3331 emit_move_insn (target, op0);
3332
3333 emit_label (label);
3334
3335 return target;
3336 }
3337
3338
3339 /* A subroutine of expand_copysign, perform the entire copysign operation
3340 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3341 is true if op0 is known to have its sign bit clear. */
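/* In bit terms (illustrative), with MASK having only the sign bit set:
   copysign (x, y) == (x & ~MASK) | (y & MASK) on the underlying integer
   representation.  */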
3342
3343 static rtx
3344 expand_copysign_bit (machine_mode mode, rtx op0, rtx op1, rtx target,
3345 int bitpos, bool op0_is_abs)
3346 {
3347 machine_mode imode;
3348 int word, nwords, i;
3349 rtx temp;
3350 rtx_insn *insns;
3351
3352 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3353 {
3354 imode = int_mode_for_mode (mode);
3355 if (imode == BLKmode)
3356 return NULL_RTX;
3357 word = 0;
3358 nwords = 1;
3359 }
3360 else
3361 {
3362 imode = word_mode;
3363
3364 if (FLOAT_WORDS_BIG_ENDIAN)
3365 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3366 else
3367 word = bitpos / BITS_PER_WORD;
3368 bitpos = bitpos % BITS_PER_WORD;
3369 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3370 }
3371
3372 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3373
3374 if (target == 0
3375 || target == op0
3376 || target == op1
3377 || (nwords > 1 && !valid_multiword_target_p (target)))
3378 target = gen_reg_rtx (mode);
3379
3380 if (nwords > 1)
3381 {
3382 start_sequence ();
3383
3384 for (i = 0; i < nwords; ++i)
3385 {
3386 rtx targ_piece = operand_subword (target, i, 1, mode);
3387 rtx op0_piece = operand_subword_force (op0, i, mode);
3388
3389 if (i == word)
3390 {
3391 if (!op0_is_abs)
3392 op0_piece
3393 = expand_binop (imode, and_optab, op0_piece,
3394 immed_wide_int_const (~mask, imode),
3395 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3396 op1 = expand_binop (imode, and_optab,
3397 operand_subword_force (op1, i, mode),
3398 immed_wide_int_const (mask, imode),
3399 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3400
3401 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3402 targ_piece, 1, OPTAB_LIB_WIDEN);
3403 if (temp != targ_piece)
3404 emit_move_insn (targ_piece, temp);
3405 }
3406 else
3407 emit_move_insn (targ_piece, op0_piece);
3408 }
3409
3410 insns = get_insns ();
3411 end_sequence ();
3412
3413 emit_insn (insns);
3414 }
3415 else
3416 {
3417 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3418 immed_wide_int_const (mask, imode),
3419 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3420
3421 op0 = gen_lowpart (imode, op0);
3422 if (!op0_is_abs)
3423 op0 = expand_binop (imode, and_optab, op0,
3424 immed_wide_int_const (~mask, imode),
3425 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3426
3427 temp = expand_binop (imode, ior_optab, op0, op1,
3428 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3429 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3430 }
3431
3432 return target;
3433 }
3434
3435 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3436 scalar floating point mode. Return NULL if we do not know how to
3437 expand the operation inline. */
3438
3439 rtx
3440 expand_copysign (rtx op0, rtx op1, rtx target)
3441 {
3442 machine_mode mode = GET_MODE (op0);
3443 const struct real_format *fmt;
3444 bool op0_is_abs;
3445 rtx temp;
3446
3447 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3448 gcc_assert (GET_MODE (op1) == mode);
3449
3450 /* First try to do it with a special instruction. */
3451 temp = expand_binop (mode, copysign_optab, op0, op1,
3452 target, 0, OPTAB_DIRECT);
3453 if (temp)
3454 return temp;
3455
3456 fmt = REAL_MODE_FORMAT (mode);
3457 if (fmt == NULL || !fmt->has_signed_zero)
3458 return NULL_RTX;
3459
3460 op0_is_abs = false;
3461 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3462 {
3463 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3464 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3465 op0_is_abs = true;
3466 }
3467
3468 if (fmt->signbit_ro >= 0
3469 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3470 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3471 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3472 {
3473 temp = expand_copysign_absneg (mode, op0, op1, target,
3474 fmt->signbit_ro, op0_is_abs);
3475 if (temp)
3476 return temp;
3477 }
3478
3479 if (fmt->signbit_rw < 0)
3480 return NULL_RTX;
3481 return expand_copysign_bit (mode, op0, op1, target,
3482 fmt->signbit_rw, op0_is_abs);
3483 }
3484 \f
3485 /* Generate an instruction whose insn-code is INSN_CODE,
3486 with two operands: an output TARGET and an input OP0.
3487 TARGET *must* be nonzero, and the output is always stored there.
3488 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3489 the value that is stored into TARGET.
3490
3491 Return false if expansion failed. */
3492
3493 bool
3494 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3495 enum rtx_code code)
3496 {
3497 struct expand_operand ops[2];
3498 rtx_insn *pat;
3499
3500 create_output_operand (&ops[0], target, GET_MODE (target));
3501 create_input_operand (&ops[1], op0, GET_MODE (op0));
3502 pat = maybe_gen_insn (icode, 2, ops);
3503 if (!pat)
3504 return false;
3505
3506 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3507 && code != UNKNOWN)
3508 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3509
3510 emit_insn (pat);
3511
3512 if (ops[0].value != target)
3513 emit_move_insn (target, ops[0].value);
3514 return true;
3515 }
3516 /* Generate an instruction whose insn-code is INSN_CODE,
3517 with two operands: an output TARGET and an input OP0.
3518 TARGET *must* be nonzero, and the output is always stored there.
3519 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3520 the value that is stored into TARGET. */
3521
3522 void
3523 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3524 {
3525 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3526 gcc_assert (ok);
3527 }
3528 \f
3529 struct no_conflict_data
3530 {
3531 rtx target;
3532 rtx_insn *first, *insn;
3533 bool must_stay;
3534 };
3535
3536 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3537 the currently examined clobber / store has to stay in the list of
3538 insns that constitute the actual libcall block. */
3539 static void
3540 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3541 {
3542   struct no_conflict_data *p = (struct no_conflict_data *) p0;
3543
3544   /* If this insn directly contributes to setting the target, it must stay.  */
3545 if (reg_overlap_mentioned_p (p->target, dest))
3546 p->must_stay = true;
3547 /* If we haven't committed to keeping any other insns in the list yet,
3548 there is nothing more to check. */
3549 else if (p->insn == p->first)
3550 return;
3551 /* If this insn sets / clobbers a register that feeds one of the insns
3552 already in the list, this insn has to stay too. */
3553 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3554 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3555 || reg_used_between_p (dest, p->first, p->insn)
3556 /* Likewise if this insn depends on a register set by a previous
3557 insn in the list, or if it sets a result (presumably a hard
3558 register) that is set or clobbered by a previous insn.
3559 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3560 SET_DEST perform the former check on the address, and the latter
3561 check on the MEM. */
3562 || (GET_CODE (set) == SET
3563 && (modified_in_p (SET_SRC (set), p->first)
3564 || modified_in_p (SET_DEST (set), p->first)
3565 || modified_between_p (SET_SRC (set), p->first, p->insn)
3566 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3567 p->must_stay = true;
3568 }
3569
3570 \f
3571 /* Emit code to make a call to a constant function or a library call.
3572
3573 INSNS is a list containing all insns emitted in the call.
3574 These insns leave the result in RESULT. Our job is to copy RESULT
3575 to TARGET, which is logically equivalent to EQUIV.
3576
3577 We first emit any insns that set a pseudo on the assumption that these are
3578 loading constants into registers; doing so allows them to be safely cse'ed
3579 between blocks. Then we emit all the other insns in the block, followed by
3580 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3581 note with an operand of EQUIV. */
3582
3583 static void
3584 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3585 bool equiv_may_trap)
3586 {
3587 rtx final_dest = target;
3588 rtx_insn *next, *last, *insn;
3589
3590 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3591 into a MEM later. Protect the libcall block from this change. */
3592 if (! REG_P (target) || REG_USERVAR_P (target))
3593 target = gen_reg_rtx (GET_MODE (target));
3594
3595 /* If we're using non-call exceptions, a libcall corresponding to an
3596 operation that may trap may also trap. */
3597 /* ??? See the comment in front of make_reg_eh_region_note. */
3598 if (cfun->can_throw_non_call_exceptions
3599 && (equiv_may_trap || may_trap_p (equiv)))
3600 {
3601 for (insn = insns; insn; insn = NEXT_INSN (insn))
3602 if (CALL_P (insn))
3603 {
3604 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3605 if (note)
3606 {
3607 int lp_nr = INTVAL (XEXP (note, 0));
3608 if (lp_nr == 0 || lp_nr == INT_MIN)
3609 remove_note (insn, note);
3610 }
3611 }
3612 }
3613 else
3614 {
3615 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3616 reg note to indicate that this call cannot throw or execute a nonlocal
3617 goto (unless there is already a REG_EH_REGION note, in which case
3618 we update it). */
3619 for (insn = insns; insn; insn = NEXT_INSN (insn))
3620 if (CALL_P (insn))
3621 make_reg_eh_region_note_nothrow_nononlocal (insn);
3622 }
3623
3624 /* First emit all insns that set pseudos. Remove them from the list as
3625 we go. Avoid insns that set pseudos which were referenced in previous
3626 insns. These can be generated by move_by_pieces, for example,
3627 to update an address. Similarly, avoid insns that reference things
3628 set in previous insns. */
3629
3630 for (insn = insns; insn; insn = next)
3631 {
3632 rtx set = single_set (insn);
3633
3634 next = NEXT_INSN (insn);
3635
3636 if (set != 0 && REG_P (SET_DEST (set))
3637 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3638 {
3639 struct no_conflict_data data;
3640
3641 data.target = const0_rtx;
3642 data.first = insns;
3643 data.insn = insn;
3644 data.must_stay = 0;
3645 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3646 if (! data.must_stay)
3647 {
3648 if (PREV_INSN (insn))
3649 SET_NEXT_INSN (PREV_INSN (insn)) = next;
3650 else
3651 insns = next;
3652
3653 if (next)
3654 SET_PREV_INSN (next) = PREV_INSN (insn);
3655
3656 add_insn (insn);
3657 }
3658 }
3659
3660 /* Some ports use a loop to copy large arguments onto the stack.
3661 Don't move anything outside such a loop. */
3662 if (LABEL_P (insn))
3663 break;
3664 }
3665
3666 /* Write the remaining insns followed by the final copy. */
3667 for (insn = insns; insn; insn = next)
3668 {
3669 next = NEXT_INSN (insn);
3670
3671 add_insn (insn);
3672 }
3673
3674 last = emit_move_insn (target, result);
3675 if (equiv)
3676 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3677
3678 if (final_dest != target)
3679 emit_move_insn (final_dest, target);
3680 }
3681
3682 void
3683 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3684 {
3685 emit_libcall_block_1 (safe_as_a <rtx_insn *> (insns),
3686 target, result, equiv, false);
3687 }
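
/* Usage sketch (assumed caller, not from this file): libcall expansions
   typically wrap the call in a sequence and describe its semantics via
   EQUIV, in the same way prepare_float_lib_cmp does further below:

     start_sequence ();
     value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                      mode, 2, op0, mode, op1, mode);
     insns = get_insns ();
     end_sequence ();
     emit_libcall_block (insns, target, value,
                         gen_rtx_fmt_ee (code, mode, op0, op1));  */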
3688 \f
3689 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3690 PURPOSE describes how this comparison will be used. CODE is the rtx
3691 comparison code we will be using.
3692
3693 ??? Actually, CODE is slightly weaker than that. A target is still
3694 required to implement all of the normal bcc operations, but not
3695 required to implement all (or any) of the unordered bcc operations. */
3696
3697 int
3698 can_compare_p (enum rtx_code code, machine_mode mode,
3699 enum can_compare_purpose purpose)
3700 {
3701 rtx test;
3702 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3703 do
3704 {
3705 enum insn_code icode;
3706
3707 if (purpose == ccp_jump
3708 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3709 && insn_operand_matches (icode, 0, test))
3710 return 1;
3711 if (purpose == ccp_store_flag
3712 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3713 && insn_operand_matches (icode, 1, test))
3714 return 1;
3715 if (purpose == ccp_cmov
3716 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3717 return 1;
3718
3719 mode = GET_MODE_WIDER_MODE (mode);
3720 PUT_MODE (test, mode);
3721 }
3722 while (mode != VOIDmode);
3723
3724 return 0;
3725 }
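
/* For example (sketch): before emitting a DImode conditional branch a
   caller can ask

     if (can_compare_p (LTU, DImode, ccp_jump))
       ...

   which checks whether a cbranch pattern accepts (ltu:DI x y) in DImode
   or in some wider integer mode.  */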
3726
3727 /* This function is called when we are going to emit a compare instruction that
3728 compares the values found in X and Y, using the rtl operator COMPARISON.
3729
3730 If they have mode BLKmode, then SIZE specifies the size of both operands.
3731
3732 UNSIGNEDP nonzero says that the operands are unsigned;
3733 this matters if they need to be widened (as given by METHODS).
3734
3735 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3736 if we failed to produce one.
3737
3738 *PMODE is the mode of the inputs (in case they are const_int).
3739
3740 This function performs all the setup necessary so that the caller only has
3741 to emit a single comparison insn. This setup can involve doing a BLKmode
3742 comparison or emitting a library call to perform the comparison if no insn
3743 is available to handle it.
3744 The values which are passed in through pointers can be modified; the caller
3745 should perform the comparison on the modified values. Constant
3746 comparisons must have already been folded. */
3747
3748 static void
3749 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3750 int unsignedp, enum optab_methods methods,
3751 rtx *ptest, machine_mode *pmode)
3752 {
3753 machine_mode mode = *pmode;
3754 rtx libfunc, test;
3755 machine_mode cmp_mode;
3756 enum mode_class mclass;
3757
3758 /* The other methods are not needed. */
3759 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3760 || methods == OPTAB_LIB_WIDEN);
3761
3762 /* If we are optimizing, force expensive constants into a register. */
3763 if (CONSTANT_P (x) && optimize
3764 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
3765 > COSTS_N_INSNS (1)))
3766 x = force_reg (mode, x);
3767
3768 if (CONSTANT_P (y) && optimize
3769 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
3770 > COSTS_N_INSNS (1)))
3771 y = force_reg (mode, y);
3772
3773 #if HAVE_cc0
3774 /* Make sure we have a canonical comparison. The RTL
3775 documentation states that canonical comparisons are required only
3776 for targets which have cc0. */
3777 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3778 #endif
3779
3780 /* Don't let both operands fail to indicate the mode. */
3781 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3782 x = force_reg (mode, x);
3783 if (mode == VOIDmode)
3784 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3785
3786 /* Handle all BLKmode compares. */
3787
3788 if (mode == BLKmode)
3789 {
3790 machine_mode result_mode;
3791 enum insn_code cmp_code;
3792 rtx result;
3793 rtx opalign
3794 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3795
3796 gcc_assert (size);
3797
3798 /* Try to use a memory block compare insn - either cmpstr
3799 or cmpmem will do. */
3800 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3801 cmp_mode != VOIDmode;
3802 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3803 {
3804 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3805 if (cmp_code == CODE_FOR_nothing)
3806 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3807 if (cmp_code == CODE_FOR_nothing)
3808 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3809 if (cmp_code == CODE_FOR_nothing)
3810 continue;
3811
3812 /* Must make sure the size fits the insn's mode. */
3813 if ((CONST_INT_P (size)
3814 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3815 || (GET_MODE_BITSIZE (GET_MODE (size))
3816 > GET_MODE_BITSIZE (cmp_mode)))
3817 continue;
3818
3819 result_mode = insn_data[cmp_code].operand[0].mode;
3820 result = gen_reg_rtx (result_mode);
3821 size = convert_to_mode (cmp_mode, size, 1);
3822 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3823
3824 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
3825 *pmode = result_mode;
3826 return;
3827 }
3828
3829 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
3830 goto fail;
3831
3832 /* Otherwise call a library function. */
3833 result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
3834
3835 x = result;
3836 y = const0_rtx;
3837 mode = TYPE_MODE (integer_type_node);
3838 methods = OPTAB_LIB_WIDEN;
3839 unsignedp = false;
3840 }
3841
3842 /* Don't allow operands to the compare to trap, as that can put the
3843 compare and branch in different basic blocks. */
3844 if (cfun->can_throw_non_call_exceptions)
3845 {
3846 if (may_trap_p (x))
3847 x = force_reg (mode, x);
3848 if (may_trap_p (y))
3849 y = force_reg (mode, y);
3850 }
3851
3852 if (GET_MODE_CLASS (mode) == MODE_CC)
3853 {
3854 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
3855 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3856 gcc_assert (icode != CODE_FOR_nothing
3857 && insn_operand_matches (icode, 0, test));
3858 *ptest = test;
3859 return;
3860 }
3861
3862 mclass = GET_MODE_CLASS (mode);
3863 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3864 cmp_mode = mode;
3865 do
3866 {
3867 enum insn_code icode;
3868 icode = optab_handler (cbranch_optab, cmp_mode);
3869 if (icode != CODE_FOR_nothing
3870 && insn_operand_matches (icode, 0, test))
3871 {
3872 rtx_insn *last = get_last_insn ();
3873 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
3874 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
3875 if (op0 && op1
3876 && insn_operand_matches (icode, 1, op0)
3877 && insn_operand_matches (icode, 2, op1))
3878 {
3879 XEXP (test, 0) = op0;
3880 XEXP (test, 1) = op1;
3881 *ptest = test;
3882 *pmode = cmp_mode;
3883 return;
3884 }
3885 delete_insns_since (last);
3886 }
3887
3888 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
3889 break;
3890 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
3891 }
3892 while (cmp_mode != VOIDmode);
3893
3894 if (methods != OPTAB_LIB_WIDEN)
3895 goto fail;
3896
3897 if (!SCALAR_FLOAT_MODE_P (mode))
3898 {
3899 rtx result;
3900 machine_mode ret_mode;
3901
3902 /* Handle a libcall just for the mode we are using. */
3903 libfunc = optab_libfunc (cmp_optab, mode);
3904 gcc_assert (libfunc);
3905
3906 /* If we want unsigned, and this mode has a distinct unsigned
3907 comparison routine, use that. */
3908 if (unsignedp)
3909 {
3910 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
3911 if (ulibfunc)
3912 libfunc = ulibfunc;
3913 }
3914
3915 ret_mode = targetm.libgcc_cmp_return_mode ();
3916 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3917 ret_mode, 2, x, mode, y, mode);
3918
3919 /* There are two kinds of comparison routines. Biased routines
3920 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3921 of gcc expect that the comparison operation is equivalent
3922 to the modified comparison. For signed comparisons compare the
3923 result against 1 in the biased case, and zero in the unbiased
3924 case. For unsigned comparisons always compare against 1 after
3925 biasing the unbiased result by adding 1. This gives us a way to
3926 represent LTU.
3927 The comparisons in the fixed-point helper library are always
3928 biased. */
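      /* Worked example (illustrative): testing LTU against an unbiased
         routine returning -1/0/1 becomes (result + 1) <u 1, which holds
         exactly when result is -1; against a biased routine returning
         0/1/2 it is simply result <u 1, i.e. result == 0.  */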
3929 x = result;
3930 y = const1_rtx;
3931
3932 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
3933 {
3934 if (unsignedp)
3935 x = plus_constant (ret_mode, result, 1);
3936 else
3937 y = const0_rtx;
3938 }
3939
3940 *pmode = ret_mode;
3941 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
3942 ptest, pmode);
3943 }
3944 else
3945 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
3946
3947 return;
3948
3949 fail:
3950 *ptest = NULL_RTX;
3951 }
3952
3953 /* Before emitting an insn with code ICODE, make sure that X, which is going
3954 to be used for operand OPNUM of the insn, is converted from mode MODE to
3955 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3956 that it is accepted by the operand predicate. Return the new value. */
3957
3958 rtx
3959 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
3960 machine_mode wider_mode, int unsignedp)
3961 {
3962 if (mode != wider_mode)
3963 x = convert_modes (wider_mode, mode, x, unsignedp);
3964
3965 if (!insn_operand_matches (icode, opnum, x))
3966 {
3967 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
3968 if (reload_completed)
3969 return NULL_RTX;
3970 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
3971 return NULL_RTX;
3972 x = copy_to_mode_reg (op_mode, x);
3973 }
3974
3975 return x;
3976 }
3977
3978 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3979 we can do the branch. */
3980
3981 static void
3982 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label, int prob)
3983 {
3984 machine_mode optab_mode;
3985 enum mode_class mclass;
3986 enum insn_code icode;
3987 rtx_insn *insn;
3988
3989 mclass = GET_MODE_CLASS (mode);
3990 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
3991 icode = optab_handler (cbranch_optab, optab_mode);
3992
3993 gcc_assert (icode != CODE_FOR_nothing);
3994 gcc_assert (insn_operand_matches (icode, 0, test));
3995 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
3996 XEXP (test, 1), label));
3997 if (prob != -1
3998 && profile_status_for_fn (cfun) != PROFILE_ABSENT
3999 && insn
4000 && JUMP_P (insn)
4001 && any_condjump_p (insn)
4002 && !find_reg_note (insn, REG_BR_PROB, 0))
4003 add_int_reg_note (insn, REG_BR_PROB, prob);
4004 }
4005
4006 /* Generate code to compare X with Y so that the condition codes are
4007 set and to jump to LABEL if the condition is true. If X is a
4008 constant and Y is not a constant, then the comparison is swapped to
4009 ensure that the comparison RTL has the canonical form.
4010
4011 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4012 need to be widened. UNSIGNEDP is also used to select the proper
4013 branch condition code.
4014
4015 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4016
4017 MODE is the mode of the inputs (in case they are const_int).
4018
4019 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4020 It may be converted into an unsigned variant based on
4021 UNSIGNEDP to select a proper jump instruction.
4022
4023 PROB is the probability of jumping to LABEL. */
4024
4025 void
4026 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4027 machine_mode mode, int unsignedp, rtx label,
4028 int prob)
4029 {
4030 rtx op0 = x, op1 = y;
4031 rtx test;
4032
4033 /* Swap operands and condition to ensure canonical RTL. */
4034 if (swap_commutative_operands_p (x, y)
4035 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4036 {
4037 op0 = y, op1 = x;
4038 comparison = swap_condition (comparison);
4039 }
4040
4041 /* If OP0 is still a constant, then both X and Y must be constants
4042 or the opposite comparison is not supported. Force X into a register
4043 to create canonical RTL. */
4044 if (CONSTANT_P (op0))
4045 op0 = force_reg (mode, op0);
4046
4047 if (unsignedp)
4048 comparison = unsigned_condition (comparison);
4049
4050 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4051 &test, &mode);
4052 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4053 }
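
/* Usage sketch (hypothetical operands): to branch to LABEL when
   unsigned SImode X is below Y, a caller might write

     emit_cmp_and_jump_insns (x, y, LT, NULL_RTX, SImode, 1, label, -1);

   UNSIGNEDP == 1 turns LT into LTU before the comparison is prepared,
   and PROB == -1 means no branch-probability note is attached.  */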
4054
4055 \f
4056 /* Emit a library call comparison between floating point X and Y.
4057 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4058
4059 static void
4060 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4061 rtx *ptest, machine_mode *pmode)
4062 {
4063 enum rtx_code swapped = swap_condition (comparison);
4064 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4065 machine_mode orig_mode = GET_MODE (x);
4066 machine_mode mode, cmp_mode;
4067 rtx true_rtx, false_rtx;
4068 rtx value, target, equiv;
4069 rtx_insn *insns;
4070 rtx libfunc = 0;
4071 bool reversed_p = false;
4072 cmp_mode = targetm.libgcc_cmp_return_mode ();
4073
4074 for (mode = orig_mode;
4075 mode != VOIDmode;
4076 mode = GET_MODE_WIDER_MODE (mode))
4077 {
4078 if (code_to_optab (comparison)
4079 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4080 break;
4081
4082 if (code_to_optab (swapped)
4083 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4084 {
4085 std::swap (x, y);
4086 comparison = swapped;
4087 break;
4088 }
4089
4090 if (code_to_optab (reversed)
4091 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4092 {
4093 comparison = reversed;
4094 reversed_p = true;
4095 break;
4096 }
4097 }
4098
4099 gcc_assert (mode != VOIDmode);
4100
4101 if (mode != orig_mode)
4102 {
4103 x = convert_to_mode (mode, x, 0);
4104 y = convert_to_mode (mode, y, 0);
4105 }
4106
4107 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4108 the RTL. This allows the RTL optimizers to delete the libcall if the
4109 condition can be determined at compile-time. */
4110 if (comparison == UNORDERED
4111 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4112 {
4113 true_rtx = const_true_rtx;
4114 false_rtx = const0_rtx;
4115 }
4116 else
4117 {
4118 switch (comparison)
4119 {
4120 case EQ:
4121 true_rtx = const0_rtx;
4122 false_rtx = const_true_rtx;
4123 break;
4124
4125 case NE:
4126 true_rtx = const_true_rtx;
4127 false_rtx = const0_rtx;
4128 break;
4129
4130 case GT:
4131 true_rtx = const1_rtx;
4132 false_rtx = const0_rtx;
4133 break;
4134
4135 case GE:
4136 true_rtx = const0_rtx;
4137 false_rtx = constm1_rtx;
4138 break;
4139
4140 case LT:
4141 true_rtx = constm1_rtx;
4142 false_rtx = const0_rtx;
4143 break;
4144
4145 case LE:
4146 true_rtx = const0_rtx;
4147 false_rtx = const1_rtx;
4148 break;
4149
4150 default:
4151 gcc_unreachable ();
4152 }
4153 }
4154
4155 if (comparison == UNORDERED)
4156 {
4157 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4158 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4159 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4160 temp, const_true_rtx, equiv);
4161 }
4162 else
4163 {
4164 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4165 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4166 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4167 equiv, true_rtx, false_rtx);
4168 }
4169
4170 start_sequence ();
4171 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4172 cmp_mode, 2, x, mode, y, mode);
4173 insns = get_insns ();
4174 end_sequence ();
4175
4176 target = gen_reg_rtx (cmp_mode);
4177 emit_libcall_block (insns, target, value, equiv);
4178
4179 if (comparison == UNORDERED
4180 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4181 || reversed_p)
4182 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4183 else
4184 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4185
4186 *pmode = cmp_mode;
4187 }
4188 \f
4189 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4190
4191 void
4192 emit_indirect_jump (rtx loc)
4193 {
4194 if (!targetm.have_indirect_jump ())
4195 sorry ("indirect jumps are not available on this target");
4196 else
4197 {
4198 struct expand_operand ops[1];
4199 create_address_operand (&ops[0], loc);
4200 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4201 emit_barrier ();
4202 }
4203 }
4204 \f
4205
4206 /* Emit a conditional move instruction if the machine supports one for that
4207 condition and machine mode.
4208
4209 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4210 the mode to use should they be constants. If it is VOIDmode, they cannot
4211 both be constants.
4212
4213 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4214 should be stored there. MODE is the mode to use should they be constants.
4215 If it is VOIDmode, they cannot both be constants.
4216
4217 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4218 is not supported. */
4219
4220 rtx
4221 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4222 machine_mode cmode, rtx op2, rtx op3,
4223 machine_mode mode, int unsignedp)
4224 {
4225 rtx comparison;
4226 rtx_insn *last;
4227 enum insn_code icode;
4228 enum rtx_code reversed;
4229
4230 /* If the two source operands are identical, that's just a move. */
4231
4232 if (rtx_equal_p (op2, op3))
4233 {
4234 if (!target)
4235 target = gen_reg_rtx (mode);
4236
4237 emit_move_insn (target, op3);
4238 return target;
4239 }
4240
4241 /* If one operand is constant, make it the second one. Only do this
4242 if the other operand is not constant as well. */
4243
4244 if (swap_commutative_operands_p (op0, op1))
4245 {
4246 std::swap (op0, op1);
4247 code = swap_condition (code);
4248 }
4249
4250 /* get_condition will prefer to generate LT and GT even if the old
4251 comparison was against zero, so undo that canonicalization here since
4252 comparisons against zero are cheaper. */
4253 if (code == LT && op1 == const1_rtx)
4254 code = LE, op1 = const0_rtx;
4255 else if (code == GT && op1 == constm1_rtx)
4256 code = GE, op1 = const0_rtx;
4257
4258 if (cmode == VOIDmode)
4259 cmode = GET_MODE (op0);
4260
4261 if (swap_commutative_operands_p (op2, op3)
4262 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4263 != UNKNOWN))
4264 {
4265 std::swap (op2, op3);
4266 code = reversed;
4267 }
4268
4269 if (mode == VOIDmode)
4270 mode = GET_MODE (op2);
4271
4272 icode = direct_optab_handler (movcc_optab, mode);
4273
4274 if (icode == CODE_FOR_nothing)
4275 return 0;
4276
4277 if (!target)
4278 target = gen_reg_rtx (mode);
4279
4280 code = unsignedp ? unsigned_condition (code) : code;
4281 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4282
4283 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4284 return NULL and let the caller figure out how best to deal with this
4285 situation. */
4286 if (!COMPARISON_P (comparison))
4287 return NULL_RTX;
4288
4289 saved_pending_stack_adjust save;
4290 save_pending_stack_adjust (&save);
4291 last = get_last_insn ();
4292 do_pending_stack_adjust ();
4293 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4294 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4295 &comparison, &cmode);
4296 if (comparison)
4297 {
4298 struct expand_operand ops[4];
4299
4300 create_output_operand (&ops[0], target, mode);
4301 create_fixed_operand (&ops[1], comparison);
4302 create_input_operand (&ops[2], op2, mode);
4303 create_input_operand (&ops[3], op3, mode);
4304 if (maybe_expand_insn (icode, 4, ops))
4305 {
4306 if (ops[0].value != target)
4307 convert_move (target, ops[0].value, false);
4308 return target;
4309 }
4310 }
4311 delete_insns_since (last);
4312 restore_pending_stack_adjust (&save);
4313 return NULL_RTX;
4314 }
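
/* For illustration (a sketch, assuming a and b are SImode pseudos):
   MAX (a, b) can be attempted as

     rtx res = emit_conditional_move (target, GT, a, b, SImode,
                                      a, b, SImode, 0);

   with the caller falling back to a compare-and-branch sequence when
   res is NULL_RTX.  */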
4315
4316
4317 /* Emit a conditional negate or bitwise complement using the
4318 negcc or notcc optabs if available. Return NULL_RTX if such operations
4319 are not available. Otherwise return the RTX holding the result.
4320 TARGET is the desired destination of the result. COMP is the comparison
4321 on which to negate. If COND is true move into TARGET the negation
4322 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4323 CODE is either NEG or NOT. MODE is the machine mode in which the
4324 operation is performed. */
4325
4326 rtx
4327 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4328 machine_mode mode, rtx cond, rtx op1,
4329 rtx op2)
4330 {
4331 optab op = unknown_optab;
4332 if (code == NEG)
4333 op = negcc_optab;
4334 else if (code == NOT)
4335 op = notcc_optab;
4336 else
4337 gcc_unreachable ();
4338
4339 insn_code icode = direct_optab_handler (op, mode);
4340
4341 if (icode == CODE_FOR_nothing)
4342 return NULL_RTX;
4343
4344 if (!target)
4345 target = gen_reg_rtx (mode);
4346
4347 rtx_insn *last = get_last_insn ();
4348 struct expand_operand ops[4];
4349
4350 create_output_operand (&ops[0], target, mode);
4351 create_fixed_operand (&ops[1], cond);
4352 create_input_operand (&ops[2], op1, mode);
4353 create_input_operand (&ops[3], op2, mode);
4354
4355 if (maybe_expand_insn (icode, 4, ops))
4356 {
4357 if (ops[0].value != target)
4358 convert_move (target, ops[0].value, false);
4359
4360 return target;
4361 }
4362 delete_insns_since (last);
4363 return NULL_RTX;
4364 }
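
/* Sketch (hypothetical caller): x < 0 ? -y : y might be tried as

     rtx cond = gen_rtx_LT (VOIDmode, x, const0_rtx);
     rtx res = emit_conditional_neg_or_complement (target, NEG, mode,
                                                   cond, y, y);

   with a fallback path when res comes back NULL_RTX, since COND must
   also satisfy the negcc pattern's predicate.  */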
4365
4366 /* Emit a conditional addition instruction if the machine supports one for that
4367 condition and machine mode.
4368
4369 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4370 the mode to use should they be constants. If it is VOIDmode, they cannot
4371 both be constants.
4372
4373 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4374 should be stored there. MODE is the mode to use should they be constants.
4375 If it is VOIDmode, they cannot both be constants.
4376
4377 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4378 is not supported. */
4379
4380 rtx
4381 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4382 machine_mode cmode, rtx op2, rtx op3,
4383 machine_mode mode, int unsignedp)
4384 {
4385 rtx comparison;
4386 rtx_insn *last;
4387 enum insn_code icode;
4388
4389 /* If one operand is constant, make it the second one. Only do this
4390 if the other operand is not constant as well. */
4391
4392 if (swap_commutative_operands_p (op0, op1))
4393 {
4394 std::swap (op0, op1);
4395 code = swap_condition (code);
4396 }
4397
4398 /* get_condition will prefer to generate LT and GT even if the old
4399 comparison was against zero, so undo that canonicalization here since
4400 comparisons against zero are cheaper. */
4401 if (code == LT && op1 == const1_rtx)
4402 code = LE, op1 = const0_rtx;
4403 else if (code == GT && op1 == constm1_rtx)
4404 code = GE, op1 = const0_rtx;
4405
4406 if (cmode == VOIDmode)
4407 cmode = GET_MODE (op0);
4408
4409 if (mode == VOIDmode)
4410 mode = GET_MODE (op2);
4411
4412 icode = optab_handler (addcc_optab, mode);
4413
4414 if (icode == CODE_FOR_nothing)
4415 return 0;
4416
4417 if (!target)
4418 target = gen_reg_rtx (mode);
4419
4420 code = unsignedp ? unsigned_condition (code) : code;
4421 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4422
4423 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4424 return NULL and let the caller figure out how best to deal with this
4425 situation. */
4426 if (!COMPARISON_P (comparison))
4427 return NULL_RTX;
4428
4429 do_pending_stack_adjust ();
4430 last = get_last_insn ();
4431 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4432 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4433 &comparison, &cmode);
4434 if (comparison)
4435 {
4436 struct expand_operand ops[4];
4437
4438 create_output_operand (&ops[0], target, mode);
4439 create_fixed_operand (&ops[1], comparison);
4440 create_input_operand (&ops[2], op2, mode);
4441 create_input_operand (&ops[3], op3, mode);
4442 if (maybe_expand_insn (icode, 4, ops))
4443 {
4444 if (ops[0].value != target)
4445 convert_move (target, ops[0].value, false);
4446 return target;
4447 }
4448 }
4449 delete_insns_since (last);
4450 return NULL_RTX;
4451 }
4452 \f
4453 /* These functions attempt to generate an insn body, rather than
4454 emitting the insn, but if the gen function already emits them, we
4455 make no attempt to turn them back into naked patterns. */
4456
4457 /* Generate and return an insn body to add Y to X. */
4458
4459 rtx_insn *
4460 gen_add2_insn (rtx x, rtx y)
4461 {
4462 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4463
4464 gcc_assert (insn_operand_matches (icode, 0, x));
4465 gcc_assert (insn_operand_matches (icode, 1, x));
4466 gcc_assert (insn_operand_matches (icode, 2, y));
4467
4468 return GEN_FCN (icode) (x, x, y);
4469 }
4470
4471 /* Generate and return an insn body to add r1 and c,
4472 storing the result in r0. */
4473
4474 rtx_insn *
4475 gen_add3_insn (rtx r0, rtx r1, rtx c)
4476 {
4477 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4478
4479 if (icode == CODE_FOR_nothing
4480 || !insn_operand_matches (icode, 0, r0)
4481 || !insn_operand_matches (icode, 1, r1)
4482 || !insn_operand_matches (icode, 2, c))
4483 return NULL;
4484
4485 return GEN_FCN (icode) (r0, r1, c);
4486 }
4487
4488 int
4489 have_add2_insn (rtx x, rtx y)
4490 {
4491 enum insn_code icode;
4492
4493 gcc_assert (GET_MODE (x) != VOIDmode);
4494
4495 icode = optab_handler (add_optab, GET_MODE (x));
4496
4497 if (icode == CODE_FOR_nothing)
4498 return 0;
4499
4500 if (!insn_operand_matches (icode, 0, x)
4501 || !insn_operand_matches (icode, 1, x)
4502 || !insn_operand_matches (icode, 2, y))
4503 return 0;
4504
4505 return 1;
4506 }
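
/* E.g. (sketch): callers typically pair the two functions above as

     if (have_add2_insn (reg, inc))
       emit_insn (gen_add2_insn (reg, inc));

   so that gen_add2_insn's assertions only run on operands already known
   to match the add pattern's predicates.  */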
4507
4508 /* Generate and return an insn body to add Y and Z, storing the result in X. */
4509
4510 rtx_insn *
4511 gen_addptr3_insn (rtx x, rtx y, rtx z)
4512 {
4513 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4514
4515 gcc_assert (insn_operand_matches (icode, 0, x));
4516 gcc_assert (insn_operand_matches (icode, 1, y));
4517 gcc_assert (insn_operand_matches (icode, 2, z));
4518
4519 return GEN_FCN (icode) (x, y, z);
4520 }
4521
4522 /* Return true if the target implements an addptr pattern and X, Y,
4523 and Z are valid for the pattern predicates. */
4524
4525 int
4526 have_addptr3_insn (rtx x, rtx y, rtx z)
4527 {
4528 enum insn_code icode;
4529
4530 gcc_assert (GET_MODE (x) != VOIDmode);
4531
4532 icode = optab_handler (addptr3_optab, GET_MODE (x));
4533
4534 if (icode == CODE_FOR_nothing)
4535 return 0;
4536
4537 if (!insn_operand_matches (icode, 0, x)
4538 || !insn_operand_matches (icode, 1, y)
4539 || !insn_operand_matches (icode, 2, z))
4540 return 0;
4541
4542 return 1;
4543 }
4544
4545 /* Generate and return an insn body to subtract Y from X. */
4546
4547 rtx_insn *
4548 gen_sub2_insn (rtx x, rtx y)
4549 {
4550 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4551
4552 gcc_assert (insn_operand_matches (icode, 0, x));
4553 gcc_assert (insn_operand_matches (icode, 1, x));
4554 gcc_assert (insn_operand_matches (icode, 2, y));
4555
4556 return GEN_FCN (icode) (x, x, y);
4557 }
4558
4559 /* Generate and return an insn body to subtract c from r1,
4560 storing the result in r0. */
4561
4562 rtx_insn *
4563 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4564 {
4565 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4566
4567 if (icode == CODE_FOR_nothing
4568 || !insn_operand_matches (icode, 0, r0)
4569 || !insn_operand_matches (icode, 1, r1)
4570 || !insn_operand_matches (icode, 2, c))
4571 return NULL;
4572
4573 return GEN_FCN (icode) (r0, r1, c);
4574 }
4575
4576 int
4577 have_sub2_insn (rtx x, rtx y)
4578 {
4579 enum insn_code icode;
4580
4581 gcc_assert (GET_MODE (x) != VOIDmode);
4582
4583 icode = optab_handler (sub_optab, GET_MODE (x));
4584
4585 if (icode == CODE_FOR_nothing)
4586 return 0;
4587
4588 if (!insn_operand_matches (icode, 0, x)
4589 || !insn_operand_matches (icode, 1, x)
4590 || !insn_operand_matches (icode, 2, y))
4591 return 0;
4592
4593 return 1;
4594 }
4595 \f
4596 /* Generate the body of an insn to extend Y (with mode MFROM)
4597 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4598
4599 rtx_insn *
4600 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4601 machine_mode mfrom, int unsignedp)
4602 {
4603 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4604 return GEN_FCN (icode) (x, y);
4605 }
4606 \f
4607 /* Generate code to convert FROM to floating point
4608 and store in TO. FROM must be fixed point and not VOIDmode.
4609 UNSIGNEDP nonzero means regard FROM as unsigned.
4610 Normally this is done by correcting the final value
4611 if it is negative. */
4612
4613 void
4614 expand_float (rtx to, rtx from, int unsignedp)
4615 {
4616 enum insn_code icode;
4617 rtx target = to;
4618 machine_mode fmode, imode;
4619 bool can_do_signed = false;
4620
4621 /* Crash now, because we won't be able to decide which mode to use. */
4622 gcc_assert (GET_MODE (from) != VOIDmode);
4623
4624 /* Look for an insn to do the conversion. Do it in the specified
4625 modes if possible; otherwise convert either input, output or both to
4626 wider mode. If the integer mode is wider than the mode of FROM,
4627 we can do the conversion signed even if the input is unsigned. */
4628
4629 for (fmode = GET_MODE (to); fmode != VOIDmode;
4630 fmode = GET_MODE_WIDER_MODE (fmode))
4631 for (imode = GET_MODE (from); imode != VOIDmode;
4632 imode = GET_MODE_WIDER_MODE (imode))
4633 {
4634 int doing_unsigned = unsignedp;
4635
4636 if (fmode != GET_MODE (to)
4637 && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
4638 continue;
4639
4640 icode = can_float_p (fmode, imode, unsignedp);
4641 if (icode == CODE_FOR_nothing && unsignedp)
4642 {
4643 enum insn_code scode = can_float_p (fmode, imode, 0);
4644 if (scode != CODE_FOR_nothing)
4645 can_do_signed = true;
4646 if (imode != GET_MODE (from))
4647 icode = scode, doing_unsigned = 0;
4648 }
4649
4650 if (icode != CODE_FOR_nothing)
4651 {
4652 if (imode != GET_MODE (from))
4653 from = convert_to_mode (imode, from, unsignedp);
4654
4655 if (fmode != GET_MODE (to))
4656 target = gen_reg_rtx (fmode);
4657
4658 emit_unop_insn (icode, target, from,
4659 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4660
4661 if (target != to)
4662 convert_move (to, target, 0);
4663 return;
4664 }
4665 }
4666
4667 /* Unsigned integer, and no way to convert directly. Convert as signed,
4668 then unconditionally adjust the result. */
4669 if (unsignedp && can_do_signed)
4670 {
4671 rtx_code_label *label = gen_label_rtx ();
4672 rtx temp;
4673 REAL_VALUE_TYPE offset;
4674
4675 /* Look for a usable floating mode FMODE wider than the source and at
4676 least as wide as the target. Using FMODE will avoid rounding woes
4677 with unsigned values greater than the signed maximum value. */
4678
4679 for (fmode = GET_MODE (to); fmode != VOIDmode;
4680 fmode = GET_MODE_WIDER_MODE (fmode))
4681 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4682 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4683 break;
4684
4685 if (fmode == VOIDmode)
4686 {
4687 /* There is no such mode. Pretend the target is wide enough. */
4688 fmode = GET_MODE (to);
4689
4690 /* Avoid double-rounding when TO is narrower than FROM. */
4691 if ((significand_size (fmode) + 1)
4692 < GET_MODE_PRECISION (GET_MODE (from)))
4693 {
4694 rtx temp1;
4695 rtx_code_label *neglabel = gen_label_rtx ();
4696
4697 /* Don't use TARGET if it isn't a register, is a hard register,
4698 or is the wrong mode. */
4699 if (!REG_P (target)
4700 || REGNO (target) < FIRST_PSEUDO_REGISTER
4701 || GET_MODE (target) != fmode)
4702 target = gen_reg_rtx (fmode);
4703
4704 imode = GET_MODE (from);
4705 do_pending_stack_adjust ();
4706
4707 /* Test whether the sign bit is set. */
4708 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4709 0, neglabel);
4710
4711 /* The sign bit is not set. Convert as signed. */
4712 expand_float (target, from, 0);
4713 emit_jump_insn (targetm.gen_jump (label));
4714 emit_barrier ();
4715
4716 /* The sign bit is set.
4717 Convert to a usable (positive signed) value by shifting right
4718 one bit, while remembering if a nonzero bit was shifted
4719 out; i.e., compute (from & 1) | (from >> 1). */
4720
4721 emit_label (neglabel);
4722 temp = expand_binop (imode, and_optab, from, const1_rtx,
4723 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4724 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4725 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4726 OPTAB_LIB_WIDEN);
4727 expand_float (target, temp, 0);
4728
4729 /* Multiply by 2 to undo the shift above. */
4730 temp = expand_binop (fmode, add_optab, target, target,
4731 target, 0, OPTAB_LIB_WIDEN);
4732 if (temp != target)
4733 emit_move_insn (target, temp);
4734
4735 do_pending_stack_adjust ();
4736 emit_label (label);
4737 goto done;
4738 }
4739 }
4740
4741 /* If we are about to do some arithmetic to correct for an
4742 unsigned operand, do it in a pseudo-register. */
4743
4744 if (GET_MODE (to) != fmode
4745 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4746 target = gen_reg_rtx (fmode);
4747
4748 /* Convert as signed integer to floating. */
4749 expand_float (target, from, 0);
4750
4751 /* If FROM is negative (and therefore TO is negative),
4752 correct its value by 2**bitwidth. */
4753
4754 do_pending_stack_adjust ();
4755 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4756 0, label);
4757
4758
4759 real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
4760 temp = expand_binop (fmode, add_optab, target,
4761 const_double_from_real_value (offset, fmode),
4762 target, 0, OPTAB_LIB_WIDEN);
4763 if (temp != target)
4764 emit_move_insn (target, temp);
4765
4766 do_pending_stack_adjust ();
4767 emit_label (label);
4768 goto done;
4769 }
4770
4771 /* No hardware instruction available; call a library routine. */
4772 {
4773 rtx libfunc;
4774 rtx_insn *insns;
4775 rtx value;
4776 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4777
4778 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
4779 from = convert_to_mode (SImode, from, unsignedp);
4780
4781 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4782 gcc_assert (libfunc);
4783
4784 start_sequence ();
4785
4786 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4787 GET_MODE (to), 1, from,
4788 GET_MODE (from));
4789 insns = get_insns ();
4790 end_sequence ();
4791
4792 emit_libcall_block (insns, target, value,
4793 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4794 GET_MODE (to), from));
4795 }
4796
4797 done:
4798
4799 /* Copy result to requested destination
4800 if we have been computing in a temp location. */
4801
4802 if (target != to)
4803 {
4804 if (GET_MODE (target) == GET_MODE (to))
4805 emit_move_insn (to, target);
4806 else
4807 convert_move (to, target, 0);
4808 }
4809 }
4810 \f
4811 /* Generate code to convert FROM to fixed point and store in TO. FROM
4812 must be floating point. */
4813
4814 void
4815 expand_fix (rtx to, rtx from, int unsignedp)
4816 {
4817 enum insn_code icode;
4818 rtx target = to;
4819 machine_mode fmode, imode;
4820 bool must_trunc = false;
4821
4822 /* We first try to find a pair of modes, one real and one integer, at
4823 least as wide as FROM and TO, respectively, in which we can open-code
4824 this conversion. If the integer mode is wider than the mode of TO,
4825 we can do the conversion either signed or unsigned. */
4826
4827 for (fmode = GET_MODE (from); fmode != VOIDmode;
4828 fmode = GET_MODE_WIDER_MODE (fmode))
4829 for (imode = GET_MODE (to); imode != VOIDmode;
4830 imode = GET_MODE_WIDER_MODE (imode))
4831 {
4832 int doing_unsigned = unsignedp;
4833
4834 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4835 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4836 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4837
4838 if (icode != CODE_FOR_nothing)
4839 {
4840 rtx_insn *last = get_last_insn ();
4841 if (fmode != GET_MODE (from))
4842 from = convert_to_mode (fmode, from, 0);
4843
4844 if (must_trunc)
4845 {
4846 rtx temp = gen_reg_rtx (GET_MODE (from));
4847 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4848 temp, 0);
4849 }
4850
4851 if (imode != GET_MODE (to))
4852 target = gen_reg_rtx (imode);
4853
4854 if (maybe_emit_unop_insn (icode, target, from,
4855 doing_unsigned ? UNSIGNED_FIX : FIX))
4856 {
4857 if (target != to)
4858 convert_move (to, target, unsignedp);
4859 return;
4860 }
4861 delete_insns_since (last);
4862 }
4863 }
4864
4865 /* For an unsigned conversion, there is one more way to do it.
4866 If we have a signed conversion instruction, we generate code that compares
4867 the real value to the largest representable positive number. If it
4868 is smaller, the conversion is done normally. Otherwise, subtract
4869 one plus the highest signed number, convert, and add it back.
4870
4871 We only need to check all real modes, since we know we didn't find
4872 anything with a wider integer mode.
4873
4874 This code used to extend FP value into mode wider than the destination.
4875 This is needed for decimal float modes which cannot accurately
4876 represent one plus the highest signed number of the same size, but
4877 not for binary modes. Consider, for instance, conversion from SFmode
4878 into DImode.
4879
4880 The hot path through the code deals with inputs smaller than 2^63
4881 and does just the conversion, so there are no bits to lose.
4882
4883 On the other path we know the value is positive and in the range
4884 2^63..2^64-1 inclusive (for any other input, overflow happens and the
4885 result is undefined), so the most significant bit set in the mantissa
4886 corresponds to 2^63. The subtraction of 2^63 does not generate any
4887 rounding, as it simply clears out that bit. The rest is trivial. */
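/* Numeric illustration (not from the original sources): fixing the
   DFmode value 2^63 + 2^62 to unsigned DImode takes the second path:
   subtracting 2^63 leaves exactly 2^62, the signed fix yields
   0x4000000000000000, and XOR-ing in 1 << 63 produces the expected
   0xC000000000000000.  */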
4888
4889 if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4890 for (fmode = GET_MODE (from); fmode != VOIDmode;
4891 fmode = GET_MODE_WIDER_MODE (fmode))
4892 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
4893 && (!DECIMAL_FLOAT_MODE_P (fmode)
4894 || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
4895 {
4896 int bitsize;
4897 REAL_VALUE_TYPE offset;
4898 rtx limit;
4899 rtx_code_label *lab1, *lab2;
4900 rtx_insn *insn;
4901
4902 bitsize = GET_MODE_PRECISION (GET_MODE (to));
4903 real_2expN (&offset, bitsize - 1, fmode);
4904 limit = const_double_from_real_value (offset, fmode);
4905 lab1 = gen_label_rtx ();
4906 lab2 = gen_label_rtx ();
4907
4908 if (fmode != GET_MODE (from))
4909 from = convert_to_mode (fmode, from, 0);
4910
4911 /* See if we need to do the subtraction. */
4912 do_pending_stack_adjust ();
4913 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4914 0, lab1);
4915
4916 /* If not, do the signed "fix" and branch around fixup code. */
4917 expand_fix (to, from, 0);
4918 emit_jump_insn (targetm.gen_jump (lab2));
4919 emit_barrier ();
4920
4921 /* Otherwise, subtract 2**(N-1), convert to signed number,
4922 then add 2**(N-1). Do the addition using XOR since this
4923 will often generate better code. */
4924 emit_label (lab1);
4925 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4926 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4927 expand_fix (to, target, 0);
4928 target = expand_binop (GET_MODE (to), xor_optab, to,
4929 gen_int_mode
4930 (HOST_WIDE_INT_1 << (bitsize - 1),
4931 GET_MODE (to)),
4932 to, 1, OPTAB_LIB_WIDEN);
4933
4934 if (target != to)
4935 emit_move_insn (to, target);
4936
4937 emit_label (lab2);
4938
4939 if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
4940 {
4941 /* Make a place for a REG_NOTE and add it. */
4942 insn = emit_move_insn (to, to);
4943 set_dst_reg_note (insn, REG_EQUAL,
4944 gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
4945 copy_rtx (from)),
4946 to);
4947 }
4948
4949 return;
4950 }
4951
4952 /* We can't do it with an insn, so use a library call. But first ensure
4953 that the mode of TO is at least as wide as SImode, since those are the
4954 only library calls we know about. */
4955
4956 if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
4957 {
4958 target = gen_reg_rtx (SImode);
4959
4960 expand_fix (target, from, unsignedp);
4961 }
4962 else
4963 {
4964 rtx_insn *insns;
4965 rtx value;
4966 rtx libfunc;
4967
4968 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4969 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4970 gcc_assert (libfunc);
4971
4972 start_sequence ();
4973
4974 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4975 GET_MODE (to), 1, from,
4976 GET_MODE (from));
4977 insns = get_insns ();
4978 end_sequence ();
4979
4980 emit_libcall_block (insns, target, value,
4981 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4982 GET_MODE (to), from));
4983 }
4984
4985 if (target != to)
4986 {
4987 if (GET_MODE (to) == GET_MODE (target))
4988 emit_move_insn (to, target);
4989 else
4990 convert_move (to, target, 0);
4991 }
4992 }
4993
4994
4995 /* Promote integer arguments for a libcall if necessary.
4996 emit_library_call_value cannot do the promotion because it does not
4997 know if it should do a signed or unsigned promotion. This is because
4998 there are no tree types defined for libcalls. */
4999
5000 static rtx
5001 prepare_libcall_arg (rtx arg, int uintp)
5002 {
5003 machine_mode mode = GET_MODE (arg);
5004 machine_mode arg_mode;
5005 if (SCALAR_INT_MODE_P (mode))
5006 {
5007 /* If we need to promote the integer function argument we need to do
5008 it here instead of inside emit_library_call_value because in
5009 emit_library_call_value we don't know if we should do a signed or
5010 unsigned promotion. */
5011
5012 int unsigned_p = 0;
5013 arg_mode = promote_function_mode (NULL_TREE, mode,
5014 &unsigned_p, NULL_TREE, 0);
5015 if (arg_mode != mode)
5016 return convert_to_mode (arg_mode, arg, uintp);
5017 }
5018 return arg;
5019 }
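
/* Sketch (target-dependent): on a target whose promote_function_mode
   widens HImode arguments to SImode,

     arg = prepare_libcall_arg (arg, 1);

   returns a zero-extended SImode copy of a HImode ARG, while UINTP == 0
   would request a sign extension instead.  */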
5020
5021 /* Generate code to convert FROM or TO a fixed-point.
5022 If UINTP is true, either TO or FROM is an unsigned integer.
5023 If SATP is true, we need to saturate the result. */
5024
5025 void
5026 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5027 {
5028 machine_mode to_mode = GET_MODE (to);
5029 machine_mode from_mode = GET_MODE (from);
5030 convert_optab tab;
5031 enum rtx_code this_code;
5032 enum insn_code code;
5033 rtx_insn *insns;
5034 rtx value;
5035 rtx libfunc;
5036
5037 if (to_mode == from_mode)
5038 {
5039 emit_move_insn (to, from);
5040 return;
5041 }
5042
5043 if (uintp)
5044 {
5045 tab = satp ? satfractuns_optab : fractuns_optab;
5046 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5047 }
5048 else
5049 {
5050 tab = satp ? satfract_optab : fract_optab;
5051 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5052 }
5053 code = convert_optab_handler (tab, to_mode, from_mode);
5054 if (code != CODE_FOR_nothing)
5055 {
5056 emit_unop_insn (code, to, from, this_code);
5057 return;
5058 }
5059
5060 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5061 gcc_assert (libfunc);
5062
5063 from = prepare_libcall_arg (from, uintp);
5064 from_mode = GET_MODE (from);
5065
5066 start_sequence ();
5067 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5068 1, from, from_mode);
5069 insns = get_insns ();
5070 end_sequence ();
5071
5072 emit_libcall_block (insns, to, value,
5073 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5074 }
5075
5076 /* Generate code to convert FROM to fixed point and store in TO. FROM
5077 must be floating point, TO must be signed. Use the conversion optab
5078 TAB to do the conversion. */
5079
5080 bool
5081 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5082 {
5083 enum insn_code icode;
5084 rtx target = to;
5085 machine_mode fmode, imode;
5086
5087 /* We first try to find a pair of modes, one real and one integer, at
5088 least as wide as FROM and TO, respectively, in which we can open-code
5089 this conversion. If the integer mode is wider than the mode of TO,
5090 we can do the conversion either signed or unsigned. */
5091
5092 for (fmode = GET_MODE (from); fmode != VOIDmode;
5093 fmode = GET_MODE_WIDER_MODE (fmode))
5094 for (imode = GET_MODE (to); imode != VOIDmode;
5095 imode = GET_MODE_WIDER_MODE (imode))
5096 {
5097 icode = convert_optab_handler (tab, imode, fmode);
5098 if (icode != CODE_FOR_nothing)
5099 {
5100 rtx_insn *last = get_last_insn ();
5101 if (fmode != GET_MODE (from))
5102 from = convert_to_mode (fmode, from, 0);
5103
5104 if (imode != GET_MODE (to))
5105 target = gen_reg_rtx (imode);
5106
5107 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5108 {
5109 delete_insns_since (last);
5110 continue;
5111 }
5112 if (target != to)
5113 convert_move (to, target, 0);
5114 return true;
5115 }
5116 }
5117
5118 return false;
5119 }
5120 \f
5121 /* Report whether we have an instruction to perform the operation
5122 specified by CODE on operands of mode MODE. */
5123 int
5124 have_insn_for (enum rtx_code code, machine_mode mode)
5125 {
5126 return (code_to_optab (code)
5127 && (optab_handler (code_to_optab (code), mode)
5128 != CODE_FOR_nothing));
5129 }
5130
5131 /* Print information about the current contents of the optabs on
5132 STDERR. */
5133
5134 DEBUG_FUNCTION void
5135 debug_optab_libfuncs (void)
5136 {
5137 int i, j, k;
5138
5139 /* Dump the arithmetic optabs. */
5140 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5141 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5142 {
5143 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5144 if (l)
5145 {
5146 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5147 fprintf (stderr, "%s\t%s:\t%s\n",
5148 GET_RTX_NAME (optab_to_code ((optab) i)),
5149 GET_MODE_NAME (j),
5150 XSTR (l, 0));
5151 }
5152 }
5153
5154 /* Dump the conversion optabs. */
5155 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5156 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5157 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5158 {
5159 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5160 (machine_mode) k);
5161 if (l)
5162 {
5163 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5164 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5165 GET_RTX_NAME (optab_to_code ((optab) i)),
5166 GET_MODE_NAME (j),
5167 GET_MODE_NAME (k),
5168 XSTR (l, 0));
5169 }
5170 }
5171 }
5172
5173 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5174 CODE. Return 0 on failure. */
5175
5176 rtx_insn *
5177 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5178 {
5179 machine_mode mode = GET_MODE (op1);
5180 enum insn_code icode;
5181 rtx_insn *insn;
5182 rtx trap_rtx;
5183
5184 if (mode == VOIDmode)
5185 return 0;
5186
5187 icode = optab_handler (ctrap_optab, mode);
5188 if (icode == CODE_FOR_nothing)
5189 return 0;
5190
5191 /* Some targets only accept a zero trap code. */
5192 if (!insn_operand_matches (icode, 3, tcode))
5193 return 0;
5194
5195 do_pending_stack_adjust ();
5196 start_sequence ();
5197 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5198 &trap_rtx, &mode);
5199 if (!trap_rtx)
5200 insn = NULL;
5201 else
5202 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5203 tcode);
5204
5205 /* If that failed, then give up. */
5206 if (insn == 0)
5207 {
5208 end_sequence ();
5209 return 0;
5210 }
5211
5212 emit_insn (insn);
5213 insn = get_insns ();
5214 end_sequence ();
5215 return insn;
5216 }
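
/* Usage sketch: a null-pointer check could be expanded as

     rtx_insn *seq = gen_cond_trap (EQ, op0, const0_rtx, const0_rtx);
     if (seq)
       emit_insn (seq);

   using a zero trap code, which is the only value some targets
   accept (see above).  */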
5217
5218 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5219 or unsigned operation code. */
5220
5221 enum rtx_code
5222 get_rtx_code (enum tree_code tcode, bool unsignedp)
5223 {
5224 enum rtx_code code;
5225 switch (tcode)
5226 {
5227 case EQ_EXPR:
5228 code = EQ;
5229 break;
5230 case NE_EXPR:
5231 code = NE;
5232 break;
5233 case LT_EXPR:
5234 code = unsignedp ? LTU : LT;
5235 break;
5236 case LE_EXPR:
5237 code = unsignedp ? LEU : LE;
5238 break;
5239 case GT_EXPR:
5240 code = unsignedp ? GTU : GT;
5241 break;
5242 case GE_EXPR:
5243 code = unsignedp ? GEU : GE;
5244 break;
5245
5246 case UNORDERED_EXPR:
5247 code = UNORDERED;
5248 break;
5249 case ORDERED_EXPR:
5250 code = ORDERED;
5251 break;
5252 case UNLT_EXPR:
5253 code = UNLT;
5254 break;
5255 case UNLE_EXPR:
5256 code = UNLE;
5257 break;
5258 case UNGT_EXPR:
5259 code = UNGT;
5260 break;
5261 case UNGE_EXPR:
5262 code = UNGE;
5263 break;
5264 case UNEQ_EXPR:
5265 code = UNEQ;
5266 break;
5267 case LTGT_EXPR:
5268 code = LTGT;
5269 break;
5270
5271 case BIT_AND_EXPR:
5272 code = AND;
5273 break;
5274
5275 case BIT_IOR_EXPR:
5276 code = IOR;
5277 break;
5278
5279 default:
5280 gcc_unreachable ();
5281 }
5282 return code;
5283 }
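
/* For example, get_rtx_code (LT_EXPR, true) yields LTU while
   get_rtx_code (LT_EXPR, false) yields LT; the unordered comparison
   codes are unaffected by UNSIGNEDP.  */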
5284
5285 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5286 unsigned operators. OPNO holds an index of the first comparison
5287 operand in insn with code ICODE. No compare instruction is generated. */
5288
5289 static rtx
5290 vector_compare_rtx (enum tree_code tcode, tree t_op0, tree t_op1,
5291 bool unsignedp, enum insn_code icode,
5292 unsigned int opno)
5293 {
5294 struct expand_operand ops[2];
5295 rtx rtx_op0, rtx_op1;
5296 machine_mode m0, m1;
5297 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5298
5299 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5300
5301 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5302 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5303 cases, use the original mode. */
5304 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5305 EXPAND_STACK_PARM);
5306 m0 = GET_MODE (rtx_op0);
5307 if (m0 == VOIDmode)
5308 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5309
5310 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5311 EXPAND_STACK_PARM);
5312 m1 = GET_MODE (rtx_op1);
5313 if (m1 == VOIDmode)
5314 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5315
5316 create_input_operand (&ops[0], rtx_op0, m0);
5317 create_input_operand (&ops[1], rtx_op1, m1);
5318 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5319 gcc_unreachable ();
5320 return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
5321 }
5322
5323 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
5324 vec_perm operand, assuming the second operand is a constant vector of zeroes.
5325 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
5326 shift. */
5327 static rtx
5328 shift_amt_for_vec_perm_mask (rtx sel)
5329 {
5330 unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
5331 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));
5332
5333 if (GET_CODE (sel) != CONST_VECTOR)
5334 return NULL_RTX;
5335
5336 first = INTVAL (CONST_VECTOR_ELT (sel, 0));
5337 if (first >= nelt)
5338 return NULL_RTX;
5339 for (i = 1; i < nelt; i++)
5340 {
5341 int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
5342 unsigned int expected = i + first;
5343 /* Indices into the second vector are all equivalent. */
5344 if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
5345 return NULL_RTX;
5346 }
5347
5348 return GEN_INT (first * bitsize);
5349 }
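
/* Illustration (assumed V4SImode selector): the constant mask
   { 1, 2, 3, 4 } takes elements 1..3 of the first operand followed by
   element 0 of the all-zero second operand, i.e. a shift by one 32-bit
   element, so GEN_INT (32) is returned.  */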
5350
5351 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5352
5353 static rtx
5354 expand_vec_perm_1 (enum insn_code icode, rtx target,
5355 rtx v0, rtx v1, rtx sel)
5356 {
5357 machine_mode tmode = GET_MODE (target);
5358 machine_mode smode = GET_MODE (sel);
5359 struct expand_operand ops[4];
5360
5361 create_output_operand (&ops[0], target, tmode);
5362 create_input_operand (&ops[3], sel, smode);
5363
5364 /* Make an effort to preserve v0 == v1. The target expander is able to
5365 rely on this to determine if we're permuting a single input operand. */
5366 if (rtx_equal_p (v0, v1))
5367 {
5368 if (!insn_operand_matches (icode, 1, v0))
5369 v0 = force_reg (tmode, v0);
5370 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5371 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5372
5373 create_fixed_operand (&ops[1], v0);
5374 create_fixed_operand (&ops[2], v0);
5375 }
5376 else
5377 {
5378 create_input_operand (&ops[1], v0, tmode);
5379 create_input_operand (&ops[2], v1, tmode);
5380 }
5381
5382 if (maybe_expand_insn (icode, 4, ops))
5383 return ops[0].value;
5384 return NULL_RTX;
5385 }
5386
5387 /* Generate instructions for vec_perm optab given its mode
5388 and three operands. */
5389
5390 rtx
5391 expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5392 {
5393 enum insn_code icode;
5394 machine_mode qimode;
5395 unsigned int i, w, e, u;
5396 rtx tmp, sel_qi = NULL;
5397 rtvec vec;
5398
5399 if (!target || GET_MODE (target) != mode)
5400 target = gen_reg_rtx (mode);
5401
5402 w = GET_MODE_SIZE (mode);
5403 e = GET_MODE_NUNITS (mode);
5404 u = GET_MODE_UNIT_SIZE (mode);
5405
5406 /* Set QIMODE to a different vector mode with byte elements.
5407 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5408 qimode = VOIDmode;
5409 if (GET_MODE_INNER (mode) != QImode)
5410 {
5411 qimode = mode_for_vector (QImode, w);
5412 if (!VECTOR_MODE_P (qimode))
5413 qimode = VOIDmode;
5414 }
5415
5416 /* If the input is a constant, expand it specially. */
5417 gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
5418 if (GET_CODE (sel) == CONST_VECTOR)
5419 {
5420 /* See if this can be handled with a vec_shr. We only do this if the
5421 second vector is all zeroes. */
5422 enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
5423 enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5424 ? optab_handler (vec_shr_optab, qimode)
5425 : CODE_FOR_nothing);
5426 rtx shift_amt = NULL_RTX;
5427 if (v1 == CONST0_RTX (GET_MODE (v1))
5428 && (shift_code != CODE_FOR_nothing
5429 || shift_code_qi != CODE_FOR_nothing))
5430 {
5431 shift_amt = shift_amt_for_vec_perm_mask (sel);
5432 if (shift_amt)
5433 {
5434 struct expand_operand ops[3];
5435 if (shift_code != CODE_FOR_nothing)
5436 {
5437 create_output_operand (&ops[0], target, mode);
5438 create_input_operand (&ops[1], v0, mode);
5439 create_convert_operand_from_type (&ops[2], shift_amt,
5440 sizetype);
5441 if (maybe_expand_insn (shift_code, 3, ops))
5442 return ops[0].value;
5443 }
5444 if (shift_code_qi != CODE_FOR_nothing)
5445 {
5446 tmp = gen_reg_rtx (qimode);
5447 create_output_operand (&ops[0], tmp, qimode);
5448 create_input_operand (&ops[1], gen_lowpart (qimode, v0),
5449 qimode);
5450 create_convert_operand_from_type (&ops[2], shift_amt,
5451 sizetype);
5452 if (maybe_expand_insn (shift_code_qi, 3, ops))
5453 return gen_lowpart (mode, ops[0].value);
5454 }
5455 }
5456 }
5457
5458 icode = direct_optab_handler (vec_perm_const_optab, mode);
5459 if (icode != CODE_FOR_nothing)
5460 {
5461 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5462 if (tmp)
5463 return tmp;
5464 }
5465
5466 /* Fall back to a constant byte-based permutation. */
5467 if (qimode != VOIDmode)
5468 {
5469 vec = rtvec_alloc (w);
5470 for (i = 0; i < e; ++i)
5471 {
5472 unsigned int j, this_e;
5473
5474 this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
5475 this_e &= 2 * e - 1;
5476 this_e *= u;
5477
5478 for (j = 0; j < u; ++j)
5479 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
5480 }
5481 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
5482
5483 icode = direct_optab_handler (vec_perm_const_optab, qimode);
5484 if (icode != CODE_FOR_nothing)
5485 {
5486 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5487 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5488 gen_lowpart (qimode, v1), sel_qi);
5489 if (tmp)
5490 return gen_lowpart (mode, tmp);
5491 }
5492 }
5493 }
5494
5495 /* Otherwise expand as a fully variable permutation. */
5496 icode = direct_optab_handler (vec_perm_optab, mode);
5497 if (icode != CODE_FOR_nothing)
5498 {
5499 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5500 if (tmp)
5501 return tmp;
5502 }
5503
5504 /* As a special case to aid several targets, lower the element-based
5505 permutation to a byte-based permutation and try again. */
5506 if (qimode == VOIDmode)
5507 return NULL_RTX;
5508 icode = direct_optab_handler (vec_perm_optab, qimode);
5509 if (icode == CODE_FOR_nothing)
5510 return NULL_RTX;
5511
5512 if (sel_qi == NULL)
5513 {
5514 /* Multiply each element by its byte size. */
5515 machine_mode selmode = GET_MODE (sel);
5516 if (u == 2)
5517 sel = expand_simple_binop (selmode, PLUS, sel, sel,
5518 NULL, 0, OPTAB_DIRECT);
5519 else
5520 sel = expand_simple_binop (selmode, ASHIFT, sel,
5521 GEN_INT (exact_log2 (u)),
5522 NULL, 0, OPTAB_DIRECT);
5523 gcc_assert (sel != NULL);
5524
5525 /* Broadcast the low byte of each element into each of its bytes. */
5526 vec = rtvec_alloc (w);
5527 for (i = 0; i < w; ++i)
5528 {
5529 int this_e = i / u * u;
5530 if (BYTES_BIG_ENDIAN)
5531 this_e += u - 1;
5532 RTVEC_ELT (vec, i) = GEN_INT (this_e);
5533 }
5534 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5535 sel = gen_lowpart (qimode, sel);
5536 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
5537 gcc_assert (sel != NULL);
5538
5539 /* Add the byte offset to each byte element. */
5540 /* Note that the indices here are defined in terms of memory ordering,
5541 so there should be no difference between big and little endian. */
5542 vec = rtvec_alloc (w);
5543 for (i = 0; i < w; ++i)
5544 RTVEC_ELT (vec, i) = GEN_INT (i % u);
5545 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5546 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5547 sel, 0, OPTAB_DIRECT);
5548 gcc_assert (sel_qi != NULL);
5549 }
5550
5551 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5552 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5553 gen_lowpart (qimode, v1), sel_qi);
5554 if (tmp)
5555 tmp = gen_lowpart (mode, tmp);
5556 return tmp;
5557 }
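
/* Illustration, not part of the original source: a worked instance of the
   constant byte-lowering above.  For V4SImode (u == 4 bytes per element)
   and the element selector {1, 0, 3, 2}, each index is masked to 2*e - 1,
   scaled by u, and expanded into u consecutive byte indices, giving the
   V16QImode selector

     {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11}

   which swaps adjacent 32-bit elements one byte at a time.  */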
5558
5559 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5560 three operands. */
5561
5562 rtx
5563 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5564 rtx target)
5565 {
5566 struct expand_operand ops[4];
5567 machine_mode mode = TYPE_MODE (vec_cond_type);
5568 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5569 enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5570 rtx mask, rtx_op1, rtx_op2;
5571
5572 if (icode == CODE_FOR_nothing)
5573 return 0;
5574
5575 mask = expand_normal (op0);
5576 rtx_op1 = expand_normal (op1);
5577 rtx_op2 = expand_normal (op2);
5578
5579 mask = force_reg (mask_mode, mask);
5580 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5581
5582 create_output_operand (&ops[0], target, mode);
5583 create_input_operand (&ops[1], rtx_op1, mode);
5584 create_input_operand (&ops[2], rtx_op2, mode);
5585 create_input_operand (&ops[3], mask, mask_mode);
5586 expand_insn (icode, 4, ops);
5587
5588 return ops[0].value;
5589 }
5590
5591 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5592 three operands. */
5593
5594 rtx
5595 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5596 rtx target)
5597 {
5598 struct expand_operand ops[6];
5599 enum insn_code icode;
5600 rtx comparison, rtx_op1, rtx_op2;
5601 machine_mode mode = TYPE_MODE (vec_cond_type);
5602 machine_mode cmp_op_mode;
5603 bool unsignedp;
5604 tree op0a, op0b;
5605 enum tree_code tcode;
5606
5607 if (COMPARISON_CLASS_P (op0))
5608 {
5609 op0a = TREE_OPERAND (op0, 0);
5610 op0b = TREE_OPERAND (op0, 1);
5611 tcode = TREE_CODE (op0);
5612 }
5613 else
5614 {
5615 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5616 if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5617 != CODE_FOR_nothing)
5618 return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5619 op2, target);
5620 /* Fake op0 < 0. */
5621 else
5622 {
5623 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5624 == MODE_VECTOR_INT);
5625 op0a = op0;
5626 op0b = build_zero_cst (TREE_TYPE (op0));
5627 tcode = LT_EXPR;
5628 }
5629 }
5630 cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5631 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5632
5633
5634 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
5635 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
5636
5637 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
5638 if (icode == CODE_FOR_nothing)
5639 return 0;
5640
5641 comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode, 4);
5642 rtx_op1 = expand_normal (op1);
5643 rtx_op2 = expand_normal (op2);
5644
5645 create_output_operand (&ops[0], target, mode);
5646 create_input_operand (&ops[1], rtx_op1, mode);
5647 create_input_operand (&ops[2], rtx_op2, mode);
5648 create_fixed_operand (&ops[3], comparison);
5649 create_fixed_operand (&ops[4], XEXP (comparison, 0));
5650 create_fixed_operand (&ops[5], XEXP (comparison, 1));
5651 expand_insn (icode, 6, ops);
5652 return ops[0].value;
5653 }
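
/* Illustration, not part of the original source: the "fake op0 < 0" trick
   above relies on vector booleans being represented as all-ones (-1) for
   true in integer vector modes.  With a V4SImode mask M, the comparison
   M < {0,0,0,0} is therefore true exactly in the lanes where M is set, so
   VEC_COND_EXPR <M, op1, op2> expands as if it had been written
   VEC_COND_EXPR <M < 0, op1, op2>.  */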
5654
5655 /* Generate insns for a vector comparison into a mask. */
5656
5657 rtx
5658 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5659 {
5660 struct expand_operand ops[4];
5661 enum insn_code icode;
5662 rtx comparison;
5663 machine_mode mask_mode = TYPE_MODE (type);
5664 machine_mode vmode;
5665 bool unsignedp;
5666 tree op0a, op0b;
5667 enum tree_code tcode;
5668
5669 op0a = TREE_OPERAND (exp, 0);
5670 op0b = TREE_OPERAND (exp, 1);
5671 tcode = TREE_CODE (exp);
5672
5673 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5674 vmode = TYPE_MODE (TREE_TYPE (op0a));
5675
5676 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5677 if (icode == CODE_FOR_nothing)
5678 return 0;
5679
5680 comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode, 2);
5681 create_output_operand (&ops[0], target, mask_mode);
5682 create_fixed_operand (&ops[1], comparison);
5683 create_fixed_operand (&ops[2], XEXP (comparison, 0));
5684 create_fixed_operand (&ops[3], XEXP (comparison, 1));
5685 expand_insn (icode, 4, ops);
5686 return ops[0].value;
5687 }
5688
5689 /* Expand a highpart multiply. */
5690
5691 rtx
5692 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5693 rtx target, bool uns_p)
5694 {
5695 struct expand_operand eops[3];
5696 enum insn_code icode;
5697 int method, i, nunits;
5698 machine_mode wmode;
5699 rtx m1, m2, perm;
5700 optab tab1, tab2;
5701 rtvec v;
5702
5703 method = can_mult_highpart_p (mode, uns_p);
5704 switch (method)
5705 {
5706 case 0:
5707 return NULL_RTX;
5708 case 1:
5709 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5710 return expand_binop (mode, tab1, op0, op1, target, uns_p,
5711 OPTAB_LIB_WIDEN);
5712 case 2:
5713 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5714 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5715 break;
5716 case 3:
5717 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5718 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5719 if (BYTES_BIG_ENDIAN)
5720 std::swap (tab1, tab2);
5721 break;
5722 default:
5723 gcc_unreachable ();
5724 }
5725
5726 icode = optab_handler (tab1, mode);
5727 nunits = GET_MODE_NUNITS (mode);
5728 wmode = insn_data[icode].operand[0].mode;
5729 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
5730 gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));
5731
5732 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5733 create_input_operand (&eops[1], op0, mode);
5734 create_input_operand (&eops[2], op1, mode);
5735 expand_insn (icode, 3, eops);
5736 m1 = gen_lowpart (mode, eops[0].value);
5737
5738 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5739 create_input_operand (&eops[1], op0, mode);
5740 create_input_operand (&eops[2], op1, mode);
5741 expand_insn (optab_handler (tab2, mode), 3, eops);
5742 m2 = gen_lowpart (mode, eops[0].value);
5743
5744 v = rtvec_alloc (nunits);
5745 if (method == 2)
5746 {
5747 for (i = 0; i < nunits; ++i)
5748 RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
5749 + ((i & 1) ? nunits : 0));
5750 }
5751 else
5752 {
5753 for (i = 0; i < nunits; ++i)
5754 RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
5755 }
5756 perm = gen_rtx_CONST_VECTOR (mode, v);
5757
5758 return expand_vec_perm (mode, m1, m2, perm, target);
5759 }
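
/* Illustration, not part of the original source: a worked instance of the
   selectors built above.  For unsigned V4SImode on a little-endian target
   with even/odd widening multiplies (method 2), M1 holds the V2DImode
   products of elements {0, 2} and M2 those of elements {1, 3}, each viewed
   as V4SImode.  The loop produces the selector

     {1, 5, 3, 7}

   which picks the high SImode half of each DImode product, interleaving
   M1 and M2 back into element order.  */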
5760 \f
5761 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5762 pattern. */
5763
5764 static void
5765 find_cc_set (rtx x, const_rtx pat, void *data)
5766 {
5767 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
5768 && GET_CODE (pat) == SET)
5769 {
5770 rtx *p_cc_reg = (rtx *) data;
5771 gcc_assert (!*p_cc_reg);
5772 *p_cc_reg = x;
5773 }
5774 }
5775
5776 /* This is a helper function for the other atomic operations. This function
5777 emits a loop that contains SEQ that iterates until a compare-and-swap
5778 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5779 a set of instructions that takes a value from OLD_REG as an input and
5780 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5781 set to the current contents of MEM. After SEQ, a compare-and-swap will
5782 attempt to update MEM with NEW_REG. The function returns true when the
5783 loop was generated successfully. */
5784
5785 static bool
5786 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5787 {
5788 machine_mode mode = GET_MODE (mem);
5789 rtx_code_label *label;
5790 rtx cmp_reg, success, oldval;
5791
5792 /* The loop we want to generate looks like
5793
5794 cmp_reg = mem;
5795 label:
5796 old_reg = cmp_reg;
5797 seq;
5798 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5799 if (success)
5800 goto label;
5801
5802 Note that we only do the plain load from memory once. Subsequent
5803 iterations use the value loaded by the compare-and-swap pattern. */
5804
5805 label = gen_label_rtx ();
5806 cmp_reg = gen_reg_rtx (mode);
5807
5808 emit_move_insn (cmp_reg, mem);
5809 emit_label (label);
5810 emit_move_insn (old_reg, cmp_reg);
5811 if (seq)
5812 emit_insn (seq);
5813
5814 success = NULL_RTX;
5815 oldval = cmp_reg;
5816 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
5817 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
5818 MEMMODEL_RELAXED))
5819 return false;
5820
5821 if (oldval != cmp_reg)
5822 emit_move_insn (cmp_reg, oldval);
5823
5824 /* Mark this jump predicted not taken. */
5825 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
5826 GET_MODE (success), 1, label, 0);
5827 return true;
5828 }
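
/* Illustrative sketch, not part of the original source: how a caller with
   no direct pattern might use the loop above, here for an atomic signed
   maximum (the function name is hypothetical).  SEQ is built in a deferred
   sequence; it reads OLD_REG and computes the new value, matching the
   contract described before the function.  */

static bool
example_expand_atomic_smax_loop (rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);
  rtx old_reg = gen_reg_rtx (mode);
  rtx new_val;
  rtx_insn *seq;

  start_sequence ();
  /* new_val = SMAX (old_reg, val); this may emit insns into the sequence.  */
  new_val = expand_simple_binop (mode, SMAX, old_reg, val, NULL_RTX,
				 false, OPTAB_LIB_WIDEN);
  seq = get_insns ();
  end_sequence ();

  return (new_val != NULL_RTX
	  && expand_compare_and_swap_loop (mem, old_reg, new_val, seq));
}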
5829
5830
5831 /* This function tries to emit an atomic_exchange instruction. VAL is written
5832 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5833 using TARGET if possible. */
5834
5835 static rtx
5836 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
5837 {
5838 machine_mode mode = GET_MODE (mem);
5839 enum insn_code icode;
5840
5841 /* If the target supports the exchange directly, great. */
5842 icode = direct_optab_handler (atomic_exchange_optab, mode);
5843 if (icode != CODE_FOR_nothing)
5844 {
5845 struct expand_operand ops[4];
5846
5847 create_output_operand (&ops[0], target, mode);
5848 create_fixed_operand (&ops[1], mem);
5849 create_input_operand (&ops[2], val, mode);
5850 create_integer_operand (&ops[3], model);
5851 if (maybe_expand_insn (icode, 4, ops))
5852 return ops[0].value;
5853 }
5854
5855 return NULL_RTX;
5856 }
5857
5858 /* This function tries to implement an atomic exchange operation using
5859 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5860 The previous contents of *MEM are returned, using TARGET if possible.
5861 Since this instruction is an acquire barrier only, stronger memory
5862 models may require additional barriers to be emitted. */
5863
5864 static rtx
5865 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
5866 enum memmodel model)
5867 {
5868 machine_mode mode = GET_MODE (mem);
5869 enum insn_code icode;
5870 rtx_insn *last_insn = get_last_insn ();
5871
5872 icode = optab_handler (sync_lock_test_and_set_optab, mode);
5873
5874 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
5875 exists, and the memory model is stronger than acquire, add a release
5876 barrier before the instruction. */
5877
5878 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
5879 expand_mem_thread_fence (model);
5880
5881 if (icode != CODE_FOR_nothing)
5882 {
5883 struct expand_operand ops[3];
5884 create_output_operand (&ops[0], target, mode);
5885 create_fixed_operand (&ops[1], mem);
5886 create_input_operand (&ops[2], val, mode);
5887 if (maybe_expand_insn (icode, 3, ops))
5888 return ops[0].value;
5889 }
5890
5891 /* If an external test-and-set libcall is provided, use that instead of
5892 any external compare-and-swap that we might get from the compare-and-
5893 swap-loop expansion later. */
5894 if (!can_compare_and_swap_p (mode, false))
5895 {
5896 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
5897 if (libfunc != NULL)
5898 {
5899 rtx addr;
5900
5901 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
5902 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
5903 mode, 2, addr, ptr_mode,
5904 val, mode);
5905 }
5906 }
5907
5908 /* If the test_and_set can't be emitted, eliminate any barrier that might
5909 have been emitted. */
5910 delete_insns_since (last_insn);
5911 return NULL_RTX;
5912 }
5913
5914 /* This function tries to implement an atomic exchange operation using a
5915 compare_and_swap loop. VAL is written to *MEM. The previous contents of
5916 *MEM are returned, using TARGET if possible. No memory model is required
5917 since a compare_and_swap loop is seq-cst. */
5918
5919 static rtx
5920 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
5921 {
5922 machine_mode mode = GET_MODE (mem);
5923
5924 if (can_compare_and_swap_p (mode, true))
5925 {
5926 if (!target || !register_operand (target, mode))
5927 target = gen_reg_rtx (mode);
5928 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
5929 return target;
5930 }
5931
5932 return NULL_RTX;
5933 }
5934
5935 /* This function tries to implement an atomic test-and-set operation
5936 using the atomic_test_and_set instruction pattern. A boolean value
5937 is returned from the operation, using TARGET if possible. */
5938
5939 static rtx
5940 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
5941 {
5942 machine_mode pat_bool_mode;
5943 struct expand_operand ops[3];
5944
5945 if (!targetm.have_atomic_test_and_set ())
5946 return NULL_RTX;
5947
5948 /* While we always get QImode from __atomic_test_and_set, we get
5949 other memory modes from __sync_lock_test_and_set. Note that we
5950 use no endian adjustment here. This matches the 4.6 behavior
5951 in the Sparc backend. */
5952 enum insn_code icode = targetm.code_for_atomic_test_and_set;
5953 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
5954 if (GET_MODE (mem) != QImode)
5955 mem = adjust_address_nv (mem, QImode, 0);
5956
5957 pat_bool_mode = insn_data[icode].operand[0].mode;
5958 create_output_operand (&ops[0], target, pat_bool_mode);
5959 create_fixed_operand (&ops[1], mem);
5960 create_integer_operand (&ops[2], model);
5961
5962 if (maybe_expand_insn (icode, 3, ops))
5963 return ops[0].value;
5964 return NULL_RTX;
5965 }
5966
5967 /* This function expands the legacy __sync_lock_test_and_set operation, which is
5968 generally an atomic exchange. Some limited targets only allow the
5969 constant 1 to be stored. This is an ACQUIRE operation.
5970
5971 TARGET is an optional place to stick the return value.
5972 MEM is where VAL is stored. */
5973
5974 rtx
5975 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
5976 {
5977 rtx ret;
5978
5979 /* Try an atomic_exchange first. */
5980 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
5981 if (ret)
5982 return ret;
5983
5984 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
5985 MEMMODEL_SYNC_ACQUIRE);
5986 if (ret)
5987 return ret;
5988
5989 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
5990 if (ret)
5991 return ret;
5992
5993 /* If there are no other options, try atomic_test_and_set if the value
5994 being stored is 1. */
5995 if (val == const1_rtx)
5996 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
5997
5998 return ret;
5999 }
6000
6001 /* This function expands the atomic test_and_set operation:
6002 atomically store a boolean TRUE into MEM and return the previous value.
6003
6004 MEMMODEL is the memory model variant to use.
6005 TARGET is an optional place to stick the return value. */
6006
6007 rtx
6008 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6009 {
6010 machine_mode mode = GET_MODE (mem);
6011 rtx ret, trueval, subtarget;
6012
6013 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6014 if (ret)
6015 return ret;
6016
6017 /* Be binary compatible with non-default settings of trueval, and with different
6018 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6019 another only has atomic-exchange. */
6020 if (targetm.atomic_test_and_set_trueval == 1)
6021 {
6022 trueval = const1_rtx;
6023 subtarget = target ? target : gen_reg_rtx (mode);
6024 }
6025 else
6026 {
6027 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6028 subtarget = gen_reg_rtx (mode);
6029 }
6030
6031 /* Try the atomic-exchange optab... */
6032 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6033
6034 /* ... then an atomic-compare-and-swap loop ... */
6035 if (!ret)
6036 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6037
6038 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6039 if (!ret)
6040 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6041
6042 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6043 things with the value 1. Thus we try again without trueval. */
6044 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6045 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6046
6047 /* Failing all else, assume a single threaded environment and simply
6048 perform the operation. */
6049 if (!ret)
6050 {
6051 /* If the result is ignored, skip the move to target. */
6052 if (subtarget != const0_rtx)
6053 emit_move_insn (subtarget, mem);
6054
6055 emit_move_insn (mem, trueval);
6056 ret = subtarget;
6057 }
6058
6059 /* Recall that we have to return a boolean value; rectify if trueval
6060 is not exactly one. */
6061 if (targetm.atomic_test_and_set_trueval != 1)
6062 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6063
6064 return ret;
6065 }
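
/* Illustration, not part of the original source: a worked instance of the
   trueval compensation above.  On a target whose test-and-set stores 0xff
   (i.e. targetm.atomic_test_and_set_trueval == 0xff, as with a SPARC-style
   ldstub), the exchange leaves 0 or 0xff in SUBTARGET; the final
   emit_store_flag_force then computes SUBTARGET != 0, so the caller always
   sees a canonical 0/1 boolean.  */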
6066
6067 /* This function expands the atomic exchange operation:
6068 atomically store VAL in MEM and return the previous value in MEM.
6069
6070 MEMMODEL is the memory model variant to use.
6071 TARGET is an optional place to stick the return value. */
6072
6073 rtx
6074 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6075 {
6076 rtx ret;
6077
6078 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6079
6080 /* Next try a compare-and-swap loop for the exchange. */
6081 if (!ret)
6082 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6083
6084 return ret;
6085 }
6086
6087 /* This function expands the atomic compare exchange operation:
6088
6089 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6090 *PTARGET_OVAL is an optional place to store the old value from memory.
6091 Both target parameters may be NULL or const0_rtx to indicate that we do
6092 not care about that return value. Both target parameters are updated on
6093 success to the actual location of the corresponding result.
6094
6095 MEMMODEL is the memory model variant to use.
6096
6097 The return value of the function is true for success. */
6098
6099 bool
6100 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6101 rtx mem, rtx expected, rtx desired,
6102 bool is_weak, enum memmodel succ_model,
6103 enum memmodel fail_model)
6104 {
6105 machine_mode mode = GET_MODE (mem);
6106 struct expand_operand ops[8];
6107 enum insn_code icode;
6108 rtx target_oval, target_bool = NULL_RTX;
6109 rtx libfunc;
6110
6111 /* Load expected into a register for the compare and swap. */
6112 if (MEM_P (expected))
6113 expected = copy_to_reg (expected);
6114
6115 /* Make sure we always have some place to put the return oldval.
6116 Further, make sure that place is distinct from the input expected,
6117 just in case we need that path down below. */
6118 if (ptarget_oval && *ptarget_oval == const0_rtx)
6119 ptarget_oval = NULL;
6120
6121 if (ptarget_oval == NULL
6122 || (target_oval = *ptarget_oval) == NULL
6123 || reg_overlap_mentioned_p (expected, target_oval))
6124 target_oval = gen_reg_rtx (mode);
6125
6126 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6127 if (icode != CODE_FOR_nothing)
6128 {
6129 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6130
6131 if (ptarget_bool && *ptarget_bool == const0_rtx)
6132 ptarget_bool = NULL;
6133
6134 /* Make sure we always have a place for the bool operand. */
6135 if (ptarget_bool == NULL
6136 || (target_bool = *ptarget_bool) == NULL
6137 || GET_MODE (target_bool) != bool_mode)
6138 target_bool = gen_reg_rtx (bool_mode);
6139
6140 /* Emit the compare_and_swap. */
6141 create_output_operand (&ops[0], target_bool, bool_mode);
6142 create_output_operand (&ops[1], target_oval, mode);
6143 create_fixed_operand (&ops[2], mem);
6144 create_input_operand (&ops[3], expected, mode);
6145 create_input_operand (&ops[4], desired, mode);
6146 create_integer_operand (&ops[5], is_weak);
6147 create_integer_operand (&ops[6], succ_model);
6148 create_integer_operand (&ops[7], fail_model);
6149 if (maybe_expand_insn (icode, 8, ops))
6150 {
6151 /* Return success/failure. */
6152 target_bool = ops[0].value;
6153 target_oval = ops[1].value;
6154 goto success;
6155 }
6156 }
6157
6158 /* Otherwise fall back to the original __sync_val_compare_and_swap,
6159 which is always seq-cst. */
6160 icode = optab_handler (sync_compare_and_swap_optab, mode);
6161 if (icode != CODE_FOR_nothing)
6162 {
6163 rtx cc_reg;
6164
6165 create_output_operand (&ops[0], target_oval, mode);
6166 create_fixed_operand (&ops[1], mem);
6167 create_input_operand (&ops[2], expected, mode);
6168 create_input_operand (&ops[3], desired, mode);
6169 if (!maybe_expand_insn (icode, 4, ops))
6170 return false;
6171
6172 target_oval = ops[0].value;
6173
6174 /* If the caller isn't interested in the boolean return value,
6175 skip the computation of it. */
6176 if (ptarget_bool == NULL)
6177 goto success;
6178
6179 /* Otherwise, work out if the compare-and-swap succeeded. */
6180 cc_reg = NULL_RTX;
6181 if (have_insn_for (COMPARE, CCmode))
6182 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6183 if (cc_reg)
6184 {
6185 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6186 const0_rtx, VOIDmode, 0, 1);
6187 goto success;
6188 }
6189 goto success_bool_from_val;
6190 }
6191
6192 /* Also check for library support for __sync_val_compare_and_swap. */
6193 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6194 if (libfunc != NULL)
6195 {
6196 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6197 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6198 mode, 3, addr, ptr_mode,
6199 expected, mode, desired, mode);
6200 emit_move_insn (target_oval, target);
6201
6202 /* Compute the boolean return value only if requested. */
6203 if (ptarget_bool)
6204 goto success_bool_from_val;
6205 else
6206 goto success;
6207 }
6208
6209 /* Failure. */
6210 return false;
6211
6212 success_bool_from_val:
6213 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6214 expected, VOIDmode, 1, 1);
6215 success:
6216 /* Make sure that the oval output winds up where the caller asked. */
6217 if (ptarget_oval)
6218 *ptarget_oval = target_oval;
6219 if (ptarget_bool)
6220 *ptarget_bool = target_bool;
6221 return true;
6222 }
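
/* Illustrative sketch, not part of the original source: a minimal caller
   that wants only the success flag of a strong seq-cst compare-and-swap
   (the function name is hypothetical).  */

static rtx
example_emit_cas_bool (rtx mem, rtx expected, rtx desired)
{
  rtx bool_ret = NULL_RTX;

  if (!expand_atomic_compare_and_swap (&bool_ret, /*ptarget_oval=*/NULL,
				       mem, expected, desired,
				       /*is_weak=*/false,
				       MEMMODEL_SEQ_CST, MEMMODEL_SEQ_CST))
    return NULL_RTX;	/* Neither an insn nor a libcall was available.  */
  return bool_ret;
}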
6223
6224 /* Generate asm volatile("" : : : "memory") as the memory barrier. */
6225
6226 static void
6227 expand_asm_memory_barrier (void)
6228 {
6229 rtx asm_op, clob;
6230
6231 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
6232 rtvec_alloc (0), rtvec_alloc (0),
6233 rtvec_alloc (0), UNKNOWN_LOCATION);
6234 MEM_VOLATILE_P (asm_op) = 1;
6235
6236 clob = gen_rtx_SCRATCH (VOIDmode);
6237 clob = gen_rtx_MEM (BLKmode, clob);
6238 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6239
6240 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6241 }
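
/* Illustration, not part of the original source: the PARALLEL built above
   is the RTL form of a compiler-only barrier, the same thing a user gets
   from

     __asm__ __volatile__ ("" : : : "memory");

   No instruction is emitted, but the volatile asm and the BLKmode clobber
   prevent memory accesses from being moved across it.  */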
6242
6243 /* This routine will either emit the mem_thread_fence pattern or issue a
6244 sync_synchronize to generate a fence for memory model MEMMODEL. */
6245
6246 void
6247 expand_mem_thread_fence (enum memmodel model)
6248 {
6249 if (targetm.have_mem_thread_fence ())
6250 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6251 else if (!is_mm_relaxed (model))
6252 {
6253 if (targetm.have_memory_barrier ())
6254 emit_insn (targetm.gen_memory_barrier ());
6255 else if (synchronize_libfunc != NULL_RTX)
6256 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
6257 else
6258 expand_asm_memory_barrier ();
6259 }
6260 }
6261
6262 /* This routine will either emit the mem_signal_fence pattern or issue a
6263 sync_synchronize to generate a fence for memory model MEMMODEL. */
6264
6265 void
6266 expand_mem_signal_fence (enum memmodel model)
6267 {
6268 if (targetm.have_mem_signal_fence ())
6269 emit_insn (targetm.gen_mem_signal_fence (GEN_INT (model)));
6270 else if (!is_mm_relaxed (model))
6271 {
6272 /* By default targets are coherent between a thread and the signal
6273 handler running on the same thread. Thus this really becomes a
6274 compiler barrier, in that stores must not be sunk past
6275 (or raised above) a given point. */
6276 expand_asm_memory_barrier ();
6277 }
6278 }
6279
6280 /* This function expands the atomic load operation:
6281 return the atomically loaded value in MEM.
6282
6283 MEMMODEL is the memory model variant to use.
6284 TARGET is an optional place to stick the return value. */
6285
6286 rtx
6287 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6288 {
6289 machine_mode mode = GET_MODE (mem);
6290 enum insn_code icode;
6291
6292 /* If the target supports the load directly, great. */
6293 icode = direct_optab_handler (atomic_load_optab, mode);
6294 if (icode != CODE_FOR_nothing)
6295 {
6296 struct expand_operand ops[3];
6297
6298 create_output_operand (&ops[0], target, mode);
6299 create_fixed_operand (&ops[1], mem);
6300 create_integer_operand (&ops[2], model);
6301 if (maybe_expand_insn (icode, 3, ops))
6302 return ops[0].value;
6303 }
6304
6305 /* If the size of the object is greater than word size on this target,
6306 then we assume that a load will not be atomic. */
6307 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6308 {
6309 /* Issue val = compare_and_swap (mem, 0, 0).
6310 This may cause the occasional harmless store of 0 when the value is
6311 already 0, but it seems to be OK according to the standards guys. */
6312 if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
6313 const0_rtx, false, model, model))
6314 return target;
6315 else
6316 /* Otherwise there is no atomic load; leave it to the library call. */
6317 return NULL_RTX;
6318 }
6319
6320 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6321 if (!target || target == const0_rtx)
6322 target = gen_reg_rtx (mode);
6323
6324 /* For SEQ_CST, emit a barrier before the load. */
6325 if (is_mm_seq_cst (model))
6326 expand_mem_thread_fence (model);
6327
6328 emit_move_insn (target, mem);
6329
6330 /* Emit the appropriate barrier after the load. */
6331 expand_mem_thread_fence (model);
6332
6333 return target;
6334 }
6335
6336 /* This function expands the atomic store operation:
6337 Atomically store VAL in MEM.
6338 MEMMODEL is the memory model variant to use.
6339 USE_RELEASE is true if __sync_lock_release can be used as a fallback.
6340 The function returns const0_rtx if a pattern was emitted. */
6341
6342 rtx
6343 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
6344 {
6345 machine_mode mode = GET_MODE (mem);
6346 enum insn_code icode;
6347 struct expand_operand ops[3];
6348
6349 /* If the target supports the store directly, great. */
6350 icode = direct_optab_handler (atomic_store_optab, mode);
6351 if (icode != CODE_FOR_nothing)
6352 {
6353 create_fixed_operand (&ops[0], mem);
6354 create_input_operand (&ops[1], val, mode);
6355 create_integer_operand (&ops[2], model);
6356 if (maybe_expand_insn (icode, 3, ops))
6357 return const0_rtx;
6358 }
6359
6360 /* If using __sync_lock_release is a viable alternative, try it. */
6361 if (use_release)
6362 {
6363 icode = direct_optab_handler (sync_lock_release_optab, mode);
6364 if (icode != CODE_FOR_nothing)
6365 {
6366 create_fixed_operand (&ops[0], mem);
6367 create_input_operand (&ops[1], const0_rtx, mode);
6368 if (maybe_expand_insn (icode, 2, ops))
6369 {
6370 /* lock_release is only a release barrier. */
6371 if (is_mm_seq_cst (model))
6372 expand_mem_thread_fence (model);
6373 return const0_rtx;
6374 }
6375 }
6376 }
6377
6378 /* If the size of the object is greater than word size on this target,
6379 a default store will not be atomic. Try an atomic exchange and throw away
6380 the result. If that doesn't work, don't do anything. */
6381 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6382 {
6383 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
6384 if (!target)
6385 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val);
6386 if (target)
6387 return const0_rtx;
6388 else
6389 return NULL_RTX;
6390 }
6391
6392 /* Otherwise assume stores are atomic, and emit the proper barriers. */
6393 expand_mem_thread_fence (model);
6394
6395 emit_move_insn (mem, val);
6396
6397 /* For SEQ_CST, also emit a barrier after the store. */
6398 if (is_mm_seq_cst (model))
6399 expand_mem_thread_fence (model);
6400
6401 return const0_rtx;
6402 }
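
/* Illustration, not part of the original source: for an object wider than
   a word, say a 16-byte value on a 64-bit target, the path above emits the
   store as an exchange whose result is dropped, roughly

     (void) __atomic_exchange_n (&x, val, model);

   and returns NULL_RTX if no exchange form exists either, leaving the
   caller to emit a library call.  */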
6403
6404
6405 /* Structure containing the pointers and values required to process the
6406 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
6407
6408 struct atomic_op_functions
6409 {
6410 direct_optab mem_fetch_before;
6411 direct_optab mem_fetch_after;
6412 direct_optab mem_no_result;
6413 optab fetch_before;
6414 optab fetch_after;
6415 direct_optab no_result;
6416 enum rtx_code reverse_code;
6417 };
6418
6419
6420 /* Fill in structure pointed to by OP with the various optab entries for an
6421 operation of type CODE. */
6422
6423 static void
6424 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
6425 {
6426 gcc_assert (op != NULL);
6427
6428 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
6429 in the source code during compilation, and the optab entries are not
6430 computable until runtime. Fill in the values at runtime. */
6431 switch (code)
6432 {
6433 case PLUS:
6434 op->mem_fetch_before = atomic_fetch_add_optab;
6435 op->mem_fetch_after = atomic_add_fetch_optab;
6436 op->mem_no_result = atomic_add_optab;
6437 op->fetch_before = sync_old_add_optab;
6438 op->fetch_after = sync_new_add_optab;
6439 op->no_result = sync_add_optab;
6440 op->reverse_code = MINUS;
6441 break;
6442 case MINUS:
6443 op->mem_fetch_before = atomic_fetch_sub_optab;
6444 op->mem_fetch_after = atomic_sub_fetch_optab;
6445 op->mem_no_result = atomic_sub_optab;
6446 op->fetch_before = sync_old_sub_optab;
6447 op->fetch_after = sync_new_sub_optab;
6448 op->no_result = sync_sub_optab;
6449 op->reverse_code = PLUS;
6450 break;
6451 case XOR:
6452 op->mem_fetch_before = atomic_fetch_xor_optab;
6453 op->mem_fetch_after = atomic_xor_fetch_optab;
6454 op->mem_no_result = atomic_xor_optab;
6455 op->fetch_before = sync_old_xor_optab;
6456 op->fetch_after = sync_new_xor_optab;
6457 op->no_result = sync_xor_optab;
6458 op->reverse_code = XOR;
6459 break;
6460 case AND:
6461 op->mem_fetch_before = atomic_fetch_and_optab;
6462 op->mem_fetch_after = atomic_and_fetch_optab;
6463 op->mem_no_result = atomic_and_optab;
6464 op->fetch_before = sync_old_and_optab;
6465 op->fetch_after = sync_new_and_optab;
6466 op->no_result = sync_and_optab;
6467 op->reverse_code = UNKNOWN;
6468 break;
6469 case IOR:
6470 op->mem_fetch_before = atomic_fetch_or_optab;
6471 op->mem_fetch_after = atomic_or_fetch_optab;
6472 op->mem_no_result = atomic_or_optab;
6473 op->fetch_before = sync_old_ior_optab;
6474 op->fetch_after = sync_new_ior_optab;
6475 op->no_result = sync_ior_optab;
6476 op->reverse_code = UNKNOWN;
6477 break;
6478 case NOT:
6479 op->mem_fetch_before = atomic_fetch_nand_optab;
6480 op->mem_fetch_after = atomic_nand_fetch_optab;
6481 op->mem_no_result = atomic_nand_optab;
6482 op->fetch_before = sync_old_nand_optab;
6483 op->fetch_after = sync_new_nand_optab;
6484 op->no_result = sync_nand_optab;
6485 op->reverse_code = UNKNOWN;
6486 break;
6487 default:
6488 gcc_unreachable ();
6489 }
6490 }
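
/* Illustration, not part of the original source: REVERSE_CODE records how
   to recover one fetch variant from the other.  For PLUS, new == old + val,
   so old == new - val and the table stores MINUS; XOR is its own inverse;
   AND, IOR and NAND are not invertible (information about the old value is
   lost), hence UNKNOWN.  For example, with old == 5 and val == 3, add_fetch
   returns 8, and 8 - 3 recovers the 5 that fetch_add would have returned.  */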
6491
6492 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
6493 using memory order MODEL. If AFTER is true the operation needs to return
6494 the value of *MEM after the operation, otherwise the previous value.
6495 TARGET is an optional place to place the result. The result is unused if
6496 it is const0_rtx.
6497 Return the result if there is a better sequence, otherwise NULL_RTX. */
6498
6499 static rtx
6500 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6501 enum memmodel model, bool after)
6502 {
6503 /* If the value is prefetched, or not used, it may be possible to replace
6504 the sequence with a native exchange operation. */
6505 if (!after || target == const0_rtx)
6506 {
6507 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
6508 if (code == AND && val == const0_rtx)
6509 {
6510 if (target == const0_rtx)
6511 target = gen_reg_rtx (GET_MODE (mem));
6512 return maybe_emit_atomic_exchange (target, mem, val, model);
6513 }
6514
6515 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
6516 if (code == IOR && val == constm1_rtx)
6517 {
6518 if (target == const0_rtx)
6519 target = gen_reg_rtx (GET_MODE (mem));
6520 return maybe_emit_atomic_exchange (target, mem, val, model);
6521 }
6522 }
6523
6524 return NULL_RTX;
6525 }
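
/* Illustration, not part of the original source: at the source level the
   two replacements above correspond to

     __atomic_fetch_and (&x, 0, m)   ->  __atomic_exchange_n (&x, 0, m)
     __atomic_fetch_or (&x, -1, m)   ->  __atomic_exchange_n (&x, -1, m)

   since ANDing with zero or ORing with all-ones stores a value that does
   not depend on the previous contents of *MEM.  */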
6526
6527 /* Try to emit an instruction for a specific operation variation.
6528 OPTAB contains the OP functions.
6529 TARGET is an optional place to return the result. const0_rtx means unused.
6530 MEM is the memory location to operate on.
6531 VAL is the value to use in the operation.
6532 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6533 MODEL is the memory model, if used.
6534 AFTER is true if the returned result is the value after the operation. */
6535
6536 static rtx
6537 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
6538 rtx val, bool use_memmodel, enum memmodel model, bool after)
6539 {
6540 machine_mode mode = GET_MODE (mem);
6541 struct expand_operand ops[4];
6542 enum insn_code icode;
6543 int op_counter = 0;
6544 int num_ops;
6545
6546 /* Check to see if there is a result returned. */
6547 if (target == const0_rtx)
6548 {
6549 if (use_memmodel)
6550 {
6551 icode = direct_optab_handler (optab->mem_no_result, mode);
6552 create_integer_operand (&ops[2], model);
6553 num_ops = 3;
6554 }
6555 else
6556 {
6557 icode = direct_optab_handler (optab->no_result, mode);
6558 num_ops = 2;
6559 }
6560 }
6561 /* Otherwise, we need to generate a result. */
6562 else
6563 {
6564 if (use_memmodel)
6565 {
6566 icode = direct_optab_handler (after ? optab->mem_fetch_after
6567 : optab->mem_fetch_before, mode);
6568 create_integer_operand (&ops[3], model);
6569 num_ops = 4;
6570 }
6571 else
6572 {
6573 icode = optab_handler (after ? optab->fetch_after
6574 : optab->fetch_before, mode);
6575 num_ops = 3;
6576 }
6577 create_output_operand (&ops[op_counter++], target, mode);
6578 }
6579 if (icode == CODE_FOR_nothing)
6580 return NULL_RTX;
6581
6582 create_fixed_operand (&ops[op_counter++], mem);
6583 /* VAL may have been promoted to a wider mode. Shrink it if so. */
6584 create_convert_operand_to (&ops[op_counter++], val, mode, true);
6585
6586 if (maybe_expand_insn (icode, num_ops, ops))
6587 return (target == const0_rtx ? const0_rtx : ops[0].value);
6588
6589 return NULL_RTX;
6590 }
6591
6592
6593 /* This function expands an atomic fetch_OP or OP_fetch operation:
6594 TARGET is an optional place to stick the return value. const0_rtx indicates
6595 the result is unused.
6596 Atomically fetch the value at MEM, perform the operation with VAL, and store
6597 the result back in MEM. CODE is the operation being performed (OP).
6598 MEMMODEL is the memory model variant to use.
6599 AFTER is true to return the result of the operation (OP_fetch).
6600 AFTER is false to return the value before the operation (fetch_OP).
6601
6602 This function will *only* generate instructions if there is a direct
6603 optab. No compare and swap loops or libcalls will be generated. */
6604
6605 static rtx
6606 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6607 enum rtx_code code, enum memmodel model,
6608 bool after)
6609 {
6610 machine_mode mode = GET_MODE (mem);
6611 struct atomic_op_functions optab;
6612 rtx result;
6613 bool unused_result = (target == const0_rtx);
6614
6615 get_atomic_op_for_code (&optab, code);
6616
6617 /* Check to see if there are any better instructions. */
6618 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6619 if (result)
6620 return result;
6621
6622 /* Check for the case where the result isn't used and try those patterns. */
6623 if (unused_result)
6624 {
6625 /* Try the memory model variant first. */
6626 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6627 if (result)
6628 return result;
6629
6630 /* Next try the old style without a memory model. */
6631 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6632 if (result)
6633 return result;
6634
6635 /* There is no no-result pattern, so try patterns with a result. */
6636 target = NULL_RTX;
6637 }
6638
6639 /* Try the __atomic version. */
6640 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6641 if (result)
6642 return result;
6643
6644 /* Try the older __sync version. */
6645 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6646 if (result)
6647 return result;
6648
6649 /* If the fetch value can be calculated from the other variation of fetch,
6650 try that operation. */
6651 if (after || unused_result || optab.reverse_code != UNKNOWN)
6652 {
6653 /* Try the __atomic version, then the older __sync version. */
6654 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6655 if (!result)
6656 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6657
6658 if (result)
6659 {
6660 /* If the result isn't used, no need to do compensation code. */
6661 if (unused_result)
6662 return result;
6663
6664 /* Issue compensation code. fetch_after == fetch_before OP val;
6665 fetch_before == fetch_after REVERSE_OP val. */
6666 if (!after)
6667 code = optab.reverse_code;
6668 if (code == NOT)
6669 {
6670 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6671 true, OPTAB_LIB_WIDEN);
6672 result = expand_simple_unop (mode, NOT, result, target, true);
6673 }
6674 else
6675 result = expand_simple_binop (mode, code, result, val, target,
6676 true, OPTAB_LIB_WIDEN);
6677 return result;
6678 }
6679 }
6680
6681 /* No direct opcode can be generated. */
6682 return NULL_RTX;
6683 }
6684
6685
6686
6687 /* This function expands an atomic fetch_OP or OP_fetch operation:
6688 TARGET is an optional place to stick the return value. const0_rtx indicates
6689 the result is unused.
6690 Atomically fetch the value at MEM, perform the operation with VAL, and store
6691 the result back in MEM. CODE is the operation being performed (OP).
6692 MEMMODEL is the memory model variant to use.
6693 AFTER is true to return the result of the operation (OP_fetch).
6694 AFTER is false to return the value before the operation (fetch_OP). */
6695 rtx
6696 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6697 enum memmodel model, bool after)
6698 {
6699 machine_mode mode = GET_MODE (mem);
6700 rtx result;
6701 bool unused_result = (target == const0_rtx);
6702
6703 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
6704 after);
6705
6706 if (result)
6707 return result;
6708
6709 /* Add/sub can be implemented by doing the reverse operation with -(val). */
6710 if (code == PLUS || code == MINUS)
6711 {
6712 rtx tmp;
6713 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
6714
6715 start_sequence ();
6716 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
6717 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
6718 model, after);
6719 if (result)
6720 {
6721 /* The reversed operation worked, so emit the insns and return. */
6722 tmp = get_insns ();
6723 end_sequence ();
6724 emit_insn (tmp);
6725 return result;
6726 }
6727
6728 /* The reversed operation did not work; throw away the negation code and continue. */
6729 end_sequence ();
6730 }
6731
6732 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
6733 if (!can_compare_and_swap_p (mode, false))
6734 {
6735 rtx libfunc;
6736 bool fixup = false;
6737 enum rtx_code orig_code = code;
6738 struct atomic_op_functions optab;
6739
6740 get_atomic_op_for_code (&optab, code);
6741 libfunc = optab_libfunc (after ? optab.fetch_after
6742 : optab.fetch_before, mode);
6743 if (libfunc == NULL
6744 && (after || unused_result || optab.reverse_code != UNKNOWN))
6745 {
6746 fixup = true;
6747 if (!after)
6748 code = optab.reverse_code;
6749 libfunc = optab_libfunc (after ? optab.fetch_before
6750 : optab.fetch_after, mode);
6751 }
6752 if (libfunc != NULL)
6753 {
6754 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6755 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
6756 2, addr, ptr_mode, val, mode);
6757
6758 if (!unused_result && fixup)
6759 result = expand_simple_binop (mode, code, result, val, target,
6760 true, OPTAB_LIB_WIDEN);
6761 return result;
6762 }
6763
6764 /* We need the original code for any further attempts. */
6765 code = orig_code;
6766 }
6767
6768 /* If nothing else has succeeded, default to a compare and swap loop. */
6769 if (can_compare_and_swap_p (mode, true))
6770 {
6771 rtx_insn *insn;
6772 rtx t0 = gen_reg_rtx (mode), t1;
6773
6774 start_sequence ();
6775
6776 /* If the result is used, get a register for it. */
6777 if (!unused_result)
6778 {
6779 if (!target || !register_operand (target, mode))
6780 target = gen_reg_rtx (mode);
6781 /* If fetch_before, copy the value now. */
6782 if (!after)
6783 emit_move_insn (target, t0);
6784 }
6785 else
6786 target = const0_rtx;
6787
6788 t1 = t0;
6789 if (code == NOT)
6790 {
6791 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
6792 true, OPTAB_LIB_WIDEN);
6793 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
6794 }
6795 else
6796 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
6797 OPTAB_LIB_WIDEN);
6798
6799 /* For after, copy the value now. */
6800 if (!unused_result && after)
6801 emit_move_insn (target, t1);
6802 insn = get_insns ();
6803 end_sequence ();
6804
6805 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6806 return target;
6807 }
6808
6809 return NULL_RTX;
6810 }
6811 \f
6812 /* Return true if OPERAND is suitable for operand number OPNO of
6813 instruction ICODE. */
6814
6815 bool
6816 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
6817 {
6818 return (!insn_data[(int) icode].operand[opno].predicate
6819 || (insn_data[(int) icode].operand[opno].predicate
6820 (operand, insn_data[(int) icode].operand[opno].mode)));
6821 }
6822 \f
6823 /* TARGET is a target of a multiword operation that we are going to
6824 implement as a series of word-mode operations. Return true if
6825 TARGET is suitable for this purpose. */
6826
6827 bool
6828 valid_multiword_target_p (rtx target)
6829 {
6830 machine_mode mode;
6831 int i;
6832
6833 mode = GET_MODE (target);
6834 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
6835 if (!validate_subreg (word_mode, mode, target, i))
6836 return false;
6837 return true;
6838 }
6839
6840 /* Like maybe_legitimize_operand, but do not change the code of the
6841 current rtx value. */
6842
6843 static bool
6844 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
6845 struct expand_operand *op)
6846 {
6847 /* See if the operand matches in its current form. */
6848 if (insn_operand_matches (icode, opno, op->value))
6849 return true;
6850
6851 /* If the operand is a memory whose address has no side effects,
6852 try forcing the address into a non-virtual pseudo register.
6853 The check for side effects is important because copy_to_mode_reg
6854 cannot handle things like auto-modified addresses. */
6855 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
6856 {
6857 rtx addr, mem;
6858
6859 mem = op->value;
6860 addr = XEXP (mem, 0);
6861 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
6862 && !side_effects_p (addr))
6863 {
6864 rtx_insn *last;
6865 machine_mode mode;
6866
6867 last = get_last_insn ();
6868 mode = get_address_mode (mem);
6869 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
6870 if (insn_operand_matches (icode, opno, mem))
6871 {
6872 op->value = mem;
6873 return true;
6874 }
6875 delete_insns_since (last);
6876 }
6877 }
6878
6879 return false;
6880 }
6881
6882 /* Try to make OP match operand OPNO of instruction ICODE. Return true
6883 on success, storing the new operand value back in OP. */
6884
6885 static bool
6886 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
6887 struct expand_operand *op)
6888 {
6889 machine_mode mode, imode;
6890 bool old_volatile_ok, result;
6891
6892 mode = op->mode;
6893 switch (op->type)
6894 {
6895 case EXPAND_FIXED:
6896 old_volatile_ok = volatile_ok;
6897 volatile_ok = true;
6898 result = maybe_legitimize_operand_same_code (icode, opno, op);
6899 volatile_ok = old_volatile_ok;
6900 return result;
6901
6902 case EXPAND_OUTPUT:
6903 gcc_assert (mode != VOIDmode);
6904 if (op->value
6905 && op->value != const0_rtx
6906 && GET_MODE (op->value) == mode
6907 && maybe_legitimize_operand_same_code (icode, opno, op))
6908 return true;
6909
6910 op->value = gen_reg_rtx (mode);
6911 break;
6912
6913 case EXPAND_INPUT:
6914 input:
6915 gcc_assert (mode != VOIDmode);
6916 gcc_assert (GET_MODE (op->value) == VOIDmode
6917 || GET_MODE (op->value) == mode);
6918 if (maybe_legitimize_operand_same_code (icode, opno, op))
6919 return true;
6920
6921 op->value = copy_to_mode_reg (mode, op->value);
6922 break;
6923
6924 case EXPAND_CONVERT_TO:
6925 gcc_assert (mode != VOIDmode);
6926 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
6927 goto input;
6928
6929 case EXPAND_CONVERT_FROM:
6930 if (GET_MODE (op->value) != VOIDmode)
6931 mode = GET_MODE (op->value);
6932 else
6933 /* The caller must tell us what mode this value has. */
6934 gcc_assert (mode != VOIDmode);
6935
6936 imode = insn_data[(int) icode].operand[opno].mode;
6937 if (imode != VOIDmode && imode != mode)
6938 {
6939 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
6940 mode = imode;
6941 }
6942 goto input;
6943
6944 case EXPAND_ADDRESS:
6945 gcc_assert (mode != VOIDmode);
6946 op->value = convert_memory_address (mode, op->value);
6947 goto input;
6948
6949 case EXPAND_INTEGER:
6950 mode = insn_data[(int) icode].operand[opno].mode;
6951 if (mode != VOIDmode && const_int_operand (op->value, mode))
6952 goto input;
6953 break;
6954 }
6955 return insn_operand_matches (icode, opno, op->value);
6956 }
6957
6958 /* Make OP describe an input operand that should have the same value
6959 as VALUE, after any mode conversion that the target might request.
6960 TYPE is the type of VALUE. */
6961
6962 void
6963 create_convert_operand_from_type (struct expand_operand *op,
6964 rtx value, tree type)
6965 {
6966 create_convert_operand_from (op, value, TYPE_MODE (type),
6967 TYPE_UNSIGNED (type));
6968 }
6969
6970 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
6971 of instruction ICODE. Return true on success, leaving the new operand
6972 values in the OPS themselves. Emit no code on failure. */
6973
6974 bool
6975 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
6976 unsigned int nops, struct expand_operand *ops)
6977 {
6978 rtx_insn *last;
6979 unsigned int i;
6980
6981 last = get_last_insn ();
6982 for (i = 0; i < nops; i++)
6983 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
6984 {
6985 delete_insns_since (last);
6986 return false;
6987 }
6988 return true;
6989 }
6990
6991 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
6992 as its operands. Return the instruction pattern on success,
6993 and emit any necessary set-up code. Return null and emit no
6994 code on failure. */
6995
6996 rtx_insn *
6997 maybe_gen_insn (enum insn_code icode, unsigned int nops,
6998 struct expand_operand *ops)
6999 {
7000 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7001 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7002 return NULL;
7003
7004 switch (nops)
7005 {
7006 case 1:
7007 return GEN_FCN (icode) (ops[0].value);
7008 case 2:
7009 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7010 case 3:
7011 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7012 case 4:
7013 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7014 ops[3].value);
7015 case 5:
7016 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7017 ops[3].value, ops[4].value);
7018 case 6:
7019 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7020 ops[3].value, ops[4].value, ops[5].value);
7021 case 7:
7022 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7023 ops[3].value, ops[4].value, ops[5].value,
7024 ops[6].value);
7025 case 8:
7026 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7027 ops[3].value, ops[4].value, ops[5].value,
7028 ops[6].value, ops[7].value);
7029 case 9:
7030 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7031 ops[3].value, ops[4].value, ops[5].value,
7032 ops[6].value, ops[7].value, ops[8].value);
7033 }
7034 gcc_unreachable ();
7035 }
7036
7037 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7038 as its operands. Return true on success and emit no code on failure. */
7039
7040 bool
7041 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7042 struct expand_operand *ops)
7043 {
7044 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7045 if (pat)
7046 {
7047 emit_insn (pat);
7048 return true;
7049 }
7050 return false;
7051 }
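
/* Illustrative sketch, not part of the original source: the canonical
   idiom for the operand helpers above, emitting a one-output, two-input
   insn when ICODE can accept the operands (the function name is
   hypothetical).  */

static rtx
example_maybe_emit_binary (enum insn_code icode, machine_mode mode,
			   rtx x, rtx y, rtx target)
{
  struct expand_operand ops[3];

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], x, mode);
  create_input_operand (&ops[2], y, mode);
  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;	/* May differ from TARGET.  */
  return NULL_RTX;		/* Nothing was emitted.  */
}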
7052
7053 /* Like maybe_expand_insn, but for jumps. */
7054
7055 bool
7056 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7057 struct expand_operand *ops)
7058 {
7059 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7060 if (pat)
7061 {
7062 emit_jump_insn (pat);
7063 return true;
7064 }
7065 return false;
7066 }
7067
7068 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7069 as its operands. */
7070
7071 void
7072 expand_insn (enum insn_code icode, unsigned int nops,
7073 struct expand_operand *ops)
7074 {
7075 if (!maybe_expand_insn (icode, nops, ops))
7076 gcc_unreachable ();
7077 }
7078
7079 /* Like expand_insn, but for jumps. */
7080
7081 void
7082 expand_jump_insn (enum insn_code icode, unsigned int nops,
7083 struct expand_operand *ops)
7084 {
7085 if (!maybe_expand_jump_insn (icode, nops, ops))
7086 gcc_unreachable ();
7087 }