/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "predict.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "except.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
\f
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
          && (rtx_equal_p (target, op0)
              || (op1 && rtx_equal_p (target, op1))))
        {
          /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
             over expanding it as temp = MEM op X, MEM = temp.  If the target
             supports MEM = MEM op X instructions, it is sometimes too hard
             to reconstruct that form later, especially if X is also a memory,
             and due to multiple occurrences of addresses the address might
             be forced into register unnecessarily.
             Note that not emitting the REG_EQUAL note might inhibit
             CSE in some cases.  */
          set = single_set (last_insn);
          if (set
              && GET_CODE (SET_SRC (set)) == code
              && MEM_P (SET_DEST (set))
              && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
                  || (op1 && rtx_equal_p (SET_DEST (set),
                                          XEXP (SET_SRC (set), 1)))))
            return 1;
        }
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
          {
            note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
            if (GET_MODE_SIZE (GET_MODE (op0))
                > GET_MODE_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, GET_MODE (op0));
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, GET_MODE (op0));
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
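
/* Worked example (illustrative, not used by the compiler itself): after
   expanding TARGET = OP0 rotated left by 5 in SImode as a multi-insn
   shift/ior sequence, a caller would finish with

       add_equal_note (insns, target, ROTATE, op0, GEN_INT (5));

   which attaches the REG_EQUAL note (rotate:SI op0 (const_int 5)) to the
   last insn in INSNS that sets TARGET, letting later CSE passes treat the
   whole sequence as one known value.  */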
\f
/* Given two input operands, OP0 and OP1, determine what the correct
   from_mode for a widening operation would be.  In most cases this would be
   the mode of OP0, but if that's a constant it'll be VOIDmode, which isn't
   useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
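
/* For illustration: when widening a multiply to DImode where one operand
   is a SImode register R and the other a CONST_INT (whose GET_MODE is
   VOIDmode),

       widened_mode (DImode, r, GEN_INT (100))

   returns SImode, the mode of the non-constant operand; only if both
   operands were constants would it fall back to TO_MODE itself.  */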
\f
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;
  /* If we must extend, do so.  If OP is a SUBREG for a promoted object, also
     extend, since it will be more efficient to do so unless the signedness of
     the promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_lowpart (mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
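
/* Illustrative sketch (assuming BITS_PER_WORD == 32): widening a QImode
   register R to SImode for a bitwise AND can use

       widen_operand (r, SImode, QImode, 1, 1);

   Since NO_EXTEND is nonzero and SImode fits in one word, this is just a
   paradoxical lowpart SUBREG of R; the garbage in the upper 24 bits is
   harmless for AND, whereas a right shift would have needed a real
   zero-extension.  */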
\f
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In this case WIDE_OP, OP0 and optionally OP1 would all be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}


/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
                                         mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}

/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

static rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = optab_handler (vec_init_optab, vmode);
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
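
/* Usage sketch (illustrative, assuming a V4SImode target): broadcasting
   (const_int 1) yields the immediate (const_vector:V4SI [1 1 1 1]) with no
   code emitted, while broadcasting a SImode register only succeeds when
   the target provides a vec_init pattern for V4SImode:

       rtx splat = expand_vector_broadcast (V4SImode, const1_rtx);
       rtx vreg = expand_vector_broadcast (V4SImode, some_si_reg);

   SPLAT is always non-null here; VREG may be NULL, and callers such as
   the vector-shift code in expand_binop must cope with that.  */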

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
                                            op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_wide_int_const
            (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
                                                op1_mode), op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
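
/* Worked example (illustrative, BITS_PER_WORD == 32, left shift by a
   variable count N in [1, 31]): with OUTOF_INPUT the low word LO and
   INTO_INPUT the high word HI, the code above computes

       INTO_TARGET  = (HI << N) | (LO >> (32 - N));
       OUTOF_TARGET = LO << N;

   When N might reach 32 and word shifts truncate their count to 5 bits
   (SHIFT_MASK == 31), the (32 - N) shift is instead split as
   (LO >> 1) >> (31 - N), with 31 - N obtained as ~N (the truncating
   shifter reads ~N as 31 - N); both counts then stay inside [0, 31], so
   no shift by 32 is ever emitted.  */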


/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                            cmp_code, cmp1, cmp2,
                                            outof_input, into_input,
                                            op1, superword_op1,
                                            outof_target, into_target,
                                            unsignedp, methods, shift_mask))
        return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label, -1);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
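
/* Worked example (illustrative, BITS_PER_WORD == 32, variable count N,
   SHIFT_MASK == 31): the code above tests (N & 32) == 0 to choose between
   the subword and superword sequences, and can pass N itself as
   SUPERWORD_OP1 because the word shifter only reads the low five bits
   anyway.  Without that truncation guarantee it instead computes
   CMP1 = N - 32 once, branches on CMP1 < 0, and reuses CMP1 as the
   superword shift count.  */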
\f
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:

                                   _______________________
                                  [__op0_high_|__op0_low__]
                                   _______________________
                              *   [__op1_high_|__op1_low__]
                       _______________________________________________
                                   _______________________
    (1)                           [__op0_low__*__op1_low__]
                       _______________________
    (2a)              [__op0_low__*__op1_high_]
                       _______________________
    (2b)              [__op0_high_*__op1_low__]
           _______________________
    (3)   [__op0_high_*__op1_high_]


   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */

static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* Unless we can directly compute the product of the low-order words
     with an unsigned widening multiply, we must adjust the operands as
     described above.  Either way, we begin by trying the two extra
     multiplications and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
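
/* Worked example of the sign adjustment (illustrative, using
   BITS_PER_WORD == 8 for readability): take op0_low == 0x80, which is 128
   unsigned but -128 as a signed byte.  A signed widening multiply computes
   op0_low * op1_low as (128 - 256) * op1_low, i.e. 256 * op1_low short of
   the unsigned product.  The logical shift op0_low >> 7 yields 1 exactly
   in this case, and adding it to op0_high before forming partial product
   (2b) contributes the missing op1_low * 256 to the final sum; when the
   sign bit of op0_low is clear, the shift yields 0 and nothing changes.  */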
\f
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}

/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}

/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
          > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
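
/* For illustration (hypothetical target costs): if (const_int 0x12345678)
   needs a two-insn load as a multiply operand but is free once in a
   register, rtx_cost of the MULT operand exceeds set_src_cost, so the
   constant is forced into a pseudo here; a cheap constant such as
   (const_int 4) is passed through unchanged.  */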

/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx_insn *last)
{
  machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
                                                      from_mode, 1);
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
           ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
          && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value,
                               optab_to_code (binoptab),
                               ops[1].value, ops[2].value))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}

/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, an out-of-range constant op1 might have been expanded for a
     different mode than MODE.  Since such shift counts are invalid, force
     them into a register to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
           && shift_optab_p (binoptab)
           && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && find_widening_optab_handler (binoptab, mode,
                                      widened_mode (mode, op0, op1), 1)
         != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
                                    unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
      && mclass == MODE_INT)
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (mode);

      if (CONST_INT_P (op1))
        newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
        newop1 = negate_rtx (GET_MODE (op1), op1);
      else
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
                               gen_int_mode (bits, GET_MODE (op1)), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
      && (widening_optab_handler ((unsignedp ? umul_widen_optab
                                             : smul_widen_optab),
                                  GET_MODE_2XWIDER_MODE (mode), mode)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
        otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
        otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
        otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
        otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
        otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
        {
          /* The scalar may have been extended to be too wide.  Truncate
             it back to the proper size to fit in the broadcast vector.  */
          machine_mode inner_mode = GET_MODE_INNER (mode);
          if (!CONST_INT_P (op1)
              && (GET_MODE_BITSIZE (inner_mode)
                  < GET_MODE_BITSIZE (GET_MODE (op1))))
            op1 = force_reg (inner_mode,
                             simplify_gen_unary (TRUNCATE, inner_mode, op1,
                                                 GET_MODE (op1)));
          rtx vop1 = expand_vector_broadcast (mode, op1);
          if (vop1)
            {
              temp = expand_binop_directly (mode, otheroptab, op0, vop1,
                                            target, unsignedp, methods, last);
              if (temp)
                return temp;
            }
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (find_widening_optab_handler ((unsignedp
                                                  ? umul_widen_optab
                                                  : smul_widen_optab),
                                                 GET_MODE_WIDER_MODE (wider_mode),
                                                 mode, 0)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && mclass == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0
          || target == op0
          || target == op1
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          emit_insn (insns);
          return target;
        }
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && mclass == MODE_INT
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx_insn *insns;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0
              || target == op0
              || target == op1
              || !valid_multiword_target_p (target))
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              emit_insn (insns);
              return target;
            }
          end_sequence ();
        }
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && mclass == MODE_INT
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }
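
  /* Worked example of the rotate decomposition above (illustrative,
     BITS_PER_WORD == 32, rotate left by a constant N with 0 < N < 32):
     with LO and HI the low and high input words, the sequence computes

         HI' = (HI << N) | (LO >> (32 - N));
         LO' = (LO << N) | (HI >> (32 - N));

     a rotate by exactly 32 degenerates into swapping the two words, and
     counts above 32 use the mirrored pair of shift amounts instead.  */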

  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);
                  /* Logical-ior the two possible carries together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx_insn *temp = emit_move_insn (target, xtarget);

              set_dst_reg_note (temp, REG_EQUAL,
                                gen_rtx_fmt_ee (optab_to_code (binoptab),
                                                mode, copy_rtx (xop0),
                                                copy_rtx (xop1)),
                                target);
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }
1679
1680 /* Attempt to synthesize double word multiplies using a sequence of word
1681 mode multiplications. We first attempt to generate a sequence using a
1682 more efficient unsigned widening multiply, and if that fails we then
1683 try using a signed widening multiply. */
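/* Illustrative only, with W = BITS_PER_WORD: since just the low 2*W bits
   of the full product are wanted, one W x W -> 2*W widening multiply and
   two plain W-bit multiplies suffice:

       t    = (wide) a.lo * b.lo
       p.lo = low (t)
       p.hi = high (t) + a.lo * b.hi + a.hi * b.lo

   With only a signed widening multiply available, extra correction terms
   are required; expand_doubleword_mult handles both variants.  */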
1684
1685 if (binoptab == smul_optab
1686 && mclass == MODE_INT
1687 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1688 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
1689 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
1690 {
1691 rtx product = NULL_RTX;
1692 if (widening_optab_handler (umul_widen_optab, mode, word_mode)
1693 != CODE_FOR_nothing)
1694 {
1695 product = expand_doubleword_mult (mode, op0, op1, target,
1696 true, methods);
1697 if (!product)
1698 delete_insns_since (last);
1699 }
1700
1701 if (product == NULL_RTX
1702 && widening_optab_handler (smul_widen_optab, mode, word_mode)
1703 != CODE_FOR_nothing)
1704 {
1705 product = expand_doubleword_mult (mode, op0, op1, target,
1706 false, methods);
1707 if (!product)
1708 delete_insns_since (last);
1709 }
1710
1711 if (product != NULL_RTX)
1712 {
1713 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
1714 {
1715 rtx_insn *move = emit_move_insn (target ? target : product,
1716 product);
1717 set_dst_reg_note (move,
1718 REG_EQUAL,
1719 gen_rtx_fmt_ee (MULT, mode,
1720 copy_rtx (op0),
1721 copy_rtx (op1)),
1722 target ? target : product);
1723 }
1724 return product;
1725 }
1726 }
1727
1728 /* It can't be open-coded in this mode.
1729 Use a library call if one is available and caller says that's ok. */
1730
1731 libfunc = optab_libfunc (binoptab, mode);
1732 if (libfunc
1733 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1734 {
1735 rtx_insn *insns;
1736 rtx op1x = op1;
1737 machine_mode op1_mode = mode;
1738 rtx value;
1739
1740 start_sequence ();
1741
1742 if (shift_optab_p (binoptab))
1743 {
1744 op1_mode = targetm.libgcc_shift_count_mode ();
1745 /* Specify unsigned here,
1746 since negative shift counts are meaningless. */
1747 op1x = convert_to_mode (op1_mode, op1, 1);
1748 }
1749
1750 if (GET_MODE (op0) != VOIDmode
1751 && GET_MODE (op0) != mode)
1752 op0 = convert_to_mode (mode, op0, unsignedp);
1753
1754 /* Pass 1 for NO_QUEUE so we don't lose any increments
1755 if the libcall is cse'd or moved. */
1756 value = emit_library_call_value (libfunc,
1757 NULL_RTX, LCT_CONST, mode, 2,
1758 op0, mode, op1x, op1_mode);
1759
1760 insns = get_insns ();
1761 end_sequence ();
1762
1763 bool trapv = trapv_binoptab_p (binoptab);
1764 target = gen_reg_rtx (mode);
1765 emit_libcall_block_1 (insns, target, value,
1766 trapv ? NULL_RTX
1767 : gen_rtx_fmt_ee (optab_to_code (binoptab),
1768 mode, op0, op1), trapv);
1769
1770 return target;
1771 }
1772
1773 delete_insns_since (last);
1774
1775 /* It can't be done in this mode. Can we do it in a wider mode? */
1776
1777 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1778 || methods == OPTAB_MUST_WIDEN))
1779 {
1780 /* Caller says, don't even try. */
1781 delete_insns_since (entry_last);
1782 return 0;
1783 }
1784
1785 /* Compute the value of METHODS to pass to recursive calls.
1786 Don't allow widening to be tried recursively. */
1787
1788 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1789
1790 /* Look for a wider mode of the same class for which it appears we can do
1791 the operation. */
1792
1793 if (CLASS_HAS_WIDER_MODES_P (mclass))
1794 {
1795 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1796 wider_mode != VOIDmode;
1797 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1798 {
1799 if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
1800 != CODE_FOR_nothing
1801 || (methods == OPTAB_LIB
1802 && optab_libfunc (binoptab, wider_mode)))
1803 {
1804 rtx xop0 = op0, xop1 = op1;
1805 int no_extend = 0;
1806
1807 /* For certain integer operations, we need not actually extend
1808 the narrow operands, as long as we will truncate
1809 the results to the same narrowness. */
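/* E.g. the low word of (a & b) depends only on the low words of A and B,
   so any garbage in the widened high bits is harmless.  Shift counts are
   the exception: the second operand of ashl must really be extended,
   hence the stricter flag passed for XOP1 below.  */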
1810
1811 if ((binoptab == ior_optab || binoptab == and_optab
1812 || binoptab == xor_optab
1813 || binoptab == add_optab || binoptab == sub_optab
1814 || binoptab == smul_optab || binoptab == ashl_optab)
1815 && mclass == MODE_INT)
1816 no_extend = 1;
1817
1818 xop0 = widen_operand (xop0, wider_mode, mode,
1819 unsignedp, no_extend);
1820
1821 /* The second operand of a shift must always be extended. */
1822 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1823 no_extend && binoptab != ashl_optab);
1824
1825 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1826 unsignedp, methods);
1827 if (temp)
1828 {
1829 if (mclass != MODE_INT
1830 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1831 {
1832 if (target == 0)
1833 target = gen_reg_rtx (mode);
1834 convert_move (target, temp, 0);
1835 return target;
1836 }
1837 else
1838 return gen_lowpart (mode, temp);
1839 }
1840 else
1841 delete_insns_since (last);
1842 }
1843 }
1844 }
1845
1846 delete_insns_since (entry_last);
1847 return 0;
1848 }
1849 \f
1850 /* Expand a binary operator which has both signed and unsigned forms.
1851 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1852 signed operations.
1853
1854 If we widen unsigned operands, we may use a signed wider operation instead
1855 of an unsigned wider operation, since the result would be the same. */
1856
1857 rtx
1858 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
1859 rtx op0, rtx op1, rtx target, int unsignedp,
1860 enum optab_methods methods)
1861 {
1862 rtx temp;
1863 optab direct_optab = unsignedp ? uoptab : soptab;
1864 bool save_enable;
1865
1866 /* Do it without widening, if possible. */
1867 temp = expand_binop (mode, direct_optab, op0, op1, target,
1868 unsignedp, OPTAB_DIRECT);
1869 if (temp || methods == OPTAB_DIRECT)
1870 return temp;
1871
1872 /* Try widening to a signed int. Disable any direct use of any
1873 signed insn in the current mode. */
1874 save_enable = swap_optab_enable (soptab, mode, false);
1875
1876 temp = expand_binop (mode, soptab, op0, op1, target,
1877 unsignedp, OPTAB_WIDEN);
1878
1879 /* For unsigned operands, try widening to an unsigned int. */
1880 if (!temp && unsignedp)
1881 temp = expand_binop (mode, uoptab, op0, op1, target,
1882 unsignedp, OPTAB_WIDEN);
1883 if (temp || methods == OPTAB_WIDEN)
1884 goto egress;
1885
1886 /* Use the right width libcall if that exists. */
1887 temp = expand_binop (mode, direct_optab, op0, op1, target,
1888 unsignedp, OPTAB_LIB);
1889 if (temp || methods == OPTAB_LIB)
1890 goto egress;
1891
1892 /* Must widen and use a libcall; use either signed or unsigned. */
1893 temp = expand_binop (mode, soptab, op0, op1, target,
1894 unsignedp, methods);
1895 if (!temp && unsignedp)
1896 temp = expand_binop (mode, uoptab, op0, op1, target,
1897 unsignedp, methods);
1898
1899 egress:
1900 /* Undo the fiddling above. */
1901 if (save_enable)
1902 swap_optab_enable (soptab, mode, true);
1903 return temp;
1904 }
1905 \f
1906 /* Generate code to perform an operation specified by UNOPPTAB
1907 on operand OP0, with two results to TARG0 and TARG1.
1908 We assume that the order of the operands for the instruction
1909 is TARG0, TARG1, OP0.
1910
1911 Either TARG0 or TARG1 may be zero, but what that means is that
1912 the result is not actually wanted. We will generate it into
1913 a dummy pseudo-reg and discard it. They may not both be zero.
1914
1915 Returns 1 if this operation can be performed; 0 if not. */
1916
1917 int
1918 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1919 int unsignedp)
1920 {
1921 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1922 enum mode_class mclass;
1923 machine_mode wider_mode;
1924 rtx_insn *entry_last = get_last_insn ();
1925 rtx_insn *last;
1926
1927 mclass = GET_MODE_CLASS (mode);
1928
1929 if (!targ0)
1930 targ0 = gen_reg_rtx (mode);
1931 if (!targ1)
1932 targ1 = gen_reg_rtx (mode);
1933
1934 /* Record where to go back to if we fail. */
1935 last = get_last_insn ();
1936
1937 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
1938 {
1939 struct expand_operand ops[3];
1940 enum insn_code icode = optab_handler (unoptab, mode);
1941
1942 create_fixed_operand (&ops[0], targ0);
1943 create_fixed_operand (&ops[1], targ1);
1944 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
1945 if (maybe_expand_insn (icode, 3, ops))
1946 return 1;
1947 }
1948
1949 /* It can't be done in this mode. Can we do it in a wider mode? */
1950
1951 if (CLASS_HAS_WIDER_MODES_P (mclass))
1952 {
1953 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1954 wider_mode != VOIDmode;
1955 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1956 {
1957 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
1958 {
1959 rtx t0 = gen_reg_rtx (wider_mode);
1960 rtx t1 = gen_reg_rtx (wider_mode);
1961 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1962
1963 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1964 {
1965 convert_move (targ0, t0, unsignedp);
1966 convert_move (targ1, t1, unsignedp);
1967 return 1;
1968 }
1969 else
1970 delete_insns_since (last);
1971 }
1972 }
1973 }
1974
1975 delete_insns_since (entry_last);
1976 return 0;
1977 }
1978 \f
1979 /* Generate code to perform an operation specified by BINOPTAB
1980 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1981 We assume that the order of the operands for the instruction
1982 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1983 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1984
1985 Either TARG0 or TARG1 may be zero, but what that means is that
1986 the result is not actually wanted. We will generate it into
1987 a dummy pseudo-reg and discard it. They may not both be zero.
1988
1989 Returns 1 if this operation can be performed; 0 if not. */
1990
1991 int
1992 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1993 int unsignedp)
1994 {
1995 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1996 enum mode_class mclass;
1997 machine_mode wider_mode;
1998 rtx_insn *entry_last = get_last_insn ();
1999 rtx_insn *last;
2000
2001 mclass = GET_MODE_CLASS (mode);
2002
2003 if (!targ0)
2004 targ0 = gen_reg_rtx (mode);
2005 if (!targ1)
2006 targ1 = gen_reg_rtx (mode);
2007
2008 /* Record where to go back to if we fail. */
2009 last = get_last_insn ();
2010
2011 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2012 {
2013 struct expand_operand ops[4];
2014 enum insn_code icode = optab_handler (binoptab, mode);
2015 machine_mode mode0 = insn_data[icode].operand[1].mode;
2016 machine_mode mode1 = insn_data[icode].operand[2].mode;
2017 rtx xop0 = op0, xop1 = op1;
2018
2019 /* If we are optimizing, force expensive constants into a register. */
2020 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2021 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2022
2023 create_fixed_operand (&ops[0], targ0);
2024 create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
2025 create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
2026 create_fixed_operand (&ops[3], targ1);
2027 if (maybe_expand_insn (icode, 4, ops))
2028 return 1;
2029 delete_insns_since (last);
2030 }
2031
2032 /* It can't be done in this mode. Can we do it in a wider mode? */
2033
2034 if (CLASS_HAS_WIDER_MODES_P (mclass))
2035 {
2036 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2037 wider_mode != VOIDmode;
2038 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2039 {
2040 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2041 {
2042 rtx t0 = gen_reg_rtx (wider_mode);
2043 rtx t1 = gen_reg_rtx (wider_mode);
2044 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2045 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2046
2047 if (expand_twoval_binop (binoptab, cop0, cop1,
2048 t0, t1, unsignedp))
2049 {
2050 convert_move (targ0, t0, unsignedp);
2051 convert_move (targ1, t1, unsignedp);
2052 return 1;
2053 }
2054 else
2055 delete_insns_since (last);
2056 }
2057 }
2058 }
2059
2060 delete_insns_since (entry_last);
2061 return 0;
2062 }
2063
2064 /* Expand the two-valued library call indicated by BINOPTAB, but
2065 preserve only one of the values. If TARG0 is non-NULL, the first
2066 value is placed into TARG0; otherwise the second value is placed
2067 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2068 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2069 This routine assumes that the value returned by the library call is
2070 as if the return value was of an integral mode twice as wide as the
2071 mode of OP0. Returns 1 if the call was successful. */
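/* Illustratively, for a word-mode operation the libcall result is viewed
   as a double-wide register pair: the TARG0 value is extracted from
   subreg byte offset 0 and the TARG1 value from byte offset
   GET_MODE_SIZE (mode), as the simplify_gen_subreg call below shows.  */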
2072
2073 bool
2074 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2075 rtx targ0, rtx targ1, enum rtx_code code)
2076 {
2077 machine_mode mode;
2078 machine_mode libval_mode;
2079 rtx libval;
2080 rtx_insn *insns;
2081 rtx libfunc;
2082
2083 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2084 gcc_assert (!targ0 != !targ1);
2085
2086 mode = GET_MODE (op0);
2087 libfunc = optab_libfunc (binoptab, mode);
2088 if (!libfunc)
2089 return false;
2090
2091 /* The value returned by the library function will have twice as
2092 many bits as the nominal MODE. */
2093 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2094 MODE_INT);
2095 start_sequence ();
2096 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2097 libval_mode, 2,
2098 op0, mode,
2099 op1, mode);
2100 /* Get the part of VAL containing the value that we want. */
2101 libval = simplify_gen_subreg (mode, libval, libval_mode,
2102 targ0 ? 0 : GET_MODE_SIZE (mode));
2103 insns = get_insns ();
2104 end_sequence ();
2105 /* Move the result into the desired location. */
2106 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2107 gen_rtx_fmt_ee (code, mode, op0, op1));
2108
2109 return true;
2110 }
2111
2112 \f
2113 /* Wrapper around expand_unop which takes an rtx code to specify
2114 the operation to perform, not an optab pointer. All other
2115 arguments are the same. */
2116 rtx
2117 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2118 rtx target, int unsignedp)
2119 {
2120 optab unop = code_to_optab (code);
2121 gcc_assert (unop);
2122
2123 return expand_unop (mode, unop, op0, target, unsignedp);
2124 }
2125
2126 /* Try calculating
2127 (clz:narrow x)
2128 as
2129 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2130
2131 A similar operation can be used for clrsb. UNOPTAB says which operation
2132 we are trying to expand. */
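/* Concrete example: an 8-bit clz on a target that only provides a 32-bit
   clz becomes

       clz8 (x) == clz32 ((uint32) x) - (32 - 8)

   since zero-extension contributes exactly 32 - 8 leading zeros.  For
   clrsb the operand is sign-extended instead, and the same correction
   applies to the count of redundant sign bits.  */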
2133 static rtx
2134 widen_leading (machine_mode mode, rtx op0, rtx target, optab unoptab)
2135 {
2136 enum mode_class mclass = GET_MODE_CLASS (mode);
2137 if (CLASS_HAS_WIDER_MODES_P (mclass))
2138 {
2139 machine_mode wider_mode;
2140 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2141 wider_mode != VOIDmode;
2142 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2143 {
2144 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2145 {
2146 rtx xop0, temp;
2147 rtx_insn *last;
2148
2149 last = get_last_insn ();
2150
2151 if (target == 0)
2152 target = gen_reg_rtx (mode);
2153 xop0 = widen_operand (op0, wider_mode, mode,
2154 unoptab != clrsb_optab, false);
2155 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2156 unoptab != clrsb_optab);
2157 if (temp != 0)
2158 temp = expand_binop
2159 (wider_mode, sub_optab, temp,
2160 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2161 - GET_MODE_PRECISION (mode),
2162 wider_mode),
2163 target, true, OPTAB_DIRECT);
2164 if (temp == 0)
2165 delete_insns_since (last);
2166
2167 return temp;
2168 }
2169 }
2170 }
2171 return 0;
2172 }
2173
2174 /* Try calculating clz of a double-word quantity as two word-sized clz
2175 operations, choosing between them based on whether the high word is nonzero. */
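/* That is:  clz (x) == x.hi != 0 ? clz (x.hi) : BITS_PER_WORD + clz (x.lo).  */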
2176 static rtx
2177 expand_doubleword_clz (machine_mode mode, rtx op0, rtx target)
2178 {
2179 rtx xop0 = force_reg (mode, op0);
2180 rtx subhi = gen_highpart (word_mode, xop0);
2181 rtx sublo = gen_lowpart (word_mode, xop0);
2182 rtx_code_label *hi0_label = gen_label_rtx ();
2183 rtx_code_label *after_label = gen_label_rtx ();
2184 rtx_insn *seq;
2185 rtx temp, result;
2186
2187 /* If we were not given a target, use a word_mode register, not a
2188 'mode' register. The result will fit, and nobody is expecting
2189 anything bigger (the return type of __builtin_clz* is int). */
2190 if (!target)
2191 target = gen_reg_rtx (word_mode);
2192
2193 /* In any case, write to a word_mode scratch in both branches of the
2194 conditional, so that there is a single move insn setting 'target'
2195 on which to tag a REG_EQUAL note. */
2196 result = gen_reg_rtx (word_mode);
2197
2198 start_sequence ();
2199
2200 /* If the high word is not equal to zero,
2201 then clz of the full value is clz of the high word. */
2202 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2203 word_mode, true, hi0_label);
2204
2205 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2206 if (!temp)
2207 goto fail;
2208
2209 if (temp != result)
2210 convert_move (result, temp, true);
2211
2212 emit_jump_insn (targetm.gen_jump (after_label));
2213 emit_barrier ();
2214
2215 /* Else clz of the full value is clz of the low word plus the number
2216 of bits in the high word. */
2217 emit_label (hi0_label);
2218
2219 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2220 if (!temp)
2221 goto fail;
2222 temp = expand_binop (word_mode, add_optab, temp,
2223 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2224 result, true, OPTAB_DIRECT);
2225 if (!temp)
2226 goto fail;
2227 if (temp != result)
2228 convert_move (result, temp, true);
2229
2230 emit_label (after_label);
2231 convert_move (target, result, true);
2232
2233 seq = get_insns ();
2234 end_sequence ();
2235
2236 add_equal_note (seq, target, CLZ, xop0, 0);
2237 emit_insn (seq);
2238 return target;
2239
2240 fail:
2241 end_sequence ();
2242 return 0;
2243 }
2244
2245 /* Try calculating popcount of a double-word quantity as two word-sized
2246 popcount operations and summing up the results. */
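/* That is:  popcount (x) == popcount (x.lo) + popcount (x.hi).  */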
2247 static rtx
2248 expand_doubleword_popcount (machine_mode mode, rtx op0, rtx target)
2249 {
2250 rtx t0, t1, t;
2251 rtx_insn *seq;
2252
2253 start_sequence ();
2254
2255 t0 = expand_unop_direct (word_mode, popcount_optab,
2256 operand_subword_force (op0, 0, mode), NULL_RTX,
2257 true);
2258 t1 = expand_unop_direct (word_mode, popcount_optab,
2259 operand_subword_force (op0, 1, mode), NULL_RTX,
2260 true);
2261 if (!t0 || !t1)
2262 {
2263 end_sequence ();
2264 return NULL_RTX;
2265 }
2266
2267 /* If we were not given a target, use a word_mode register, not a
2268 'mode' register. The result will fit, and nobody is expecting
2269 anything bigger (the return type of __builtin_popcount* is int). */
2270 if (!target)
2271 target = gen_reg_rtx (word_mode);
2272
2273 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2274
2275 seq = get_insns ();
2276 end_sequence ();
2277
2278 add_equal_note (seq, t, POPCOUNT, op0, 0);
2279 emit_insn (seq);
2280 return t;
2281 }
2282
2283 /* Try calculating
2284 (parity:wide x)
2285 as
2286 (parity:narrow (low (x) ^ high (x))) */
2287 static rtx
2288 expand_doubleword_parity (machine_mode mode, rtx op0, rtx target)
2289 {
2290 rtx t = expand_binop (word_mode, xor_optab,
2291 operand_subword_force (op0, 0, mode),
2292 operand_subword_force (op0, 1, mode),
2293 NULL_RTX, 0, OPTAB_DIRECT);
2294 return expand_unop (word_mode, parity_optab, t, target, true);
2295 }
2296
2297 /* Try calculating
2298 (bswap:narrow x)
2299 as
2300 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
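/* E.g. a 16-bit bswap via a 32-bit one: with x zero-extended, bswap32
   moves the two interesting bytes to the top, so
   bswap16 (x) == bswap32 (x) >> 16.  */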
2301 static rtx
2302 widen_bswap (machine_mode mode, rtx op0, rtx target)
2303 {
2304 enum mode_class mclass = GET_MODE_CLASS (mode);
2305 machine_mode wider_mode;
2306 rtx x;
2307 rtx_insn *last;
2308
2309 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2310 return NULL_RTX;
2311
2312 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2313 wider_mode != VOIDmode;
2314 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2315 if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2316 goto found;
2317 return NULL_RTX;
2318
2319 found:
2320 last = get_last_insn ();
2321
2322 x = widen_operand (op0, wider_mode, mode, true, true);
2323 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2324
2325 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2326 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2327 if (x != 0)
2328 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2329 GET_MODE_BITSIZE (wider_mode)
2330 - GET_MODE_BITSIZE (mode),
2331 NULL_RTX, true);
2332
2333 if (x != 0)
2334 {
2335 if (target == 0)
2336 target = gen_reg_rtx (mode);
2337 emit_move_insn (target, gen_lowpart (mode, x));
2338 }
2339 else
2340 delete_insns_since (last);
2341
2342 return target;
2343 }
2344
2345 /* Try calculating bswap as two bswaps of two word-sized operands. */
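/* That is, byte-swap each word and also exchange the two words:
       result.word0 = bswap (x.word1);  result.word1 = bswap (x.word0);  */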
2346
2347 static rtx
2348 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2349 {
2350 rtx t0, t1;
2351
2352 t1 = expand_unop (word_mode, bswap_optab,
2353 operand_subword_force (op, 0, mode), NULL_RTX, true);
2354 t0 = expand_unop (word_mode, bswap_optab,
2355 operand_subword_force (op, 1, mode), NULL_RTX, true);
2356
2357 if (target == 0 || !valid_multiword_target_p (target))
2358 target = gen_reg_rtx (mode);
2359 if (REG_P (target))
2360 emit_clobber (target);
2361 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2362 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2363
2364 return target;
2365 }
2366
2367 /* Try calculating (parity x) as (and (popcount x) 1), where
2368 popcount can also be done in a wider mode. */
2369 static rtx
2370 expand_parity (machine_mode mode, rtx op0, rtx target)
2371 {
2372 enum mode_class mclass = GET_MODE_CLASS (mode);
2373 if (CLASS_HAS_WIDER_MODES_P (mclass))
2374 {
2375 machine_mode wider_mode;
2376 for (wider_mode = mode; wider_mode != VOIDmode;
2377 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2378 {
2379 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2380 {
2381 rtx xop0, temp;
2382 rtx_insn *last;
2383
2384 last = get_last_insn ();
2385
2386 if (target == 0 || GET_MODE (target) != wider_mode)
2387 target = gen_reg_rtx (wider_mode);
2388
2389 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2390 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2391 true);
2392 if (temp != 0)
2393 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2394 target, true, OPTAB_DIRECT);
2395
2396 if (temp)
2397 {
2398 if (mclass != MODE_INT
2399 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2400 return convert_to_mode (mode, temp, 0);
2401 else
2402 return gen_lowpart (mode, temp);
2403 }
2404 else
2405 delete_insns_since (last);
2406 }
2407 }
2408 }
2409 return 0;
2410 }
2411
2412 /* Try calculating ctz(x) as K - clz(x & -x),
2413 where K is GET_MODE_PRECISION(mode) - 1.
2414
2415 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2416 don't have to worry about what the hardware does in that case. (If
2417 the clz instruction produces the usual value at 0, which is K, the
2418 result of this code sequence will be -1; expand_ffs, below, relies
2419 on this. It might be nice to have it be K instead, for consistency
2420 with the (very few) processors that provide a ctz with a defined
2421 value, but that would take one more instruction, and it would be
2422 less convenient for expand_ffs anyway.) */
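/* Worked example in a 32-bit mode, so K = 31: for x = 0b1000, x & -x
   isolates the lowest set bit, giving 8; clz (8) == 28; and
   31 - 28 == 3, which is indeed ctz (x).  */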
2423
2424 static rtx
2425 expand_ctz (machine_mode mode, rtx op0, rtx target)
2426 {
2427 rtx_insn *seq;
2428 rtx temp;
2429
2430 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2431 return 0;
2432
2433 start_sequence ();
2434
2435 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2436 if (temp)
2437 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2438 true, OPTAB_DIRECT);
2439 if (temp)
2440 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2441 if (temp)
2442 temp = expand_binop (mode, sub_optab,
2443 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2444 temp, target,
2445 true, OPTAB_DIRECT);
2446 if (temp == 0)
2447 {
2448 end_sequence ();
2449 return 0;
2450 }
2451
2452 seq = get_insns ();
2453 end_sequence ();
2454
2455 add_equal_note (seq, temp, CTZ, op0, 0);
2456 emit_insn (seq);
2457 return temp;
2458 }
2459
2460
2461 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2462 else with the clz-based sequence used by expand_ctz.
2463
2464 The ffs builtin promises to return zero for a zero value and ctz/clz
2465 may have an undefined value in that case. If they do not give us a
2466 convenient value, we have to generate a test and branch. */
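/* In effect the goal is  ffs (x) == (x ? ctz (x) + 1 : 0),  expanded as
   a ctz, then (unless ctz is known to yield -1 at zero already) a branch
   that forces the value to -1 when X is zero, then an add of 1.  */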
2467 static rtx
2468 expand_ffs (machine_mode mode, rtx op0, rtx target)
2469 {
2470 HOST_WIDE_INT val = 0;
2471 bool defined_at_zero = false;
2472 rtx temp;
2473 rtx_insn *seq;
2474
2475 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2476 {
2477 start_sequence ();
2478
2479 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2480 if (!temp)
2481 goto fail;
2482
2483 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2484 }
2485 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2486 {
2487 start_sequence ();
2488 temp = expand_ctz (mode, op0, 0);
2489 if (!temp)
2490 goto fail;
2491
2492 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2493 {
2494 defined_at_zero = true;
2495 val = (GET_MODE_PRECISION (mode) - 1) - val;
2496 }
2497 }
2498 else
2499 return 0;
2500
2501 if (defined_at_zero && val == -1)
2502 /* No correction needed at zero. */;
2503 else
2504 {
2505 /* We don't try to do anything clever with the situation found
2506 on some processors (eg Alpha) where ctz(0:mode) ==
2507 bitsize(mode). If someone can think of a way to send N to -1
2508 and leave alone all values in the range 0..N-1 (where N is a
2509 power of two), cheaper than this test-and-branch, please add it.
2510
2511 The test-and-branch is done after the operation itself, in case
2512 the operation sets condition codes that can be recycled for this.
2513 (This is true on i386, for instance.) */
2514
2515 rtx_code_label *nonzero_label = gen_label_rtx ();
2516 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2517 mode, true, nonzero_label);
2518
2519 convert_move (temp, GEN_INT (-1), false);
2520 emit_label (nonzero_label);
2521 }
2522
2523 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2524 to produce a value in the range 0..bitsize. */
2525 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2526 target, false, OPTAB_DIRECT);
2527 if (!temp)
2528 goto fail;
2529
2530 seq = get_insns ();
2531 end_sequence ();
2532
2533 add_equal_note (seq, temp, FFS, op0, 0);
2534 emit_insn (seq);
2535 return temp;
2536
2537 fail:
2538 end_sequence ();
2539 return 0;
2540 }
2541
2542 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2543 conditions, VAL may already be a SUBREG against which we cannot generate
2544 a further SUBREG. In this case, we expect forcing the value into a
2545 register will work around the situation. */
2546
2547 static rtx
2548 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2549 machine_mode imode)
2550 {
2551 rtx ret;
2552 ret = lowpart_subreg (omode, val, imode);
2553 if (ret == NULL)
2554 {
2555 val = force_reg (imode, val);
2556 ret = lowpart_subreg (omode, val, imode);
2557 gcc_assert (ret != NULL);
2558 }
2559 return ret;
2560 }
2561
2562 /* Expand a floating point absolute value or negation operation via a
2563 logical operation on the sign bit. */
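/* On the integer image of the float, with SIGNMASK having only the
   format's sign bit set:

       neg (x) == x ^ signmask    flip the sign bit
       abs (x) == x & ~signmask   clear the sign bit  */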
2564
2565 static rtx
2566 expand_absneg_bit (enum rtx_code code, machine_mode mode,
2567 rtx op0, rtx target)
2568 {
2569 const struct real_format *fmt;
2570 int bitpos, word, nwords, i;
2571 machine_mode imode;
2572 rtx temp;
2573 rtx_insn *insns;
2574
2575 /* The format has to have a simple sign bit. */
2576 fmt = REAL_MODE_FORMAT (mode);
2577 if (fmt == NULL)
2578 return NULL_RTX;
2579
2580 bitpos = fmt->signbit_rw;
2581 if (bitpos < 0)
2582 return NULL_RTX;
2583
2584 /* Don't create negative zeros if the format doesn't support them. */
2585 if (code == NEG && !fmt->has_signed_zero)
2586 return NULL_RTX;
2587
2588 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2589 {
2590 imode = int_mode_for_mode (mode);
2591 if (imode == BLKmode)
2592 return NULL_RTX;
2593 word = 0;
2594 nwords = 1;
2595 }
2596 else
2597 {
2598 imode = word_mode;
2599
2600 if (FLOAT_WORDS_BIG_ENDIAN)
2601 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2602 else
2603 word = bitpos / BITS_PER_WORD;
2604 bitpos = bitpos % BITS_PER_WORD;
2605 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2606 }
2607
2608 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2609 if (code == ABS)
2610 mask = ~mask;
2611
2612 if (target == 0
2613 || target == op0
2614 || (nwords > 1 && !valid_multiword_target_p (target)))
2615 target = gen_reg_rtx (mode);
2616
2617 if (nwords > 1)
2618 {
2619 start_sequence ();
2620
2621 for (i = 0; i < nwords; ++i)
2622 {
2623 rtx targ_piece = operand_subword (target, i, 1, mode);
2624 rtx op0_piece = operand_subword_force (op0, i, mode);
2625
2626 if (i == word)
2627 {
2628 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2629 op0_piece,
2630 immed_wide_int_const (mask, imode),
2631 targ_piece, 1, OPTAB_LIB_WIDEN);
2632 if (temp != targ_piece)
2633 emit_move_insn (targ_piece, temp);
2634 }
2635 else
2636 emit_move_insn (targ_piece, op0_piece);
2637 }
2638
2639 insns = get_insns ();
2640 end_sequence ();
2641
2642 emit_insn (insns);
2643 }
2644 else
2645 {
2646 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2647 gen_lowpart (imode, op0),
2648 immed_wide_int_const (mask, imode),
2649 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2650 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2651
2652 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2653 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2654 target);
2655 }
2656
2657 return target;
2658 }
2659
2660 /* As expand_unop, but will fail rather than attempt the operation in a
2661 different mode or with a libcall. */
2662 static rtx
2663 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2664 int unsignedp)
2665 {
2666 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2667 {
2668 struct expand_operand ops[2];
2669 enum insn_code icode = optab_handler (unoptab, mode);
2670 rtx_insn *last = get_last_insn ();
2671 rtx_insn *pat;
2672
2673 create_output_operand (&ops[0], target, mode);
2674 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2675 pat = maybe_gen_insn (icode, 2, ops);
2676 if (pat)
2677 {
2678 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2679 && ! add_equal_note (pat, ops[0].value,
2680 optab_to_code (unoptab),
2681 ops[1].value, NULL_RTX))
2682 {
2683 delete_insns_since (last);
2684 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2685 }
2686
2687 emit_insn (pat);
2688
2689 return ops[0].value;
2690 }
2691 }
2692 return 0;
2693 }
2694
2695 /* Generate code to perform an operation specified by UNOPTAB
2696 on operand OP0, with result having machine-mode MODE.
2697
2698 UNSIGNEDP is for the case where we have to widen the operands
2699 to perform the operation. It says to use zero-extension.
2700
2701 If TARGET is nonzero, the value
2702 is generated there, if it is convenient to do so.
2703 In all cases an rtx is returned for the locus of the value;
2704 this may or may not be TARGET. */
2705
2706 rtx
2707 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
2708 int unsignedp)
2709 {
2710 enum mode_class mclass = GET_MODE_CLASS (mode);
2711 machine_mode wider_mode;
2712 rtx temp;
2713 rtx libfunc;
2714
2715 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2716 if (temp)
2717 return temp;
2718
2719 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2720
2721 /* Widening (or narrowing) clz needs special treatment. */
2722 if (unoptab == clz_optab)
2723 {
2724 temp = widen_leading (mode, op0, target, unoptab);
2725 if (temp)
2726 return temp;
2727
2728 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2729 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2730 {
2731 temp = expand_doubleword_clz (mode, op0, target);
2732 if (temp)
2733 return temp;
2734 }
2735
2736 goto try_libcall;
2737 }
2738
2739 if (unoptab == clrsb_optab)
2740 {
2741 temp = widen_leading (mode, op0, target, unoptab);
2742 if (temp)
2743 return temp;
2744 goto try_libcall;
2745 }
2746
2747 if (unoptab == popcount_optab
2748 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2749 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2750 && optimize_insn_for_speed_p ())
2751 {
2752 temp = expand_doubleword_popcount (mode, op0, target);
2753 if (temp)
2754 return temp;
2755 }
2756
2757 if (unoptab == parity_optab
2758 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2759 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2760 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2761 && optimize_insn_for_speed_p ())
2762 {
2763 temp = expand_doubleword_parity (mode, op0, target);
2764 if (temp)
2765 return temp;
2766 }
2767
2768 /* Widening (or narrowing) bswap needs special treatment. */
2769 if (unoptab == bswap_optab)
2770 {
2771 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2772 or ROTATERT. First try these directly; if this fails, then try the
2773 obvious pair of shifts with allowed widening, as this will probably
2774 always be more efficient than the other fallback methods. */
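/* For HImode:  bswap (x) == rotate (x, 8) == ((x << 8) | (x >> 8))
   modulo 2**16, which is exactly what the three attempts below emit.  */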
2775 if (mode == HImode)
2776 {
2777 rtx_insn *last;
2778 rtx temp1, temp2;
2779
2780 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2781 {
2782 temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
2783 unsignedp, OPTAB_DIRECT);
2784 if (temp)
2785 return temp;
2786 }
2787
2788 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2789 {
2790 temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
2791 unsignedp, OPTAB_DIRECT);
2792 if (temp)
2793 return temp;
2794 }
2795
2796 last = get_last_insn ();
2797
2798 temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
2799 unsignedp, OPTAB_WIDEN);
2800 temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
2801 unsignedp, OPTAB_WIDEN);
2802 if (temp1 && temp2)
2803 {
2804 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2805 unsignedp, OPTAB_WIDEN);
2806 if (temp)
2807 return temp;
2808 }
2809
2810 delete_insns_since (last);
2811 }
2812
2813 temp = widen_bswap (mode, op0, target);
2814 if (temp)
2815 return temp;
2816
2817 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2818 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2819 {
2820 temp = expand_doubleword_bswap (mode, op0, target);
2821 if (temp)
2822 return temp;
2823 }
2824
2825 goto try_libcall;
2826 }
2827
2828 if (CLASS_HAS_WIDER_MODES_P (mclass))
2829 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2830 wider_mode != VOIDmode;
2831 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2832 {
2833 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2834 {
2835 rtx xop0 = op0;
2836 rtx_insn *last = get_last_insn ();
2837
2838 /* For certain operations, we need not actually extend
2839 the narrow operand, as long as we will truncate the
2840 results to the same narrowness. */
2841
2842 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2843 (unoptab == neg_optab
2844 || unoptab == one_cmpl_optab)
2845 && mclass == MODE_INT);
2846
2847 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2848 unsignedp);
2849
2850 if (temp)
2851 {
2852 if (mclass != MODE_INT
2853 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2854 {
2855 if (target == 0)
2856 target = gen_reg_rtx (mode);
2857 convert_move (target, temp, 0);
2858 return target;
2859 }
2860 else
2861 return gen_lowpart (mode, temp);
2862 }
2863 else
2864 delete_insns_since (last);
2865 }
2866 }
2867
2868 /* These can be done a word at a time. */
2869 if (unoptab == one_cmpl_optab
2870 && mclass == MODE_INT
2871 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2872 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2873 {
2874 int i;
2875 rtx_insn *insns;
2876
2877 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
2878 target = gen_reg_rtx (mode);
2879
2880 start_sequence ();
2881
2882 /* Do the actual arithmetic. */
2883 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2884 {
2885 rtx target_piece = operand_subword (target, i, 1, mode);
2886 rtx x = expand_unop (word_mode, unoptab,
2887 operand_subword_force (op0, i, mode),
2888 target_piece, unsignedp);
2889
2890 if (target_piece != x)
2891 emit_move_insn (target_piece, x);
2892 }
2893
2894 insns = get_insns ();
2895 end_sequence ();
2896
2897 emit_insn (insns);
2898 return target;
2899 }
2900
2901 if (optab_to_code (unoptab) == NEG)
2902 {
2903 /* Try negating floating point values by flipping the sign bit. */
2904 if (SCALAR_FLOAT_MODE_P (mode))
2905 {
2906 temp = expand_absneg_bit (NEG, mode, op0, target);
2907 if (temp)
2908 return temp;
2909 }
2910
2911 /* If there is no negation pattern, and we have no negative zero,
2912 try subtracting from zero. */
2913 if (!HONOR_SIGNED_ZEROS (mode))
2914 {
2915 temp = expand_binop (mode, (unoptab == negv_optab
2916 ? subv_optab : sub_optab),
2917 CONST0_RTX (mode), op0, target,
2918 unsignedp, OPTAB_DIRECT);
2919 if (temp)
2920 return temp;
2921 }
2922 }
2923
2924 /* Try calculating parity (x) as popcount (x) % 2. */
2925 if (unoptab == parity_optab)
2926 {
2927 temp = expand_parity (mode, op0, target);
2928 if (temp)
2929 return temp;
2930 }
2931
2932 /* Try implementing ffs (x) in terms of clz (x). */
2933 if (unoptab == ffs_optab)
2934 {
2935 temp = expand_ffs (mode, op0, target);
2936 if (temp)
2937 return temp;
2938 }
2939
2940 /* Try implementing ctz (x) in terms of clz (x). */
2941 if (unoptab == ctz_optab)
2942 {
2943 temp = expand_ctz (mode, op0, target);
2944 if (temp)
2945 return temp;
2946 }
2947
2948 try_libcall:
2949 /* Now try a library call in this mode. */
2950 libfunc = optab_libfunc (unoptab, mode);
2951 if (libfunc)
2952 {
2953 rtx_insn *insns;
2954 rtx value;
2955 rtx eq_value;
2956 machine_mode outmode = mode;
2957
2958 /* All of these functions return small values. Thus we choose to
2959 have them return something that isn't a double-word. */
2960 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2961 || unoptab == clrsb_optab || unoptab == popcount_optab
2962 || unoptab == parity_optab)
2963 outmode
2964 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
2965 optab_libfunc (unoptab, mode)));
2966
2967 start_sequence ();
2968
2969 /* Pass 1 for NO_QUEUE so we don't lose any increments
2970 if the libcall is cse'd or moved. */
2971 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
2972 1, op0, mode);
2973 insns = get_insns ();
2974 end_sequence ();
2975
2976 target = gen_reg_rtx (outmode);
2977 bool trapv = trapv_unoptab_p (unoptab);
2978 if (trapv)
2979 eq_value = NULL_RTX;
2980 else
2981 {
2982 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
2983 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
2984 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
2985 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
2986 eq_value = simplify_gen_unary (ZERO_EXTEND,
2987 outmode, eq_value, mode);
2988 }
2989 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
2990
2991 return target;
2992 }
2993
2994 /* It can't be done in this mode. Can we do it in a wider mode? */
2995
2996 if (CLASS_HAS_WIDER_MODES_P (mclass))
2997 {
2998 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2999 wider_mode != VOIDmode;
3000 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3001 {
3002 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3003 || optab_libfunc (unoptab, wider_mode))
3004 {
3005 rtx xop0 = op0;
3006 rtx_insn *last = get_last_insn ();
3007
3008 /* For certain operations, we need not actually extend
3009 the narrow operand, as long as we will truncate the
3010 results to the same narrowness. */
3011 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3012 (unoptab == neg_optab
3013 || unoptab == one_cmpl_optab
3014 || unoptab == bswap_optab)
3015 && mclass == MODE_INT);
3016
3017 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3018 unsignedp);
3019
3020 /* If we are generating clz using wider mode, adjust the
3021 result. Similarly for clrsb. */
3022 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3023 && temp != 0)
3024 temp = expand_binop
3025 (wider_mode, sub_optab, temp,
3026 gen_int_mode (GET_MODE_PRECISION (wider_mode)
3027 - GET_MODE_PRECISION (mode),
3028 wider_mode),
3029 target, true, OPTAB_DIRECT);
3030
3031 /* Likewise for bswap. */
3032 if (unoptab == bswap_optab && temp != 0)
3033 {
3034 gcc_assert (GET_MODE_PRECISION (wider_mode)
3035 == GET_MODE_BITSIZE (wider_mode)
3036 && GET_MODE_PRECISION (mode)
3037 == GET_MODE_BITSIZE (mode));
3038
3039 temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
3040 GET_MODE_BITSIZE (wider_mode)
3041 - GET_MODE_BITSIZE (mode),
3042 NULL_RTX, true);
3043 }
3044
3045 if (temp)
3046 {
3047 if (mclass != MODE_INT)
3048 {
3049 if (target == 0)
3050 target = gen_reg_rtx (mode);
3051 convert_move (target, temp, 0);
3052 return target;
3053 }
3054 else
3055 return gen_lowpart (mode, temp);
3056 }
3057 else
3058 delete_insns_since (last);
3059 }
3060 }
3061 }
3062
3063 /* One final attempt at implementing negation via subtraction,
3064 this time allowing widening of the operand. */
3065 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3066 {
3067 rtx temp;
3068 temp = expand_binop (mode,
3069 unoptab == negv_optab ? subv_optab : sub_optab,
3070 CONST0_RTX (mode), op0,
3071 target, unsignedp, OPTAB_LIB_WIDEN);
3072 if (temp)
3073 return temp;
3074 }
3075
3076 return 0;
3077 }
3078 \f
3079 /* Emit code to compute the absolute value of OP0, with result to
3080 TARGET if convenient. (TARGET may be 0.) The return value says
3081 where the result actually is to be found.
3082
3083 MODE is the mode of the operand; the mode of the result is
3084 different but can be deduced from MODE. */
3087
3088 rtx
3089 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3090 int result_unsignedp)
3091 {
3092 rtx temp;
3093
3094 if (GET_MODE_CLASS (mode) != MODE_INT
3095 || ! flag_trapv)
3096 result_unsignedp = 1;
3097
3098 /* First try to do it with a special abs instruction. */
3099 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3100 op0, target, 0);
3101 if (temp != 0)
3102 return temp;
3103
3104 /* For floating point modes, try clearing the sign bit. */
3105 if (SCALAR_FLOAT_MODE_P (mode))
3106 {
3107 temp = expand_absneg_bit (ABS, mode, op0, target);
3108 if (temp)
3109 return temp;
3110 }
3111
3112 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3113 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3114 && !HONOR_SIGNED_ZEROS (mode))
3115 {
3116 rtx_insn *last = get_last_insn ();
3117
3118 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3119 op0, NULL_RTX, 0);
3120 if (temp != 0)
3121 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3122 OPTAB_WIDEN);
3123
3124 if (temp != 0)
3125 return temp;
3126
3127 delete_insns_since (last);
3128 }
3129
3130 /* If this machine has expensive jumps, we can do integer absolute
3131 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3132 where W is the width of MODE. */
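/* Worked through for W == 32: let s = x >> 31 (arithmetic shift), so S
   is 0 when X >= 0 and -1 otherwise.  Then (s ^ x) - s is x - 0 == x in
   the first case and (~x) - (-1) == ~x + 1 == -x in the second.  */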
3133
3134 if (GET_MODE_CLASS (mode) == MODE_INT
3135 && BRANCH_COST (optimize_insn_for_speed_p (),
3136 false) >= 2)
3137 {
3138 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3139 GET_MODE_PRECISION (mode) - 1,
3140 NULL_RTX, 0);
3141
3142 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3143 OPTAB_LIB_WIDEN);
3144 if (temp != 0)
3145 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3146 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3147
3148 if (temp != 0)
3149 return temp;
3150 }
3151
3152 return NULL_RTX;
3153 }
3154
3155 rtx
3156 expand_abs (machine_mode mode, rtx op0, rtx target,
3157 int result_unsignedp, int safe)
3158 {
3159 rtx temp;
3160 rtx_code_label *op1;
3161
3162 if (GET_MODE_CLASS (mode) != MODE_INT
3163 || ! flag_trapv)
3164 result_unsignedp = 1;
3165
3166 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3167 if (temp != 0)
3168 return temp;
3169
3170 /* If that does not win, use conditional jump and negate. */
3171
3172 /* It is safe to use the target if it is the same
3173 as the source, provided it is also a pseudo register. */
3174 if (op0 == target && REG_P (op0)
3175 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3176 safe = 1;
3177
3178 op1 = gen_label_rtx ();
3179 if (target == 0 || ! safe
3180 || GET_MODE (target) != mode
3181 || (MEM_P (target) && MEM_VOLATILE_P (target))
3182 || (REG_P (target)
3183 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3184 target = gen_reg_rtx (mode);
3185
3186 emit_move_insn (target, op0);
3187 NO_DEFER_POP;
3188
3189 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3190 NULL_RTX, NULL, op1, -1);
3191
3192 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3193 target, target, 0);
3194 if (op0 != target)
3195 emit_move_insn (target, op0);
3196 emit_label (op1);
3197 OK_DEFER_POP;
3198 return target;
3199 }
3200
3201 /* Emit code to compute the one's complement absolute value of OP0
3202 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3203 (TARGET may be NULL_RTX.) The return value says where the result
3204 actually is to be found.
3205
3206 MODE is the mode of the operand; the mode of the result is
3207 different but can be deduced from MODE. */
3208
3209 rtx
3210 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3211 {
3212 rtx temp;
3213
3214 /* Not applicable for floating point modes. */
3215 if (FLOAT_MODE_P (mode))
3216 return NULL_RTX;
3217
3218 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3219 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3220 {
3221 rtx_insn *last = get_last_insn ();
3222
3223 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3224 if (temp != 0)
3225 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3226 OPTAB_WIDEN);
3227
3228 if (temp != 0)
3229 return temp;
3230
3231 delete_insns_since (last);
3232 }
3233
3234 /* If this machine has expensive jumps, we can do one's complement
3235 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3236
3237 if (GET_MODE_CLASS (mode) == MODE_INT
3238 && BRANCH_COST (optimize_insn_for_speed_p (),
3239 false) >= 2)
3240 {
3241 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3242 GET_MODE_PRECISION (mode) - 1,
3243 NULL_RTX, 0);
3244
3245 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3246 OPTAB_LIB_WIDEN);
3247
3248 if (temp != 0)
3249 return temp;
3250 }
3251
3252 return NULL_RTX;
3253 }
3254
3255 /* A subroutine of expand_copysign, perform the copysign operation using the
3256 abs and neg primitives advertised to exist on the target. The assumption
3257 is that we have a split register file, and leaving op0 in fp registers,
3258 and not playing with subregs so much, will help the register allocator. */
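/* In effect:  t = abs (op0); if (signbit (op1)) t = -t;  with the sign
   taken either from the signbit pattern or by masking OP1's integer
   image, and the negation skipped by a branch when the sign is clear.  */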
3259
3260 static rtx
3261 expand_copysign_absneg (machine_mode mode, rtx op0, rtx op1, rtx target,
3262 int bitpos, bool op0_is_abs)
3263 {
3264 machine_mode imode;
3265 enum insn_code icode;
3266 rtx sign;
3267 rtx_code_label *label;
3268
3269 if (target == op1)
3270 target = NULL_RTX;
3271
3272 /* Check if the back end provides an insn that handles signbit for the
3273 argument's mode. */
3274 icode = optab_handler (signbit_optab, mode);
3275 if (icode != CODE_FOR_nothing)
3276 {
3277 imode = insn_data[(int) icode].operand[0].mode;
3278 sign = gen_reg_rtx (imode);
3279 emit_unop_insn (icode, sign, op1, UNKNOWN);
3280 }
3281 else
3282 {
3283 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3284 {
3285 imode = int_mode_for_mode (mode);
3286 if (imode == BLKmode)
3287 return NULL_RTX;
3288 op1 = gen_lowpart (imode, op1);
3289 }
3290 else
3291 {
3292 int word;
3293
3294 imode = word_mode;
3295 if (FLOAT_WORDS_BIG_ENDIAN)
3296 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3297 else
3298 word = bitpos / BITS_PER_WORD;
3299 bitpos = bitpos % BITS_PER_WORD;
3300 op1 = operand_subword_force (op1, word, mode);
3301 }
3302
3303 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3304 sign = expand_binop (imode, and_optab, op1,
3305 immed_wide_int_const (mask, imode),
3306 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3307 }
3308
3309 if (!op0_is_abs)
3310 {
3311 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3312 if (op0 == NULL)
3313 return NULL_RTX;
3314 target = op0;
3315 }
3316 else
3317 {
3318 if (target == NULL_RTX)
3319 target = copy_to_reg (op0);
3320 else
3321 emit_move_insn (target, op0);
3322 }
3323
3324 label = gen_label_rtx ();
3325 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3326
3327 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3328 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3329 else
3330 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3331 if (op0 != target)
3332 emit_move_insn (target, op0);
3333
3334 emit_label (label);
3335
3336 return target;
3337 }
3338
3339
3340 /* A subroutine of expand_copysign, perform the entire copysign operation
3341 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3342 is true if op0 is known to have its sign bit clear. */
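/* On the integer images:  result = (op0 & ~signmask) | (op1 & signmask),
   applied word by word when the mode is wider than a word.  */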
3343
3344 static rtx
3345 expand_copysign_bit (machine_mode mode, rtx op0, rtx op1, rtx target,
3346 int bitpos, bool op0_is_abs)
3347 {
3348 machine_mode imode;
3349 int word, nwords, i;
3350 rtx temp;
3351 rtx_insn *insns;
3352
3353 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3354 {
3355 imode = int_mode_for_mode (mode);
3356 if (imode == BLKmode)
3357 return NULL_RTX;
3358 word = 0;
3359 nwords = 1;
3360 }
3361 else
3362 {
3363 imode = word_mode;
3364
3365 if (FLOAT_WORDS_BIG_ENDIAN)
3366 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3367 else
3368 word = bitpos / BITS_PER_WORD;
3369 bitpos = bitpos % BITS_PER_WORD;
3370 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3371 }
3372
3373 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3374
3375 if (target == 0
3376 || target == op0
3377 || target == op1
3378 || (nwords > 1 && !valid_multiword_target_p (target)))
3379 target = gen_reg_rtx (mode);
3380
3381 if (nwords > 1)
3382 {
3383 start_sequence ();
3384
3385 for (i = 0; i < nwords; ++i)
3386 {
3387 rtx targ_piece = operand_subword (target, i, 1, mode);
3388 rtx op0_piece = operand_subword_force (op0, i, mode);
3389
3390 if (i == word)
3391 {
3392 if (!op0_is_abs)
3393 op0_piece
3394 = expand_binop (imode, and_optab, op0_piece,
3395 immed_wide_int_const (~mask, imode),
3396 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3397 op1 = expand_binop (imode, and_optab,
3398 operand_subword_force (op1, i, mode),
3399 immed_wide_int_const (mask, imode),
3400 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3401
3402 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3403 targ_piece, 1, OPTAB_LIB_WIDEN);
3404 if (temp != targ_piece)
3405 emit_move_insn (targ_piece, temp);
3406 }
3407 else
3408 emit_move_insn (targ_piece, op0_piece);
3409 }
3410
3411 insns = get_insns ();
3412 end_sequence ();
3413
3414 emit_insn (insns);
3415 }
3416 else
3417 {
3418 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3419 immed_wide_int_const (mask, imode),
3420 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3421
3422 op0 = gen_lowpart (imode, op0);
3423 if (!op0_is_abs)
3424 op0 = expand_binop (imode, and_optab, op0,
3425 immed_wide_int_const (~mask, imode),
3426 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3427
3428 temp = expand_binop (imode, ior_optab, op0, op1,
3429 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3430 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3431 }
3432
3433 return target;
3434 }
3435
3436 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3437 scalar floating point mode. Return NULL if we do not know how to
3438 expand the operation inline. */
3439
3440 rtx
3441 expand_copysign (rtx op0, rtx op1, rtx target)
3442 {
3443 machine_mode mode = GET_MODE (op0);
3444 const struct real_format *fmt;
3445 bool op0_is_abs;
3446 rtx temp;
3447
3448 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3449 gcc_assert (GET_MODE (op1) == mode);
3450
3451 /* First try to do it with a special instruction. */
3452 temp = expand_binop (mode, copysign_optab, op0, op1,
3453 target, 0, OPTAB_DIRECT);
3454 if (temp)
3455 return temp;
3456
3457 fmt = REAL_MODE_FORMAT (mode);
3458 if (fmt == NULL || !fmt->has_signed_zero)
3459 return NULL_RTX;
3460
3461 op0_is_abs = false;
3462 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3463 {
3464 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3465 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3466 op0_is_abs = true;
3467 }
3468
3469 if (fmt->signbit_ro >= 0
3470 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3471 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3472 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3473 {
3474 temp = expand_copysign_absneg (mode, op0, op1, target,
3475 fmt->signbit_ro, op0_is_abs);
3476 if (temp)
3477 return temp;
3478 }
3479
3480 if (fmt->signbit_rw < 0)
3481 return NULL_RTX;
3482 return expand_copysign_bit (mode, op0, op1, target,
3483 fmt->signbit_rw, op0_is_abs);
3484 }
3485 \f
3486 /* Generate an instruction whose insn-code is INSN_CODE,
3487 with two operands: an output TARGET and an input OP0.
3488 TARGET *must* be nonzero, and the output is always stored there.
3489 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3490 the value that is stored into TARGET.
3491
3492 Return false if expansion failed. */
3493
3494 bool
3495 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3496 enum rtx_code code)
3497 {
3498 struct expand_operand ops[2];
3499 rtx_insn *pat;
3500
3501 create_output_operand (&ops[0], target, GET_MODE (target));
3502 create_input_operand (&ops[1], op0, GET_MODE (op0));
3503 pat = maybe_gen_insn (icode, 2, ops);
3504 if (!pat)
3505 return false;
3506
3507 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3508 && code != UNKNOWN)
3509 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3510
3511 emit_insn (pat);
3512
3513 if (ops[0].value != target)
3514 emit_move_insn (target, ops[0].value);
3515 return true;
3516 }

3517 /* Generate an instruction whose insn-code is INSN_CODE,
3518 with two operands: an output TARGET and an input OP0.
3519 TARGET *must* be nonzero, and the output is always stored there.
3520 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3521 the value that is stored into TARGET. */
3522
3523 void
3524 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3525 {
3526 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3527 gcc_assert (ok);
3528 }
3529 \f
3530 struct no_conflict_data
3531 {
3532 rtx target;
3533 rtx_insn *first, *insn;
3534 bool must_stay;
3535 };
3536
3537 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3538 the currently examined clobber / store has to stay in the list of
3539 insns that constitute the actual libcall block. */
3540 static void
3541 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3542 {
3543 struct no_conflict_data *p = (struct no_conflict_data *) p0;
3544
3545 /* If this insn directly contributes to setting the target, it must stay. */
3546 if (reg_overlap_mentioned_p (p->target, dest))
3547 p->must_stay = true;
3548 /* If we haven't committed to keeping any other insns in the list yet,
3549 there is nothing more to check. */
3550 else if (p->insn == p->first)
3551 return;
3552 /* If this insn sets / clobbers a register that feeds one of the insns
3553 already in the list, this insn has to stay too. */
3554 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3555 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3556 || reg_used_between_p (dest, p->first, p->insn)
3557 /* Likewise if this insn depends on a register set by a previous
3558 insn in the list, or if it sets a result (presumably a hard
3559 register) that is set or clobbered by a previous insn.
3560 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3561 SET_DEST perform the former check on the address, and the latter
3562 check on the MEM. */
3563 || (GET_CODE (set) == SET
3564 && (modified_in_p (SET_SRC (set), p->first)
3565 || modified_in_p (SET_DEST (set), p->first)
3566 || modified_between_p (SET_SRC (set), p->first, p->insn)
3567 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3568 p->must_stay = true;
3569 }
3570
3571 \f
3572 /* Emit code to make a call to a constant function or a library call.
3573
3574 INSNS is a list containing all insns emitted in the call.
3575 These insns leave the result in RESULT. Our job is to copy RESULT
3576 to TARGET, which is logically equivalent to EQUIV.
3577
3578 We first emit any insns that set a pseudo on the assumption that these are
3579 loading constants into registers; doing so allows them to be safely cse'ed
3580 between blocks. Then we emit all the other insns in the block, followed by
3581 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3582 note with an operand of EQUIV. */
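/* As a sketch (illustrative only), a libcall block computing
   TARGET = X * Y through __mulsi3 conceptually looks like

       (call_insn ... __mulsi3 ...)        ;; INSNS, result in RESULT
       (set TARGET RESULT)                 ;; the final copy, carrying a
         REG_EQUAL note (mult:SI X Y)      ;; EQUIV

   so later passes may CSE or delete the whole block as if it were a
   single arithmetic insn.  */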
3583
3584 static void
3585 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3586 bool equiv_may_trap)
3587 {
3588 rtx final_dest = target;
3589 rtx_insn *next, *last, *insn;
3590
3591 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3592 into a MEM later. Protect the libcall block from this change. */
3593 if (! REG_P (target) || REG_USERVAR_P (target))
3594 target = gen_reg_rtx (GET_MODE (target));
3595
3596 /* If we're using non-call exceptions, a libcall corresponding to an
3597 operation that may trap may also trap. */
3598 /* ??? See the comment in front of make_reg_eh_region_note. */
3599 if (cfun->can_throw_non_call_exceptions
3600 && (equiv_may_trap || may_trap_p (equiv)))
3601 {
3602 for (insn = insns; insn; insn = NEXT_INSN (insn))
3603 if (CALL_P (insn))
3604 {
3605 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3606 if (note)
3607 {
3608 int lp_nr = INTVAL (XEXP (note, 0));
3609 if (lp_nr == 0 || lp_nr == INT_MIN)
3610 remove_note (insn, note);
3611 }
3612 }
3613 }
3614 else
3615 {
3616 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3617 reg note to indicate that this call cannot throw or execute a nonlocal
3618 goto (unless there is already a REG_EH_REGION note, in which case
3619 we update it). */
3620 for (insn = insns; insn; insn = NEXT_INSN (insn))
3621 if (CALL_P (insn))
3622 make_reg_eh_region_note_nothrow_nononlocal (insn);
3623 }
3624
3625 /* First emit all insns that set pseudos. Remove them from the list as
3626 we go. Avoid insns that set pseudos which were referenced in previous
3627 insns. These can be generated by move_by_pieces, for example,
3628 to update an address. Similarly, avoid insns that reference things
3629 set in previous insns. */
3630
3631 for (insn = insns; insn; insn = next)
3632 {
3633 rtx set = single_set (insn);
3634
3635 next = NEXT_INSN (insn);
3636
3637 if (set != 0 && REG_P (SET_DEST (set))
3638 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3639 {
3640 struct no_conflict_data data;
3641
3642 data.target = const0_rtx;
3643 data.first = insns;
3644 data.insn = insn;
3645 data.must_stay = 0;
3646 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3647 if (! data.must_stay)
3648 {
3649 if (PREV_INSN (insn))
3650 SET_NEXT_INSN (PREV_INSN (insn)) = next;
3651 else
3652 insns = next;
3653
3654 if (next)
3655 SET_PREV_INSN (next) = PREV_INSN (insn);
3656
3657 add_insn (insn);
3658 }
3659 }
3660
3661 /* Some ports use a loop to copy large arguments onto the stack.
3662 Don't move anything outside such a loop. */
3663 if (LABEL_P (insn))
3664 break;
3665 }
3666
3667 /* Write the remaining insns followed by the final copy. */
3668 for (insn = insns; insn; insn = next)
3669 {
3670 next = NEXT_INSN (insn);
3671
3672 add_insn (insn);
3673 }
3674
3675 last = emit_move_insn (target, result);
3676 if (equiv)
3677 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3678
3679 if (final_dest != target)
3680 emit_move_insn (final_dest, target);
3681 }
3682
3683 void
3684 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
3685 {
3686 emit_libcall_block_1 (insns, target, result, equiv, false);
3687 }
3688 \f
3689 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3690 PURPOSE describes how this comparison will be used. CODE is the rtx
3691 comparison code we will be using.
3692
3693 ??? Actually, CODE is slightly weaker than that. A target is still
3694 required to implement all of the normal bcc operations, but not
3695 required to implement all (or any) of the unordered bcc operations. */
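/* A typical use (illustrative) guards a canonicalizing swap of the
   operands, as emit_cmp_and_jump_insns does below:

       if (swap_commutative_operands_p (x, y)
           && can_compare_p (swap_condition (comparison), mode, ccp_jump))
         ... swap X and Y and use the swapped condition ...  */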
3696
3697 int
3698 can_compare_p (enum rtx_code code, machine_mode mode,
3699 enum can_compare_purpose purpose)
3700 {
3701 rtx test;
3702 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3703 do
3704 {
3705 enum insn_code icode;
3706
3707 if (purpose == ccp_jump
3708 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3709 && insn_operand_matches (icode, 0, test))
3710 return 1;
3711 if (purpose == ccp_store_flag
3712 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3713 && insn_operand_matches (icode, 1, test))
3714 return 1;
3715 if (purpose == ccp_cmov
3716 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3717 return 1;
3718
3719 mode = GET_MODE_WIDER_MODE (mode);
3720 PUT_MODE (test, mode);
3721 }
3722 while (mode != VOIDmode);
3723
3724 return 0;
3725 }
3726
3727 /* This function is called when we are going to emit a compare instruction that
3728 compares the values found in X and Y, using the rtl operator COMPARISON.
3729
3730 If they have mode BLKmode, then SIZE specifies the size of both operands.
3731
3732 UNSIGNEDP nonzero says that the operands are unsigned;
3733 this matters if they need to be widened (as given by METHODS).
3734
3735 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3736 if we failed to produce one.
3737
3738 *PMODE is the mode of the inputs (in case they are const_int).
3739
3740 This function performs all the setup necessary so that the caller only has
3741 to emit a single comparison insn. This setup can involve doing a BLKmode
3742 comparison or emitting a library call to perform the comparison if no insn
3743 is available to handle it.
3744 The values which are passed in through pointers can be modified; the caller
3745 should perform the comparison on the modified values. Constant
3746 comparisons must have already been folded. */
3747
3748 static void
3749 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3750 int unsignedp, enum optab_methods methods,
3751 rtx *ptest, machine_mode *pmode)
3752 {
3753 machine_mode mode = *pmode;
3754 rtx libfunc, test;
3755 machine_mode cmp_mode;
3756 enum mode_class mclass;
3757
3758 /* The other methods are not needed. */
3759 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3760 || methods == OPTAB_LIB_WIDEN);
3761
3762 /* If we are optimizing, force expensive constants into a register. */
3763 if (CONSTANT_P (x) && optimize
3764 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
3765 > COSTS_N_INSNS (1)))
3766 x = force_reg (mode, x);
3767
3768 if (CONSTANT_P (y) && optimize
3769 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
3770 > COSTS_N_INSNS (1)))
3771 y = force_reg (mode, y);
3772
3773 #if HAVE_cc0
3774 /* Make sure we have a canonical comparison. The RTL
3775 documentation states that canonical comparisons are required only
3776 for targets which have cc0. */
3777 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3778 #endif
3779
3780 /* Don't let both operands fail to indicate the mode. */
3781 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3782 x = force_reg (mode, x);
3783 if (mode == VOIDmode)
3784 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3785
3786 /* Handle all BLKmode compares. */
3787
3788 if (mode == BLKmode)
3789 {
3790 machine_mode result_mode;
3791 enum insn_code cmp_code;
3792 rtx result;
3793 rtx opalign
3794 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3795
3796 gcc_assert (size);
3797
3798 /* Try to use a memory block compare insn - either cmpstr
3799 or cmpmem will do. */
3800 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3801 cmp_mode != VOIDmode;
3802 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3803 {
3804 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3805 if (cmp_code == CODE_FOR_nothing)
3806 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3807 if (cmp_code == CODE_FOR_nothing)
3808 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3809 if (cmp_code == CODE_FOR_nothing)
3810 continue;
3811
3812 /* Must make sure the size fits the insn's mode. */
3813 if ((CONST_INT_P (size)
3814 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3815 || (GET_MODE_BITSIZE (GET_MODE (size))
3816 > GET_MODE_BITSIZE (cmp_mode)))
3817 continue;
3818
3819 result_mode = insn_data[cmp_code].operand[0].mode;
3820 result = gen_reg_rtx (result_mode);
3821 size = convert_to_mode (cmp_mode, size, 1);
3822 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3823
3824 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
3825 *pmode = result_mode;
3826 return;
3827 }
3828
3829 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
3830 goto fail;
3831
3832 /* Otherwise call a library function. */
3833 result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
3834
3835 x = result;
3836 y = const0_rtx;
3837 mode = TYPE_MODE (integer_type_node);
3838 methods = OPTAB_LIB_WIDEN;
3839 unsignedp = false;
3840 }
3841
3842 /* Don't allow operands to the compare to trap, as that can put the
3843 compare and branch in different basic blocks. */
3844 if (cfun->can_throw_non_call_exceptions)
3845 {
3846 if (may_trap_p (x))
3847 x = force_reg (mode, x);
3848 if (may_trap_p (y))
3849 y = force_reg (mode, y);
3850 }
3851
3852 if (GET_MODE_CLASS (mode) == MODE_CC)
3853 {
3854 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
3855 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3856 gcc_assert (icode != CODE_FOR_nothing
3857 && insn_operand_matches (icode, 0, test));
3858 *ptest = test;
3859 return;
3860 }
3861
3862 mclass = GET_MODE_CLASS (mode);
3863 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3864 cmp_mode = mode;
3865 do
3866 {
3867 enum insn_code icode;
3868 icode = optab_handler (cbranch_optab, cmp_mode);
3869 if (icode != CODE_FOR_nothing
3870 && insn_operand_matches (icode, 0, test))
3871 {
3872 rtx_insn *last = get_last_insn ();
3873 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
3874 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
3875 if (op0 && op1
3876 && insn_operand_matches (icode, 1, op0)
3877 && insn_operand_matches (icode, 2, op1))
3878 {
3879 XEXP (test, 0) = op0;
3880 XEXP (test, 1) = op1;
3881 *ptest = test;
3882 *pmode = cmp_mode;
3883 return;
3884 }
3885 delete_insns_since (last);
3886 }
3887
3888 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
3889 break;
3890 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
3891 }
3892 while (cmp_mode != VOIDmode);
3893
3894 if (methods != OPTAB_LIB_WIDEN)
3895 goto fail;
3896
3897 if (!SCALAR_FLOAT_MODE_P (mode))
3898 {
3899 rtx result;
3900 machine_mode ret_mode;
3901
3902 /* Handle a libcall just for the mode we are using. */
3903 libfunc = optab_libfunc (cmp_optab, mode);
3904 gcc_assert (libfunc);
3905
3906 /* If we want unsigned, and this mode has a distinct unsigned
3907 comparison routine, use that. */
3908 if (unsignedp)
3909 {
3910 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
3911 if (ulibfunc)
3912 libfunc = ulibfunc;
3913 }
3914
3915 ret_mode = targetm.libgcc_cmp_return_mode ();
3916 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3917 ret_mode, 2, x, mode, y, mode);
3918
3919 /* There are two kinds of comparison routines. Biased routines
3920 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3921 of gcc expect that the comparison operation is equivalent
3922 to the modified comparison. For signed comparisons compare the
3923 result against 1 in the biased case, and zero in the unbiased
3924 case. For unsigned comparisons always compare against 1 after
3925 biasing the unbiased result by adding 1. This gives us a way to
3926 represent LTU.
3927 The comparisons in the fixed-point helper library are always
3928 biased. */
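/* For instance (illustrative): signed X < Y is tested as
   CMP (X, Y) < 1 with a biased routine and as CMP (X, Y) < 0 with
   an unbiased one; unsigned X <u Y with an unbiased routine becomes
   CMP (X, Y) + 1 <u 1, the bias mapping -1/0/1 onto 0/1/2 so that
   LTU is expressible.  */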
3929 x = result;
3930 y = const1_rtx;
3931
3932 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
3933 {
3934 if (unsignedp)
3935 x = plus_constant (ret_mode, result, 1);
3936 else
3937 y = const0_rtx;
3938 }
3939
3940 *pmode = ret_mode;
3941 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
3942 ptest, pmode);
3943 }
3944 else
3945 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
3946
3947 return;
3948
3949 fail:
3950 *ptest = NULL_RTX;
3951 }
3952
3953 /* Before emitting an insn with code ICODE, make sure that X, which is going
3954 to be used for operand OPNUM of the insn, is converted from mode MODE to
3955 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3956 that it is accepted by the operand predicate. Return the new value. */
3957
3958 rtx
3959 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
3960 machine_mode wider_mode, int unsignedp)
3961 {
3962 if (mode != wider_mode)
3963 x = convert_modes (wider_mode, mode, x, unsignedp);
3964
3965 if (!insn_operand_matches (icode, opnum, x))
3966 {
3967 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
3968 if (reload_completed)
3969 return NULL_RTX;
3970 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
3971 return NULL_RTX;
3972 x = copy_to_mode_reg (op_mode, x);
3973 }
3974
3975 return x;
3976 }
3977
3978 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3979 we can do the branch. */
3980
3981 static void
3982 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label, int prob)
3983 {
3984 machine_mode optab_mode;
3985 enum mode_class mclass;
3986 enum insn_code icode;
3987 rtx_insn *insn;
3988
3989 mclass = GET_MODE_CLASS (mode);
3990 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
3991 icode = optab_handler (cbranch_optab, optab_mode);
3992
3993 gcc_assert (icode != CODE_FOR_nothing);
3994 gcc_assert (insn_operand_matches (icode, 0, test));
3995 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
3996 XEXP (test, 1), label));
3997 if (prob != -1
3998 && profile_status_for_fn (cfun) != PROFILE_ABSENT
3999 && insn
4000 && JUMP_P (insn)
4001 && any_condjump_p (insn)
4002 && !find_reg_note (insn, REG_BR_PROB, 0))
4003 add_int_reg_note (insn, REG_BR_PROB, prob);
4004 }
4005
4006 /* Generate code to compare X with Y so that the condition codes are
4007 set and to jump to LABEL if the condition is true. If X is a
4008 constant and Y is not a constant, then the comparison is swapped to
4009 ensure that the comparison RTL has the canonical form.
4010
4011 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4012 need to be widened. UNSIGNEDP is also used to select the proper
4013 branch condition code.
4014
4015 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4016
4017 MODE is the mode of the inputs (in case they are const_int).
4018
4019 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4020 It will be potentially converted into an unsigned variant based on
4021 UNSIGNEDP to select a proper jump instruction.
4022
4023 PROB is the probability of jumping to LABEL. */
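/* For example (an illustrative sketch), branching to LAB when the
   pseudo R is zero, recording no branch probability:

       emit_cmp_and_jump_insns (r, const0_rtx, EQ, NULL_RTX, SImode,
                                0, lab, -1);  */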
4024
4025 void
4026 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4027 machine_mode mode, int unsignedp, rtx label,
4028 int prob)
4029 {
4030 rtx op0 = x, op1 = y;
4031 rtx test;
4032
4033 /* Swap operands and condition to ensure canonical RTL. */
4034 if (swap_commutative_operands_p (x, y)
4035 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4036 {
4037 op0 = y, op1 = x;
4038 comparison = swap_condition (comparison);
4039 }
4040
4041 /* If OP0 is still a constant, then both X and Y must be constants
4042 or the opposite comparison is not supported. Force X into a register
4043 to create canonical RTL. */
4044 if (CONSTANT_P (op0))
4045 op0 = force_reg (mode, op0);
4046
4047 if (unsignedp)
4048 comparison = unsigned_condition (comparison);
4049
4050 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4051 &test, &mode);
4052 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4053 }
4054
4055 \f
4056 /* Emit a library call comparison between floating point X and Y.
4057 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4058
4059 static void
4060 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4061 rtx *ptest, machine_mode *pmode)
4062 {
4063 enum rtx_code swapped = swap_condition (comparison);
4064 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4065 machine_mode orig_mode = GET_MODE (x);
4066 machine_mode mode, cmp_mode;
4067 rtx true_rtx, false_rtx;
4068 rtx value, target, equiv;
4069 rtx_insn *insns;
4070 rtx libfunc = 0;
4071 bool reversed_p = false;
4072 cmp_mode = targetm.libgcc_cmp_return_mode ();
4073
4074 for (mode = orig_mode;
4075 mode != VOIDmode;
4076 mode = GET_MODE_WIDER_MODE (mode))
4077 {
4078 if (code_to_optab (comparison)
4079 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4080 break;
4081
4082 if (code_to_optab (swapped)
4083 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4084 {
4085 std::swap (x, y);
4086 comparison = swapped;
4087 break;
4088 }
4089
4090 if (code_to_optab (reversed)
4091 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4092 {
4093 comparison = reversed;
4094 reversed_p = true;
4095 break;
4096 }
4097 }
4098
4099 gcc_assert (mode != VOIDmode);
4100
4101 if (mode != orig_mode)
4102 {
4103 x = convert_to_mode (mode, x, 0);
4104 y = convert_to_mode (mode, y, 0);
4105 }
4106
4107 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4108 the RTL. This allows the RTL optimizers to delete the libcall if the
4109 condition can be determined at compile-time. */
4110 if (comparison == UNORDERED
4111 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4112 {
4113 true_rtx = const_true_rtx;
4114 false_rtx = const0_rtx;
4115 }
4116 else
4117 {
4118 switch (comparison)
4119 {
4120 case EQ:
4121 true_rtx = const0_rtx;
4122 false_rtx = const_true_rtx;
4123 break;
4124
4125 case NE:
4126 true_rtx = const_true_rtx;
4127 false_rtx = const0_rtx;
4128 break;
4129
4130 case GT:
4131 true_rtx = const1_rtx;
4132 false_rtx = const0_rtx;
4133 break;
4134
4135 case GE:
4136 true_rtx = const0_rtx;
4137 false_rtx = constm1_rtx;
4138 break;
4139
4140 case LT:
4141 true_rtx = constm1_rtx;
4142 false_rtx = const0_rtx;
4143 break;
4144
4145 case LE:
4146 true_rtx = const0_rtx;
4147 false_rtx = const1_rtx;
4148 break;
4149
4150 default:
4151 gcc_unreachable ();
4152 }
4153 }
4154
4155 if (comparison == UNORDERED)
4156 {
4157 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4158 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4159 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4160 temp, const_true_rtx, equiv);
4161 }
4162 else
4163 {
4164 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4165 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4166 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4167 equiv, true_rtx, false_rtx);
4168 }
4169
4170 start_sequence ();
4171 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4172 cmp_mode, 2, x, mode, y, mode);
4173 insns = get_insns ();
4174 end_sequence ();
4175
4176 target = gen_reg_rtx (cmp_mode);
4177 emit_libcall_block (insns, target, value, equiv);
4178
4179 if (comparison == UNORDERED
4180 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4181 || reversed_p)
4182 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4183 else
4184 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4185
4186 *pmode = cmp_mode;
4187 }
4188 \f
4189 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4190
4191 void
4192 emit_indirect_jump (rtx loc)
4193 {
4194 if (!targetm.have_indirect_jump ())
4195 sorry ("indirect jumps are not available on this target");
4196 else
4197 {
4198 struct expand_operand ops[1];
4199 create_address_operand (&ops[0], loc);
4200 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4201 emit_barrier ();
4202 }
4203 }
4204 \f
4205
4206 /* Emit a conditional move instruction if the machine supports one for that
4207 condition and machine mode.
4208
4209 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4210 the mode to use should they be constants. If it is VOIDmode, they cannot
4211 both be constants.
4212
4213 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4214 should be stored there. MODE is the mode to use should they be constants.
4215 If it is VOIDmode, they cannot both be constants.
4216
4217 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4218 is not supported. */
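/* An illustrative sketch: expanding MAX (A, B) into TARGET could try

       rtx t = emit_conditional_move (target, GT, a, b, mode,
                                      a, b, mode, 0);

   and fall back to a compare-and-branch sequence when T is NULL_RTX.  */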
4219
4220 rtx
4221 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4222 machine_mode cmode, rtx op2, rtx op3,
4223 machine_mode mode, int unsignedp)
4224 {
4225 rtx comparison;
4226 rtx_insn *last;
4227 enum insn_code icode;
4228 enum rtx_code reversed;
4229
4230 /* If the two source operands are identical, that's just a move. */
4231
4232 if (rtx_equal_p (op2, op3))
4233 {
4234 if (!target)
4235 target = gen_reg_rtx (mode);
4236
4237 emit_move_insn (target, op3);
4238 return target;
4239 }
4240
4241 /* If one operand is constant, make it the second one. Only do this
4242 if the other operand is not constant as well. */
4243
4244 if (swap_commutative_operands_p (op0, op1))
4245 {
4246 std::swap (op0, op1);
4247 code = swap_condition (code);
4248 }
4249
4250 /* get_condition will prefer to generate LT and GT even if the old
4251 comparison was against zero, so undo that canonicalization here since
4252 comparisons against zero are cheaper. */
4253 if (code == LT && op1 == const1_rtx)
4254 code = LE, op1 = const0_rtx;
4255 else if (code == GT && op1 == constm1_rtx)
4256 code = GE, op1 = const0_rtx;
4257
4258 if (cmode == VOIDmode)
4259 cmode = GET_MODE (op0);
4260
4261 enum rtx_code orig_code = code;
4262 bool swapped = false;
4263 if (swap_commutative_operands_p (op2, op3)
4264 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4265 != UNKNOWN))
4266 {
4267 std::swap (op2, op3);
4268 code = reversed;
4269 swapped = true;
4270 }
4271
4272 if (mode == VOIDmode)
4273 mode = GET_MODE (op2);
4274
4275 icode = direct_optab_handler (movcc_optab, mode);
4276
4277 if (icode == CODE_FOR_nothing)
4278 return NULL_RTX;
4279
4280 if (!target)
4281 target = gen_reg_rtx (mode);
4282
4283 for (int pass = 0; ; pass++)
4284 {
4285 code = unsignedp ? unsigned_condition (code) : code;
4286 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4287
4288 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4289 punt and let the caller figure out how best to deal with this
4290 situation. */
4291 if (COMPARISON_P (comparison))
4292 {
4293 saved_pending_stack_adjust save;
4294 save_pending_stack_adjust (&save);
4295 last = get_last_insn ();
4296 do_pending_stack_adjust ();
4297 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4298 GET_CODE (comparison), NULL_RTX, unsignedp,
4299 OPTAB_WIDEN, &comparison, &cmode);
4300 if (comparison)
4301 {
4302 struct expand_operand ops[4];
4303
4304 create_output_operand (&ops[0], target, mode);
4305 create_fixed_operand (&ops[1], comparison);
4306 create_input_operand (&ops[2], op2, mode);
4307 create_input_operand (&ops[3], op3, mode);
4308 if (maybe_expand_insn (icode, 4, ops))
4309 {
4310 if (ops[0].value != target)
4311 convert_move (target, ops[0].value, false);
4312 return target;
4313 }
4314 }
4315 delete_insns_since (last);
4316 restore_pending_stack_adjust (&save);
4317 }
4318
4319 if (pass == 1)
4320 return NULL_RTX;
4321
4322 /* If the preferred op2/op3 order is not usable, retry with other
4323 operand order, perhaps it will expand successfully. */
4324 if (swapped)
4325 code = orig_code;
4326 else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
4327 NULL))
4328 != UNKNOWN)
4329 code = reversed;
4330 else
4331 return NULL_RTX;
4332 std::swap (op2, op3);
4333 }
4334 }
4335
4336
4337 /* Emit a conditional negate or bitwise complement using the
4338 negcc or notcc optabs if available. Return NULL_RTX if such operations
4339 are not available. Otherwise return the RTX holding the result.
4340 TARGET is the desired destination of the result. COND is the comparison
4341 on which to negate. If COND is true, move into TARGET the negation
4342 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4343 CODE is either NEG or NOT. MODE is the machine mode in which the
4344 operation is performed. */
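/* Illustrative sketch: a conditional negation TARGET = COND ? -A : A
   could be attempted as

       emit_conditional_neg_or_complement (target, NEG, mode, cond, a, a);

   with a NULL_RTX return directing the caller to another strategy.  */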
4345
4346 rtx
4347 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4348 machine_mode mode, rtx cond, rtx op1,
4349 rtx op2)
4350 {
4351 optab op = unknown_optab;
4352 if (code == NEG)
4353 op = negcc_optab;
4354 else if (code == NOT)
4355 op = notcc_optab;
4356 else
4357 gcc_unreachable ();
4358
4359 insn_code icode = direct_optab_handler (op, mode);
4360
4361 if (icode == CODE_FOR_nothing)
4362 return NULL_RTX;
4363
4364 if (!target)
4365 target = gen_reg_rtx (mode);
4366
4367 rtx_insn *last = get_last_insn ();
4368 struct expand_operand ops[4];
4369
4370 create_output_operand (&ops[0], target, mode);
4371 create_fixed_operand (&ops[1], cond);
4372 create_input_operand (&ops[2], op1, mode);
4373 create_input_operand (&ops[3], op2, mode);
4374
4375 if (maybe_expand_insn (icode, 4, ops))
4376 {
4377 if (ops[0].value != target)
4378 convert_move (target, ops[0].value, false);
4379
4380 return target;
4381 }
4382 delete_insns_since (last);
4383 return NULL_RTX;
4384 }
4385
4386 /* Emit a conditional addition instruction if the machine supports one for that
4387 condition and machine mode.
4388
4389 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4390 the mode to use should they be constants. If it is VOIDmode, they cannot
4391 both be constants.
4392
4393 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4394 should be stored there. MODE is the mode to use should they be constants.
4395 If it is VOIDmode, they cannot both be constants.
4396
4397 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4398 is not supported. */
4399
4400 rtx
4401 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4402 machine_mode cmode, rtx op2, rtx op3,
4403 machine_mode mode, int unsignedp)
4404 {
4405 rtx comparison;
4406 rtx_insn *last;
4407 enum insn_code icode;
4408
4409 /* If one operand is constant, make it the second one. Only do this
4410 if the other operand is not constant as well. */
4411
4412 if (swap_commutative_operands_p (op0, op1))
4413 {
4414 std::swap (op0, op1);
4415 code = swap_condition (code);
4416 }
4417
4418 /* get_condition will prefer to generate LT and GT even if the old
4419 comparison was against zero, so undo that canonicalization here since
4420 comparisons against zero are cheaper. */
4421 if (code == LT && op1 == const1_rtx)
4422 code = LE, op1 = const0_rtx;
4423 else if (code == GT && op1 == constm1_rtx)
4424 code = GE, op1 = const0_rtx;
4425
4426 if (cmode == VOIDmode)
4427 cmode = GET_MODE (op0);
4428
4429 if (mode == VOIDmode)
4430 mode = GET_MODE (op2);
4431
4432 icode = optab_handler (addcc_optab, mode);
4433
4434 if (icode == CODE_FOR_nothing)
4435 return 0;
4436
4437 if (!target)
4438 target = gen_reg_rtx (mode);
4439
4440 code = unsignedp ? unsigned_condition (code) : code;
4441 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4442
4443 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4444 return NULL and let the caller figure out how best to deal with this
4445 situation. */
4446 if (!COMPARISON_P (comparison))
4447 return NULL_RTX;
4448
4449 do_pending_stack_adjust ();
4450 last = get_last_insn ();
4451 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4452 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4453 &comparison, &cmode);
4454 if (comparison)
4455 {
4456 struct expand_operand ops[4];
4457
4458 create_output_operand (&ops[0], target, mode);
4459 create_fixed_operand (&ops[1], comparison);
4460 create_input_operand (&ops[2], op2, mode);
4461 create_input_operand (&ops[3], op3, mode);
4462 if (maybe_expand_insn (icode, 4, ops))
4463 {
4464 if (ops[0].value != target)
4465 convert_move (target, ops[0].value, false);
4466 return target;
4467 }
4468 }
4469 delete_insns_since (last);
4470 return NULL_RTX;
4471 }
4472 \f
4473 /* These functions attempt to generate an insn body, rather than
4474 emitting the insn, but if the gen function already emits them, we
4475 make no attempt to turn them back into naked patterns. */
4476
4477 /* Generate and return an insn body to add Y to X. */
4478
4479 rtx_insn *
4480 gen_add2_insn (rtx x, rtx y)
4481 {
4482 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4483
4484 gcc_assert (insn_operand_matches (icode, 0, x));
4485 gcc_assert (insn_operand_matches (icode, 1, x));
4486 gcc_assert (insn_operand_matches (icode, 2, y));
4487
4488 return GEN_FCN (icode) (x, x, y);
4489 }
4490
4491 /* Generate and return an insn body to add r1 and c,
4492 storing the result in r0. */
4493
4494 rtx_insn *
4495 gen_add3_insn (rtx r0, rtx r1, rtx c)
4496 {
4497 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4498
4499 if (icode == CODE_FOR_nothing
4500 || !insn_operand_matches (icode, 0, r0)
4501 || !insn_operand_matches (icode, 1, r1)
4502 || !insn_operand_matches (icode, 2, c))
4503 return NULL;
4504
4505 return GEN_FCN (icode) (r0, r1, c);
4506 }
4507
4508 int
4509 have_add2_insn (rtx x, rtx y)
4510 {
4511 enum insn_code icode;
4512
4513 gcc_assert (GET_MODE (x) != VOIDmode);
4514
4515 icode = optab_handler (add_optab, GET_MODE (x));
4516
4517 if (icode == CODE_FOR_nothing)
4518 return 0;
4519
4520 if (!insn_operand_matches (icode, 0, x)
4521 || !insn_operand_matches (icode, 1, x)
4522 || !insn_operand_matches (icode, 2, y))
4523 return 0;
4524
4525 return 1;
4526 }
4527
4528 /* Generate and return an insn body to set X to Y plus Z. */
4529
4530 rtx_insn *
4531 gen_addptr3_insn (rtx x, rtx y, rtx z)
4532 {
4533 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4534
4535 gcc_assert (insn_operand_matches (icode, 0, x));
4536 gcc_assert (insn_operand_matches (icode, 1, y));
4537 gcc_assert (insn_operand_matches (icode, 2, z));
4538
4539 return GEN_FCN (icode) (x, y, z);
4540 }
4541
4542 /* Return true if the target implements an addptr pattern and X, Y,
4543 and Z are valid for the pattern predicates. */
4544
4545 int
4546 have_addptr3_insn (rtx x, rtx y, rtx z)
4547 {
4548 enum insn_code icode;
4549
4550 gcc_assert (GET_MODE (x) != VOIDmode);
4551
4552 icode = optab_handler (addptr3_optab, GET_MODE (x));
4553
4554 if (icode == CODE_FOR_nothing)
4555 return 0;
4556
4557 if (!insn_operand_matches (icode, 0, x)
4558 || !insn_operand_matches (icode, 1, y)
4559 || !insn_operand_matches (icode, 2, z))
4560 return 0;
4561
4562 return 1;
4563 }
4564
4565 /* Generate and return an insn body to subtract Y from X. */
4566
4567 rtx_insn *
4568 gen_sub2_insn (rtx x, rtx y)
4569 {
4570 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4571
4572 gcc_assert (insn_operand_matches (icode, 0, x));
4573 gcc_assert (insn_operand_matches (icode, 1, x));
4574 gcc_assert (insn_operand_matches (icode, 2, y));
4575
4576 return GEN_FCN (icode) (x, x, y);
4577 }
4578
4579 /* Generate and return an insn body to subtract c from r1,
4580 storing the result in r0. */
4581
4582 rtx_insn *
4583 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4584 {
4585 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4586
4587 if (icode == CODE_FOR_nothing
4588 || !insn_operand_matches (icode, 0, r0)
4589 || !insn_operand_matches (icode, 1, r1)
4590 || !insn_operand_matches (icode, 2, c))
4591 return NULL;
4592
4593 return GEN_FCN (icode) (r0, r1, c);
4594 }
4595
4596 int
4597 have_sub2_insn (rtx x, rtx y)
4598 {
4599 enum insn_code icode;
4600
4601 gcc_assert (GET_MODE (x) != VOIDmode);
4602
4603 icode = optab_handler (sub_optab, GET_MODE (x));
4604
4605 if (icode == CODE_FOR_nothing)
4606 return 0;
4607
4608 if (!insn_operand_matches (icode, 0, x)
4609 || !insn_operand_matches (icode, 1, x)
4610 || !insn_operand_matches (icode, 2, y))
4611 return 0;
4612
4613 return 1;
4614 }
4615 \f
4616 /* Generate the body of an insn to extend Y (with mode MFROM)
4617 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4618
4619 rtx_insn *
4620 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4621 machine_mode mfrom, int unsignedp)
4622 {
4623 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4624 return GEN_FCN (icode) (x, y);
4625 }
4626 \f
4627 /* Generate code to convert FROM to floating point
4628 and store in TO. FROM must be fixed point and not VOIDmode.
4629 UNSIGNEDP nonzero means regard FROM as unsigned.
4630 Normally this is done by correcting the final value
4631 if it is negative. */
4632
4633 void
4634 expand_float (rtx to, rtx from, int unsignedp)
4635 {
4636 enum insn_code icode;
4637 rtx target = to;
4638 machine_mode fmode, imode;
4639 bool can_do_signed = false;
4640
4641 /* Crash now, because we won't be able to decide which mode to use. */
4642 gcc_assert (GET_MODE (from) != VOIDmode);
4643
4644 /* Look for an insn to do the conversion. Do it in the specified
4645 modes if possible; otherwise convert either input, output or both to
4646 wider mode. If the integer mode is wider than the mode of FROM,
4647 we can do the conversion signed even if the input is unsigned. */
4648
4649 for (fmode = GET_MODE (to); fmode != VOIDmode;
4650 fmode = GET_MODE_WIDER_MODE (fmode))
4651 for (imode = GET_MODE (from); imode != VOIDmode;
4652 imode = GET_MODE_WIDER_MODE (imode))
4653 {
4654 int doing_unsigned = unsignedp;
4655
4656 if (fmode != GET_MODE (to)
4657 && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
4658 continue;
4659
4660 icode = can_float_p (fmode, imode, unsignedp);
4661 if (icode == CODE_FOR_nothing && unsignedp)
4662 {
4663 enum insn_code scode = can_float_p (fmode, imode, 0);
4664 if (scode != CODE_FOR_nothing)
4665 can_do_signed = true;
4666 if (imode != GET_MODE (from))
4667 icode = scode, doing_unsigned = 0;
4668 }
4669
4670 if (icode != CODE_FOR_nothing)
4671 {
4672 if (imode != GET_MODE (from))
4673 from = convert_to_mode (imode, from, unsignedp);
4674
4675 if (fmode != GET_MODE (to))
4676 target = gen_reg_rtx (fmode);
4677
4678 emit_unop_insn (icode, target, from,
4679 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4680
4681 if (target != to)
4682 convert_move (to, target, 0);
4683 return;
4684 }
4685 }
4686
4687 /* Unsigned integer, and no way to convert directly. Convert as signed,
4688 then unconditionally adjust the result. */
4689 if (unsignedp && can_do_signed)
4690 {
4691 rtx_code_label *label = gen_label_rtx ();
4692 rtx temp;
4693 REAL_VALUE_TYPE offset;
4694
4695 /* Look for a usable floating mode FMODE wider than the source and at
4696 least as wide as the target. Using FMODE will avoid rounding woes
4697 with unsigned values greater than the signed maximum value. */
4698
4699 for (fmode = GET_MODE (to); fmode != VOIDmode;
4700 fmode = GET_MODE_WIDER_MODE (fmode))
4701 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4702 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4703 break;
4704
4705 if (fmode == VOIDmode)
4706 {
4707 /* There is no such mode. Pretend the target is wide enough. */
4708 fmode = GET_MODE (to);
4709
4710 /* Avoid double-rounding when TO is narrower than FROM. */
4711 if ((significand_size (fmode) + 1)
4712 < GET_MODE_PRECISION (GET_MODE (from)))
4713 {
4714 rtx temp1;
4715 rtx_code_label *neglabel = gen_label_rtx ();
4716
4717 /* Don't use TARGET if it isn't a register, is a hard register,
4718 or is the wrong mode. */
4719 if (!REG_P (target)
4720 || REGNO (target) < FIRST_PSEUDO_REGISTER
4721 || GET_MODE (target) != fmode)
4722 target = gen_reg_rtx (fmode);
4723
4724 imode = GET_MODE (from);
4725 do_pending_stack_adjust ();
4726
4727 /* Test whether the sign bit is set. */
4728 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4729 0, neglabel);
4730
4731 /* The sign bit is not set. Convert as signed. */
4732 expand_float (target, from, 0);
4733 emit_jump_insn (targetm.gen_jump (label));
4734 emit_barrier ();
4735
4736 /* The sign bit is set.
4737 Convert to a usable (positive signed) value by shifting right
4738 one bit, while remembering if a nonzero bit was shifted
4739 out; i.e., compute (from & 1) | (from >> 1). */
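/* E.g. (illustrative) FROM = 0xFFFFFFFF in SImode: FROM >> 1 is
   0x7FFFFFFF, which is positive and converts as signed; ORing in the
   shifted-out low bit keeps it sticky, so the doubling below rounds
   to the same value a direct unsigned conversion would produce.  */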
4740
4741 emit_label (neglabel);
4742 temp = expand_binop (imode, and_optab, from, const1_rtx,
4743 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4744 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4745 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4746 OPTAB_LIB_WIDEN);
4747 expand_float (target, temp, 0);
4748
4749 /* Multiply by 2 to undo the shift above. */
4750 temp = expand_binop (fmode, add_optab, target, target,
4751 target, 0, OPTAB_LIB_WIDEN);
4752 if (temp != target)
4753 emit_move_insn (target, temp);
4754
4755 do_pending_stack_adjust ();
4756 emit_label (label);
4757 goto done;
4758 }
4759 }
4760
4761 /* If we are about to do some arithmetic to correct for an
4762 unsigned operand, do it in a pseudo-register. */
4763
4764 if (GET_MODE (to) != fmode
4765 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4766 target = gen_reg_rtx (fmode);
4767
4768 /* Convert as signed integer to floating. */
4769 expand_float (target, from, 0);
4770
4771 /* If FROM is negative (and therefore TO is negative),
4772 correct its value by 2**bitwidth. */
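/* E.g. (illustrative) FROM = 0xFFFFFFFE in SImode converts as the
   signed value -2, giving -2.0; adding 2**32 yields 4294967294.0,
   the correct unsigned interpretation.  */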
4773
4774 do_pending_stack_adjust ();
4775 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4776 0, label);
4777
4778
4779 real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
4780 temp = expand_binop (fmode, add_optab, target,
4781 const_double_from_real_value (offset, fmode),
4782 target, 0, OPTAB_LIB_WIDEN);
4783 if (temp != target)
4784 emit_move_insn (target, temp);
4785
4786 do_pending_stack_adjust ();
4787 emit_label (label);
4788 goto done;
4789 }
4790
4791 /* No hardware instruction available; call a library routine. */
4792 {
4793 rtx libfunc;
4794 rtx_insn *insns;
4795 rtx value;
4796 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4797
4798 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
4799 from = convert_to_mode (SImode, from, unsignedp);
4800
4801 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4802 gcc_assert (libfunc);
4803
4804 start_sequence ();
4805
4806 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4807 GET_MODE (to), 1, from,
4808 GET_MODE (from));
4809 insns = get_insns ();
4810 end_sequence ();
4811
4812 emit_libcall_block (insns, target, value,
4813 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4814 GET_MODE (to), from));
4815 }
4816
4817 done:
4818
4819 /* Copy result to requested destination
4820 if we have been computing in a temp location. */
4821
4822 if (target != to)
4823 {
4824 if (GET_MODE (target) == GET_MODE (to))
4825 emit_move_insn (to, target);
4826 else
4827 convert_move (to, target, 0);
4828 }
4829 }
4830 \f
4831 /* Generate code to convert FROM to fixed point and store in TO. FROM
4832 must be floating point. */
4833
4834 void
4835 expand_fix (rtx to, rtx from, int unsignedp)
4836 {
4837 enum insn_code icode;
4838 rtx target = to;
4839 machine_mode fmode, imode;
4840 bool must_trunc = false;
4841
4842 /* We first try to find a pair of modes, one real and one integer, at
4843 least as wide as FROM and TO, respectively, in which we can open-code
4844 this conversion. If the integer mode is wider than the mode of TO,
4845 we can do the conversion either signed or unsigned. */
4846
4847 for (fmode = GET_MODE (from); fmode != VOIDmode;
4848 fmode = GET_MODE_WIDER_MODE (fmode))
4849 for (imode = GET_MODE (to); imode != VOIDmode;
4850 imode = GET_MODE_WIDER_MODE (imode))
4851 {
4852 int doing_unsigned = unsignedp;
4853
4854 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4855 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4856 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4857
4858 if (icode != CODE_FOR_nothing)
4859 {
4860 rtx_insn *last = get_last_insn ();
4861 if (fmode != GET_MODE (from))
4862 from = convert_to_mode (fmode, from, 0);
4863
4864 if (must_trunc)
4865 {
4866 rtx temp = gen_reg_rtx (GET_MODE (from));
4867 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4868 temp, 0);
4869 }
4870
4871 if (imode != GET_MODE (to))
4872 target = gen_reg_rtx (imode);
4873
4874 if (maybe_emit_unop_insn (icode, target, from,
4875 doing_unsigned ? UNSIGNED_FIX : FIX))
4876 {
4877 if (target != to)
4878 convert_move (to, target, unsignedp);
4879 return;
4880 }
4881 delete_insns_since (last);
4882 }
4883 }
4884
4885 /* For an unsigned conversion, there is one more way to do it.
4886 If we have a signed conversion, we generate code that compares
4887 the real value to the largest representable positive number. If it
4888 is smaller, the conversion is done normally. Otherwise, subtract
4889 one plus the highest signed number, convert, and add it back.
4890
4891 We only need to check all real modes, since we know we didn't find
4892 anything with a wider integer mode.
4893
4894 This code used to extend the FP value into a mode wider than the destination.
4895 This is needed for decimal float modes which cannot accurately
4896 represent one plus the highest signed number of the same size, but
4897 not for binary modes. Consider, for instance, conversion from SFmode
4898 into DImode.
4899
4900 The hot path through the code deals with inputs smaller than 2^63
4901 and does just the conversion, so there are no bits to lose.
4902
4903 In the other path we know the value is positive in the range 2^63..2^64-1
4904 inclusive (for any other input, overflow happens and the result is undefined).
4905 So we know that the most significant bit set in the mantissa corresponds to
4906 2^63. The subtraction of 2^63 should not generate any rounding as it
4907 simply clears out that bit. The rest is trivial. */
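/* A worked instance (illustrative): fixing 2^63 + 2^62 from DFmode to
   unsigned DImode subtracts 2^63 (exactly clearing the top bit), does
   the signed fix to obtain 2^62, then XORs bit 63 back in to produce
   0xC000000000000000.  */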
4908
4909 if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4910 for (fmode = GET_MODE (from); fmode != VOIDmode;
4911 fmode = GET_MODE_WIDER_MODE (fmode))
4912 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
4913 && (!DECIMAL_FLOAT_MODE_P (fmode)
4914 || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
4915 {
4916 int bitsize;
4917 REAL_VALUE_TYPE offset;
4918 rtx limit;
4919 rtx_code_label *lab1, *lab2;
4920 rtx_insn *insn;
4921
4922 bitsize = GET_MODE_PRECISION (GET_MODE (to));
4923 real_2expN (&offset, bitsize - 1, fmode);
4924 limit = const_double_from_real_value (offset, fmode);
4925 lab1 = gen_label_rtx ();
4926 lab2 = gen_label_rtx ();
4927
4928 if (fmode != GET_MODE (from))
4929 from = convert_to_mode (fmode, from, 0);
4930
4931 /* See if we need to do the subtraction. */
4932 do_pending_stack_adjust ();
4933 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4934 0, lab1);
4935
4936 /* If not, do the signed "fix" and branch around fixup code. */
4937 expand_fix (to, from, 0);
4938 emit_jump_insn (targetm.gen_jump (lab2));
4939 emit_barrier ();
4940
4941 /* Otherwise, subtract 2**(N-1), convert to signed number,
4942 then add 2**(N-1). Do the addition using XOR since this
4943 will often generate better code. */
4944 emit_label (lab1);
4945 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4946 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4947 expand_fix (to, target, 0);
4948 target = expand_binop (GET_MODE (to), xor_optab, to,
4949 gen_int_mode
4950 (HOST_WIDE_INT_1 << (bitsize - 1),
4951 GET_MODE (to)),
4952 to, 1, OPTAB_LIB_WIDEN);
4953
4954 if (target != to)
4955 emit_move_insn (to, target);
4956
4957 emit_label (lab2);
4958
4959 if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
4960 {
4961 /* Make a place for a REG_NOTE and add it. */
4962 insn = emit_move_insn (to, to);
4963 set_dst_reg_note (insn, REG_EQUAL,
4964 gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
4965 copy_rtx (from)),
4966 to);
4967 }
4968
4969 return;
4970 }
4971
4972 /* We can't do it with an insn, so use a library call. But first ensure
4973 that the mode of TO is at least as wide as SImode, since those are the
4974 only library calls we know about. */
4975
4976 if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
4977 {
4978 target = gen_reg_rtx (SImode);
4979
4980 expand_fix (target, from, unsignedp);
4981 }
4982 else
4983 {
4984 rtx_insn *insns;
4985 rtx value;
4986 rtx libfunc;
4987
4988 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4989 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4990 gcc_assert (libfunc);
4991
4992 start_sequence ();
4993
4994 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4995 GET_MODE (to), 1, from,
4996 GET_MODE (from));
4997 insns = get_insns ();
4998 end_sequence ();
4999
5000 emit_libcall_block (insns, target, value,
5001 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5002 GET_MODE (to), from));
5003 }
5004
5005 if (target != to)
5006 {
5007 if (GET_MODE (to) == GET_MODE (target))
5008 emit_move_insn (to, target);
5009 else
5010 convert_move (to, target, 0);
5011 }
5012 }
5013
5014
5015 /* Promote integer arguments for a libcall if necessary.
5016 emit_library_call_value cannot do the promotion because it does not
5017 know if it should do a signed or unsigned promotion. This is because
5018 there are no tree types defined for libcalls. */
5019
5020 static rtx
5021 prepare_libcall_arg (rtx arg, int uintp)
5022 {
5023 machine_mode mode = GET_MODE (arg);
5024 machine_mode arg_mode;
5025 if (SCALAR_INT_MODE_P (mode))
5026 {
5027 /* If we need to promote the integer function argument we need to do
5028 it here instead of inside emit_library_call_value because in
5029 emit_library_call_value we don't know if we should do a signed or
5030 unsigned promotion. */
5031
5032 int unsigned_p = 0;
5033 arg_mode = promote_function_mode (NULL_TREE, mode,
5034 &unsigned_p, NULL_TREE, 0);
5035 if (arg_mode != mode)
5036 return convert_to_mode (arg_mode, arg, uintp);
5037 }
5038 return arg;
5039 }
5040
5041 /* Generate code to convert FROM or TO a fixed-point.
5042 If UINTP is true, either TO or FROM is an unsigned integer.
5043 If SATP is true, we need to saturate the result. */
5044
5045 void
5046 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5047 {
5048 machine_mode to_mode = GET_MODE (to);
5049 machine_mode from_mode = GET_MODE (from);
5050 convert_optab tab;
5051 enum rtx_code this_code;
5052 enum insn_code code;
5053 rtx_insn *insns;
5054 rtx value;
5055 rtx libfunc;
5056
5057 if (to_mode == from_mode)
5058 {
5059 emit_move_insn (to, from);
5060 return;
5061 }
5062
5063 if (uintp)
5064 {
5065 tab = satp ? satfractuns_optab : fractuns_optab;
5066 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5067 }
5068 else
5069 {
5070 tab = satp ? satfract_optab : fract_optab;
5071 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5072 }
5073 code = convert_optab_handler (tab, to_mode, from_mode);
5074 if (code != CODE_FOR_nothing)
5075 {
5076 emit_unop_insn (code, to, from, this_code);
5077 return;
5078 }
5079
5080 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5081 gcc_assert (libfunc);
5082
5083 from = prepare_libcall_arg (from, uintp);
5084 from_mode = GET_MODE (from);
5085
5086 start_sequence ();
5087 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5088 1, from, from_mode);
5089 insns = get_insns ();
5090 end_sequence ();
5091
5092 emit_libcall_block (insns, to, value,
5093 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5094 }
5095
5096 /* Generate code to convert FROM to fixed point and store in TO. FROM
5097 must be floating point, TO must be signed. Use the conversion optab
5098 TAB to do the conversion. */
5099
5100 bool
5101 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5102 {
5103 enum insn_code icode;
5104 rtx target = to;
5105 machine_mode fmode, imode;
5106
5107 /* We first try to find a pair of modes, one real and one integer, at
5108 least as wide as FROM and TO, respectively, in which we can open-code
5109 this conversion. If the integer mode is wider than the mode of TO,
5110 we can do the conversion either signed or unsigned. */
5111
5112 for (fmode = GET_MODE (from); fmode != VOIDmode;
5113 fmode = GET_MODE_WIDER_MODE (fmode))
5114 for (imode = GET_MODE (to); imode != VOIDmode;
5115 imode = GET_MODE_WIDER_MODE (imode))
5116 {
5117 icode = convert_optab_handler (tab, imode, fmode);
5118 if (icode != CODE_FOR_nothing)
5119 {
5120 rtx_insn *last = get_last_insn ();
5121 if (fmode != GET_MODE (from))
5122 from = convert_to_mode (fmode, from, 0);
5123
5124 if (imode != GET_MODE (to))
5125 target = gen_reg_rtx (imode);
5126
5127 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5128 {
5129 delete_insns_since (last);
5130 continue;
5131 }
5132 if (target != to)
5133 convert_move (to, target, 0);
5134 return true;
5135 }
5136 }
5137
5138 return false;
5139 }
5140 \f
5141 /* Report whether we have an instruction to perform the operation
5142 specified by CODE on operands of mode MODE. */
5143 int
5144 have_insn_for (enum rtx_code code, machine_mode mode)
5145 {
5146 return (code_to_optab (code)
5147 && (optab_handler (code_to_optab (code), mode)
5148 != CODE_FOR_nothing));
5149 }
5150
5151 /* Print information about the current contents of the optabs on
5152 STDERR. */
5153
5154 DEBUG_FUNCTION void
5155 debug_optab_libfuncs (void)
5156 {
5157 int i, j, k;
5158
5159 /* Dump the arithmetic optabs. */
5160 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5161 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5162 {
5163 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5164 if (l)
5165 {
5166 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5167 fprintf (stderr, "%s\t%s:\t%s\n",
5168 GET_RTX_NAME (optab_to_code ((optab) i)),
5169 GET_MODE_NAME (j),
5170 XSTR (l, 0));
5171 }
5172 }
5173
5174 /* Dump the conversion optabs. */
5175 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5176 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5177 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5178 {
5179 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5180 (machine_mode) k);
5181 if (l)
5182 {
5183 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5184 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5185 GET_RTX_NAME (optab_to_code ((optab) i)),
5186 GET_MODE_NAME (j),
5187 GET_MODE_NAME (k),
5188 XSTR (l, 0));
5189 }
5190 }
5191 }
5192
5193 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5194 CODE. Return 0 on failure. */
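/* E.g. (an illustrative sketch) a division expander could guard
   against a zero divisor with

       rtx_insn *seq = gen_cond_trap (EQ, divisor, const0_rtx,
                                      const0_rtx);
       if (seq)
         emit_insn (seq);

   and fall back to an explicit compare-and-branch when SEQ is null.  */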
5195
5196 rtx_insn *
5197 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5198 {
5199 machine_mode mode = GET_MODE (op1);
5200 enum insn_code icode;
5201 rtx_insn *insn;
5202 rtx trap_rtx;
5203
5204 if (mode == VOIDmode)
5205 return 0;
5206
5207 icode = optab_handler (ctrap_optab, mode);
5208 if (icode == CODE_FOR_nothing)
5209 return 0;
5210
5211 /* Some targets only accept a zero trap code. */
5212 if (!insn_operand_matches (icode, 3, tcode))
5213 return 0;
5214
5215 do_pending_stack_adjust ();
5216 start_sequence ();
5217 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5218 &trap_rtx, &mode);
5219 if (!trap_rtx)
5220 insn = NULL;
5221 else
5222 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5223 tcode);
5224
5225 /* If that failed, then give up. */
5226 if (insn == 0)
5227 {
5228 end_sequence ();
5229 return 0;
5230 }
5231
5232 emit_insn (insn);
5233 insn = get_insns ();
5234 end_sequence ();
5235 return insn;
5236 }
5237
5238 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5239 or unsigned operation code. */
5240
5241 enum rtx_code
5242 get_rtx_code (enum tree_code tcode, bool unsignedp)
5243 {
5244 enum rtx_code code;
5245 switch (tcode)
5246 {
5247 case EQ_EXPR:
5248 code = EQ;
5249 break;
5250 case NE_EXPR:
5251 code = NE;
5252 break;
5253 case LT_EXPR:
5254 code = unsignedp ? LTU : LT;
5255 break;
5256 case LE_EXPR:
5257 code = unsignedp ? LEU : LE;
5258 break;
5259 case GT_EXPR:
5260 code = unsignedp ? GTU : GT;
5261 break;
5262 case GE_EXPR:
5263 code = unsignedp ? GEU : GE;
5264 break;
5265
5266 case UNORDERED_EXPR:
5267 code = UNORDERED;
5268 break;
5269 case ORDERED_EXPR:
5270 code = ORDERED;
5271 break;
5272 case UNLT_EXPR:
5273 code = UNLT;
5274 break;
5275 case UNLE_EXPR:
5276 code = UNLE;
5277 break;
5278 case UNGT_EXPR:
5279 code = UNGT;
5280 break;
5281 case UNGE_EXPR:
5282 code = UNGE;
5283 break;
5284 case UNEQ_EXPR:
5285 code = UNEQ;
5286 break;
5287 case LTGT_EXPR:
5288 code = LTGT;
5289 break;
5290
5291 case BIT_AND_EXPR:
5292 code = AND;
5293 break;
5294
5295 case BIT_IOR_EXPR:
5296 code = IOR;
5297 break;
5298
5299 default:
5300 gcc_unreachable ();
5301 }
5302 return code;
5303 }
5304
5305 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
5306 select signed or unsigned operators. OPNO holds the index of the
5307 first comparison operand for insn ICODE. Do not generate the
5308 compare instruction itself. */
5309
5310 static rtx
5311 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
5312 tree t_op0, tree t_op1, bool unsignedp,
5313 enum insn_code icode, unsigned int opno)
5314 {
5315 struct expand_operand ops[2];
5316 rtx rtx_op0, rtx_op1;
5317 machine_mode m0, m1;
5318 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5319
5320 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5321
5322 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5323 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5324 cases, use the original mode. */
5325 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5326 EXPAND_STACK_PARM);
5327 m0 = GET_MODE (rtx_op0);
5328 if (m0 == VOIDmode)
5329 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5330
5331 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5332 EXPAND_STACK_PARM);
5333 m1 = GET_MODE (rtx_op1);
5334 if (m1 == VOIDmode)
5335 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5336
5337 create_input_operand (&ops[0], rtx_op0, m0);
5338 create_input_operand (&ops[1], rtx_op1, m1);
5339 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5340 gcc_unreachable ();
5341 return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
5342 }
5343
5344 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
5345 vec_perm operand, assuming the second operand is a constant vector of zeroes.
5346 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
5347 shift. */
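/* E.g. (illustrative) for V4SImode, SEL = { 2, 3, 4, 5 } selects
   elements 2 and 3 of the first operand followed by two zeroes from
   the second, i.e. a shift by 2 * 32 = 64 bits, so GEN_INT (64) is
   returned.  */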
5348 static rtx
5349 shift_amt_for_vec_perm_mask (rtx sel)
5350 {
5351 unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
5352 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));
5353
5354 if (GET_CODE (sel) != CONST_VECTOR)
5355 return NULL_RTX;
5356
5357 first = INTVAL (CONST_VECTOR_ELT (sel, 0));
5358 if (first >= nelt)
5359 return NULL_RTX;
5360 for (i = 1; i < nelt; i++)
5361 {
5362 int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
5363 unsigned int expected = i + first;
5364 /* Indices into the second vector are all equivalent. */
5365 if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
5366 return NULL_RTX;
5367 }
5368
5369 return GEN_INT (first * bitsize);
5370 }
5371
5372 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5373
5374 static rtx
5375 expand_vec_perm_1 (enum insn_code icode, rtx target,
5376 rtx v0, rtx v1, rtx sel)
5377 {
5378 machine_mode tmode = GET_MODE (target);
5379 machine_mode smode = GET_MODE (sel);
5380 struct expand_operand ops[4];
5381
5382 create_output_operand (&ops[0], target, tmode);
5383 create_input_operand (&ops[3], sel, smode);
5384
5385 /* Make an effort to preserve v0 == v1. The target expander is able to
5386 rely on this to determine if we're permuting a single input operand. */
5387 if (rtx_equal_p (v0, v1))
5388 {
5389 if (!insn_operand_matches (icode, 1, v0))
5390 v0 = force_reg (tmode, v0);
5391 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5392 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5393
5394 create_fixed_operand (&ops[1], v0);
5395 create_fixed_operand (&ops[2], v0);
5396 }
5397 else
5398 {
5399 create_input_operand (&ops[1], v0, tmode);
5400 create_input_operand (&ops[2], v1, tmode);
5401 }
5402
5403 if (maybe_expand_insn (icode, 4, ops))
5404 return ops[0].value;
5405 return NULL_RTX;
5406 }
5407
5408 /* Generate instructions for vec_perm optab given its mode
5409 and three operands. */
5410
5411 rtx
5412 expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5413 {
5414 enum insn_code icode;
5415 machine_mode qimode;
5416 unsigned int i, w, e, u;
5417 rtx tmp, sel_qi = NULL;
5418 rtvec vec;
5419
5420 if (!target || GET_MODE (target) != mode)
5421 target = gen_reg_rtx (mode);
5422
5423 w = GET_MODE_SIZE (mode);
5424 e = GET_MODE_NUNITS (mode);
5425 u = GET_MODE_UNIT_SIZE (mode);
5426
5427 /* Set QIMODE to a different vector mode with byte elements.
5428 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5429 qimode = VOIDmode;
5430 if (GET_MODE_INNER (mode) != QImode)
5431 {
5432 qimode = mode_for_vector (QImode, w);
5433 if (!VECTOR_MODE_P (qimode))
5434 qimode = VOIDmode;
5435 }
5436
5437 /* If the input is a constant, expand it specially. */
5438 gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
5439 if (GET_CODE (sel) == CONST_VECTOR)
5440 {
5441 /* See if this can be handled with a vec_shr. We only do this if the
5442 second vector is all zeroes. */
5443 enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
5444 enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5445 ? optab_handler (vec_shr_optab, qimode)
5446 : CODE_FOR_nothing);
5447 rtx shift_amt = NULL_RTX;
5448 if (v1 == CONST0_RTX (GET_MODE (v1))
5449 && (shift_code != CODE_FOR_nothing
5450 || shift_code_qi != CODE_FOR_nothing))
5451 {
5452 shift_amt = shift_amt_for_vec_perm_mask (sel);
5453 if (shift_amt)
5454 {
5455 struct expand_operand ops[3];
5456 if (shift_code != CODE_FOR_nothing)
5457 {
5458 create_output_operand (&ops[0], target, mode);
5459 create_input_operand (&ops[1], v0, mode);
5460 create_convert_operand_from_type (&ops[2], shift_amt,
5461 sizetype);
5462 if (maybe_expand_insn (shift_code, 3, ops))
5463 return ops[0].value;
5464 }
5465 if (shift_code_qi != CODE_FOR_nothing)
5466 {
5467 tmp = gen_reg_rtx (qimode);
5468 create_output_operand (&ops[0], tmp, qimode);
5469 create_input_operand (&ops[1], gen_lowpart (qimode, v0),
5470 qimode);
5471 create_convert_operand_from_type (&ops[2], shift_amt,
5472 sizetype);
5473 if (maybe_expand_insn (shift_code_qi, 3, ops))
5474 return gen_lowpart (mode, ops[0].value);
5475 }
5476 }
5477 }
5478
5479 icode = direct_optab_handler (vec_perm_const_optab, mode);
5480 if (icode != CODE_FOR_nothing)
5481 {
5482 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5483 if (tmp)
5484 return tmp;
5485 }
5486
5487 /* Fall back to a constant byte-based permutation. */
5488 if (qimode != VOIDmode)
5489 {
5490 vec = rtvec_alloc (w);
5491 for (i = 0; i < e; ++i)
5492 {
5493 unsigned int j, this_e;
5494
5495 this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
5496 this_e &= 2 * e - 1;
5497 this_e *= u;
5498
5499 for (j = 0; j < u; ++j)
5500 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
5501 }
5502 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
5503
5504 icode = direct_optab_handler (vec_perm_const_optab, qimode);
5505 if (icode != CODE_FOR_nothing)
5506 {
5507 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5508 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5509 gen_lowpart (qimode, v1), sel_qi);
5510 if (tmp)
5511 return gen_lowpart (mode, tmp);
5512 }
5513 }
5514 }
5515
5516 /* Otherwise expand as a fully variable permutation. */
5517 icode = direct_optab_handler (vec_perm_optab, mode);
5518 if (icode != CODE_FOR_nothing)
5519 {
5520 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5521 if (tmp)
5522 return tmp;
5523 }
5524
5525 /* As a special case to aid several targets, lower the element-based
5526 permutation to a byte-based permutation and try again. */
5527 if (qimode == VOIDmode)
5528 return NULL_RTX;
5529 icode = direct_optab_handler (vec_perm_optab, qimode);
5530 if (icode == CODE_FOR_nothing)
5531 return NULL_RTX;
5532
5533 if (sel_qi == NULL)
5534 {
5535 /* Multiply each element by its byte size. */
5536 machine_mode selmode = GET_MODE (sel);
5537 if (u == 2)
5538 sel = expand_simple_binop (selmode, PLUS, sel, sel,
5539 NULL, 0, OPTAB_DIRECT);
5540 else
5541 sel = expand_simple_binop (selmode, ASHIFT, sel,
5542 GEN_INT (exact_log2 (u)),
5543 NULL, 0, OPTAB_DIRECT);
5544 gcc_assert (sel != NULL);
5545
5546 /* Broadcast the low byte of each element into each of its bytes. */
5547 vec = rtvec_alloc (w);
5548 for (i = 0; i < w; ++i)
5549 {
5550 int this_e = i / u * u;
5551 if (BYTES_BIG_ENDIAN)
5552 this_e += u - 1;
5553 RTVEC_ELT (vec, i) = GEN_INT (this_e);
5554 }
5555 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5556 sel = gen_lowpart (qimode, sel);
5557 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
5558 gcc_assert (sel != NULL);
5559
5560 /* Add the byte offset to each byte element. */
5561 /* Note that the definition of the indices here is memory ordering,
5562 so there should be no difference between big and little endian. */
5563 vec = rtvec_alloc (w);
5564 for (i = 0; i < w; ++i)
5565 RTVEC_ELT (vec, i) = GEN_INT (i % u);
5566 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5567 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5568 sel, 0, OPTAB_DIRECT);
5569 gcc_assert (sel_qi != NULL);
5570 }
5571
5572 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5573 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5574 gen_lowpart (qimode, v1), sel_qi);
5575 if (tmp)
5576 tmp = gen_lowpart (mode, tmp);
5577 return tmp;
5578 }
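
/* Worked example of the variable-selector lowering above (illustrative,
   little-endian, V4SImode: w == 16, e == 4, u == 4).  For a selector
   whose value is {1, 0, 3, 2}:

     1. scaling by the byte size gives     {4, 0, 12, 8}
     2. broadcasting each low byte gives   {4,4,4,4, 0,0,0,0,
                                            12,12,12,12, 8,8,8,8}
     3. adding the byte offsets gives      {4,5,6,7, 0,1,2,3,
                                            12,13,14,15, 8,9,10,11}

   which is the equivalent V16QImode selector for the byte-based
   vec_perm.  */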
5579
5580 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5581 three operands. */
5582
5583 rtx
5584 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5585 rtx target)
5586 {
5587 struct expand_operand ops[4];
5588 machine_mode mode = TYPE_MODE (vec_cond_type);
5589 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5590 enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5591 rtx mask, rtx_op1, rtx_op2;
5592
5593 if (icode == CODE_FOR_nothing)
5594 return 0;
5595
5596 mask = expand_normal (op0);
5597 rtx_op1 = expand_normal (op1);
5598 rtx_op2 = expand_normal (op2);
5599
5600 mask = force_reg (mask_mode, mask);
5601 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5602
5603 create_output_operand (&ops[0], target, mode);
5604 create_input_operand (&ops[1], rtx_op1, mode);
5605 create_input_operand (&ops[2], rtx_op2, mode);
5606 create_input_operand (&ops[3], mask, mask_mode);
5607 expand_insn (icode, 4, ops);
5608
5609 return ops[0].value;
5610 }
5611
5612 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5613 three operands. */
5614
5615 rtx
5616 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5617 rtx target)
5618 {
5619 struct expand_operand ops[6];
5620 enum insn_code icode;
5621 rtx comparison, rtx_op1, rtx_op2;
5622 machine_mode mode = TYPE_MODE (vec_cond_type);
5623 machine_mode cmp_op_mode;
5624 bool unsignedp;
5625 tree op0a, op0b;
5626 enum tree_code tcode;
5627
5628 if (COMPARISON_CLASS_P (op0))
5629 {
5630 op0a = TREE_OPERAND (op0, 0);
5631 op0b = TREE_OPERAND (op0, 1);
5632 tcode = TREE_CODE (op0);
5633 }
5634 else
5635 {
5636 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5637 if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5638 != CODE_FOR_nothing)
5639 return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5640 op2, target);
5641 /* Fake op0 < 0. */
5642 else
5643 {
5644 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5645 == MODE_VECTOR_INT);
5646 op0a = op0;
5647 op0b = build_zero_cst (TREE_TYPE (op0));
5648 tcode = LT_EXPR;
5649 }
5650 }
5651 cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5652 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5653
5654
5655 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
5656 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
5657
5658 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
5659 if (icode == CODE_FOR_nothing)
5660 {
5661 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5662 icode = get_vcond_eq_icode (mode, cmp_op_mode);
5663 if (icode == CODE_FOR_nothing)
5664 return 0;
5665 }
5666
5667 comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
5668 icode, 4);
5669 rtx_op1 = expand_normal (op1);
5670 rtx_op2 = expand_normal (op2);
5671
5672 create_output_operand (&ops[0], target, mode);
5673 create_input_operand (&ops[1], rtx_op1, mode);
5674 create_input_operand (&ops[2], rtx_op2, mode);
5675 create_fixed_operand (&ops[3], comparison);
5676 create_fixed_operand (&ops[4], XEXP (comparison, 0));
5677 create_fixed_operand (&ops[5], XEXP (comparison, 1));
5678 expand_insn (icode, 6, ops);
5679 return ops[0].value;
5680 }
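
/* Editor's note on the "fake op0 < 0" path above: the elements of a
   vector boolean mask are all-zeros or all-ones bit patterns, and an
   all-ones element reads as -1 when interpreted as a signed integer.
   A signed "op0 < 0" comparison therefore tests exactly the mask bit
   of each element, which lets a mask operand be routed through the
   ordinary comparison-based vcond patterns when no vcond_mask pattern
   exists.  */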
5681
5682 /* Generate insns for a vector comparison into a mask. */
5683
5684 rtx
5685 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5686 {
5687 struct expand_operand ops[4];
5688 enum insn_code icode;
5689 rtx comparison;
5690 machine_mode mask_mode = TYPE_MODE (type);
5691 machine_mode vmode;
5692 bool unsignedp;
5693 tree op0a, op0b;
5694 enum tree_code tcode;
5695
5696 op0a = TREE_OPERAND (exp, 0);
5697 op0b = TREE_OPERAND (exp, 1);
5698 tcode = TREE_CODE (exp);
5699
5700 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5701 vmode = TYPE_MODE (TREE_TYPE (op0a));
5702
5703 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5704 if (icode == CODE_FOR_nothing)
5705 {
5706 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5707 icode = get_vec_cmp_eq_icode (vmode, mask_mode);
5708 if (icode == CODE_FOR_nothing)
5709 return 0;
5710 }
5711
5712 comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
5713 unsignedp, icode, 2);
5714 create_output_operand (&ops[0], target, mask_mode);
5715 create_fixed_operand (&ops[1], comparison);
5716 create_fixed_operand (&ops[2], XEXP (comparison, 0));
5717 create_fixed_operand (&ops[3], XEXP (comparison, 1));
5718 expand_insn (icode, 4, ops);
5719 return ops[0].value;
5720 }
5721
5722 /* Expand a highpart multiply. */
5723
5724 rtx
5725 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5726 rtx target, bool uns_p)
5727 {
5728 struct expand_operand eops[3];
5729 enum insn_code icode;
5730 int method, i, nunits;
5731 machine_mode wmode;
5732 rtx m1, m2, perm;
5733 optab tab1, tab2;
5734 rtvec v;
5735
5736 method = can_mult_highpart_p (mode, uns_p);
5737 switch (method)
5738 {
5739 case 0:
5740 return NULL_RTX;
5741 case 1:
5742 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5743 return expand_binop (mode, tab1, op0, op1, target, uns_p,
5744 OPTAB_LIB_WIDEN);
5745 case 2:
5746 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5747 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5748 break;
5749 case 3:
5750 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5751 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5752 if (BYTES_BIG_ENDIAN)
5753 std::swap (tab1, tab2);
5754 break;
5755 default:
5756 gcc_unreachable ();
5757 }
5758
5759 icode = optab_handler (tab1, mode);
5760 nunits = GET_MODE_NUNITS (mode);
5761 wmode = insn_data[icode].operand[0].mode;
5762 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
5763 gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));
5764
5765 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5766 create_input_operand (&eops[1], op0, mode);
5767 create_input_operand (&eops[2], op1, mode);
5768 expand_insn (icode, 3, eops);
5769 m1 = gen_lowpart (mode, eops[0].value);
5770
5771 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5772 create_input_operand (&eops[1], op0, mode);
5773 create_input_operand (&eops[2], op1, mode);
5774 expand_insn (optab_handler (tab2, mode), 3, eops);
5775 m2 = gen_lowpart (mode, eops[0].value);
5776
5777 v = rtvec_alloc (nunits);
5778 if (method == 2)
5779 {
5780 for (i = 0; i < nunits; ++i)
5781 RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
5782 + ((i & 1) ? nunits : 0));
5783 }
5784 else
5785 {
5786 for (i = 0; i < nunits; ++i)
5787 RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
5788 }
5789 perm = gen_rtx_CONST_VECTOR (mode, v);
5790
5791 return expand_vec_perm (mode, m1, m2, perm, target);
5792 }
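
/* Worked example for method 2 above (illustrative, little-endian
   V4SImode): the even and odd widening multiplies leave the full
   64-bit products in M1 = {p0, p2} and M2 = {p1, p3}.  Viewed in
   V4SImode, M1 is {p0.lo, p0.hi, p2.lo, p2.hi} and M2 likewise, so
   the loop above builds the selector {1, 5, 3, 7}, which extracts
   {p0.hi, p1.hi, p2.hi, p3.hi} -- the highpart result.  */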
5793 \f
5794 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5795 pattern. */
5796
5797 static void
5798 find_cc_set (rtx x, const_rtx pat, void *data)
5799 {
5800 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
5801 && GET_CODE (pat) == SET)
5802 {
5803 rtx *p_cc_reg = (rtx *) data;
5804 gcc_assert (!*p_cc_reg);
5805 *p_cc_reg = x;
5806 }
5807 }
5808
5809 /* This is a helper function for the other atomic operations. This function
5810 emits a loop that contains SEQ that iterates until a compare-and-swap
5811 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5812 a set of instructions that takes a value from OLD_REG as an input and
5813 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5814 set to the current contents of MEM. After SEQ, a compare-and-swap will
5815 attempt to update MEM with NEW_REG. The function returns true when the
5816 loop was generated successfully. */
5817
5818 static bool
5819 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5820 {
5821 machine_mode mode = GET_MODE (mem);
5822 rtx_code_label *label;
5823 rtx cmp_reg, success, oldval;
5824
5825 /* The loop we want to generate looks like
5826
5827 cmp_reg = mem;
5828 label:
5829 old_reg = cmp_reg;
5830 seq;
5831 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5832 if (success)
5833 goto label;
5834
5835 Note that we only do the plain load from memory once. Subsequent
5836 iterations use the value loaded by the compare-and-swap pattern. */
5837
5838 label = gen_label_rtx ();
5839 cmp_reg = gen_reg_rtx (mode);
5840
5841 emit_move_insn (cmp_reg, mem);
5842 emit_label (label);
5843 emit_move_insn (old_reg, cmp_reg);
5844 if (seq)
5845 emit_insn (seq);
5846
5847 success = NULL_RTX;
5848 oldval = cmp_reg;
5849 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
5850 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
5851 MEMMODEL_RELAXED))
5852 return false;
5853
5854 if (oldval != cmp_reg)
5855 emit_move_insn (cmp_reg, oldval);
5856
5857 /* Mark this jump predicted not taken. */
5858 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
5859 GET_MODE (success), 1, label, 0);
5860 return true;
5861 }
5862
5863
5864 /* This function tries to emit an atomic_exchange instruction. VAL is written
5865 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5866 using TARGET if possible. */
5867
5868 static rtx
5869 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
5870 {
5871 machine_mode mode = GET_MODE (mem);
5872 enum insn_code icode;
5873
5874 /* If the target supports the exchange directly, great. */
5875 icode = direct_optab_handler (atomic_exchange_optab, mode);
5876 if (icode != CODE_FOR_nothing)
5877 {
5878 struct expand_operand ops[4];
5879
5880 create_output_operand (&ops[0], target, mode);
5881 create_fixed_operand (&ops[1], mem);
5882 create_input_operand (&ops[2], val, mode);
5883 create_integer_operand (&ops[3], model);
5884 if (maybe_expand_insn (icode, 4, ops))
5885 return ops[0].value;
5886 }
5887
5888 return NULL_RTX;
5889 }
5890
5891 /* This function tries to implement an atomic exchange operation using
5892 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5893 The previous contents of *MEM are returned, using TARGET if possible.
5894 Since this instruction is an acquire barrier only, stronger memory
5895 models may require additional barriers to be emitted. */
5896
5897 static rtx
5898 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
5899 enum memmodel model)
5900 {
5901 machine_mode mode = GET_MODE (mem);
5902 enum insn_code icode;
5903 rtx_insn *last_insn = get_last_insn ();
5904
5905 icode = optab_handler (sync_lock_test_and_set_optab, mode);
5906
5907 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
5908 exists, and the memory model is stronger than acquire, add a release
5909 barrier before the instruction. */
5910
5911 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
5912 expand_mem_thread_fence (model);
5913
5914 if (icode != CODE_FOR_nothing)
5915 {
5916 struct expand_operand ops[3];
5917 create_output_operand (&ops[0], target, mode);
5918 create_fixed_operand (&ops[1], mem);
5919 create_input_operand (&ops[2], val, mode);
5920 if (maybe_expand_insn (icode, 3, ops))
5921 return ops[0].value;
5922 }
5923
5924 /* If an external test-and-set libcall is provided, use that instead of
5925 any external compare-and-swap that we might get from the compare-and-
5926 swap-loop expansion later. */
5927 if (!can_compare_and_swap_p (mode, false))
5928 {
5929 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
5930 if (libfunc != NULL)
5931 {
5932 rtx addr;
5933
5934 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
5935 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
5936 mode, 2, addr, ptr_mode,
5937 val, mode);
5938 }
5939 }
5940
5941 /* If the test_and_set can't be emitted, eliminate any barrier that might
5942 have been emitted. */
5943 delete_insns_since (last_insn);
5944 return NULL_RTX;
5945 }
5946
5947 /* This function tries to implement an atomic exchange operation using a
5948 compare_and_swap loop. VAL is written to *MEM. The previous contents of
5949 *MEM are returned, using TARGET if possible. No memory model is required
5950 since a compare_and_swap loop is seq-cst. */
5951
5952 static rtx
5953 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
5954 {
5955 machine_mode mode = GET_MODE (mem);
5956
5957 if (can_compare_and_swap_p (mode, true))
5958 {
5959 if (!target || !register_operand (target, mode))
5960 target = gen_reg_rtx (mode);
5961 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
5962 return target;
5963 }
5964
5965 return NULL_RTX;
5966 }
5967
5968 /* This function tries to implement an atomic test-and-set operation
5969 using the atomic_test_and_set instruction pattern. A boolean value
5970 is returned from the operation, using TARGET if possible. */
5971
5972 static rtx
5973 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
5974 {
5975 machine_mode pat_bool_mode;
5976 struct expand_operand ops[3];
5977
5978 if (!targetm.have_atomic_test_and_set ())
5979 return NULL_RTX;
5980
5981 /* While we always get QImode from __atomic_test_and_set, we get
5982 other memory modes from __sync_lock_test_and_set. Note that we
5983 use no endian adjustment here. This matches the 4.6 behavior
5984 in the Sparc backend. */
5985 enum insn_code icode = targetm.code_for_atomic_test_and_set;
5986 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
5987 if (GET_MODE (mem) != QImode)
5988 mem = adjust_address_nv (mem, QImode, 0);
5989
5990 pat_bool_mode = insn_data[icode].operand[0].mode;
5991 create_output_operand (&ops[0], target, pat_bool_mode);
5992 create_fixed_operand (&ops[1], mem);
5993 create_integer_operand (&ops[2], model);
5994
5995 if (maybe_expand_insn (icode, 3, ops))
5996 return ops[0].value;
5997 return NULL_RTX;
5998 }
5999
6000 /* This function expands the legacy __sync_lock_test_and_set operation, which is
6001 generally an atomic exchange. Some limited targets only allow the
6002 constant 1 to be stored. This is an ACQUIRE operation.
6003
6004 TARGET is an optional place to stick the return value.
6005 MEM is where VAL is stored. */
6006
6007 rtx
6008 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6009 {
6010 rtx ret;
6011
6012 /* Try an atomic_exchange first. */
6013 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6014 if (ret)
6015 return ret;
6016
6017 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6018 MEMMODEL_SYNC_ACQUIRE);
6019 if (ret)
6020 return ret;
6021
6022 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6023 if (ret)
6024 return ret;
6025
6026 /* If there are no other options, try atomic_test_and_set if the value
6027 being stored is 1. */
6028 if (val == const1_rtx)
6029 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6030
6031 return ret;
6032 }
6033
6034 /* This function expands the atomic test_and_set operation:
6035 atomically store a boolean TRUE into MEM and return the previous value.
6036
6037 MEMMODEL is the memory model variant to use.
6038 TARGET is an optional place to stick the return value. */
6039
6040 rtx
6041 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6042 {
6043 machine_mode mode = GET_MODE (mem);
6044 rtx ret, trueval, subtarget;
6045
6046 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6047 if (ret)
6048 return ret;
6049
6050 /* Be binary compatible with non-default settings of trueval, and different
6051 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6052 another only has atomic-exchange. */
6053 if (targetm.atomic_test_and_set_trueval == 1)
6054 {
6055 trueval = const1_rtx;
6056 subtarget = target ? target : gen_reg_rtx (mode);
6057 }
6058 else
6059 {
6060 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6061 subtarget = gen_reg_rtx (mode);
6062 }
6063
6064 /* Try the atomic-exchange optab... */
6065 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6066
6067 /* ... then an atomic-compare-and-swap loop ... */
6068 if (!ret)
6069 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6070
6071 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6072 if (!ret)
6073 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6074
6075 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6076 things with the value 1. Thus we try again without trueval. */
6077 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6078 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6079
6080 /* Failing all else, assume a single threaded environment and simply
6081 perform the operation. */
6082 if (!ret)
6083 {
6084 /* If the result is ignored skip the move to target. */
6085 if (subtarget != const0_rtx)
6086 emit_move_insn (subtarget, mem);
6087
6088 emit_move_insn (mem, trueval);
6089 ret = subtarget;
6090 }
6091
6092 /* Recall that we have to return a boolean value; rectify if trueval
6093 is not exactly one. */
6094 if (targetm.atomic_test_and_set_trueval != 1)
6095 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6096
6097 return ret;
6098 }
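
/* Editor's note (illustrative): on a target whose native test-and-set
   stores an all-ones byte, targetm.atomic_test_and_set_trueval would
   be 0xff rather than 1.  The exchange-based fallbacks above must then
   store that same value to stay compatible with the native pattern,
   and the emit_store_flag_force call canonicalizes the raw result back
   to the 0/1 boolean the caller expects.  */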
6099
6100 /* This function expands the atomic exchange operation:
6101 atomically store VAL in MEM and return the previous value in MEM.
6102
6103 MEMMODEL is the memory model variant to use.
6104 TARGET is an optional place to stick the return value. */
6105
6106 rtx
6107 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6108 {
6109 machine_mode mode = GET_MODE (mem);
6110 rtx ret;
6111
6112 /* If loads are not atomic for the required size and we are not called to
6113 provide a __sync builtin, do not do anything so that we stay consistent
6114 with atomic loads of the same size. */
6115 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6116 return NULL_RTX;
6117
6118 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6119
6120 /* Next try a compare-and-swap loop for the exchange. */
6121 if (!ret)
6122 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6123
6124 return ret;
6125 }
6126
6127 /* This function expands the atomic compare exchange operation:
6128
6129 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6130 *PTARGET_OVAL is an optional place to store the old value from memory.
6131 Both target parameters may be NULL or const0_rtx to indicate that we do
6132 not care about that return value. Both target parameters are updated on
6133 success to the actual location of the corresponding result.
6134
6135 MEMMODEL is the memory model variant to use.
6136
6137 The return value of the function is true for success. */
6138
6139 bool
6140 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6141 rtx mem, rtx expected, rtx desired,
6142 bool is_weak, enum memmodel succ_model,
6143 enum memmodel fail_model)
6144 {
6145 machine_mode mode = GET_MODE (mem);
6146 struct expand_operand ops[8];
6147 enum insn_code icode;
6148 rtx target_oval, target_bool = NULL_RTX;
6149 rtx libfunc;
6150
6151 /* If loads are not atomic for the required size and we are not called to
6152 provide a __sync builtin, do not do anything so that we stay consistent
6153 with atomic loads of the same size. */
6154 if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
6155 return false;
6156
6157 /* Load expected into a register for the compare and swap. */
6158 if (MEM_P (expected))
6159 expected = copy_to_reg (expected);
6160
6161 /* Make sure we always have some place to put the return oldval.
6162 Further, make sure that place is distinct from the input expected,
6163 just in case we need that path down below. */
6164 if (ptarget_oval && *ptarget_oval == const0_rtx)
6165 ptarget_oval = NULL;
6166
6167 if (ptarget_oval == NULL
6168 || (target_oval = *ptarget_oval) == NULL
6169 || reg_overlap_mentioned_p (expected, target_oval))
6170 target_oval = gen_reg_rtx (mode);
6171
6172 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6173 if (icode != CODE_FOR_nothing)
6174 {
6175 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6176
6177 if (ptarget_bool && *ptarget_bool == const0_rtx)
6178 ptarget_bool = NULL;
6179
6180 /* Make sure we always have a place for the bool operand. */
6181 if (ptarget_bool == NULL
6182 || (target_bool = *ptarget_bool) == NULL
6183 || GET_MODE (target_bool) != bool_mode)
6184 target_bool = gen_reg_rtx (bool_mode);
6185
6186 /* Emit the compare_and_swap. */
6187 create_output_operand (&ops[0], target_bool, bool_mode);
6188 create_output_operand (&ops[1], target_oval, mode);
6189 create_fixed_operand (&ops[2], mem);
6190 create_input_operand (&ops[3], expected, mode);
6191 create_input_operand (&ops[4], desired, mode);
6192 create_integer_operand (&ops[5], is_weak);
6193 create_integer_operand (&ops[6], succ_model);
6194 create_integer_operand (&ops[7], fail_model);
6195 if (maybe_expand_insn (icode, 8, ops))
6196 {
6197 /* Return success/failure. */
6198 target_bool = ops[0].value;
6199 target_oval = ops[1].value;
6200 goto success;
6201 }
6202 }
6203
6204 /* Otherwise fall back to the original __sync_val_compare_and_swap
6205 which is always seq-cst. */
6206 icode = optab_handler (sync_compare_and_swap_optab, mode);
6207 if (icode != CODE_FOR_nothing)
6208 {
6209 rtx cc_reg;
6210
6211 create_output_operand (&ops[0], target_oval, mode);
6212 create_fixed_operand (&ops[1], mem);
6213 create_input_operand (&ops[2], expected, mode);
6214 create_input_operand (&ops[3], desired, mode);
6215 if (!maybe_expand_insn (icode, 4, ops))
6216 return false;
6217
6218 target_oval = ops[0].value;
6219
6220 /* If the caller isn't interested in the boolean return value,
6221 skip the computation of it. */
6222 if (ptarget_bool == NULL)
6223 goto success;
6224
6225 /* Otherwise, work out if the compare-and-swap succeeded. */
6226 cc_reg = NULL_RTX;
6227 if (have_insn_for (COMPARE, CCmode))
6228 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6229 if (cc_reg)
6230 {
6231 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6232 const0_rtx, VOIDmode, 0, 1);
6233 goto success;
6234 }
6235 goto success_bool_from_val;
6236 }
6237
6238 /* Also check for library support for __sync_val_compare_and_swap. */
6239 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6240 if (libfunc != NULL)
6241 {
6242 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6243 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6244 mode, 3, addr, ptr_mode,
6245 expected, mode, desired, mode);
6246 emit_move_insn (target_oval, target);
6247
6248 /* Compute the boolean return value only if requested. */
6249 if (ptarget_bool)
6250 goto success_bool_from_val;
6251 else
6252 goto success;
6253 }
6254
6255 /* Failure. */
6256 return false;
6257
6258 success_bool_from_val:
6259 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6260 expected, VOIDmode, 1, 1);
6261 success:
6262 /* Make sure that the oval output winds up where the caller asked. */
6263 if (ptarget_oval)
6264 *ptarget_oval = target_oval;
6265 if (ptarget_bool)
6266 *ptarget_bool = target_bool;
6267 return true;
6268 }
6269
6270 /* Generate asm volatile("" : : : "memory") as the memory barrier. */
6271
6272 static void
6273 expand_asm_memory_barrier (void)
6274 {
6275 rtx asm_op, clob;
6276
6277 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
6278 rtvec_alloc (0), rtvec_alloc (0),
6279 rtvec_alloc (0), UNKNOWN_LOCATION);
6280 MEM_VOLATILE_P (asm_op) = 1;
6281
6282 clob = gen_rtx_SCRATCH (VOIDmode);
6283 clob = gen_rtx_MEM (BLKmode, clob);
6284 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6285
6286 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6287 }
6288
6289 /* This routine will either emit the mem_thread_fence pattern or issue a
6290 sync_synchronize to generate a fence for memory model MEMMODEL. */
6291
6292 void
6293 expand_mem_thread_fence (enum memmodel model)
6294 {
6295 if (targetm.have_mem_thread_fence ())
6296 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6297 else if (!is_mm_relaxed (model))
6298 {
6299 if (targetm.have_memory_barrier ())
6300 emit_insn (targetm.gen_memory_barrier ());
6301 else if (synchronize_libfunc != NULL_RTX)
6302 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
6303 else
6304 expand_asm_memory_barrier ();
6305 }
6306 }
6307
6308 /* This routine will either emit the mem_signal_fence pattern or issue a
6309 sync_synchronize to generate a fence for memory model MEMMODEL. */
6310
6311 void
6312 expand_mem_signal_fence (enum memmodel model)
6313 {
6314 if (targetm.have_mem_signal_fence ())
6315 emit_insn (targetm.gen_mem_signal_fence (GEN_INT (model)));
6316 else if (!is_mm_relaxed (model))
6317 {
6318 /* By default targets are coherent between a thread and the signal
6319 handler running on the same thread. Thus this really becomes a
6320 compiler barrier, in that stores must not be sunk past
6321 (or raised above) a given point. */
6322 expand_asm_memory_barrier ();
6323 }
6324 }
6325
6326 /* This function expands the atomic load operation:
6327 return the atomically loaded value in MEM.
6328
6329 MEMMODEL is the memory model variant to use.
6330 TARGET is an optional place to stick the return value. */
6331
6332 rtx
6333 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6334 {
6335 machine_mode mode = GET_MODE (mem);
6336 enum insn_code icode;
6337
6338 /* If the target supports the load directly, great. */
6339 icode = direct_optab_handler (atomic_load_optab, mode);
6340 if (icode != CODE_FOR_nothing)
6341 {
6342 struct expand_operand ops[3];
6343
6344 create_output_operand (&ops[0], target, mode);
6345 create_fixed_operand (&ops[1], mem);
6346 create_integer_operand (&ops[2], model);
6347 if (maybe_expand_insn (icode, 3, ops))
6348 return ops[0].value;
6349 }
6350
6351 /* If the size of the object is greater than word size on this target,
6352 then we assume that a load will not be atomic. We could try to
6353 emulate a load with a compare-and-swap operation, but the store that
6354 such an operation performs would be incorrect if this is a volatile
6355 atomic load or targets read-only-mapped memory. */
6356 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6357 /* If there is no atomic load, leave it to the library call. */
6358 return NULL_RTX;
6359
6360 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6361 if (!target || target == const0_rtx)
6362 target = gen_reg_rtx (mode);
6363
6364 /* For SEQ_CST, emit a barrier before the load. */
6365 if (is_mm_seq_cst (model))
6366 expand_mem_thread_fence (model);
6367
6368 emit_move_insn (target, mem);
6369
6370 /* Emit the appropriate barrier after the load. */
6371 expand_mem_thread_fence (model);
6372
6373 return target;
6374 }
6375
6376 /* This function expands the atomic store operation:
6377 Atomically store VAL in MEM.
6378 MEMMODEL is the memory model variant to use.
6379 USE_RELEASE is true if __sync_lock_release can be used as a fall back.
6380 The function returns const0_rtx if a pattern was emitted. */
6381
6382 rtx
6383 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
6384 {
6385 machine_mode mode = GET_MODE (mem);
6386 enum insn_code icode;
6387 struct expand_operand ops[3];
6388
6389 /* If the target supports the store directly, great. */
6390 icode = direct_optab_handler (atomic_store_optab, mode);
6391 if (icode != CODE_FOR_nothing)
6392 {
6393 create_fixed_operand (&ops[0], mem);
6394 create_input_operand (&ops[1], val, mode);
6395 create_integer_operand (&ops[2], model);
6396 if (maybe_expand_insn (icode, 3, ops))
6397 return const0_rtx;
6398 }
6399
6400 /* If using __sync_lock_release is a viable alternative, try it.
6401 Note that this will not be set to true if we are expanding a generic
6402 __atomic_store_n. */
6403 if (use_release)
6404 {
6405 icode = direct_optab_handler (sync_lock_release_optab, mode);
6406 if (icode != CODE_FOR_nothing)
6407 {
6408 create_fixed_operand (&ops[0], mem);
6409 create_input_operand (&ops[1], const0_rtx, mode);
6410 if (maybe_expand_insn (icode, 2, ops))
6411 {
6412 /* lock_release is only a release barrier. */
6413 if (is_mm_seq_cst (model))
6414 expand_mem_thread_fence (model);
6415 return const0_rtx;
6416 }
6417 }
6418 }
6419
6420 /* If the size of the object is greater than word size on this target,
6421 a default store will not be atomic. */
6422 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6423 {
6424 /* If loads are atomic or we are called to provide a __sync builtin,
6425 we can try an atomic_exchange and throw away the result. Otherwise,
6426 don't do anything so that we do not create an inconsistency between
6427 loads and stores. */
6428 if (can_atomic_load_p (mode) || is_mm_sync (model))
6429 {
6430 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
6431 if (!target)
6432 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
6433 val);
6434 if (target)
6435 return const0_rtx;
6436 }
6437 return NULL_RTX;
6438 }
6439
6440 /* Otherwise assume stores are atomic, and emit the proper barriers. */
6441 expand_mem_thread_fence (model);
6442
6443 emit_move_insn (mem, val);
6444
6445 /* For SEQ_CST, also emit a barrier after the store. */
6446 if (is_mm_seq_cst (model))
6447 expand_mem_thread_fence (model);
6448
6449 return const0_rtx;
6450 }
6451
6452
6453 /* Structure containing the pointers and values required to process the
6454 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
6455
6456 struct atomic_op_functions
6457 {
6458 direct_optab mem_fetch_before;
6459 direct_optab mem_fetch_after;
6460 direct_optab mem_no_result;
6461 optab fetch_before;
6462 optab fetch_after;
6463 direct_optab no_result;
6464 enum rtx_code reverse_code;
6465 };
6466
6467
6468 /* Fill in structure pointed to by OP with the various optab entries for an
6469 operation of type CODE. */
6470
6471 static void
6472 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
6473 {
6474 gcc_assert (op != NULL);
6475
6476 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
6477 in the source code during compilation, and the optab entries are not
6478 computable until runtime. Fill in the values at runtime. */
6479 switch (code)
6480 {
6481 case PLUS:
6482 op->mem_fetch_before = atomic_fetch_add_optab;
6483 op->mem_fetch_after = atomic_add_fetch_optab;
6484 op->mem_no_result = atomic_add_optab;
6485 op->fetch_before = sync_old_add_optab;
6486 op->fetch_after = sync_new_add_optab;
6487 op->no_result = sync_add_optab;
6488 op->reverse_code = MINUS;
6489 break;
6490 case MINUS:
6491 op->mem_fetch_before = atomic_fetch_sub_optab;
6492 op->mem_fetch_after = atomic_sub_fetch_optab;
6493 op->mem_no_result = atomic_sub_optab;
6494 op->fetch_before = sync_old_sub_optab;
6495 op->fetch_after = sync_new_sub_optab;
6496 op->no_result = sync_sub_optab;
6497 op->reverse_code = PLUS;
6498 break;
6499 case XOR:
6500 op->mem_fetch_before = atomic_fetch_xor_optab;
6501 op->mem_fetch_after = atomic_xor_fetch_optab;
6502 op->mem_no_result = atomic_xor_optab;
6503 op->fetch_before = sync_old_xor_optab;
6504 op->fetch_after = sync_new_xor_optab;
6505 op->no_result = sync_xor_optab;
6506 op->reverse_code = XOR;
6507 break;
6508 case AND:
6509 op->mem_fetch_before = atomic_fetch_and_optab;
6510 op->mem_fetch_after = atomic_and_fetch_optab;
6511 op->mem_no_result = atomic_and_optab;
6512 op->fetch_before = sync_old_and_optab;
6513 op->fetch_after = sync_new_and_optab;
6514 op->no_result = sync_and_optab;
6515 op->reverse_code = UNKNOWN;
6516 break;
6517 case IOR:
6518 op->mem_fetch_before = atomic_fetch_or_optab;
6519 op->mem_fetch_after = atomic_or_fetch_optab;
6520 op->mem_no_result = atomic_or_optab;
6521 op->fetch_before = sync_old_ior_optab;
6522 op->fetch_after = sync_new_ior_optab;
6523 op->no_result = sync_ior_optab;
6524 op->reverse_code = UNKNOWN;
6525 break;
6526 case NOT:
6527 op->mem_fetch_before = atomic_fetch_nand_optab;
6528 op->mem_fetch_after = atomic_nand_fetch_optab;
6529 op->mem_no_result = atomic_nand_optab;
6530 op->fetch_before = sync_old_nand_optab;
6531 op->fetch_after = sync_new_nand_optab;
6532 op->no_result = sync_nand_optab;
6533 op->reverse_code = UNKNOWN;
6534 break;
6535 default:
6536 gcc_unreachable ();
6537 }
6538 }
6539
6540 /* See if there is a more efficient way to implement the operation "*MEM CODE VAL"
6541 using memory order MODEL. If AFTER is true the operation needs to return
6542 the value of *MEM after the operation, otherwise the previous value.
6543 TARGET is an optional place to put the result. The result is unused if
6544 it is const0_rtx.
6545 Return the result if there is a better sequence, otherwise NULL_RTX. */
6546
6547 static rtx
6548 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6549 enum memmodel model, bool after)
6550 {
6551 /* If the pre-operation value is what is wanted (fetch_OP), or the result
6552 is not used at all, the sequence may be replaceable by a native exchange
6553 operation. */
6553 if (!after || target == const0_rtx)
6554 {
6555 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
6556 if (code == AND && val == const0_rtx)
6557 {
6558 if (target == const0_rtx)
6559 target = gen_reg_rtx (GET_MODE (mem));
6560 return maybe_emit_atomic_exchange (target, mem, val, model);
6561 }
6562
6563 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
6564 if (code == IOR && val == constm1_rtx)
6565 {
6566 if (target == const0_rtx)
6567 target = gen_reg_rtx (GET_MODE (mem));
6568 return maybe_emit_atomic_exchange (target, mem, val, model);
6569 }
6570 }
6571
6572 return NULL_RTX;
6573 }
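
/* Source-level view of the two replacements above (illustrative):

     __atomic_fetch_and (p, 0, m)   acts like  __atomic_exchange_n (p, 0, m)
     __atomic_fetch_or  (p, -1, m)  acts like  __atomic_exchange_n (p, -1, m)

   since ANDing with zero and ORing with all-ones store a value that is
   independent of the previous contents of *p.  */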
6574
6575 /* Try to emit an instruction for a specific operation variation.
6576 OPTAB contains the OP functions.
6577 TARGET is an optional place to return the result. const0_rtx means unused.
6578 MEM is the memory location to operate on.
6579 VAL is the value to use in the operation.
6580 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6581 MODEL is the memory model, if used.
6582 AFTER is true if the returned result is the value after the operation. */
6583
6584 static rtx
6585 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
6586 rtx val, bool use_memmodel, enum memmodel model, bool after)
6587 {
6588 machine_mode mode = GET_MODE (mem);
6589 struct expand_operand ops[4];
6590 enum insn_code icode;
6591 int op_counter = 0;
6592 int num_ops;
6593
6594 /* Check to see if there is a result returned. */
6595 if (target == const0_rtx)
6596 {
6597 if (use_memmodel)
6598 {
6599 icode = direct_optab_handler (optab->mem_no_result, mode);
6600 create_integer_operand (&ops[2], model);
6601 num_ops = 3;
6602 }
6603 else
6604 {
6605 icode = direct_optab_handler (optab->no_result, mode);
6606 num_ops = 2;
6607 }
6608 }
6609 /* Otherwise, we need to generate a result. */
6610 else
6611 {
6612 if (use_memmodel)
6613 {
6614 icode = direct_optab_handler (after ? optab->mem_fetch_after
6615 : optab->mem_fetch_before, mode);
6616 create_integer_operand (&ops[3], model);
6617 num_ops = 4;
6618 }
6619 else
6620 {
6621 icode = optab_handler (after ? optab->fetch_after
6622 : optab->fetch_before, mode);
6623 num_ops = 3;
6624 }
6625 create_output_operand (&ops[op_counter++], target, mode);
6626 }
6627 if (icode == CODE_FOR_nothing)
6628 return NULL_RTX;
6629
6630 create_fixed_operand (&ops[op_counter++], mem);
6631 /* VAL may have been promoted to a wider mode. Shrink it if so. */
6632 create_convert_operand_to (&ops[op_counter++], val, mode, true);
6633
6634 if (maybe_expand_insn (icode, num_ops, ops))
6635 return (target == const0_rtx ? const0_rtx : ops[0].value);
6636
6637 return NULL_RTX;
6638 }
6639
6640
6641 /* This function expands an atomic fetch_OP or OP_fetch operation:
6642 TARGET is an optional place to stick the return value. const0_rtx indicates
6643 the result is unused.
6644 Atomically fetch MEM, perform the operation with VAL, and store the result back to MEM.
6645 CODE is the operation being performed (OP)
6646 MEMMODEL is the memory model variant to use.
6647 AFTER is true to return the result of the operation (OP_fetch).
6648 AFTER is false to return the value before the operation (fetch_OP).
6649
6650 This function will *only* generate instructions if there is a direct
6651 optab. No compare and swap loops or libcalls will be generated. */
6652
6653 static rtx
6654 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6655 enum rtx_code code, enum memmodel model,
6656 bool after)
6657 {
6658 machine_mode mode = GET_MODE (mem);
6659 struct atomic_op_functions optab;
6660 rtx result;
6661 bool unused_result = (target == const0_rtx);
6662
6663 get_atomic_op_for_code (&optab, code);
6664
6665 /* Check to see if there are any better instructions. */
6666 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6667 if (result)
6668 return result;
6669
6670 /* Check for the case where the result isn't used and try those patterns. */
6671 if (unused_result)
6672 {
6673 /* Try the memory model variant first. */
6674 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6675 if (result)
6676 return result;
6677
6678 /* Next try the old style without a memory model. */
6679 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6680 if (result)
6681 return result;
6682
6683 /* There is no no-result pattern, so try patterns with a result. */
6684 target = NULL_RTX;
6685 }
6686
6687 /* Try the __atomic version. */
6688 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6689 if (result)
6690 return result;
6691
6692 /* Try the older __sync version. */
6693 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6694 if (result)
6695 return result;
6696
6697 /* If the fetch value can be calculated from the other variation of fetch,
6698 try that operation. */
6699 if (after || unused_result || optab.reverse_code != UNKNOWN)
6700 {
6701 /* Try the __atomic version, then the older __sync version. */
6702 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6703 if (!result)
6704 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6705
6706 if (result)
6707 {
6708 /* If the result isn't used, no need to do compensation code. */
6709 if (unused_result)
6710 return result;
6711
6712 /* Issue compensation code. fetch_after == fetch_before OP val.
6713 fetch_before == fetch_after REVERSE_OP val. */
6714 if (!after)
6715 code = optab.reverse_code;
6716 if (code == NOT)
6717 {
6718 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6719 true, OPTAB_LIB_WIDEN);
6720 result = expand_simple_unop (mode, NOT, result, target, true);
6721 }
6722 else
6723 result = expand_simple_binop (mode, code, result, val, target,
6724 true, OPTAB_LIB_WIDEN);
6725 return result;
6726 }
6727 }
6728
6729 /* No direct opcode can be generated. */
6730 return NULL_RTX;
6731 }
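
/* The compensation code above relies on these identities (illustrative):

     op_fetch (mem, val)  ==  fetch_op (mem, val) OP val
     fetch_op (mem, val)  ==  op_fetch (mem, val) REVERSE_OP val

   e.g. atomic_fetch_add (mem, val) == atomic_add_fetch (mem, val) - val.
   NAND has no reverse code, so only the first direction is usable and
   is computed as ~(result & val).  */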
6732
6733
6734
6735 /* This function expands an atomic fetch_OP or OP_fetch operation:
6736 TARGET is an optional place to stick the return value. const0_rtx indicates
6737 the result is unused.
6738 Atomically fetch MEM, perform the operation with VAL, and store the result back to MEM.
6739 CODE is the operation being performed (OP)
6740 MEMMODEL is the memory model variant to use.
6741 AFTER is true to return the result of the operation (OP_fetch).
6742 AFTER is false to return the value before the operation (fetch_OP). */
6743 rtx
6744 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6745 enum memmodel model, bool after)
6746 {
6747 machine_mode mode = GET_MODE (mem);
6748 rtx result;
6749 bool unused_result = (target == const0_rtx);
6750
6751 /* If loads are not atomic for the required size and we are not called to
6752 provide a __sync builtin, do not do anything so that we stay consistent
6753 with atomic loads of the same size. */
6754 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6755 return NULL_RTX;
6756
6757 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
6758 after);
6759
6760 if (result)
6761 return result;
6762
6763 /* Add/sub can be implemented by doing the reverse operation with -(val). */
6764 if (code == PLUS || code == MINUS)
6765 {
6766 rtx tmp;
6767 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
6768
6769 start_sequence ();
6770 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
6771 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
6772 model, after);
6773 if (result)
6774 {
6775 /* The reversed operation worked, so emit the insns and return. */
6776 tmp = get_insns ();
6777 end_sequence ();
6778 emit_insn (tmp);
6779 return result;
6780 }
6781
6782 /* The reversed operation did not work, so throw away the negation code and continue. */
6783 end_sequence ();
6784 }
6785
6786 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
6787 if (!can_compare_and_swap_p (mode, false))
6788 {
6789 rtx libfunc;
6790 bool fixup = false;
6791 enum rtx_code orig_code = code;
6792 struct atomic_op_functions optab;
6793
6794 get_atomic_op_for_code (&optab, code);
6795 libfunc = optab_libfunc (after ? optab.fetch_after
6796 : optab.fetch_before, mode);
6797 if (libfunc == NULL
6798 && (after || unused_result || optab.reverse_code != UNKNOWN))
6799 {
6800 fixup = true;
6801 if (!after)
6802 code = optab.reverse_code;
6803 libfunc = optab_libfunc (after ? optab.fetch_before
6804 : optab.fetch_after, mode);
6805 }
6806 if (libfunc != NULL)
6807 {
6808 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6809 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
6810 2, addr, ptr_mode, val, mode);
6811
6812 if (!unused_result && fixup)
6813 result = expand_simple_binop (mode, code, result, val, target,
6814 true, OPTAB_LIB_WIDEN);
6815 return result;
6816 }
6817
6818 /* We need the original code for any further attempts. */
6819 code = orig_code;
6820 }
6821
6822 /* If nothing else has succeeded, default to a compare and swap loop. */
6823 if (can_compare_and_swap_p (mode, true))
6824 {
6825 rtx_insn *insn;
6826 rtx t0 = gen_reg_rtx (mode), t1;
6827
6828 start_sequence ();
6829
6830 /* If the result is used, get a register for it. */
6831 if (!unused_result)
6832 {
6833 if (!target || !register_operand (target, mode))
6834 target = gen_reg_rtx (mode);
6835 /* If fetch_before, copy the value now. */
6836 if (!after)
6837 emit_move_insn (target, t0);
6838 }
6839 else
6840 target = const0_rtx;
6841
6842 t1 = t0;
6843 if (code == NOT)
6844 {
6845 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
6846 true, OPTAB_LIB_WIDEN);
6847 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
6848 }
6849 else
6850 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
6851 OPTAB_LIB_WIDEN);
6852
6853 /* For after, copy the value now. */
6854 if (!unused_result && after)
6855 emit_move_insn (target, t1);
6856 insn = get_insns ();
6857 end_sequence ();
6858
6859 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6860 return target;
6861 }
6862
6863 return NULL_RTX;
6864 }
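
/* Source-level view of the PLUS/MINUS fallback above (illustrative):
   on a target that only implements atomic add,

     __atomic_fetch_sub (p, v, m)  can become  __atomic_fetch_add (p, -v, m)

   and vice versa.  The negation is expanded inside a pending sequence
   so that it can be discarded if the reversed operation turns out to
   be unavailable as well.  */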
6865 \f
6866 /* Return true if OPERAND is suitable for operand number OPNO of
6867 instruction ICODE. */
6868
6869 bool
6870 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
6871 {
6872 return (!insn_data[(int) icode].operand[opno].predicate
6873 || (insn_data[(int) icode].operand[opno].predicate
6874 (operand, insn_data[(int) icode].operand[opno].mode)));
6875 }
6876 \f
6877 /* TARGET is a target of a multiword operation that we are going to
6878 implement as a series of word-mode operations. Return true if
6879 TARGET is suitable for this purpose. */
6880
6881 bool
6882 valid_multiword_target_p (rtx target)
6883 {
6884 machine_mode mode;
6885 int i;
6886
6887 mode = GET_MODE (target);
6888 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
6889 if (!validate_subreg (word_mode, mode, target, i))
6890 return false;
6891 return true;
6892 }
6893
6894 /* Like maybe_legitimize_operand, but do not change the code of the
6895 current rtx value. */
6896
6897 static bool
6898 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
6899 struct expand_operand *op)
6900 {
6901 /* See if the operand matches in its current form. */
6902 if (insn_operand_matches (icode, opno, op->value))
6903 return true;
6904
6905 /* If the operand is a memory whose address has no side effects,
6906 try forcing the address into a non-virtual pseudo register.
6907 The check for side effects is important because copy_to_mode_reg
6908 cannot handle things like auto-modified addresses. */
6909 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
6910 {
6911 rtx addr, mem;
6912
6913 mem = op->value;
6914 addr = XEXP (mem, 0);
6915 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
6916 && !side_effects_p (addr))
6917 {
6918 rtx_insn *last;
6919 machine_mode mode;
6920
6921 last = get_last_insn ();
6922 mode = get_address_mode (mem);
6923 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
6924 if (insn_operand_matches (icode, opno, mem))
6925 {
6926 op->value = mem;
6927 return true;
6928 }
6929 delete_insns_since (last);
6930 }
6931 }
6932
6933 return false;
6934 }
6935
6936 /* Try to make OP match operand OPNO of instruction ICODE. Return true
6937 on success, storing the new operand value back in OP. */
6938
6939 static bool
6940 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
6941 struct expand_operand *op)
6942 {
6943 machine_mode mode, imode;
6944 bool old_volatile_ok, result;
6945
6946 mode = op->mode;
6947 switch (op->type)
6948 {
6949 case EXPAND_FIXED:
6950 old_volatile_ok = volatile_ok;
6951 volatile_ok = true;
6952 result = maybe_legitimize_operand_same_code (icode, opno, op);
6953 volatile_ok = old_volatile_ok;
6954 return result;
6955
6956 case EXPAND_OUTPUT:
6957 gcc_assert (mode != VOIDmode);
6958 if (op->value
6959 && op->value != const0_rtx
6960 && GET_MODE (op->value) == mode
6961 && maybe_legitimize_operand_same_code (icode, opno, op))
6962 return true;
6963
6964 op->value = gen_reg_rtx (mode);
6965 op->target = 0;
6966 break;
6967
6968 case EXPAND_INPUT:
6969 input:
6970 gcc_assert (mode != VOIDmode);
6971 gcc_assert (GET_MODE (op->value) == VOIDmode
6972 || GET_MODE (op->value) == mode);
6973 if (maybe_legitimize_operand_same_code (icode, opno, op))
6974 return true;
6975
6976 op->value = copy_to_mode_reg (mode, op->value);
6977 break;
6978
6979 case EXPAND_CONVERT_TO:
6980 gcc_assert (mode != VOIDmode);
6981 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
6982 goto input;
6983
6984 case EXPAND_CONVERT_FROM:
6985 if (GET_MODE (op->value) != VOIDmode)
6986 mode = GET_MODE (op->value);
6987 else
6988 /* The caller must tell us what mode this value has. */
6989 gcc_assert (mode != VOIDmode);
6990
6991 imode = insn_data[(int) icode].operand[opno].mode;
6992 if (imode != VOIDmode && imode != mode)
6993 {
6994 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
6995 mode = imode;
6996 }
6997 goto input;
6998
6999 case EXPAND_ADDRESS:
7000 gcc_assert (mode != VOIDmode);
7001 op->value = convert_memory_address (mode, op->value);
7002 goto input;
7003
7004 case EXPAND_INTEGER:
7005 mode = insn_data[(int) icode].operand[opno].mode;
7006 if (mode != VOIDmode && const_int_operand (op->value, mode))
7007 goto input;
7008 break;
7009 }
7010 return insn_operand_matches (icode, opno, op->value);
7011 }
7012
7013 /* Make OP describe an input operand that should have the same value
7014 as VALUE, after any mode conversion that the target might request.
7015 TYPE is the type of VALUE. */
7016
7017 void
7018 create_convert_operand_from_type (struct expand_operand *op,
7019 rtx value, tree type)
7020 {
7021 create_convert_operand_from (op, value, TYPE_MODE (type),
7022 TYPE_UNSIGNED (type));
7023 }
7024
7025 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7026 of instruction ICODE. Return true on success, leaving the new operand
7027 values in the OPS themselves. Emit no code on failure. */
7028
7029 bool
7030 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7031 unsigned int nops, struct expand_operand *ops)
7032 {
7033 rtx_insn *last;
7034 unsigned int i;
7035
7036 last = get_last_insn ();
7037 for (i = 0; i < nops; i++)
7038 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
7039 {
7040 delete_insns_since (last);
7041 return false;
7042 }
7043 return true;
7044 }
7045
7046 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7047 as its operands. Return the instruction pattern on success,
7048 and emit any necessary set-up code. Return null and emit no
7049 code on failure. */
7050
7051 rtx_insn *
7052 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7053 struct expand_operand *ops)
7054 {
7055 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7056 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7057 return NULL;
7058
7059 switch (nops)
7060 {
7061 case 1:
7062 return GEN_FCN (icode) (ops[0].value);
7063 case 2:
7064 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7065 case 3:
7066 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7067 case 4:
7068 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7069 ops[3].value);
7070 case 5:
7071 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7072 ops[3].value, ops[4].value);
7073 case 6:
7074 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7075 ops[3].value, ops[4].value, ops[5].value);
7076 case 7:
7077 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7078 ops[3].value, ops[4].value, ops[5].value,
7079 ops[6].value);
7080 case 8:
7081 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7082 ops[3].value, ops[4].value, ops[5].value,
7083 ops[6].value, ops[7].value);
7084 case 9:
7085 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7086 ops[3].value, ops[4].value, ops[5].value,
7087 ops[6].value, ops[7].value, ops[8].value);
7088 }
7089 gcc_unreachable ();
7090 }
7091
7092 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7093 as its operands. Return true on success and emit no code on failure. */
7094
7095 bool
7096 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7097 struct expand_operand *ops)
7098 {
7099 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7100 if (pat)
7101 {
7102 emit_insn (pat);
7103 return true;
7104 }
7105 return false;
7106 }
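
/* Editor's sketch (illustrative, not part of the original file): a
   minimal use of the operand API above for a hypothetical two-input,
   one-output pattern.  ICODE is assumed to name such a pattern; the
   function itself is invented for illustration.  */

static rtx
sketch_expand_binary_via_optab (enum insn_code icode, machine_mode mode,
				rtx x, rtx y)
{
  struct expand_operand ops[3];

  /* A NULL_RTX output lets maybe_legitimize_operand allocate a fresh
     pseudo register of MODE for the result.  */
  create_output_operand (&ops[0], NULL_RTX, mode);
  create_input_operand (&ops[1], x, mode);
  create_input_operand (&ops[2], y, mode);

  /* On success, the insn and any set-up code have been emitted and
     ops[0].value holds the (possibly replaced) result location.  On
     failure, nothing has been emitted.  */
  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}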
7107
7108 /* Like maybe_expand_insn, but for jumps. */
7109
7110 bool
7111 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7112 struct expand_operand *ops)
7113 {
7114 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7115 if (pat)
7116 {
7117 emit_jump_insn (pat);
7118 return true;
7119 }
7120 return false;
7121 }
7122
7123 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7124 as its operands. */
7125
7126 void
7127 expand_insn (enum insn_code icode, unsigned int nops,
7128 struct expand_operand *ops)
7129 {
7130 if (!maybe_expand_insn (icode, nops, ops))
7131 gcc_unreachable ();
7132 }
7133
7134 /* Like expand_insn, but for jumps. */
7135
7136 void
7137 expand_jump_insn (enum insn_code icode, unsigned int nops,
7138 struct expand_operand *ops)
7139 {
7140 if (!maybe_expand_jump_insn (icode, nops, ops))
7141 gcc_unreachable ();
7142 }