/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
25 #include "coretypes.h"
27 #include "diagnostic-core.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
44 #include "basic-block.h"
47 struct target_optabs default_target_optabs
;
48 struct target_libfuncs default_target_libfuncs
;
50 struct target_optabs
*this_target_optabs
= &default_target_optabs
;
51 struct target_libfuncs
*this_target_libfuncs
= &default_target_libfuncs
;
54 #define libfunc_hash \
55 (this_target_libfuncs->x_libfunc_hash)
57 /* Contains the optab used for each rtx code. */
58 optab code_to_optab_
[NUM_RTX_CODE
+ 1];
60 static void prepare_float_lib_cmp (rtx
, rtx
, enum rtx_code
, rtx
*,
62 static rtx
expand_unop_direct (enum machine_mode
, optab
, rtx
, rtx
, int);
63 static void emit_libcall_block_1 (rtx
, rtx
, rtx
, rtx
, bool);
65 /* Debug facility for use in GDB. */
66 void debug_optab_libfuncs (void);
/* Prefixes for the current version of decimal floating point (BID vs. DPD) */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif
75 /* Used for libfunc_hash. */
78 hash_libfunc (const void *p
)
80 const struct libfunc_entry
*const e
= (const struct libfunc_entry
*) p
;
82 return (((int) e
->mode1
+ (int) e
->mode2
* NUM_MACHINE_MODES
)
86 /* Used for libfunc_hash. */
89 eq_libfunc (const void *p
, const void *q
)
91 const struct libfunc_entry
*const e1
= (const struct libfunc_entry
*) p
;
92 const struct libfunc_entry
*const e2
= (const struct libfunc_entry
*) q
;
94 return (e1
->optab
== e2
->optab
95 && e1
->mode1
== e2
->mode1
96 && e1
->mode2
== e2
->mode2
);
99 /* Return libfunc corresponding operation defined by OPTAB converting
100 from MODE2 to MODE1. Trigger lazy initialization if needed, return NULL
101 if no libfunc is available. */
103 convert_optab_libfunc (convert_optab optab
, enum machine_mode mode1
,
104 enum machine_mode mode2
)
106 struct libfunc_entry e
;
107 struct libfunc_entry
**slot
;
109 e
.optab
= (size_t) (optab
- &convert_optab_table
[0]);
112 slot
= (struct libfunc_entry
**) htab_find_slot (libfunc_hash
, &e
, NO_INSERT
);
115 if (optab
->libcall_gen
)
117 optab
->libcall_gen (optab
, optab
->libcall_basename
, mode1
, mode2
);
118 slot
= (struct libfunc_entry
**) htab_find_slot (libfunc_hash
, &e
, NO_INSERT
);
120 return (*slot
)->libfunc
;
126 return (*slot
)->libfunc
;
129 /* Return libfunc corresponding operation defined by OPTAB in MODE.
130 Trigger lazy initialization if needed, return NULL if no libfunc is
133 optab_libfunc (optab optab
, enum machine_mode mode
)
135 struct libfunc_entry e
;
136 struct libfunc_entry
**slot
;
138 e
.optab
= (size_t) (optab
- &optab_table
[0]);
141 slot
= (struct libfunc_entry
**) htab_find_slot (libfunc_hash
, &e
, NO_INSERT
);
144 if (optab
->libcall_gen
)
146 optab
->libcall_gen (optab
, optab
->libcall_basename
,
147 optab
->libcall_suffix
, mode
);
148 slot
= (struct libfunc_entry
**) htab_find_slot (libfunc_hash
,
151 return (*slot
)->libfunc
;
157 return (*slot
)->libfunc
;
161 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
162 the result of operation CODE applied to OP0 (and OP1 if it is a binary
165 If the last insn does not set TARGET, don't do anything, but return 1.
167 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
168 don't add the REG_EQUAL note but return 0. Our caller can then try
169 again, ensuring that TARGET is not one of the operands. */
172 add_equal_note (rtx insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
174 rtx last_insn
, insn
, set
;
177 gcc_assert (insns
&& INSN_P (insns
) && NEXT_INSN (insns
));
179 if (GET_RTX_CLASS (code
) != RTX_COMM_ARITH
180 && GET_RTX_CLASS (code
) != RTX_BIN_ARITH
181 && GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
182 && GET_RTX_CLASS (code
) != RTX_COMPARE
183 && GET_RTX_CLASS (code
) != RTX_UNARY
)
186 if (GET_CODE (target
) == ZERO_EXTRACT
)
189 for (last_insn
= insns
;
190 NEXT_INSN (last_insn
) != NULL_RTX
;
191 last_insn
= NEXT_INSN (last_insn
))
194 set
= single_set (last_insn
);
198 if (! rtx_equal_p (SET_DEST (set
), target
)
199 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
200 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
201 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
204 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
205 besides the last insn. */
206 if (reg_overlap_mentioned_p (target
, op0
)
207 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
209 insn
= PREV_INSN (last_insn
);
210 while (insn
!= NULL_RTX
)
212 if (reg_set_p (target
, insn
))
215 insn
= PREV_INSN (insn
);
219 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
229 if (GET_MODE (op0
) != VOIDmode
&& GET_MODE (target
) != GET_MODE (op0
))
231 note
= gen_rtx_fmt_e (code
, GET_MODE (op0
), copy_rtx (op0
));
232 if (GET_MODE_SIZE (GET_MODE (op0
))
233 > GET_MODE_SIZE (GET_MODE (target
)))
234 note
= simplify_gen_unary (TRUNCATE
, GET_MODE (target
),
235 note
, GET_MODE (op0
));
237 note
= simplify_gen_unary (ZERO_EXTEND
, GET_MODE (target
),
238 note
, GET_MODE (op0
));
243 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
247 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
249 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
254 /* Given two input operands, OP0 and OP1, determine what the correct from_mode
255 for a widening operation would be. In most cases this would be OP0, but if
256 that's a constant it'll be VOIDmode, which isn't useful. */
258 static enum machine_mode
259 widened_mode (enum machine_mode to_mode
, rtx op0
, rtx op1
)
261 enum machine_mode m0
= GET_MODE (op0
);
262 enum machine_mode m1
= GET_MODE (op1
);
263 enum machine_mode result
;
265 if (m0
== VOIDmode
&& m1
== VOIDmode
)
267 else if (m0
== VOIDmode
|| GET_MODE_SIZE (m0
) < GET_MODE_SIZE (m1
))
272 if (GET_MODE_SIZE (result
) > GET_MODE_SIZE (to_mode
))
278 /* Find a widening optab even if it doesn't widen as much as we want.
279 E.g. if from_mode is HImode, and to_mode is DImode, and there is no
280 direct HI->SI insn, then return SI->DI, if that exists.
281 If PERMIT_NON_WIDENING is non-zero then this can be used with
282 non-widening optabs also. */
285 find_widening_optab_handler_and_mode (optab op
, enum machine_mode to_mode
,
286 enum machine_mode from_mode
,
287 int permit_non_widening
,
288 enum machine_mode
*found_mode
)
290 for (; (permit_non_widening
|| from_mode
!= to_mode
)
291 && GET_MODE_SIZE (from_mode
) <= GET_MODE_SIZE (to_mode
)
292 && from_mode
!= VOIDmode
;
293 from_mode
= GET_MODE_WIDER_MODE (from_mode
))
295 enum insn_code handler
= widening_optab_handler (op
, to_mode
,
298 if (handler
!= CODE_FOR_nothing
)
301 *found_mode
= from_mode
;
306 return CODE_FOR_nothing
;
309 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
310 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
311 not actually do a sign-extend or zero-extend, but can leave the
312 higher-order bits of the result rtx undefined, for example, in the case
313 of logical operations, but not right shifts. */
316 widen_operand (rtx op
, enum machine_mode mode
, enum machine_mode oldmode
,
317 int unsignedp
, int no_extend
)
321 /* If we don't have to extend and this is a constant, return it. */
322 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
325 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
326 extend since it will be more efficient to do so unless the signedness of
327 a promoted object differs from our extension. */
329 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
330 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
331 return convert_modes (mode
, oldmode
, op
, unsignedp
);
333 /* If MODE is no wider than a single word, we return a paradoxical
335 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
336 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
338 /* Otherwise, get an object of MODE, clobber it, and set the low-order
341 result
= gen_reg_rtx (mode
);
342 emit_clobber (result
);
343 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
347 /* Return the optab used for computing the operation given by the tree code,
348 CODE and the tree EXP. This function is not always usable (for example, it
349 cannot give complete results for multiplication or division) but probably
350 ought to be relied on more widely throughout the expander. */
352 optab_for_tree_code (enum tree_code code
, const_tree type
,
353 enum optab_subtype subtype
)
365 return one_cmpl_optab
;
370 case MULT_HIGHPART_EXPR
:
371 return TYPE_UNSIGNED (type
) ? umul_highpart_optab
: smul_highpart_optab
;
377 return TYPE_UNSIGNED (type
) ? umod_optab
: smod_optab
;
385 if (TYPE_SATURATING(type
))
386 return TYPE_UNSIGNED(type
) ? usdiv_optab
: ssdiv_optab
;
387 return TYPE_UNSIGNED (type
) ? udiv_optab
: sdiv_optab
;
390 if (TREE_CODE (type
) == VECTOR_TYPE
)
392 if (subtype
== optab_vector
)
393 return TYPE_SATURATING (type
) ? unknown_optab
: vashl_optab
;
395 gcc_assert (subtype
== optab_scalar
);
397 if (TYPE_SATURATING(type
))
398 return TYPE_UNSIGNED(type
) ? usashl_optab
: ssashl_optab
;
402 if (TREE_CODE (type
) == VECTOR_TYPE
)
404 if (subtype
== optab_vector
)
405 return TYPE_UNSIGNED (type
) ? vlshr_optab
: vashr_optab
;
407 gcc_assert (subtype
== optab_scalar
);
409 return TYPE_UNSIGNED (type
) ? lshr_optab
: ashr_optab
;
412 if (TREE_CODE (type
) == VECTOR_TYPE
)
414 if (subtype
== optab_vector
)
417 gcc_assert (subtype
== optab_scalar
);
422 if (TREE_CODE (type
) == VECTOR_TYPE
)
424 if (subtype
== optab_vector
)
427 gcc_assert (subtype
== optab_scalar
);
432 return TYPE_UNSIGNED (type
) ? umax_optab
: smax_optab
;
435 return TYPE_UNSIGNED (type
) ? umin_optab
: smin_optab
;
437 case REALIGN_LOAD_EXPR
:
438 return vec_realign_load_optab
;
441 return TYPE_UNSIGNED (type
) ? usum_widen_optab
: ssum_widen_optab
;
444 return TYPE_UNSIGNED (type
) ? udot_prod_optab
: sdot_prod_optab
;
446 case WIDEN_MULT_PLUS_EXPR
:
447 return (TYPE_UNSIGNED (type
)
448 ? (TYPE_SATURATING (type
)
449 ? usmadd_widen_optab
: umadd_widen_optab
)
450 : (TYPE_SATURATING (type
)
451 ? ssmadd_widen_optab
: smadd_widen_optab
));
453 case WIDEN_MULT_MINUS_EXPR
:
454 return (TYPE_UNSIGNED (type
)
455 ? (TYPE_SATURATING (type
)
456 ? usmsub_widen_optab
: umsub_widen_optab
)
457 : (TYPE_SATURATING (type
)
458 ? ssmsub_widen_optab
: smsub_widen_optab
));
464 return TYPE_UNSIGNED (type
) ? reduc_umax_optab
: reduc_smax_optab
;
467 return TYPE_UNSIGNED (type
) ? reduc_umin_optab
: reduc_smin_optab
;
469 case REDUC_PLUS_EXPR
:
470 return TYPE_UNSIGNED (type
) ? reduc_uplus_optab
: reduc_splus_optab
;
472 case VEC_LSHIFT_EXPR
:
473 return vec_shl_optab
;
475 case VEC_RSHIFT_EXPR
:
476 return vec_shr_optab
;
478 case VEC_WIDEN_MULT_HI_EXPR
:
479 return TYPE_UNSIGNED (type
) ?
480 vec_widen_umult_hi_optab
: vec_widen_smult_hi_optab
;
482 case VEC_WIDEN_MULT_LO_EXPR
:
483 return TYPE_UNSIGNED (type
) ?
484 vec_widen_umult_lo_optab
: vec_widen_smult_lo_optab
;
486 case VEC_WIDEN_MULT_EVEN_EXPR
:
487 return TYPE_UNSIGNED (type
) ?
488 vec_widen_umult_even_optab
: vec_widen_smult_even_optab
;
490 case VEC_WIDEN_MULT_ODD_EXPR
:
491 return TYPE_UNSIGNED (type
) ?
492 vec_widen_umult_odd_optab
: vec_widen_smult_odd_optab
;
494 case VEC_WIDEN_LSHIFT_HI_EXPR
:
495 return TYPE_UNSIGNED (type
) ?
496 vec_widen_ushiftl_hi_optab
: vec_widen_sshiftl_hi_optab
;
498 case VEC_WIDEN_LSHIFT_LO_EXPR
:
499 return TYPE_UNSIGNED (type
) ?
500 vec_widen_ushiftl_lo_optab
: vec_widen_sshiftl_lo_optab
;
502 case VEC_UNPACK_HI_EXPR
:
503 return TYPE_UNSIGNED (type
) ?
504 vec_unpacku_hi_optab
: vec_unpacks_hi_optab
;
506 case VEC_UNPACK_LO_EXPR
:
507 return TYPE_UNSIGNED (type
) ?
508 vec_unpacku_lo_optab
: vec_unpacks_lo_optab
;
510 case VEC_UNPACK_FLOAT_HI_EXPR
:
511 /* The signedness is determined from input operand. */
512 return TYPE_UNSIGNED (type
) ?
513 vec_unpacku_float_hi_optab
: vec_unpacks_float_hi_optab
;
515 case VEC_UNPACK_FLOAT_LO_EXPR
:
516 /* The signedness is determined from input operand. */
517 return TYPE_UNSIGNED (type
) ?
518 vec_unpacku_float_lo_optab
: vec_unpacks_float_lo_optab
;
520 case VEC_PACK_TRUNC_EXPR
:
521 return vec_pack_trunc_optab
;
523 case VEC_PACK_SAT_EXPR
:
524 return TYPE_UNSIGNED (type
) ? vec_pack_usat_optab
: vec_pack_ssat_optab
;
526 case VEC_PACK_FIX_TRUNC_EXPR
:
527 /* The signedness is determined from output operand. */
528 return TYPE_UNSIGNED (type
) ?
529 vec_pack_ufix_trunc_optab
: vec_pack_sfix_trunc_optab
;
535 trapv
= INTEGRAL_TYPE_P (type
) && TYPE_OVERFLOW_TRAPS (type
);
538 case POINTER_PLUS_EXPR
:
540 if (TYPE_SATURATING(type
))
541 return TYPE_UNSIGNED(type
) ? usadd_optab
: ssadd_optab
;
542 return trapv
? addv_optab
: add_optab
;
545 if (TYPE_SATURATING(type
))
546 return TYPE_UNSIGNED(type
) ? ussub_optab
: sssub_optab
;
547 return trapv
? subv_optab
: sub_optab
;
550 if (TYPE_SATURATING(type
))
551 return TYPE_UNSIGNED(type
) ? usmul_optab
: ssmul_optab
;
552 return trapv
? smulv_optab
: smul_optab
;
555 if (TYPE_SATURATING(type
))
556 return TYPE_UNSIGNED(type
) ? usneg_optab
: ssneg_optab
;
557 return trapv
? negv_optab
: neg_optab
;
560 return trapv
? absv_optab
: abs_optab
;
563 return unknown_optab
;
568 /* Expand vector widening operations.
570 There are two different classes of operations handled here:
571 1) Operations whose result is wider than all the arguments to the operation.
572 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
573 In this case OP0 and optionally OP1 would be initialized,
574 but WIDE_OP wouldn't (not relevant for this case).
575 2) Operations whose result is of the same size as the last argument to the
576 operation, but wider than all the other arguments to the operation.
577 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
578 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
580 E.g, when called to expand the following operations, this is how
581 the arguments will be initialized:
583 widening-sum 2 oprnd0 - oprnd1
584 widening-dot-product 3 oprnd0 oprnd1 oprnd2
585 widening-mult 2 oprnd0 oprnd1 -
586 type-promotion (vec-unpack) 1 oprnd0 - - */
589 expand_widen_pattern_expr (sepops ops
, rtx op0
, rtx op1
, rtx wide_op
,
590 rtx target
, int unsignedp
)
592 struct expand_operand eops
[4];
593 tree oprnd0
, oprnd1
, oprnd2
;
594 enum machine_mode wmode
= VOIDmode
, tmode0
, tmode1
= VOIDmode
;
595 optab widen_pattern_optab
;
596 enum insn_code icode
;
597 int nops
= TREE_CODE_LENGTH (ops
->code
);
601 tmode0
= TYPE_MODE (TREE_TYPE (oprnd0
));
602 widen_pattern_optab
=
603 optab_for_tree_code (ops
->code
, TREE_TYPE (oprnd0
), optab_default
);
604 if (ops
->code
== WIDEN_MULT_PLUS_EXPR
605 || ops
->code
== WIDEN_MULT_MINUS_EXPR
)
606 icode
= find_widening_optab_handler (widen_pattern_optab
,
607 TYPE_MODE (TREE_TYPE (ops
->op2
)),
610 icode
= optab_handler (widen_pattern_optab
, tmode0
);
611 gcc_assert (icode
!= CODE_FOR_nothing
);
616 tmode1
= TYPE_MODE (TREE_TYPE (oprnd1
));
619 /* The last operand is of a wider mode than the rest of the operands. */
624 gcc_assert (tmode1
== tmode0
);
627 wmode
= TYPE_MODE (TREE_TYPE (oprnd2
));
631 create_output_operand (&eops
[op
++], target
, TYPE_MODE (ops
->type
));
632 create_convert_operand_from (&eops
[op
++], op0
, tmode0
, unsignedp
);
634 create_convert_operand_from (&eops
[op
++], op1
, tmode1
, unsignedp
);
636 create_convert_operand_from (&eops
[op
++], wide_op
, wmode
, unsignedp
);
637 expand_insn (icode
, op
, eops
);
638 return eops
[0].value
;
641 /* Generate code to perform an operation specified by TERNARY_OPTAB
642 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
644 UNSIGNEDP is for the case where we have to widen the operands
645 to perform the operation. It says to use zero-extension.
647 If TARGET is nonzero, the value
648 is generated there, if it is convenient to do so.
649 In all cases an rtx is returned for the locus of the value;
650 this may or may not be TARGET. */
653 expand_ternary_op (enum machine_mode mode
, optab ternary_optab
, rtx op0
,
654 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
656 struct expand_operand ops
[4];
657 enum insn_code icode
= optab_handler (ternary_optab
, mode
);
659 gcc_assert (optab_handler (ternary_optab
, mode
) != CODE_FOR_nothing
);
661 create_output_operand (&ops
[0], target
, mode
);
662 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
663 create_convert_operand_from (&ops
[2], op1
, mode
, unsignedp
);
664 create_convert_operand_from (&ops
[3], op2
, mode
, unsignedp
);
665 expand_insn (icode
, 4, ops
);
670 /* Like expand_binop, but return a constant rtx if the result can be
671 calculated at compile time. The arguments and return value are
672 otherwise the same as for expand_binop. */
675 simplify_expand_binop (enum machine_mode mode
, optab binoptab
,
676 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
677 enum optab_methods methods
)
679 if (CONSTANT_P (op0
) && CONSTANT_P (op1
))
681 rtx x
= simplify_binary_operation (optab_to_code (binoptab
),
687 return expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
);
690 /* Like simplify_expand_binop, but always put the result in TARGET.
691 Return true if the expansion succeeded. */
694 force_expand_binop (enum machine_mode mode
, optab binoptab
,
695 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
696 enum optab_methods methods
)
698 rtx x
= simplify_expand_binop (mode
, binoptab
, op0
, op1
,
699 target
, unsignedp
, methods
);
703 emit_move_insn (target
, x
);
707 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
710 expand_vec_shift_expr (sepops ops
, rtx target
)
712 struct expand_operand eops
[3];
713 enum insn_code icode
;
714 rtx rtx_op1
, rtx_op2
;
715 enum machine_mode mode
= TYPE_MODE (ops
->type
);
716 tree vec_oprnd
= ops
->op0
;
717 tree shift_oprnd
= ops
->op1
;
722 case VEC_RSHIFT_EXPR
:
723 shift_optab
= vec_shr_optab
;
725 case VEC_LSHIFT_EXPR
:
726 shift_optab
= vec_shl_optab
;
732 icode
= optab_handler (shift_optab
, mode
);
733 gcc_assert (icode
!= CODE_FOR_nothing
);
735 rtx_op1
= expand_normal (vec_oprnd
);
736 rtx_op2
= expand_normal (shift_oprnd
);
738 create_output_operand (&eops
[0], target
, mode
);
739 create_input_operand (&eops
[1], rtx_op1
, GET_MODE (rtx_op1
));
740 create_convert_operand_from_type (&eops
[2], rtx_op2
, TREE_TYPE (shift_oprnd
));
741 expand_insn (icode
, 3, eops
);
743 return eops
[0].value
;
746 /* Create a new vector value in VMODE with all elements set to OP. The
747 mode of OP must be the element mode of VMODE. If OP is a constant,
748 then the return value will be a constant. */
751 expand_vector_broadcast (enum machine_mode vmode
, rtx op
)
753 enum insn_code icode
;
758 gcc_checking_assert (VECTOR_MODE_P (vmode
));
760 n
= GET_MODE_NUNITS (vmode
);
761 vec
= rtvec_alloc (n
);
762 for (i
= 0; i
< n
; ++i
)
763 RTVEC_ELT (vec
, i
) = op
;
766 return gen_rtx_CONST_VECTOR (vmode
, vec
);
768 /* ??? If the target doesn't have a vec_init, then we have no easy way
769 of performing this operation. Most of this sort of generic support
770 is hidden away in the vector lowering support in gimple. */
771 icode
= optab_handler (vec_init_optab
, vmode
);
772 if (icode
== CODE_FOR_nothing
)
775 ret
= gen_reg_rtx (vmode
);
776 emit_insn (GEN_FCN (icode
) (ret
, gen_rtx_PARALLEL (vmode
, vec
)));
781 /* This subroutine of expand_doubleword_shift handles the cases in which
782 the effective shift value is >= BITS_PER_WORD. The arguments and return
783 value are the same as for the parent routine, except that SUPERWORD_OP1
784 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
785 INTO_TARGET may be null if the caller has decided to calculate it. */
788 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
789 rtx outof_target
, rtx into_target
,
790 int unsignedp
, enum optab_methods methods
)
792 if (into_target
!= 0)
793 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
794 into_target
, unsignedp
, methods
))
797 if (outof_target
!= 0)
799 /* For a signed right shift, we must fill OUTOF_TARGET with copies
800 of the sign bit, otherwise we must fill it with zeros. */
801 if (binoptab
!= ashr_optab
)
802 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
804 if (!force_expand_binop (word_mode
, binoptab
,
805 outof_input
, GEN_INT (BITS_PER_WORD
- 1),
806 outof_target
, unsignedp
, methods
))
812 /* This subroutine of expand_doubleword_shift handles the cases in which
813 the effective shift value is < BITS_PER_WORD. The arguments and return
814 value are the same as for the parent routine. */
817 expand_subword_shift (enum machine_mode op1_mode
, optab binoptab
,
818 rtx outof_input
, rtx into_input
, rtx op1
,
819 rtx outof_target
, rtx into_target
,
820 int unsignedp
, enum optab_methods methods
,
821 unsigned HOST_WIDE_INT shift_mask
)
823 optab reverse_unsigned_shift
, unsigned_shift
;
826 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
827 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
829 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
830 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
831 the opposite direction to BINOPTAB. */
832 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
834 carries
= outof_input
;
835 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
836 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
841 /* We must avoid shifting by BITS_PER_WORD bits since that is either
842 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
843 has unknown behavior. Do a single shift first, then shift by the
844 remainder. It's OK to use ~OP1 as the remainder if shift counts
845 are truncated to the mode size. */
846 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
847 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
848 if (shift_mask
== BITS_PER_WORD
- 1)
850 tmp
= immed_double_const (-1, -1, op1_mode
);
851 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
856 tmp
= immed_double_const (BITS_PER_WORD
- 1, 0, op1_mode
);
857 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
861 if (tmp
== 0 || carries
== 0)
863 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
864 carries
, tmp
, 0, unsignedp
, methods
);
868 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
869 so the result can go directly into INTO_TARGET if convenient. */
870 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
871 into_target
, unsignedp
, methods
);
875 /* Now OR in the bits carried over from OUTOF_INPUT. */
876 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
877 into_target
, unsignedp
, methods
))
880 /* Use a standard word_mode shift for the out-of half. */
881 if (outof_target
!= 0)
882 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
883 outof_target
, unsignedp
, methods
))
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
952 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
953 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
954 input operand; the shift moves bits in the direction OUTOF_INPUT->
955 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
956 of the target. OP1 is the shift count and OP1_MODE is its mode.
957 If OP1 is constant, it will have been truncated as appropriate
958 and is known to be nonzero.
960 If SHIFT_MASK is zero, the result of word shifts is undefined when the
961 shift count is outside the range [0, BITS_PER_WORD). This routine must
962 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
964 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
965 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
966 fill with zeros or sign bits as appropriate.
968 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
969 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
970 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
971 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
974 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
975 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
976 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
977 function wants to calculate it itself.
979 Return true if the shift could be successfully synthesized. */
982 expand_doubleword_shift (enum machine_mode op1_mode
, optab binoptab
,
983 rtx outof_input
, rtx into_input
, rtx op1
,
984 rtx outof_target
, rtx into_target
,
985 int unsignedp
, enum optab_methods methods
,
986 unsigned HOST_WIDE_INT shift_mask
)
988 rtx superword_op1
, tmp
, cmp1
, cmp2
;
989 rtx subword_label
, done_label
;
990 enum rtx_code cmp_code
;
992 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
993 fill the result with sign or zero bits as appropriate. If so, the value
994 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
995 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
996 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
998 This isn't worthwhile for constant shifts since the optimizers will
999 cope better with in-range shift counts. */
1000 if (shift_mask
>= BITS_PER_WORD
1001 && outof_target
!= 0
1002 && !CONSTANT_P (op1
))
1004 if (!expand_doubleword_shift (op1_mode
, binoptab
,
1005 outof_input
, into_input
, op1
,
1007 unsignedp
, methods
, shift_mask
))
1009 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
1010 outof_target
, unsignedp
, methods
))
1015 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1016 is true when the effective shift value is less than BITS_PER_WORD.
1017 Set SUPERWORD_OP1 to the shift count that should be used to shift
1018 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1019 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
1020 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
1022 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1023 is a subword shift count. */
1024 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
1026 cmp2
= CONST0_RTX (op1_mode
);
1028 superword_op1
= op1
;
1032 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1033 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
1035 cmp2
= CONST0_RTX (op1_mode
);
1037 superword_op1
= cmp1
;
1042 /* If we can compute the condition at compile time, pick the
1043 appropriate subroutine. */
1044 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
1045 if (tmp
!= 0 && CONST_INT_P (tmp
))
1047 if (tmp
== const0_rtx
)
1048 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
1049 outof_target
, into_target
,
1050 unsignedp
, methods
);
1052 return expand_subword_shift (op1_mode
, binoptab
,
1053 outof_input
, into_input
, op1
,
1054 outof_target
, into_target
,
1055 unsignedp
, methods
, shift_mask
);
1058 #ifdef HAVE_conditional_move
1059 /* Try using conditional moves to generate straight-line code. */
1061 rtx start
= get_last_insn ();
1062 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
1063 cmp_code
, cmp1
, cmp2
,
1064 outof_input
, into_input
,
1066 outof_target
, into_target
,
1067 unsignedp
, methods
, shift_mask
))
1069 delete_insns_since (start
);
1073 /* As a last resort, use branches to select the correct alternative. */
1074 subword_label
= gen_label_rtx ();
1075 done_label
= gen_label_rtx ();
1078 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
1079 0, 0, subword_label
, -1);
1082 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
1083 outof_target
, into_target
,
1084 unsignedp
, methods
))
1087 emit_jump_insn (gen_jump (done_label
));
1089 emit_label (subword_label
);
1091 if (!expand_subword_shift (op1_mode
, binoptab
,
1092 outof_input
, into_input
, op1
,
1093 outof_target
, into_target
,
1094 unsignedp
, methods
, shift_mask
))
1097 emit_label (done_label
);
1101 /* Subroutine of expand_binop. Perform a double word multiplication of
1102 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1103 as the target's word_mode. This function return NULL_RTX if anything
1104 goes wrong, in which case it may have already emitted instructions
1105 which need to be deleted.
1107 If we want to multiply two two-word values and have normal and widening
1108 multiplies of single-word values, we can do this with three smaller
1111 The multiplication proceeds as follows:
1112 _______________________
1113 [__op0_high_|__op0_low__]
1114 _______________________
1115 * [__op1_high_|__op1_low__]
1116 _______________________________________________
1117 _______________________
1118 (1) [__op0_low__*__op1_low__]
1119 _______________________
1120 (2a) [__op0_low__*__op1_high_]
1121 _______________________
1122 (2b) [__op0_high_*__op1_low__]
1123 _______________________
1124 (3) [__op0_high_*__op1_high_]
1127 This gives a 4-word result. Since we are only interested in the
1128 lower 2 words, partial result (3) and the upper words of (2a) and
1129 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1130 calculated using non-widening multiplication.
1132 (1), however, needs to be calculated with an unsigned widening
1133 multiplication. If this operation is not directly supported we
1134 try using a signed widening multiplication and adjust the result.
1135 This adjustment works as follows:
1137 If both operands are positive then no adjustment is needed.
1139 If the operands have different signs, for example op0_low < 0 and
1140 op1_low >= 0, the instruction treats the most significant bit of
1141 op0_low as a sign bit instead of a bit with significance
1142 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1143 with 2**BITS_PER_WORD - op0_low, and two's complements the
1144 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1147 Similarly, if both operands are negative, we need to add
1148 (op0_low + op1_low) * 2**BITS_PER_WORD.
1150 We use a trick to adjust quickly. We logically shift op0_low right
1151 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1152 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1153 logical shift exists, we do an arithmetic right shift and subtract
1157 expand_doubleword_mult (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
1158 bool umulp
, enum optab_methods methods
)
1160 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1161 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1162 rtx wordm1
= umulp
? NULL_RTX
: GEN_INT (BITS_PER_WORD
- 1);
1163 rtx product
, adjust
, product_high
, temp
;
1165 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1166 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1167 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1168 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1170 /* If we're using an unsigned multiply to directly compute the product
1171 of the low-order words of the operands and perform any required
1172 adjustments of the operands, we begin by trying two more multiplications
1173 and then computing the appropriate sum.
1175 We have checked above that the required addition is provided.
1176 Full-word addition will normally always succeed, especially if
1177 it is provided at all, so we don't worry about its failure. The
1178 multiplication may well fail, however, so we do handle that. */
1182 /* ??? This could be done with emit_store_flag where available. */
1183 temp
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1184 NULL_RTX
, 1, methods
);
1186 op0_high
= expand_binop (word_mode
, add_optab
, op0_high
, temp
,
1187 NULL_RTX
, 0, OPTAB_DIRECT
);
1190 temp
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1191 NULL_RTX
, 0, methods
);
1194 op0_high
= expand_binop (word_mode
, sub_optab
, op0_high
, temp
,
1195 NULL_RTX
, 0, OPTAB_DIRECT
);
1202 adjust
= expand_binop (word_mode
, smul_optab
, op0_high
, op1_low
,
1203 NULL_RTX
, 0, OPTAB_DIRECT
);
1207 /* OP0_HIGH should now be dead. */
1211 /* ??? This could be done with emit_store_flag where available. */
1212 temp
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1213 NULL_RTX
, 1, methods
);
1215 op1_high
= expand_binop (word_mode
, add_optab
, op1_high
, temp
,
1216 NULL_RTX
, 0, OPTAB_DIRECT
);
1219 temp
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1220 NULL_RTX
, 0, methods
);
1223 op1_high
= expand_binop (word_mode
, sub_optab
, op1_high
, temp
,
1224 NULL_RTX
, 0, OPTAB_DIRECT
);
1231 temp
= expand_binop (word_mode
, smul_optab
, op1_high
, op0_low
,
1232 NULL_RTX
, 0, OPTAB_DIRECT
);
1236 /* OP1_HIGH should now be dead. */
1238 adjust
= expand_binop (word_mode
, add_optab
, adjust
, temp
,
1239 NULL_RTX
, 0, OPTAB_DIRECT
);
1241 if (target
&& !REG_P (target
))
1245 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1246 target
, 1, OPTAB_DIRECT
);
1248 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1249 target
, 1, OPTAB_DIRECT
);
1254 product_high
= operand_subword (product
, high
, 1, mode
);
1255 adjust
= expand_binop (word_mode
, add_optab
, product_high
, adjust
,
1256 NULL_RTX
, 0, OPTAB_DIRECT
);
1257 emit_move_insn (product_high
, adjust
);
1261 /* Wrapper around expand_binop which takes an rtx code to specify
1262 the operation to perform, not an optab pointer. All other
1263 arguments are the same. */
1265 expand_simple_binop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
1266 rtx op1
, rtx target
, int unsignedp
,
1267 enum optab_methods methods
)
1269 optab binop
= code_to_optab (code
);
1272 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
1275 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1276 binop. Order them according to commutative_operand_precedence and, if
1277 possible, try to put TARGET or a pseudo first. */
1279 swap_commutative_operands_with_target (rtx target
, rtx op0
, rtx op1
)
1281 int op0_prec
= commutative_operand_precedence (op0
);
1282 int op1_prec
= commutative_operand_precedence (op1
);
1284 if (op0_prec
< op1_prec
)
1287 if (op0_prec
> op1_prec
)
1290 /* With equal precedence, both orders are ok, but it is better if the
1291 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1292 if (target
== 0 || REG_P (target
))
1293 return (REG_P (op1
) && !REG_P (op0
)) || target
== op1
;
1295 return rtx_equal_p (op1
, target
);
1298 /* Return true if BINOPTAB implements a shift operation. */
1301 shift_optab_p (optab binoptab
)
1303 switch (optab_to_code (binoptab
))
1319 /* Return true if BINOPTAB implements a commutative binary operation. */
1322 commutative_optab_p (optab binoptab
)
1324 return (GET_RTX_CLASS (optab_to_code (binoptab
)) == RTX_COMM_ARITH
1325 || binoptab
== smul_widen_optab
1326 || binoptab
== umul_widen_optab
1327 || binoptab
== smul_highpart_optab
1328 || binoptab
== umul_highpart_optab
);
1331 /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're
1332 optimizing, and if the operand is a constant that costs more than
1333 1 instruction, force the constant into a register and return that
1334 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1337 avoid_expensive_constant (enum machine_mode mode
, optab binoptab
,
1338 int opn
, rtx x
, bool unsignedp
)
1340 bool speed
= optimize_insn_for_speed_p ();
1342 if (mode
!= VOIDmode
1345 && (rtx_cost (x
, optab_to_code (binoptab
), opn
, speed
)
1346 > set_src_cost (x
, speed
)))
1348 if (CONST_INT_P (x
))
1350 HOST_WIDE_INT intval
= trunc_int_for_mode (INTVAL (x
), mode
);
1351 if (intval
!= INTVAL (x
))
1352 x
= GEN_INT (intval
);
1355 x
= convert_modes (mode
, VOIDmode
, x
, unsignedp
);
1356 x
= force_reg (mode
, x
);
1361 /* Helper function for expand_binop: handle the case where there
1362 is an insn that directly implements the indicated operation.
1363 Returns null if this is not possible. */
1365 expand_binop_directly (enum machine_mode mode
, optab binoptab
,
1367 rtx target
, int unsignedp
, enum optab_methods methods
,
1370 enum machine_mode from_mode
= widened_mode (mode
, op0
, op1
);
1371 enum insn_code icode
= find_widening_optab_handler (binoptab
, mode
,
1373 enum machine_mode xmode0
= insn_data
[(int) icode
].operand
[1].mode
;
1374 enum machine_mode xmode1
= insn_data
[(int) icode
].operand
[2].mode
;
1375 enum machine_mode mode0
, mode1
, tmp_mode
;
1376 struct expand_operand ops
[3];
1379 rtx xop0
= op0
, xop1
= op1
;
1382 /* If it is a commutative operator and the modes would match
1383 if we would swap the operands, we can save the conversions. */
1384 commutative_p
= commutative_optab_p (binoptab
);
1386 && GET_MODE (xop0
) != xmode0
&& GET_MODE (xop1
) != xmode1
1387 && GET_MODE (xop0
) == xmode1
&& GET_MODE (xop1
) == xmode1
)
1394 /* If we are optimizing, force expensive constants into a register. */
1395 xop0
= avoid_expensive_constant (xmode0
, binoptab
, 0, xop0
, unsignedp
);
1396 if (!shift_optab_p (binoptab
))
1397 xop1
= avoid_expensive_constant (xmode1
, binoptab
, 1, xop1
, unsignedp
);
1399 /* In case the insn wants input operands in modes different from
1400 those of the actual operands, convert the operands. It would
1401 seem that we don't need to convert CONST_INTs, but we do, so
1402 that they're properly zero-extended, sign-extended or truncated
1405 mode0
= GET_MODE (xop0
) != VOIDmode
? GET_MODE (xop0
) : mode
;
1406 if (xmode0
!= VOIDmode
&& xmode0
!= mode0
)
1408 xop0
= convert_modes (xmode0
, mode0
, xop0
, unsignedp
);
1412 mode1
= GET_MODE (xop1
) != VOIDmode
? GET_MODE (xop1
) : mode
;
1413 if (xmode1
!= VOIDmode
&& xmode1
!= mode1
)
1415 xop1
= convert_modes (xmode1
, mode1
, xop1
, unsignedp
);
1419 /* If operation is commutative,
1420 try to make the first operand a register.
1421 Even better, try to make it the same as the target.
1422 Also try to make the last operand a constant. */
1424 && swap_commutative_operands_with_target (target
, xop0
, xop1
))
1431 /* Now, if insn's predicates don't allow our operands, put them into
1434 if (binoptab
== vec_pack_trunc_optab
1435 || binoptab
== vec_pack_usat_optab
1436 || binoptab
== vec_pack_ssat_optab
1437 || binoptab
== vec_pack_ufix_trunc_optab
1438 || binoptab
== vec_pack_sfix_trunc_optab
)
1440 /* The mode of the result is different then the mode of the
1442 tmp_mode
= insn_data
[(int) icode
].operand
[0].mode
;
1443 if (GET_MODE_NUNITS (tmp_mode
) != 2 * GET_MODE_NUNITS (mode
))
1445 delete_insns_since (last
);
1452 create_output_operand (&ops
[0], target
, tmp_mode
);
1453 create_input_operand (&ops
[1], xop0
, mode0
);
1454 create_input_operand (&ops
[2], xop1
, mode1
);
1455 pat
= maybe_gen_insn (icode
, 3, ops
);
1458 /* If PAT is composed of more than one insn, try to add an appropriate
1459 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1460 operand, call expand_binop again, this time without a target. */
1461 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
1462 && ! add_equal_note (pat
, ops
[0].value
, optab_to_code (binoptab
),
1463 ops
[1].value
, ops
[2].value
))
1465 delete_insns_since (last
);
1466 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
1467 unsignedp
, methods
);
1471 return ops
[0].value
;
1473 delete_insns_since (last
);
1477 /* Generate code to perform an operation specified by BINOPTAB
1478 on operands OP0 and OP1, with result having machine-mode MODE.
1480 UNSIGNEDP is for the case where we have to widen the operands
1481 to perform the operation. It says to use zero-extension.
1483 If TARGET is nonzero, the value
1484 is generated there, if it is convenient to do so.
1485 In all cases an rtx is returned for the locus of the value;
1486 this may or may not be TARGET. */
1489 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
1490 rtx target
, int unsignedp
, enum optab_methods methods
)
1492 enum optab_methods next_methods
1493 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
1494 ? OPTAB_WIDEN
: methods
);
1495 enum mode_class mclass
;
1496 enum machine_mode wider_mode
;
1499 rtx entry_last
= get_last_insn ();
1502 mclass
= GET_MODE_CLASS (mode
);
1504 /* If subtracting an integer constant, convert this into an addition of
1505 the negated constant. */
1507 if (binoptab
== sub_optab
&& CONST_INT_P (op1
))
1509 op1
= negate_rtx (mode
, op1
);
1510 binoptab
= add_optab
;
1513 /* Record where to delete back to if we backtrack. */
1514 last
= get_last_insn ();
1516 /* If we can do it with a three-operand insn, do so. */
1518 if (methods
!= OPTAB_MUST_WIDEN
1519 && find_widening_optab_handler (binoptab
, mode
,
1520 widened_mode (mode
, op0
, op1
), 1)
1521 != CODE_FOR_nothing
)
1523 temp
= expand_binop_directly (mode
, binoptab
, op0
, op1
, target
,
1524 unsignedp
, methods
, last
);
1529 /* If we were trying to rotate, and that didn't work, try rotating
1530 the other direction before falling back to shifts and bitwise-or. */
1531 if (((binoptab
== rotl_optab
1532 && optab_handler (rotr_optab
, mode
) != CODE_FOR_nothing
)
1533 || (binoptab
== rotr_optab
1534 && optab_handler (rotl_optab
, mode
) != CODE_FOR_nothing
))
1535 && mclass
== MODE_INT
)
1537 optab otheroptab
= (binoptab
== rotl_optab
? rotr_optab
: rotl_optab
);
1539 unsigned int bits
= GET_MODE_PRECISION (mode
);
1541 if (CONST_INT_P (op1
))
1542 newop1
= GEN_INT (bits
- INTVAL (op1
));
1543 else if (targetm
.shift_truncation_mask (mode
) == bits
- 1)
1544 newop1
= negate_rtx (GET_MODE (op1
), op1
);
1546 newop1
= expand_binop (GET_MODE (op1
), sub_optab
,
1547 GEN_INT (bits
), op1
,
1548 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1550 temp
= expand_binop_directly (mode
, otheroptab
, op0
, newop1
,
1551 target
, unsignedp
, methods
, last
);
1556 /* If this is a multiply, see if we can do a widening operation that
1557 takes operands of this mode and makes a wider mode. */
1559 if (binoptab
== smul_optab
1560 && GET_MODE_2XWIDER_MODE (mode
) != VOIDmode
1561 && (widening_optab_handler ((unsignedp
? umul_widen_optab
1562 : smul_widen_optab
),
1563 GET_MODE_2XWIDER_MODE (mode
), mode
)
1564 != CODE_FOR_nothing
))
1566 temp
= expand_binop (GET_MODE_2XWIDER_MODE (mode
),
1567 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1568 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1572 if (GET_MODE_CLASS (mode
) == MODE_INT
1573 && TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (temp
)))
1574 return gen_lowpart (mode
, temp
);
1576 return convert_to_mode (mode
, temp
, unsignedp
);
1580 /* If this is a vector shift by a scalar, see if we can do a vector
1581 shift by a vector. If so, broadcast the scalar into a vector. */
1582 if (mclass
== MODE_VECTOR_INT
)
1584 optab otheroptab
= unknown_optab
;
1586 if (binoptab
== ashl_optab
)
1587 otheroptab
= vashl_optab
;
1588 else if (binoptab
== ashr_optab
)
1589 otheroptab
= vashr_optab
;
1590 else if (binoptab
== lshr_optab
)
1591 otheroptab
= vlshr_optab
;
1592 else if (binoptab
== rotl_optab
)
1593 otheroptab
= vrotl_optab
;
1594 else if (binoptab
== rotr_optab
)
1595 otheroptab
= vrotr_optab
;
1597 if (otheroptab
&& optab_handler (otheroptab
, mode
) != CODE_FOR_nothing
)
1599 rtx vop1
= expand_vector_broadcast (mode
, op1
);
1602 temp
= expand_binop_directly (mode
, otheroptab
, op0
, vop1
,
1603 target
, unsignedp
, methods
, last
);
1610 /* Look for a wider mode of the same class for which we think we
1611 can open-code the operation. Check for a widening multiply at the
1612 wider mode as well. */
1614 if (CLASS_HAS_WIDER_MODES_P (mclass
)
1615 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1616 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
1617 wider_mode
!= VOIDmode
;
1618 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1620 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
1621 || (binoptab
== smul_optab
1622 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1623 && (find_widening_optab_handler ((unsignedp
1625 : smul_widen_optab
),
1626 GET_MODE_WIDER_MODE (wider_mode
),
1628 != CODE_FOR_nothing
)))
1630 rtx xop0
= op0
, xop1
= op1
;
1633 /* For certain integer operations, we need not actually extend
1634 the narrow operands, as long as we will truncate
1635 the results to the same narrowness. */
1637 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1638 || binoptab
== xor_optab
1639 || binoptab
== add_optab
|| binoptab
== sub_optab
1640 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1641 && mclass
== MODE_INT
)
1644 xop0
= avoid_expensive_constant (mode
, binoptab
, 0,
1646 if (binoptab
!= ashl_optab
)
1647 xop1
= avoid_expensive_constant (mode
, binoptab
, 1,
1651 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1653 /* The second operand of a shift must always be extended. */
1654 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1655 no_extend
&& binoptab
!= ashl_optab
);
1657 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1658 unsignedp
, OPTAB_DIRECT
);
1661 if (mclass
!= MODE_INT
1662 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
1665 target
= gen_reg_rtx (mode
);
1666 convert_move (target
, temp
, 0);
1670 return gen_lowpart (mode
, temp
);
1673 delete_insns_since (last
);
1677 /* If operation is commutative,
1678 try to make the first operand a register.
1679 Even better, try to make it the same as the target.
1680 Also try to make the last operand a constant. */
1681 if (commutative_optab_p (binoptab
)
1682 && swap_commutative_operands_with_target (target
, op0
, op1
))
1689 /* These can be done a word at a time. */
1690 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1691 && mclass
== MODE_INT
1692 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1693 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
)
1698 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1699 won't be accurate, so use a new target. */
1703 || !valid_multiword_target_p (target
))
1704 target
= gen_reg_rtx (mode
);
1708 /* Do the actual arithmetic. */
1709 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1711 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1712 rtx x
= expand_binop (word_mode
, binoptab
,
1713 operand_subword_force (op0
, i
, mode
),
1714 operand_subword_force (op1
, i
, mode
),
1715 target_piece
, unsignedp
, next_methods
);
1720 if (target_piece
!= x
)
1721 emit_move_insn (target_piece
, x
);
1724 insns
= get_insns ();
1727 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1734 /* Synthesize double word shifts from single word shifts. */
1735 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1736 || binoptab
== ashr_optab
)
1737 && mclass
== MODE_INT
1738 && (CONST_INT_P (op1
) || optimize_insn_for_speed_p ())
1739 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1740 && GET_MODE_PRECISION (mode
) == GET_MODE_BITSIZE (mode
)
1741 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
1742 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1743 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1745 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1746 enum machine_mode op1_mode
;
1748 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1749 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1750 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1752 /* Apply the truncation to constant shifts. */
1753 if (double_shift_mask
> 0 && CONST_INT_P (op1
))
1754 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1756 if (op1
== CONST0_RTX (op1_mode
))
1759 /* Make sure that this is a combination that expand_doubleword_shift
1760 can handle. See the comments there for details. */
1761 if (double_shift_mask
== 0
1762 || (shift_mask
== BITS_PER_WORD
- 1
1763 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1766 rtx into_target
, outof_target
;
1767 rtx into_input
, outof_input
;
1768 int left_shift
, outof_word
;
1770 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1771 won't be accurate, so use a new target. */
1775 || !valid_multiword_target_p (target
))
1776 target
= gen_reg_rtx (mode
);
1780 /* OUTOF_* is the word we are shifting bits away from, and
1781 INTO_* is the word that we are shifting bits towards, thus
1782 they differ depending on the direction of the shift and
1783 WORDS_BIG_ENDIAN. */
1785 left_shift
= binoptab
== ashl_optab
;
1786 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1788 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1789 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1791 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1792 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1794 if (expand_doubleword_shift (op1_mode
, binoptab
,
1795 outof_input
, into_input
, op1
,
1796 outof_target
, into_target
,
1797 unsignedp
, next_methods
, shift_mask
))
1799 insns
= get_insns ();
1809 /* Synthesize double word rotates from single word shifts. */
1810 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1811 && mclass
== MODE_INT
1812 && CONST_INT_P (op1
)
1813 && GET_MODE_PRECISION (mode
) == 2 * BITS_PER_WORD
1814 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1815 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1818 rtx into_target
, outof_target
;
1819 rtx into_input
, outof_input
;
1821 int shift_count
, left_shift
, outof_word
;
1823 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1824 won't be accurate, so use a new target. Do this also if target is not
1825 a REG, first because having a register instead may open optimization
1826 opportunities, and second because if target and op0 happen to be MEMs
1827 designating the same location, we would risk clobbering it too early
1828 in the code sequence we generate below. */
1833 || !valid_multiword_target_p (target
))
1834 target
= gen_reg_rtx (mode
);
1838 shift_count
= INTVAL (op1
);
1840 /* OUTOF_* is the word we are shifting bits away from, and
1841 INTO_* is the word that we are shifting bits towards, thus
1842 they differ depending on the direction of the shift and
1843 WORDS_BIG_ENDIAN. */
1845 left_shift
= (binoptab
== rotl_optab
);
1846 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1848 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1849 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1851 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1852 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1854 if (shift_count
== BITS_PER_WORD
)
1856 /* This is just a word swap. */
1857 emit_move_insn (outof_target
, into_input
);
1858 emit_move_insn (into_target
, outof_input
);
1863 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1864 rtx first_shift_count
, second_shift_count
;
1865 optab reverse_unsigned_shift
, unsigned_shift
;
1867 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1868 ? lshr_optab
: ashl_optab
);
1870 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1871 ? ashl_optab
: lshr_optab
);
1873 if (shift_count
> BITS_PER_WORD
)
1875 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1876 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1880 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1881 second_shift_count
= GEN_INT (shift_count
);
1884 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1885 outof_input
, first_shift_count
,
1886 NULL_RTX
, unsignedp
, next_methods
);
1887 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1888 into_input
, second_shift_count
,
1889 NULL_RTX
, unsignedp
, next_methods
);
1891 if (into_temp1
!= 0 && into_temp2
!= 0)
1892 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1893 into_target
, unsignedp
, next_methods
);
1897 if (inter
!= 0 && inter
!= into_target
)
1898 emit_move_insn (into_target
, inter
);
1900 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1901 into_input
, first_shift_count
,
1902 NULL_RTX
, unsignedp
, next_methods
);
1903 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1904 outof_input
, second_shift_count
,
1905 NULL_RTX
, unsignedp
, next_methods
);
1907 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1908 inter
= expand_binop (word_mode
, ior_optab
,
1909 outof_temp1
, outof_temp2
,
1910 outof_target
, unsignedp
, next_methods
);
1912 if (inter
!= 0 && inter
!= outof_target
)
1913 emit_move_insn (outof_target
, inter
);
1916 insns
= get_insns ();
1926 /* These can be done a word at a time by propagating carries. */
1927 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1928 && mclass
== MODE_INT
1929 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1930 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
)
1933 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1934 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1935 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1936 rtx xop0
, xop1
, xtarget
;
1938 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1939 value is one of those, use it. Otherwise, use 1 since it is the
1940 one easiest to get. */
1941 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1942 int normalizep
= STORE_FLAG_VALUE
;
1947 /* Prepare the operands. */
1948 xop0
= force_reg (mode
, op0
);
1949 xop1
= force_reg (mode
, op1
);
1951 xtarget
= gen_reg_rtx (mode
);
1953 if (target
== 0 || !REG_P (target
) || !valid_multiword_target_p (target
))
1956 /* Indicate for flow that the entire target reg is being set. */
1958 emit_clobber (xtarget
);
1960 /* Do the actual arithmetic. */
1961 for (i
= 0; i
< nwords
; i
++)
1963 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1964 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1965 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1966 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1969 /* Main add/subtract of the input operands. */
1970 x
= expand_binop (word_mode
, binoptab
,
1971 op0_piece
, op1_piece
,
1972 target_piece
, unsignedp
, next_methods
);
1978 /* Store carry from main add/subtract. */
1979 carry_out
= gen_reg_rtx (word_mode
);
1980 carry_out
= emit_store_flag_force (carry_out
,
1981 (binoptab
== add_optab
1984 word_mode
, 1, normalizep
);
1991 /* Add/subtract previous carry to main result. */
1992 newx
= expand_binop (word_mode
,
1993 normalizep
== 1 ? binoptab
: otheroptab
,
1995 NULL_RTX
, 1, next_methods
);
1999 /* Get out carry from adding/subtracting carry in. */
2000 rtx carry_tmp
= gen_reg_rtx (word_mode
);
2001 carry_tmp
= emit_store_flag_force (carry_tmp
,
2002 (binoptab
== add_optab
2005 word_mode
, 1, normalizep
);
2007 /* Logical-ior the two poss. carry together. */
2008 carry_out
= expand_binop (word_mode
, ior_optab
,
2009 carry_out
, carry_tmp
,
2010 carry_out
, 0, next_methods
);
2014 emit_move_insn (target_piece
, newx
);
2018 if (x
!= target_piece
)
2019 emit_move_insn (target_piece
, x
);
2022 carry_in
= carry_out
;
2025 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
2027 if (optab_handler (mov_optab
, mode
) != CODE_FOR_nothing
2028 || ! rtx_equal_p (target
, xtarget
))
2030 rtx temp
= emit_move_insn (target
, xtarget
);
2032 set_dst_reg_note (temp
, REG_EQUAL
,
2033 gen_rtx_fmt_ee (optab_to_code (binoptab
),
2034 mode
, copy_rtx (xop0
),
2045 delete_insns_since (last
);
2048 /* Attempt to synthesize double word multiplies using a sequence of word
2049 mode multiplications. We first attempt to generate a sequence using a
2050 more efficient unsigned widening multiply, and if that fails we then
2051 try using a signed widening multiply. */
2053 if (binoptab
== smul_optab
2054 && mclass
== MODE_INT
2055 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
2056 && optab_handler (smul_optab
, word_mode
) != CODE_FOR_nothing
2057 && optab_handler (add_optab
, word_mode
) != CODE_FOR_nothing
)
2059 rtx product
= NULL_RTX
;
2060 if (widening_optab_handler (umul_widen_optab
, mode
, word_mode
)
2061 != CODE_FOR_nothing
)
2063 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
2066 delete_insns_since (last
);
2069 if (product
== NULL_RTX
2070 && widening_optab_handler (smul_widen_optab
, mode
, word_mode
)
2071 != CODE_FOR_nothing
)
2073 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
2076 delete_insns_since (last
);
2079 if (product
!= NULL_RTX
)
2081 if (optab_handler (mov_optab
, mode
) != CODE_FOR_nothing
)
2083 temp
= emit_move_insn (target
? target
: product
, product
);
2084 set_dst_reg_note (temp
,
2086 gen_rtx_fmt_ee (MULT
, mode
,
2089 target
? target
: product
);
2095 /* It can't be open-coded in this mode.
2096 Use a library call if one is available and caller says that's ok. */
2098 libfunc
= optab_libfunc (binoptab
, mode
);
2100 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
2104 enum machine_mode op1_mode
= mode
;
2109 if (shift_optab_p (binoptab
))
2111 op1_mode
= targetm
.libgcc_shift_count_mode ();
2112 /* Specify unsigned here,
2113 since negative shift counts are meaningless. */
2114 op1x
= convert_to_mode (op1_mode
, op1
, 1);
2117 if (GET_MODE (op0
) != VOIDmode
2118 && GET_MODE (op0
) != mode
)
2119 op0
= convert_to_mode (mode
, op0
, unsignedp
);
2121 /* Pass 1 for NO_QUEUE so we don't lose any increments
2122 if the libcall is cse'd or moved. */
2123 value
= emit_library_call_value (libfunc
,
2124 NULL_RTX
, LCT_CONST
, mode
, 2,
2125 op0
, mode
, op1x
, op1_mode
);
2127 insns
= get_insns ();
2130 target
= gen_reg_rtx (mode
);
2131 emit_libcall_block_1 (insns
, target
, value
,
2132 gen_rtx_fmt_ee (optab_to_code (binoptab
),
2134 trapv_binoptab_p (binoptab
));
2139 delete_insns_since (last
);
2141 /* It can't be done in this mode. Can we do it in a wider mode? */
2143 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
2144 || methods
== OPTAB_MUST_WIDEN
))
2146 /* Caller says, don't even try. */
2147 delete_insns_since (entry_last
);
2151 /* Compute the value of METHODS to pass to recursive calls.
2152 Don't allow widening to be tried recursively. */
2154 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
2156 /* Look for a wider mode of the same class for which it appears we can do
2159 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2161 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2162 wider_mode
!= VOIDmode
;
2163 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2165 if (find_widening_optab_handler (binoptab
, wider_mode
, mode
, 1)
2167 || (methods
== OPTAB_LIB
2168 && optab_libfunc (binoptab
, wider_mode
)))
2170 rtx xop0
= op0
, xop1
= op1
;
2173 /* For certain integer operations, we need not actually extend
2174 the narrow operands, as long as we will truncate
2175 the results to the same narrowness. */
2177 if ((binoptab
== ior_optab
|| binoptab
== and_optab
2178 || binoptab
== xor_optab
2179 || binoptab
== add_optab
|| binoptab
== sub_optab
2180 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
2181 && mclass
== MODE_INT
)
2184 xop0
= widen_operand (xop0
, wider_mode
, mode
,
2185 unsignedp
, no_extend
);
2187 /* The second operand of a shift must always be extended. */
2188 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
2189 no_extend
&& binoptab
!= ashl_optab
);
2191 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
2192 unsignedp
, methods
);
2195 if (mclass
!= MODE_INT
2196 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
2199 target
= gen_reg_rtx (mode
);
2200 convert_move (target
, temp
, 0);
2204 return gen_lowpart (mode
, temp
);
2207 delete_insns_since (last
);
2212 delete_insns_since (entry_last
);
2216 /* Expand a binary operator which has both signed and unsigned forms.
2217 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2220 If we widen unsigned operands, we may use a signed wider operation instead
2221 of an unsigned wider operation, since the result would be the same. */
2224 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
2225 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
2226 enum optab_methods methods
)
2229 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2230 struct optab_d wide_soptab
;
2232 /* Do it without widening, if possible. */
2233 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2234 unsignedp
, OPTAB_DIRECT
);
2235 if (temp
|| methods
== OPTAB_DIRECT
)
2238 /* Try widening to a signed int. Make a fake signed optab that
2239 hides any signed insn for direct use. */
2240 wide_soptab
= *soptab
;
2241 set_optab_handler (&wide_soptab
, mode
, CODE_FOR_nothing
);
2242 /* We don't want to generate new hash table entries from this fake
2244 wide_soptab
.libcall_gen
= NULL
;
2246 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2247 unsignedp
, OPTAB_WIDEN
);
2249 /* For unsigned operands, try widening to an unsigned int. */
2250 if (temp
== 0 && unsignedp
)
2251 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2252 unsignedp
, OPTAB_WIDEN
);
2253 if (temp
|| methods
== OPTAB_WIDEN
)
2256 /* Use the right width libcall if that exists. */
2257 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2258 if (temp
|| methods
== OPTAB_LIB
)
2261 /* Must widen and use a libcall, use either signed or unsigned. */
2262 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2263 unsignedp
, methods
);
2267 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2268 unsignedp
, methods
);
2272 /* Generate code to perform an operation specified by UNOPPTAB
2273 on operand OP0, with two results to TARG0 and TARG1.
2274 We assume that the order of the operands for the instruction
2275 is TARG0, TARG1, OP0.
2277 Either TARG0 or TARG1 may be zero, but what that means is that
2278 the result is not actually wanted. We will generate it into
2279 a dummy pseudo-reg and discard it. They may not both be zero.
2281 Returns 1 if this operation can be performed; 0 if not. */
2284 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
2287 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2288 enum mode_class mclass
;
2289 enum machine_mode wider_mode
;
2290 rtx entry_last
= get_last_insn ();
2293 mclass
= GET_MODE_CLASS (mode
);
2296 targ0
= gen_reg_rtx (mode
);
2298 targ1
= gen_reg_rtx (mode
);
2300 /* Record where to go back to if we fail. */
2301 last
= get_last_insn ();
2303 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
2305 struct expand_operand ops
[3];
2306 enum insn_code icode
= optab_handler (unoptab
, mode
);
2308 create_fixed_operand (&ops
[0], targ0
);
2309 create_fixed_operand (&ops
[1], targ1
);
2310 create_convert_operand_from (&ops
[2], op0
, mode
, unsignedp
);
2311 if (maybe_expand_insn (icode
, 3, ops
))
2315 /* It can't be done in this mode. Can we do it in a wider mode? */
2317 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2319 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2320 wider_mode
!= VOIDmode
;
2321 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2323 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
2325 rtx t0
= gen_reg_rtx (wider_mode
);
2326 rtx t1
= gen_reg_rtx (wider_mode
);
2327 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2329 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
2331 convert_move (targ0
, t0
, unsignedp
);
2332 convert_move (targ1
, t1
, unsignedp
);
2336 delete_insns_since (last
);
2341 delete_insns_since (entry_last
);
2345 /* Generate code to perform an operation specified by BINOPTAB
2346 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2347 We assume that the order of the operands for the instruction
2348 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2349 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2351 Either TARG0 or TARG1 may be zero, but what that means is that
2352 the result is not actually wanted. We will generate it into
2353 a dummy pseudo-reg and discard it. They may not both be zero.
2355 Returns 1 if this operation can be performed; 0 if not. */
2358 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
2361 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2362 enum mode_class mclass
;
2363 enum machine_mode wider_mode
;
2364 rtx entry_last
= get_last_insn ();
2367 mclass
= GET_MODE_CLASS (mode
);
2370 targ0
= gen_reg_rtx (mode
);
2372 targ1
= gen_reg_rtx (mode
);
2374 /* Record where to go back to if we fail. */
2375 last
= get_last_insn ();
2377 if (optab_handler (binoptab
, mode
) != CODE_FOR_nothing
)
2379 struct expand_operand ops
[4];
2380 enum insn_code icode
= optab_handler (binoptab
, mode
);
2381 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2382 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2383 rtx xop0
= op0
, xop1
= op1
;
2385 /* If we are optimizing, force expensive constants into a register. */
2386 xop0
= avoid_expensive_constant (mode0
, binoptab
, 0, xop0
, unsignedp
);
2387 xop1
= avoid_expensive_constant (mode1
, binoptab
, 1, xop1
, unsignedp
);
2389 create_fixed_operand (&ops
[0], targ0
);
2390 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
2391 create_convert_operand_from (&ops
[2], op1
, mode
, unsignedp
);
2392 create_fixed_operand (&ops
[3], targ1
);
2393 if (maybe_expand_insn (icode
, 4, ops
))
2395 delete_insns_since (last
);
2398 /* It can't be done in this mode. Can we do it in a wider mode? */
2400 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2402 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2403 wider_mode
!= VOIDmode
;
2404 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2406 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
)
2408 rtx t0
= gen_reg_rtx (wider_mode
);
2409 rtx t1
= gen_reg_rtx (wider_mode
);
2410 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2411 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2413 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2416 convert_move (targ0
, t0
, unsignedp
);
2417 convert_move (targ1
, t1
, unsignedp
);
2421 delete_insns_since (last
);
2426 delete_insns_since (entry_last
);
2430 /* Expand the two-valued library call indicated by BINOPTAB, but
2431 preserve only one of the values. If TARG0 is non-NULL, the first
2432 value is placed into TARG0; otherwise the second value is placed
2433 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2434 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2435 This routine assumes that the value returned by the library call is
2436 as if the return value was of an integral mode twice as wide as the
2437 mode of OP0. Returns 1 if the call was successful. */
2440 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2441 rtx targ0
, rtx targ1
, enum rtx_code code
)
2443 enum machine_mode mode
;
2444 enum machine_mode libval_mode
;
2449 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2450 gcc_assert (!targ0
!= !targ1
);
2452 mode
= GET_MODE (op0
);
2453 libfunc
= optab_libfunc (binoptab
, mode
);
2457 /* The value returned by the library function will have twice as
2458 many bits as the nominal MODE. */
2459 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
2462 libval
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
2466 /* Get the part of VAL containing the value that we want. */
2467 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2468 targ0
? 0 : GET_MODE_SIZE (mode
));
2469 insns
= get_insns ();
2471 /* Move the into the desired location. */
2472 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2473 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2479 /* Wrapper around expand_unop which takes an rtx code to specify
2480 the operation to perform, not an optab pointer. All other
2481 arguments are the same. */
2483 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2484 rtx target
, int unsignedp
)
2486 optab unop
= code_to_optab (code
);
2489 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2495 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2497 A similar operation can be used for clrsb. UNOPTAB says which operation
2498 we are trying to expand. */
2500 widen_leading (enum machine_mode mode
, rtx op0
, rtx target
, optab unoptab
)
2502 enum mode_class mclass
= GET_MODE_CLASS (mode
);
2503 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2505 enum machine_mode wider_mode
;
2506 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2507 wider_mode
!= VOIDmode
;
2508 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2510 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
2512 rtx xop0
, temp
, last
;
2514 last
= get_last_insn ();
2517 target
= gen_reg_rtx (mode
);
2518 xop0
= widen_operand (op0
, wider_mode
, mode
,
2519 unoptab
!= clrsb_optab
, false);
2520 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2521 unoptab
!= clrsb_optab
);
2523 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2524 GEN_INT (GET_MODE_PRECISION (wider_mode
)
2525 - GET_MODE_PRECISION (mode
)),
2526 target
, true, OPTAB_DIRECT
);
2528 delete_insns_since (last
);
2537 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2538 quantities, choosing which based on whether the high word is nonzero. */
2540 expand_doubleword_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2542 rtx xop0
= force_reg (mode
, op0
);
2543 rtx subhi
= gen_highpart (word_mode
, xop0
);
2544 rtx sublo
= gen_lowpart (word_mode
, xop0
);
2545 rtx hi0_label
= gen_label_rtx ();
2546 rtx after_label
= gen_label_rtx ();
2547 rtx seq
, temp
, result
;
2549 /* If we were not given a target, use a word_mode register, not a
2550 'mode' register. The result will fit, and nobody is expecting
2551 anything bigger (the return type of __builtin_clz* is int). */
2553 target
= gen_reg_rtx (word_mode
);
2555 /* In any case, write to a word_mode scratch in both branches of the
2556 conditional, so we can ensure there is a single move insn setting
2557 'target' to tag a REG_EQUAL note on. */
2558 result
= gen_reg_rtx (word_mode
);
2562 /* If the high word is not equal to zero,
2563 then clz of the full value is clz of the high word. */
2564 emit_cmp_and_jump_insns (subhi
, CONST0_RTX (word_mode
), EQ
, 0,
2565 word_mode
, true, hi0_label
);
2567 temp
= expand_unop_direct (word_mode
, clz_optab
, subhi
, result
, true);
2572 convert_move (result
, temp
, true);
2574 emit_jump_insn (gen_jump (after_label
));
2577 /* Else clz of the full value is clz of the low word plus the number
2578 of bits in the high word. */
2579 emit_label (hi0_label
);
2581 temp
= expand_unop_direct (word_mode
, clz_optab
, sublo
, 0, true);
2584 temp
= expand_binop (word_mode
, add_optab
, temp
,
2585 GEN_INT (GET_MODE_BITSIZE (word_mode
)),
2586 result
, true, OPTAB_DIRECT
);
2590 convert_move (result
, temp
, true);
2592 emit_label (after_label
);
2593 convert_move (target
, result
, true);
2598 add_equal_note (seq
, target
, CLZ
, xop0
, 0);
2610 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2612 widen_bswap (enum machine_mode mode
, rtx op0
, rtx target
)
2614 enum mode_class mclass
= GET_MODE_CLASS (mode
);
2615 enum machine_mode wider_mode
;
2618 if (!CLASS_HAS_WIDER_MODES_P (mclass
))
2621 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2622 wider_mode
!= VOIDmode
;
2623 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2624 if (optab_handler (bswap_optab
, wider_mode
) != CODE_FOR_nothing
)
2629 last
= get_last_insn ();
2631 x
= widen_operand (op0
, wider_mode
, mode
, true, true);
2632 x
= expand_unop (wider_mode
, bswap_optab
, x
, NULL_RTX
, true);
2634 gcc_assert (GET_MODE_PRECISION (wider_mode
) == GET_MODE_BITSIZE (wider_mode
)
2635 && GET_MODE_PRECISION (mode
) == GET_MODE_BITSIZE (mode
));
2637 x
= expand_shift (RSHIFT_EXPR
, wider_mode
, x
,
2638 GET_MODE_BITSIZE (wider_mode
)
2639 - GET_MODE_BITSIZE (mode
),
2645 target
= gen_reg_rtx (mode
);
2646 emit_move_insn (target
, gen_lowpart (mode
, x
));
2649 delete_insns_since (last
);
2654 /* Try calculating bswap as two bswaps of two word-sized operands. */
2657 expand_doubleword_bswap (enum machine_mode mode
, rtx op
, rtx target
)
2661 t1
= expand_unop (word_mode
, bswap_optab
,
2662 operand_subword_force (op
, 0, mode
), NULL_RTX
, true);
2663 t0
= expand_unop (word_mode
, bswap_optab
,
2664 operand_subword_force (op
, 1, mode
), NULL_RTX
, true);
2666 if (target
== 0 || !valid_multiword_target_p (target
))
2667 target
= gen_reg_rtx (mode
);
2669 emit_clobber (target
);
2670 emit_move_insn (operand_subword (target
, 0, 1, mode
), t0
);
2671 emit_move_insn (operand_subword (target
, 1, 1, mode
), t1
);
2676 /* Try calculating (parity x) as (and (popcount x) 1), where
2677 popcount can also be done in a wider mode. */
2679 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2681 enum mode_class mclass
= GET_MODE_CLASS (mode
);
2682 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2684 enum machine_mode wider_mode
;
2685 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2686 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2688 if (optab_handler (popcount_optab
, wider_mode
) != CODE_FOR_nothing
)
2690 rtx xop0
, temp
, last
;
2692 last
= get_last_insn ();
2695 target
= gen_reg_rtx (mode
);
2696 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2697 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2700 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2701 target
, true, OPTAB_DIRECT
);
2703 delete_insns_since (last
);
2712 /* Try calculating ctz(x) as K - clz(x & -x) ,
2713 where K is GET_MODE_PRECISION(mode) - 1.
2715 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2716 don't have to worry about what the hardware does in that case. (If
2717 the clz instruction produces the usual value at 0, which is K, the
2718 result of this code sequence will be -1; expand_ffs, below, relies
2719 on this. It might be nice to have it be K instead, for consistency
2720 with the (very few) processors that provide a ctz with a defined
2721 value, but that would take one more instruction, and it would be
2722 less convenient for expand_ffs anyway. */
2725 expand_ctz (enum machine_mode mode
, rtx op0
, rtx target
)
2729 if (optab_handler (clz_optab
, mode
) == CODE_FOR_nothing
)
2734 temp
= expand_unop_direct (mode
, neg_optab
, op0
, NULL_RTX
, true);
2736 temp
= expand_binop (mode
, and_optab
, op0
, temp
, NULL_RTX
,
2737 true, OPTAB_DIRECT
);
2739 temp
= expand_unop_direct (mode
, clz_optab
, temp
, NULL_RTX
, true);
2741 temp
= expand_binop (mode
, sub_optab
, GEN_INT (GET_MODE_PRECISION (mode
) - 1),
2743 true, OPTAB_DIRECT
);
2753 add_equal_note (seq
, temp
, CTZ
, op0
, 0);
2759 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2760 else with the sequence used by expand_clz.
2762 The ffs builtin promises to return zero for a zero value and ctz/clz
2763 may have an undefined value in that case. If they do not give us a
2764 convenient value, we have to generate a test and branch. */
2766 expand_ffs (enum machine_mode mode
, rtx op0
, rtx target
)
2768 HOST_WIDE_INT val
= 0;
2769 bool defined_at_zero
= false;
2772 if (optab_handler (ctz_optab
, mode
) != CODE_FOR_nothing
)
2776 temp
= expand_unop_direct (mode
, ctz_optab
, op0
, 0, true);
2780 defined_at_zero
= (CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
) == 2);
2782 else if (optab_handler (clz_optab
, mode
) != CODE_FOR_nothing
)
2785 temp
= expand_ctz (mode
, op0
, 0);
2789 if (CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
) == 2)
2791 defined_at_zero
= true;
2792 val
= (GET_MODE_PRECISION (mode
) - 1) - val
;
2798 if (defined_at_zero
&& val
== -1)
2799 /* No correction needed at zero. */;
2802 /* We don't try to do anything clever with the situation found
2803 on some processors (eg Alpha) where ctz(0:mode) ==
2804 bitsize(mode). If someone can think of a way to send N to -1
2805 and leave alone all values in the range 0..N-1 (where N is a
2806 power of two), cheaper than this test-and-branch, please add it.
2808 The test-and-branch is done after the operation itself, in case
2809 the operation sets condition codes that can be recycled for this.
2810 (This is true on i386, for instance.) */
2812 rtx nonzero_label
= gen_label_rtx ();
2813 emit_cmp_and_jump_insns (op0
, CONST0_RTX (mode
), NE
, 0,
2814 mode
, true, nonzero_label
);
2816 convert_move (temp
, GEN_INT (-1), false);
2817 emit_label (nonzero_label
);
2820 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2821 to produce a value in the range 0..bitsize. */
2822 temp
= expand_binop (mode
, add_optab
, temp
, GEN_INT (1),
2823 target
, false, OPTAB_DIRECT
);
2830 add_equal_note (seq
, temp
, FFS
, op0
, 0);
2839 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2840 conditions, VAL may already be a SUBREG against which we cannot generate
2841 a further SUBREG. In this case, we expect forcing the value into a
2842 register will work around the situation. */
2845 lowpart_subreg_maybe_copy (enum machine_mode omode
, rtx val
,
2846 enum machine_mode imode
)
2849 ret
= lowpart_subreg (omode
, val
, imode
);
2852 val
= force_reg (imode
, val
);
2853 ret
= lowpart_subreg (omode
, val
, imode
);
2854 gcc_assert (ret
!= NULL
);
2859 /* Expand a floating point absolute value or negation operation via a
2860 logical operation on the sign bit. */
2863 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2864 rtx op0
, rtx target
)
2866 const struct real_format
*fmt
;
2867 int bitpos
, word
, nwords
, i
;
2868 enum machine_mode imode
;
2872 /* The format has to have a simple sign bit. */
2873 fmt
= REAL_MODE_FORMAT (mode
);
2877 bitpos
= fmt
->signbit_rw
;
2881 /* Don't create negative zeros if the format doesn't support them. */
2882 if (code
== NEG
&& !fmt
->has_signed_zero
)
2885 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2887 imode
= int_mode_for_mode (mode
);
2888 if (imode
== BLKmode
)
2897 if (FLOAT_WORDS_BIG_ENDIAN
)
2898 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2900 word
= bitpos
/ BITS_PER_WORD
;
2901 bitpos
= bitpos
% BITS_PER_WORD
;
2902 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2905 mask
= double_int_setbit (double_int_zero
, bitpos
);
2907 mask
= double_int_not (mask
);
2911 || (nwords
> 1 && !valid_multiword_target_p (target
)))
2912 target
= gen_reg_rtx (mode
);
2918 for (i
= 0; i
< nwords
; ++i
)
2920 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2921 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2925 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2927 immed_double_int_const (mask
, imode
),
2928 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2929 if (temp
!= targ_piece
)
2930 emit_move_insn (targ_piece
, temp
);
2933 emit_move_insn (targ_piece
, op0_piece
);
2936 insns
= get_insns ();
2943 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2944 gen_lowpart (imode
, op0
),
2945 immed_double_int_const (mask
, imode
),
2946 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2947 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2949 set_dst_reg_note (get_last_insn (), REG_EQUAL
,
2950 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)),
2957 /* As expand_unop, but will fail rather than attempt the operation in a
2958 different mode or with a libcall. */
2960 expand_unop_direct (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2963 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
2965 struct expand_operand ops
[2];
2966 enum insn_code icode
= optab_handler (unoptab
, mode
);
2967 rtx last
= get_last_insn ();
2970 create_output_operand (&ops
[0], target
, mode
);
2971 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
2972 pat
= maybe_gen_insn (icode
, 2, ops
);
2975 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2976 && ! add_equal_note (pat
, ops
[0].value
, optab_to_code (unoptab
),
2977 ops
[1].value
, NULL_RTX
))
2979 delete_insns_since (last
);
2980 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2985 return ops
[0].value
;
2991 /* Generate code to perform an operation specified by UNOPTAB
2992 on operand OP0, with result having machine-mode MODE.
2994 UNSIGNEDP is for the case where we have to widen the operands
2995 to perform the operation. It says to use zero-extension.
2997 If TARGET is nonzero, the value
2998 is generated there, if it is convenient to do so.
2999 In all cases an rtx is returned for the locus of the value;
3000 this may or may not be TARGET. */
3003 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3006 enum mode_class mclass
= GET_MODE_CLASS (mode
);
3007 enum machine_mode wider_mode
;
3011 temp
= expand_unop_direct (mode
, unoptab
, op0
, target
, unsignedp
);
3015 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3017 /* Widening (or narrowing) clz needs special treatment. */
3018 if (unoptab
== clz_optab
)
3020 temp
= widen_leading (mode
, op0
, target
, unoptab
);
3024 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
3025 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3027 temp
= expand_doubleword_clz (mode
, op0
, target
);
3035 if (unoptab
== clrsb_optab
)
3037 temp
= widen_leading (mode
, op0
, target
, unoptab
);
3043 /* Widening (or narrowing) bswap needs special treatment. */
3044 if (unoptab
== bswap_optab
)
3046 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
3047 or ROTATERT. First try these directly; if this fails, then try the
3048 obvious pair of shifts with allowed widening, as this will probably
3049 be always more efficient than the other fallback methods. */
3052 rtx last
, temp1
, temp2
;
3054 if (optab_handler (rotl_optab
, mode
) != CODE_FOR_nothing
)
3056 temp
= expand_binop (mode
, rotl_optab
, op0
, GEN_INT (8), target
,
3057 unsignedp
, OPTAB_DIRECT
);
3062 if (optab_handler (rotr_optab
, mode
) != CODE_FOR_nothing
)
3064 temp
= expand_binop (mode
, rotr_optab
, op0
, GEN_INT (8), target
,
3065 unsignedp
, OPTAB_DIRECT
);
3070 last
= get_last_insn ();
3072 temp1
= expand_binop (mode
, ashl_optab
, op0
, GEN_INT (8), NULL_RTX
,
3073 unsignedp
, OPTAB_WIDEN
);
3074 temp2
= expand_binop (mode
, lshr_optab
, op0
, GEN_INT (8), NULL_RTX
,
3075 unsignedp
, OPTAB_WIDEN
);
3078 temp
= expand_binop (mode
, ior_optab
, temp1
, temp2
, target
,
3079 unsignedp
, OPTAB_WIDEN
);
3084 delete_insns_since (last
);
3087 temp
= widen_bswap (mode
, op0
, target
);
3091 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
3092 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3094 temp
= expand_doubleword_bswap (mode
, op0
, target
);
3102 if (CLASS_HAS_WIDER_MODES_P (mclass
))
3103 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
3104 wider_mode
!= VOIDmode
;
3105 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3107 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
3110 rtx last
= get_last_insn ();
3112 /* For certain operations, we need not actually extend
3113 the narrow operand, as long as we will truncate the
3114 results to the same narrowness. */
3116 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3117 (unoptab
== neg_optab
3118 || unoptab
== one_cmpl_optab
)
3119 && mclass
== MODE_INT
);
3121 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3126 if (mclass
!= MODE_INT
3127 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
3130 target
= gen_reg_rtx (mode
);
3131 convert_move (target
, temp
, 0);
3135 return gen_lowpart (mode
, temp
);
3138 delete_insns_since (last
);
3142 /* These can be done a word at a time. */
3143 if (unoptab
== one_cmpl_optab
3144 && mclass
== MODE_INT
3145 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
3146 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3151 if (target
== 0 || target
== op0
|| !valid_multiword_target_p (target
))
3152 target
= gen_reg_rtx (mode
);
3156 /* Do the actual arithmetic. */
3157 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
3159 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
3160 rtx x
= expand_unop (word_mode
, unoptab
,
3161 operand_subword_force (op0
, i
, mode
),
3162 target_piece
, unsignedp
);
3164 if (target_piece
!= x
)
3165 emit_move_insn (target_piece
, x
);
3168 insns
= get_insns ();
3175 if (optab_to_code (unoptab
) == NEG
)
3177 /* Try negating floating point values by flipping the sign bit. */
3178 if (SCALAR_FLOAT_MODE_P (mode
))
3180 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
3185 /* If there is no negation pattern, and we have no negative zero,
3186 try subtracting from zero. */
3187 if (!HONOR_SIGNED_ZEROS (mode
))
3189 temp
= expand_binop (mode
, (unoptab
== negv_optab
3190 ? subv_optab
: sub_optab
),
3191 CONST0_RTX (mode
), op0
, target
,
3192 unsignedp
, OPTAB_DIRECT
);
3198 /* Try calculating parity (x) as popcount (x) % 2. */
3199 if (unoptab
== parity_optab
)
3201 temp
= expand_parity (mode
, op0
, target
);
3206 /* Try implementing ffs (x) in terms of clz (x). */
3207 if (unoptab
== ffs_optab
)
3209 temp
= expand_ffs (mode
, op0
, target
);
3214 /* Try implementing ctz (x) in terms of clz (x). */
3215 if (unoptab
== ctz_optab
)
3217 temp
= expand_ctz (mode
, op0
, target
);
3223 /* Now try a library call in this mode. */
3224 libfunc
= optab_libfunc (unoptab
, mode
);
3230 enum machine_mode outmode
= mode
;
3232 /* All of these functions return small values. Thus we choose to
3233 have them return something that isn't a double-word. */
3234 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
3235 || unoptab
== clrsb_optab
|| unoptab
== popcount_optab
3236 || unoptab
== parity_optab
)
3238 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
),
3239 optab_libfunc (unoptab
, mode
)));
3243 /* Pass 1 for NO_QUEUE so we don't lose any increments
3244 if the libcall is cse'd or moved. */
3245 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, outmode
,
3247 insns
= get_insns ();
3250 target
= gen_reg_rtx (outmode
);
3251 eq_value
= gen_rtx_fmt_e (optab_to_code (unoptab
), mode
, op0
);
3252 if (GET_MODE_SIZE (outmode
) < GET_MODE_SIZE (mode
))
3253 eq_value
= simplify_gen_unary (TRUNCATE
, outmode
, eq_value
, mode
);
3254 else if (GET_MODE_SIZE (outmode
) > GET_MODE_SIZE (mode
))
3255 eq_value
= simplify_gen_unary (ZERO_EXTEND
, outmode
, eq_value
, mode
);
3256 emit_libcall_block_1 (insns
, target
, value
, eq_value
,
3257 trapv_unoptab_p (unoptab
));
3262 /* It can't be done in this mode. Can we do it in a wider mode? */
3264 if (CLASS_HAS_WIDER_MODES_P (mclass
))
3266 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
3267 wider_mode
!= VOIDmode
;
3268 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3270 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
3271 || optab_libfunc (unoptab
, wider_mode
))
3274 rtx last
= get_last_insn ();
3276 /* For certain operations, we need not actually extend
3277 the narrow operand, as long as we will truncate the
3278 results to the same narrowness. */
3279 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3280 (unoptab
== neg_optab
3281 || unoptab
== one_cmpl_optab
3282 || unoptab
== bswap_optab
)
3283 && mclass
== MODE_INT
);
3285 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3288 /* If we are generating clz using wider mode, adjust the
3289 result. Similarly for clrsb. */
3290 if ((unoptab
== clz_optab
|| unoptab
== clrsb_optab
)
3292 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
3293 GEN_INT (GET_MODE_PRECISION (wider_mode
)
3294 - GET_MODE_PRECISION (mode
)),
3295 target
, true, OPTAB_DIRECT
);
3297 /* Likewise for bswap. */
3298 if (unoptab
== bswap_optab
&& temp
!= 0)
3300 gcc_assert (GET_MODE_PRECISION (wider_mode
)
3301 == GET_MODE_BITSIZE (wider_mode
)
3302 && GET_MODE_PRECISION (mode
)
3303 == GET_MODE_BITSIZE (mode
));
3305 temp
= expand_shift (RSHIFT_EXPR
, wider_mode
, temp
,
3306 GET_MODE_BITSIZE (wider_mode
)
3307 - GET_MODE_BITSIZE (mode
),
3313 if (mclass
!= MODE_INT
)
3316 target
= gen_reg_rtx (mode
);
3317 convert_move (target
, temp
, 0);
3321 return gen_lowpart (mode
, temp
);
3324 delete_insns_since (last
);
3329 /* One final attempt at implementing negation via subtraction,
3330 this time allowing widening of the operand. */
3331 if (optab_to_code (unoptab
) == NEG
&& !HONOR_SIGNED_ZEROS (mode
))
3334 temp
= expand_binop (mode
,
3335 unoptab
== negv_optab
? subv_optab
: sub_optab
,
3336 CONST0_RTX (mode
), op0
,
3337 target
, unsignedp
, OPTAB_LIB_WIDEN
);
3345 /* Emit code to compute the absolute value of OP0, with result to
3346 TARGET if convenient. (TARGET may be 0.) The return value says
3347 where the result actually is to be found.
3349 MODE is the mode of the operand; the mode of the result is
3350 different but can be deduced from MODE.
3355 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
3356 int result_unsignedp
)
3361 result_unsignedp
= 1;
3363 /* First try to do it with a special abs instruction. */
3364 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
3369 /* For floating point modes, try clearing the sign bit. */
3370 if (SCALAR_FLOAT_MODE_P (mode
))
3372 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
3377 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3378 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
3379 && !HONOR_SIGNED_ZEROS (mode
))
3381 rtx last
= get_last_insn ();
3383 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
3385 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3391 delete_insns_since (last
);
3394 /* If this machine has expensive jumps, we can do integer absolute
3395 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3396 where W is the width of MODE. */
3398 if (GET_MODE_CLASS (mode
) == MODE_INT
3399 && BRANCH_COST (optimize_insn_for_speed_p (),
3402 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3403 GET_MODE_PRECISION (mode
) - 1,
3406 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3409 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
3410 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
3420 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
3421 int result_unsignedp
, int safe
)
3426 result_unsignedp
= 1;
3428 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
3432 /* If that does not win, use conditional jump and negate. */
3434 /* It is safe to use the target if it is the same
3435 as the source if this is also a pseudo register */
3436 if (op0
== target
&& REG_P (op0
)
3437 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
3440 op1
= gen_label_rtx ();
3441 if (target
== 0 || ! safe
3442 || GET_MODE (target
) != mode
3443 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
3445 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
3446 target
= gen_reg_rtx (mode
);
3448 emit_move_insn (target
, op0
);
3451 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
3452 NULL_RTX
, NULL_RTX
, op1
, -1);
3454 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3457 emit_move_insn (target
, op0
);
3463 /* Emit code to compute the one's complement absolute value of OP0
3464 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3465 (TARGET may be NULL_RTX.) The return value says where the result
3466 actually is to be found.
3468 MODE is the mode of the operand; the mode of the result is
3469 different but can be deduced from MODE. */
3472 expand_one_cmpl_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
)
3476 /* Not applicable for floating point modes. */
3477 if (FLOAT_MODE_P (mode
))
3480 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3481 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
)
3483 rtx last
= get_last_insn ();
3485 temp
= expand_unop (mode
, one_cmpl_optab
, op0
, NULL_RTX
, 0);
3487 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3493 delete_insns_since (last
);
3496 /* If this machine has expensive jumps, we can do one's complement
3497 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3499 if (GET_MODE_CLASS (mode
) == MODE_INT
3500 && BRANCH_COST (optimize_insn_for_speed_p (),
3503 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3504 GET_MODE_PRECISION (mode
) - 1,
3507 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3517 /* A subroutine of expand_copysign, perform the copysign operation using the
3518 abs and neg primitives advertised to exist on the target. The assumption
3519 is that we have a split register file, and leaving op0 in fp registers,
3520 and not playing with subregs so much, will help the register allocator. */
3523 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3524 int bitpos
, bool op0_is_abs
)
3526 enum machine_mode imode
;
3527 enum insn_code icode
;
3533 /* Check if the back end provides an insn that handles signbit for the
3535 icode
= optab_handler (signbit_optab
, mode
);
3536 if (icode
!= CODE_FOR_nothing
)
3538 imode
= insn_data
[(int) icode
].operand
[0].mode
;
3539 sign
= gen_reg_rtx (imode
);
3540 emit_unop_insn (icode
, sign
, op1
, UNKNOWN
);
3546 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3548 imode
= int_mode_for_mode (mode
);
3549 if (imode
== BLKmode
)
3551 op1
= gen_lowpart (imode
, op1
);
3558 if (FLOAT_WORDS_BIG_ENDIAN
)
3559 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3561 word
= bitpos
/ BITS_PER_WORD
;
3562 bitpos
= bitpos
% BITS_PER_WORD
;
3563 op1
= operand_subword_force (op1
, word
, mode
);
3566 mask
= double_int_setbit (double_int_zero
, bitpos
);
3568 sign
= expand_binop (imode
, and_optab
, op1
,
3569 immed_double_int_const (mask
, imode
),
3570 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3575 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
3582 if (target
== NULL_RTX
)
3583 target
= copy_to_reg (op0
);
3585 emit_move_insn (target
, op0
);
3588 label
= gen_label_rtx ();
3589 emit_cmp_and_jump_insns (sign
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
3591 if (GET_CODE (op0
) == CONST_DOUBLE
)
3592 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3594 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
3596 emit_move_insn (target
, op0
);
3604 /* A subroutine of expand_copysign, perform the entire copysign operation
3605 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3606 is true if op0 is known to have its sign bit clear. */
3609 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3610 int bitpos
, bool op0_is_abs
)
3612 enum machine_mode imode
;
3614 int word
, nwords
, i
;
3617 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3619 imode
= int_mode_for_mode (mode
);
3620 if (imode
== BLKmode
)
3629 if (FLOAT_WORDS_BIG_ENDIAN
)
3630 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3632 word
= bitpos
/ BITS_PER_WORD
;
3633 bitpos
= bitpos
% BITS_PER_WORD
;
3634 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3637 mask
= double_int_setbit (double_int_zero
, bitpos
);
3642 || (nwords
> 1 && !valid_multiword_target_p (target
)))
3643 target
= gen_reg_rtx (mode
);
3649 for (i
= 0; i
< nwords
; ++i
)
3651 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3652 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3658 = expand_binop (imode
, and_optab
, op0_piece
,
3659 immed_double_int_const (double_int_not (mask
),
3661 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3663 op1
= expand_binop (imode
, and_optab
,
3664 operand_subword_force (op1
, i
, mode
),
3665 immed_double_int_const (mask
, imode
),
3666 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3668 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
3669 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3670 if (temp
!= targ_piece
)
3671 emit_move_insn (targ_piece
, temp
);
3674 emit_move_insn (targ_piece
, op0_piece
);
3677 insns
= get_insns ();
3684 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
3685 immed_double_int_const (mask
, imode
),
3686 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3688 op0
= gen_lowpart (imode
, op0
);
3690 op0
= expand_binop (imode
, and_optab
, op0
,
3691 immed_double_int_const (double_int_not (mask
),
3693 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3695 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
3696 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3697 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3703 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3704 scalar floating point mode. Return NULL if we do not know how to
3705 expand the operation inline. */
3708 expand_copysign (rtx op0
, rtx op1
, rtx target
)
3710 enum machine_mode mode
= GET_MODE (op0
);
3711 const struct real_format
*fmt
;
3715 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
3716 gcc_assert (GET_MODE (op1
) == mode
);
3718 /* First try to do it with a special instruction. */
3719 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
3720 target
, 0, OPTAB_DIRECT
);
3724 fmt
= REAL_MODE_FORMAT (mode
);
3725 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
3729 if (GET_CODE (op0
) == CONST_DOUBLE
)
3731 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
3732 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
3736 if (fmt
->signbit_ro
>= 0
3737 && (GET_CODE (op0
) == CONST_DOUBLE
3738 || (optab_handler (neg_optab
, mode
) != CODE_FOR_nothing
3739 && optab_handler (abs_optab
, mode
) != CODE_FOR_nothing
)))
3741 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
3742 fmt
->signbit_ro
, op0_is_abs
);
3747 if (fmt
->signbit_rw
< 0)
3749 return expand_copysign_bit (mode
, op0
, op1
, target
,
3750 fmt
->signbit_rw
, op0_is_abs
);
3753 /* Generate an instruction whose insn-code is INSN_CODE,
3754 with two operands: an output TARGET and an input OP0.
3755 TARGET *must* be nonzero, and the output is always stored there.
3756 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3757 the value that is stored into TARGET.
3759 Return false if expansion failed. */
3762 maybe_emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
,
3765 struct expand_operand ops
[2];
3768 create_output_operand (&ops
[0], target
, GET_MODE (target
));
3769 create_input_operand (&ops
[1], op0
, GET_MODE (op0
));
3770 pat
= maybe_gen_insn (icode
, 2, ops
);
3774 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3775 add_equal_note (pat
, ops
[0].value
, code
, ops
[1].value
, NULL_RTX
);
3779 if (ops
[0].value
!= target
)
3780 emit_move_insn (target
, ops
[0].value
);
3783 /* Generate an instruction whose insn-code is INSN_CODE,
3784 with two operands: an output TARGET and an input OP0.
3785 TARGET *must* be nonzero, and the output is always stored there.
3786 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3787 the value that is stored into TARGET. */
3790 emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
, enum rtx_code code
)
3792 bool ok
= maybe_emit_unop_insn (icode
, target
, op0
, code
);
3796 struct no_conflict_data
3798 rtx target
, first
, insn
;
3802 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3803 the currently examined clobber / store has to stay in the list of
3804 insns that constitute the actual libcall block. */
3806 no_conflict_move_test (rtx dest
, const_rtx set
, void *p0
)
3808 struct no_conflict_data
*p
= (struct no_conflict_data
*) p0
;
3810 /* If this inns directly contributes to setting the target, it must stay. */
3811 if (reg_overlap_mentioned_p (p
->target
, dest
))
3812 p
->must_stay
= true;
3813 /* If we haven't committed to keeping any other insns in the list yet,
3814 there is nothing more to check. */
3815 else if (p
->insn
== p
->first
)
3817 /* If this insn sets / clobbers a register that feeds one of the insns
3818 already in the list, this insn has to stay too. */
3819 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
3820 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
3821 || reg_used_between_p (dest
, p
->first
, p
->insn
)
3822 /* Likewise if this insn depends on a register set by a previous
3823 insn in the list, or if it sets a result (presumably a hard
3824 register) that is set or clobbered by a previous insn.
3825 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3826 SET_DEST perform the former check on the address, and the latter
3827 check on the MEM. */
3828 || (GET_CODE (set
) == SET
3829 && (modified_in_p (SET_SRC (set
), p
->first
)
3830 || modified_in_p (SET_DEST (set
), p
->first
)
3831 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
3832 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
3833 p
->must_stay
= true;
3837 /* Emit code to make a call to a constant function or a library call.
3839 INSNS is a list containing all insns emitted in the call.
3840 These insns leave the result in RESULT. Our block is to copy RESULT
3841 to TARGET, which is logically equivalent to EQUIV.
3843 We first emit any insns that set a pseudo on the assumption that these are
3844 loading constants into registers; doing so allows them to be safely cse'ed
3845 between blocks. Then we emit all the other insns in the block, followed by
3846 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3847 note with an operand of EQUIV. */
3850 emit_libcall_block_1 (rtx insns
, rtx target
, rtx result
, rtx equiv
,
3851 bool equiv_may_trap
)
3853 rtx final_dest
= target
;
3854 rtx next
, last
, insn
;
3856 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3857 into a MEM later. Protect the libcall block from this change. */
3858 if (! REG_P (target
) || REG_USERVAR_P (target
))
3859 target
= gen_reg_rtx (GET_MODE (target
));
3861 /* If we're using non-call exceptions, a libcall corresponding to an
3862 operation that may trap may also trap. */
3863 /* ??? See the comment in front of make_reg_eh_region_note. */
3864 if (cfun
->can_throw_non_call_exceptions
3865 && (equiv_may_trap
|| may_trap_p (equiv
)))
3867 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3870 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3873 int lp_nr
= INTVAL (XEXP (note
, 0));
3874 if (lp_nr
== 0 || lp_nr
== INT_MIN
)
3875 remove_note (insn
, note
);
3881 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3882 reg note to indicate that this call cannot throw or execute a nonlocal
3883 goto (unless there is already a REG_EH_REGION note, in which case
3885 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3887 make_reg_eh_region_note_nothrow_nononlocal (insn
);
3890 /* First emit all insns that set pseudos. Remove them from the list as
3891 we go. Avoid insns that set pseudos which were referenced in previous
3892 insns. These can be generated by move_by_pieces, for example,
3893 to update an address. Similarly, avoid insns that reference things
3894 set in previous insns. */
3896 for (insn
= insns
; insn
; insn
= next
)
3898 rtx set
= single_set (insn
);
3900 next
= NEXT_INSN (insn
);
3902 if (set
!= 0 && REG_P (SET_DEST (set
))
3903 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
3905 struct no_conflict_data data
;
3907 data
.target
= const0_rtx
;
3911 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3912 if (! data
.must_stay
)
3914 if (PREV_INSN (insn
))
3915 NEXT_INSN (PREV_INSN (insn
)) = next
;
3920 PREV_INSN (next
) = PREV_INSN (insn
);
3926 /* Some ports use a loop to copy large arguments onto the stack.
3927 Don't move anything outside such a loop. */
3932 /* Write the remaining insns followed by the final copy. */
3933 for (insn
= insns
; insn
; insn
= next
)
3935 next
= NEXT_INSN (insn
);
3940 last
= emit_move_insn (target
, result
);
3941 set_dst_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
), target
);
3943 if (final_dest
!= target
)
3944 emit_move_insn (final_dest
, target
);
3948 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3950 emit_libcall_block_1 (insns
, target
, result
, equiv
, false);
3953 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3954 PURPOSE describes how this comparison will be used. CODE is the rtx
3955 comparison code we will be using.
3957 ??? Actually, CODE is slightly weaker than that. A target is still
3958 required to implement all of the normal bcc operations, but not
3959 required to implement all (or any) of the unordered bcc operations. */
3962 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3963 enum can_compare_purpose purpose
)
3966 test
= gen_rtx_fmt_ee (code
, mode
, const0_rtx
, const0_rtx
);
3969 enum insn_code icode
;
3971 if (purpose
== ccp_jump
3972 && (icode
= optab_handler (cbranch_optab
, mode
)) != CODE_FOR_nothing
3973 && insn_operand_matches (icode
, 0, test
))
3975 if (purpose
== ccp_store_flag
3976 && (icode
= optab_handler (cstore_optab
, mode
)) != CODE_FOR_nothing
3977 && insn_operand_matches (icode
, 1, test
))
3979 if (purpose
== ccp_cmov
3980 && optab_handler (cmov_optab
, mode
) != CODE_FOR_nothing
)
3983 mode
= GET_MODE_WIDER_MODE (mode
);
3984 PUT_MODE (test
, mode
);
3986 while (mode
!= VOIDmode
);
3991 /* This function is called when we are going to emit a compare instruction that
3992 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3994 *PMODE is the mode of the inputs (in case they are const_int).
3995 *PUNSIGNEDP nonzero says that the operands are unsigned;
3996 this matters if they need to be widened (as given by METHODS).
3998 If they have mode BLKmode, then SIZE specifies the size of both operands.
4000 This function performs all the setup necessary so that the caller only has
4001 to emit a single comparison insn. This setup can involve doing a BLKmode
4002 comparison or emitting a library call to perform the comparison if no insn
4003 is available to handle it.
4004 The values which are passed in through pointers can be modified; the caller
4005 should perform the comparison on the modified values. Constant
4006 comparisons must have already been folded. */
4009 prepare_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
4010 int unsignedp
, enum optab_methods methods
,
4011 rtx
*ptest
, enum machine_mode
*pmode
)
4013 enum machine_mode mode
= *pmode
;
4015 enum machine_mode cmp_mode
;
4016 enum mode_class mclass
;
4018 /* The other methods are not needed. */
4019 gcc_assert (methods
== OPTAB_DIRECT
|| methods
== OPTAB_WIDEN
4020 || methods
== OPTAB_LIB_WIDEN
);
4022 /* If we are optimizing, force expensive constants into a register. */
4023 if (CONSTANT_P (x
) && optimize
4024 && (rtx_cost (x
, COMPARE
, 0, optimize_insn_for_speed_p ())
4025 > COSTS_N_INSNS (1)))
4026 x
= force_reg (mode
, x
);
4028 if (CONSTANT_P (y
) && optimize
4029 && (rtx_cost (y
, COMPARE
, 1, optimize_insn_for_speed_p ())
4030 > COSTS_N_INSNS (1)))
4031 y
= force_reg (mode
, y
);
4034 /* Make sure if we have a canonical comparison. The RTL
4035 documentation states that canonical comparisons are required only
4036 for targets which have cc0. */
4037 gcc_assert (!CONSTANT_P (x
) || CONSTANT_P (y
));
4040 /* Don't let both operands fail to indicate the mode. */
4041 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
4042 x
= force_reg (mode
, x
);
4043 if (mode
== VOIDmode
)
4044 mode
= GET_MODE (x
) != VOIDmode
? GET_MODE (x
) : GET_MODE (y
);
4046 /* Handle all BLKmode compares. */
4048 if (mode
== BLKmode
)
4050 enum machine_mode result_mode
;
4051 enum insn_code cmp_code
;
4056 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
4060 /* Try to use a memory block compare insn - either cmpstr
4061 or cmpmem will do. */
4062 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
4063 cmp_mode
!= VOIDmode
;
4064 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
4066 cmp_code
= direct_optab_handler (cmpmem_optab
, cmp_mode
);
4067 if (cmp_code
== CODE_FOR_nothing
)
4068 cmp_code
= direct_optab_handler (cmpstr_optab
, cmp_mode
);
4069 if (cmp_code
== CODE_FOR_nothing
)
4070 cmp_code
= direct_optab_handler (cmpstrn_optab
, cmp_mode
);
4071 if (cmp_code
== CODE_FOR_nothing
)
4074 /* Must make sure the size fits the insn's mode. */
4075 if ((CONST_INT_P (size
)
4076 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
4077 || (GET_MODE_BITSIZE (GET_MODE (size
))
4078 > GET_MODE_BITSIZE (cmp_mode
)))
4081 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
4082 result
= gen_reg_rtx (result_mode
);
4083 size
= convert_to_mode (cmp_mode
, size
, 1);
4084 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
4086 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, result
, const0_rtx
);
4087 *pmode
= result_mode
;
4091 if (methods
!= OPTAB_LIB
&& methods
!= OPTAB_LIB_WIDEN
)
4094 /* Otherwise call a library function, memcmp. */
4095 libfunc
= memcmp_libfunc
;
4096 length_type
= sizetype
;
4097 result_mode
= TYPE_MODE (integer_type_node
);
4098 cmp_mode
= TYPE_MODE (length_type
);
4099 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
4100 TYPE_UNSIGNED (length_type
));
4102 result
= emit_library_call_value (libfunc
, 0, LCT_PURE
,
4108 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, result
, const0_rtx
);
4109 *pmode
= result_mode
;
4113 /* Don't allow operands to the compare to trap, as that can put the
4114 compare and branch in different basic blocks. */
4115 if (cfun
->can_throw_non_call_exceptions
)
4118 x
= force_reg (mode
, x
);
4120 y
= force_reg (mode
, y
);
4123 if (GET_MODE_CLASS (mode
) == MODE_CC
)
4125 gcc_assert (can_compare_p (comparison
, CCmode
, ccp_jump
));
4126 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, x
, y
);
4130 mclass
= GET_MODE_CLASS (mode
);
4131 test
= gen_rtx_fmt_ee (comparison
, VOIDmode
, x
, y
);
4135 enum insn_code icode
;
4136 icode
= optab_handler (cbranch_optab
, cmp_mode
);
4137 if (icode
!= CODE_FOR_nothing
4138 && insn_operand_matches (icode
, 0, test
))
4140 rtx last
= get_last_insn ();
4141 rtx op0
= prepare_operand (icode
, x
, 1, mode
, cmp_mode
, unsignedp
);
4142 rtx op1
= prepare_operand (icode
, y
, 2, mode
, cmp_mode
, unsignedp
);
4144 && insn_operand_matches (icode
, 1, op0
)
4145 && insn_operand_matches (icode
, 2, op1
))
4147 XEXP (test
, 0) = op0
;
4148 XEXP (test
, 1) = op1
;
4153 delete_insns_since (last
);
4156 if (methods
== OPTAB_DIRECT
|| !CLASS_HAS_WIDER_MODES_P (mclass
))
4158 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
);
4160 while (cmp_mode
!= VOIDmode
);
4162 if (methods
!= OPTAB_LIB_WIDEN
)
4165 if (!SCALAR_FLOAT_MODE_P (mode
))
4168 enum machine_mode ret_mode
;
4170 /* Handle a libcall just for the mode we are using. */
4171 libfunc
= optab_libfunc (cmp_optab
, mode
);
4172 gcc_assert (libfunc
);
4174 /* If we want unsigned, and this mode has a distinct unsigned
4175 comparison routine, use that. */
4178 rtx ulibfunc
= optab_libfunc (ucmp_optab
, mode
);
4183 ret_mode
= targetm
.libgcc_cmp_return_mode ();
4184 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4185 ret_mode
, 2, x
, mode
, y
, mode
);
4187 /* There are two kinds of comparison routines. Biased routines
4188 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4189 of gcc expect that the comparison operation is equivalent
4190 to the modified comparison. For signed comparisons compare the
4191 result against 1 in the biased case, and zero in the unbiased
4192 case. For unsigned comparisons always compare against 1 after
4193 biasing the unbiased result by adding 1. This gives us a way to
4195 The comparisons in the fixed-point helper library are always
4200 if (!TARGET_LIB_INT_CMP_BIASED
&& !ALL_FIXED_POINT_MODE_P (mode
))
4203 x
= plus_constant (ret_mode
, result
, 1);
4209 prepare_cmp_insn (x
, y
, comparison
, NULL_RTX
, unsignedp
, methods
,
4213 prepare_float_lib_cmp (x
, y
, comparison
, ptest
, pmode
);
4221 /* Before emitting an insn with code ICODE, make sure that X, which is going
4222 to be used for operand OPNUM of the insn, is converted from mode MODE to
4223 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4224 that it is accepted by the operand predicate. Return the new value. */
4227 prepare_operand (enum insn_code icode
, rtx x
, int opnum
, enum machine_mode mode
,
4228 enum machine_mode wider_mode
, int unsignedp
)
4230 if (mode
!= wider_mode
)
4231 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
4233 if (!insn_operand_matches (icode
, opnum
, x
))
4235 if (reload_completed
)
4237 x
= copy_to_mode_reg (insn_data
[(int) icode
].operand
[opnum
].mode
, x
);
4243 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4244 we can do the branch. */
4247 emit_cmp_and_jump_insn_1 (rtx test
, enum machine_mode mode
, rtx label
)
4249 enum machine_mode optab_mode
;
4250 enum mode_class mclass
;
4251 enum insn_code icode
;
4253 mclass
= GET_MODE_CLASS (mode
);
4254 optab_mode
= (mclass
== MODE_CC
) ? CCmode
: mode
;
4255 icode
= optab_handler (cbranch_optab
, optab_mode
);
4257 gcc_assert (icode
!= CODE_FOR_nothing
);
4258 gcc_assert (insn_operand_matches (icode
, 0, test
));
4259 emit_jump_insn (GEN_FCN (icode
) (test
, XEXP (test
, 0), XEXP (test
, 1), label
));
4262 /* Generate code to compare X with Y so that the condition codes are
4263 set and to jump to LABEL if the condition is true. If X is a
4264 constant and Y is not a constant, then the comparison is swapped to
4265 ensure that the comparison RTL has the canonical form.
4267 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4268 need to be widened. UNSIGNEDP is also used to select the proper
4269 branch condition code.
4271 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4273 MODE is the mode of the inputs (in case they are const_int).
4275 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4276 It will be potentially converted into an unsigned variant based on
4277 UNSIGNEDP to select a proper jump instruction. */
4280 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
4281 enum machine_mode mode
, int unsignedp
, rtx label
)
4283 rtx op0
= x
, op1
= y
;
4286 /* Swap operands and condition to ensure canonical RTL. */
4287 if (swap_commutative_operands_p (x
, y
)
4288 && can_compare_p (swap_condition (comparison
), mode
, ccp_jump
))
4291 comparison
= swap_condition (comparison
);
4294 /* If OP0 is still a constant, then both X and Y must be constants
4295 or the opposite comparison is not supported. Force X into a register
4296 to create canonical RTL. */
4297 if (CONSTANT_P (op0
))
4298 op0
= force_reg (mode
, op0
);
4301 comparison
= unsigned_condition (comparison
);
4303 prepare_cmp_insn (op0
, op1
, comparison
, size
, unsignedp
, OPTAB_LIB_WIDEN
,
4305 emit_cmp_and_jump_insn_1 (test
, mode
, label
);
4309 /* Emit a library call comparison between floating point X and Y.
4310 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4313 prepare_float_lib_cmp (rtx x
, rtx y
, enum rtx_code comparison
,
4314 rtx
*ptest
, enum machine_mode
*pmode
)
4316 enum rtx_code swapped
= swap_condition (comparison
);
4317 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
4318 enum machine_mode orig_mode
= GET_MODE (x
);
4319 enum machine_mode mode
, cmp_mode
;
4320 rtx true_rtx
, false_rtx
;
4321 rtx value
, target
, insns
, equiv
;
4323 bool reversed_p
= false;
4324 cmp_mode
= targetm
.libgcc_cmp_return_mode ();
4326 for (mode
= orig_mode
;
4328 mode
= GET_MODE_WIDER_MODE (mode
))
4330 if (code_to_optab (comparison
)
4331 && (libfunc
= optab_libfunc (code_to_optab (comparison
), mode
)))
4334 if (code_to_optab (swapped
)
4335 && (libfunc
= optab_libfunc (code_to_optab (swapped
), mode
)))
4338 tmp
= x
; x
= y
; y
= tmp
;
4339 comparison
= swapped
;
4343 if (code_to_optab (reversed
)
4344 && (libfunc
= optab_libfunc (code_to_optab (reversed
), mode
)))
4346 comparison
= reversed
;
4352 gcc_assert (mode
!= VOIDmode
);
4354 if (mode
!= orig_mode
)
4356 x
= convert_to_mode (mode
, x
, 0);
4357 y
= convert_to_mode (mode
, y
, 0);
4360 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4361 the RTL. The allows the RTL optimizers to delete the libcall if the
4362 condition can be determined at compile-time. */
4363 if (comparison
== UNORDERED
4364 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4366 true_rtx
= const_true_rtx
;
4367 false_rtx
= const0_rtx
;
4374 true_rtx
= const0_rtx
;
4375 false_rtx
= const_true_rtx
;
4379 true_rtx
= const_true_rtx
;
4380 false_rtx
= const0_rtx
;
4384 true_rtx
= const1_rtx
;
4385 false_rtx
= const0_rtx
;
4389 true_rtx
= const0_rtx
;
4390 false_rtx
= constm1_rtx
;
4394 true_rtx
= constm1_rtx
;
4395 false_rtx
= const0_rtx
;
4399 true_rtx
= const0_rtx
;
4400 false_rtx
= const1_rtx
;
4408 if (comparison
== UNORDERED
)
4410 rtx temp
= simplify_gen_relational (NE
, cmp_mode
, mode
, x
, x
);
4411 equiv
= simplify_gen_relational (NE
, cmp_mode
, mode
, y
, y
);
4412 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4413 temp
, const_true_rtx
, equiv
);
4417 equiv
= simplify_gen_relational (comparison
, cmp_mode
, mode
, x
, y
);
4418 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4419 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4420 equiv
, true_rtx
, false_rtx
);
4424 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4425 cmp_mode
, 2, x
, mode
, y
, mode
);
4426 insns
= get_insns ();
4429 target
= gen_reg_rtx (cmp_mode
);
4430 emit_libcall_block (insns
, target
, value
, equiv
);
4432 if (comparison
== UNORDERED
4433 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
)
4435 *ptest
= gen_rtx_fmt_ee (reversed_p
? EQ
: NE
, VOIDmode
, target
, false_rtx
);
4437 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, target
, const0_rtx
);
4442 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4445 emit_indirect_jump (rtx loc
)
4447 struct expand_operand ops
[1];
4449 create_address_operand (&ops
[0], loc
);
4450 expand_jump_insn (CODE_FOR_indirect_jump
, 1, ops
);
4454 #ifdef HAVE_conditional_move
4456 /* Emit a conditional move instruction if the machine supports one for that
4457 condition and machine mode.
4459 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4460 the mode to use should they be constants. If it is VOIDmode, they cannot
4463 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4464 should be stored there. MODE is the mode to use should they be constants.
4465 If it is VOIDmode, they cannot both be constants.
4467 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4468 is not supported. */
4471 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4472 enum machine_mode cmode
, rtx op2
, rtx op3
,
4473 enum machine_mode mode
, int unsignedp
)
4475 rtx tem
, comparison
, last
;
4476 enum insn_code icode
;
4477 enum rtx_code reversed
;
4479 /* If one operand is constant, make it the second one. Only do this
4480 if the other operand is not constant as well. */
4482 if (swap_commutative_operands_p (op0
, op1
))
4487 code
= swap_condition (code
);
4490 /* get_condition will prefer to generate LT and GT even if the old
4491 comparison was against zero, so undo that canonicalization here since
4492 comparisons against zero are cheaper. */
4493 if (code
== LT
&& op1
== const1_rtx
)
4494 code
= LE
, op1
= const0_rtx
;
4495 else if (code
== GT
&& op1
== constm1_rtx
)
4496 code
= GE
, op1
= const0_rtx
;
4498 if (cmode
== VOIDmode
)
4499 cmode
= GET_MODE (op0
);
4501 if (swap_commutative_operands_p (op2
, op3
)
4502 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4511 if (mode
== VOIDmode
)
4512 mode
= GET_MODE (op2
);
4514 icode
= direct_optab_handler (movcc_optab
, mode
);
4516 if (icode
== CODE_FOR_nothing
)
4520 target
= gen_reg_rtx (mode
);
4522 code
= unsignedp
? unsigned_condition (code
) : code
;
4523 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
4525 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4526 return NULL and let the caller figure out how best to deal with this
4528 if (!COMPARISON_P (comparison
))
4531 do_pending_stack_adjust ();
4532 last
= get_last_insn ();
4533 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4534 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4535 &comparison
, &cmode
);
4538 struct expand_operand ops
[4];
4540 create_output_operand (&ops
[0], target
, mode
);
4541 create_fixed_operand (&ops
[1], comparison
);
4542 create_input_operand (&ops
[2], op2
, mode
);
4543 create_input_operand (&ops
[3], op3
, mode
);
4544 if (maybe_expand_insn (icode
, 4, ops
))
4546 if (ops
[0].value
!= target
)
4547 convert_move (target
, ops
[0].value
, false);
4551 delete_insns_since (last
);
4555 /* Return nonzero if a conditional move of mode MODE is supported.
4557 This function is for combine so it can tell whether an insn that looks
4558 like a conditional move is actually supported by the hardware. If we
4559 guess wrong we lose a bit on optimization, but that's it. */
4560 /* ??? sparc64 supports conditionally moving integers values based on fp
4561 comparisons, and vice versa. How do we handle them? */
4564 can_conditionally_move_p (enum machine_mode mode
)
4566 if (direct_optab_handler (movcc_optab
, mode
) != CODE_FOR_nothing
)
4572 #endif /* HAVE_conditional_move */
4574 /* Emit a conditional addition instruction if the machine supports one for that
4575 condition and machine mode.
4577 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4578 the mode to use should they be constants. If it is VOIDmode, they cannot
4581 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4582 should be stored there. MODE is the mode to use should they be constants.
4583 If it is VOIDmode, they cannot both be constants.
4585 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4586 is not supported. */
4589 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4590 enum machine_mode cmode
, rtx op2
, rtx op3
,
4591 enum machine_mode mode
, int unsignedp
)
4593 rtx tem
, comparison
, last
;
4594 enum insn_code icode
;
4595 enum rtx_code reversed
;
4597 /* If one operand is constant, make it the second one. Only do this
4598 if the other operand is not constant as well. */
4600 if (swap_commutative_operands_p (op0
, op1
))
4605 code
= swap_condition (code
);
4608 /* get_condition will prefer to generate LT and GT even if the old
4609 comparison was against zero, so undo that canonicalization here since
4610 comparisons against zero are cheaper. */
4611 if (code
== LT
&& op1
== const1_rtx
)
4612 code
= LE
, op1
= const0_rtx
;
4613 else if (code
== GT
&& op1
== constm1_rtx
)
4614 code
= GE
, op1
= const0_rtx
;
4616 if (cmode
== VOIDmode
)
4617 cmode
= GET_MODE (op0
);
4619 if (swap_commutative_operands_p (op2
, op3
)
4620 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4629 if (mode
== VOIDmode
)
4630 mode
= GET_MODE (op2
);
4632 icode
= optab_handler (addcc_optab
, mode
);
4634 if (icode
== CODE_FOR_nothing
)
4638 target
= gen_reg_rtx (mode
);
4640 code
= unsignedp
? unsigned_condition (code
) : code
;
4641 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
4643 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4644 return NULL and let the caller figure out how best to deal with this
4646 if (!COMPARISON_P (comparison
))
4649 do_pending_stack_adjust ();
4650 last
= get_last_insn ();
4651 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4652 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4653 &comparison
, &cmode
);
4656 struct expand_operand ops
[4];
4658 create_output_operand (&ops
[0], target
, mode
);
4659 create_fixed_operand (&ops
[1], comparison
);
4660 create_input_operand (&ops
[2], op2
, mode
);
4661 create_input_operand (&ops
[3], op3
, mode
);
4662 if (maybe_expand_insn (icode
, 4, ops
))
4664 if (ops
[0].value
!= target
)
4665 convert_move (target
, ops
[0].value
, false);
4669 delete_insns_since (last
);
4673 /* These functions attempt to generate an insn body, rather than
4674 emitting the insn, but if the gen function already emits them, we
4675 make no attempt to turn them back into naked patterns. */
4677 /* Generate and return an insn body to add Y to X. */
4680 gen_add2_insn (rtx x
, rtx y
)
4682 enum insn_code icode
= optab_handler (add_optab
, GET_MODE (x
));
4684 gcc_assert (insn_operand_matches (icode
, 0, x
));
4685 gcc_assert (insn_operand_matches (icode
, 1, x
));
4686 gcc_assert (insn_operand_matches (icode
, 2, y
));
4688 return GEN_FCN (icode
) (x
, x
, y
);
4691 /* Generate and return an insn body to add r1 and c,
4692 storing the result in r0. */
4695 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4697 enum insn_code icode
= optab_handler (add_optab
, GET_MODE (r0
));
4699 if (icode
== CODE_FOR_nothing
4700 || !insn_operand_matches (icode
, 0, r0
)
4701 || !insn_operand_matches (icode
, 1, r1
)
4702 || !insn_operand_matches (icode
, 2, c
))
4705 return GEN_FCN (icode
) (r0
, r1
, c
);
4709 have_add2_insn (rtx x
, rtx y
)
4711 enum insn_code icode
;
4713 gcc_assert (GET_MODE (x
) != VOIDmode
);
4715 icode
= optab_handler (add_optab
, GET_MODE (x
));
4717 if (icode
== CODE_FOR_nothing
)
4720 if (!insn_operand_matches (icode
, 0, x
)
4721 || !insn_operand_matches (icode
, 1, x
)
4722 || !insn_operand_matches (icode
, 2, y
))
4728 /* Generate and return an insn body to subtract Y from X. */
4731 gen_sub2_insn (rtx x
, rtx y
)
4733 enum insn_code icode
= optab_handler (sub_optab
, GET_MODE (x
));
4735 gcc_assert (insn_operand_matches (icode
, 0, x
));
4736 gcc_assert (insn_operand_matches (icode
, 1, x
));
4737 gcc_assert (insn_operand_matches (icode
, 2, y
));
4739 return GEN_FCN (icode
) (x
, x
, y
);
4742 /* Generate and return an insn body to subtract r1 and c,
4743 storing the result in r0. */
4746 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4748 enum insn_code icode
= optab_handler (sub_optab
, GET_MODE (r0
));
4750 if (icode
== CODE_FOR_nothing
4751 || !insn_operand_matches (icode
, 0, r0
)
4752 || !insn_operand_matches (icode
, 1, r1
)
4753 || !insn_operand_matches (icode
, 2, c
))
4756 return GEN_FCN (icode
) (r0
, r1
, c
);
4760 have_sub2_insn (rtx x
, rtx y
)
4762 enum insn_code icode
;
4764 gcc_assert (GET_MODE (x
) != VOIDmode
);
4766 icode
= optab_handler (sub_optab
, GET_MODE (x
));
4768 if (icode
== CODE_FOR_nothing
)
4771 if (!insn_operand_matches (icode
, 0, x
)
4772 || !insn_operand_matches (icode
, 1, x
)
4773 || !insn_operand_matches (icode
, 2, y
))
4779 /* Generate the body of an instruction to copy Y into X.
4780 It may be a list of insns, if one insn isn't enough. */
4783 gen_move_insn (rtx x
, rtx y
)
4788 emit_move_insn_1 (x
, y
);
4794 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4795 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4796 no such operation exists, CODE_FOR_nothing will be returned. */
4799 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4803 #ifdef HAVE_ptr_extend
4805 return CODE_FOR_ptr_extend
;
4808 tab
= unsignedp
? zext_optab
: sext_optab
;
4809 return convert_optab_handler (tab
, to_mode
, from_mode
);
4812 /* Generate the body of an insn to extend Y (with mode MFROM)
4813 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4816 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
4817 enum machine_mode mfrom
, int unsignedp
)
4819 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4820 return GEN_FCN (icode
) (x
, y
);
4823 /* can_fix_p and can_float_p say whether the target machine
4824 can directly convert a given fixed point type to
4825 a given floating point type, or vice versa.
4826 The returned value is the CODE_FOR_... value to use,
4827 or CODE_FOR_nothing if these modes cannot be directly converted.
4829 *TRUNCP_PTR is set to 1 if it is necessary to output
4830 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4832 static enum insn_code
4833 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4834 int unsignedp
, int *truncp_ptr
)
4837 enum insn_code icode
;
4839 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4840 icode
= convert_optab_handler (tab
, fixmode
, fltmode
);
4841 if (icode
!= CODE_FOR_nothing
)
4847 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4848 for this to work. We need to rework the fix* and ftrunc* patterns
4849 and documentation. */
4850 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4851 icode
= convert_optab_handler (tab
, fixmode
, fltmode
);
4852 if (icode
!= CODE_FOR_nothing
4853 && optab_handler (ftrunc_optab
, fltmode
) != CODE_FOR_nothing
)
4860 return CODE_FOR_nothing
;
4864 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4869 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4870 return convert_optab_handler (tab
, fltmode
, fixmode
);
4873 /* Function supportable_convert_operation
4875 Check whether an operation represented by the code CODE is a
4876 convert operation that is supported by the target platform in
4877 vector form (i.e., when operating on arguments of type VECTYPE_IN
4878 producing a result of type VECTYPE_OUT).
4880 Convert operations we currently support directly are FIX_TRUNC and FLOAT.
4881 This function checks if these operations are supported
4882 by the target platform either directly (via vector tree-codes), or via
4886 - CODE1 is code of vector operation to be used when
4887 vectorizing the operation, if available.
4888 - DECL is decl of target builtin functions to be used
4889 when vectorizing the operation, if available. In this case,
4890 CODE1 is CALL_EXPR. */
4893 supportable_convert_operation (enum tree_code code
,
4894 tree vectype_out
, tree vectype_in
,
4895 tree
*decl
, enum tree_code
*code1
)
4897 enum machine_mode m1
,m2
;
4900 m1
= TYPE_MODE (vectype_out
);
4901 m2
= TYPE_MODE (vectype_in
);
4903 /* First check if we can done conversion directly. */
4904 if ((code
== FIX_TRUNC_EXPR
4905 && can_fix_p (m1
,m2
,TYPE_UNSIGNED (vectype_out
), &truncp
)
4906 != CODE_FOR_nothing
)
4907 || (code
== FLOAT_EXPR
4908 && can_float_p (m1
,m2
,TYPE_UNSIGNED (vectype_in
))
4909 != CODE_FOR_nothing
))
4915 /* Now check for builtin. */
4916 if (targetm
.vectorize
.builtin_conversion
4917 && targetm
.vectorize
.builtin_conversion (code
, vectype_out
, vectype_in
))
4920 *decl
= targetm
.vectorize
.builtin_conversion (code
, vectype_out
, vectype_in
);
4927 /* Generate code to convert FROM to floating point
4928 and store in TO. FROM must be fixed point and not VOIDmode.
4929 UNSIGNEDP nonzero means regard FROM as unsigned.
4930 Normally this is done by correcting the final value
4931 if it is negative. */
4934 expand_float (rtx to
, rtx from
, int unsignedp
)
4936 enum insn_code icode
;
4938 enum machine_mode fmode
, imode
;
4939 bool can_do_signed
= false;
4941 /* Crash now, because we won't be able to decide which mode to use. */
4942 gcc_assert (GET_MODE (from
) != VOIDmode
);
4944 /* Look for an insn to do the conversion. Do it in the specified
4945 modes if possible; otherwise convert either input, output or both to
4946 wider mode. If the integer mode is wider than the mode of FROM,
4947 we can do the conversion signed even if the input is unsigned. */
4949 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4950 fmode
= GET_MODE_WIDER_MODE (fmode
))
4951 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4952 imode
= GET_MODE_WIDER_MODE (imode
))
4954 int doing_unsigned
= unsignedp
;
4956 if (fmode
!= GET_MODE (to
)
4957 && significand_size (fmode
) < GET_MODE_PRECISION (GET_MODE (from
)))
4960 icode
= can_float_p (fmode
, imode
, unsignedp
);
4961 if (icode
== CODE_FOR_nothing
&& unsignedp
)
4963 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
4964 if (scode
!= CODE_FOR_nothing
)
4965 can_do_signed
= true;
4966 if (imode
!= GET_MODE (from
))
4967 icode
= scode
, doing_unsigned
= 0;
4970 if (icode
!= CODE_FOR_nothing
)
4972 if (imode
!= GET_MODE (from
))
4973 from
= convert_to_mode (imode
, from
, unsignedp
);
4975 if (fmode
!= GET_MODE (to
))
4976 target
= gen_reg_rtx (fmode
);
4978 emit_unop_insn (icode
, target
, from
,
4979 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4982 convert_move (to
, target
, 0);
4987 /* Unsigned integer, and no way to convert directly. Convert as signed,
4988 then unconditionally adjust the result. */
4989 if (unsignedp
&& can_do_signed
)
4991 rtx label
= gen_label_rtx ();
4993 REAL_VALUE_TYPE offset
;
4995 /* Look for a usable floating mode FMODE wider than the source and at
4996 least as wide as the target. Using FMODE will avoid rounding woes
4997 with unsigned values greater than the signed maximum value. */
4999 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
5000 fmode
= GET_MODE_WIDER_MODE (fmode
))
5001 if (GET_MODE_PRECISION (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
5002 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
5005 if (fmode
== VOIDmode
)
5007 /* There is no such mode. Pretend the target is wide enough. */
5008 fmode
= GET_MODE (to
);
5010 /* Avoid double-rounding when TO is narrower than FROM. */
5011 if ((significand_size (fmode
) + 1)
5012 < GET_MODE_PRECISION (GET_MODE (from
)))
5015 rtx neglabel
= gen_label_rtx ();
5017 /* Don't use TARGET if it isn't a register, is a hard register,
5018 or is the wrong mode. */
5020 || REGNO (target
) < FIRST_PSEUDO_REGISTER
5021 || GET_MODE (target
) != fmode
)
5022 target
= gen_reg_rtx (fmode
);
5024 imode
= GET_MODE (from
);
5025 do_pending_stack_adjust ();
5027 /* Test whether the sign bit is set. */
5028 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
5031 /* The sign bit is not set. Convert as signed. */
5032 expand_float (target
, from
, 0);
5033 emit_jump_insn (gen_jump (label
));
5036 /* The sign bit is set.
5037 Convert to a usable (positive signed) value by shifting right
5038 one bit, while remembering if a nonzero bit was shifted
5039 out; i.e., compute (from & 1) | (from >> 1). */
5041 emit_label (neglabel
);
5042 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
5043 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
5044 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, 1, NULL_RTX
, 1);
5045 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
5047 expand_float (target
, temp
, 0);
5049 /* Multiply by 2 to undo the shift above. */
5050 temp
= expand_binop (fmode
, add_optab
, target
, target
,
5051 target
, 0, OPTAB_LIB_WIDEN
);
5053 emit_move_insn (target
, temp
);
5055 do_pending_stack_adjust ();
5061 /* If we are about to do some arithmetic to correct for an
5062 unsigned operand, do it in a pseudo-register. */
5064 if (GET_MODE (to
) != fmode
5065 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
5066 target
= gen_reg_rtx (fmode
);
5068 /* Convert as signed integer to floating. */
5069 expand_float (target
, from
, 0);
5071 /* If FROM is negative (and therefore TO is negative),
5072 correct its value by 2**bitwidth. */
5074 do_pending_stack_adjust ();
5075 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
5079 real_2expN (&offset
, GET_MODE_PRECISION (GET_MODE (from
)), fmode
);
5080 temp
= expand_binop (fmode
, add_optab
, target
,
5081 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
5082 target
, 0, OPTAB_LIB_WIDEN
);
5084 emit_move_insn (target
, temp
);
5086 do_pending_stack_adjust ();
5091 /* No hardware instruction available; call a library routine. */
5096 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
5098 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
5099 from
= convert_to_mode (SImode
, from
, unsignedp
);
5101 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5102 gcc_assert (libfunc
);
5106 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5107 GET_MODE (to
), 1, from
,
5109 insns
= get_insns ();
5112 emit_libcall_block (insns
, target
, value
,
5113 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FLOAT
: FLOAT
,
5114 GET_MODE (to
), from
));
5119 /* Copy result to requested destination
5120 if we have been computing in a temp location. */
5124 if (GET_MODE (target
) == GET_MODE (to
))
5125 emit_move_insn (to
, target
);
5127 convert_move (to
, target
, 0);
5131 /* Generate code to convert FROM to fixed point and store in TO. FROM
5132 must be floating point. */
5135 expand_fix (rtx to
, rtx from
, int unsignedp
)
5137 enum insn_code icode
;
5139 enum machine_mode fmode
, imode
;
5142 /* We first try to find a pair of modes, one real and one integer, at
5143 least as wide as FROM and TO, respectively, in which we can open-code
5144 this conversion. If the integer mode is wider than the mode of TO,
5145 we can do the conversion either signed or unsigned. */
5147 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5148 fmode
= GET_MODE_WIDER_MODE (fmode
))
5149 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5150 imode
= GET_MODE_WIDER_MODE (imode
))
5152 int doing_unsigned
= unsignedp
;
5154 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
5155 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
5156 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
5158 if (icode
!= CODE_FOR_nothing
)
5160 rtx last
= get_last_insn ();
5161 if (fmode
!= GET_MODE (from
))
5162 from
= convert_to_mode (fmode
, from
, 0);
5166 rtx temp
= gen_reg_rtx (GET_MODE (from
));
5167 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
5171 if (imode
!= GET_MODE (to
))
5172 target
= gen_reg_rtx (imode
);
5174 if (maybe_emit_unop_insn (icode
, target
, from
,
5175 doing_unsigned
? UNSIGNED_FIX
: FIX
))
5178 convert_move (to
, target
, unsignedp
);
5181 delete_insns_since (last
);
5185 /* For an unsigned conversion, there is one more way to do it.
5186 If we have a signed conversion, we generate code that compares
5187 the real value to the largest representable positive number. If if
5188 is smaller, the conversion is done normally. Otherwise, subtract
5189 one plus the highest signed number, convert, and add it back.
5191 We only need to check all real modes, since we know we didn't find
5192 anything with a wider integer mode.
5194 This code used to extend FP value into mode wider than the destination.
5195 This is needed for decimal float modes which cannot accurately
5196 represent one plus the highest signed number of the same size, but
5197 not for binary modes. Consider, for instance conversion from SFmode
5200 The hot path through the code is dealing with inputs smaller than 2^63
5201 and doing just the conversion, so there is no bits to lose.
5203 In the other path we know the value is positive in the range 2^63..2^64-1
5204 inclusive. (as for other input overflow happens and result is undefined)
5205 So we know that the most important bit set in mantissa corresponds to
5206 2^63. The subtraction of 2^63 should not generate any rounding as it
5207 simply clears out that bit. The rest is trivial. */
5209 if (unsignedp
&& GET_MODE_PRECISION (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
5210 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5211 fmode
= GET_MODE_WIDER_MODE (fmode
))
5212 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0, &must_trunc
)
5213 && (!DECIMAL_FLOAT_MODE_P (fmode
)
5214 || GET_MODE_BITSIZE (fmode
) > GET_MODE_PRECISION (GET_MODE (to
))))
5217 REAL_VALUE_TYPE offset
;
5218 rtx limit
, lab1
, lab2
, insn
;
5220 bitsize
= GET_MODE_PRECISION (GET_MODE (to
));
5221 real_2expN (&offset
, bitsize
- 1, fmode
);
5222 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
5223 lab1
= gen_label_rtx ();
5224 lab2
= gen_label_rtx ();
5226 if (fmode
!= GET_MODE (from
))
5227 from
= convert_to_mode (fmode
, from
, 0);
5229 /* See if we need to do the subtraction. */
5230 do_pending_stack_adjust ();
5231 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
5234 /* If not, do the signed "fix" and branch around fixup code. */
5235 expand_fix (to
, from
, 0);
5236 emit_jump_insn (gen_jump (lab2
));
5239 /* Otherwise, subtract 2**(N-1), convert to signed number,
5240 then add 2**(N-1). Do the addition using XOR since this
5241 will often generate better code. */
5243 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
5244 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
5245 expand_fix (to
, target
, 0);
5246 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
5248 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
5250 to
, 1, OPTAB_LIB_WIDEN
);
5253 emit_move_insn (to
, target
);
5257 if (optab_handler (mov_optab
, GET_MODE (to
)) != CODE_FOR_nothing
)
5259 /* Make a place for a REG_NOTE and add it. */
5260 insn
= emit_move_insn (to
, to
);
5261 set_dst_reg_note (insn
, REG_EQUAL
,
5262 gen_rtx_fmt_e (UNSIGNED_FIX
, GET_MODE (to
),
5270 /* We can't do it with an insn, so use a library call. But first ensure
5271 that the mode of TO is at least as wide as SImode, since those are the
5272 only library calls we know about. */
5274 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
5276 target
= gen_reg_rtx (SImode
);
5278 expand_fix (target
, from
, unsignedp
);
5286 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
5287 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5288 gcc_assert (libfunc
);
5292 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5293 GET_MODE (to
), 1, from
,
5295 insns
= get_insns ();
5298 emit_libcall_block (insns
, target
, value
,
5299 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
5300 GET_MODE (to
), from
));
5305 if (GET_MODE (to
) == GET_MODE (target
))
5306 emit_move_insn (to
, target
);
5308 convert_move (to
, target
, 0);
5312 /* Generate code to convert FROM or TO a fixed-point.
5313 If UINTP is true, either TO or FROM is an unsigned integer.
5314 If SATP is true, we need to saturate the result. */
5317 expand_fixed_convert (rtx to
, rtx from
, int uintp
, int satp
)
5319 enum machine_mode to_mode
= GET_MODE (to
);
5320 enum machine_mode from_mode
= GET_MODE (from
);
5322 enum rtx_code this_code
;
5323 enum insn_code code
;
5327 if (to_mode
== from_mode
)
5329 emit_move_insn (to
, from
);
5335 tab
= satp
? satfractuns_optab
: fractuns_optab
;
5336 this_code
= satp
? UNSIGNED_SAT_FRACT
: UNSIGNED_FRACT_CONVERT
;
5340 tab
= satp
? satfract_optab
: fract_optab
;
5341 this_code
= satp
? SAT_FRACT
: FRACT_CONVERT
;
5343 code
= convert_optab_handler (tab
, to_mode
, from_mode
);
5344 if (code
!= CODE_FOR_nothing
)
5346 emit_unop_insn (code
, to
, from
, this_code
);
5350 libfunc
= convert_optab_libfunc (tab
, to_mode
, from_mode
);
5351 gcc_assert (libfunc
);
5354 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, to_mode
,
5355 1, from
, from_mode
);
5356 insns
= get_insns ();
5359 emit_libcall_block (insns
, to
, value
,
5360 gen_rtx_fmt_e (optab_to_code (tab
), to_mode
, from
));
5363 /* Generate code to convert FROM to fixed point and store in TO. FROM
5364 must be floating point, TO must be signed. Use the conversion optab
5365 TAB to do the conversion. */
5368 expand_sfix_optab (rtx to
, rtx from
, convert_optab tab
)
5370 enum insn_code icode
;
5372 enum machine_mode fmode
, imode
;
5374 /* We first try to find a pair of modes, one real and one integer, at
5375 least as wide as FROM and TO, respectively, in which we can open-code
5376 this conversion. If the integer mode is wider than the mode of TO,
5377 we can do the conversion either signed or unsigned. */
5379 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5380 fmode
= GET_MODE_WIDER_MODE (fmode
))
5381 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5382 imode
= GET_MODE_WIDER_MODE (imode
))
5384 icode
= convert_optab_handler (tab
, imode
, fmode
);
5385 if (icode
!= CODE_FOR_nothing
)
5387 rtx last
= get_last_insn ();
5388 if (fmode
!= GET_MODE (from
))
5389 from
= convert_to_mode (fmode
, from
, 0);
5391 if (imode
!= GET_MODE (to
))
5392 target
= gen_reg_rtx (imode
);
5394 if (!maybe_emit_unop_insn (icode
, target
, from
, UNKNOWN
))
5396 delete_insns_since (last
);
5400 convert_move (to
, target
, 0);
5408 /* Report whether we have an instruction to perform the operation
5409 specified by CODE on operands of mode MODE. */
5411 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
5413 return (code_to_optab (code
)
5414 && (optab_handler (code_to_optab (code
), mode
)
5415 != CODE_FOR_nothing
));
5418 /* Set all insn_code fields to CODE_FOR_nothing. */
5421 init_insn_codes (void)
5423 memset (optab_table
, 0, sizeof (optab_table
));
5424 memset (convert_optab_table
, 0, sizeof (convert_optab_table
));
5425 memset (direct_optab_table
, 0, sizeof (direct_optab_table
));
5428 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5430 init_optab (optab op
, enum rtx_code code
)
5433 code_to_optab_
[(int) code
] = op
;
5436 /* Same, but fill in its code as CODE, and do _not_ write it into
5437 the code_to_optab table. */
5439 init_optabv (optab op
, enum rtx_code code
)
5444 /* Conversion optabs never go in the code_to_optab table. */
5446 init_convert_optab (convert_optab op
, enum rtx_code code
)
5451 /* Initialize the libfunc fields of an entire group of entries in some
5452 optab. Each entry is set equal to a string consisting of a leading
5453 pair of underscores followed by a generic operation name followed by
5454 a mode name (downshifted to lowercase) followed by a single character
5455 representing the number of operands for the given operation (which is
5456 usually one of the characters '2', '3', or '4').
5458 OPTABLE is the table in which libfunc fields are to be initialized.
5459 OPNAME is the generic (string) name of the operation.
5460 SUFFIX is the character which specifies the number of operands for
5461 the given generic operation.
5462 MODE is the mode to generate for.
5466 gen_libfunc (optab optable
, const char *opname
, int suffix
, enum machine_mode mode
)
5468 unsigned opname_len
= strlen (opname
);
5469 const char *mname
= GET_MODE_NAME (mode
);
5470 unsigned mname_len
= strlen (mname
);
5471 int prefix_len
= targetm
.libfunc_gnu_prefix
? 6 : 2;
5472 int len
= prefix_len
+ opname_len
+ mname_len
+ 1 + 1;
5473 char *libfunc_name
= XALLOCAVEC (char, len
);
5480 if (targetm
.libfunc_gnu_prefix
)
5487 for (q
= opname
; *q
; )
5489 for (q
= mname
; *q
; q
++)
5490 *p
++ = TOLOWER (*q
);
5494 set_optab_libfunc (optable
, mode
,
5495 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
5498 /* Like gen_libfunc, but verify that integer operation is involved. */
5501 gen_int_libfunc (optab optable
, const char *opname
, char suffix
,
5502 enum machine_mode mode
)
5504 int maxsize
= 2 * BITS_PER_WORD
;
5506 if (GET_MODE_CLASS (mode
) != MODE_INT
)
5508 if (maxsize
< LONG_LONG_TYPE_SIZE
)
5509 maxsize
= LONG_LONG_TYPE_SIZE
;
5510 if (GET_MODE_CLASS (mode
) != MODE_INT
5511 || mode
< word_mode
|| GET_MODE_BITSIZE (mode
) > maxsize
)
5513 gen_libfunc (optable
, opname
, suffix
, mode
);
5516 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5519 gen_fp_libfunc (optab optable
, const char *opname
, char suffix
,
5520 enum machine_mode mode
)
5524 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5525 gen_libfunc (optable
, opname
, suffix
, mode
);
5526 if (DECIMAL_FLOAT_MODE_P (mode
))
5528 dec_opname
= XALLOCAVEC (char, sizeof (DECIMAL_PREFIX
) + strlen (opname
));
5529 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5530 depending on the low level floating format used. */
5531 memcpy (dec_opname
, DECIMAL_PREFIX
, sizeof (DECIMAL_PREFIX
) - 1);
5532 strcpy (dec_opname
+ sizeof (DECIMAL_PREFIX
) - 1, opname
);
5533 gen_libfunc (optable
, dec_opname
, suffix
, mode
);
5537 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5540 gen_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5541 enum machine_mode mode
)
5543 if (!ALL_FIXED_POINT_MODE_P (mode
))
5545 gen_libfunc (optable
, opname
, suffix
, mode
);
5548 /* Like gen_libfunc, but verify that signed fixed-point operation is
5552 gen_signed_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5553 enum machine_mode mode
)
5555 if (!SIGNED_FIXED_POINT_MODE_P (mode
))
5557 gen_libfunc (optable
, opname
, suffix
, mode
);
5560 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5564 gen_unsigned_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5565 enum machine_mode mode
)
5567 if (!UNSIGNED_FIXED_POINT_MODE_P (mode
))
5569 gen_libfunc (optable
, opname
, suffix
, mode
);
5572 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5575 gen_int_fp_libfunc (optab optable
, const char *name
, char suffix
,
5576 enum machine_mode mode
)
5578 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5579 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5580 if (INTEGRAL_MODE_P (mode
))
5581 gen_int_libfunc (optable
, name
, suffix
, mode
);
5584 /* Like gen_libfunc, but verify that FP or INT operation is involved
5585 and add 'v' suffix for integer operation. */
5588 gen_intv_fp_libfunc (optab optable
, const char *name
, char suffix
,
5589 enum machine_mode mode
)
5591 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5592 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5593 if (GET_MODE_CLASS (mode
) == MODE_INT
)
5595 int len
= strlen (name
);
5596 char *v_name
= XALLOCAVEC (char, len
+ 2);
5597 strcpy (v_name
, name
);
5599 v_name
[len
+ 1] = 0;
5600 gen_int_libfunc (optable
, v_name
, suffix
, mode
);
5604 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5608 gen_int_fp_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5609 enum machine_mode mode
)
5611 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5612 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5613 if (INTEGRAL_MODE_P (mode
))
5614 gen_int_libfunc (optable
, name
, suffix
, mode
);
5615 if (ALL_FIXED_POINT_MODE_P (mode
))
5616 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
5619 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5623 gen_int_fp_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5624 enum machine_mode mode
)
5626 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5627 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5628 if (INTEGRAL_MODE_P (mode
))
5629 gen_int_libfunc (optable
, name
, suffix
, mode
);
5630 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5631 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
5634 /* Like gen_libfunc, but verify that INT or FIXED operation is
5638 gen_int_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5639 enum machine_mode mode
)
5641 if (INTEGRAL_MODE_P (mode
))
5642 gen_int_libfunc (optable
, name
, suffix
, mode
);
5643 if (ALL_FIXED_POINT_MODE_P (mode
))
5644 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
5647 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5651 gen_int_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5652 enum machine_mode mode
)
5654 if (INTEGRAL_MODE_P (mode
))
5655 gen_int_libfunc (optable
, name
, suffix
, mode
);
5656 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5657 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
5660 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5664 gen_int_unsigned_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5665 enum machine_mode mode
)
5667 if (INTEGRAL_MODE_P (mode
))
5668 gen_int_libfunc (optable
, name
, suffix
, mode
);
5669 if (UNSIGNED_FIXED_POINT_MODE_P (mode
))
5670 gen_unsigned_fixed_libfunc (optable
, name
, suffix
, mode
);
5673 /* Initialize the libfunc fields of an entire group of entries of an
5674 inter-mode-class conversion optab. The string formation rules are
5675 similar to the ones for init_libfuncs, above, but instead of having
5676 a mode name and an operand count these functions have two mode names
5677 and no operand count. */
5680 gen_interclass_conv_libfunc (convert_optab tab
,
5682 enum machine_mode tmode
,
5683 enum machine_mode fmode
)
5685 size_t opname_len
= strlen (opname
);
5686 size_t mname_len
= 0;
5688 const char *fname
, *tname
;
5690 int prefix_len
= targetm
.libfunc_gnu_prefix
? 6 : 2;
5691 char *libfunc_name
, *suffix
;
5692 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5695 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5696 depends on which underlying decimal floating point format is used. */
5697 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5699 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5701 nondec_name
= XALLOCAVEC (char, prefix_len
+ opname_len
+ mname_len
+ 1 + 1);
5702 nondec_name
[0] = '_';
5703 nondec_name
[1] = '_';
5704 if (targetm
.libfunc_gnu_prefix
)
5706 nondec_name
[2] = 'g';
5707 nondec_name
[3] = 'n';
5708 nondec_name
[4] = 'u';
5709 nondec_name
[5] = '_';
5712 memcpy (&nondec_name
[prefix_len
], opname
, opname_len
);
5713 nondec_suffix
= nondec_name
+ opname_len
+ prefix_len
;
5715 dec_name
= XALLOCAVEC (char, 2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5718 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5719 memcpy (&dec_name
[2+dec_len
], opname
, opname_len
);
5720 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5722 fname
= GET_MODE_NAME (fmode
);
5723 tname
= GET_MODE_NAME (tmode
);
5725 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
5727 libfunc_name
= dec_name
;
5728 suffix
= dec_suffix
;
5732 libfunc_name
= nondec_name
;
5733 suffix
= nondec_suffix
;
5737 for (q
= fname
; *q
; p
++, q
++)
5739 for (q
= tname
; *q
; p
++, q
++)
5744 set_conv_libfunc (tab
, tmode
, fmode
,
5745 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
5748 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5749 int->fp conversion. */
5752 gen_int_to_fp_conv_libfunc (convert_optab tab
,
5754 enum machine_mode tmode
,
5755 enum machine_mode fmode
)
5757 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5759 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5761 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5764 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5768 gen_ufloat_conv_libfunc (convert_optab tab
,
5769 const char *opname ATTRIBUTE_UNUSED
,
5770 enum machine_mode tmode
,
5771 enum machine_mode fmode
)
5773 if (DECIMAL_FLOAT_MODE_P (tmode
))
5774 gen_int_to_fp_conv_libfunc (tab
, "floatuns", tmode
, fmode
);
5776 gen_int_to_fp_conv_libfunc (tab
, "floatun", tmode
, fmode
);
5779 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5780 fp->int conversion. */
5783 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab
,
5785 enum machine_mode tmode
,
5786 enum machine_mode fmode
)
5788 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5790 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
)
5792 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5795 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5796 fp->int conversion with no decimal floating point involved. */
5799 gen_fp_to_int_conv_libfunc (convert_optab tab
,
5801 enum machine_mode tmode
,
5802 enum machine_mode fmode
)
5804 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5806 if (GET_MODE_CLASS (tmode
) != MODE_INT
)
5808 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5811 /* Initialize the libfunc fields of an of an intra-mode-class conversion optab.
5812 The string formation rules are
5813 similar to the ones for init_libfunc, above. */
5816 gen_intraclass_conv_libfunc (convert_optab tab
, const char *opname
,
5817 enum machine_mode tmode
, enum machine_mode fmode
)
5819 size_t opname_len
= strlen (opname
);
5820 size_t mname_len
= 0;
5822 const char *fname
, *tname
;
5824 int prefix_len
= targetm
.libfunc_gnu_prefix
? 6 : 2;
5825 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5826 char *libfunc_name
, *suffix
;
5829 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5830 depends on which underlying decimal floating point format is used. */
5831 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5833 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5835 nondec_name
= XALLOCAVEC (char, 2 + opname_len
+ mname_len
+ 1 + 1);
5836 nondec_name
[0] = '_';
5837 nondec_name
[1] = '_';
5838 if (targetm
.libfunc_gnu_prefix
)
5840 nondec_name
[2] = 'g';
5841 nondec_name
[3] = 'n';
5842 nondec_name
[4] = 'u';
5843 nondec_name
[5] = '_';
5845 memcpy (&nondec_name
[prefix_len
], opname
, opname_len
);
5846 nondec_suffix
= nondec_name
+ opname_len
+ prefix_len
;
5848 dec_name
= XALLOCAVEC (char, 2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5851 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5852 memcpy (&dec_name
[2 + dec_len
], opname
, opname_len
);
5853 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5855 fname
= GET_MODE_NAME (fmode
);
5856 tname
= GET_MODE_NAME (tmode
);
5858 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
5860 libfunc_name
= dec_name
;
5861 suffix
= dec_suffix
;
5865 libfunc_name
= nondec_name
;
5866 suffix
= nondec_suffix
;
5870 for (q
= fname
; *q
; p
++, q
++)
5872 for (q
= tname
; *q
; p
++, q
++)
5878 set_conv_libfunc (tab
, tmode
, fmode
,
5879 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
5882 /* Pick proper libcall for trunc_optab. We need to chose if we do
5883 truncation or extension and interclass or intraclass. */
5886 gen_trunc_conv_libfunc (convert_optab tab
,
5888 enum machine_mode tmode
,
5889 enum machine_mode fmode
)
5891 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5893 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5898 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
5899 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
5900 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5902 if (GET_MODE_PRECISION (fmode
) <= GET_MODE_PRECISION (tmode
))
5905 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
5906 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
5907 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
5908 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5911 /* Pick proper libcall for extend_optab. We need to chose if we do
5912 truncation or extension and interclass or intraclass. */
5915 gen_extend_conv_libfunc (convert_optab tab
,
5916 const char *opname ATTRIBUTE_UNUSED
,
5917 enum machine_mode tmode
,
5918 enum machine_mode fmode
)
5920 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5922 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5927 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
5928 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
5929 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5931 if (GET_MODE_PRECISION (fmode
) > GET_MODE_PRECISION (tmode
))
5934 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
5935 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
5936 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
5937 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5940 /* Pick proper libcall for fract_optab. We need to chose if we do
5941 interclass or intraclass. */
5944 gen_fract_conv_libfunc (convert_optab tab
,
5946 enum machine_mode tmode
,
5947 enum machine_mode fmode
)
5951 if (!(ALL_FIXED_POINT_MODE_P (tmode
) || ALL_FIXED_POINT_MODE_P (fmode
)))
5954 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
5955 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5957 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5960 /* Pick proper libcall for fractuns_optab. */
5963 gen_fractuns_conv_libfunc (convert_optab tab
,
5965 enum machine_mode tmode
,
5966 enum machine_mode fmode
)
5970 /* One mode must be a fixed-point mode, and the other must be an integer
5972 if (!((ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
)
5973 || (ALL_FIXED_POINT_MODE_P (fmode
)
5974 && GET_MODE_CLASS (tmode
) == MODE_INT
)))
5977 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5980 /* Pick proper libcall for satfract_optab. We need to chose if we do
5981 interclass or intraclass. */
5984 gen_satfract_conv_libfunc (convert_optab tab
,
5986 enum machine_mode tmode
,
5987 enum machine_mode fmode
)
5991 /* TMODE must be a fixed-point mode. */
5992 if (!ALL_FIXED_POINT_MODE_P (tmode
))
5995 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
5996 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5998 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6001 /* Pick proper libcall for satfractuns_optab. */
6004 gen_satfractuns_conv_libfunc (convert_optab tab
,
6006 enum machine_mode tmode
,
6007 enum machine_mode fmode
)
6011 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6012 if (!(ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
))
6015 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6018 /* A table of previously-created libfuncs, hashed by name. */
6019 static GTY ((param_is (union tree_node
))) htab_t libfunc_decls
;
6021 /* Hashtable callbacks for libfunc_decls. */
6024 libfunc_decl_hash (const void *entry
)
6026 return IDENTIFIER_HASH_VALUE (DECL_NAME ((const_tree
) entry
));
6030 libfunc_decl_eq (const void *entry1
, const void *entry2
)
6032 return DECL_NAME ((const_tree
) entry1
) == (const_tree
) entry2
;
6035 /* Build a decl for a libfunc named NAME. */
6038 build_libfunc_function (const char *name
)
6040 tree decl
= build_decl (UNKNOWN_LOCATION
, FUNCTION_DECL
,
6041 get_identifier (name
),
6042 build_function_type (integer_type_node
, NULL_TREE
));
6043 /* ??? We don't have any type information except for this is
6044 a function. Pretend this is "int foo()". */
6045 DECL_ARTIFICIAL (decl
) = 1;
6046 DECL_EXTERNAL (decl
) = 1;
6047 TREE_PUBLIC (decl
) = 1;
6048 gcc_assert (DECL_ASSEMBLER_NAME (decl
));
6050 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
6051 are the flags assigned by targetm.encode_section_info. */
6052 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl
), 0), NULL
);
6058 init_one_libfunc (const char *name
)
6064 if (libfunc_decls
== NULL
)
6065 libfunc_decls
= htab_create_ggc (37, libfunc_decl_hash
,
6066 libfunc_decl_eq
, NULL
);
6068 /* See if we have already created a libfunc decl for this function. */
6069 id
= get_identifier (name
);
6070 hash
= IDENTIFIER_HASH_VALUE (id
);
6071 slot
= htab_find_slot_with_hash (libfunc_decls
, id
, hash
, INSERT
);
6072 decl
= (tree
) *slot
;
6075 /* Create a new decl, so that it can be passed to
6076 targetm.encode_section_info. */
6077 decl
= build_libfunc_function (name
);
6080 return XEXP (DECL_RTL (decl
), 0);
6083 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
6086 set_user_assembler_libfunc (const char *name
, const char *asmspec
)
6092 id
= get_identifier (name
);
6093 hash
= IDENTIFIER_HASH_VALUE (id
);
6094 slot
= htab_find_slot_with_hash (libfunc_decls
, id
, hash
, NO_INSERT
);
6096 decl
= (tree
) *slot
;
6097 set_user_assembler_name (decl
, asmspec
);
6098 return XEXP (DECL_RTL (decl
), 0);
6101 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6102 MODE to NAME, which should be either 0 or a string constant. */
6104 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
6107 struct libfunc_entry e
;
6108 struct libfunc_entry
**slot
;
6109 e
.optab
= (size_t) (optable
- &optab_table
[0]);
6114 val
= init_one_libfunc (name
);
6117 slot
= (struct libfunc_entry
**) htab_find_slot (libfunc_hash
, &e
, INSERT
);
6119 *slot
= ggc_alloc_libfunc_entry ();
6120 (*slot
)->optab
= (size_t) (optable
- &optab_table
[0]);
6121 (*slot
)->mode1
= mode
;
6122 (*slot
)->mode2
= VOIDmode
;
6123 (*slot
)->libfunc
= val
;
6126 /* Call this to reset the function entry for one conversion optab
6127 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6128 either 0 or a string constant. */
6130 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
6131 enum machine_mode fmode
, const char *name
)
6134 struct libfunc_entry e
;
6135 struct libfunc_entry
**slot
;
6136 e
.optab
= (size_t) (optable
- &convert_optab_table
[0]);
6141 val
= init_one_libfunc (name
);
6144 slot
= (struct libfunc_entry
**) htab_find_slot (libfunc_hash
, &e
, INSERT
);
6146 *slot
= ggc_alloc_libfunc_entry ();
6147 (*slot
)->optab
= (size_t) (optable
- &convert_optab_table
[0]);
6148 (*slot
)->mode1
= tmode
;
6149 (*slot
)->mode2
= fmode
;
6150 (*slot
)->libfunc
= val
;
6153 /* Call this to initialize the contents of the optabs
6154 appropriately for the current target machine. */
6161 htab_empty (libfunc_hash
);
6162 /* We statically initialize the insn_codes with the equivalent of
6163 CODE_FOR_nothing. Repeat the process if reinitialising. */
6167 libfunc_hash
= htab_create_ggc (10, hash_libfunc
, eq_libfunc
, NULL
);
6169 init_optab (add_optab
, PLUS
);
6170 init_optabv (addv_optab
, PLUS
);
6171 init_optab (sub_optab
, MINUS
);
6172 init_optabv (subv_optab
, MINUS
);
6173 init_optab (ssadd_optab
, SS_PLUS
);
6174 init_optab (usadd_optab
, US_PLUS
);
6175 init_optab (sssub_optab
, SS_MINUS
);
6176 init_optab (ussub_optab
, US_MINUS
);
6177 init_optab (smul_optab
, MULT
);
6178 init_optab (ssmul_optab
, SS_MULT
);
6179 init_optab (usmul_optab
, US_MULT
);
6180 init_optabv (smulv_optab
, MULT
);
6181 init_optab (smul_highpart_optab
, UNKNOWN
);
6182 init_optab (umul_highpart_optab
, UNKNOWN
);
6183 init_optab (smul_widen_optab
, UNKNOWN
);
6184 init_optab (umul_widen_optab
, UNKNOWN
);
6185 init_optab (usmul_widen_optab
, UNKNOWN
);
6186 init_optab (smadd_widen_optab
, UNKNOWN
);
6187 init_optab (umadd_widen_optab
, UNKNOWN
);
6188 init_optab (ssmadd_widen_optab
, UNKNOWN
);
6189 init_optab (usmadd_widen_optab
, UNKNOWN
);
6190 init_optab (smsub_widen_optab
, UNKNOWN
);
6191 init_optab (umsub_widen_optab
, UNKNOWN
);
6192 init_optab (ssmsub_widen_optab
, UNKNOWN
);
6193 init_optab (usmsub_widen_optab
, UNKNOWN
);
6194 init_optab (sdiv_optab
, DIV
);
6195 init_optab (ssdiv_optab
, SS_DIV
);
6196 init_optab (usdiv_optab
, US_DIV
);
6197 init_optabv (sdivv_optab
, DIV
);
6198 init_optab (sdivmod_optab
, UNKNOWN
);
6199 init_optab (udiv_optab
, UDIV
);
6200 init_optab (udivmod_optab
, UNKNOWN
);
6201 init_optab (smod_optab
, MOD
);
6202 init_optab (umod_optab
, UMOD
);
6203 init_optab (fmod_optab
, UNKNOWN
);
6204 init_optab (remainder_optab
, UNKNOWN
);
6205 init_optab (ftrunc_optab
, UNKNOWN
);
6206 init_optab (and_optab
, AND
);
6207 init_optab (ior_optab
, IOR
);
6208 init_optab (xor_optab
, XOR
);
6209 init_optab (ashl_optab
, ASHIFT
);
6210 init_optab (ssashl_optab
, SS_ASHIFT
);
6211 init_optab (usashl_optab
, US_ASHIFT
);
6212 init_optab (ashr_optab
, ASHIFTRT
);
6213 init_optab (lshr_optab
, LSHIFTRT
);
6214 init_optabv (vashl_optab
, ASHIFT
);
6215 init_optabv (vashr_optab
, ASHIFTRT
);
6216 init_optabv (vlshr_optab
, LSHIFTRT
);
6217 init_optab (rotl_optab
, ROTATE
);
6218 init_optab (rotr_optab
, ROTATERT
);
6219 init_optab (smin_optab
, SMIN
);
6220 init_optab (smax_optab
, SMAX
);
6221 init_optab (umin_optab
, UMIN
);
6222 init_optab (umax_optab
, UMAX
);
6223 init_optab (pow_optab
, UNKNOWN
);
6224 init_optab (atan2_optab
, UNKNOWN
);
6225 init_optab (fma_optab
, FMA
);
6226 init_optab (fms_optab
, UNKNOWN
);
6227 init_optab (fnma_optab
, UNKNOWN
);
6228 init_optab (fnms_optab
, UNKNOWN
);
6230 /* These three have codes assigned exclusively for the sake of
6232 init_optab (mov_optab
, SET
);
6233 init_optab (movstrict_optab
, STRICT_LOW_PART
);
6234 init_optab (cbranch_optab
, COMPARE
);
6236 init_optab (cmov_optab
, UNKNOWN
);
6237 init_optab (cstore_optab
, UNKNOWN
);
6238 init_optab (ctrap_optab
, UNKNOWN
);
6240 init_optab (storent_optab
, UNKNOWN
);
6242 init_optab (cmp_optab
, UNKNOWN
);
6243 init_optab (ucmp_optab
, UNKNOWN
);
6245 init_optab (eq_optab
, EQ
);
6246 init_optab (ne_optab
, NE
);
6247 init_optab (gt_optab
, GT
);
6248 init_optab (ge_optab
, GE
);
6249 init_optab (lt_optab
, LT
);
6250 init_optab (le_optab
, LE
);
6251 init_optab (unord_optab
, UNORDERED
);
6253 init_optab (neg_optab
, NEG
);
6254 init_optab (ssneg_optab
, SS_NEG
);
6255 init_optab (usneg_optab
, US_NEG
);
6256 init_optabv (negv_optab
, NEG
);
6257 init_optab (abs_optab
, ABS
);
6258 init_optabv (absv_optab
, ABS
);
6259 init_optab (addcc_optab
, UNKNOWN
);
6260 init_optab (one_cmpl_optab
, NOT
);
6261 init_optab (bswap_optab
, BSWAP
);
6262 init_optab (ffs_optab
, FFS
);
6263 init_optab (clz_optab
, CLZ
);
6264 init_optab (ctz_optab
, CTZ
);
6265 init_optab (clrsb_optab
, CLRSB
);
6266 init_optab (popcount_optab
, POPCOUNT
);
6267 init_optab (parity_optab
, PARITY
);
6268 init_optab (sqrt_optab
, SQRT
);
6269 init_optab (floor_optab
, UNKNOWN
);
6270 init_optab (ceil_optab
, UNKNOWN
);
6271 init_optab (round_optab
, UNKNOWN
);
6272 init_optab (btrunc_optab
, UNKNOWN
);
6273 init_optab (nearbyint_optab
, UNKNOWN
);
6274 init_optab (rint_optab
, UNKNOWN
);
6275 init_optab (sincos_optab
, UNKNOWN
);
6276 init_optab (sin_optab
, UNKNOWN
);
6277 init_optab (asin_optab
, UNKNOWN
);
6278 init_optab (cos_optab
, UNKNOWN
);
6279 init_optab (acos_optab
, UNKNOWN
);
6280 init_optab (exp_optab
, UNKNOWN
);
6281 init_optab (exp10_optab
, UNKNOWN
);
6282 init_optab (exp2_optab
, UNKNOWN
);
6283 init_optab (expm1_optab
, UNKNOWN
);
6284 init_optab (ldexp_optab
, UNKNOWN
);
6285 init_optab (scalb_optab
, UNKNOWN
);
6286 init_optab (significand_optab
, UNKNOWN
);
6287 init_optab (logb_optab
, UNKNOWN
);
6288 init_optab (ilogb_optab
, UNKNOWN
);
6289 init_optab (log_optab
, UNKNOWN
);
6290 init_optab (log10_optab
, UNKNOWN
);
6291 init_optab (log2_optab
, UNKNOWN
);
6292 init_optab (log1p_optab
, UNKNOWN
);
6293 init_optab (tan_optab
, UNKNOWN
);
6294 init_optab (atan_optab
, UNKNOWN
);
6295 init_optab (copysign_optab
, UNKNOWN
);
6296 init_optab (signbit_optab
, UNKNOWN
);
6298 init_optab (isinf_optab
, UNKNOWN
);
6300 init_optab (strlen_optab
, UNKNOWN
);
6301 init_optab (push_optab
, UNKNOWN
);
6303 init_optab (reduc_smax_optab
, UNKNOWN
);
6304 init_optab (reduc_umax_optab
, UNKNOWN
);
6305 init_optab (reduc_smin_optab
, UNKNOWN
);
6306 init_optab (reduc_umin_optab
, UNKNOWN
);
6307 init_optab (reduc_splus_optab
, UNKNOWN
);
6308 init_optab (reduc_uplus_optab
, UNKNOWN
);
6310 init_optab (ssum_widen_optab
, UNKNOWN
);
6311 init_optab (usum_widen_optab
, UNKNOWN
);
6312 init_optab (sdot_prod_optab
, UNKNOWN
);
6313 init_optab (udot_prod_optab
, UNKNOWN
);
6315 init_optab (vec_extract_optab
, UNKNOWN
);
6316 init_optab (vec_set_optab
, UNKNOWN
);
6317 init_optab (vec_init_optab
, UNKNOWN
);
6318 init_optab (vec_shl_optab
, UNKNOWN
);
6319 init_optab (vec_shr_optab
, UNKNOWN
);
6320 init_optab (vec_realign_load_optab
, UNKNOWN
);
6321 init_optab (movmisalign_optab
, UNKNOWN
);
6322 init_optab (vec_widen_umult_hi_optab
, UNKNOWN
);
6323 init_optab (vec_widen_umult_lo_optab
, UNKNOWN
);
6324 init_optab (vec_widen_smult_hi_optab
, UNKNOWN
);
6325 init_optab (vec_widen_smult_lo_optab
, UNKNOWN
);
6326 init_optab (vec_widen_ushiftl_hi_optab
, UNKNOWN
);
6327 init_optab (vec_widen_ushiftl_lo_optab
, UNKNOWN
);
6328 init_optab (vec_widen_sshiftl_hi_optab
, UNKNOWN
);
6329 init_optab (vec_widen_sshiftl_lo_optab
, UNKNOWN
);
6330 init_optab (vec_unpacks_hi_optab
, UNKNOWN
);
6331 init_optab (vec_unpacks_lo_optab
, UNKNOWN
);
6332 init_optab (vec_unpacku_hi_optab
, UNKNOWN
);
6333 init_optab (vec_unpacku_lo_optab
, UNKNOWN
);
6334 init_optab (vec_unpacks_float_hi_optab
, UNKNOWN
);
6335 init_optab (vec_unpacks_float_lo_optab
, UNKNOWN
);
6336 init_optab (vec_unpacku_float_hi_optab
, UNKNOWN
);
6337 init_optab (vec_unpacku_float_lo_optab
, UNKNOWN
);
6338 init_optab (vec_pack_trunc_optab
, UNKNOWN
);
6339 init_optab (vec_pack_usat_optab
, UNKNOWN
);
6340 init_optab (vec_pack_ssat_optab
, UNKNOWN
);
6341 init_optab (vec_pack_ufix_trunc_optab
, UNKNOWN
);
6342 init_optab (vec_pack_sfix_trunc_optab
, UNKNOWN
);
6344 init_optab (powi_optab
, UNKNOWN
);
6347 init_convert_optab (sext_optab
, SIGN_EXTEND
);
6348 init_convert_optab (zext_optab
, ZERO_EXTEND
);
6349 init_convert_optab (trunc_optab
, TRUNCATE
);
6350 init_convert_optab (sfix_optab
, FIX
);
6351 init_convert_optab (ufix_optab
, UNSIGNED_FIX
);
6352 init_convert_optab (sfixtrunc_optab
, UNKNOWN
);
6353 init_convert_optab (ufixtrunc_optab
, UNKNOWN
);
6354 init_convert_optab (sfloat_optab
, FLOAT
);
6355 init_convert_optab (ufloat_optab
, UNSIGNED_FLOAT
);
6356 init_convert_optab (lrint_optab
, UNKNOWN
);
6357 init_convert_optab (lround_optab
, UNKNOWN
);
6358 init_convert_optab (lfloor_optab
, UNKNOWN
);
6359 init_convert_optab (lceil_optab
, UNKNOWN
);
6361 init_convert_optab (fract_optab
, FRACT_CONVERT
);
6362 init_convert_optab (fractuns_optab
, UNSIGNED_FRACT_CONVERT
);
6363 init_convert_optab (satfract_optab
, SAT_FRACT
);
6364 init_convert_optab (satfractuns_optab
, UNSIGNED_SAT_FRACT
);
6366 /* Fill in the optabs with the insns we support. */
6369 /* Initialize the optabs with the names of the library functions. */
6370 add_optab
->libcall_basename
= "add";
6371 add_optab
->libcall_suffix
= '3';
6372 add_optab
->libcall_gen
= gen_int_fp_fixed_libfunc
;
6373 addv_optab
->libcall_basename
= "add";
6374 addv_optab
->libcall_suffix
= '3';
6375 addv_optab
->libcall_gen
= gen_intv_fp_libfunc
;
6376 ssadd_optab
->libcall_basename
= "ssadd";
6377 ssadd_optab
->libcall_suffix
= '3';
6378 ssadd_optab
->libcall_gen
= gen_signed_fixed_libfunc
;
6379 usadd_optab
->libcall_basename
= "usadd";
6380 usadd_optab
->libcall_suffix
= '3';
6381 usadd_optab
->libcall_gen
= gen_unsigned_fixed_libfunc
;
6382 sub_optab
->libcall_basename
= "sub";
6383 sub_optab
->libcall_suffix
= '3';
6384 sub_optab
->libcall_gen
= gen_int_fp_fixed_libfunc
;
6385 subv_optab
->libcall_basename
= "sub";
6386 subv_optab
->libcall_suffix
= '3';
6387 subv_optab
->libcall_gen
= gen_intv_fp_libfunc
;
6388 sssub_optab
->libcall_basename
= "sssub";
6389 sssub_optab
->libcall_suffix
= '3';
6390 sssub_optab
->libcall_gen
= gen_signed_fixed_libfunc
;
6391 ussub_optab
->libcall_basename
= "ussub";
6392 ussub_optab
->libcall_suffix
= '3';
6393 ussub_optab
->libcall_gen
= gen_unsigned_fixed_libfunc
;
6394 smul_optab
->libcall_basename
= "mul";
6395 smul_optab
->libcall_suffix
= '3';
6396 smul_optab
->libcall_gen
= gen_int_fp_fixed_libfunc
;
6397 smulv_optab
->libcall_basename
= "mul";
6398 smulv_optab
->libcall_suffix
= '3';
6399 smulv_optab
->libcall_gen
= gen_intv_fp_libfunc
;
6400 ssmul_optab
->libcall_basename
= "ssmul";
6401 ssmul_optab
->libcall_suffix
= '3';
6402 ssmul_optab
->libcall_gen
= gen_signed_fixed_libfunc
;
6403 usmul_optab
->libcall_basename
= "usmul";
6404 usmul_optab
->libcall_suffix
= '3';
6405 usmul_optab
->libcall_gen
= gen_unsigned_fixed_libfunc
;
6406 sdiv_optab
->libcall_basename
= "div";
6407 sdiv_optab
->libcall_suffix
= '3';
6408 sdiv_optab
->libcall_gen
= gen_int_fp_signed_fixed_libfunc
;
6409 sdivv_optab
->libcall_basename
= "divv";
6410 sdivv_optab
->libcall_suffix
= '3';
6411 sdivv_optab
->libcall_gen
= gen_int_libfunc
;
6412 ssdiv_optab
->libcall_basename
= "ssdiv";
6413 ssdiv_optab
->libcall_suffix
= '3';
6414 ssdiv_optab
->libcall_gen
= gen_signed_fixed_libfunc
;
6415 udiv_optab
->libcall_basename
= "udiv";
6416 udiv_optab
->libcall_suffix
= '3';
6417 udiv_optab
->libcall_gen
= gen_int_unsigned_fixed_libfunc
;
6418 usdiv_optab
->libcall_basename
= "usdiv";
6419 usdiv_optab
->libcall_suffix
= '3';
6420 usdiv_optab
->libcall_gen
= gen_unsigned_fixed_libfunc
;
6421 sdivmod_optab
->libcall_basename
= "divmod";
6422 sdivmod_optab
->libcall_suffix
= '4';
6423 sdivmod_optab
->libcall_gen
= gen_int_libfunc
;
6424 udivmod_optab
->libcall_basename
= "udivmod";
6425 udivmod_optab
->libcall_suffix
= '4';
6426 udivmod_optab
->libcall_gen
= gen_int_libfunc
;
6427 smod_optab
->libcall_basename
= "mod";
6428 smod_optab
->libcall_suffix
= '3';
6429 smod_optab
->libcall_gen
= gen_int_libfunc
;
6430 umod_optab
->libcall_basename
= "umod";
6431 umod_optab
->libcall_suffix
= '3';
6432 umod_optab
->libcall_gen
= gen_int_libfunc
;
6433 ftrunc_optab
->libcall_basename
= "ftrunc";
6434 ftrunc_optab
->libcall_suffix
= '2';
6435 ftrunc_optab
->libcall_gen
= gen_fp_libfunc
;
6436 and_optab
->libcall_basename
= "and";
6437 and_optab
->libcall_suffix
= '3';
6438 and_optab
->libcall_gen
= gen_int_libfunc
;
6439 ior_optab
->libcall_basename
= "ior";
6440 ior_optab
->libcall_suffix
= '3';
6441 ior_optab
->libcall_gen
= gen_int_libfunc
;
6442 xor_optab
->libcall_basename
= "xor";
6443 xor_optab
->libcall_suffix
= '3';
6444 xor_optab
->libcall_gen
= gen_int_libfunc
;
6445 ashl_optab
->libcall_basename
= "ashl";
6446 ashl_optab
->libcall_suffix
= '3';
6447 ashl_optab
->libcall_gen
= gen_int_fixed_libfunc
;
6448 ssashl_optab
->libcall_basename
= "ssashl";
6449 ssashl_optab
->libcall_suffix
= '3';
6450 ssashl_optab
->libcall_gen
= gen_signed_fixed_libfunc
;
6451 usashl_optab
->libcall_basename
= "usashl";
6452 usashl_optab
->libcall_suffix
= '3';
6453 usashl_optab
->libcall_gen
= gen_unsigned_fixed_libfunc
;
6454 ashr_optab
->libcall_basename
= "ashr";
6455 ashr_optab
->libcall_suffix
= '3';
6456 ashr_optab
->libcall_gen
= gen_int_signed_fixed_libfunc
;
6457 lshr_optab
->libcall_basename
= "lshr";
6458 lshr_optab
->libcall_suffix
= '3';
6459 lshr_optab
->libcall_gen
= gen_int_unsigned_fixed_libfunc
;
6460 smin_optab
->libcall_basename
= "min";
6461 smin_optab
->libcall_suffix
= '3';
6462 smin_optab
->libcall_gen
= gen_int_fp_libfunc
;
6463 smax_optab
->libcall_basename
= "max";
6464 smax_optab
->libcall_suffix
= '3';
6465 smax_optab
->libcall_gen
= gen_int_fp_libfunc
;
6466 umin_optab
->libcall_basename
= "umin";
6467 umin_optab
->libcall_suffix
= '3';
6468 umin_optab
->libcall_gen
= gen_int_libfunc
;
6469 umax_optab
->libcall_basename
= "umax";
6470 umax_optab
->libcall_suffix
= '3';
6471 umax_optab
->libcall_gen
= gen_int_libfunc
;
6472 neg_optab
->libcall_basename
= "neg";
6473 neg_optab
->libcall_suffix
= '2';
6474 neg_optab
->libcall_gen
= gen_int_fp_fixed_libfunc
;
6475 ssneg_optab
->libcall_basename
= "ssneg";
6476 ssneg_optab
->libcall_suffix
= '2';
6477 ssneg_optab
->libcall_gen
= gen_signed_fixed_libfunc
;
6478 usneg_optab
->libcall_basename
= "usneg";
6479 usneg_optab
->libcall_suffix
= '2';
6480 usneg_optab
->libcall_gen
= gen_unsigned_fixed_libfunc
;
6481 negv_optab
->libcall_basename
= "neg";
6482 negv_optab
->libcall_suffix
= '2';
6483 negv_optab
->libcall_gen
= gen_intv_fp_libfunc
;
6484 one_cmpl_optab
->libcall_basename
= "one_cmpl";
6485 one_cmpl_optab
->libcall_suffix
= '2';
6486 one_cmpl_optab
->libcall_gen
= gen_int_libfunc
;
6487 ffs_optab
->libcall_basename
= "ffs";
6488 ffs_optab
->libcall_suffix
= '2';
6489 ffs_optab
->libcall_gen
= gen_int_libfunc
;
6490 clz_optab
->libcall_basename
= "clz";
6491 clz_optab
->libcall_suffix
= '2';
6492 clz_optab
->libcall_gen
= gen_int_libfunc
;
6493 ctz_optab
->libcall_basename
= "ctz";
6494 ctz_optab
->libcall_suffix
= '2';
6495 ctz_optab
->libcall_gen
= gen_int_libfunc
;
6496 clrsb_optab
->libcall_basename
= "clrsb";
6497 clrsb_optab
->libcall_suffix
= '2';
6498 clrsb_optab
->libcall_gen
= gen_int_libfunc
;
6499 popcount_optab
->libcall_basename
= "popcount";
6500 popcount_optab
->libcall_suffix
= '2';
6501 popcount_optab
->libcall_gen
= gen_int_libfunc
;
6502 parity_optab
->libcall_basename
= "parity";
6503 parity_optab
->libcall_suffix
= '2';
6504 parity_optab
->libcall_gen
= gen_int_libfunc
;
6506 /* Comparison libcalls for integers MUST come in pairs,
6508 cmp_optab
->libcall_basename
= "cmp";
6509 cmp_optab
->libcall_suffix
= '2';
6510 cmp_optab
->libcall_gen
= gen_int_fp_fixed_libfunc
;
6511 ucmp_optab
->libcall_basename
= "ucmp";
6512 ucmp_optab
->libcall_suffix
= '2';
6513 ucmp_optab
->libcall_gen
= gen_int_libfunc
;
6515 /* EQ etc are floating point only. */
6516 eq_optab
->libcall_basename
= "eq";
6517 eq_optab
->libcall_suffix
= '2';
6518 eq_optab
->libcall_gen
= gen_fp_libfunc
;
6519 ne_optab
->libcall_basename
= "ne";
6520 ne_optab
->libcall_suffix
= '2';
6521 ne_optab
->libcall_gen
= gen_fp_libfunc
;
6522 gt_optab
->libcall_basename
= "gt";
6523 gt_optab
->libcall_suffix
= '2';
6524 gt_optab
->libcall_gen
= gen_fp_libfunc
;
6525 ge_optab
->libcall_basename
= "ge";
6526 ge_optab
->libcall_suffix
= '2';
6527 ge_optab
->libcall_gen
= gen_fp_libfunc
;
6528 lt_optab
->libcall_basename
= "lt";
6529 lt_optab
->libcall_suffix
= '2';
6530 lt_optab
->libcall_gen
= gen_fp_libfunc
;
6531 le_optab
->libcall_basename
= "le";
6532 le_optab
->libcall_suffix
= '2';
6533 le_optab
->libcall_gen
= gen_fp_libfunc
;
6534 unord_optab
->libcall_basename
= "unord";
6535 unord_optab
->libcall_suffix
= '2';
6536 unord_optab
->libcall_gen
= gen_fp_libfunc
;
6538 powi_optab
->libcall_basename
= "powi";
6539 powi_optab
->libcall_suffix
= '2';
6540 powi_optab
->libcall_gen
= gen_fp_libfunc
;
6543 sfloat_optab
->libcall_basename
= "float";
6544 sfloat_optab
->libcall_gen
= gen_int_to_fp_conv_libfunc
;
6545 ufloat_optab
->libcall_gen
= gen_ufloat_conv_libfunc
;
6546 sfix_optab
->libcall_basename
= "fix";
6547 sfix_optab
->libcall_gen
= gen_fp_to_int_conv_libfunc
;
6548 ufix_optab
->libcall_basename
= "fixuns";
6549 ufix_optab
->libcall_gen
= gen_fp_to_int_conv_libfunc
;
6550 lrint_optab
->libcall_basename
= "lrint";
6551 lrint_optab
->libcall_gen
= gen_int_to_fp_nondecimal_conv_libfunc
;
6552 lround_optab
->libcall_basename
= "lround";
6553 lround_optab
->libcall_gen
= gen_int_to_fp_nondecimal_conv_libfunc
;
6554 lfloor_optab
->libcall_basename
= "lfloor";
6555 lfloor_optab
->libcall_gen
= gen_int_to_fp_nondecimal_conv_libfunc
;
6556 lceil_optab
->libcall_basename
= "lceil";
6557 lceil_optab
->libcall_gen
= gen_int_to_fp_nondecimal_conv_libfunc
;
6559 /* trunc_optab is also used for FLOAT_EXTEND. */
6560 sext_optab
->libcall_basename
= "extend";
6561 sext_optab
->libcall_gen
= gen_extend_conv_libfunc
;
6562 trunc_optab
->libcall_basename
= "trunc";
6563 trunc_optab
->libcall_gen
= gen_trunc_conv_libfunc
;
6565 /* Conversions for fixed-point modes and other modes. */
6566 fract_optab
->libcall_basename
= "fract";
6567 fract_optab
->libcall_gen
= gen_fract_conv_libfunc
;
6568 satfract_optab
->libcall_basename
= "satfract";
6569 satfract_optab
->libcall_gen
= gen_satfract_conv_libfunc
;
6570 fractuns_optab
->libcall_basename
= "fractuns";
6571 fractuns_optab
->libcall_gen
= gen_fractuns_conv_libfunc
;
6572 satfractuns_optab
->libcall_basename
= "satfractuns";
6573 satfractuns_optab
->libcall_gen
= gen_satfractuns_conv_libfunc
;
6575 /* The ffs function operates on `int'. Fall back on it if we do not
6576 have a libgcc2 function for that width. */
6577 if (INT_TYPE_SIZE
< BITS_PER_WORD
)
6578 set_optab_libfunc (ffs_optab
, mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0),
6581 /* Explicitly initialize the bswap libfuncs since we need them to be
6582 valid for things other than word_mode. */
6583 if (targetm
.libfunc_gnu_prefix
)
6585 set_optab_libfunc (bswap_optab
, SImode
, "__gnu_bswapsi2");
6586 set_optab_libfunc (bswap_optab
, DImode
, "__gnu_bswapdi2");
6590 set_optab_libfunc (bswap_optab
, SImode
, "__bswapsi2");
6591 set_optab_libfunc (bswap_optab
, DImode
, "__bswapdi2");
6594 /* Use cabs for double complex abs, since systems generally have cabs.
6595 Don't define any libcall for float complex, so that cabs will be used. */
6596 if (complex_double_type_node
)
6597 set_optab_libfunc (abs_optab
, TYPE_MODE (complex_double_type_node
), "cabs");
6599 abort_libfunc
= init_one_libfunc ("abort");
6600 memcpy_libfunc
= init_one_libfunc ("memcpy");
6601 memmove_libfunc
= init_one_libfunc ("memmove");
6602 memcmp_libfunc
= init_one_libfunc ("memcmp");
6603 memset_libfunc
= init_one_libfunc ("memset");
6604 setbits_libfunc
= init_one_libfunc ("__setbits");
6606 #ifndef DONT_USE_BUILTIN_SETJMP
6607 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
6608 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
6610 setjmp_libfunc
= init_one_libfunc ("setjmp");
6611 longjmp_libfunc
= init_one_libfunc ("longjmp");
6613 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
6614 unwind_sjlj_unregister_libfunc
6615 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6617 /* For function entry/exit instrumentation. */
6618 profile_function_entry_libfunc
6619 = init_one_libfunc ("__cyg_profile_func_enter");
6620 profile_function_exit_libfunc
6621 = init_one_libfunc ("__cyg_profile_func_exit");
6623 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
6625 /* Allow the target to add more libcalls or rename some, etc. */
6626 targetm
.init_libfuncs ();
6629 /* A helper function for init_sync_libfuncs. Using the basename BASE,
6630 install libfuncs into TAB for BASE_N for 1 <= N <= MAX. */
6633 init_sync_libfuncs_1 (optab tab
, const char *base
, int max
)
6635 enum machine_mode mode
;
6637 size_t len
= strlen (base
);
6640 gcc_assert (max
<= 8);
6641 gcc_assert (len
+ 3 < sizeof (buf
));
6643 memcpy (buf
, base
, len
);
6646 buf
[len
+ 2] = '\0';
6649 for (i
= 1; i
<= max
; i
*= 2)
6651 buf
[len
+ 1] = '0' + i
;
6652 set_optab_libfunc (tab
, mode
, buf
);
6653 mode
= GET_MODE_2XWIDER_MODE (mode
);
6658 init_sync_libfuncs (int max
)
6660 if (!flag_sync_libcalls
)
6663 init_sync_libfuncs_1 (sync_compare_and_swap_optab
,
6664 "__sync_val_compare_and_swap", max
);
6665 init_sync_libfuncs_1 (sync_lock_test_and_set_optab
,
6666 "__sync_lock_test_and_set", max
);
6668 init_sync_libfuncs_1 (sync_old_add_optab
, "__sync_fetch_and_add", max
);
6669 init_sync_libfuncs_1 (sync_old_sub_optab
, "__sync_fetch_and_sub", max
);
6670 init_sync_libfuncs_1 (sync_old_ior_optab
, "__sync_fetch_and_or", max
);
6671 init_sync_libfuncs_1 (sync_old_and_optab
, "__sync_fetch_and_and", max
);
6672 init_sync_libfuncs_1 (sync_old_xor_optab
, "__sync_fetch_and_xor", max
);
6673 init_sync_libfuncs_1 (sync_old_nand_optab
, "__sync_fetch_and_nand", max
);
6675 init_sync_libfuncs_1 (sync_new_add_optab
, "__sync_add_and_fetch", max
);
6676 init_sync_libfuncs_1 (sync_new_sub_optab
, "__sync_sub_and_fetch", max
);
6677 init_sync_libfuncs_1 (sync_new_ior_optab
, "__sync_or_and_fetch", max
);
6678 init_sync_libfuncs_1 (sync_new_and_optab
, "__sync_and_and_fetch", max
);
6679 init_sync_libfuncs_1 (sync_new_xor_optab
, "__sync_xor_and_fetch", max
);
6680 init_sync_libfuncs_1 (sync_new_nand_optab
, "__sync_nand_and_fetch", max
);
6683 /* Print information about the current contents of the optabs on
6687 debug_optab_libfuncs (void)
6693 /* Dump the arithmetic optabs. */
6694 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
6695 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
6700 o
= &optab_table
[i
];
6701 l
= optab_libfunc (o
, (enum machine_mode
) j
);
6704 gcc_assert (GET_CODE (l
) == SYMBOL_REF
);
6705 fprintf (stderr
, "%s\t%s:\t%s\n",
6706 GET_RTX_NAME (optab_to_code (o
)),
6712 /* Dump the conversion optabs. */
6713 for (i
= 0; i
< (int) COI_MAX
; ++i
)
6714 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
6715 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
6720 o
= &convert_optab_table
[i
];
6721 l
= convert_optab_libfunc (o
, (enum machine_mode
) j
,
6722 (enum machine_mode
) k
);
6725 gcc_assert (GET_CODE (l
) == SYMBOL_REF
);
6726 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
6727 GET_RTX_NAME (optab_to_code (o
)),
6736 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6737 CODE. Return 0 on failure. */
6740 gen_cond_trap (enum rtx_code code
, rtx op1
, rtx op2
, rtx tcode
)
6742 enum machine_mode mode
= GET_MODE (op1
);
6743 enum insn_code icode
;
6747 if (mode
== VOIDmode
)
6750 icode
= optab_handler (ctrap_optab
, mode
);
6751 if (icode
== CODE_FOR_nothing
)
6754 /* Some targets only accept a zero trap code. */
6755 if (!insn_operand_matches (icode
, 3, tcode
))
6758 do_pending_stack_adjust ();
6760 prepare_cmp_insn (op1
, op2
, code
, NULL_RTX
, false, OPTAB_DIRECT
,
6765 insn
= GEN_FCN (icode
) (trap_rtx
, XEXP (trap_rtx
, 0), XEXP (trap_rtx
, 1),
6768 /* If that failed, then give up. */
6776 insn
= get_insns ();
6781 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6782 or unsigned operation code. */
6784 static enum rtx_code
6785 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
6797 code
= unsignedp
? LTU
: LT
;
6800 code
= unsignedp
? LEU
: LE
;
6803 code
= unsignedp
? GTU
: GT
;
6806 code
= unsignedp
? GEU
: GE
;
6809 case UNORDERED_EXPR
:
6840 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6841 unsigned operators. Do not generate compare instruction. */
6844 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
6846 struct expand_operand ops
[2];
6847 enum rtx_code rcode
;
6849 rtx rtx_op0
, rtx_op1
;
6851 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6852 ensures that condition is a relational operation. */
6853 gcc_assert (COMPARISON_CLASS_P (cond
));
6855 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
6856 t_op0
= TREE_OPERAND (cond
, 0);
6857 t_op1
= TREE_OPERAND (cond
, 1);
6859 /* Expand operands. */
6860 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)),
6862 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)),
6865 create_input_operand (&ops
[0], rtx_op0
, GET_MODE (rtx_op0
));
6866 create_input_operand (&ops
[1], rtx_op1
, GET_MODE (rtx_op1
));
6867 if (!maybe_legitimize_operands (icode
, 4, 2, ops
))
6869 return gen_rtx_fmt_ee (rcode
, VOIDmode
, ops
[0].value
, ops
[1].value
);
6872 /* Return true if VEC_PERM_EXPR can be expanded using SIMD extensions
6873 of the CPU. SEL may be NULL, which stands for an unknown constant. */
6876 can_vec_perm_p (enum machine_mode mode
, bool variable
,
6877 const unsigned char *sel
)
6879 enum machine_mode qimode
;
6881 /* If the target doesn't implement a vector mode for the vector type,
6882 then no operations are supported. */
6883 if (!VECTOR_MODE_P (mode
))
6888 if (direct_optab_handler (vec_perm_const_optab
, mode
) != CODE_FOR_nothing
6890 || targetm
.vectorize
.vec_perm_const_ok
== NULL
6891 || targetm
.vectorize
.vec_perm_const_ok (mode
, sel
)))
6895 if (direct_optab_handler (vec_perm_optab
, mode
) != CODE_FOR_nothing
)
6898 /* We allow fallback to a QI vector mode, and adjust the mask. */
6899 if (GET_MODE_INNER (mode
) == QImode
)
6901 qimode
= mode_for_vector (QImode
, GET_MODE_SIZE (mode
));
6902 if (!VECTOR_MODE_P (qimode
))
6905 /* ??? For completeness, we ought to check the QImode version of
6906 vec_perm_const_optab. But all users of this implicit lowering
6907 feature implement the variable vec_perm_optab. */
6908 if (direct_optab_handler (vec_perm_optab
, qimode
) == CODE_FOR_nothing
)
6911 /* In order to support the lowering of variable permutations,
6912 we need to support shifts and adds. */
6915 if (GET_MODE_UNIT_SIZE (mode
) > 2
6916 && optab_handler (ashl_optab
, mode
) == CODE_FOR_nothing
6917 && optab_handler (vashl_optab
, mode
) == CODE_FOR_nothing
)
6919 if (optab_handler (add_optab
, qimode
) == CODE_FOR_nothing
)
6926 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
6929 expand_vec_perm_1 (enum insn_code icode
, rtx target
,
6930 rtx v0
, rtx v1
, rtx sel
)
6932 enum machine_mode tmode
= GET_MODE (target
);
6933 enum machine_mode smode
= GET_MODE (sel
);
6934 struct expand_operand ops
[4];
6936 create_output_operand (&ops
[0], target
, tmode
);
6937 create_input_operand (&ops
[3], sel
, smode
);
6939 /* Make an effort to preserve v0 == v1. The target expander is able to
6940 rely on this to determine if we're permuting a single input operand. */
6941 if (rtx_equal_p (v0
, v1
))
6943 if (!insn_operand_matches (icode
, 1, v0
))
6944 v0
= force_reg (tmode
, v0
);
6945 gcc_checking_assert (insn_operand_matches (icode
, 1, v0
));
6946 gcc_checking_assert (insn_operand_matches (icode
, 2, v0
));
6948 create_fixed_operand (&ops
[1], v0
);
6949 create_fixed_operand (&ops
[2], v0
);
6953 create_input_operand (&ops
[1], v0
, tmode
);
6954 create_input_operand (&ops
[2], v1
, tmode
);
6957 if (maybe_expand_insn (icode
, 4, ops
))
6958 return ops
[0].value
;
6962 /* Generate instructions for vec_perm optab given its mode
6963 and three operands. */
6966 expand_vec_perm (enum machine_mode mode
, rtx v0
, rtx v1
, rtx sel
, rtx target
)
6968 enum insn_code icode
;
6969 enum machine_mode qimode
;
6970 unsigned int i
, w
, e
, u
;
6971 rtx tmp
, sel_qi
= NULL
;
6974 if (!target
|| GET_MODE (target
) != mode
)
6975 target
= gen_reg_rtx (mode
);
6977 w
= GET_MODE_SIZE (mode
);
6978 e
= GET_MODE_NUNITS (mode
);
6979 u
= GET_MODE_UNIT_SIZE (mode
);
6981 /* Set QIMODE to a different vector mode with byte elements.
6982 If no such mode, or if MODE already has byte elements, use VOIDmode. */
6984 if (GET_MODE_INNER (mode
) != QImode
)
6986 qimode
= mode_for_vector (QImode
, w
);
6987 if (!VECTOR_MODE_P (qimode
))
6991 /* If the input is a constant, expand it specially. */
6992 gcc_assert (GET_MODE_CLASS (GET_MODE (sel
)) == MODE_VECTOR_INT
);
6993 if (GET_CODE (sel
) == CONST_VECTOR
)
6995 icode
= direct_optab_handler (vec_perm_const_optab
, mode
);
6996 if (icode
!= CODE_FOR_nothing
)
6998 tmp
= expand_vec_perm_1 (icode
, target
, v0
, v1
, sel
);
7003 /* Fall back to a constant byte-based permutation. */
7004 if (qimode
!= VOIDmode
)
7006 vec
= rtvec_alloc (w
);
7007 for (i
= 0; i
< e
; ++i
)
7009 unsigned int j
, this_e
;
7011 this_e
= INTVAL (CONST_VECTOR_ELT (sel
, i
));
7012 this_e
&= 2 * e
- 1;
7015 for (j
= 0; j
< u
; ++j
)
7016 RTVEC_ELT (vec
, i
* u
+ j
) = GEN_INT (this_e
+ j
);
7018 sel_qi
= gen_rtx_CONST_VECTOR (qimode
, vec
);
7020 icode
= direct_optab_handler (vec_perm_const_optab
, qimode
);
7021 if (icode
!= CODE_FOR_nothing
)
7023 tmp
= expand_vec_perm_1 (icode
, gen_lowpart (qimode
, target
),
7024 gen_lowpart (qimode
, v0
),
7025 gen_lowpart (qimode
, v1
), sel_qi
);
7027 return gen_lowpart (mode
, tmp
);
7032 /* Otherwise expand as a fully variable permutation. */
7033 icode
= direct_optab_handler (vec_perm_optab
, mode
);
7034 if (icode
!= CODE_FOR_nothing
)
7036 tmp
= expand_vec_perm_1 (icode
, target
, v0
, v1
, sel
);
7041 /* As a special case to aid several targets, lower the element-based
7042 permutation to a byte-based permutation and try again. */
7043 if (qimode
== VOIDmode
)
7045 icode
= direct_optab_handler (vec_perm_optab
, qimode
);
7046 if (icode
== CODE_FOR_nothing
)
7051 /* Multiply each element by its byte size. */
7052 enum machine_mode selmode
= GET_MODE (sel
);
7054 sel
= expand_simple_binop (selmode
, PLUS
, sel
, sel
,
7055 sel
, 0, OPTAB_DIRECT
);
7057 sel
= expand_simple_binop (selmode
, ASHIFT
, sel
,
7058 GEN_INT (exact_log2 (u
)),
7059 sel
, 0, OPTAB_DIRECT
);
7060 gcc_assert (sel
!= NULL
);
7062 /* Broadcast the low byte each element into each of its bytes. */
7063 vec
= rtvec_alloc (w
);
7064 for (i
= 0; i
< w
; ++i
)
7066 int this_e
= i
/ u
* u
;
7067 if (BYTES_BIG_ENDIAN
)
7069 RTVEC_ELT (vec
, i
) = GEN_INT (this_e
);
7071 tmp
= gen_rtx_CONST_VECTOR (qimode
, vec
);
7072 sel
= gen_lowpart (qimode
, sel
);
7073 sel
= expand_vec_perm (qimode
, sel
, sel
, tmp
, NULL
);
7074 gcc_assert (sel
!= NULL
);
7076 /* Add the byte offset to each byte element. */
7077 /* Note that the definition of the indicies here is memory ordering,
7078 so there should be no difference between big and little endian. */
7079 vec
= rtvec_alloc (w
);
7080 for (i
= 0; i
< w
; ++i
)
7081 RTVEC_ELT (vec
, i
) = GEN_INT (i
% u
);
7082 tmp
= gen_rtx_CONST_VECTOR (qimode
, vec
);
7083 sel_qi
= expand_simple_binop (qimode
, PLUS
, sel
, tmp
,
7084 sel
, 0, OPTAB_DIRECT
);
7085 gcc_assert (sel_qi
!= NULL
);
7088 tmp
= expand_vec_perm_1 (icode
, gen_lowpart (qimode
, target
),
7089 gen_lowpart (qimode
, v0
),
7090 gen_lowpart (qimode
, v1
), sel_qi
);
7092 tmp
= gen_lowpart (mode
, tmp
);
7096 /* Return insn code for a conditional operator with a comparison in
7097 mode CMODE, unsigned if UNS is true, resulting in a value of mode VMODE. */
/* Look up the insn code for a vector conditional move that produces a
   VMODE result from a comparison done in CMODE.  UNS selects between
   the unsigned (vcondu) and signed (vcond) patterns.
   NOTE(review): the conditional that selects between the two lookups
   based on UNS is elided in this excerpt — confirm against the full
   source.  */
7099 static inline enum insn_code
7100 get_vcond_icode (enum machine_mode vmode
, enum machine_mode cmode
, bool uns
)
7102 enum insn_code icode
= CODE_FOR_nothing
;
/* Unsigned comparison: use the vcondu conversion optab.  */
7104 icode
= convert_optab_handler (vcondu_optab
, vmode
, cmode
);
/* Signed comparison: use the vcond conversion optab.  */
7106 icode
= convert_optab_handler (vcond_optab
, vmode
, cmode
);
7110 /* Return TRUE iff, appropriate vector insns are available
7111 for vector cond expr with vector type VALUE_TYPE and a comparison
7112 with operand vector types in CMP_OP_TYPE. */
/* Return whether a vcond/vcondu pattern is usable for a VEC_COND_EXPR
   whose result has type VALUE_TYPE and whose comparison operands have
   type CMP_OP_TYPE: the two vector modes must agree in total size and
   in number of units, and get_vcond_icode must find a pattern.  */
7115 expand_vec_cond_expr_p (tree value_type
, tree cmp_op_type
)
7117 enum machine_mode value_mode
= TYPE_MODE (value_type
);
7118 enum machine_mode cmp_op_mode
= TYPE_MODE (cmp_op_type
);
/* Reject mismatched vector shapes or a missing vcond pattern.
   NOTE(review): the return statements guarded by this test are elided
   in this excerpt.  */
7119 if (GET_MODE_SIZE (value_mode
) != GET_MODE_SIZE (cmp_op_mode
)
7120 || GET_MODE_NUNITS (value_mode
) != GET_MODE_NUNITS (cmp_op_mode
)
7121 || get_vcond_icode (TYPE_MODE (value_type
), TYPE_MODE (cmp_op_type
),
7122 TYPE_UNSIGNED (cmp_op_type
)) == CODE_FOR_nothing
)
7127 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
7131 expand_vec_cond_expr (tree vec_cond_type
, tree op0
, tree op1
, tree op2
,
7134 struct expand_operand ops
[6];
7135 enum insn_code icode
;
7136 rtx comparison
, rtx_op1
, rtx_op2
;
7137 enum machine_mode mode
= TYPE_MODE (vec_cond_type
);
7138 enum machine_mode cmp_op_mode
;
7141 gcc_assert (COMPARISON_CLASS_P (op0
));
7143 unsignedp
= TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0
, 0)));
7144 cmp_op_mode
= TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0
, 0)));
7146 gcc_assert (GET_MODE_SIZE (mode
) == GET_MODE_SIZE (cmp_op_mode
)
7147 && GET_MODE_NUNITS (mode
) == GET_MODE_NUNITS (cmp_op_mode
));
7149 icode
= get_vcond_icode (mode
, cmp_op_mode
, unsignedp
);
7150 if (icode
== CODE_FOR_nothing
)
7153 comparison
= vector_compare_rtx (op0
, unsignedp
, icode
);
7154 rtx_op1
= expand_normal (op1
);
7155 rtx_op2
= expand_normal (op2
);
7157 create_output_operand (&ops
[0], target
, mode
);
7158 create_input_operand (&ops
[1], rtx_op1
, mode
);
7159 create_input_operand (&ops
[2], rtx_op2
, mode
);
7160 create_fixed_operand (&ops
[3], comparison
);
7161 create_fixed_operand (&ops
[4], XEXP (comparison
, 0));
7162 create_fixed_operand (&ops
[5], XEXP (comparison
, 1));
7163 expand_insn (icode
, 6, ops
);
7164 return ops
[0].value
;
7167 /* Return non-zero if a highpart multiply is supported or can be synthesized.
7168 For the benefit of expand_mult_highpart, the return value is 1 for direct,
7169 2 for even/odd widening, and 3 for hi/lo widening. */
7172 can_mult_highpart_p (enum machine_mode mode
, bool uns_p
)
7178 op
= uns_p
? umul_highpart_optab
: smul_highpart_optab
;
7179 if (optab_handler (op
, mode
) != CODE_FOR_nothing
)
7182 /* If the mode is an integral vector, synth from widening operations. */
7183 if (GET_MODE_CLASS (mode
) != MODE_VECTOR_INT
)
7186 nunits
= GET_MODE_NUNITS (mode
);
7187 sel
= XALLOCAVEC (unsigned char, nunits
);
7189 op
= uns_p
? vec_widen_umult_even_optab
: vec_widen_smult_even_optab
;
7190 if (optab_handler (op
, mode
) != CODE_FOR_nothing
)
7192 op
= uns_p
? vec_widen_umult_odd_optab
: vec_widen_smult_odd_optab
;
7193 if (optab_handler (op
, mode
) != CODE_FOR_nothing
)
7195 for (i
= 0; i
< nunits
; ++i
)
7196 sel
[i
] = !BYTES_BIG_ENDIAN
+ (i
& ~1) + ((i
& 1) ? nunits
: 0);
7197 if (can_vec_perm_p (mode
, false, sel
))
7202 op
= uns_p
? vec_widen_umult_hi_optab
: vec_widen_smult_hi_optab
;
7203 if (optab_handler (op
, mode
) != CODE_FOR_nothing
)
7205 op
= uns_p
? vec_widen_umult_lo_optab
: vec_widen_smult_lo_optab
;
7206 if (optab_handler (op
, mode
) != CODE_FOR_nothing
)
7208 for (i
= 0; i
< nunits
; ++i
)
7209 sel
[i
] = 2 * i
+ (BYTES_BIG_ENDIAN
? 0 : 1);
7210 if (can_vec_perm_p (mode
, false, sel
))
7218 /* Expand a highpart multiply. */
7221 expand_mult_highpart (enum machine_mode mode
, rtx op0
, rtx op1
,
7222 rtx target
, bool uns_p
)
7224 struct expand_operand eops
[3];
7225 enum insn_code icode
;
7226 int method
, i
, nunits
;
7227 enum machine_mode wmode
;
7232 method
= can_mult_highpart_p (mode
, uns_p
);
7238 tab1
= uns_p
? umul_highpart_optab
: smul_highpart_optab
;
7239 return expand_binop (mode
, tab1
, op0
, op1
, target
, uns_p
,
7242 tab1
= uns_p
? vec_widen_umult_even_optab
: vec_widen_smult_even_optab
;
7243 tab2
= uns_p
? vec_widen_umult_odd_optab
: vec_widen_smult_odd_optab
;
7246 tab1
= uns_p
? vec_widen_umult_lo_optab
: vec_widen_smult_lo_optab
;
7247 tab2
= uns_p
? vec_widen_umult_hi_optab
: vec_widen_smult_hi_optab
;
7248 if (BYTES_BIG_ENDIAN
)
7259 icode
= optab_handler (tab1
, mode
);
7260 nunits
= GET_MODE_NUNITS (mode
);
7261 wmode
= insn_data
[icode
].operand
[0].mode
;
7262 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode
) == nunits
);
7263 gcc_checking_assert (GET_MODE_SIZE (wmode
) == GET_MODE_SIZE (mode
));
7265 create_output_operand (&eops
[0], gen_reg_rtx (wmode
), wmode
);
7266 create_input_operand (&eops
[1], op0
, mode
);
7267 create_input_operand (&eops
[2], op1
, mode
);
7268 expand_insn (icode
, 3, eops
);
7269 m1
= gen_lowpart (mode
, eops
[0].value
);
7271 create_output_operand (&eops
[0], gen_reg_rtx (wmode
), wmode
);
7272 create_input_operand (&eops
[1], op0
, mode
);
7273 create_input_operand (&eops
[2], op1
, mode
);
7274 expand_insn (optab_handler (tab2
, mode
), 3, eops
);
7275 m2
= gen_lowpart (mode
, eops
[0].value
);
7277 v
= rtvec_alloc (nunits
);
7280 for (i
= 0; i
< nunits
; ++i
)
7281 RTVEC_ELT (v
, i
) = GEN_INT (!BYTES_BIG_ENDIAN
+ (i
& ~1)
7282 + ((i
& 1) ? nunits
: 0));
7286 for (i
= 0; i
< nunits
; ++i
)
7287 RTVEC_ELT (v
, i
) = GEN_INT (2 * i
+ (BYTES_BIG_ENDIAN
? 0 : 1));
7289 perm
= gen_rtx_CONST_VECTOR (mode
, v
);
7291 return expand_vec_perm (mode
, m1
, m2
, perm
, target
);
7294 /* Return true if there is a compare_and_swap pattern. */
/* Return whether the target can perform an atomic compare-and-swap in
   MODE, checking the new __atomic pattern first and then the legacy
   __sync pattern; when ALLOW_LIBCALL, an external __sync libcall also
   qualifies.  NOTE(review): the "return true" statements guarded by
   each test are elided in this excerpt.  */
7297 can_compare_and_swap_p (enum machine_mode mode
, bool allow_libcall
)
7299 enum insn_code icode
;
7301 /* Check for __atomic_compare_and_swap. */
7302 icode
= direct_optab_handler (atomic_compare_and_swap_optab
, mode
);
7303 if (icode
!= CODE_FOR_nothing
)
7306 /* Check for __sync_compare_and_swap. */
7307 icode
= optab_handler (sync_compare_and_swap_optab
, mode
);
7308 if (icode
!= CODE_FOR_nothing
)
/* As a last resort, accept an out-of-line libcall if permitted.  */
7310 if (allow_libcall
&& optab_libfunc (sync_compare_and_swap_optab
, mode
))
7313 /* No inline compare and swap. */
7317 /* Return true if an atomic exchange can be performed. */
/* Return whether an atomic exchange can be performed in MODE: either a
   direct atomic_exchange pattern exists, or a compare-and-swap is
   available to synthesize one (ALLOW_LIBCALL is forwarded to that
   check).  */
7320 can_atomic_exchange_p (enum machine_mode mode
, bool allow_libcall
)
7322 enum insn_code icode
;
7324 /* Check for __atomic_exchange. */
7325 icode
= direct_optab_handler (atomic_exchange_optab
, mode
);
7326 if (icode
!= CODE_FOR_nothing
)
7329 /* Don't check __sync_test_and_set, as on some platforms that
7330 has reduced functionality. Targets that really do support
7331 a proper exchange should simply be updated to the __atomics. */
/* Fall back to synthesizing the exchange via compare-and-swap.  */
7333 return can_compare_and_swap_p (mode
, allow_libcall
);
7337 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
/* note_stores callback: when X is a MODE_CC register written by a SET,
   record it in the rtx pointed to by DATA.  The assert enforces that
   at most one such CC set is found in the pattern.  */
7341 find_cc_set (rtx x
, const_rtx pat
, void *data
)
7343 if (REG_P (x
) && GET_MODE_CLASS (GET_MODE (x
)) == MODE_CC
7344 && GET_CODE (pat
) == SET
)
7346 rtx
*p_cc_reg
= (rtx
*) data
;
7347 gcc_assert (!*p_cc_reg
);
7352 /* This is a helper function for the other atomic operations. This function
7353 emits a loop that contains SEQ that iterates until a compare-and-swap
7354 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7355 a set of instructions that takes a value from OLD_REG as an input and
7356 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7357 set to the current contents of MEM. After SEQ, a compare-and-swap will
7358 attempt to update MEM with NEW_REG. The function returns true when the
7359 loop was generated successfully. */
7362 expand_compare_and_swap_loop (rtx mem
, rtx old_reg
, rtx new_reg
, rtx seq
)
7364 enum machine_mode mode
= GET_MODE (mem
);
7365 rtx label
, cmp_reg
, success
, oldval
;
7367 /* The loop we want to generate looks like
7373 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
7377 Note that we only do the plain load from memory once. Subsequent
7378 iterations use the value loaded by the compare-and-swap pattern. */
7380 label
= gen_label_rtx ();
7381 cmp_reg
= gen_reg_rtx (mode
);
7383 emit_move_insn (cmp_reg
, mem
);
7385 emit_move_insn (old_reg
, cmp_reg
);
7391 if (!expand_atomic_compare_and_swap (&success
, &oldval
, mem
, old_reg
,
7392 new_reg
, false, MEMMODEL_SEQ_CST
,
7396 if (oldval
!= cmp_reg
)
7397 emit_move_insn (cmp_reg
, oldval
);
7399 /* ??? Mark this jump predicted not taken? */
7400 emit_cmp_and_jump_insns (success
, const0_rtx
, EQ
, const0_rtx
,
7401 GET_MODE (success
), 1, label
);
7406 /* This function tries to emit an atomic_exchange instruction. VAL is written
7407 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
7408 using TARGET if possible. */
7411 maybe_emit_atomic_exchange (rtx target
, rtx mem
, rtx val
, enum memmodel model
)
7413 enum machine_mode mode
= GET_MODE (mem
);
7414 enum insn_code icode
;
7416 /* If the target supports the exchange directly, great. */
7417 icode
= direct_optab_handler (atomic_exchange_optab
, mode
);
7418 if (icode
!= CODE_FOR_nothing
)
7420 struct expand_operand ops
[4];
7422 create_output_operand (&ops
[0], target
, mode
);
7423 create_fixed_operand (&ops
[1], mem
);
7424 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7425 create_convert_operand_to (&ops
[2], val
, mode
, true);
7426 create_integer_operand (&ops
[3], model
);
7427 if (maybe_expand_insn (icode
, 4, ops
))
7428 return ops
[0].value
;
7434 /* This function tries to implement an atomic exchange operation using
7435 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
7436 The previous contents of *MEM are returned, using TARGET if possible.
7437 Since this instruction is an acquire barrier only, stronger memory
7438 models may require additional barriers to be emitted. */
7441 maybe_emit_sync_lock_test_and_set (rtx target
, rtx mem
, rtx val
,
7442 enum memmodel model
)
7444 enum machine_mode mode
= GET_MODE (mem
);
7445 enum insn_code icode
;
7446 rtx last_insn
= get_last_insn ();
7448 icode
= optab_handler (sync_lock_test_and_set_optab
, mode
);
7450 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
7451 exists, and the memory model is stronger than acquire, add a release
7452 barrier before the instruction. */
7454 if (model
== MEMMODEL_SEQ_CST
7455 || model
== MEMMODEL_RELEASE
7456 || model
== MEMMODEL_ACQ_REL
)
7457 expand_mem_thread_fence (model
);
7459 if (icode
!= CODE_FOR_nothing
)
7461 struct expand_operand ops
[3];
7462 create_output_operand (&ops
[0], target
, mode
);
7463 create_fixed_operand (&ops
[1], mem
);
7464 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7465 create_convert_operand_to (&ops
[2], val
, mode
, true);
7466 if (maybe_expand_insn (icode
, 3, ops
))
7467 return ops
[0].value
;
7470 /* If an external test-and-set libcall is provided, use that instead of
7471 any external compare-and-swap that we might get from the compare-and-
7472 swap-loop expansion later. */
7473 if (!can_compare_and_swap_p (mode
, false))
7475 rtx libfunc
= optab_libfunc (sync_lock_test_and_set_optab
, mode
);
7476 if (libfunc
!= NULL
)
7480 addr
= convert_memory_address (ptr_mode
, XEXP (mem
, 0));
7481 return emit_library_call_value (libfunc
, NULL_RTX
, LCT_NORMAL
,
7482 mode
, 2, addr
, ptr_mode
,
7487 /* If the test_and_set can't be emitted, eliminate any barrier that might
7488 have been emitted. */
7489 delete_insns_since (last_insn
);
7493 /* This function tries to implement an atomic exchange operation using a
7494 compare_and_swap loop. VAL is written to *MEM. The previous contents of
7495 *MEM are returned, using TARGET if possible. No memory model is required
7496 since a compare_and_swap loop is seq-cst. */
/* Try to implement an atomic exchange of VAL into *MEM via a
   compare-and-swap loop, returning the previous contents (in TARGET
   when possible).  Only attempted when a CAS exists for MEM's mode;
   VAL is widened/narrowed to that mode first.  */
7499 maybe_emit_compare_and_swap_exchange_loop (rtx target
, rtx mem
, rtx val
)
7501 enum machine_mode mode
= GET_MODE (mem
);
7503 if (can_compare_and_swap_p (mode
, true))
/* Ensure TARGET is a register of the right mode for the loop.  */
7505 if (!target
|| !register_operand (target
, mode
))
7506 target
= gen_reg_rtx (mode
);
/* Convert VAL to MEM's mode if it carries a different one.  */
7507 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
7508 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
/* An empty SEQ (NULL_RTX) makes the loop store VAL unchanged.  */
7509 if (expand_compare_and_swap_loop (mem
, target
, val
, NULL_RTX
))
7516 /* This function tries to implement an atomic test-and-set operation
7517 using the atomic_test_and_set instruction pattern. A boolean value
7518 is returned from the operation, using TARGET if possible. */
7520 #ifndef HAVE_atomic_test_and_set
7521 #define HAVE_atomic_test_and_set 0
7522 #define CODE_FOR_atomic_test_and_set CODE_FOR_nothing
7526 maybe_emit_atomic_test_and_set (rtx target
, rtx mem
, enum memmodel model
)
7528 enum machine_mode pat_bool_mode
;
7529 struct expand_operand ops
[3];
7531 if (!HAVE_atomic_test_and_set
)
7534 /* While we always get QImode from __atomic_test_and_set, we get
7535 other memory modes from __sync_lock_test_and_set. Note that we
7536 use no endian adjustment here. This matches the 4.6 behavior
7537 in the Sparc backend. */
7539 (insn_data
[CODE_FOR_atomic_test_and_set
].operand
[1].mode
== QImode
);
7540 if (GET_MODE (mem
) != QImode
)
7541 mem
= adjust_address_nv (mem
, QImode
, 0);
7543 pat_bool_mode
= insn_data
[CODE_FOR_atomic_test_and_set
].operand
[0].mode
;
7544 create_output_operand (&ops
[0], target
, pat_bool_mode
);
7545 create_fixed_operand (&ops
[1], mem
);
7546 create_integer_operand (&ops
[2], model
);
7548 if (maybe_expand_insn (CODE_FOR_atomic_test_and_set
, 3, ops
))
7549 return ops
[0].value
;
7553 /* This function expands the legacy _sync_lock test_and_set operation which is
7554 generally an atomic exchange. Some limited targets only allow the
7555 constant 1 to be stored. This is an ACQUIRE operation.
7557 TARGET is an optional place to stick the return value.
7558 MEM is where VAL is stored. */
7561 expand_sync_lock_test_and_set (rtx target
, rtx mem
, rtx val
)
7565 /* Try an atomic_exchange first. */
7566 ret
= maybe_emit_atomic_exchange (target
, mem
, val
, MEMMODEL_ACQUIRE
);
7570 ret
= maybe_emit_sync_lock_test_and_set (target
, mem
, val
, MEMMODEL_ACQUIRE
);
7574 ret
= maybe_emit_compare_and_swap_exchange_loop (target
, mem
, val
);
7578 /* If there are no other options, try atomic_test_and_set if the value
7579 being stored is 1. */
7580 if (val
== const1_rtx
)
7581 ret
= maybe_emit_atomic_test_and_set (target
, mem
, MEMMODEL_ACQUIRE
);
7586 /* This function expands the atomic test_and_set operation:
7587 atomically store a boolean TRUE into MEM and return the previous value.
7589 MEMMODEL is the memory model variant to use.
7590 TARGET is an optional place to stick the return value. */
7593 expand_atomic_test_and_set (rtx target
, rtx mem
, enum memmodel model
)
7595 enum machine_mode mode
= GET_MODE (mem
);
7596 rtx ret
, trueval
, subtarget
;
7598 ret
= maybe_emit_atomic_test_and_set (target
, mem
, model
);
7602 /* Be binary compatible with non-default settings of trueval, and different
7603 cpu revisions. E.g. one revision may have atomic-test-and-set, but
7604 another only has atomic-exchange. */
7605 if (targetm
.atomic_test_and_set_trueval
== 1)
7607 trueval
= const1_rtx
;
7608 subtarget
= target
? target
: gen_reg_rtx (mode
);
7612 trueval
= gen_int_mode (targetm
.atomic_test_and_set_trueval
, mode
);
7613 subtarget
= gen_reg_rtx (mode
);
7616 /* Try the atomic-exchange optab... */
7617 ret
= maybe_emit_atomic_exchange (subtarget
, mem
, trueval
, model
);
7619 /* ... then an atomic-compare-and-swap loop ... */
7621 ret
= maybe_emit_compare_and_swap_exchange_loop (subtarget
, mem
, trueval
);
7623 /* ... before trying the vaguely defined legacy lock_test_and_set. */
7625 ret
= maybe_emit_sync_lock_test_and_set (subtarget
, mem
, trueval
, model
);
7627 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
7628 things with the value 1. Thus we try again without trueval. */
7629 if (!ret
&& targetm
.atomic_test_and_set_trueval
!= 1)
7630 ret
= maybe_emit_sync_lock_test_and_set (subtarget
, mem
, const1_rtx
, model
);
7632 /* Failing all else, assume a single threaded environment and simply
7633 perform the operation. */
7636 emit_move_insn (subtarget
, mem
);
7637 emit_move_insn (mem
, trueval
);
7641 /* Recall that have to return a boolean value; rectify if trueval
7642 is not exactly one. */
7643 if (targetm
.atomic_test_and_set_trueval
!= 1)
7644 ret
= emit_store_flag_force (target
, NE
, ret
, const0_rtx
, mode
, 0, 1);
7649 /* This function expands the atomic exchange operation:
7650 atomically store VAL in MEM and return the previous value in MEM.
7652 MEMMODEL is the memory model variant to use.
7653 TARGET is an optional place to stick the return value. */
/* Expand an atomic exchange: store VAL into *MEM under memory model
   MODEL and return the previous value (in TARGET when possible).
   Tries the direct atomic_exchange pattern first, then falls back to
   a compare-and-swap loop (which is seq-cst and thus satisfies any
   MODEL).  */
7656 expand_atomic_exchange (rtx target
, rtx mem
, rtx val
, enum memmodel model
)
7660 ret
= maybe_emit_atomic_exchange (target
, mem
, val
, model
);
7662 /* Next try a compare-and-swap loop for the exchange. */
7664 ret
= maybe_emit_compare_and_swap_exchange_loop (target
, mem
, val
);
7669 /* This function expands the atomic compare exchange operation:
7671 *PTARGET_BOOL is an optional place to store the boolean success/failure.
7672 *PTARGET_OVAL is an optional place to store the old value from memory.
7673 Both target parameters may be NULL to indicate that we do not care about
7674 that return value. Both target parameters are updated on success to
7675 the actual location of the corresponding result.
7677 MEMMODEL is the memory model variant to use.
7679 The return value of the function is true for success. */
7682 expand_atomic_compare_and_swap (rtx
*ptarget_bool
, rtx
*ptarget_oval
,
7683 rtx mem
, rtx expected
, rtx desired
,
7684 bool is_weak
, enum memmodel succ_model
,
7685 enum memmodel fail_model
)
7687 enum machine_mode mode
= GET_MODE (mem
);
7688 struct expand_operand ops
[8];
7689 enum insn_code icode
;
7690 rtx target_oval
, target_bool
= NULL_RTX
;
7693 /* Load expected into a register for the compare and swap. */
7694 if (MEM_P (expected
))
7695 expected
= copy_to_reg (expected
);
7697 /* Make sure we always have some place to put the return oldval.
7698 Further, make sure that place is distinct from the input expected,
7699 just in case we need that path down below. */
7700 if (ptarget_oval
== NULL
7701 || (target_oval
= *ptarget_oval
) == NULL
7702 || reg_overlap_mentioned_p (expected
, target_oval
))
7703 target_oval
= gen_reg_rtx (mode
);
7705 icode
= direct_optab_handler (atomic_compare_and_swap_optab
, mode
);
7706 if (icode
!= CODE_FOR_nothing
)
7708 enum machine_mode bool_mode
= insn_data
[icode
].operand
[0].mode
;
7710 /* Make sure we always have a place for the bool operand. */
7711 if (ptarget_bool
== NULL
7712 || (target_bool
= *ptarget_bool
) == NULL
7713 || GET_MODE (target_bool
) != bool_mode
)
7714 target_bool
= gen_reg_rtx (bool_mode
);
7716 /* Emit the compare_and_swap. */
7717 create_output_operand (&ops
[0], target_bool
, bool_mode
);
7718 create_output_operand (&ops
[1], target_oval
, mode
);
7719 create_fixed_operand (&ops
[2], mem
);
7720 create_convert_operand_to (&ops
[3], expected
, mode
, true);
7721 create_convert_operand_to (&ops
[4], desired
, mode
, true);
7722 create_integer_operand (&ops
[5], is_weak
);
7723 create_integer_operand (&ops
[6], succ_model
);
7724 create_integer_operand (&ops
[7], fail_model
);
7725 expand_insn (icode
, 8, ops
);
7727 /* Return success/failure. */
7728 target_bool
= ops
[0].value
;
7729 target_oval
= ops
[1].value
;
7733 /* Otherwise fall back to the original __sync_val_compare_and_swap
7734 which is always seq-cst. */
7735 icode
= optab_handler (sync_compare_and_swap_optab
, mode
);
7736 if (icode
!= CODE_FOR_nothing
)
7740 create_output_operand (&ops
[0], target_oval
, mode
);
7741 create_fixed_operand (&ops
[1], mem
);
7742 create_convert_operand_to (&ops
[2], expected
, mode
, true);
7743 create_convert_operand_to (&ops
[3], desired
, mode
, true);
7744 if (!maybe_expand_insn (icode
, 4, ops
))
7747 target_oval
= ops
[0].value
;
7749 /* If the caller isn't interested in the boolean return value,
7750 skip the computation of it. */
7751 if (ptarget_bool
== NULL
)
7754 /* Otherwise, work out if the compare-and-swap succeeded. */
7756 if (have_insn_for (COMPARE
, CCmode
))
7757 note_stores (PATTERN (get_last_insn ()), find_cc_set
, &cc_reg
);
7760 target_bool
= emit_store_flag_force (target_bool
, EQ
, cc_reg
,
7761 const0_rtx
, VOIDmode
, 0, 1);
7764 goto success_bool_from_val
;
7767 /* Also check for library support for __sync_val_compare_and_swap. */
7768 libfunc
= optab_libfunc (sync_compare_and_swap_optab
, mode
);
7769 if (libfunc
!= NULL
)
7771 rtx addr
= convert_memory_address (ptr_mode
, XEXP (mem
, 0));
7772 target_oval
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_NORMAL
,
7773 mode
, 3, addr
, ptr_mode
,
7774 expected
, mode
, desired
, mode
);
7776 /* Compute the boolean return value only if requested. */
7778 goto success_bool_from_val
;
7786 success_bool_from_val
:
7787 target_bool
= emit_store_flag_force (target_bool
, EQ
, target_oval
,
7788 expected
, VOIDmode
, 1, 1);
7790 /* Make sure that the oval output winds up where the caller asked. */
7792 *ptarget_oval
= target_oval
;
7794 *ptarget_bool
= target_bool
;
7798 /* Generate asm volatile("" : : : "memory") as the memory barrier. */
/* Emit the equivalent of asm volatile("" : : : "memory"): a volatile
   empty ASM_OPERANDS paired with a CLOBBER of a scratch BLKmode MEM,
   wrapped in one PARALLEL.  This is a compiler-level barrier only; it
   emits no machine instruction.  */
7801 expand_asm_memory_barrier (void)
/* Build the empty volatile asm statement.  */
7805 asm_op
= gen_rtx_ASM_OPERANDS (VOIDmode
, empty_string
, empty_string
, 0,
7806 rtvec_alloc (0), rtvec_alloc (0),
7807 rtvec_alloc (0), UNKNOWN_LOCATION
);
7808 MEM_VOLATILE_P (asm_op
) = 1;
/* A clobber of (mem:BLK (scratch)) represents "clobbers all memory".  */
7810 clob
= gen_rtx_SCRATCH (VOIDmode
);
7811 clob
= gen_rtx_MEM (BLKmode
, clob
);
7812 clob
= gen_rtx_CLOBBER (VOIDmode
, clob
);
7814 emit_insn (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, asm_op
, clob
)));
7817 /* This routine will either emit the mem_thread_fence pattern or issue a
7818 sync_synchronize to generate a fence for memory model MEMMODEL. */
7820 #ifndef HAVE_mem_thread_fence
7821 # define HAVE_mem_thread_fence 0
7822 # define gen_mem_thread_fence(x) (gcc_unreachable (), NULL_RTX)
7824 #ifndef HAVE_memory_barrier
7825 # define HAVE_memory_barrier 0
7826 # define gen_memory_barrier() (gcc_unreachable (), NULL_RTX)
/* Emit a thread-level memory fence for MODEL.  Preference order:
   target mem_thread_fence pattern, then (for non-relaxed models) the
   legacy memory_barrier pattern, then a __sync_synchronize libcall,
   then a plain compiler barrier.  A relaxed model needs no fence.  */
7830 expand_mem_thread_fence (enum memmodel model
)
7832 if (HAVE_mem_thread_fence
)
7833 emit_insn (gen_mem_thread_fence (GEN_INT (model
)));
7834 else if (model
!= MEMMODEL_RELAXED
)
7836 if (HAVE_memory_barrier
)
7837 emit_insn (gen_memory_barrier ());
7838 else if (synchronize_libfunc
!= NULL_RTX
)
7839 emit_library_call (synchronize_libfunc
, LCT_NORMAL
, VOIDmode
, 0);
/* Last resort: a compiler-only barrier (no machine instruction).  */
7841 expand_asm_memory_barrier ();
7845 /* This routine will either emit the mem_signal_fence pattern or issue a
7846 sync_synchronize to generate a fence for memory model MEMMODEL. */
7848 #ifndef HAVE_mem_signal_fence
7849 # define HAVE_mem_signal_fence 0
7850 # define gen_mem_signal_fence(x) (gcc_unreachable (), NULL_RTX)
/* Emit a signal fence for MODEL: use the target's mem_signal_fence
   pattern if it exists; otherwise, for non-relaxed models, a compiler
   barrier suffices because a thread and a signal handler running on
   that same thread are coherent by default.  */
7854 expand_mem_signal_fence (enum memmodel model
)
7856 if (HAVE_mem_signal_fence
)
7857 emit_insn (gen_mem_signal_fence (GEN_INT (model
)));
7858 else if (model
!= MEMMODEL_RELAXED
)
7860 /* By default targets are coherent between a thread and the signal
7861 handler running on the same thread. Thus this really becomes a
7862 compiler barrier, in that stores must not be sunk past
7863 (or raised above) a given point. */
7864 expand_asm_memory_barrier ();
7868 /* This function expands the atomic load operation:
7869 return the atomically loaded value in MEM.
7871 MEMMODEL is the memory model variant to use.
7872 TARGET is an option place to stick the return value. */
7875 expand_atomic_load (rtx target
, rtx mem
, enum memmodel model
)
7877 enum machine_mode mode
= GET_MODE (mem
);
7878 enum insn_code icode
;
7880 /* If the target supports the load directly, great. */
7881 icode
= direct_optab_handler (atomic_load_optab
, mode
);
7882 if (icode
!= CODE_FOR_nothing
)
7884 struct expand_operand ops
[3];
7886 create_output_operand (&ops
[0], target
, mode
);
7887 create_fixed_operand (&ops
[1], mem
);
7888 create_integer_operand (&ops
[2], model
);
7889 if (maybe_expand_insn (icode
, 3, ops
))
7890 return ops
[0].value
;
7893 /* If the size of the object is greater than word size on this target,
7894 then we assume that a load will not be atomic. */
7895 if (GET_MODE_PRECISION (mode
) > BITS_PER_WORD
)
7897 /* Issue val = compare_and_swap (mem, 0, 0).
7898 This may cause the occasional harmless store of 0 when the value is
7899 already 0, but it seems to be OK according to the standards guys. */
7900 if (expand_atomic_compare_and_swap (NULL
, &target
, mem
, const0_rtx
,
7901 const0_rtx
, false, model
, model
))
7904 /* Otherwise there is no atomic load, leave the library call. */
7908 /* Otherwise assume loads are atomic, and emit the proper barriers. */
7909 if (!target
|| target
== const0_rtx
)
7910 target
= gen_reg_rtx (mode
);
7912 /* Emit the appropriate barrier before the load. */
7913 expand_mem_thread_fence (model
);
7915 emit_move_insn (target
, mem
);
7917 /* For SEQ_CST, also emit a barrier after the load. */
7918 if (model
== MEMMODEL_SEQ_CST
)
7919 expand_mem_thread_fence (model
);
7924 /* This function expands the atomic store operation:
7925 Atomically store VAL in MEM.
7926 MEMMODEL is the memory model variant to use.
7927 USE_RELEASE is true if __sync_lock_release can be used as a fall back.
7928 function returns const0_rtx if a pattern was emitted. */
7931 expand_atomic_store (rtx mem
, rtx val
, enum memmodel model
, bool use_release
)
7933 enum machine_mode mode
= GET_MODE (mem
);
7934 enum insn_code icode
;
7935 struct expand_operand ops
[3];
7937 /* If the target supports the store directly, great. */
7938 icode
= direct_optab_handler (atomic_store_optab
, mode
);
7939 if (icode
!= CODE_FOR_nothing
)
7941 create_fixed_operand (&ops
[0], mem
);
7942 create_input_operand (&ops
[1], val
, mode
);
7943 create_integer_operand (&ops
[2], model
);
7944 if (maybe_expand_insn (icode
, 3, ops
))
7948 /* If using __sync_lock_release is a viable alternative, try it. */
7951 icode
= direct_optab_handler (sync_lock_release_optab
, mode
);
7952 if (icode
!= CODE_FOR_nothing
)
7954 create_fixed_operand (&ops
[0], mem
);
7955 create_input_operand (&ops
[1], const0_rtx
, mode
);
7956 if (maybe_expand_insn (icode
, 2, ops
))
7958 /* lock_release is only a release barrier. */
7959 if (model
== MEMMODEL_SEQ_CST
)
7960 expand_mem_thread_fence (model
);
7966 /* If the size of the object is greater than word size on this target,
7967 a default store will not be atomic, Try a mem_exchange and throw away
7968 the result. If that doesn't work, don't do anything. */
7969 if (GET_MODE_PRECISION(mode
) > BITS_PER_WORD
)
7971 rtx target
= maybe_emit_atomic_exchange (NULL_RTX
, mem
, val
, model
);
7973 target
= maybe_emit_compare_and_swap_exchange_loop (NULL_RTX
, mem
, val
);
7980 /* If there is no mem_store, default to a move with barriers */
7981 if (model
== MEMMODEL_SEQ_CST
|| model
== MEMMODEL_RELEASE
)
7982 expand_mem_thread_fence (model
);
7984 emit_move_insn (mem
, val
);
7986 /* For SEQ_CST, also emit a barrier after the load. */
7987 if (model
== MEMMODEL_SEQ_CST
)
7988 expand_mem_thread_fence (model
);
7994 /* Structure containing the pointers and values required to process the
7995 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
7997 struct atomic_op_functions
7999 direct_optab mem_fetch_before
;
8000 direct_optab mem_fetch_after
;
8001 direct_optab mem_no_result
;
8004 direct_optab no_result
;
8005 enum rtx_code reverse_code
;
8009 /* Fill in structure pointed to by OP with the various optab entries for an
8010 operation of type CODE. */
8013 get_atomic_op_for_code (struct atomic_op_functions
*op
, enum rtx_code code
)
8015 gcc_assert (op
!= NULL
);
8017 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
8018 in the source code during compilation, and the optab entries are not
8019 computable until runtime. Fill in the values at runtime. */
8023 op
->mem_fetch_before
= atomic_fetch_add_optab
;
8024 op
->mem_fetch_after
= atomic_add_fetch_optab
;
8025 op
->mem_no_result
= atomic_add_optab
;
8026 op
->fetch_before
= sync_old_add_optab
;
8027 op
->fetch_after
= sync_new_add_optab
;
8028 op
->no_result
= sync_add_optab
;
8029 op
->reverse_code
= MINUS
;
8032 op
->mem_fetch_before
= atomic_fetch_sub_optab
;
8033 op
->mem_fetch_after
= atomic_sub_fetch_optab
;
8034 op
->mem_no_result
= atomic_sub_optab
;
8035 op
->fetch_before
= sync_old_sub_optab
;
8036 op
->fetch_after
= sync_new_sub_optab
;
8037 op
->no_result
= sync_sub_optab
;
8038 op
->reverse_code
= PLUS
;
8041 op
->mem_fetch_before
= atomic_fetch_xor_optab
;
8042 op
->mem_fetch_after
= atomic_xor_fetch_optab
;
8043 op
->mem_no_result
= atomic_xor_optab
;
8044 op
->fetch_before
= sync_old_xor_optab
;
8045 op
->fetch_after
= sync_new_xor_optab
;
8046 op
->no_result
= sync_xor_optab
;
8047 op
->reverse_code
= XOR
;
8050 op
->mem_fetch_before
= atomic_fetch_and_optab
;
8051 op
->mem_fetch_after
= atomic_and_fetch_optab
;
8052 op
->mem_no_result
= atomic_and_optab
;
8053 op
->fetch_before
= sync_old_and_optab
;
8054 op
->fetch_after
= sync_new_and_optab
;
8055 op
->no_result
= sync_and_optab
;
8056 op
->reverse_code
= UNKNOWN
;
8059 op
->mem_fetch_before
= atomic_fetch_or_optab
;
8060 op
->mem_fetch_after
= atomic_or_fetch_optab
;
8061 op
->mem_no_result
= atomic_or_optab
;
8062 op
->fetch_before
= sync_old_ior_optab
;
8063 op
->fetch_after
= sync_new_ior_optab
;
8064 op
->no_result
= sync_ior_optab
;
8065 op
->reverse_code
= UNKNOWN
;
8068 op
->mem_fetch_before
= atomic_fetch_nand_optab
;
8069 op
->mem_fetch_after
= atomic_nand_fetch_optab
;
8070 op
->mem_no_result
= atomic_nand_optab
;
8071 op
->fetch_before
= sync_old_nand_optab
;
8072 op
->fetch_after
= sync_new_nand_optab
;
8073 op
->no_result
= sync_nand_optab
;
8074 op
->reverse_code
= UNKNOWN
;
8081 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
8082 using memory order MODEL. If AFTER is true the operation needs to return
8083 the value of *MEM after the operation, otherwise the previous value.
8084 TARGET is an optional place to place the result. The result is unused if
8086 Return the result if there is a better sequence, otherwise NULL_RTX. */
8089 maybe_optimize_fetch_op (rtx target
, rtx mem
, rtx val
, enum rtx_code code
,
8090 enum memmodel model
, bool after
)
8092 /* If the value is prefetched, or not used, it may be possible to replace
8093 the sequence with a native exchange operation. */
8094 if (!after
|| target
== const0_rtx
)
8096 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
8097 if (code
== AND
&& val
== const0_rtx
)
8099 if (target
== const0_rtx
)
8100 target
= gen_reg_rtx (GET_MODE (mem
));
8101 return maybe_emit_atomic_exchange (target
, mem
, val
, model
);
8104 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
8105 if (code
== IOR
&& val
== constm1_rtx
)
8107 if (target
== const0_rtx
)
8108 target
= gen_reg_rtx (GET_MODE (mem
));
8109 return maybe_emit_atomic_exchange (target
, mem
, val
, model
);
8116 /* Try to emit an instruction for a specific operation varaition.
8117 OPTAB contains the OP functions.
8118 TARGET is an optional place to return the result. const0_rtx means unused.
8119 MEM is the memory location to operate on.
8120 VAL is the value to use in the operation.
8121 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
8122 MODEL is the memory model, if used.
8123 AFTER is true if the returned result is the value after the operation. */
8126 maybe_emit_op (const struct atomic_op_functions
*optab
, rtx target
, rtx mem
,
8127 rtx val
, bool use_memmodel
, enum memmodel model
, bool after
)
8129 enum machine_mode mode
= GET_MODE (mem
);
8130 struct expand_operand ops
[4];
8131 enum insn_code icode
;
8135 /* Check to see if there is a result returned. */
8136 if (target
== const0_rtx
)
8140 icode
= direct_optab_handler (optab
->mem_no_result
, mode
);
8141 create_integer_operand (&ops
[2], model
);
8146 icode
= direct_optab_handler (optab
->no_result
, mode
);
8150 /* Otherwise, we need to generate a result. */
8155 icode
= direct_optab_handler (after
? optab
->mem_fetch_after
8156 : optab
->mem_fetch_before
, mode
);
8157 create_integer_operand (&ops
[3], model
);
8162 icode
= optab_handler (after
? optab
->fetch_after
8163 : optab
->fetch_before
, mode
);
8166 create_output_operand (&ops
[op_counter
++], target
, mode
);
8168 if (icode
== CODE_FOR_nothing
)
8171 create_fixed_operand (&ops
[op_counter
++], mem
);
8172 /* VAL may have been promoted to a wider mode. Shrink it if so. */
8173 create_convert_operand_to (&ops
[op_counter
++], val
, mode
, true);
8175 if (maybe_expand_insn (icode
, num_ops
, ops
))
8176 return (target
== const0_rtx
? const0_rtx
: ops
[0].value
);
8182 /* This function expands an atomic fetch_OP or OP_fetch operation:
8183 TARGET is an option place to stick the return value. const0_rtx indicates
8184 the result is unused.
8185 atomically fetch MEM, perform the operation with VAL and return it to MEM.
8186 CODE is the operation being performed (OP)
8187 MEMMODEL is the memory model variant to use.
8188 AFTER is true to return the result of the operation (OP_fetch).
8189 AFTER is false to return the value before the operation (fetch_OP). */
8191 expand_atomic_fetch_op (rtx target
, rtx mem
, rtx val
, enum rtx_code code
,
8192 enum memmodel model
, bool after
)
8194 enum machine_mode mode
= GET_MODE (mem
);
8195 struct atomic_op_functions optab
;
8197 bool unused_result
= (target
== const0_rtx
);
8199 get_atomic_op_for_code (&optab
, code
);
8201 /* Check to see if there are any better instructions. */
8202 result
= maybe_optimize_fetch_op (target
, mem
, val
, code
, model
, after
);
8206 /* Check for the case where the result isn't used and try those patterns. */
8209 /* Try the memory model variant first. */
8210 result
= maybe_emit_op (&optab
, target
, mem
, val
, true, model
, true);
8214 /* Next try the old style withuot a memory model. */
8215 result
= maybe_emit_op (&optab
, target
, mem
, val
, false, model
, true);
8219 /* There is no no-result pattern, so try patterns with a result. */
8223 /* Try the __atomic version. */
8224 result
= maybe_emit_op (&optab
, target
, mem
, val
, true, model
, after
);
8228 /* Try the older __sync version. */
8229 result
= maybe_emit_op (&optab
, target
, mem
, val
, false, model
, after
);
8233 /* If the fetch value can be calculated from the other variation of fetch,
8234 try that operation. */
8235 if (after
|| unused_result
|| optab
.reverse_code
!= UNKNOWN
)
8237 /* Try the __atomic version, then the older __sync version. */
8238 result
= maybe_emit_op (&optab
, target
, mem
, val
, true, model
, !after
);
8240 result
= maybe_emit_op (&optab
, target
, mem
, val
, false, model
, !after
);
8244 /* If the result isn't used, no need to do compensation code. */
8248 /* Issue compensation code. Fetch_after == fetch_before OP val.
8249 Fetch_before == after REVERSE_OP val. */
8251 code
= optab
.reverse_code
;
8254 result
= expand_simple_binop (mode
, AND
, result
, val
, NULL_RTX
,
8255 true, OPTAB_LIB_WIDEN
);
8256 result
= expand_simple_unop (mode
, NOT
, result
, target
, true);
8259 result
= expand_simple_binop (mode
, code
, result
, val
, target
,
8260 true, OPTAB_LIB_WIDEN
);
8265 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
8266 if (!can_compare_and_swap_p (mode
, false))
8271 libfunc
= optab_libfunc (after
? optab
.fetch_after
8272 : optab
.fetch_before
, mode
);
8274 && (after
|| unused_result
|| optab
.reverse_code
!= UNKNOWN
))
8278 code
= optab
.reverse_code
;
8279 libfunc
= optab_libfunc (after
? optab
.fetch_before
8280 : optab
.fetch_after
, mode
);
8282 if (libfunc
!= NULL
)
8284 rtx addr
= convert_memory_address (ptr_mode
, XEXP (mem
, 0));
8285 result
= emit_library_call_value (libfunc
, NULL
, LCT_NORMAL
, mode
,
8286 2, addr
, ptr_mode
, val
, mode
);
8288 if (!unused_result
&& fixup
)
8289 result
= expand_simple_binop (mode
, code
, result
, val
, target
,
8290 true, OPTAB_LIB_WIDEN
);
8295 /* If nothing else has succeeded, default to a compare and swap loop. */
8296 if (can_compare_and_swap_p (mode
, true))
8299 rtx t0
= gen_reg_rtx (mode
), t1
;
8303 /* If the result is used, get a register for it. */
8306 if (!target
|| !register_operand (target
, mode
))
8307 target
= gen_reg_rtx (mode
);
8308 /* If fetch_before, copy the value now. */
8310 emit_move_insn (target
, t0
);
8313 target
= const0_rtx
;
8318 t1
= expand_simple_binop (mode
, AND
, t1
, val
, NULL_RTX
,
8319 true, OPTAB_LIB_WIDEN
);
8320 t1
= expand_simple_unop (mode
, code
, t1
, NULL_RTX
, true);
8323 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
, true,
8326 /* For after, copy the value now. */
8327 if (!unused_result
&& after
)
8328 emit_move_insn (target
, t1
);
8329 insn
= get_insns ();
8332 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
8339 /* Return true if OPERAND is suitable for operand number OPNO of
8340 instruction ICODE. */
8343 insn_operand_matches (enum insn_code icode
, unsigned int opno
, rtx operand
)
8345 return (!insn_data
[(int) icode
].operand
[opno
].predicate
8346 || (insn_data
[(int) icode
].operand
[opno
].predicate
8347 (operand
, insn_data
[(int) icode
].operand
[opno
].mode
)));
8350 /* TARGET is a target of a multiword operation that we are going to
8351 implement as a series of word-mode operations. Return true if
8352 TARGET is suitable for this purpose. */
8355 valid_multiword_target_p (rtx target
)
8357 enum machine_mode mode
;
8360 mode
= GET_MODE (target
);
8361 for (i
= 0; i
< GET_MODE_SIZE (mode
); i
+= UNITS_PER_WORD
)
8362 if (!validate_subreg (word_mode
, mode
, target
, i
))
8367 /* Like maybe_legitimize_operand, but do not change the code of the
8368 current rtx value. */
8371 maybe_legitimize_operand_same_code (enum insn_code icode
, unsigned int opno
,
8372 struct expand_operand
*op
)
8374 /* See if the operand matches in its current form. */
8375 if (insn_operand_matches (icode
, opno
, op
->value
))
8378 /* If the operand is a memory whose address has no side effects,
8379 try forcing the address into a non-virtual pseudo register.
8380 The check for side effects is important because copy_to_mode_reg
8381 cannot handle things like auto-modified addresses. */
8382 if (insn_data
[(int) icode
].operand
[opno
].allows_mem
&& MEM_P (op
->value
))
8387 addr
= XEXP (mem
, 0);
8388 if (!(REG_P (addr
) && REGNO (addr
) > LAST_VIRTUAL_REGISTER
)
8389 && !side_effects_p (addr
))
8392 enum machine_mode mode
;
8394 last
= get_last_insn ();
8395 mode
= get_address_mode (mem
);
8396 mem
= replace_equiv_address (mem
, copy_to_mode_reg (mode
, addr
));
8397 if (insn_operand_matches (icode
, opno
, mem
))
8402 delete_insns_since (last
);
8409 /* Try to make OP match operand OPNO of instruction ICODE. Return true
8410 on success, storing the new operand value back in OP. */
8413 maybe_legitimize_operand (enum insn_code icode
, unsigned int opno
,
8414 struct expand_operand
*op
)
8416 enum machine_mode mode
, imode
;
8417 bool old_volatile_ok
, result
;
8423 old_volatile_ok
= volatile_ok
;
8425 result
= maybe_legitimize_operand_same_code (icode
, opno
, op
);
8426 volatile_ok
= old_volatile_ok
;
8430 gcc_assert (mode
!= VOIDmode
);
8432 && op
->value
!= const0_rtx
8433 && GET_MODE (op
->value
) == mode
8434 && maybe_legitimize_operand_same_code (icode
, opno
, op
))
8437 op
->value
= gen_reg_rtx (mode
);
8442 gcc_assert (mode
!= VOIDmode
);
8443 gcc_assert (GET_MODE (op
->value
) == VOIDmode
8444 || GET_MODE (op
->value
) == mode
);
8445 if (maybe_legitimize_operand_same_code (icode
, opno
, op
))
8448 op
->value
= copy_to_mode_reg (mode
, op
->value
);
8451 case EXPAND_CONVERT_TO
:
8452 gcc_assert (mode
!= VOIDmode
);
8453 op
->value
= convert_to_mode (mode
, op
->value
, op
->unsigned_p
);
8456 case EXPAND_CONVERT_FROM
:
8457 if (GET_MODE (op
->value
) != VOIDmode
)
8458 mode
= GET_MODE (op
->value
);
8460 /* The caller must tell us what mode this value has. */
8461 gcc_assert (mode
!= VOIDmode
);
8463 imode
= insn_data
[(int) icode
].operand
[opno
].mode
;
8464 if (imode
!= VOIDmode
&& imode
!= mode
)
8466 op
->value
= convert_modes (imode
, mode
, op
->value
, op
->unsigned_p
);
8471 case EXPAND_ADDRESS
:
8472 gcc_assert (mode
!= VOIDmode
);
8473 op
->value
= convert_memory_address (mode
, op
->value
);
8476 case EXPAND_INTEGER
:
8477 mode
= insn_data
[(int) icode
].operand
[opno
].mode
;
8478 if (mode
!= VOIDmode
&& const_int_operand (op
->value
, mode
))
8482 return insn_operand_matches (icode
, opno
, op
->value
);
8485 /* Make OP describe an input operand that should have the same value
8486 as VALUE, after any mode conversion that the target might request.
8487 TYPE is the type of VALUE. */
8490 create_convert_operand_from_type (struct expand_operand
*op
,
8491 rtx value
, tree type
)
8493 create_convert_operand_from (op
, value
, TYPE_MODE (type
),
8494 TYPE_UNSIGNED (type
));
8497 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
8498 of instruction ICODE. Return true on success, leaving the new operand
8499 values in the OPS themselves. Emit no code on failure. */
8502 maybe_legitimize_operands (enum insn_code icode
, unsigned int opno
,
8503 unsigned int nops
, struct expand_operand
*ops
)
8508 last
= get_last_insn ();
8509 for (i
= 0; i
< nops
; i
++)
8510 if (!maybe_legitimize_operand (icode
, opno
+ i
, &ops
[i
]))
8512 delete_insns_since (last
);
8518 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
8519 as its operands. Return the instruction pattern on success,
8520 and emit any necessary set-up code. Return null and emit no
8524 maybe_gen_insn (enum insn_code icode
, unsigned int nops
,
8525 struct expand_operand
*ops
)
8527 gcc_assert (nops
== (unsigned int) insn_data
[(int) icode
].n_generator_args
);
8528 if (!maybe_legitimize_operands (icode
, 0, nops
, ops
))
8534 return GEN_FCN (icode
) (ops
[0].value
);
8536 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
);
8538 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
);
8540 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
8543 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
8544 ops
[3].value
, ops
[4].value
);
8546 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
8547 ops
[3].value
, ops
[4].value
, ops
[5].value
);
8549 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
8550 ops
[3].value
, ops
[4].value
, ops
[5].value
,
8553 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
8554 ops
[3].value
, ops
[4].value
, ops
[5].value
,
8555 ops
[6].value
, ops
[7].value
);
8560 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
8561 as its operands. Return true on success and emit no code on failure. */
8564 maybe_expand_insn (enum insn_code icode
, unsigned int nops
,
8565 struct expand_operand
*ops
)
8567 rtx pat
= maybe_gen_insn (icode
, nops
, ops
);
8576 /* Like maybe_expand_insn, but for jumps. */
8579 maybe_expand_jump_insn (enum insn_code icode
, unsigned int nops
,
8580 struct expand_operand
*ops
)
8582 rtx pat
= maybe_gen_insn (icode
, nops
, ops
);
8585 emit_jump_insn (pat
);
8591 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
8595 expand_insn (enum insn_code icode
, unsigned int nops
,
8596 struct expand_operand
*ops
)
8598 if (!maybe_expand_insn (icode
, nops
, ops
))
8602 /* Like expand_insn, but for jumps. */
8605 expand_jump_insn (enum insn_code icode
, unsigned int nops
,
8606 struct expand_operand
*ops
)
8608 if (!maybe_expand_jump_insn (icode
, nops
, ops
))
8612 #include "gt-optabs.h"