+2011-03-23 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * optabs.h (emit_unop_insn, maybe_emit_unop_insn): Change insn code
+ parameter from "int" to "enum insn_code".
+ (expand_operand_type): New enum.
+ (expand_operand): New structure.
+ (create_expand_operand): New function.
+ (create_fixed_operand, create_output_operand): Likewise.
+ (create_input_operand, create_convert_operand_to): Likewise.
+ (create_convert_operand_from, create_address_operand): Likewise.
+ (create_integer_operand): Likewise.
+ (create_convert_operand_from_type, maybe_legitimize_operands): Declare.
+ (maybe_gen_insn, maybe_expand_insn, maybe_expand_jump_insn): Likewise.
+ (expand_insn, expand_jump_insn): Likewise.
+ * builtins.c (expand_builtin_prefetch): Use the new interfaces.
+ (expand_builtin_interclass_mathfn, expand_builtin_strlen): Likewise.
+ (expand_movstr, expand_builtin___clear_cache): Likewise.
+ (expand_builtin_lock_release): Likewise.
+ * explow.c (allocate_dynamic_stack_space): Likewise.
+ (probe_stack_range): Likewise. Allow check_stack to FAIL,
+ and use the default handling in that case.
+ * expmed.c (check_predicate_volatile_ok): Delete.
+ (store_bit_field_1, extract_bit_field_1): Use the new interfaces.
+ (emit_cstore): Likewise.
+ * expr.c (emit_block_move_via_movmem): Likewise.
+ (set_storage_via_setmem, expand_assignment): Likewise.
+ (emit_storent_insn, try_casesi): Likewise.
+ (emit_single_push_insn): Likewise. Allow the expansion to fail.
+ * optabs.c (expand_widen_pattern_expr, expand_ternary_op): Likewise.
+ (expand_vec_shift_expr, expand_binop_directly): Likewise.
+ (expand_twoval_unop, expand_twoval_binop): Likewise.
+ (expand_unop_direct, emit_indirect_jump): Likewise.
+ (emit_conditional_move, vector_compare_rtx): Likewise.
+ (expand_vec_cond_expr, expand_val_compare_and_swap_1): Likewise.
+ (expand_sync_operation, expand_sync_fetch_operation): Likewise.
+ (expand_sync_lock_test_and_set): Likewise.
+ (maybe_emit_unop_insn): Likewise. Change icode to an insn_code.
+ (emit_unop_insn): Likewise.
+ (expand_copysign_absneg): Change icode to an insn_code.
+ (create_convert_operand_from_type): New function.
+ (maybe_legitimize_operand, maybe_legitimize_operands): Likewise.
+ (maybe_gen_insn, maybe_expand_insn, maybe_expand_jump_insn): Likewise.
+ (expand_insn, expand_jump_insn): Likewise.
+ * config/i386/i386.md (setmem<mode>): Use nonmemory_operand rather
+ than const_int_operand for operand 2.
+
2011-03-23 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>

* dwarf2out.c (const_ok_for_output_1): Print the unspec enum name
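As a minimal sketch of the calling pattern the new optabs.h routines above provide (the helper try_emit_unary and its signature are invented here purely for illustration; only the struct expand_operand, create_*_operand, maybe_expand_insn and ops[n].value usages are taken from the patch itself):

	/* Hypothetical caller: describe each operand, then let
	   maybe_expand_insn legitimize the operands and emit ICODE,
	   or fail cleanly if the target's predicates cannot be met.  */
	static bool
	try_emit_unary (enum insn_code icode, rtx target, rtx x,
			enum machine_mode mode)
	{
	  struct expand_operand ops[2];

	  create_output_operand (&ops[0], target, mode);
	  create_input_operand (&ops[1], x, mode);
	  if (!maybe_expand_insn (icode, 2, ops))
	    return false;
	  /* The expander may have chosen a different result register.  */
	  if (ops[0].value != target)
	    emit_move_insn (target, ops[0].value);
	  return true;
	}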
#ifdef HAVE_prefetch
if (HAVE_prefetch)
{
- if ((! (*insn_data[(int) CODE_FOR_prefetch].operand[0].predicate)
- (op0,
- insn_data[(int) CODE_FOR_prefetch].operand[0].mode))
- || (GET_MODE (op0) != Pmode))
- {
- op0 = convert_memory_address (Pmode, op0);
- op0 = force_reg (Pmode, op0);
- }
- emit_insn (gen_prefetch (op0, op1, op2));
+ struct expand_operand ops[3];
+
+ create_address_operand (&ops[0], op0);
+ create_integer_operand (&ops[1], INTVAL (op1));
+ create_integer_operand (&ops[2], INTVAL (op2));
+ if (maybe_expand_insn (CODE_FOR_prefetch, 3, ops))
+ return;
}
#endif
if (icode != CODE_FOR_nothing)
{
+ struct expand_operand ops[1];
rtx last = get_last_insn ();
tree orig_arg = arg;
- /* Make a suitable register to place result in. */
- if (!target
- || GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp))
- || !insn_data[icode].operand[0].predicate (target, GET_MODE (target)))
- target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
-
- gcc_assert (insn_data[icode].operand[0].predicate
- (target, GET_MODE (target)));
/* Wrap the computation of the argument in a SAVE_EXPR, as we may
need to expand the argument again. This way, we will not perform
if (mode != GET_MODE (op0))
op0 = convert_to_mode (mode, op0, 0);
- /* Compute into TARGET.
- Set TARGET to wherever the result comes back. */
- if (maybe_emit_unop_insn (icode, target, op0, UNKNOWN))
- return target;
+ create_output_operand (&ops[0], target, TYPE_MODE (TREE_TYPE (exp)));
+ if (maybe_legitimize_operands (icode, 0, 1, ops)
+ && maybe_emit_unop_insn (icode, ops[0].value, op0, UNKNOWN))
+ return ops[0].value;
+
delete_insns_since (last);
CALL_EXPR_ARG (exp, 0) = orig_arg;
}
return NULL_RTX;
else
{
+ struct expand_operand ops[4];
rtx pat;
tree len;
tree src = CALL_EXPR_ARG (exp, 0);
- rtx result, src_reg, char_rtx, before_strlen;
- enum machine_mode insn_mode = target_mode, char_mode;
+ rtx src_reg, before_strlen;
+ enum machine_mode insn_mode = target_mode;
enum insn_code icode = CODE_FOR_nothing;
unsigned int align;
if (insn_mode == VOIDmode)
return NULL_RTX;
- /* Make a place to write the result of the instruction. */
- result = target;
- if (! (result != 0
- && REG_P (result)
- && GET_MODE (result) == insn_mode
- && REGNO (result) >= FIRST_PSEUDO_REGISTER))
- result = gen_reg_rtx (insn_mode);
-
/* Make a place to hold the source address. We will not expand
the actual source until we are sure that the expansion will
not fail -- there are trees that cannot be expanded twice. */
source operand later. */
before_strlen = get_last_insn ();
- char_rtx = const0_rtx;
- char_mode = insn_data[(int) icode].operand[2].mode;
- if (! (*insn_data[(int) icode].operand[2].predicate) (char_rtx,
- char_mode))
- char_rtx = copy_to_mode_reg (char_mode, char_rtx);
-
- pat = GEN_FCN (icode) (result, gen_rtx_MEM (BLKmode, src_reg),
- char_rtx, GEN_INT (align));
- if (! pat)
+ create_output_operand (&ops[0], target, insn_mode);
+ create_fixed_operand (&ops[1], gen_rtx_MEM (BLKmode, src_reg));
+ create_integer_operand (&ops[2], 0);
+ create_integer_operand (&ops[3], align);
+ if (!maybe_expand_insn (icode, 4, ops))
return NULL_RTX;
- emit_insn (pat);
/* Now that we are assured of success, expand the source. */
start_sequence ();
emit_insn_before (pat, get_insns ());
/* Return the value in the proper mode for this function. */
- if (GET_MODE (result) == target_mode)
- target = result;
+ if (GET_MODE (ops[0].value) == target_mode)
+ target = ops[0].value;
else if (target != 0)
- convert_move (target, result, 0);
+ convert_move (target, ops[0].value, 0);
else
- target = convert_to_mode (target_mode, result, 0);
+ target = convert_to_mode (target_mode, ops[0].value, 0);
return target;
}
static rtx
expand_movstr (tree dest, tree src, rtx target, int endp)
{
+ struct expand_operand ops[3];
- rtx end;
rtx dest_mem;
rtx src_mem;
- rtx insn;
- const struct insn_data_d * data;
if (!HAVE_movstr)
return NULL_RTX;
dest_mem = get_memory_rtx (dest, NULL);
src_mem = get_memory_rtx (src, NULL);
- data = insn_data + CODE_FOR_movstr;
if (!endp)
{
target = force_reg (Pmode, XEXP (dest_mem, 0));
dest_mem = replace_equiv_address (dest_mem, target);
- end = gen_reg_rtx (Pmode);
- }
- else
- {
- if (target == 0
- || target == const0_rtx
- || ! (*data->operand[0].predicate) (target, Pmode))
- {
- end = gen_reg_rtx (Pmode);
- if (target != const0_rtx)
- target = end;
- }
- else
- end = target;
}
- if (data->operand[0].mode != VOIDmode)
- end = gen_lowpart (data->operand[0].mode, end);
-
- insn = data->genfun (end, dest_mem, src_mem);
-
- gcc_assert (insn);
+ create_output_operand (&ops[0], endp ? target : NULL_RTX, Pmode);
+ create_fixed_operand (&ops[1], dest_mem);
+ create_fixed_operand (&ops[2], src_mem);
+ expand_insn (CODE_FOR_movstr, 3, ops);
- emit_insn (insn);
-
- /* movstr is supposed to set end to the address of the NUL
- terminator. If the caller requested a mempcpy-like return value,
- adjust it. */
- if (endp == 1 && target != const0_rtx)
+ if (endp && target != const0_rtx)
{
- rtx tem = plus_constant (gen_lowpart (GET_MODE (target), end), 1);
- emit_move_insn (target, force_operand (tem, NULL_RTX));
+ target = ops[0].value;
+ /* movstr is supposed to set end to the address of the NUL
+ terminator. If the caller requested a mempcpy-like return value,
+ adjust it. */
+ if (endp == 1)
+ {
+ rtx tem = plus_constant (gen_lowpart (GET_MODE (target), target), 1);
+ emit_move_insn (target, force_operand (tem, NULL_RTX));
+ }
}
-
return target;
}
/* We have a "clear_cache" insn, and it will handle everything. */
tree begin, end;
rtx begin_rtx, end_rtx;
- enum insn_code icode;
/* We must not expand to a library call. If we did, any
fallback library function in libgcc that might contain a call to
if (HAVE_clear_cache)
{
- icode = CODE_FOR_clear_cache;
+ struct expand_operand ops[2];
begin = CALL_EXPR_ARG (exp, 0);
begin_rtx = expand_expr (begin, NULL_RTX, Pmode, EXPAND_NORMAL);
- begin_rtx = convert_memory_address (Pmode, begin_rtx);
- if (!insn_data[icode].operand[0].predicate (begin_rtx, Pmode))
- begin_rtx = copy_to_mode_reg (Pmode, begin_rtx);
end = CALL_EXPR_ARG (exp, 1);
end_rtx = expand_expr (end, NULL_RTX, Pmode, EXPAND_NORMAL);
- end_rtx = convert_memory_address (Pmode, end_rtx);
- if (!insn_data[icode].operand[1].predicate (end_rtx, Pmode))
- end_rtx = copy_to_mode_reg (Pmode, end_rtx);
- emit_insn (gen_clear_cache (begin_rtx, end_rtx));
+ create_address_operand (&ops[0], begin_rtx);
+ create_address_operand (&ops[1], end_rtx);
+ if (maybe_expand_insn (CODE_FOR_clear_cache, 2, ops))
+ return const0_rtx;
}
return const0_rtx;
#endif /* HAVE_clear_cache */
static void
expand_builtin_lock_release (enum machine_mode mode, tree exp)
{
+ struct expand_operand ops[2];
enum insn_code icode;
- rtx mem, insn;
- rtx val = const0_rtx;
+ rtx mem;
/* Expand the operands. */
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
icode = direct_optab_handler (sync_lock_release_optab, mode);
if (icode != CODE_FOR_nothing)
{
- if (!insn_data[icode].operand[1].predicate (val, mode))
- val = force_reg (mode, val);
-
- insn = GEN_FCN (icode) (mem, val);
- if (insn)
- {
- emit_insn (insn);
- return;
- }
+ create_fixed_operand (&ops[0], mem);
+ create_input_operand (&ops[1], const0_rtx, mode);
+ if (maybe_expand_insn (icode, 2, ops))
+ return;
}
/* Otherwise we can implement this operation by emitting a barrier
followed by a store of zero. */
expand_builtin_synchronize ();
- emit_move_insn (mem, val);
+ emit_move_insn (mem, const0_rtx);
}
\f
/* Expand an expression EXP that calls a built-in function,
(define_expand "setmem<mode>"
[(use (match_operand:BLK 0 "memory_operand" ""))
(use (match_operand:SWI48 1 "nonmemory_operand" ""))
- (use (match_operand 2 "const_int_operand" ""))
+ (use (match_operand:QI 2 "nonmemory_operand" ""))
(use (match_operand 3 "const_int_operand" ""))
(use (match_operand:SI 4 "const_int_operand" ""))
(use (match_operand:SI 5 "const_int_operand" ""))]
#ifdef HAVE_allocate_stack
if (HAVE_allocate_stack)
{
- enum machine_mode mode = STACK_SIZE_MODE;
- insn_operand_predicate_fn pred;
-
+ struct expand_operand ops[2];
/* We don't have to check against the predicate for operand 0 since
TARGET is known to be a pseudo of the proper mode, which must
- be valid for the operand. For operand 1, convert to the
- proper mode and validate. */
- if (mode == VOIDmode)
- mode = insn_data[(int) CODE_FOR_allocate_stack].operand[1].mode;
-
- pred = insn_data[(int) CODE_FOR_allocate_stack].operand[1].predicate;
- if (pred && ! ((*pred) (size, mode)))
- size = copy_to_mode_reg (mode, convert_to_mode (mode, size, 1));
-
- emit_insn (gen_allocate_stack (target, size));
+ be valid for the operand. */
+ create_fixed_operand (&ops[0], target);
+ create_convert_operand_to (&ops[1], size, STACK_SIZE_MODE, true);
+ expand_insn (CODE_FOR_allocate_stack, 2, ops);
}
else
#endif
plus_constant (size, first)));
emit_library_call (stack_check_libfunc, LCT_NORMAL, VOIDmode, 1, addr,
Pmode);
+ return;
}
/* Next see if we have an insn to check the stack. */
#ifdef HAVE_check_stack
- else if (HAVE_check_stack)
+ if (HAVE_check_stack)
{
+ struct expand_operand ops[1];
rtx addr = memory_address (Pmode,
gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
stack_pointer_rtx,
plus_constant (size, first)));
- insn_operand_predicate_fn pred
- = insn_data[(int) CODE_FOR_check_stack].operand[0].predicate;
- if (pred && !((*pred) (addr, Pmode)))
- addr = copy_to_mode_reg (Pmode, addr);
- emit_insn (gen_check_stack (addr));
+ create_input_operand (&ops[0], addr, Pmode);
+ if (maybe_expand_insn (CODE_FOR_check_stack, 1, ops))
+ return;
}
#endif
return word_mode;
return data->operand[opno].mode;
}
-
-/* Return true if X, of mode MODE, matches the predicate for operand
- OPNO of instruction ICODE. Allow volatile memories, regardless of
- the ambient volatile_ok setting. */
-
-static bool
-check_predicate_volatile_ok (enum insn_code icode, int opno,
- rtx x, enum machine_mode mode)
-{
- bool save_volatile_ok, result;
-
- save_volatile_ok = volatile_ok;
- result = insn_data[(int) icode].operand[opno].predicate (x, mode);
- volatile_ok = save_volatile_ok;
- return result;
-}
\f
/* A subroutine of store_bit_field, with the same arguments. Return true
if the operation could be implemented.
&& bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
&& !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
{
+ struct expand_operand ops[3];
enum machine_mode outermode = GET_MODE (op0);
enum machine_mode innermode = GET_MODE_INNER (outermode);
- int icode = (int) optab_handler (vec_set_optab, outermode);
+ enum insn_code icode = optab_handler (vec_set_optab, outermode);
int pos = bitnum / GET_MODE_BITSIZE (innermode);
- rtx rtxpos = GEN_INT (pos);
- rtx src = value;
- rtx dest = op0;
- rtx pat, seq;
- enum machine_mode mode0 = insn_data[icode].operand[0].mode;
- enum machine_mode mode1 = insn_data[icode].operand[1].mode;
- enum machine_mode mode2 = insn_data[icode].operand[2].mode;
-
- start_sequence ();
- if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
- src = copy_to_mode_reg (mode1, src);
-
- if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
- rtxpos = copy_to_mode_reg (mode1, rtxpos);
-
- /* We could handle this, but we should always be called with a pseudo
- for our targets and all insns should take them as outputs. */
- gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
- && (*insn_data[icode].operand[1].predicate) (src, mode1)
- && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
- pat = GEN_FCN (icode) (dest, src, rtxpos);
- seq = get_insns ();
- end_sequence ();
- if (pat)
- {
- emit_insn (seq);
- emit_insn (pat);
- return true;
- }
+ create_fixed_operand (&ops[0], op0);
+ create_input_operand (&ops[1], value, innermode);
+ create_integer_operand (&ops[2], pos);
+ if (maybe_expand_insn (icode, 3, ops))
+ return true;
}
/* If the target is a register, overwriting the entire object, or storing
&& bitsize == GET_MODE_BITSIZE (fieldmode)
&& optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
{
- int icode = optab_handler (movstrict_optab, fieldmode);
- rtx insn;
- rtx start = get_last_insn ();
+ struct expand_operand ops[2];
+ enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
rtx arg0 = op0;
- /* Get appropriate low part of the value being stored. */
- if (CONST_INT_P (value) || REG_P (value))
- value = gen_lowpart (fieldmode, value);
- else if (!(GET_CODE (value) == SYMBOL_REF
- || GET_CODE (value) == LABEL_REF
- || GET_CODE (value) == CONST))
- value = convert_to_mode (fieldmode, value, 0);
-
- if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
- value = copy_to_mode_reg (fieldmode, value);
-
- if (GET_CODE (op0) == SUBREG)
+ if (GET_CODE (arg0) == SUBREG)
{
/* Else we've got some float mode source being extracted into
a different float mode destination -- this combination of
subregs results in Severe Tire Damage. */
- gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
+ gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
|| GET_MODE_CLASS (fieldmode) == MODE_INT
|| GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
- arg0 = SUBREG_REG (op0);
+ arg0 = SUBREG_REG (arg0);
}
- insn = (GEN_FCN (icode)
- (gen_rtx_SUBREG (fieldmode, arg0,
- (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
- + (offset * UNITS_PER_WORD)),
- value));
- if (insn)
- {
- emit_insn (insn);
- return true;
- }
- delete_insns_since (start);
+ arg0 = gen_rtx_SUBREG (fieldmode, arg0,
+ (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
+ + (offset * UNITS_PER_WORD));
+
+ create_fixed_operand (&ops[0], arg0);
+ /* Shrink the source operand to FIELDMODE. */
+ create_convert_operand_to (&ops[1], value, fieldmode, false);
+ if (maybe_expand_insn (icode, 2, ops))
+ return true;
}
/* Handle fields bigger than a word. */
&& bitsize > 0
&& GET_MODE_BITSIZE (op_mode) >= bitsize
&& ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
- && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
- && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
- VOIDmode)
- && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
+ && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
{
+ struct expand_operand ops[4];
int xbitpos = bitpos;
rtx value1;
rtx xop0 = op0;
rtx last = get_last_insn ();
- rtx pat;
bool copy_back = false;
/* Add OFFSET into OP0's address. */
gcc_assert (CONSTANT_P (value));
}
- /* If this machine's insv insists on a register,
- get VALUE1 into a register. */
- if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
- (value1, op_mode)))
- value1 = force_reg (op_mode, value1);
-
- pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
- if (pat)
+ create_fixed_operand (&ops[0], xop0);
+ create_integer_operand (&ops[1], bitsize);
+ create_integer_operand (&ops[2], xbitpos);
+ create_input_operand (&ops[3], value1, op_mode);
+ if (maybe_expand_insn (CODE_FOR_insv, 4, ops))
{
- emit_insn (pat);
-
if (copy_back)
convert_move (op0, xop0, true);
return true;
&& ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
== bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
{
+ struct expand_operand ops[3];
enum machine_mode outermode = GET_MODE (op0);
enum machine_mode innermode = GET_MODE_INNER (outermode);
- int icode = (int) optab_handler (vec_extract_optab, outermode);
+ enum insn_code icode = optab_handler (vec_extract_optab, outermode);
unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
- rtx rtxpos = GEN_INT (pos);
- rtx src = op0;
- rtx dest = NULL, pat, seq;
- enum machine_mode mode0 = insn_data[icode].operand[0].mode;
- enum machine_mode mode1 = insn_data[icode].operand[1].mode;
- enum machine_mode mode2 = insn_data[icode].operand[2].mode;
-
- if (innermode == tmode || innermode == mode)
- dest = target;
-
- if (!dest)
- dest = gen_reg_rtx (innermode);
-
- start_sequence ();
-
- if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
- dest = copy_to_mode_reg (mode0, dest);
- if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
- src = copy_to_mode_reg (mode1, src);
-
- if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
- rtxpos = copy_to_mode_reg (mode1, rtxpos);
-
- /* We could handle this, but we should always be called with a pseudo
- for our targets and all insns should take them as outputs. */
- gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
- && (*insn_data[icode].operand[1].predicate) (src, mode1)
- && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
-
- pat = GEN_FCN (icode) (dest, src, rtxpos);
- seq = get_insns ();
- end_sequence ();
- if (pat)
+ create_output_operand (&ops[0], target, innermode);
+ create_input_operand (&ops[1], op0, outermode);
+ create_integer_operand (&ops[2], pos);
+ if (maybe_expand_insn (icode, 3, ops))
{
- emit_insn (seq);
- emit_insn (pat);
- if (mode0 != mode)
- return gen_lowpart (tmode, dest);
- return dest;
+ target = ops[0].value;
+ if (GET_MODE (target) != mode)
+ return gen_lowpart (tmode, target);
+ return target;
}
}
acceptable to the format of ext(z)v. */
&& !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
&& !((REG_P (op0) || GET_CODE (op0) == SUBREG)
- && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
- && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
+ && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode))))
{
+ struct expand_operand ops[4];
unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
- rtx bitsize_rtx, bitpos_rtx;
- rtx last = get_last_insn ();
rtx xop0 = op0;
rtx xtarget = target;
rtx xspec_target = target;
rtx xspec_target_subreg = 0;
- rtx pat;
/* If op0 is a register, we need it in EXT_MODE to make it
acceptable to the format of ext(z)v. */
xtarget = gen_reg_rtx (ext_mode);
}
- /* If this machine's ext(z)v insists on a register target,
- make sure we have one. */
- if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
- xtarget = gen_reg_rtx (ext_mode);
-
- bitsize_rtx = GEN_INT (bitsize);
- bitpos_rtx = GEN_INT (xbitpos);
-
- pat = (unsignedp
- ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
- : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
- if (pat)
+ create_output_operand (&ops[0], xtarget, ext_mode);
+ create_fixed_operand (&ops[1], xop0);
+ create_integer_operand (&ops[2], bitsize);
+ create_integer_operand (&ops[3], xbitpos);
+ if (maybe_expand_insn (unsignedp ? CODE_FOR_extzv : CODE_FOR_extv,
+ 4, ops))
{
- emit_insn (pat);
+ xtarget = ops[0].value;
if (xtarget == xspec_target)
return xtarget;
if (xtarget == xspec_target_subreg)
return xspec_target;
return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
}
- delete_insns_since (last);
}
/* If OP0 is a memory, try copying it to a register and seeing if a
int unsignedp, rtx x, rtx y, int normalizep,
enum machine_mode target_mode)
{
- rtx op0, last, comparison, subtarget, pattern;
+ struct expand_operand ops[4];
+ rtx op0, last, comparison, subtarget;
enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;
last = get_last_insn ();
x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
- comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
- if (!x || !y
- || !insn_data[icode].operand[2].predicate
- (x, insn_data[icode].operand[2].mode)
- || !insn_data[icode].operand[3].predicate
- (y, insn_data[icode].operand[3].mode)
- || !insn_data[icode].operand[1].predicate (comparison, VOIDmode))
+ if (!x || !y)
{
delete_insns_since (last);
return NULL_RTX;
if (!target)
target = gen_reg_rtx (target_mode);
- if (optimize
- || !(insn_data[(int) icode].operand[0].predicate (target, result_mode)))
- subtarget = gen_reg_rtx (result_mode);
- else
- subtarget = target;
+ comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
- pattern = GEN_FCN (icode) (subtarget, comparison, x, y);
- if (!pattern)
- return NULL_RTX;
- emit_insn (pattern);
+ create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
+ create_fixed_operand (&ops[1], comparison);
+ create_fixed_operand (&ops[2], x);
+ create_fixed_operand (&ops[3], y);
+ if (!maybe_expand_insn (icode, 4, ops))
+ {
+ delete_insns_since (last);
+ return NULL_RTX;
+ }
+ subtarget = ops[0].value;
/* If we are converting to a wider mode, first convert to
TARGET_MODE, then normalize. This produces better combining
emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
unsigned int expected_align, HOST_WIDE_INT expected_size)
{
- rtx opalign = GEN_INT (align / BITS_PER_UNIT);
int save_volatile_ok = volatile_ok;
enum machine_mode mode;
mode = GET_MODE_WIDER_MODE (mode))
{
enum insn_code code = direct_optab_handler (movmem_optab, mode);
- insn_operand_predicate_fn pred;
if (code != CODE_FOR_nothing
/* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
&& ((CONST_INT_P (size)
&& ((unsigned HOST_WIDE_INT) INTVAL (size)
<= (GET_MODE_MASK (mode) >> 1)))
- || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
- && ((pred = insn_data[(int) code].operand[0].predicate) == 0
- || (*pred) (x, BLKmode))
- && ((pred = insn_data[(int) code].operand[1].predicate) == 0
- || (*pred) (y, BLKmode))
- && ((pred = insn_data[(int) code].operand[3].predicate) == 0
- || (*pred) (opalign, VOIDmode)))
- {
- rtx op2;
- rtx last = get_last_insn ();
- rtx pat;
-
- op2 = convert_to_mode (mode, size, 1);
- pred = insn_data[(int) code].operand[2].predicate;
- if (pred != 0 && ! (*pred) (op2, mode))
- op2 = copy_to_mode_reg (mode, op2);
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD))
+ {
+ struct expand_operand ops[6];
+ unsigned int nops;
/* ??? When called via emit_block_move_for_call, it'd be
nice if there were some way to inform the backend, so
that it doesn't fail the expansion because it thinks
emitting the libcall would be more efficient. */
-
- if (insn_data[(int) code].n_operands == 4)
- pat = GEN_FCN ((int) code) (x, y, op2, opalign);
- else
- pat = GEN_FCN ((int) code) (x, y, op2, opalign,
- GEN_INT (expected_align
- / BITS_PER_UNIT),
- GEN_INT (expected_size));
- if (pat)
+ nops = insn_data[(int) code].n_operands;
+ create_fixed_operand (&ops[0], x);
+ create_fixed_operand (&ops[1], y);
+ /* The check above guarantees that this size conversion is valid. */
+ create_convert_operand_to (&ops[2], size, mode, true);
+ create_integer_operand (&ops[3], align / BITS_PER_UNIT);
+ if (nops != 4)
+ {
+ create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
+ create_integer_operand (&ops[5], expected_size);
+ nops = 6;
+ }
+ if (maybe_expand_insn (code, nops, ops))
{
- emit_insn (pat);
volatile_ok = save_volatile_ok;
return true;
}
- else
- delete_insns_since (last);
}
}
including more than one in the machine description unless
the more limited one has some advantage. */
- rtx opalign = GEN_INT (align / BITS_PER_UNIT);
enum machine_mode mode;
if (expected_align < align)
mode = GET_MODE_WIDER_MODE (mode))
{
enum insn_code code = direct_optab_handler (setmem_optab, mode);
- insn_operand_predicate_fn pred;
if (code != CODE_FOR_nothing
/* We don't need MODE to be narrower than
&& ((CONST_INT_P (size)
&& ((unsigned HOST_WIDE_INT) INTVAL (size)
<= (GET_MODE_MASK (mode) >> 1)))
- || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
- && ((pred = insn_data[(int) code].operand[0].predicate) == 0
- || (*pred) (object, BLKmode))
- && ((pred = insn_data[(int) code].operand[3].predicate) == 0
- || (*pred) (opalign, VOIDmode)))
- {
- rtx opsize, opchar;
- enum machine_mode char_mode;
- rtx last = get_last_insn ();
- rtx pat;
-
- opsize = convert_to_mode (mode, size, 1);
- pred = insn_data[(int) code].operand[1].predicate;
- if (pred != 0 && ! (*pred) (opsize, mode))
- opsize = copy_to_mode_reg (mode, opsize);
-
- opchar = val;
- char_mode = insn_data[(int) code].operand[2].mode;
- if (char_mode != VOIDmode)
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD))
+ {
+ struct expand_operand ops[6];
+ unsigned int nops;
+
+ nops = insn_data[(int) code].n_operands;
+ create_fixed_operand (&ops[0], object);
+ /* The check above guarantees that this size conversion is valid. */
+ create_convert_operand_to (&ops[1], size, mode, true);
+ create_convert_operand_from (&ops[2], val, byte_mode, true);
+ create_integer_operand (&ops[3], align / BITS_PER_UNIT);
+ if (nops != 4)
{
- opchar = convert_to_mode (char_mode, opchar, 1);
- pred = insn_data[(int) code].operand[2].predicate;
- if (pred != 0 && ! (*pred) (opchar, char_mode))
- opchar = copy_to_mode_reg (char_mode, opchar);
+ create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
+ create_integer_operand (&ops[5], expected_size);
+ nops = 6;
}
-
- if (insn_data[(int) code].n_operands == 4)
- pat = GEN_FCN ((int) code) (object, opsize, opchar, opalign);
- else
- pat = GEN_FCN ((int) code) (object, opsize, opchar, opalign,
- GEN_INT (expected_align
- / BITS_PER_UNIT),
- GEN_INT (expected_size));
- if (pat)
- {
- emit_insn (pat);
- return true;
- }
- else
- delete_insns_since (last);
+ if (maybe_expand_insn (code, nops, ops))
+ return true;
}
}
unsigned rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
rtx dest;
enum insn_code icode;
- insn_operand_predicate_fn pred;
stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
/* If there is push pattern, use it. Otherwise try old way of throwing
icode = optab_handler (push_optab, mode);
if (icode != CODE_FOR_nothing)
{
- if (((pred = insn_data[(int) icode].operand[0].predicate)
- && !((*pred) (x, mode))))
- x = force_reg (mode, x);
- emit_insn (GEN_FCN (icode) (x));
- return;
+ struct expand_operand ops[1];
+
+ create_input_operand (&ops[0], x, mode);
+ if (maybe_expand_insn (icode, 1, ops))
+ return;
}
if (GET_MODE_SIZE (mode) == rounded_size)
dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
rtx to_rtx = 0;
rtx result;
enum machine_mode mode;
- int align, icode;
+ int align;
+ enum insn_code icode;
/* Don't crash if the lhs of the assignment was erroneous. */
if (TREE_CODE (to) == ERROR_MARK)
&& ((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing))
{
- enum machine_mode address_mode, op_mode1;
- rtx insn, reg, op0, mem;
+ struct expand_operand ops[2];
+ enum machine_mode address_mode;
+ rtx reg, op0, mem;
reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
reg = force_not_mem (reg);
if (TREE_THIS_VOLATILE (to))
MEM_VOLATILE_P (mem) = 1;
- op_mode1 = insn_data[icode].operand[1].mode;
- if (! (*insn_data[icode].operand[1].predicate) (reg, op_mode1)
- && op_mode1 != VOIDmode)
- reg = copy_to_mode_reg (op_mode1, reg);
-
- insn = GEN_FCN (icode) (mem, reg);
+ create_fixed_operand (&ops[0], mem);
+ create_input_operand (&ops[1], reg, mode);
/* The movmisalign<mode> pattern cannot fail, else the assignment would
silently be omitted. */
- gcc_assert (insn != NULL_RTX);
- emit_insn (insn);
+ expand_insn (icode, 2, ops);
return;
}
bool
emit_storent_insn (rtx to, rtx from)
{
- enum machine_mode mode = GET_MODE (to), imode;
+ struct expand_operand ops[2];
+ enum machine_mode mode = GET_MODE (to);
enum insn_code code = optab_handler (storent_optab, mode);
- rtx pattern;
if (code == CODE_FOR_nothing)
return false;
- imode = insn_data[code].operand[0].mode;
- if (!insn_data[code].operand[0].predicate (to, imode))
- return false;
-
- imode = insn_data[code].operand[1].mode;
- if (!insn_data[code].operand[1].predicate (from, imode))
- {
- from = copy_to_mode_reg (imode, from);
- if (!insn_data[code].operand[1].predicate (from, imode))
- return false;
- }
-
- pattern = GEN_FCN (code) (to, from);
- if (pattern == NULL_RTX)
- return false;
-
- emit_insn (pattern);
- return true;
+ create_fixed_operand (&ops[0], to);
+ create_input_operand (&ops[1], from, mode);
+ return maybe_expand_insn (code, 2, ops);
}
/* Generate code for computing expression EXP,
rtx table_label ATTRIBUTE_UNUSED, rtx default_label,
rtx fallback_label ATTRIBUTE_UNUSED)
{
+ struct expand_operand ops[5];
enum machine_mode index_mode = SImode;
int index_bits = GET_MODE_BITSIZE (index_mode);
rtx op1, op2, index;
- enum machine_mode op_mode;
if (! HAVE_casesi)
return 0;
do_pending_stack_adjust ();
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[0].mode;
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[0].predicate)
- (index, op_mode))
- index = copy_to_mode_reg (op_mode, index);
-
op1 = expand_normal (minval);
-
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[1].mode;
- op1 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (minval)),
- op1, TYPE_UNSIGNED (TREE_TYPE (minval)));
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[1].predicate)
- (op1, op_mode))
- op1 = copy_to_mode_reg (op_mode, op1);
-
op2 = expand_normal (range);
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[2].mode;
- op2 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (range)),
- op2, TYPE_UNSIGNED (TREE_TYPE (range)));
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[2].predicate)
- (op2, op_mode))
- op2 = copy_to_mode_reg (op_mode, op2);
-
- emit_jump_insn (gen_casesi (index, op1, op2,
- table_label, !default_label
- ? fallback_label : default_label));
+ create_input_operand (&ops[0], index, index_mode);
+ create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
+ create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
+ create_fixed_operand (&ops[3], table_label);
+ create_fixed_operand (&ops[4], (default_label
+ ? default_label
+ : fallback_label));
+ expand_jump_insn (CODE_FOR_casesi, 5, ops);
return 1;
}
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
rtx target, int unsignedp)
{
+ struct expand_operand eops[4];
tree oprnd0, oprnd1, oprnd2;
enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
optab widen_pattern_optab;
- int icode;
- enum machine_mode xmode0, xmode1 = VOIDmode, wxmode = VOIDmode;
- rtx temp;
- rtx pat;
- rtx xop0, xop1, wxop;
+ enum insn_code icode;
int nops = TREE_CODE_LENGTH (ops->code);
+ int op;
oprnd0 = ops->op0;
tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
if (ops->code == WIDEN_MULT_PLUS_EXPR
|| ops->code == WIDEN_MULT_MINUS_EXPR)
- icode = (int) optab_handler (widen_pattern_optab,
- TYPE_MODE (TREE_TYPE (ops->op2)));
+ icode = optab_handler (widen_pattern_optab,
+ TYPE_MODE (TREE_TYPE (ops->op2)));
else
- icode = (int) optab_handler (widen_pattern_optab, tmode0);
+ icode = optab_handler (widen_pattern_optab, tmode0);
gcc_assert (icode != CODE_FOR_nothing);
- xmode0 = insn_data[icode].operand[1].mode;
if (nops >= 2)
{
oprnd1 = ops->op1;
tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
- xmode1 = insn_data[icode].operand[2].mode;
}
/* The last operand is of a wider mode than the rest of the operands. */
if (nops == 2)
- {
- wmode = tmode1;
- wxmode = xmode1;
- }
+ wmode = tmode1;
else if (nops == 3)
{
gcc_assert (tmode1 == tmode0);
gcc_assert (op1);
oprnd2 = ops->op2;
wmode = TYPE_MODE (TREE_TYPE (oprnd2));
- wxmode = insn_data[icode].operand[3].mode;
}
- if (!wide_op)
- wmode = wxmode = insn_data[icode].operand[0].mode;
-
- if (!target
- || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
- temp = gen_reg_rtx (wmode);
- else
- temp = target;
-
- xop0 = op0;
- xop1 = op1;
- wxop = wide_op;
-
- /* In case the insn wants input operands in modes different from
- those of the actual operands, convert the operands. It would
- seem that we don't need to convert CONST_INTs, but we do, so
- that they're properly zero-extended, sign-extended or truncated
- for their mode. */
-
- if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
- xop0 = convert_modes (xmode0,
- GET_MODE (op0) != VOIDmode
- ? GET_MODE (op0)
- : tmode0,
- xop0, unsignedp);
-
+ op = 0;
+ create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
+ create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
if (op1)
- if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
- xop1 = convert_modes (xmode1,
- GET_MODE (op1) != VOIDmode
- ? GET_MODE (op1)
- : tmode1,
- xop1, unsignedp);
-
+ create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
if (wide_op)
- if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
- wxop = convert_modes (wxmode,
- GET_MODE (wide_op) != VOIDmode
- ? GET_MODE (wide_op)
- : wmode,
- wxop, unsignedp);
-
- /* Now, if insn's predicates don't allow our operands, put them into
- pseudo regs. */
-
- if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
- && xmode0 != VOIDmode)
- xop0 = copy_to_mode_reg (xmode0, xop0);
-
- if (op1)
- {
- if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
- && xmode1 != VOIDmode)
- xop1 = copy_to_mode_reg (xmode1, xop1);
-
- if (wide_op)
- {
- if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
- && wxmode != VOIDmode)
- wxop = copy_to_mode_reg (wxmode, wxop);
-
- pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
- }
- else
- pat = GEN_FCN (icode) (temp, xop0, xop1);
- }
- else
- {
- if (wide_op)
- {
- if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
- && wxmode != VOIDmode)
- wxop = copy_to_mode_reg (wxmode, wxop);
-
- pat = GEN_FCN (icode) (temp, xop0, wxop);
- }
- else
- pat = GEN_FCN (icode) (temp, xop0);
- }
-
- emit_insn (pat);
- return temp;
+ create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
+ expand_insn (icode, op, eops);
+ return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
rtx op1, rtx op2, rtx target, int unsignedp)
{
- int icode = (int) optab_handler (ternary_optab, mode);
- enum machine_mode mode0 = insn_data[icode].operand[1].mode;
- enum machine_mode mode1 = insn_data[icode].operand[2].mode;
- enum machine_mode mode2 = insn_data[icode].operand[3].mode;
- rtx temp;
- rtx pat;
- rtx xop0 = op0, xop1 = op1, xop2 = op2;
+ struct expand_operand ops[4];
+ enum insn_code icode = optab_handler (ternary_optab, mode);
gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
- if (!target || !insn_data[icode].operand[0].predicate (target, mode))
- temp = gen_reg_rtx (mode);
- else
- temp = target;
-
- /* In case the insn wants input operands in modes different from
- those of the actual operands, convert the operands. It would
- seem that we don't need to convert CONST_INTs, but we do, so
- that they're properly zero-extended, sign-extended or truncated
- for their mode. */
-
- if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
- xop0 = convert_modes (mode0,
- GET_MODE (op0) != VOIDmode
- ? GET_MODE (op0)
- : mode,
- xop0, unsignedp);
-
- if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
- xop1 = convert_modes (mode1,
- GET_MODE (op1) != VOIDmode
- ? GET_MODE (op1)
- : mode,
- xop1, unsignedp);
-
- if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
- xop2 = convert_modes (mode2,
- GET_MODE (op2) != VOIDmode
- ? GET_MODE (op2)
- : mode,
- xop2, unsignedp);
-
- /* Now, if insn's predicates don't allow our operands, put them into
- pseudo regs. */
-
- if (!insn_data[icode].operand[1].predicate (xop0, mode0)
- && mode0 != VOIDmode)
- xop0 = copy_to_mode_reg (mode0, xop0);
-
- if (!insn_data[icode].operand[2].predicate (xop1, mode1)
- && mode1 != VOIDmode)
- xop1 = copy_to_mode_reg (mode1, xop1);
-
- if (!insn_data[icode].operand[3].predicate (xop2, mode2)
- && mode2 != VOIDmode)
- xop2 = copy_to_mode_reg (mode2, xop2);
-
- pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
-
- emit_insn (pat);
- return temp;
+ create_output_operand (&ops[0], target, mode);
+ create_convert_operand_from (&ops[1], op0, mode, unsignedp);
+ create_convert_operand_from (&ops[2], op1, mode, unsignedp);
+ create_convert_operand_from (&ops[3], op2, mode, unsignedp);
+ expand_insn (icode, 4, ops);
+ return ops[0].value;
}
rtx
expand_vec_shift_expr (sepops ops, rtx target)
{
+ struct expand_operand eops[3];
enum insn_code icode;
rtx rtx_op1, rtx_op2;
- enum machine_mode mode1;
- enum machine_mode mode2;
enum machine_mode mode = TYPE_MODE (ops->type);
tree vec_oprnd = ops->op0;
tree shift_oprnd = ops->op1;
optab shift_optab;
- rtx pat;
switch (ops->code)
{
icode = optab_handler (shift_optab, mode);
gcc_assert (icode != CODE_FOR_nothing);
- mode1 = insn_data[icode].operand[1].mode;
- mode2 = insn_data[icode].operand[2].mode;
-
rtx_op1 = expand_normal (vec_oprnd);
- if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
- && mode1 != VOIDmode)
- rtx_op1 = force_reg (mode1, rtx_op1);
-
rtx_op2 = expand_normal (shift_oprnd);
- if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
- && mode2 != VOIDmode)
- rtx_op2 = force_reg (mode2, rtx_op2);
- if (!target
- || ! (*insn_data[icode].operand[0].predicate) (target, mode))
- target = gen_reg_rtx (mode);
+ create_output_operand (&eops[0], target, mode);
+ create_input_operand (&eops[1], rtx_op1, GET_MODE (rtx_op1));
+ create_convert_operand_from_type (&eops[2], rtx_op2, TREE_TYPE (shift_oprnd));
+ expand_insn (icode, 3, eops);
- /* Emit instruction */
- pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
- gcc_assert (pat);
- emit_insn (pat);
-
- return target;
+ return eops[0].value;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
rtx target, int unsignedp, enum optab_methods methods,
rtx last)
{
- int icode = (int) optab_handler (binoptab, mode);
- enum machine_mode mode0 = insn_data[icode].operand[1].mode;
- enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ enum insn_code icode = optab_handler (binoptab, mode);
+ enum machine_mode mode0 = insn_data[(int) icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[(int) icode].operand[2].mode;
enum machine_mode tmp_mode;
+ struct expand_operand ops[3];
bool commutative_p;
rtx pat;
rtx xop0 = op0, xop1 = op1;
- rtx temp;
rtx swap;
- if (target)
- temp = target;
- else
- temp = gen_reg_rtx (mode);
-
/* If it is a commutative operator and the modes would match
if we would swap the operands, we can save the conversions. */
commutative_p = commutative_optab_p (binoptab);
if (!shift_optab_p (binoptab))
xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
- /* In case the insn wants input operands in modes different from
- those of the actual operands, convert the operands. It would
- seem that we don't need to convert CONST_INTs, but we do, so
- that they're properly zero-extended, sign-extended or truncated
- for their mode. */
-
- if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
- xop0 = convert_modes (mode0,
- GET_MODE (xop0) != VOIDmode
- ? GET_MODE (xop0)
- : mode,
- xop0, unsignedp);
-
- if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
- xop1 = convert_modes (mode1,
- GET_MODE (xop1) != VOIDmode
- ? GET_MODE (xop1)
- : mode,
- xop1, unsignedp);
-
- /* If operation is commutative,
- try to make the first operand a register.
- Even better, try to make it the same as the target.
- Also try to make the last operand a constant. */
- if (commutative_p
- && swap_commutative_operands_with_target (target, xop0, xop1))
- {
- swap = xop1;
- xop1 = xop0;
- xop0 = swap;
- }
-
/* Now, if insn's predicates don't allow our operands, put them into
pseudo regs. */
- if (!insn_data[icode].operand[1].predicate (xop0, mode0)
- && mode0 != VOIDmode)
- xop0 = copy_to_mode_reg (mode0, xop0);
-
- if (!insn_data[icode].operand[2].predicate (xop1, mode1)
- && mode1 != VOIDmode)
- xop1 = copy_to_mode_reg (mode1, xop1);
-
if (binoptab == vec_pack_trunc_optab
|| binoptab == vec_pack_usat_optab
|| binoptab == vec_pack_ssat_optab
{
/* The mode of the result is different from the mode of the
arguments. */
- tmp_mode = insn_data[icode].operand[0].mode;
+ tmp_mode = insn_data[(int) icode].operand[0].mode;
if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
- return 0;
+ {
+ delete_insns_since (last);
+ return NULL_RTX;
+ }
}
else
tmp_mode = mode;
- if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
- temp = gen_reg_rtx (tmp_mode);
-
- pat = GEN_FCN (icode) (temp, xop0, xop1);
- if (pat)
- {
- /* If PAT is composed of more than one insn, try to add an appropriate
- REG_EQUAL note to it. If we can't because TEMP conflicts with an
- operand, call expand_binop again, this time without a target. */
- if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
- && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
+ create_output_operand (&ops[0], target, tmp_mode);
+ create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
+ create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
+ if (maybe_legitimize_operands (icode, 0, 3, ops))
+ {
+ /* If operation is commutative,
+ try to make the first operand a register.
+ Even better, try to make it the same as the target.
+ Also try to make the last operand a constant. */
+ if (commutative_p
+ && swap_commutative_operands_with_target (ops[0].value, ops[1].value,
+ ops[2].value))
{
- delete_insns_since (last);
- return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
- unsignedp, methods);
+ swap = ops[2].value;
+ ops[2].value = ops[1].value;
+ ops[1].value = swap;
}
- emit_insn (pat);
- return temp;
- }
+ pat = GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
+ if (pat)
+ {
+ /* If PAT is composed of more than one insn, try to add an appropriate
+ REG_EQUAL note to it. If we can't because TEMP conflicts with an
+ operand, call expand_binop again, this time without a target. */
+ if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
+ && ! add_equal_note (pat, ops[0].value, binoptab->code,
+ ops[1].value, ops[2].value))
+ {
+ delete_insns_since (last);
+ return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
+ unsignedp, methods);
+ }
+ emit_insn (pat);
+ return ops[0].value;
+ }
+ }
delete_insns_since (last);
return NULL_RTX;
}
if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
{
- int icode = (int) optab_handler (unoptab, mode);
- enum machine_mode mode0 = insn_data[icode].operand[2].mode;
- rtx pat;
- rtx xop0 = op0;
-
- if (GET_MODE (xop0) != VOIDmode
- && GET_MODE (xop0) != mode0)
- xop0 = convert_to_mode (mode0, xop0, unsignedp);
+ struct expand_operand ops[3];
+ enum insn_code icode = optab_handler (unoptab, mode);
- /* Now, if insn doesn't accept these operands, put them into pseudos. */
- if (!insn_data[icode].operand[2].predicate (xop0, mode0))
- xop0 = copy_to_mode_reg (mode0, xop0);
-
- /* We could handle this, but we should always be called with a pseudo
- for our targets and all insns should take them as outputs. */
- gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
- gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
-
- pat = GEN_FCN (icode) (targ0, targ1, xop0);
- if (pat)
- {
- emit_insn (pat);
- return 1;
- }
- else
- delete_insns_since (last);
+ create_fixed_operand (&ops[0], targ0);
+ create_fixed_operand (&ops[1], targ1);
+ create_convert_operand_from (&ops[2], op0, mode, unsignedp);
+ if (maybe_expand_insn (icode, 3, ops))
+ return 1;
}
/* It can't be done in this mode. Can we do it in a wider mode? */
if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
{
- int icode = (int) optab_handler (binoptab, mode);
+ struct expand_operand ops[4];
+ enum insn_code icode = optab_handler (binoptab, mode);
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
- rtx pat;
rtx xop0 = op0, xop1 = op1;
/* If we are optimizing, force expensive constants into a register. */
xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
- /* In case the insn wants input operands in modes different from
- those of the actual operands, convert the operands. It would
- seem that we don't need to convert CONST_INTs, but we do, so
- that they're properly zero-extended, sign-extended or truncated
- for their mode. */
-
- if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
- xop0 = convert_modes (mode0,
- GET_MODE (op0) != VOIDmode
- ? GET_MODE (op0)
- : mode,
- xop0, unsignedp);
-
- if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
- xop1 = convert_modes (mode1,
- GET_MODE (op1) != VOIDmode
- ? GET_MODE (op1)
- : mode,
- xop1, unsignedp);
-
- /* Now, if insn doesn't accept these operands, put them into pseudos. */
- if (!insn_data[icode].operand[1].predicate (xop0, mode0))
- xop0 = copy_to_mode_reg (mode0, xop0);
-
- if (!insn_data[icode].operand[2].predicate (xop1, mode1))
- xop1 = copy_to_mode_reg (mode1, xop1);
-
- /* We could handle this, but we should always be called with a pseudo
- for our targets and all insns should take them as outputs. */
- gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
- gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
-
- pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
- if (pat)
- {
- emit_insn (pat);
- return 1;
- }
- else
- delete_insns_since (last);
+ create_fixed_operand (&ops[0], targ0);
+ create_convert_operand_from (&ops[1], op0, mode, unsignedp);
+ create_convert_operand_from (&ops[2], op1, mode, unsignedp);
+ create_fixed_operand (&ops[3], targ1);
+ if (maybe_expand_insn (icode, 4, ops))
+ return 1;
+ delete_insns_since (last);
}
/* It can't be done in this mode. Can we do it in a wider mode? */
{
if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
{
- int icode = (int) optab_handler (unoptab, mode);
- enum machine_mode mode0 = insn_data[icode].operand[1].mode;
- rtx xop0 = op0;
+ struct expand_operand ops[2];
+ enum insn_code icode = optab_handler (unoptab, mode);
rtx last = get_last_insn ();
- rtx pat, temp;
-
- if (target)
- temp = target;
- else
- temp = gen_reg_rtx (mode);
-
- if (GET_MODE (xop0) != VOIDmode
- && GET_MODE (xop0) != mode0)
- xop0 = convert_to_mode (mode0, xop0, unsignedp);
-
- /* Now, if insn doesn't accept our operand, put it into a pseudo. */
-
- if (!insn_data[icode].operand[1].predicate (xop0, mode0))
- xop0 = copy_to_mode_reg (mode0, xop0);
-
- if (!insn_data[icode].operand[0].predicate (temp, mode))
- temp = gen_reg_rtx (mode);
+ rtx pat;
- pat = GEN_FCN (icode) (temp, xop0);
+ create_output_operand (&ops[0], target, mode);
+ create_convert_operand_from (&ops[1], op0, mode, unsignedp);
+ pat = maybe_gen_insn (icode, 2, ops);
if (pat)
{
if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
- && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
+ && ! add_equal_note (pat, ops[0].value, unoptab->code,
+ ops[1].value, NULL_RTX))
{
delete_insns_since (last);
return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
emit_insn (pat);
- return temp;
+ return ops[0].value;
}
- else
- delete_insns_since (last);
}
return 0;
}
int bitpos, bool op0_is_abs)
{
enum machine_mode imode;
- int icode;
+ enum insn_code icode;
rtx sign, label;
if (target == op1)
/* Check if the back end provides an insn that handles signbit for the
argument's mode. */
- icode = (int) optab_handler (signbit_optab, mode);
+ icode = optab_handler (signbit_optab, mode);
if (icode != CODE_FOR_nothing)
{
- imode = insn_data[icode].operand[0].mode;
+ imode = insn_data[(int) icode].operand[0].mode;
sign = gen_reg_rtx (imode);
emit_unop_insn (icode, sign, op1, UNKNOWN);
}
Return false if expansion failed. */
bool
-maybe_emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
+maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
+ enum rtx_code code)
{
- rtx temp;
- enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ struct expand_operand ops[2];
rtx pat;
- rtx last = get_last_insn ();
-
- temp = target;
-
- /* Now, if insn does not accept our operands, put them into pseudos. */
-
- if (!insn_data[icode].operand[1].predicate (op0, mode0))
- op0 = copy_to_mode_reg (mode0, op0);
- if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
- temp = gen_reg_rtx (GET_MODE (temp));
-
- pat = GEN_FCN (icode) (temp, op0);
+ create_output_operand (&ops[0], target, GET_MODE (target));
+ create_input_operand (&ops[1], op0, GET_MODE (op0));
+ pat = maybe_gen_insn (icode, 2, ops);
if (!pat)
- {
- delete_insns_since (last);
- return false;
- }
+ return false;
if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
- add_equal_note (pat, temp, code, op0, NULL_RTX);
+ add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
emit_insn (pat);
- if (temp != target)
- emit_move_insn (target, temp);
+ if (ops[0].value != target)
+ emit_move_insn (target, ops[0].value);
return true;
}
/* Generate an instruction whose insn-code is INSN_CODE,
the value that is stored into TARGET. */
void
-emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
+emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
bool ok = maybe_emit_unop_insn (icode, target, op0, code);
gcc_assert (ok);
void
emit_indirect_jump (rtx loc)
{
- if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
- (loc, Pmode))
- loc = copy_to_mode_reg (Pmode, loc);
+ struct expand_operand ops[1];
- emit_jump_insn (gen_indirect_jump (loc));
+ create_address_operand (&ops[0], loc);
+ expand_jump_insn (CODE_FOR_indirect_jump, 1, ops);
emit_barrier ();
}
\f
enum machine_mode cmode, rtx op2, rtx op3,
enum machine_mode mode, int unsignedp)
{
- rtx tem, subtarget, comparison, insn;
+ rtx tem, comparison, last;
enum insn_code icode;
enum rtx_code reversed;
if (!target)
target = gen_reg_rtx (mode);
- subtarget = target;
-
- /* If the insn doesn't accept these operands, put them in pseudos. */
-
- if (!insn_data[icode].operand[0].predicate
- (subtarget, insn_data[icode].operand[0].mode))
- subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
-
- if (!insn_data[icode].operand[2].predicate
- (op2, insn_data[icode].operand[2].mode))
- op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
-
- if (!insn_data[icode].operand[3].predicate
- (op3, insn_data[icode].operand[3].mode))
- op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
-
- /* Everything should now be in the suitable form. */
-
code = unsignedp ? unsigned_condition (code) : code;
comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
return NULL_RTX;
do_pending_stack_adjust ();
- start_sequence ();
+ last = get_last_insn ();
prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
&comparison, &cmode);
- if (!comparison)
- insn = NULL_RTX;
- else
- insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
-
- /* If that failed, then give up. */
- if (insn == 0)
+ if (comparison)
{
- end_sequence ();
- return 0;
- }
-
- emit_insn (insn);
- insn = get_insns ();
- end_sequence ();
- emit_insn (insn);
- if (subtarget != target)
- convert_move (target, subtarget, 0);
+ struct expand_operand ops[4];
- return target;
+ create_output_operand (&ops[0], target, mode);
+ create_fixed_operand (&ops[1], comparison);
+ create_input_operand (&ops[2], op2, mode);
+ create_input_operand (&ops[3], op3, mode);
+ if (maybe_expand_insn (icode, 4, ops))
+ {
+ if (ops[0].value != target)
+ convert_move (target, ops[0].value, false);
+ return target;
+ }
+ }
+ delete_insns_since (last);
+ return NULL_RTX;
}
/* Return nonzero if a conditional move of mode MODE is supported.
enum machine_mode cmode, rtx op2, rtx op3,
enum machine_mode mode, int unsignedp)
{
- rtx tem, subtarget, comparison, insn;
+ rtx tem, comparison, last;
enum insn_code icode;
enum rtx_code reversed;
if (!target)
target = gen_reg_rtx (mode);
- /* If the insn doesn't accept these operands, put them in pseudos. */
-
- if (!insn_data[icode].operand[0].predicate
- (target, insn_data[icode].operand[0].mode))
- subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
- else
- subtarget = target;
-
- if (!insn_data[icode].operand[2].predicate
- (op2, insn_data[icode].operand[2].mode))
- op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
-
- if (!insn_data[icode].operand[3].predicate
- (op3, insn_data[icode].operand[3].mode))
- op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
-
- /* Everything should now be in the suitable form. */
-
code = unsignedp ? unsigned_condition (code) : code;
comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
return NULL_RTX;
do_pending_stack_adjust ();
- start_sequence ();
+ last = get_last_insn ();
prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
&comparison, &cmode);
- if (!comparison)
- insn = NULL_RTX;
- else
- insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
-
- /* If that failed, then give up. */
- if (insn == 0)
+ if (comparison)
{
- end_sequence ();
- return 0;
- }
-
- emit_insn (insn);
- insn = get_insns ();
- end_sequence ();
- emit_insn (insn);
- if (subtarget != target)
- convert_move (target, subtarget, 0);
+ struct expand_operand ops[4];
- return target;
+ create_output_operand (&ops[0], target, mode);
+ create_fixed_operand (&ops[1], comparison);
+ create_input_operand (&ops[2], op2, mode);
+ create_input_operand (&ops[3], op3, mode);
+ if (maybe_expand_insn (icode, 4, ops))
+ {
+ if (ops[0].value != target)
+ convert_move (target, ops[0].value, false);
+ return target;
+ }
+ }
+ delete_insns_since (last);
+ return NULL_RTX;
}
\f
/* These functions attempt to generate an insn body, rather than
static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
+ struct expand_operand ops[2];
enum rtx_code rcode;
tree t_op0, t_op1;
rtx rtx_op0, rtx_op1;
rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
EXPAND_STACK_PARM);
- if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
- && GET_MODE (rtx_op0) != VOIDmode)
- rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
-
- if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
- && GET_MODE (rtx_op1) != VOIDmode)
- rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
-
- return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
+ create_input_operand (&ops[0], rtx_op0, GET_MODE (rtx_op0));
+ create_input_operand (&ops[1], rtx_op1, GET_MODE (rtx_op1));
+ if (!maybe_legitimize_operands (icode, 4, 2, ops))
+ gcc_unreachable ();
+ return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
}
/* Return insn code for TYPE, the type of a VEC_COND_EXPR. */
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
rtx target)
{
+ struct expand_operand ops[6];
enum insn_code icode;
- rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
+ rtx comparison, rtx_op1, rtx_op2;
enum machine_mode mode = TYPE_MODE (vec_cond_type);
bool unsignedp = TYPE_UNSIGNED (vec_cond_type);
if (icode == CODE_FOR_nothing)
return 0;
- if (!target || !insn_data[icode].operand[0].predicate (target, mode))
- target = gen_reg_rtx (mode);
-
- /* Get comparison rtx. First expand both cond expr operands. */
- comparison = vector_compare_rtx (op0,
- unsignedp, icode);
- cc_op0 = XEXP (comparison, 0);
- cc_op1 = XEXP (comparison, 1);
- /* Expand both operands and force them in reg, if required. */
+ comparison = vector_compare_rtx (op0, unsignedp, icode);
rtx_op1 = expand_normal (op1);
- if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
- && mode != VOIDmode)
- rtx_op1 = force_reg (mode, rtx_op1);
-
rtx_op2 = expand_normal (op2);
- if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
- && mode != VOIDmode)
- rtx_op2 = force_reg (mode, rtx_op2);
-
- /* Emit instruction! */
- emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
- comparison, cc_op0, cc_op1));
- return target;
+ create_output_operand (&ops[0], target, mode);
+ create_input_operand (&ops[1], rtx_op1, mode);
+ create_input_operand (&ops[2], rtx_op2, mode);
+ create_fixed_operand (&ops[3], comparison);
+ create_fixed_operand (&ops[4], XEXP (comparison, 0));
+ create_fixed_operand (&ops[5], XEXP (comparison, 1));
+ expand_insn (icode, 6, ops);
+ return ops[0].value;
}
\f
expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
rtx target, enum insn_code icode)
{
+ struct expand_operand ops[4];
enum machine_mode mode = GET_MODE (mem);
- rtx insn;
- if (!target || !insn_data[icode].operand[0].predicate (target, mode))
- target = gen_reg_rtx (mode);
-
- if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
- old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
- if (!insn_data[icode].operand[2].predicate (old_val, mode))
- old_val = force_reg (mode, old_val);
-
- if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
- new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
- if (!insn_data[icode].operand[3].predicate (new_val, mode))
- new_val = force_reg (mode, new_val);
-
- insn = GEN_FCN (icode) (target, mem, old_val, new_val);
- if (insn == NULL_RTX)
- return NULL_RTX;
- emit_insn (insn);
-
- return target;
+ create_output_operand (&ops[0], target, mode);
+ create_fixed_operand (&ops[1], mem);
+ /* OLD_VAL and NEW_VAL may have been promoted to a wider mode.
+ Shrink them if so. */
+ create_convert_operand_to (&ops[2], old_val, mode, true);
+ create_convert_operand_to (&ops[3], new_val, mode, true);
+ if (maybe_expand_insn (icode, 4, ops))
+ return ops[0].value;
+ return NULL_RTX;
}
/* Expand a compare-and-swap operation and return its value. */
/* Generate the direct operation, if present. */
if (icode != CODE_FOR_nothing)
{
- if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
- val = convert_modes (mode, GET_MODE (val), val, 1);
- if (!insn_data[icode].operand[1].predicate (val, mode))
- val = force_reg (mode, val);
+ struct expand_operand ops[2];
- insn = GEN_FCN (icode) (mem, val);
- if (insn)
- {
- emit_insn (insn);
- return const0_rtx;
- }
+ create_fixed_operand (&ops[0], mem);
+ /* VAL may have been promoted to a wider mode. Shrink it if so. */
+ create_convert_operand_to (&ops[1], val, mode, true);
+ if (maybe_expand_insn (icode, 2, ops))
+ return const0_rtx;
}
/* Failing that, generate a compare-and-swap loop in which we perform the
/* If we found something supported, great. */
if (icode != CODE_FOR_nothing)
{
- if (!target || !insn_data[icode].operand[0].predicate (target, mode))
- target = gen_reg_rtx (mode);
+ struct expand_operand ops[3];
- if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
- val = convert_modes (mode, GET_MODE (val), val, 1);
- if (!insn_data[icode].operand[2].predicate (val, mode))
- val = force_reg (mode, val);
-
- insn = GEN_FCN (icode) (target, mem, val);
- if (insn)
+ create_output_operand (&ops[0], target, mode);
+ create_fixed_operand (&ops[1], mem);
+ /* VAL may have been promoted to a wider mode. Shrink it if so. */
+ create_convert_operand_to (&ops[2], val, mode, true);
+ if (maybe_expand_insn (icode, 3, ops))
{
- emit_insn (insn);
-
+ target = ops[0].value;
+ val = ops[2].value;
/* If we need to compensate for using an operation with the
wrong return value, do so now. */
if (compensate)
{
enum machine_mode mode = GET_MODE (mem);
enum insn_code icode;
- rtx insn;
/* If the target supports the test-and-set directly, great. */
icode = direct_optab_handler (sync_lock_test_and_set_optab, mode);
if (icode != CODE_FOR_nothing)
{
- if (!target || !insn_data[icode].operand[0].predicate (target, mode))
- target = gen_reg_rtx (mode);
-
- if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
- val = convert_modes (mode, GET_MODE (val), val, 1);
- if (!insn_data[icode].operand[2].predicate (val, mode))
- val = force_reg (mode, val);
+ struct expand_operand ops[3];
- insn = GEN_FCN (icode) (target, mem, val);
- if (insn)
- {
- emit_insn (insn);
- return target;
- }
+ create_output_operand (&ops[0], target, mode);
+ create_fixed_operand (&ops[1], mem);
+ /* VAL may have been promoted to a wider mode. Shrink it if so. */
+ create_convert_operand_to (&ops[2], val, mode, true);
+ if (maybe_expand_insn (icode, 3, ops))
+ return ops[0].value;
}
/* Otherwise, use a compare-and-swap loop for the exchange. */
|| (insn_data[(int) icode].operand[opno].predicate
(operand, insn_data[(int) icode].operand[opno].mode)));
}
+\f
+/* Try to make OP match operand OPNO of instruction ICODE. Return true
+ on success, storing the new operand value back in OP. */
+
+static bool
+maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
+ struct expand_operand *op)
+{
+ enum machine_mode mode, imode;
+ bool old_volatile_ok, result;
+
+ old_volatile_ok = volatile_ok;
+ mode = op->mode;
+ result = false;
+ switch (op->type)
+ {
+ case EXPAND_FIXED:
+ volatile_ok = true;
+ break;
+
+ case EXPAND_OUTPUT:
+ gcc_assert (mode != VOIDmode);
+ if (!op->value
+ || op->value == const0_rtx
+ || GET_MODE (op->value) != mode
+ || !insn_operand_matches (icode, opno, op->value))
+ op->value = gen_reg_rtx (mode);
+ break;
+
+ case EXPAND_INPUT:
+ input:
+ gcc_assert (mode != VOIDmode);
+ gcc_assert (GET_MODE (op->value) == VOIDmode
+ || GET_MODE (op->value) == mode);
+ result = insn_operand_matches (icode, opno, op->value);
+ if (!result)
+ op->value = copy_to_mode_reg (mode, op->value);
+ break;
+
+ case EXPAND_CONVERT_TO:
+ gcc_assert (mode != VOIDmode);
+ op->value = convert_to_mode (mode, op->value, op->unsigned_p);
+ goto input;
+
+ case EXPAND_CONVERT_FROM:
+ if (GET_MODE (op->value) != VOIDmode)
+ mode = GET_MODE (op->value);
+ else
+ /* The caller must tell us what mode this value has. */
+ gcc_assert (mode != VOIDmode);
+
+ imode = insn_data[(int) icode].operand[opno].mode;
+ if (imode != VOIDmode && imode != mode)
+ {
+ op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
+ mode = imode;
+ }
+ goto input;
+
+ case EXPAND_ADDRESS:
+ gcc_assert (mode != VOIDmode);
+ op->value = convert_memory_address (mode, op->value);
+ goto input;
+
+ case EXPAND_INTEGER:
+ mode = insn_data[(int) icode].operand[opno].mode;
+ if (mode != VOIDmode && const_int_operand (op->value, mode))
+ goto input;
+ break;
+ }
+ if (!result)
+ result = insn_operand_matches (icode, opno, op->value);
+ volatile_ok = old_volatile_ok;
+ return result;
+}
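The two convert cases above are the easiest to confuse, so here is a minimal usage sketch (an editorial illustration, not part of the patch). ICODE, and the assumption that its operands 1 and 2 are SImode inputs, are hypothetical.

static bool
sketch_convert_operands (enum insn_code icode, rtx promoted, rtx value)
{
  struct expand_operand ops[2];

  /* EXPAND_CONVERT_TO: PROMOTED is first converted to the mode named
     here (SImode) and then legitimized like a normal input.  */
  create_convert_operand_to (&ops[0], promoted, SImode, true);

  /* EXPAND_CONVERT_FROM: VALUE keeps its meaning but is converted to
     whatever mode the insn's operand requires; SImode is only used to
     say what mode VALUE has if it has none of its own (a CONST_INT).  */
  create_convert_operand_from (&ops[1], value, SImode, true);

  /* Legitimize them against operands 1 and 2 of ICODE.  */
  return maybe_legitimize_operands (icode, 1, 2, ops);
}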
+
+/* Make OP describe an input operand that should have the same value
+ as VALUE, after any mode conversion that the target might request.
+ TYPE is the type of VALUE. */
+
+void
+create_convert_operand_from_type (struct expand_operand *op,
+ rtx value, tree type)
+{
+ create_convert_operand_from (op, value, TYPE_MODE (type),
+ TYPE_UNSIGNED (type));
+}
+
+/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
+ of instruction ICODE. Return true on success, leaving the new operand
+ values in the OPS themselves. Emit no code on failure. */
+
+bool
+maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
+ unsigned int nops, struct expand_operand *ops)
+{
+ rtx last;
+ unsigned int i;
+
+ last = get_last_insn ();
+ for (i = 0; i < nops; i++)
+ if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
+ {
+ delete_insns_since (last);
+ return false;
+ }
+ return true;
+}
+
+/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
+ as its operands. Return the instruction pattern on success,
+ and emit any necessary set-up code. Return null and emit no
+ code on failure. */
+
+rtx
+maybe_gen_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops)
+{
+ /* n_operands includes any automatically-generated match_scratches,
+ so we can't check for equality here. */
+ gcc_assert (nops <= (unsigned int) insn_data[(int) icode].n_operands);
+ if (!maybe_legitimize_operands (icode, 0, nops, ops))
+ return NULL_RTX;
+
+ switch (nops)
+ {
+ case 1:
+ return GEN_FCN (icode) (ops[0].value);
+ case 2:
+ return GEN_FCN (icode) (ops[0].value, ops[1].value);
+ case 3:
+ return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
+ case 4:
+ return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
+ ops[3].value);
+ case 5:
+ return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
+ ops[3].value, ops[4].value);
+ case 6:
+ return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
+ ops[3].value, ops[4].value, ops[5].value);
+ }
+ gcc_unreachable ();
+}
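A hedged sketch (editorial, not part of the patch) of when a caller wants maybe_gen_insn rather than maybe_expand_insn: to get the pattern back and decorate the emitted insn, for example with a REG_EQUAL note. ICODE, the two-operand layout and EQUIV are illustrative assumptions.

static bool
sketch_emit_with_note (enum insn_code icode, rtx target, rtx src, rtx equiv)
{
  struct expand_operand ops[2];
  rtx pat, insn;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], src, GET_MODE (target));
  /* Any set-up code needed to legitimize the operands is emitted here;
     the main pattern itself is only returned, not emitted.  */
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;
  insn = emit_insn (pat);
  set_unique_reg_note (insn, REG_EQUAL, equiv);
  return true;
}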
+
+/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
+ as its operands. Return true on success and emit no code on failure. */
+
+bool
+maybe_expand_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops)
+{
+ rtx pat = maybe_gen_insn (icode, nops, ops);
+ if (pat)
+ {
+ emit_insn (pat);
+ return true;
+ }
+ return false;
+}
+
+/* Like maybe_expand_insn, but for jumps. */
+
+bool
+maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops)
+{
+ rtx pat = maybe_gen_insn (icode, nops, ops);
+ if (pat)
+ {
+ emit_jump_insn (pat);
+ return true;
+ }
+ return false;
+}
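A minimal sketch (editorial, not part of the patch) of the jump variant in use. CODE_FOR_cbranchsi4 and its operand layout (0 = comparison, 1 and 2 = its SImode arguments, 3 = the branch label) are illustrative assumptions; everything is passed as a fixed operand so that the comparison rtx and its sub-operands stay consistent.

static bool
sketch_branch_if_equal (rtx a, rtx b, rtx label)
{
  struct expand_operand ops[4];
  rtx test, last;

  last = get_last_insn ();
  a = force_reg (SImode, a);
  b = force_reg (SImode, b);
  test = gen_rtx_EQ (VOIDmode, a, b);
  create_fixed_operand (&ops[0], test);
  create_fixed_operand (&ops[1], a);
  create_fixed_operand (&ops[2], b);
  create_fixed_operand (&ops[3], label);
  if (maybe_expand_jump_insn (CODE_FOR_cbranchsi4, 4, ops))
    return true;
  /* Throw away the force_reg copies if the branch could not be made.  */
  delete_insns_since (last);
  return false;
}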
+
+/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
+ as its operands. */
+
+void
+expand_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops)
+{
+ if (!maybe_expand_insn (icode, nops, ops))
+ gcc_unreachable ();
+}
+
+/* Like expand_insn, but for jumps. */
+
+void
+expand_jump_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops)
+{
+ if (!maybe_expand_jump_insn (icode, nops, ops))
+ gcc_unreachable ();
+}
#include "gt-optabs.h"
/* Generate an instruction with a given INSN_CODE with an output and
an input. */
-extern void emit_unop_insn (int, rtx, rtx, enum rtx_code);
-extern bool maybe_emit_unop_insn (int, rtx, rtx, enum rtx_code);
+extern void emit_unop_insn (enum insn_code, rtx, rtx, enum rtx_code);
+extern bool maybe_emit_unop_insn (enum insn_code, rtx, rtx, enum rtx_code);
/* An extra flag to control optab_for_tree_code's behavior. This is needed to
distinguish between machines with a vector shift that takes a scalar for the
extern bool insn_operand_matches (enum insn_code icode, unsigned int opno,
rtx operand);
+
+/* Describes the type of an expand_operand. Each value is associated
+ with a create_*_operand function; see the comments above those
+ functions for details. */
+enum expand_operand_type {
+ EXPAND_FIXED,
+ EXPAND_OUTPUT,
+ EXPAND_INPUT,
+ EXPAND_CONVERT_TO,
+ EXPAND_CONVERT_FROM,
+ EXPAND_ADDRESS,
+ EXPAND_INTEGER
+};
+
+/* Information about an operand for instruction expansion. */
+struct expand_operand {
+ /* The type of operand. */
+ ENUM_BITFIELD (expand_operand_type) type : 8;
+
+ /* True if any conversion should treat VALUE as being unsigned
+ rather than signed. Only meaningful for certain types. */
+ unsigned int unsigned_p : 1;
+
+ /* Unused; available for future use. */
+ unsigned int unused : 7;
+
+ /* The mode passed to the create_*_operand functions. It has a
+ type-dependent meaning. */
+ ENUM_BITFIELD (machine_mode) mode : 16;
+
+ /* The value of the operand. */
+ rtx value;
+};
+
+/* Initialize OP with the given fields. Initialize the other fields
+ to their default values. */
+
+static inline void
+create_expand_operand (struct expand_operand *op,
+ enum expand_operand_type type,
+ rtx value, enum machine_mode mode,
+ bool unsigned_p)
+{
+ op->type = type;
+ op->unsigned_p = unsigned_p;
+ op->unused = 0;
+ op->mode = mode;
+ op->value = value;
+}
+
+/* Make OP describe an operand that must use rtx X, even if X is volatile. */
+
+static inline void
+create_fixed_operand (struct expand_operand *op, rtx x)
+{
+ create_expand_operand (op, EXPAND_FIXED, x, VOIDmode, false);
+}
+
+/* Make OP describe an output operand that must have mode MODE.
+ X, if nonnull, is a suggestion for where the output should be stored.
+ It is OK for X to be inconsistent with MODE, in which case X is
+ simply ignored. */
+
+static inline void
+create_output_operand (struct expand_operand *op, rtx x,
+ enum machine_mode mode)
+{
+ create_expand_operand (op, EXPAND_OUTPUT, x, mode, false);
+}
+
+/* Make OP describe an input operand that must have mode MODE and
+ value VALUE; MODE cannot be VOIDmode. The backend may request that
+ VALUE be copied into a different kind of rtx before being passed
+ as an operand. */
+
+static inline void
+create_input_operand (struct expand_operand *op, rtx value,
+ enum machine_mode mode)
+{
+ create_expand_operand (op, EXPAND_INPUT, value, mode, false);
+}
+
+/* Like create_input_operand, except that VALUE must first be converted
+ to mode MODE. UNSIGNED_P says whether VALUE is unsigned. */
+
+static inline void
+create_convert_operand_to (struct expand_operand *op, rtx value,
+ enum machine_mode mode, bool unsigned_p)
+{
+ create_expand_operand (op, EXPAND_CONVERT_TO, value, mode, unsigned_p);
+}
+
+/* Make OP describe an input operand that should have the same value
+ as VALUE, after any mode conversion that the backend might request.
+ If VALUE is a CONST_INT, it should be treated as having mode MODE.
+ UNSIGNED_P says whether VALUE is unsigned. */
+
+static inline void
+create_convert_operand_from (struct expand_operand *op, rtx value,
+ enum machine_mode mode, bool unsigned_p)
+{
+ create_expand_operand (op, EXPAND_CONVERT_FROM, value, mode, unsigned_p);
+}
+
+extern void create_convert_operand_from_type (struct expand_operand *op,
+ rtx value, tree type);
+
+/* Make OP describe an input Pmode address operand. VALUE is the value
+ of the address, but it may need to be converted to Pmode first. */
+
+static inline void
+create_address_operand (struct expand_operand *op, rtx value)
+{
+ create_expand_operand (op, EXPAND_ADDRESS, value, Pmode, false);
+}
+
+/* Make OP describe an input operand that has value INTVAL and that has
+ no inherent mode. This function should only be used for operands that
+ are always expand-time constants. The backend may request that INTVAL
+ be copied into a different kind of rtx, but in that case the insn's
+ operand must specify the mode of that rtx. */
+
+static inline void
+create_integer_operand (struct expand_operand *op, HOST_WIDE_INT intval)
+{
+ create_expand_operand (op, EXPAND_INTEGER, GEN_INT (intval), VOIDmode, false);
+}
+
+extern bool maybe_legitimize_operands (enum insn_code icode,
+ unsigned int opno, unsigned int nops,
+ struct expand_operand *ops);
+extern rtx maybe_gen_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops);
+extern bool maybe_expand_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops);
+extern bool maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops);
+extern void expand_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops);
+extern void expand_jump_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops);
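An end-to-end sketch (editorial, not part of the patch) of the calling convention these declarations support. CODE_FOR_addsi3 and its operand layout (0 = SImode output, 1 and 2 = SImode inputs) are illustrative assumptions; a real caller would normally obtain the insn code from an optab and fall back to a library call or an open-coded sequence when expansion fails.

static rtx
sketch_expand_add (rtx target, rtx a, rtx b)
{
  struct expand_operand ops[3];

  create_output_operand (&ops[0], target, SImode);
  create_input_operand (&ops[1], a, SImode);
  create_input_operand (&ops[2], b, SImode);
  if (maybe_expand_insn (CODE_FOR_addsi3, 3, ops))
    /* The result may have been placed somewhere other than TARGET.  */
    return ops[0].value;
  return NULL_RTX;
}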
+
extern rtx prepare_operand (enum insn_code, rtx, int, enum machine_mode,
enum machine_mode, int);