+2018-08-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * doc/md.texi: Expand the documentation of instruction names
+ to mention port-local uses. Document '@' in pattern names.
+ * read-md.h (overloaded_instance, overloaded_name): New structs.
+ (mapping): Declare.
+ (md_reader::handle_overloaded_name): New member function.
+ (md_reader::get_overloads): Likewise.
+ (md_reader::m_first_overload): New member variable.
+ (md_reader::m_next_overload_ptr): Likewise.
+ (md_reader::m_overloads_htab): Likewise.
+ * read-md.c (md_reader::md_reader): Initialize m_first_overload,
+ m_next_overload_ptr and m_overloads_htab.
+ * read-rtl.c (iterator_group): Add "type" and "get_c_token" fields.
+ (get_mode_token, get_code_token, get_int_token): New functions.
+ (map_attr_string): Add an optional argument that passes back
+ the associated iterator.
+ (overloaded_name_hash, overloaded_name_eq_p, named_rtx_p)
+ (md_reader::handle_overloaded_name, add_overload_instance): New
+ functions.
+ (apply_iterators): Handle '@' names. Report an error if '@'
+ is used without iterators.
+ (initialize_iterators): Initialize the new iterator_group fields.
+ * genopinit.c (handle_overloaded_code_for)
+ (handle_overloaded_gen): New functions.
+ (main): Use them to print declarations of maybe_code_for_* and
+ maybe_gen_* functions, and inline definitions of code_for_* and gen_*.
+ * genemit.c (print_overload_arguments, print_overload_test)
+ (handle_overloaded_code_for, handle_overloaded_gen): New functions.
+ (main): Use them to print definitions of maybe_code_for_* and
+ maybe_gen_* functions.
+ * config/aarch64/aarch64.c (aarch64_split_128bit_move): Use
+ gen_aarch64_mov{low,high}_di and gen_aarch64_movdi_{low,high}
+ instead of explicit mode checks.
+ (aarch64_split_simd_combine): Likewise gen_aarch64_simd_combine.
+ (aarch64_split_simd_move): Likewise gen_aarch64_split_simd_mov.
+ (aarch64_emit_load_exclusive): Likewise gen_aarch64_load_exclusive.
+ (aarch64_emit_store_exclusive): Likewise gen_aarch64_store_exclusive.
+ (aarch64_expand_compare_and_swap): Likewise
+ gen_aarch64_compare_and_swap and gen_aarch64_compare_and_swap_lse.
+ (aarch64_gen_atomic_cas): Likewise gen_aarch64_atomic_cas.
+ (aarch64_emit_atomic_swap): Likewise gen_aarch64_atomic_swp.
+ (aarch64_constant_pool_reload_icode): Delete.
+ (aarch64_secondary_reload): Use code_for_aarch64_reload_movcp
+ instead of aarch64_constant_pool_reload_icode. Use
+ code_for_aarch64_reload_mov instead of explicit mode checks.
+ (rsqrte_type, get_rsqrte_type, rsqrts_type, get_rsqrts_type): Delete.
+ (aarch64_emit_approx_sqrt): Use gen_aarch64_rsqrte instead of
+ get_rsqrte_type and gen_aarch64_rsqrts instead of get_rsqrts_type.
+ (recpe_type, get_recpe_type, recps_type, get_recps_type): Delete.
+ (aarch64_emit_approx_div): Use gen_aarch64_frecpe instead of
+ get_recpe_type and gen_aarch64_frecps instead of get_recps_type.
+ (aarch64_atomic_load_op_code): Delete.
+ (aarch64_emit_atomic_load_op): Likewise.
+ (aarch64_gen_atomic_ldop): Use UNSPECV_ATOMIC_* instead of
+ aarch64_atomic_load_op_code. Use gen_aarch64_atomic_load
+ instead of aarch64_emit_atomic_load_op.
+ * config/aarch64/aarch64.md (aarch64_reload_movcp<GPF_TF:mode><P:mode>)
+ (aarch64_reload_movcp<VALL:mode><P:mode>, aarch64_reload_mov<mode>)
+ (aarch64_movdi_<mode>low, aarch64_movdi_<mode>high)
+ (aarch64_mov<mode>high_di, aarch64_mov<mode>low_di): Add a '@'
+ character before the pattern name.
+ * config/aarch64/aarch64-simd.md (aarch64_split_simd_mov<mode>)
+ (aarch64_rsqrte<mode>, aarch64_rsqrts<mode>)
+ (aarch64_simd_combine<mode>, aarch64_frecpe<mode>)
+ (aarch64_frecps<mode>): Likewise.
+ * config/aarch64/atomics.md (atomic_compare_and_swap<mode>)
+ (aarch64_compare_and_swap<mode>, aarch64_compare_and_swap<mode>_lse)
+ (aarch64_load_exclusive<mode>, aarch64_store_exclusive<mode>)
+ (aarch64_atomic_swp<mode>, aarch64_atomic_cas<mode>)
+ (aarch64_atomic_load<atomic_ldop><mode>): Likewise.
+
2018-08-02 Richard Sandiford <richard.sandiford@arm.com>
* config/aarch64/aarch64.c (aarch64_float_const_representable_p):
DONE;
})
-(define_expand "aarch64_split_simd_mov<mode>"
+(define_expand "@aarch64_split_simd_mov<mode>"
[(set (match_operand:VQ 0)
(match_operand:VQ 1))]
"TARGET_SIMD"
[(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)
-(define_insn "aarch64_rsqrte<mode>"
+(define_insn "@aarch64_rsqrte<mode>"
[(set (match_operand:VHSDF_HSDF 0 "register_operand" "=w")
(unspec:VHSDF_HSDF [(match_operand:VHSDF_HSDF 1 "register_operand" "w")]
UNSPEC_RSQRTE))]
"frsqrte\\t%<v>0<Vmtype>, %<v>1<Vmtype>"
[(set_attr "type" "neon_fp_rsqrte_<stype><q>")])
-(define_insn "aarch64_rsqrts<mode>"
+(define_insn "@aarch64_rsqrts<mode>"
[(set (match_operand:VHSDF_HSDF 0 "register_operand" "=w")
(unspec:VHSDF_HSDF [(match_operand:VHSDF_HSDF 1 "register_operand" "w")
(match_operand:VHSDF_HSDF 2 "register_operand" "w")]
}
)
-(define_expand "aarch64_simd_combine<mode>"
+(define_expand "@aarch64_simd_combine<mode>"
[(match_operand:<VDBL> 0 "register_operand")
(match_operand:VDC 1 "register_operand")
(match_operand:VDC 2 "register_operand")]
)
-(define_insn "aarch64_frecpe<mode>"
+(define_insn "@aarch64_frecpe<mode>"
[(set (match_operand:VHSDF 0 "register_operand" "=w")
(unspec:VHSDF [(match_operand:VHSDF 1 "register_operand" "w")]
UNSPEC_FRECPE))]
[(set_attr "type" "neon_fp_recp<FRECP:frecp_suffix>_<GPF_F16:stype>")]
)
-(define_insn "aarch64_frecps<mode>"
+(define_insn "@aarch64_frecps<mode>"
[(set (match_operand:VHSDF_HSDF 0 "register_operand" "=w")
(unspec:VHSDF_HSDF
[(match_operand:VHSDF_HSDF 1 "register_operand" "w")
src_lo = gen_lowpart (word_mode, src);
src_hi = gen_highpart (word_mode, src);
- if (mode == TImode)
- {
- emit_insn (gen_aarch64_movtilow_di (dst, src_lo));
- emit_insn (gen_aarch64_movtihigh_di (dst, src_hi));
- }
- else
- {
- emit_insn (gen_aarch64_movtflow_di (dst, src_lo));
- emit_insn (gen_aarch64_movtfhigh_di (dst, src_hi));
- }
+ emit_insn (gen_aarch64_movlow_di (mode, dst, src_lo));
+ emit_insn (gen_aarch64_movhigh_di (mode, dst, src_hi));
return;
}
else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno))
dst_lo = gen_lowpart (word_mode, dst);
dst_hi = gen_highpart (word_mode, dst);
- if (mode == TImode)
- {
- emit_insn (gen_aarch64_movdi_tilow (dst_lo, src));
- emit_insn (gen_aarch64_movdi_tihigh (dst_hi, src));
- }
- else
- {
- emit_insn (gen_aarch64_movdi_tflow (dst_lo, src));
- emit_insn (gen_aarch64_movdi_tfhigh (dst_hi, src));
- }
+ emit_insn (gen_aarch64_movdi_low (mode, dst_lo, src));
+ emit_insn (gen_aarch64_movdi_high (mode, dst_hi, src));
return;
}
}
&& register_operand (src1, src_mode)
&& register_operand (src2, src_mode));
- rtx (*gen) (rtx, rtx, rtx);
-
- switch (src_mode)
- {
- case E_V8QImode:
- gen = gen_aarch64_simd_combinev8qi;
- break;
- case E_V4HImode:
- gen = gen_aarch64_simd_combinev4hi;
- break;
- case E_V2SImode:
- gen = gen_aarch64_simd_combinev2si;
- break;
- case E_V4HFmode:
- gen = gen_aarch64_simd_combinev4hf;
- break;
- case E_V2SFmode:
- gen = gen_aarch64_simd_combinev2sf;
- break;
- case E_DImode:
- gen = gen_aarch64_simd_combinedi;
- break;
- case E_DFmode:
- gen = gen_aarch64_simd_combinedf;
- break;
- default:
- gcc_unreachable ();
- }
-
- emit_insn (gen (dst, src1, src2));
+ emit_insn (gen_aarch64_simd_combine (src_mode, dst, src1, src2));
return;
}
if (REG_P (dst) && REG_P (src))
{
- rtx (*gen) (rtx, rtx);
-
gcc_assert (VECTOR_MODE_P (src_mode));
-
- switch (src_mode)
- {
- case E_V16QImode:
- gen = gen_aarch64_split_simd_movv16qi;
- break;
- case E_V8HImode:
- gen = gen_aarch64_split_simd_movv8hi;
- break;
- case E_V4SImode:
- gen = gen_aarch64_split_simd_movv4si;
- break;
- case E_V2DImode:
- gen = gen_aarch64_split_simd_movv2di;
- break;
- case E_V8HFmode:
- gen = gen_aarch64_split_simd_movv8hf;
- break;
- case E_V4SFmode:
- gen = gen_aarch64_split_simd_movv4sf;
- break;
- case E_V2DFmode:
- gen = gen_aarch64_split_simd_movv2df;
- break;
- default:
- gcc_unreachable ();
- }
-
- emit_insn (gen (dst, src));
- return;
+ emit_insn (gen_aarch64_split_simd_mov (src_mode, dst, src));
}
}
return x;
}
-/* Return the reload icode required for a constant pool in mode. */
-static enum insn_code
-aarch64_constant_pool_reload_icode (machine_mode mode)
-{
- switch (mode)
- {
- case E_SFmode:
- return CODE_FOR_aarch64_reload_movcpsfdi;
-
- case E_DFmode:
- return CODE_FOR_aarch64_reload_movcpdfdi;
-
- case E_TFmode:
- return CODE_FOR_aarch64_reload_movcptfdi;
-
- case E_V8QImode:
- return CODE_FOR_aarch64_reload_movcpv8qidi;
-
- case E_V16QImode:
- return CODE_FOR_aarch64_reload_movcpv16qidi;
-
- case E_V4HImode:
- return CODE_FOR_aarch64_reload_movcpv4hidi;
-
- case E_V8HImode:
- return CODE_FOR_aarch64_reload_movcpv8hidi;
-
- case E_V2SImode:
- return CODE_FOR_aarch64_reload_movcpv2sidi;
-
- case E_V4SImode:
- return CODE_FOR_aarch64_reload_movcpv4sidi;
-
- case E_V2DImode:
- return CODE_FOR_aarch64_reload_movcpv2didi;
-
- case E_V2DFmode:
- return CODE_FOR_aarch64_reload_movcpv2dfdi;
-
- default:
- gcc_unreachable ();
- }
-
- gcc_unreachable ();
-}
static reg_class_t
aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
reg_class_t rclass,
|| targetm.vector_mode_supported_p (GET_MODE (x)))
&& !aarch64_pcrelative_literal_loads)
{
- sri->icode = aarch64_constant_pool_reload_icode (mode);
+ sri->icode = code_for_aarch64_reload_movcp (mode, DImode);
return NO_REGS;
}
&& FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
&& reg_class_subset_p (rclass, FP_REGS))
{
- if (mode == TFmode)
- sri->icode = CODE_FOR_aarch64_reload_movtf;
- else if (mode == TImode)
- sri->icode = CODE_FOR_aarch64_reload_movti;
+ sri->icode = code_for_aarch64_reload_mov (mode);
return NO_REGS;
}
return aarch64_builtin_rsqrt (DECL_FUNCTION_CODE (fndecl));
}
-typedef rtx (*rsqrte_type) (rtx, rtx);
-
-/* Select reciprocal square root initial estimate insn depending on machine
- mode. */
-
-static rsqrte_type
-get_rsqrte_type (machine_mode mode)
-{
- switch (mode)
- {
- case E_DFmode: return gen_aarch64_rsqrtedf;
- case E_SFmode: return gen_aarch64_rsqrtesf;
- case E_V2DFmode: return gen_aarch64_rsqrtev2df;
- case E_V2SFmode: return gen_aarch64_rsqrtev2sf;
- case E_V4SFmode: return gen_aarch64_rsqrtev4sf;
- default: gcc_unreachable ();
- }
-}
-
-typedef rtx (*rsqrts_type) (rtx, rtx, rtx);
-
-/* Select reciprocal square root series step insn depending on machine mode. */
-
-static rsqrts_type
-get_rsqrts_type (machine_mode mode)
-{
- switch (mode)
- {
- case E_DFmode: return gen_aarch64_rsqrtsdf;
- case E_SFmode: return gen_aarch64_rsqrtssf;
- case E_V2DFmode: return gen_aarch64_rsqrtsv2df;
- case E_V2SFmode: return gen_aarch64_rsqrtsv2sf;
- case E_V4SFmode: return gen_aarch64_rsqrtsv4sf;
- default: gcc_unreachable ();
- }
-}
-
/* Emit instruction sequence to compute either the approximate square root
or its approximate reciprocal, depending on the flag RECP, and return
whether the sequence was emitted or not. */
/* Estimate the approximate reciprocal square root. */
rtx xdst = gen_reg_rtx (mode);
- emit_insn ((*get_rsqrte_type (mode)) (xdst, src));
+ emit_insn (gen_aarch64_rsqrte (mode, xdst, src));
/* Iterate over the series twice for SF and thrice for DF. */
int iterations = (GET_MODE_INNER (mode) == DFmode) ? 3 : 2;
rtx x2 = gen_reg_rtx (mode);
emit_set_insn (x2, gen_rtx_MULT (mode, xdst, xdst));
- emit_insn ((*get_rsqrts_type (mode)) (x1, src, x2));
+ emit_insn (gen_aarch64_rsqrts (mode, x1, src, x2));
if (iterations > 0)
emit_set_insn (xdst, gen_rtx_MULT (mode, xdst, x1));
return true;
}
-typedef rtx (*recpe_type) (rtx, rtx);
-
-/* Select reciprocal initial estimate insn depending on machine mode. */
-
-static recpe_type
-get_recpe_type (machine_mode mode)
-{
- switch (mode)
- {
- case E_SFmode: return (gen_aarch64_frecpesf);
- case E_V2SFmode: return (gen_aarch64_frecpev2sf);
- case E_V4SFmode: return (gen_aarch64_frecpev4sf);
- case E_DFmode: return (gen_aarch64_frecpedf);
- case E_V2DFmode: return (gen_aarch64_frecpev2df);
- default: gcc_unreachable ();
- }
-}
-
-typedef rtx (*recps_type) (rtx, rtx, rtx);
-
-/* Select reciprocal series step insn depending on machine mode. */
-
-static recps_type
-get_recps_type (machine_mode mode)
-{
- switch (mode)
- {
- case E_SFmode: return (gen_aarch64_frecpssf);
- case E_V2SFmode: return (gen_aarch64_frecpsv2sf);
- case E_V4SFmode: return (gen_aarch64_frecpsv4sf);
- case E_DFmode: return (gen_aarch64_frecpsdf);
- case E_V2DFmode: return (gen_aarch64_frecpsv2df);
- default: gcc_unreachable ();
- }
-}
-
/* Emit the instruction sequence to compute the approximation for the division
of NUM by DEN in QUO and return whether the sequence was emitted or not. */
/* Estimate the approximate reciprocal. */
rtx xrcp = gen_reg_rtx (mode);
- emit_insn ((*get_recpe_type (mode)) (xrcp, den));
+ emit_insn (gen_aarch64_frecpe (mode, xrcp, den));
/* Iterate over the series twice for SF and thrice for DF. */
int iterations = (GET_MODE_INNER (mode) == DFmode) ? 3 : 2;
rtx xtmp = gen_reg_rtx (mode);
while (iterations--)
{
- emit_insn ((*get_recps_type (mode)) (xtmp, xrcp, den));
+ emit_insn (gen_aarch64_frecps (mode, xtmp, xrcp, den));
if (iterations > 0)
emit_set_insn (xrcp, gen_rtx_MULT (mode, xrcp, xtmp));
aarch64_emit_load_exclusive (machine_mode mode, rtx rval,
rtx mem, rtx model_rtx)
{
- rtx (*gen) (rtx, rtx, rtx);
-
- switch (mode)
- {
- case E_QImode: gen = gen_aarch64_load_exclusiveqi; break;
- case E_HImode: gen = gen_aarch64_load_exclusivehi; break;
- case E_SImode: gen = gen_aarch64_load_exclusivesi; break;
- case E_DImode: gen = gen_aarch64_load_exclusivedi; break;
- default:
- gcc_unreachable ();
- }
-
- emit_insn (gen (rval, mem, model_rtx));
+ emit_insn (gen_aarch64_load_exclusive (mode, rval, mem, model_rtx));
}
/* Emit store exclusive. */
aarch64_emit_store_exclusive (machine_mode mode, rtx bval,
rtx rval, rtx mem, rtx model_rtx)
{
- rtx (*gen) (rtx, rtx, rtx, rtx);
-
- switch (mode)
- {
- case E_QImode: gen = gen_aarch64_store_exclusiveqi; break;
- case E_HImode: gen = gen_aarch64_store_exclusivehi; break;
- case E_SImode: gen = gen_aarch64_store_exclusivesi; break;
- case E_DImode: gen = gen_aarch64_store_exclusivedi; break;
- default:
- gcc_unreachable ();
- }
-
- emit_insn (gen (bval, rval, mem, model_rtx));
+ emit_insn (gen_aarch64_store_exclusive (mode, bval, rval, mem, model_rtx));
}
/* Mark the previous jump instruction as unlikely. */
{
rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
machine_mode mode, cmp_mode;
- typedef rtx (*gen_cas_fn) (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
- int idx;
- gen_cas_fn gen;
- const gen_cas_fn split_cas[] =
- {
- gen_aarch64_compare_and_swapqi,
- gen_aarch64_compare_and_swaphi,
- gen_aarch64_compare_and_swapsi,
- gen_aarch64_compare_and_swapdi
- };
- const gen_cas_fn atomic_cas[] =
- {
- gen_aarch64_compare_and_swapqi_lse,
- gen_aarch64_compare_and_swaphi_lse,
- gen_aarch64_compare_and_swapsi_lse,
- gen_aarch64_compare_and_swapdi_lse
- };
bval = operands[0];
rval = operands[1];
gcc_unreachable ();
}
- switch (mode)
- {
- case E_QImode: idx = 0; break;
- case E_HImode: idx = 1; break;
- case E_SImode: idx = 2; break;
- case E_DImode: idx = 3; break;
- default:
- gcc_unreachable ();
- }
if (TARGET_LSE)
- gen = atomic_cas[idx];
+ emit_insn (gen_aarch64_compare_and_swap_lse (mode, rval, mem, oldval,
+ newval, is_weak, mod_s,
+ mod_f));
else
- gen = split_cas[idx];
+ emit_insn (gen_aarch64_compare_and_swap (mode, rval, mem, oldval, newval,
+ is_weak, mod_s, mod_f));
- emit_insn (gen (rval, mem, oldval, newval, is_weak, mod_s, mod_f));
if (mode == QImode || mode == HImode)
emit_move_insn (operands[1], gen_lowpart (mode, rval));
rtx expected, rtx desired,
rtx model)
{
- rtx (*gen) (rtx, rtx, rtx, rtx);
machine_mode mode;
mode = GET_MODE (mem);
- switch (mode)
- {
- case E_QImode: gen = gen_aarch64_atomic_casqi; break;
- case E_HImode: gen = gen_aarch64_atomic_cashi; break;
- case E_SImode: gen = gen_aarch64_atomic_cassi; break;
- case E_DImode: gen = gen_aarch64_atomic_casdi; break;
- default:
- gcc_unreachable ();
- }
-
/* Move the expected value into the CAS destination register. */
emit_insn (gen_rtx_SET (rval, expected));
/* Emit the CAS. */
- emit_insn (gen (rval, mem, desired, model));
+ emit_insn (gen_aarch64_atomic_cas (mode, rval, mem, desired, model));
/* Compare the expected value with the value loaded by the CAS, to establish
whether the swap was made. */
aarch64_emit_atomic_swap (machine_mode mode, rtx dst, rtx value,
rtx mem, rtx model)
{
- rtx (*gen) (rtx, rtx, rtx, rtx);
-
- switch (mode)
- {
- case E_QImode: gen = gen_aarch64_atomic_swpqi; break;
- case E_HImode: gen = gen_aarch64_atomic_swphi; break;
- case E_SImode: gen = gen_aarch64_atomic_swpsi; break;
- case E_DImode: gen = gen_aarch64_atomic_swpdi; break;
- default:
- gcc_unreachable ();
- }
-
- emit_insn (gen (dst, mem, value, model));
-}
-
-/* Operations supported by aarch64_emit_atomic_load_op. */
-
-enum aarch64_atomic_load_op_code
-{
- AARCH64_LDOP_PLUS, /* A + B */
- AARCH64_LDOP_XOR, /* A ^ B */
- AARCH64_LDOP_OR, /* A | B */
- AARCH64_LDOP_BIC /* A & ~B */
-};
-
-/* Emit an atomic load-operate. */
-
-static void
-aarch64_emit_atomic_load_op (enum aarch64_atomic_load_op_code code,
- machine_mode mode, rtx dst, rtx src,
- rtx mem, rtx model)
-{
- typedef rtx (*aarch64_atomic_load_op_fn) (rtx, rtx, rtx, rtx);
- const aarch64_atomic_load_op_fn plus[] =
- {
- gen_aarch64_atomic_loadaddqi,
- gen_aarch64_atomic_loadaddhi,
- gen_aarch64_atomic_loadaddsi,
- gen_aarch64_atomic_loadadddi
- };
- const aarch64_atomic_load_op_fn eor[] =
- {
- gen_aarch64_atomic_loadeorqi,
- gen_aarch64_atomic_loadeorhi,
- gen_aarch64_atomic_loadeorsi,
- gen_aarch64_atomic_loadeordi
- };
- const aarch64_atomic_load_op_fn ior[] =
- {
- gen_aarch64_atomic_loadsetqi,
- gen_aarch64_atomic_loadsethi,
- gen_aarch64_atomic_loadsetsi,
- gen_aarch64_atomic_loadsetdi
- };
- const aarch64_atomic_load_op_fn bic[] =
- {
- gen_aarch64_atomic_loadclrqi,
- gen_aarch64_atomic_loadclrhi,
- gen_aarch64_atomic_loadclrsi,
- gen_aarch64_atomic_loadclrdi
- };
- aarch64_atomic_load_op_fn gen;
- int idx = 0;
-
- switch (mode)
- {
- case E_QImode: idx = 0; break;
- case E_HImode: idx = 1; break;
- case E_SImode: idx = 2; break;
- case E_DImode: idx = 3; break;
- default:
- gcc_unreachable ();
- }
-
- switch (code)
- {
- case AARCH64_LDOP_PLUS: gen = plus[idx]; break;
- case AARCH64_LDOP_XOR: gen = eor[idx]; break;
- case AARCH64_LDOP_OR: gen = ior[idx]; break;
- case AARCH64_LDOP_BIC: gen = bic[idx]; break;
- default:
- gcc_unreachable ();
- }
-
- emit_insn (gen (dst, mem, src, model));
+ emit_insn (gen_aarch64_atomic_swp (mode, dst, mem, value, model));
}
/* Emit an atomic load+operate. CODE is the operation. OUT_DATA is the
machine_mode mode = GET_MODE (mem);
machine_mode wmode = (mode == DImode ? DImode : SImode);
const bool short_mode = (mode < SImode);
- aarch64_atomic_load_op_code ldop_code;
+ int ldop_code;
rtx src;
rtx x;
}
/* Fall-through. */
case PLUS:
- ldop_code = AARCH64_LDOP_PLUS;
+ ldop_code = UNSPECV_ATOMIC_LDOP_PLUS;
break;
case IOR:
- ldop_code = AARCH64_LDOP_OR;
+ ldop_code = UNSPECV_ATOMIC_LDOP_OR;
break;
case XOR:
- ldop_code = AARCH64_LDOP_XOR;
+ ldop_code = UNSPECV_ATOMIC_LDOP_XOR;
break;
case AND:
if (short_mode)
src = gen_lowpart (mode, src);
}
- ldop_code = AARCH64_LDOP_BIC;
+ ldop_code = UNSPECV_ATOMIC_LDOP_BIC;
break;
default:
gcc_unreachable ();
}
- aarch64_emit_atomic_load_op (ldop_code, mode, out_data, src, mem, model_rtx);
+ emit_insn (gen_aarch64_atomic_load (ldop_code, mode,
+ out_data, mem, src, model_rtx));
/* If necessary, calculate the data in memory after the update by redoing the
operation from values in registers. */
;; -------------------------------------------------------------------
;; Reload Scalar Floating point modes from constant pool.
;; The AArch64 port doesn't have __int128 constant move support.
-(define_expand "aarch64_reload_movcp<GPF_TF:mode><P:mode>"
+(define_expand "@aarch64_reload_movcp<GPF_TF:mode><P:mode>"
[(set (match_operand:GPF_TF 0 "register_operand" "=w")
(mem:GPF_TF (match_operand 1 "aarch64_constant_pool_symref" "S")))
(clobber (match_operand:P 2 "register_operand" "=&r"))]
)
;; Reload Vector modes from constant pool.
-(define_expand "aarch64_reload_movcp<VALL:mode><P:mode>"
+(define_expand "@aarch64_reload_movcp<VALL:mode><P:mode>"
[(set (match_operand:VALL 0 "register_operand" "=w")
(mem:VALL (match_operand 1 "aarch64_constant_pool_symref" "S")))
(clobber (match_operand:P 2 "register_operand" "=&r"))]
}
)
-(define_expand "aarch64_reload_mov<mode>"
+(define_expand "@aarch64_reload_mov<mode>"
[(set (match_operand:TX 0 "register_operand" "=w")
(match_operand:TX 1 "register_operand" "w"))
(clobber (match_operand:DI 2 "register_operand" "=&r"))
;; after or during reload as we don't want these patterns to start
;; kicking in during the combiner.
-(define_insn "aarch64_movdi_<mode>low"
+(define_insn "@aarch64_movdi_<mode>low"
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extract:DI (match_operand:TX 1 "register_operand" "w")
(const_int 64) (const_int 0)))]
(set_attr "length" "4")
])
-(define_insn "aarch64_movdi_<mode>high"
+(define_insn "@aarch64_movdi_<mode>high"
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extract:DI (match_operand:TX 1 "register_operand" "w")
(const_int 64) (const_int 64)))]
(set_attr "length" "4")
])
-(define_insn "aarch64_mov<mode>high_di"
+(define_insn "@aarch64_mov<mode>high_di"
[(set (zero_extract:TX (match_operand:TX 0 "register_operand" "+w")
(const_int 64) (const_int 64))
(zero_extend:TX (match_operand:DI 1 "register_operand" "r")))]
(set_attr "length" "4")
])
-(define_insn "aarch64_mov<mode>low_di"
+(define_insn "@aarch64_mov<mode>low_di"
[(set (match_operand:TX 0 "register_operand" "=w")
(zero_extend:TX (match_operand:DI 1 "register_operand" "r")))]
"TARGET_FLOAT && (reload_completed || reload_in_progress)"
;; Instruction patterns.
-(define_expand "atomic_compare_and_swap<mode>"
+(define_expand "@atomic_compare_and_swap<mode>"
[(match_operand:SI 0 "register_operand" "") ;; bool out
(match_operand:ALLI 1 "register_operand" "") ;; val out
(match_operand:ALLI 2 "aarch64_sync_memory_operand" "") ;; memory
}
)
-(define_insn_and_split "aarch64_compare_and_swap<mode>"
+(define_insn_and_split "@aarch64_compare_and_swap<mode>"
[(set (reg:CC CC_REGNUM) ;; bool out
(unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
(set (match_operand:SI 0 "register_operand" "=&r") ;; val out
}
)
-(define_insn_and_split "aarch64_compare_and_swap<mode>"
+(define_insn_and_split "@aarch64_compare_and_swap<mode>"
[(set (reg:CC CC_REGNUM) ;; bool out
(unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
(set (match_operand:GPI 0 "register_operand" "=&r") ;; val out
}
)
-(define_insn_and_split "aarch64_compare_and_swap<mode>_lse"
+(define_insn_and_split "@aarch64_compare_and_swap<mode>_lse"
[(set (reg:CC CC_REGNUM) ;; bool out
(unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
(set (match_operand:SI 0 "register_operand" "=&r") ;; val out
}
)
-(define_insn_and_split "aarch64_compare_and_swap<mode>_lse"
+(define_insn_and_split "@aarch64_compare_and_swap<mode>_lse"
[(set (reg:CC CC_REGNUM) ;; bool out
(unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
(set (match_operand:GPI 0 "register_operand" "=&r") ;; val out
}
)
-(define_insn "aarch64_load_exclusive<mode>"
+(define_insn "@aarch64_load_exclusive<mode>"
[(set (match_operand:SI 0 "register_operand" "=r")
(zero_extend:SI
(unspec_volatile:SHORT
}
)
-(define_insn "aarch64_load_exclusive<mode>"
+(define_insn "@aarch64_load_exclusive<mode>"
[(set (match_operand:GPI 0 "register_operand" "=r")
(unspec_volatile:GPI
[(match_operand:GPI 1 "aarch64_sync_memory_operand" "Q")
}
)
-(define_insn "aarch64_store_exclusive<mode>"
+(define_insn "@aarch64_store_exclusive<mode>"
[(set (match_operand:SI 0 "register_operand" "=&r")
(unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
(set (match_operand:ALLI 1 "aarch64_sync_memory_operand" "=Q")
;; ARMv8.1-A LSE instructions.
;; Atomic swap with memory.
-(define_insn "aarch64_atomic_swp<mode>"
+(define_insn "@aarch64_atomic_swp<mode>"
[(set (match_operand:ALLI 0 "register_operand" "+&r")
(match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
(set (match_dup 1)
;; Atomic compare-and-swap: HI and smaller modes.
-(define_insn "aarch64_atomic_cas<mode>"
+(define_insn "@aarch64_atomic_cas<mode>"
[(set (match_operand:SI 0 "register_operand" "+&r") ;; out
(zero_extend:SI
(match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory.
;; Atomic compare-and-swap: SI and larger modes.
-(define_insn "aarch64_atomic_cas<mode>"
+(define_insn "@aarch64_atomic_cas<mode>"
[(set (match_operand:GPI 0 "register_operand" "+&r") ;; out
(match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory.
(set (match_dup 1)
;; Atomic load-op: Load data, operate, store result, keep data.
-(define_insn "aarch64_atomic_load<atomic_ldop><mode>"
+(define_insn "@aarch64_atomic_load<atomic_ldop><mode>"
[(set (match_operand:ALLI 0 "register_operand" "=r")
(match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
(set (match_dup 1)
@enumerate
@item
-An optional name. The presence of a name indicates that this instruction
-pattern can perform a certain standard job for the RTL-generation
-pass of the compiler. This pass knows certain names and will use
-the instruction patterns with those names, if the names are defined
-in the machine description.
+An optional name @var{n}. When a name is present, the compiler
+automatically generates a C++ function @samp{gen_@var{n}} that takes
+the operands of the instruction as arguments and returns the instruction's
+rtx pattern. The compiler also assigns the instruction a unique code
+@samp{CODE_FOR_@var{n}}, with all such codes belonging to an enum
+called @code{insn_code}.
+
+These names serve one of two purposes. The first is to indicate that the
+instruction performs a certain standard job for the RTL-generation
+pass of the compiler, such as a move, an addition, or a conditional
+jump. The second is to help the target generate certain target-specific
+operations, such as when implementing target-specific intrinsic functions.
+
+It is good practice to prefix target-specific names with the name of the
+target, to avoid any clash with current or future standard names.
The absence of a name is indicated by writing an empty string
where the name should go. Nameless instruction patterns are never
used for generating RTL code, but they may permit several simpler insns
to be combined later on.
-Names that are not thus known and used in RTL-generation have no
-effect; they are equivalent to no name at all.
-
For the purpose of debugging the compiler, you may also specify a
name beginning with the @samp{*} character. Such a name is used only
for identifying the instruction in RTL dumps; it is equivalent to having
a nameless pattern for all other purposes. Names beginning with the
@samp{*} character are not required to be unique.
+The name may also have the form @samp{@@@var{n}}. This has the same
+effect as a name @samp{@var{n}}, but in addition tells the compiler to
+generate further helper functions; see @ref{Parameterized Names} for details.
+
@item
The @dfn{RTL template}: This is a vector of incomplete RTL expressions
which describe the semantics of the instruction (@pxref{RTL Template}).
* Code Iterators:: Doing the same for codes.
* Int Iterators:: Doing the same for integers.
* Subst Iterators:: Generating variations of patterns for define_subst.
+* Parameterized Names:: Specifying iterator values in C++ code.
@end menu
@node Mode Iterators
@var{subst-applied-value} is a value with which subst-attribute would be
replaced in the second copy of the original RTL-template.
+@node Parameterized Names
+@subsection Parameterized Names
+@cindex @samp{@@} in instruction pattern names
+Ports sometimes need to apply iterators using C++ code, in order to
+get the code or RTL pattern for a specific instruction. For example,
+suppose we have the @samp{neon_vq<absneg><mode>} pattern given above:
+
+@smallexample
+(define_int_iterator QABSNEG [UNSPEC_VQABS UNSPEC_VQNEG])
+
+(define_int_attr absneg [(UNSPEC_VQABS "abs") (UNSPEC_VQNEG "neg")])
+
+(define_insn "neon_vq<absneg><mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ QABSNEG))]
+ @dots{}
+)
+@end smallexample
+
+A port might need to generate this pattern for a variable
+@samp{QABSNEG} value and a variable @samp{VDQIW} mode. There are two
+ways of doing this. The first is to build the rtx for the pattern
+directly from C++ code; this is a valid technique and avoids any risk
+of combinatorial explosion. The second is to prefix the instruction
+name with the special character @samp{@@}, which tells GCC to generate
+the four additional functions below. In each case, @var{name} is the
+name of the instruction without the leading @samp{@@} character,
+without the @samp{<@dots{}>} placeholders, and with any underscore
+before a @samp{<@dots{}>} placeholder removed if keeping it would
+lead to a double or trailing underscore.
+
+@table @samp
+@item insn_code maybe_code_for_@var{name} (@var{i1}, @var{i2}, @dots{})
+See whether replacing the first @samp{<@dots{}>} placeholder with
+iterator value @var{i1}, the second with iterator value @var{i2}, and
+so on, gives a valid instruction. Return its code if so, otherwise
+return @code{CODE_FOR_nothing}.
+
+@item insn_code code_for_@var{name} (@var{i1}, @var{i2}, @dots{})
+Same, but abort the compiler if the requested instruction does not exist.
+
+@item rtx maybe_gen_@var{name} (@var{i1}, @var{i2}, @dots{}, @var{op0}, @var{op1}, @dots{})
+Check for a valid instruction in the same way as
+@code{maybe_code_for_@var{name}}. If the instruction exists,
+generate an instance of it using the operand values given by @var{op0},
+@var{op1}, and so on, otherwise return null.
+
+@item rtx gen_@var{name} (@var{i1}, @var{i2}, @dots{}, @var{op0}, @var{op1}, @dots{})
+Same, but abort the compiler if the requested instruction does not exist,
+or if the instruction generator invoked the @code{FAIL} macro.
+@end table
+
+For example, changing the pattern above to:
+
+@smallexample
+(define_insn "@@neon_vq<absneg><mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ QABSNEG))]
+ @dots{}
+)
+@end smallexample
+
+would define the same patterns as before, but in addition would generate
+the four functions below:
+
+@smallexample
+insn_code maybe_code_for_neon_vq (int, machine_mode);
+insn_code code_for_neon_vq (int, machine_mode);
+rtx maybe_gen_neon_vq (int, machine_mode, rtx, rtx, rtx);
+rtx gen_neon_vq (int, machine_mode, rtx, rtx, rtx);
+@end smallexample
+
+Calling @samp{code_for_neon_vq (UNSPEC_VQABS, V8QImode)}
+would then give @code{CODE_FOR_neon_vqabsv8qi}.
+
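+For example, C++ code in a port could then emit the instruction along
+the following lines, where @code{unspec}, @code{mode}, @code{op0},
+@code{op1} and @code{op2} are placeholder variables holding the
+required unspec code, mode and operands:
+
+@smallexample
+rtx insn = maybe_gen_neon_vq (unspec, mode, op0, op1, op2);
+if (insn)
+  emit_insn (insn);
+@end smallexample
+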
+It is possible to have multiple @samp{@@} patterns with the same
+name and same types of iterator. For example:
+
+@smallexample
+(define_insn "@@some_arithmetic_op<mode>"
+ [(set (match_operand:INTEGER_MODES 0 "register_operand") @dots{})]
+ @dots{}
+)
+
+(define_insn "@@some_arithmetic_op<mode>"
+ [(set (match_operand:FLOAT_MODES 0 "register_operand") @dots{})]
+ @dots{}
+)
+@end smallexample
+
+would produce a single set of functions that handles both
+@code{INTEGER_MODES} and @code{FLOAT_MODES}.
+
@end ifset
}
}
+/* Print "arg<N>" parameter declarations for each argument N of ONAME. */
+
+static void
+print_overload_arguments (overloaded_name *oname)
+{
+ for (unsigned int i = 0; i < oname->arg_types.length (); ++i)
+ printf ("%s%s arg%d", i == 0 ? "" : ", ", oname->arg_types[i], i);
+}
+
+/* Print code to test whether INSTANCE should be chosen, given that
+ argument N of the overload is available as "arg<N>". */
+
+static void
+print_overload_test (overloaded_instance *instance)
+{
+ for (unsigned int i = 0; i < instance->arg_values.length (); ++i)
+ printf ("%sarg%d == %s", i == 0 ? " if (" : "\n && ",
+ i, instance->arg_values[i]);
+ printf (")\n");
+}
+
+/* Emit a maybe_code_for_* function for ONAME. */
+
+static void
+handle_overloaded_code_for (overloaded_name *oname)
+{
+ /* Print the function prototype. */
+ printf ("\ninsn_code\nmaybe_code_for_%s (", oname->name);
+ print_overload_arguments (oname);
+ printf (")\n{\n");
+
+ /* Use a sequence of "if" statements for each instance. */
+ for (overloaded_instance *instance = oname->first_instance;
+ instance; instance = instance->next)
+ {
+ print_overload_test (instance);
+ printf (" return CODE_FOR_%s;\n", instance->name);
+ }
+
+ /* Return CODE_FOR_nothing if no match was found. */
+ printf (" return CODE_FOR_nothing;\n}\n");
+}
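+
+/* As an illustration, for the "@aarch64_rsqrte<mode>" pattern in
+   config/aarch64/aarch64-simd.md, the function printed here would look
+   roughly like:
+
+       insn_code
+       maybe_code_for_aarch64_rsqrte (machine_mode arg0)
+       {
+         if (arg0 == E_SFmode)
+           return CODE_FOR_aarch64_rsqrtesf;
+         ...
+         return CODE_FOR_nothing;
+       }  */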
+
+/* Emit a maybe_gen_* function for ONAME. */
+
+static void
+handle_overloaded_gen (overloaded_name *oname)
+{
+ /* All patterns must have the same number of operands. */
+ pattern_stats stats;
+ get_pattern_stats (&stats, XVEC (oname->first_instance->insn, 1));
+ for (overloaded_instance *instance = oname->first_instance->next;
+ instance; instance = instance->next)
+ {
+ pattern_stats stats2;
+ get_pattern_stats (&stats2, XVEC (instance->insn, 1));
+ if (stats.num_generator_args != stats2.num_generator_args)
+ fatal_at (get_file_location (instance->insn),
+ "inconsistent number of operands for '%s'; "
+ "this instance has %d, but previous instances had %d",
+ oname->name, stats2.num_generator_args,
+ stats.num_generator_args);
+ }
+
+ /* Print the function prototype. */
+ printf ("\nrtx\nmaybe_gen_%s (", oname->name);
+ print_overload_arguments (oname);
+ for (int i = 0; i < stats.num_generator_args; ++i)
+ printf (", rtx x%d", i);
+ printf (")\n{\n");
+
+ /* Use maybe_code_for_*, instead of duplicating the selection logic here. */
+ printf (" insn_code code = maybe_code_for_%s (", oname->name);
+ for (unsigned int i = 0; i < oname->arg_types.length (); ++i)
+ printf ("%sarg%d", i == 0 ? "" : ", ", i);
+ printf (");\n"
+ " if (code != CODE_FOR_nothing)\n"
+ " return GEN_FCN (code) (");
+ for (int i = 0; i < stats.num_generator_args; ++i)
+ printf ("%sx%d", i == 0 ? "" : ", ", i);
+ printf (");\n"
+ " else\n"
+ " return NULL_RTX;\n"
+ "}\n");
+}
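+
+/* Continuing the illustration above, the corresponding output for
+   "@aarch64_rsqrte<mode>" (which has two rtx operands) would be
+   roughly:
+
+       rtx
+       maybe_gen_aarch64_rsqrte (machine_mode arg0, rtx x0, rtx x1)
+       {
+         insn_code code = maybe_code_for_aarch64_rsqrte (arg0);
+         if (code != CODE_FOR_nothing)
+           return GEN_FCN (code) (x0, x1);
+         else
+           return NULL_RTX;
+       }  */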
+
int
main (int argc, const char **argv)
{
output_add_clobbers ();
output_added_clobbers_hard_reg_p ();
+ for (overloaded_name *oname = rtx_reader_ptr->get_overloads ();
+ oname; oname = oname->next)
+ {
+ handle_overloaded_code_for (oname);
+ handle_overloaded_gen (oname);
+ }
+
fflush (stdout);
return (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
}
return f;
}
+/* Declare the maybe_code_for_* function for ONAME, and provide
+ an inline definition of the asserting code_for_* wrapper. */
+
+static void
+handle_overloaded_code_for (FILE *file, overloaded_name *oname)
+{
+ fprintf (file, "\nextern insn_code maybe_code_for_%s (", oname->name);
+ for (unsigned int i = 0; i < oname->arg_types.length (); ++i)
+ fprintf (file, "%s%s", i == 0 ? "" : ", ", oname->arg_types[i]);
+ fprintf (file, ");\n");
+
+ fprintf (file, "inline insn_code\ncode_for_%s (", oname->name);
+ for (unsigned int i = 0; i < oname->arg_types.length (); ++i)
+ fprintf (file, "%s%s arg%d", i == 0 ? "" : ", ", oname->arg_types[i], i);
+ fprintf (file, ")\n{\n insn_code code = maybe_code_for_%s (", oname->name);
+ for (unsigned int i = 0; i < oname->arg_types.length (); ++i)
+ fprintf (file, "%sarg%d", i == 0 ? "" : ", ", i);
+ fprintf (file,
+ ");\n"
+ " gcc_assert (code != CODE_FOR_nothing);\n"
+ " return code;\n"
+ "}\n");
+}
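+
+/* For example, for an overload named "aarch64_rsqrte" whose only
+   argument is a machine_mode (as for the "@aarch64_rsqrte<mode>"
+   pattern), this prints roughly:
+
+       extern insn_code maybe_code_for_aarch64_rsqrte (machine_mode);
+       inline insn_code
+       code_for_aarch64_rsqrte (machine_mode arg0)
+       {
+         insn_code code = maybe_code_for_aarch64_rsqrte (arg0);
+         gcc_assert (code != CODE_FOR_nothing);
+         return code;
+       }  */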
+
+/* Declare the maybe_gen_* function for ONAME, and provide
+ an inline definition of the asserting gen_* wrapper. */
+
+static void
+handle_overloaded_gen (FILE *file, overloaded_name *oname)
+{
+ pattern_stats stats;
+ get_pattern_stats (&stats, XVEC (oname->first_instance->insn, 1));
+
+ fprintf (file, "\nextern rtx maybe_gen_%s (", oname->name);
+ for (unsigned int i = 0; i < oname->arg_types.length (); ++i)
+ fprintf (file, "%s%s", i == 0 ? "" : ", ", oname->arg_types[i]);
+ for (int i = 0; i < stats.num_generator_args; ++i)
+ fprintf (file, ", rtx");
+ fprintf (file, ");\n");
+
+ fprintf (file, "inline rtx\ngen_%s (", oname->name);
+ for (unsigned int i = 0; i < oname->arg_types.length (); ++i)
+ fprintf (file, "%s%s arg%d", i == 0 ? "" : ", ", oname->arg_types[i], i);
+ for (int i = 0; i < stats.num_generator_args; ++i)
+ fprintf (file, ", rtx x%d", i);
+ fprintf (file, ")\n{\n rtx res = maybe_gen_%s (", oname->name);
+ for (unsigned int i = 0; i < oname->arg_types.length (); ++i)
+ fprintf (file, "%sarg%d", i == 0 ? "" : ", ", i);
+ for (int i = 0; i < stats.num_generator_args; ++i)
+ fprintf (file, ", x%d", i);
+ fprintf (file,
+ ");\n"
+ " gcc_assert (res);\n"
+ " return res;\n"
+ "}\n");
+}
+
int
main (int argc, const char **argv)
{
"optab_to_code (optab op)\n"
"{\n"
" return optab_to_code_[op];\n"
- "}\n"
+ "}\n");
+
+ for (overloaded_name *oname = rtx_reader_ptr->get_overloads ();
+ oname; oname = oname->next)
+ {
+ handle_overloaded_code_for (h_file, oname);
+ handle_overloaded_gen (h_file, oname);
+ }
+
+ fprintf (h_file,
"#endif\n"
"\n"
"extern const struct convert_optab_libcall_d convlib_def[NUM_CONVLIB_OPTABS];\n"
m_first_dir_md_include (NULL),
m_last_dir_md_include_ptr (&m_first_dir_md_include),
m_first_line (0),
- m_last_line (0)
+ m_last_line (0),
+ m_first_overload (NULL),
+ m_next_overload_ptr (&m_first_overload),
+ m_overloads_htab (NULL)
{
/* Set the global singleton pointer. */
md_reader_ptr = this;
unsigned int num_values;
};
+/* Describes one instance of an overloaded_name. */
+struct overloaded_instance {
+ /* The next instance in the chain, or null if none. */
+ overloaded_instance *next;
+
+ /* The values that the overloaded_name arguments should have for this
+ instance to be chosen. Each value is a C token. */
+ vec<const char *> arg_values;
+
+ /* The full (non-overloaded) name of the pattern. */
+ const char *name;
+
+ /* The corresponding define_expand or define_insn. */
+ rtx insn;
+};
+
+/* Describes a define_expand or define_insn whose name was preceded by '@'.
+ Overloads are uniquely determined by their name and the types of their
+ arguments; it's possible to have overloads with the same name but
+ different argument types. */
+struct overloaded_name {
+ /* The next overloaded name in the chain. */
+ overloaded_name *next;
+
+ /* The overloaded name (i.e. the name with the "@" character and
+ "<...>" placeholders removed). */
+ const char *name;
+
+ /* The C types of the iterators that determine the underlying pattern,
+ in the same order as in the pattern name. E.g. "<mode>" in the
+ pattern name would give a "machine_mode" argument here. */
+ vec<const char *> arg_types;
+
+ /* The first instance associated with this overloaded_name. */
+ overloaded_instance *first_instance;
+
+ /* Where to chain new overloaded_instances. */
+ overloaded_instance **next_instance_ptr;
+};
+
+struct mapping;
+
/* A class for reading .md files and RTL dump files.
Implemented in read-md.c.
rtx x, unsigned int index,
const char *name);
struct mapping *read_mapping (struct iterator_group *group, htab_t table);
+ overloaded_name *handle_overloaded_name (rtx, vec<mapping *> *);
const char *get_top_level_filename () const { return m_toplevel_fname; }
const char *get_filename () const { return m_read_md_filename; }
struct obstack *get_string_obstack () { return &m_string_obstack; }
htab_t get_md_constants () { return m_md_constants; }
+ overloaded_name *get_overloads () const { return m_first_overload; }
+
private:
/* A singly-linked list of filenames. */
struct file_name_list {
/* If non-zero, filter the input to just this subset of lines. */
int m_first_line;
int m_last_line;
+
+ /* The first overloaded_name. */
+ overloaded_name *m_first_overload;
+
+ /* Where to chain further overloaded_names. */
+ overloaded_name **m_next_overload_ptr;
+
+ /* A hash table of overloaded_names, keyed off their name and the types of
+ their arguments. */
+ htab_t m_overloads_htab;
};
/* Global singleton; constrast with rtx_reader_ptr below. */
iterators. */
htab_t attrs, iterators;
+ /* The C++ type of the iterator, such as "machine_mode" for modes. */
+ const char *type;
+
/* Treat the given string as the name of a standard mode, etc., and
return its integer value. */
int (*find_builtin) (const char *);
If the iterator applies to operands, the second argument gives the
operand index, otherwise it is ignored. */
void (*apply_iterator) (rtx, unsigned int, int);
+
+ /* Return the C token for the given standard mode, code, etc. */
+ const char *(*get_c_token) (int);
};
/* Records one use of an iterator. */
PUT_MODE (x, (machine_mode) mode);
}
+static const char *
+get_mode_token (int mode)
+{
+ return concat ("E_", GET_MODE_NAME (mode), "mode", NULL);
+}
+
/* In compact dumps, the code of insns is prefixed with "c", giving "cinsn",
"cnote" etc, and CODE_LABEL is special-cased as "clabel". */
PUT_CODE (x, (enum rtx_code) code);
}
+static const char *
+get_code_token (int code)
+{
+ char *name = xstrdup (GET_RTX_NAME (code));
+ for (int i = 0; name[i]; ++i)
+ name[i] = TOUPPER (name[i]);
+ return name;
+}
+
/* Implementations of the iterator_group callbacks for ints. */
/* Since GCC does not construct a table of valid constants,
XINT (x, index) = value;
}
+static const char *
+get_int_token (int value)
+{
+ char buffer[HOST_BITS_PER_INT + 1];
+ sprintf (buffer, "%d", value);
+ return xstrdup (buffer);
+}
+
#ifdef GENERATOR_FILE
/* This routine adds attribute or does nothing depending on VALUE. When
}
/* Map attribute string P to its current value. Return null if the attribute
- isn't known. */
+ isn't known. If ITERATOR_OUT is nonnull, store the associated iterator
+ there. */
static struct map_value *
-map_attr_string (const char *p)
+map_attr_string (const char *p, mapping **iterator_out = 0)
{
const char *attr;
struct mapping *iterator;
iterator value. */
for (v = m->values; v; v = v->next)
if (v->number == iterator->current_value->number)
- return v;
+ {
+ if (iterator_out)
+ *iterator_out = iterator;
+ return v;
+ }
}
}
return NULL;
return 1;
}
+/* Return a hash value for overloaded_name UNCAST_ONAME. There shouldn't
+ be many instances of two overloaded_names having the same name but
+ different arguments, so hashing on the name should be good enough in
+ practice. */
+
+static hashval_t
+overloaded_name_hash (const void *uncast_oname)
+{
+ const overloaded_name *oname = (const overloaded_name *) uncast_oname;
+ return htab_hash_string (oname->name);
+}
+
+/* Return true if two overloaded_names are similar enough to share
+ the same generated functions. */
+
+static int
+overloaded_name_eq_p (const void *uncast_oname1, const void *uncast_oname2)
+{
+ const overloaded_name *oname1 = (const overloaded_name *) uncast_oname1;
+ const overloaded_name *oname2 = (const overloaded_name *) uncast_oname2;
+ if (strcmp (oname1->name, oname2->name) != 0
+ || oname1->arg_types.length () != oname2->arg_types.length ())
+ return 0;
+
+ for (unsigned int i = 0; i < oname1->arg_types.length (); ++i)
+ if (strcmp (oname1->arg_types[i], oname2->arg_types[i]) != 0)
+ return 0;
+
+ return 1;
+}
+
+/* Return true if X has an instruction name in XSTR (X, 0). */
+
+static bool
+named_rtx_p (rtx x)
+{
+ switch (GET_CODE (x))
+ {
+ case DEFINE_EXPAND:
+ case DEFINE_INSN:
+ case DEFINE_INSN_AND_SPLIT:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* Check whether ORIGINAL is a named pattern whose name starts with '@'.
+ If so, return the associated overloaded_name and add the iterator for
+ each argument to ITERATORS. Return null otherwise. */
+
+overloaded_name *
+md_reader::handle_overloaded_name (rtx original, vec<mapping *> *iterators)
+{
+ /* Check for the leading '@'. */
+ if (!named_rtx_p (original) || XSTR (original, 0)[0] != '@')
+ return NULL;
+
+ /* Remove the '@', so that no other code needs to worry about it. */
+ const char *name = XSTR (original, 0);
+ copy_md_ptr_loc (name + 1, name);
+ name += 1;
+ XSTR (original, 0) = name;
+
+ /* Build a copy of the name without the '<...>' attribute strings.
+ Add the iterator associated with each such attribute string to ITERATORS
+ and add an associated argument to TMP_ONAME. */
+ char *copy = ASTRDUP (name);
+ char *base = copy, *start, *end;
+ overloaded_name tmp_oname;
+ tmp_oname.arg_types.create (current_iterators.length ());
+ bool pending_underscore_p = false;
+ while ((start = strchr (base, '<')) && (end = strchr (start, '>')))
+ {
+ *end = 0;
+ mapping *iterator;
+ if (!map_attr_string (start + 1, &iterator))
+ fatal_with_file_and_line ("unknown iterator `%s'", start + 1);
+ *end = '>';
+
+ /* Remove a trailing underscore, so that we don't end a name
+ with "_" or turn "_<...>_" into "__". */
+ if (start != base && start[-1] == '_')
+ {
+ start -= 1;
+ pending_underscore_p = true;
+ }
+
+ /* Add the text between either the last '>' or the start of
+ the string and this '<'. */
+ obstack_grow (&m_string_obstack, base, start - base);
+ base = end + 1;
+
+ /* If there's a character we need to keep after the '>', check
+ whether we should prefix it with a previously-dropped '_'. */
+ if (base[0] != 0 && base[0] != '<')
+ {
+ if (pending_underscore_p && base[0] != '_')
+ obstack_1grow (&m_string_obstack, '_');
+ pending_underscore_p = false;
+ }
+
+ /* Record an argument for ITERATOR. */
+ iterators->safe_push (iterator);
+ tmp_oname.arg_types.safe_push (iterator->group->type);
+ }
+ if (base == copy)
+ fatal_with_file_and_line ("no iterator attributes in name `%s'", name);
+
+ size_t length = obstack_object_size (&m_string_obstack);
+ if (length == 0)
+ fatal_with_file_and_line ("`%s' only contains iterator attributes", name);
+
+ /* Get the completed name. */
+ obstack_grow (&m_string_obstack, base, strlen (base) + 1);
+ char *new_name = XOBFINISH (&m_string_obstack, char *);
+ tmp_oname.name = new_name;
+
+ if (!m_overloads_htab)
+ m_overloads_htab = htab_create (31, overloaded_name_hash,
+ overloaded_name_eq_p, NULL);
+
+ /* See whether another pattern had the same overload name and list
+ of argument types. Create a new permanent one if not. */
+ void **slot = htab_find_slot (m_overloads_htab, &tmp_oname, INSERT);
+ overloaded_name *oname = (overloaded_name *) *slot;
+ if (!oname)
+ {
+ *slot = oname = new overloaded_name;
+ oname->name = tmp_oname.name;
+ oname->arg_types = tmp_oname.arg_types;
+ oname->next = NULL;
+ oname->first_instance = NULL;
+ oname->next_instance_ptr = &oname->first_instance;
+
+ *m_next_overload_ptr = oname;
+ m_next_overload_ptr = &oname->next;
+ }
+ else
+ {
+ obstack_free (&m_string_obstack, new_name);
+ tmp_oname.arg_types.release ();
+ }
+
+ return oname;
+}
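+
+/* For example, the "@aarch64_compare_and_swap<mode>_lse" patterns in
+   config/aarch64/atomics.md give the overload name
+   "aarch64_compare_and_swap_lse" with a single "machine_mode" argument
+   type; each expansion of those patterns then provides an
+   overloaded_instance whose arg_value is the corresponding "E_...mode"
+   token (see add_overload_instance below).  */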
+
+/* Add an instance of ONAME for instruction pattern X. ITERATORS[I]
+ gives the iterator associated with argument I of ONAME. */
+
+static void
+add_overload_instance (overloaded_name *oname, vec<mapping *> iterators, rtx x)
+{
+ /* Create the instance. */
+ overloaded_instance *instance = new overloaded_instance;
+ instance->next = NULL;
+ instance->arg_values.create (oname->arg_types.length ());
+ for (unsigned int i = 0; i < iterators.length (); ++i)
+ {
+ int value = iterators[i]->current_value->number;
+ const char *name = iterators[i]->group->get_c_token (value);
+ instance->arg_values.quick_push (name);
+ }
+ instance->name = XSTR (x, 0);
+ instance->insn = x;
+
+ /* Chain it onto the end of ONAME's list. */
+ *oname->next_instance_ptr = instance;
+ oname->next_instance_ptr = &instance->next;
+}
+
/* Expand all iterators in the current rtx, which is given as ORIGINAL.
Build a list of expanded rtxes in the EXPR_LIST pointed to by QUEUE. */
{
/* Raise an error if any attributes were used. */
apply_attribute_uses ();
+
+ if (named_rtx_p (original) && XSTR (original, 0)[0] == '@')
+ fatal_with_file_and_line ("'@' used without iterators");
+
queue->safe_push (original);
return;
}
htab_traverse (substs.iterators, add_current_iterators, NULL);
gcc_assert (!current_iterators.is_empty ());
+ /* Check whether this is a '@' overloaded pattern. */
+ auto_vec<mapping *, 16> iterators;
+ overloaded_name *oname
+ = rtx_reader_ptr->handle_overloaded_name (original, &iterators);
+
for (;;)
{
/* Apply the current iterator values. Accumulate a condition to
v->number);
}
}
+
+ if (oname)
+ add_overload_instance (oname, iterators, x);
+
/* Add the new rtx to the end of the queue. */
queue->safe_push (x);
modes.attrs = htab_create (13, leading_string_hash, leading_string_eq_p, 0);
modes.iterators = htab_create (13, leading_string_hash,
leading_string_eq_p, 0);
+ modes.type = "machine_mode";
modes.find_builtin = find_mode;
modes.apply_iterator = apply_mode_iterator;
+ modes.get_c_token = get_mode_token;
codes.attrs = htab_create (13, leading_string_hash, leading_string_eq_p, 0);
codes.iterators = htab_create (13, leading_string_hash,
leading_string_eq_p, 0);
+ codes.type = "rtx_code";
codes.find_builtin = find_code;
codes.apply_iterator = apply_code_iterator;
+ codes.get_c_token = get_code_token;
ints.attrs = htab_create (13, leading_string_hash, leading_string_eq_p, 0);
ints.iterators = htab_create (13, leading_string_hash,
leading_string_eq_p, 0);
+ ints.type = "int";
ints.find_builtin = find_int;
ints.apply_iterator = apply_int_iterator;
+ ints.get_c_token = get_int_token;
substs.attrs = htab_create (13, leading_string_hash, leading_string_eq_p, 0);
substs.iterators = htab_create (13, leading_string_hash,
leading_string_eq_p, 0);
+ substs.type = "int";
substs.find_builtin = find_int; /* We don't use it, anyway. */
#ifdef GENERATOR_FILE
substs.apply_iterator = apply_subst_iterator;
#endif
+ substs.get_c_token = get_int_token;
lower = add_mapping (&modes, modes.attrs, "mode");
upper = add_mapping (&modes, modes.attrs, "MODE");