return new pass_insert_endbranch (ctxt);
}
+/* At entry of the nearest common dominator for basic blocks with
+ conversions, generate a single
+ vxorps %xmmN, %xmmN, %xmmN
+ for all
+ vcvtss2sd op, %xmmN, %xmmX
+ vcvtsd2ss op, %xmmN, %xmmX
+ vcvtsi2ss op, %xmmN, %xmmX
+ vcvtsi2sd op, %xmmN, %xmmX
+
+ NB: We want to generate only a single vxorps to cover the whole
+ function. The LCM algorithm isn't appropriate here since it may
+ place a vxorps inside a loop. */
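+
+ /* For example (register choices here are illustrative only), a loop
+ containing
+
+ vcvtsi2ss %edi, %xmm0, %xmm0
+
+ has a false dependency on the previous contents of %xmm0. After
+ this pass the conversion merges into a register that is known to
+ be zero and is cleared only once, outside the loop:
+
+ vxorps %xmm1, %xmm1, %xmm1
+ ...
+ vcvtsi2ss %edi, %xmm1, %xmm0 */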
+
+static unsigned int
+remove_partial_avx_dependency (void)
+{
+ timevar_push (TV_MACH_DEP);
+
+ bitmap_obstack_initialize (NULL);
+ bitmap convert_bbs = BITMAP_ALLOC (NULL);
+
+ basic_block bb;
+ rtx_insn *insn, *set_insn;
+ rtx set;
+ rtx v4sf_const0 = NULL_RTX;
+
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ FOR_BB_INSNS (bb, insn)
+ {
+ if (!NONDEBUG_INSN_P (insn))
+ continue;
+
+ set = single_set (insn);
+ if (!set)
+ continue;
+
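+ /* Only rewrite insns whose matched alternative carries the
+ avx_partial_xmm_update attribute, which is defined in i386.md
+ later in this patch. */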
+ if (get_attr_avx_partial_xmm_update (insn)
+ != AVX_PARTIAL_XMM_UPDATE_TRUE)
+ continue;
+
+ if (!v4sf_const0)
+ v4sf_const0 = gen_reg_rtx (V4SFmode);
+
+ /* Convert AVX_PARTIAL_XMM_UPDATE_TRUE insns, DF -> SF, SF -> DF,
+ SI -> SF, SI -> DF, DI -> SF and DI -> DF, to vec_dup and
+ vec_merge with a subreg. */
+ rtx src = SET_SRC (set);
+ rtx dest = SET_DEST (set);
+ machine_mode dest_mode = GET_MODE (dest);
+
+ rtx zero;
+ machine_mode dest_vecmode;
+ if (dest_mode == E_SFmode)
+ {
+ dest_vecmode = V4SFmode;
+ zero = v4sf_const0;
+ }
+ else
+ {
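+ /* View the single V4SF zero register as V2DF; the same 128-bit
+ vxorps result serves both SF and DF conversions. */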
+ dest_vecmode = V2DFmode;
+ zero = gen_rtx_SUBREG (V2DFmode, v4sf_const0, 0);
+ }
+
+ /* Change source to vector mode. */
+ src = gen_rtx_VEC_DUPLICATE (dest_vecmode, src);
+ src = gen_rtx_VEC_MERGE (dest_vecmode, src, zero,
+ GEN_INT (HOST_WIDE_INT_1U));
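+ /* For an SF destination, SRC now has the shape
+ (vec_merge:V4SF (vec_duplicate:V4SF (conversion ...))
+ (reg:V4SF v4sf_const0)
+ (const_int 1))
+ so element 0 comes from the conversion and the remaining elements
+ come from the zeroed register; the insn no longer reads the
+ previous contents of its destination. */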
+ /* Change destination to vector mode. */
+ rtx vec = gen_reg_rtx (dest_vecmode);
+ /* Generate an XMM vector SET. */
+ set = gen_rtx_SET (vec, src);
+ set_insn = emit_insn_before (set, insn);
+ df_insn_rescan (set_insn);
+
+ src = gen_rtx_SUBREG (dest_mode, vec, 0);
+ set = gen_rtx_SET (dest, src);
+
+ /* Drop possible dead definitions. */
+ PATTERN (insn) = set;
+
+ INSN_CODE (insn) = -1;
+ recog_memoized (insn);
+ df_insn_rescan (insn);
+ bitmap_set_bit (convert_bbs, bb->index);
+ }
+ }
+
+ if (v4sf_const0)
+ {
+ calculate_dominance_info (CDI_DOMINATORS);
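+ /* Defer insn rescans; they are flushed by
+ df_process_deferred_rescans below, once the vxorps has been
+ emitted. */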
+ df_set_flags (DF_DEFER_INSN_RESCAN);
+ df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
+ df_md_add_problem ();
+ df_analyze ();
+
+ /* (Re-)discover loops so that bb->loop_father can be used in the
+ analysis below. */
+ loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
+
+ /* Generate a vxorps at entry of the nearest common dominator for
+ basic blocks with conversions, which is in the fake loop that
+ contains the whole function, so that there is only a single
+ vxorps in the whole function. */
+ bb = nearest_common_dominator_for_set (CDI_DOMINATORS,
+ convert_bbs);
+ while (bb->loop_father->latch
+ != EXIT_BLOCK_PTR_FOR_FN (cfun))
+ bb = get_immediate_dominator (CDI_DOMINATORS,
+ bb->loop_father->header);
+
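+ /* BB_HEAD may be a label or a note; step to the first real insn
+ so that the vxorps is emitted at the start of the block's code. */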
+ insn = BB_HEAD (bb);
+ if (!NONDEBUG_INSN_P (insn))
+ insn = next_nonnote_nondebug_insn (insn);
+ set = gen_rtx_SET (v4sf_const0, CONST0_RTX (V4SFmode));
+ set_insn = emit_insn_before (set, insn);
+ df_insn_rescan (set_insn);
+ df_process_deferred_rescans ();
+ loop_optimizer_finalize ();
+ }
+
+ /* Free the bitmap before releasing the obstack it was allocated
+ from. */
+ BITMAP_FREE (convert_bbs);
+ bitmap_obstack_release (NULL);
+
+ timevar_pop (TV_MACH_DEP);
+ return 0;
+}
+
+namespace {
+
+const pass_data pass_data_remove_partial_avx_dependency =
+{
+ RTL_PASS, /* type */
+ "rpad", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_MACH_DEP, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_df_finish, /* todo_flags_finish */
+};
+
+class pass_remove_partial_avx_dependency : public rtl_opt_pass
+{
+public:
+ pass_remove_partial_avx_dependency (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_remove_partial_avx_dependency, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ return (TARGET_AVX
+ && TARGET_SSE_PARTIAL_REG_DEPENDENCY
+ && TARGET_SSE_MATH
+ && optimize
+ && optimize_function_for_speed_p (cfun));
+ }
+
+ virtual unsigned int execute (function *)
+ {
+ return remove_partial_avx_dependency ();
+ }
+}; // class pass_remove_partial_avx_dependency
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_remove_partial_avx_dependency (gcc::context *ctxt)
+{
+ return new pass_remove_partial_avx_dependency (ctxt);
+}
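+
+/* A sketch of how this pass is expected to be wired up; the actual
+ insertion point is not part of this hunk. Target passes are
+ instantiated from the backend's passes.def through their
+ make_pass_* function, e.g.
+
+ INSERT_PASS_AFTER (pass_combine, 1, remove_partial_avx_dependency);
+
+ The pass must run before register allocation, since it creates new
+ pseudo registers with gen_reg_rtx. */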
+
/* Return true if a red-zone is in use. We can't use red-zone when
there are local indirect jumps, like "indirect_jump" or "tablejump",
which jumps to another place in the function, since "call" in the
(define_attr "i387_cw" "trunc,floor,ceil,uninitialized,any"
(const_string "any"))
+;; Define attribute to indicate AVX insns with partial XMM register update.
+(define_attr "avx_partial_xmm_update" "false,true"
+ (const_string "false"))
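+;; Alternatives that only update the low element of their XMM
+;; destination mark themselves with, e.g.
+;; (set_attr "avx_partial_xmm_update" "false,false,false,true")
+;; so that the rpad pass can find and rewrite them.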
+
;; Define attribute to classify add/sub insns that consumes carry flag (CF)
(define_attr "use_carry" "0,1" (const_string "0"))
}
}
[(set_attr "type" "fmov,fmov,ssecvt,ssecvt")
+ (set_attr "avx_partial_xmm_update" "false,false,false,true")
(set_attr "prefix" "orig,orig,maybe_vex,maybe_vex")
(set_attr "mode" "SF,XF,DF,DF")
(set (attr "enabled")
[(set (match_operand:DF 0 "sse_reg_operand")
(float_extend:DF
(match_operand:SF 1 "nonimmediate_operand")))]
- "TARGET_SSE_PARTIAL_REG_DEPENDENCY && epilogue_completed
+ "!TARGET_AVX
+ && TARGET_SSE_PARTIAL_REG_DEPENDENCY && epilogue_completed
&& optimize_function_for_speed_p (cfun)
&& (!REG_P (operands[1])
|| (!TARGET_AVX && REGNO (operands[0]) != REGNO (operands[1])))
}
}
[(set_attr "type" "fmov,fmov,ssecvt,ssecvt")
+ (set_attr "avx_partial_xmm_update" "false,false,false,true")
(set_attr "mode" "SF")
(set (attr "enabled")
(if_then_else
[(set (match_operand:SF 0 "sse_reg_operand")
(float_truncate:SF
(match_operand:DF 1 "nonimmediate_operand")))]
- "TARGET_SSE_PARTIAL_REG_DEPENDENCY && epilogue_completed
+ "!TARGET_AVX
+ && TARGET_SSE_PARTIAL_REG_DEPENDENCY && epilogue_completed
&& optimize_function_for_speed_p (cfun)
&& (!REG_P (operands[1])
|| (!TARGET_AVX && REGNO (operands[0]) != REGNO (operands[1])))
%vcvtsi2<MODEF:ssemodesuffix><SWI48:rex64suffix>\t{%1, %d0|%d0, %1}
%vcvtsi2<MODEF:ssemodesuffix><SWI48:rex64suffix>\t{%1, %d0|%d0, %1}"
[(set_attr "type" "fmov,sseicvt,sseicvt")
+ (set_attr "avx_partial_xmm_update" "false,true,true")
(set_attr "prefix" "orig,maybe_vex,maybe_vex")
(set_attr "mode" "<MODEF:MODE>")
(set (attr "prefix_rex")
(define_split
[(set (match_operand:MODEF 0 "sse_reg_operand")
(float:MODEF (match_operand:SWI48 1 "nonimmediate_operand")))]
- "TARGET_SSE_PARTIAL_REG_DEPENDENCY && epilogue_completed
+ "!TARGET_AVX
+ && TARGET_SSE_PARTIAL_REG_DEPENDENCY && epilogue_completed
&& optimize_function_for_speed_p (cfun)
&& (!EXT_REX_SSE_REG_P (operands[0])
|| TARGET_AVX512VL)"