tree-streamer-out.o \
tree-tailcall.o \
tree-vect-generic.o \
+ gimple-isel.o \
tree-vect-patterns.o \
tree-vect-data-refs.o \
tree-vect-stmts.o \
$(srcdir)/dwarf2cfi.c \
$(srcdir)/dwarf2out.c \
$(srcdir)/tree-vect-generic.c \
+ $(srcdir)/gimple-isel.cc \
$(srcdir)/dojump.c $(srcdir)/emit-rtl.h \
$(srcdir)/emit-rtl.c $(srcdir)/except.h $(srcdir)/explow.c $(srcdir)/expr.c \
$(srcdir)/expr.h \
if (temp != 0)
return temp;
- /* For vector MIN <x, y>, expand it a VEC_COND_EXPR <x <= y, x, y>
- and similarly for MAX <x, y>. */
if (VECTOR_TYPE_P (type))
- {
- tree t0 = make_tree (type, op0);
- tree t1 = make_tree (type, op1);
- tree comparison = build2 (code == MIN_EXPR ? LE_EXPR : GE_EXPR,
- type, t0, t1);
- return expand_vec_cond_expr (type, comparison, t0, t1,
- original_target);
- }
+ gcc_unreachable ();
/* At this point, a MEM target is no longer useful; we will get better
code without it. */
return temp;
}
- case VEC_COND_EXPR:
- target = expand_vec_cond_expr (type, treeop0, treeop1, treeop2, target);
- return target;
-
case VEC_DUPLICATE_EXPR:
op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
target = expand_vector_broadcast (mode, op0);
STRIP_NOPS (arg1);
/* For vector typed comparisons emit code to generate the desired
- all-ones or all-zeros mask. Conveniently use the VEC_COND_EXPR
- expander for this. */
+ all-ones or all-zeros mask. */
if (TREE_CODE (ops->type) == VECTOR_TYPE)
{
tree ifexp = build2 (ops->code, ops->type, arg0, arg1);
if (VECTOR_BOOLEAN_TYPE_P (ops->type)
&& expand_vec_cmp_expr_p (TREE_TYPE (arg0), ops->type, ops->code))
return expand_vec_cmp_expr (ops->type, ifexp, target);
else
- {
- tree if_true = constant_boolean_node (true, ops->type);
- tree if_false = constant_boolean_node (false, ops->type);
- return expand_vec_cond_expr (ops->type, ifexp, if_true,
- if_false, target);
- }
+ gcc_unreachable ();
}
/* Optimize (x % C1) == C2 or (x % C1) != C2 if it is beneficial
--- /dev/null
+/* Schedule GIMPLE vector statements.
+ Copyright (C) 2020 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "tree.h"
+#include "gimple.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "expmed.h"
+#include "optabs-tree.h"
+#include "tree-eh.h"
+#include "gimple-iterator.h"
+#include "gimplify-me.h"
+#include "gimplify.h"
+#include "tree-cfg.h"
+
+/* Expand the VEC_COND_EXPR assignment at GSI, if any, into a call to an
+ internal function chosen according to the supported expansion, and return
+ the new call statement (NULL if nothing was expanded).
+ VEC_COND_SSA_NAME_USES caches, for each comparison mask, the number of
+ VEC_COND_EXPRs that use it. */
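+
+/* As a sketch (SSA names and operands are illustrative), a statement
+ sequence such as
+
+ mask_5 = a_1 < b_2;
+ res_6 = VEC_COND_EXPR <mask_5, c_3, d_4>;
+
+ becomes
+
+ res_6 = .VCOND_MASK (mask_5, c_3, d_4);
+
+ when the target provides a vcond_mask pattern for the involved modes,
+ and otherwise a combined call that also carries the comparison operands
+ and comparison code, e.g.
+
+ res_6 = .VCOND (a_1, b_2, c_3, d_4, LT_EXPR); */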
+
+static gimple *
+gimple_expand_vec_cond_expr (gimple_stmt_iterator *gsi,
+ hash_map<tree, unsigned int> *vec_cond_ssa_name_uses)
+{
+ tree lhs, op0a = NULL_TREE, op0b = NULL_TREE;
+ enum tree_code code;
+ enum tree_code tcode;
+ machine_mode cmp_op_mode;
+ bool unsignedp;
+ enum insn_code icode;
+ imm_use_iterator imm_iter;
+
+ /* Only consider code == GIMPLE_ASSIGN. */
+ gassign *stmt = dyn_cast<gassign *> (gsi_stmt (*gsi));
+ if (!stmt)
+ return NULL;
+
+ code = gimple_assign_rhs_code (stmt);
+ if (code != VEC_COND_EXPR)
+ return NULL;
+
+ tree op0 = gimple_assign_rhs1 (stmt);
+ tree op1 = gimple_assign_rhs2 (stmt);
+ tree op2 = gimple_assign_rhs3 (stmt);
+ lhs = gimple_assign_lhs (stmt);
+ machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
+
+ gcc_assert (!COMPARISON_CLASS_P (op0));
+ if (TREE_CODE (op0) == SSA_NAME)
+ {
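+ /* Count (and cache) how many VEC_COND_EXPRs use OP0 as their mask;
+ when the comparison result feeds two or more of them, it is likely
+ cheaper to materialize the mask once and use vcond_mask below. */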
+ unsigned int used_vec_cond_exprs = 0;
+ unsigned int *slot = vec_cond_ssa_name_uses->get (op0);
+ if (slot)
+ used_vec_cond_exprs = *slot;
+ else
+ {
+ gimple *use_stmt;
+ FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, op0)
+ {
+ gassign *assign = dyn_cast<gassign *> (use_stmt);
+ if (assign != NULL
+ && gimple_assign_rhs_code (assign) == VEC_COND_EXPR
+ && gimple_assign_rhs1 (assign) == op0)
+ used_vec_cond_exprs++;
+ }
+ vec_cond_ssa_name_uses->put (op0, used_vec_cond_exprs);
+ }
+
+ gassign *def_stmt = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (op0));
+ if (def_stmt)
+ {
+ tcode = gimple_assign_rhs_code (def_stmt);
+ op0a = gimple_assign_rhs1 (def_stmt);
+ op0b = gimple_assign_rhs2 (def_stmt);
+
+ tree op0a_type = TREE_TYPE (op0a);
+ if (used_vec_cond_exprs >= 2
+ && (get_vcond_mask_icode (mode, TYPE_MODE (op0a_type))
+ != CODE_FOR_nothing)
+ && expand_vec_cmp_expr_p (op0a_type, TREE_TYPE (lhs), tcode))
+ {
+ /* Keep the SSA name and use vcond_mask. */
+ tcode = TREE_CODE (op0);
+ }
+ }
+ else
+ tcode = TREE_CODE (op0);
+ }
+ else
+ tcode = TREE_CODE (op0);
+
+ if (TREE_CODE_CLASS (tcode) != tcc_comparison)
+ {
+ gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
+ if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
+ != CODE_FOR_nothing)
+ return gimple_build_call_internal (IFN_VCOND_MASK, 3, op0, op1, op2);
+ /* Fake op0 < 0. */
+ else
+ {
+ gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
+ == MODE_VECTOR_INT);
+ op0a = op0;
+ op0b = build_zero_cst (TREE_TYPE (op0));
+ tcode = LT_EXPR;
+ }
+ }
+ cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
+ unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
+
+ gcc_assert (known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (cmp_op_mode))
+ && known_eq (GET_MODE_NUNITS (mode),
+ GET_MODE_NUNITS (cmp_op_mode)));
+
+ icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
+ if (icode == CODE_FOR_nothing)
+ {
+ if (tcode == LT_EXPR
+ && op0a == op0
+ && TREE_CODE (op0) == VECTOR_CST)
+ {
+ /* A VEC_COND_EXPR condition could be folded from EQ_EXPR/NE_EXPR
+ into a constant when only get_vcond_eq_icode is supported.
+ Verify < 0 and != 0 behave the same and change it to NE_EXPR. */
+ unsigned HOST_WIDE_INT nelts;
+ if (!VECTOR_CST_NELTS (op0).is_constant (&nelts))
+ {
+ if (VECTOR_CST_STEPPED_P (op0))
+ gcc_unreachable ();
+ nelts = vector_cst_encoded_nelts (op0);
+ }
+ for (unsigned int i = 0; i < nelts; ++i)
+ if (tree_int_cst_sgn (vector_cst_elt (op0, i)) == 1)
+ gcc_unreachable ();
+ tcode = NE_EXPR;
+ }
+ if (tcode == EQ_EXPR || tcode == NE_EXPR)
+ {
+ tree tcode_tree = build_int_cst (integer_type_node, tcode);
+ return gimple_build_call_internal (IFN_VCONDEQ, 5, op0a, op0b, op1,
+ op2, tcode_tree);
+ }
+ }
+
+ gcc_assert (icode != CODE_FOR_nothing);
+ tree tcode_tree = build_int_cst (integer_type_node, tcode);
+ return gimple_build_call_internal (unsignedp ? IFN_VCONDU : IFN_VCOND,
+ 5, op0a, op0b, op1, op2, tcode_tree);
+}
+
+/* Iterate over all gimple statements in the current function and expand
+ VEC_COND_EXPR assignments into calls to internal functions. */
+
+static unsigned int
+gimple_expand_vec_cond_exprs (void)
+{
+ gimple_stmt_iterator gsi;
+ basic_block bb;
+ bool cfg_changed = false;
+ hash_map<tree, unsigned int> vec_cond_ssa_name_uses;
+
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *g = gimple_expand_vec_cond_expr (&gsi,
+ &vec_cond_ssa_name_uses);
+ if (g != NULL)
+ {
+ tree lhs = gimple_assign_lhs (gsi_stmt (gsi));
+ gimple_set_lhs (g, lhs);
+ gsi_replace (&gsi, g, false);
+ }
+ }
+ }
+
+ return cfg_changed ? TODO_cleanup_cfg : 0;
+}
+
+namespace {
+
+const pass_data pass_data_gimple_isel =
+{
+ GIMPLE_PASS, /* type */
+ "isel", /* name */
+ OPTGROUP_VEC, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_update_ssa, /* todo_flags_finish */
+};
+
+class pass_gimple_isel : public gimple_opt_pass
+{
+public:
+ pass_gimple_isel (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_gimple_isel, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ return true;
+ }
+
+ virtual unsigned int execute (function *)
+ {
+ return gimple_expand_vec_cond_exprs ();
+ }
+
+}; // class pass_gimple_isel
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_gimple_isel (gcc::context *ctxt)
+{
+ return new pass_gimple_isel (ctxt);
+}
+
}
case VEC_COND_EXPR:
- {
- enum gimplify_status r0, r1, r2;
-
- r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
- post_p, is_gimple_condexpr, fb_rvalue);
- r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
- post_p, is_gimple_val, fb_rvalue);
- r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p,
- post_p, is_gimple_val, fb_rvalue);
-
- ret = MIN (MIN (r0, r1), r2);
- recalculate_side_effects (*expr_p);
- }
- break;
+ goto expr_3;
case VEC_PERM_EXPR:
/* Classified as tcc_expression. */
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "explow.h"
/* The names of each internal function, indexed by function number. */
const char *const internal_fn_name_array[] = {
#define mask_store_direct { 3, 2, false }
#define store_lanes_direct { 0, 0, false }
#define mask_store_lanes_direct { 0, 0, false }
+#define vec_cond_mask_direct { 0, 0, false }
+#define vec_cond_direct { 0, 0, false }
+#define vec_condu_direct { 0, 0, false }
+#define vec_condeq_direct { 0, 0, false }
#define scatter_store_direct { 3, 1, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }
#define expand_mask_store_lanes_optab_fn expand_mask_store_optab_fn
+/* Expand the VCOND, VCONDU and VCONDEQ optab internal functions.
+ STMT is expanded using the insn pattern provided by the associated OPTAB. */
+
+static void
+expand_vect_cond_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
+{
+ class expand_operand ops[6];
+ insn_code icode;
+ tree lhs = gimple_call_lhs (stmt);
+ tree op0a = gimple_call_arg (stmt, 0);
+ tree op0b = gimple_call_arg (stmt, 1);
+ tree op1 = gimple_call_arg (stmt, 2);
+ tree op2 = gimple_call_arg (stmt, 3);
+ enum tree_code tcode = (tree_code) int_cst_value (gimple_call_arg (stmt, 4));
+
+ tree vec_cond_type = TREE_TYPE (lhs);
+ tree op0a_type = TREE_TYPE (op0a);
+ bool unsignedp = TYPE_UNSIGNED (op0a_type);
+
+ machine_mode mode = TYPE_MODE (vec_cond_type);
+ machine_mode cmp_op_mode = TYPE_MODE (op0a_type);
+
+ icode = convert_optab_handler (optab, mode, cmp_op_mode);
+ rtx comparison
+ = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp, icode, 4);
+ rtx rtx_op1 = expand_normal (op1);
+ rtx rtx_op2 = expand_normal (op2);
+
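+ /* The vcond{,u,eq} patterns take six operands: the destination, the
+ two value operands, and the comparison rtx followed by its two
+ operands. */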
+ rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+ create_output_operand (&ops[0], target, mode);
+ create_input_operand (&ops[1], rtx_op1, mode);
+ create_input_operand (&ops[2], rtx_op2, mode);
+ create_fixed_operand (&ops[3], comparison);
+ create_fixed_operand (&ops[4], XEXP (comparison, 0));
+ create_fixed_operand (&ops[5], XEXP (comparison, 1));
+ expand_insn (icode, 6, ops);
+}
+
+#define expand_vec_cond_optab_fn expand_vect_cond_optab_fn
+#define expand_vec_condu_optab_fn expand_vect_cond_optab_fn
+#define expand_vec_condeq_optab_fn expand_vect_cond_optab_fn
+
+/* Expand the VCOND_MASK optab internal function.
+ STMT is expanded using the insn pattern provided by the associated OPTAB. */
+
+static void
+expand_vect_cond_mask_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
+{
+ class expand_operand ops[4];
+
+ tree lhs = gimple_call_lhs (stmt);
+ tree op0 = gimple_call_arg (stmt, 0);
+ tree op1 = gimple_call_arg (stmt, 1);
+ tree op2 = gimple_call_arg (stmt, 2);
+ tree vec_cond_type = TREE_TYPE (lhs);
+
+ machine_mode mode = TYPE_MODE (vec_cond_type);
+ machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
+ enum insn_code icode = convert_optab_handler (optab, mode, mask_mode);
+ rtx mask, rtx_op1, rtx_op2;
+
+ gcc_assert (icode != CODE_FOR_nothing);
+
+ mask = expand_normal (op0);
+ rtx_op1 = expand_normal (op1);
+ rtx_op2 = expand_normal (op2);
+
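+ /* Force the mask and the first value operand into registers; the
+ remaining operands are legitimized when the insn is expanded. */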
+ mask = force_reg (mask_mode, mask);
+ rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
+
+ rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+ create_output_operand (&ops[0], target, mode);
+ create_input_operand (&ops[1], rtx_op1, mode);
+ create_input_operand (&ops[2], rtx_op2, mode);
+ create_input_operand (&ops[3], mask, mask_mode);
+ expand_insn (icode, 4, ops);
+}
+
+#define expand_vec_cond_mask_optab_fn expand_vect_cond_mask_optab_fn
+
static void
expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
{
#define direct_mask_store_optab_supported_p direct_optab_supported_p
#define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_mask_store_lanes_optab_supported_p multi_vector_optab_supported_p
+#define direct_vec_cond_mask_optab_supported_p multi_vector_optab_supported_p
+#define direct_vec_cond_optab_supported_p multi_vector_optab_supported_p
+#define direct_vec_condu_optab_supported_p multi_vector_optab_supported_p
+#define direct_vec_condeq_optab_supported_p multi_vector_optab_supported_p
#define direct_scatter_store_optab_supported_p convert_optab_supported_p
#define direct_while_optab_supported_p convert_optab_supported_p
#define direct_fold_extract_optab_supported_p direct_optab_supported_p
DEF_INTERNAL_OPTAB_FN (MASK_STORE_LANES, 0,
vec_mask_store_lanes, mask_store_lanes)
+DEF_INTERNAL_OPTAB_FN (VCOND, 0, vcond, vec_cond)
+DEF_INTERNAL_OPTAB_FN (VCONDU, 0, vcondu, vec_condu)
+DEF_INTERNAL_OPTAB_FN (VCONDEQ, 0, vcondeq, vec_condeq)
+DEF_INTERNAL_OPTAB_FN (VCOND_MASK, 0, vcond_mask, vec_cond_mask)
+
DEF_INTERNAL_OPTAB_FN (WHILE_ULT, ECF_CONST | ECF_NOTHROW, while_ult, while)
DEF_INTERNAL_OPTAB_FN (CHECK_RAW_PTRS, ECF_CONST | ECF_NOTHROW,
check_raw_ptrs, check_ptrs)
first comparison operand for insn ICODE. Do not generate the
compare instruction itself. */
-static rtx
+rtx
vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
tree t_op0, tree t_op1, bool unsignedp,
enum insn_code icode, unsigned int opno)
return tmp;
}
-/* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
- three operands. */
-
-rtx
-expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
- rtx target)
-{
- class expand_operand ops[4];
- machine_mode mode = TYPE_MODE (vec_cond_type);
- machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
- enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
- rtx mask, rtx_op1, rtx_op2;
-
- if (icode == CODE_FOR_nothing)
- return 0;
-
- mask = expand_normal (op0);
- rtx_op1 = expand_normal (op1);
- rtx_op2 = expand_normal (op2);
-
- mask = force_reg (mask_mode, mask);
- rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
-
- create_output_operand (&ops[0], target, mode);
- create_input_operand (&ops[1], rtx_op1, mode);
- create_input_operand (&ops[2], rtx_op2, mode);
- create_input_operand (&ops[3], mask, mask_mode);
- expand_insn (icode, 4, ops);
-
- return ops[0].value;
-}
-
-/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
- three operands. */
-
-rtx
-expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
- rtx target)
-{
- class expand_operand ops[6];
- enum insn_code icode;
- rtx comparison, rtx_op1, rtx_op2;
- machine_mode mode = TYPE_MODE (vec_cond_type);
- machine_mode cmp_op_mode;
- bool unsignedp;
- tree op0a, op0b;
- enum tree_code tcode;
-
- if (COMPARISON_CLASS_P (op0))
- {
- op0a = TREE_OPERAND (op0, 0);
- op0b = TREE_OPERAND (op0, 1);
- tcode = TREE_CODE (op0);
- }
- else
- {
- gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
- if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
- != CODE_FOR_nothing)
- return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
- op2, target);
- /* Fake op0 < 0. */
- else
- {
- gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
- == MODE_VECTOR_INT);
- op0a = op0;
- op0b = build_zero_cst (TREE_TYPE (op0));
- tcode = LT_EXPR;
- }
- }
- cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
- unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
-
-
- gcc_assert (known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (cmp_op_mode))
- && known_eq (GET_MODE_NUNITS (mode),
- GET_MODE_NUNITS (cmp_op_mode)));
-
- icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
- if (icode == CODE_FOR_nothing)
- {
- if (tcode == LT_EXPR
- && op0a == op0
- && TREE_CODE (op0) == VECTOR_CST)
- {
- /* A VEC_COND_EXPR condition could be folded from EQ_EXPR/NE_EXPR
- into a constant when only get_vcond_eq_icode is supported.
- Verify < 0 and != 0 behave the same and change it to NE_EXPR. */
- unsigned HOST_WIDE_INT nelts;
- if (!VECTOR_CST_NELTS (op0).is_constant (&nelts))
- {
- if (VECTOR_CST_STEPPED_P (op0))
- return 0;
- nelts = vector_cst_encoded_nelts (op0);
- }
- for (unsigned int i = 0; i < nelts; ++i)
- if (tree_int_cst_sgn (vector_cst_elt (op0, i)) == 1)
- return 0;
- tcode = NE_EXPR;
- }
- if (tcode == EQ_EXPR || tcode == NE_EXPR)
- icode = get_vcond_eq_icode (mode, cmp_op_mode);
- if (icode == CODE_FOR_nothing)
- return 0;
- }
-
- comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
- icode, 4);
- rtx_op1 = expand_normal (op1);
- rtx_op2 = expand_normal (op2);
-
- create_output_operand (&ops[0], target, mode);
- create_input_operand (&ops[1], rtx_op1, mode);
- create_input_operand (&ops[2], rtx_op2, mode);
- create_fixed_operand (&ops[3], comparison);
- create_fixed_operand (&ops[4], XEXP (comparison, 0));
- create_fixed_operand (&ops[5], XEXP (comparison, 1));
- expand_insn (icode, 6, ops);
- return ops[0].value;
-}
-
/* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
Use TARGET for the result if nonnull and convenient. */
/* Generate code for vector comparison. */
extern rtx expand_vec_cmp_expr (tree, tree, rtx);
-/* Generate code for VEC_COND_EXPR. */
-extern rtx expand_vec_cond_expr (tree, tree, tree, tree, rtx);
-
/* Generate code for VEC_SERIES_EXPR. */
extern rtx expand_vec_series_expr (machine_mode, rtx, rtx, rtx);
class expand_operand *ops);
extern enum rtx_code get_rtx_code (enum tree_code tcode, bool unsignedp);
+extern rtx vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
+ tree t_op0, tree t_op1, bool unsignedp,
+ enum insn_code icode, unsigned int opno);
+
#endif /* GCC_OPTABS_H */
NEXT_PASS (pass_cleanup_eh);
NEXT_PASS (pass_lower_resx);
NEXT_PASS (pass_nrv);
+ NEXT_PASS (pass_gimple_isel);
NEXT_PASS (pass_cleanup_cfg_post_optimizing);
NEXT_PASS (pass_warn_function_noreturn);
NEXT_PASS (pass_gen_hsail);
--- /dev/null
+/* { dg-do compile } */
+/* { dg-additional-options "-fnon-call-exceptions" } */
+
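+/* A vector condition in a region that can throw with -fnon-call-exceptions;
+ this compile-only test checks that lowering the VEC_COND_EXPR below
+ succeeds. */
+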
+typedef double v2df __attribute__((vector_size(16)));
+
+v2df foo (v2df a, v2df b, v2df c, v2df d)
+{
+ try
+ {
+ v2df res = a < b ? c : d;
+ return res;
+ }
+ catch (...)
+ {
+ return (v2df){};
+ }
+}
return true;
}
- if (((rhs_code == VEC_COND_EXPR || rhs_code == COND_EXPR)
+ if ((rhs_code == COND_EXPR
? !is_gimple_condexpr (rhs1) : !is_gimple_val (rhs1))
|| !is_gimple_val (rhs2)
|| !is_gimple_val (rhs3))
extern gimple_opt_pass *make_pass_update_address_taken (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_convert_switch (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_lower_vaarg (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_gimple_isel (gcc::context *ctxt);
/* Current optimization pass. */
extern opt_pass *current_pass;
tree rhs1 = gimple_assign_rhs1 (stmt);
enum tree_code code = gimple_assign_rhs_code (stmt);
- if (code == COND_EXPR
- || code == VEC_COND_EXPR)
+ if (code == COND_EXPR)
{
/* In this case the entire COND_EXPR is in rhs1. */
if (forward_propagate_into_cond (&gsi))
- to type of comparison. */
+ to type of comparison. When non-null, set *LHS and *RHS to the
+ comparison operands and *VCOND to the VEC_COND_EXPR statement that
+ defines VAR. */
static tree_code
-ovce_extract_ops (tree var, gassign **rets, bool *reti, tree *type)
+ovce_extract_ops (tree var, gassign **rets, bool *reti, tree *type,
+ tree *lhs, tree *rhs, gassign **vcond)
{
if (TREE_CODE (var) != SSA_NAME)
return ERROR_MARK;
gassign *stmt = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (var));
if (stmt == NULL)
return ERROR_MARK;
+ if (vcond)
+ *vcond = stmt;
/* ??? If we start creating more COND_EXPR, we could perform
this same optimization with them. For now, simplify. */
tree cond = gimple_assign_rhs1 (stmt);
tree_code cmp = TREE_CODE (cond);
- if (TREE_CODE_CLASS (cmp) != tcc_comparison)
+ if (cmp != SSA_NAME)
return ERROR_MARK;
+ gassign *assign = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (cond));
+ if (assign == NULL
+ || TREE_CODE_CLASS (gimple_assign_rhs_code (assign)) != tcc_comparison)
+ return ERROR_MARK;
+
+ cmp = gimple_assign_rhs_code (assign);
+ if (lhs)
+ *lhs = gimple_assign_rhs1 (assign);
+ if (rhs)
+ *rhs = gimple_assign_rhs2 (assign);
+
/* ??? For now, allow only canonical true and false result vectors.
We could expand this to other constants should the need arise,
but at the moment we don't create them. */
/* Success! */
if (rets)
- *rets = stmt;
+ *rets = assign;
if (reti)
*reti = inv;
if (type)
{
tree elt0 = (*ops)[i]->op;
- gassign *stmt0;
+ gassign *stmt0, *vcond0;
bool invert;
- tree type;
- tree_code cmp0 = ovce_extract_ops (elt0, &stmt0, &invert, &type);
+ tree type, lhs0, rhs0;
+ tree_code cmp0 = ovce_extract_ops (elt0, &stmt0, &invert, &type, &lhs0,
+ &rhs0, &vcond0);
if (cmp0 == ERROR_MARK)
continue;
{
tree &elt1 = (*ops)[j]->op;
- gassign *stmt1;
- tree_code cmp1 = ovce_extract_ops (elt1, &stmt1, NULL, NULL);
+ gassign *stmt1, *vcond1;
+ tree lhs1, rhs1;
+ tree_code cmp1 = ovce_extract_ops (elt1, &stmt1, NULL, NULL, &lhs1,
+ &rhs1, &vcond1);
if (cmp1 == ERROR_MARK)
continue;
- tree cond0 = gimple_assign_rhs1 (stmt0);
- tree x0 = TREE_OPERAND (cond0, 0);
- tree y0 = TREE_OPERAND (cond0, 1);
-
- tree cond1 = gimple_assign_rhs1 (stmt1);
- tree x1 = TREE_OPERAND (cond1, 0);
- tree y1 = TREE_OPERAND (cond1, 1);
-
tree comb;
if (opcode == BIT_AND_EXPR)
- comb = maybe_fold_and_comparisons (type, cmp0, x0, y0, cmp1, x1,
- y1);
+ comb = maybe_fold_and_comparisons (type, cmp0, lhs0, rhs0,
+ cmp1, lhs1, rhs1);
else if (opcode == BIT_IOR_EXPR)
- comb = maybe_fold_or_comparisons (type, cmp0, x0, y0, cmp1, x1,
- y1);
+ comb = maybe_fold_or_comparisons (type, cmp0, lhs0, rhs0,
+ cmp1, lhs1, rhs1);
else
gcc_unreachable ();
if (comb == NULL)
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Transforming ");
- print_generic_expr (dump_file, cond0);
+ print_generic_expr (dump_file, gimple_assign_lhs (stmt0));
fprintf (dump_file, " %c ", opcode == BIT_AND_EXPR ? '&' : '|');
- print_generic_expr (dump_file, cond1);
+ print_generic_expr (dump_file, gimple_assign_lhs (stmt1));
fprintf (dump_file, " into ");
print_generic_expr (dump_file, comb);
fputc ('\n', dump_file);
}
- gimple_assign_set_rhs1 (stmt0, comb);
+ gimple_stmt_iterator gsi = gsi_for_stmt (vcond0);
+ tree exp = force_gimple_operand_gsi (&gsi, comb, true, NULL_TREE,
+ true, GSI_SAME_STMT);
if (invert)
- std::swap (*gimple_assign_rhs2_ptr (stmt0),
- *gimple_assign_rhs3_ptr (stmt0));
- update_stmt (stmt0);
+ swap_ssa_operands (vcond0, gimple_assign_rhs2_ptr (vcond0),
+ gimple_assign_rhs3_ptr (vcond0));
+ gimple_assign_set_rhs1 (vcond0, exp);
+ update_stmt (vcond0);
elt1 = error_mark_node;
any_changes = true;
if (addend == NULL_TREE
&& expand_vec_cond_expr_p (type, type, LT_EXPR))
{
- tree zero, cst, cond, mask_type;
- gimple *stmt;
+ tree zero, cst, mask_type, mask;
+ gimple *stmt, *cond;
mask_type = truth_type_for (type);
zero = build_zero_cst (type);
- cond = build2 (LT_EXPR, mask_type, op0, zero);
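+ /* VEC_COND_EXPR now requires a gimple value as its condition, so
+ build the mask with a separate comparison statement. */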
+ mask = make_ssa_name (mask_type);
+ cond = gimple_build_assign (mask, LT_EXPR, op0, zero);
+ gsi_insert_before (gsi, cond, GSI_SAME_STMT);
tree_vector_builder vec (type, nunits, 1);
for (i = 0; i < nunits; i++)
vec.quick_push (build_int_cst (TREE_TYPE (type),
<< shifts[i]) - 1));
cst = vec.build ();
addend = make_ssa_name (type);
- stmt = gimple_build_assign (addend, VEC_COND_EXPR, cond,
- cst, zero);
+ stmt
+ = gimple_build_assign (addend, VEC_COND_EXPR, mask, cst, zero);
gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
}
}
tree index = bitsize_int (0);
tree comp_width = width;
tree comp_index = index;
- int i;
location_t loc = gimple_location (gsi_stmt (*gsi));
+ tree_code code = TREE_CODE (a);
- if (!is_gimple_val (a))
+ if (code == SSA_NAME)
{
- gcc_assert (COMPARISON_CLASS_P (a));
- a_is_comparison = true;
- a1 = TREE_OPERAND (a, 0);
- a2 = TREE_OPERAND (a, 1);
- comp_inner_type = TREE_TYPE (TREE_TYPE (a1));
- comp_width = vector_element_bits_tree (TREE_TYPE (a1));
+ gassign *assign = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (a));
+ if (assign != NULL
+ && TREE_CODE_CLASS (gimple_assign_rhs_code (assign)) == tcc_comparison)
+ {
+ a_is_comparison = true;
+ a1 = gimple_assign_rhs1 (assign);
+ a2 = gimple_assign_rhs2 (assign);
+ code = gimple_assign_rhs_code (assign);
+ comp_inner_type = TREE_TYPE (TREE_TYPE (a1));
+ comp_width = vector_element_bits_tree (TREE_TYPE (a1));
+ }
}
- if (expand_vec_cond_expr_p (type, TREE_TYPE (a1), TREE_CODE (a)))
- return;
+ if (expand_vec_cond_expr_p (type, TREE_TYPE (a1), code))
+ {
+ gcc_assert (TREE_CODE (a) == SSA_NAME || TREE_CODE (a) == VECTOR_CST);
+ return;
+ }
/* Handle vector boolean types with bitmasks. If there is a comparison
and we can expand the comparison into the vector boolean bitmask,
: expand_vec_cmp_expr_p (TREE_TYPE (a1), type, TREE_CODE (a))))
{
if (a_is_comparison)
- a = gimplify_build2 (gsi, TREE_CODE (a), type, a1, a2);
+ a = gimplify_build2 (gsi, code, type, a1, a2);
a1 = gimplify_build2 (gsi, BIT_AND_EXPR, type, a, b);
a2 = gimplify_build1 (gsi, BIT_NOT_EXPR, type, a);
a2 = gimplify_build2 (gsi, BIT_AND_EXPR, type, a2, c);
int nunits = nunits_for_known_piecewise_op (type);
vec_alloc (v, nunits);
- for (i = 0; i < nunits; i++)
+ for (int i = 0; i < nunits; i++)
{
tree aa, result;
tree bb = tree_vec_extract (gsi, inner_type, b, width, index);
comp_width, comp_index);
tree aa2 = tree_vec_extract (gsi, comp_inner_type, a2,
comp_width, comp_index);
- aa = fold_build2 (TREE_CODE (a), cond_type, aa1, aa2);
+ aa = fold_build2 (code, cond_type, aa1, aa2);
}
else if (a_is_scalar_bitmask)
{
{
vec_cond_rhs = vec_oprnds1[i];
if (bitop1 == NOP_EXPR)
- vec_compare = build2 (cond_code, vec_cmp_type,
- vec_cond_lhs, vec_cond_rhs);
+ {
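+ /* The comparison can no longer be embedded as the first operand
+ of the VEC_COND_EXPR; emit it as a separate statement. */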
+ gimple_seq stmts = NULL;
+ vec_compare = gimple_build (&stmts, cond_code, vec_cmp_type,
+ vec_cond_lhs, vec_cond_rhs);
+ gsi_insert_before (gsi, stmts, GSI_SAME_STMT);
+ }
else
{
new_temp = make_ssa_name (vec_cmp_type);