+2018-09-19 David Malcolm <dmalcolm@redhat.com>
+
+ * tree-data-ref.c (runtime_alias_check_p): Use formatted printing
+ with %T in place of calls to dump_generic_expr.
+ (prune_runtime_alias_test_list): Likewise.
+ (create_runtime_alias_checks): Likewise.
+ * tree-vect-data-refs.c (vect_check_nonzero_value): Likewise.
+ (vect_analyze_data_ref_dependence): Likewise.
+ (vect_slp_analyze_data_ref_dependence): Likewise.
+ (vect_record_base_alignment): Likewise. Use %G in place of call
+ to dump_gimple_stmt.
+ (vect_compute_data_ref_alignment): Likewise.
+ (verify_data_ref_alignment): Likewise.
+ (vect_find_same_alignment_drs): Likewise.
+ (vect_analyze_group_access_1): Likewise.
+ (vect_analyze_data_ref_accesses): Likewise.
+ (dependence_distance_ge_vf): Likewise.
+ (dump_lower_bound): Likewise.
+ (vect_prune_runtime_alias_test_list): Likewise.
+ (vect_find_stmt_data_reference): Likewise.
+ (vect_analyze_data_refs): Likewise.
+ (vect_create_addr_base_for_vector_ref): Likewise.
+ (vect_create_data_ref_ptr): Likewise.
+ * tree-vect-loop-manip.c (vect_set_loop_condition): Likewise.
+ (vect_can_advance_ivs_p): Likewise.
+ (vect_update_ivs_after_vectorizer): Likewise.
+ (vect_gen_prolog_loop_niters): Likewise.
+ (vect_prepare_for_masked_peels): Likewise.
+ * tree-vect-loop.c (vect_determine_vf_for_stmt): Likewise.
+ (vect_determine_vectorization_factor): Likewise.
+ (vect_is_simple_iv_evolution): Likewise.
+ (vect_analyze_scalar_cycles_1): Likewise.
+ (vect_analyze_loop_operations): Likewise.
+ (report_vect_op): Likewise.
+ (vect_is_slp_reduction): Likewise.
+ (check_reduction_path): Likewise.
+ (vect_is_simple_reduction): Likewise.
+ (vect_create_epilog_for_reduction): Likewise.
+ (vect_finalize_reduction): Likewise.
+ (vectorizable_induction): Likewise.
+ (vect_transform_loop_stmt): Likewise.
+ (vect_transform_loop): Likewise.
+ (optimize_mask_stores): Likewise.
+ * tree-vect-patterns.c (vect_pattern_detected): Likewise.
+ (vect_split_statement): Likewise.
+ (vect_recog_over_widening_pattern): Likewise.
+ (vect_recog_average_pattern): Likewise.
+ (vect_determine_min_output_precision_1): Likewise.
+ (vect_determine_precisions_from_range): Likewise.
+ (vect_determine_precisions_from_users): Likewise.
+ (vect_mark_pattern_stmts): Likewise.
+ (vect_pattern_recog_1): Likewise.
+ * tree-vect-slp.c (vect_get_and_check_slp_defs): Likewise.
+ (vect_record_max_nunits): Likewise.
+ (vect_build_slp_tree_1): Likewise.
+ (vect_build_slp_tree_2): Likewise.
+ (vect_print_slp_tree): Likewise.
+ (vect_analyze_slp_instance): Likewise.
+ (vect_detect_hybrid_slp_stmts): Likewise.
+ (vect_detect_hybrid_slp_1): Likewise.
+ (vect_slp_analyze_operations): Likewise.
+ (vect_slp_analyze_bb_1): Likewise.
+ (vect_transform_slp_perm_load): Likewise.
+ (vect_schedule_slp_instance): Likewise.
+ * tree-vect-stmts.c (vect_mark_relevant): Likewise.
+ (vect_mark_stmts_to_be_vectorized): Likewise.
+ (vect_init_vector_1): Likewise.
+ (vect_get_vec_def_for_operand): Likewise.
+ (vect_finish_stmt_generation_1): Likewise.
+ (vect_check_load_store_mask): Likewise.
+ (vectorizable_call): Likewise.
+ (vectorizable_conversion): Likewise.
+ (vectorizable_operation): Likewise.
+ (vectorizable_load): Likewise.
+ (vect_analyze_stmt): Likewise.
+ (vect_is_simple_use): Likewise.
+ (vect_get_vector_types_for_stmt): Likewise.
+ (vect_get_mask_type_for_stmt): Likewise.
+ * tree-vectorizer.c (increase_alignment): Likewise.
+
2018-09-19 Andrew Stubbs <ams@codesourcery.com>
* doc/rtl.texi: Adjust vec_select description.
runtime_alias_check_p (ddr_p ddr, struct loop *loop, bool speed_p)
{
if (dump_enabled_p ())
- {
- dump_printf (MSG_NOTE, "consider run-time aliasing test between ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf (MSG_NOTE,
+ "consider run-time aliasing test between %T and %T\n",
+ DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));
if (!speed_p)
{
if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
{
if (dump_enabled_p ())
- {
- dump_printf (MSG_NOTE, "found equal ranges ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a1->dr));
- dump_printf (MSG_NOTE, ", ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b1->dr));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a2->dr));
- dump_printf (MSG_NOTE, ", ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b2->dr));
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf (MSG_NOTE, "found equal ranges %T, %T and %T, %T\n",
+ DR_REF (dr_a1->dr), DR_REF (dr_b1->dr),
+ DR_REF (dr_a2->dr), DR_REF (dr_b2->dr));
alias_pairs->ordered_remove (i--);
continue;
}
dr_a1->align = MIN (dr_a1->align, new_align);
}
if (dump_enabled_p ())
- {
- dump_printf (MSG_NOTE, "merging ranges for ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a1->dr));
- dump_printf (MSG_NOTE, ", ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b1->dr));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a2->dr));
- dump_printf (MSG_NOTE, ", ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b2->dr));
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf (MSG_NOTE, "merging ranges for %T, %T and %T, %T\n",
+ DR_REF (dr_a1->dr), DR_REF (dr_b1->dr),
+ DR_REF (dr_a2->dr), DR_REF (dr_b2->dr));
alias_pairs->ordered_remove (i);
i--;
}
const dr_with_seg_len& dr_b = (*alias_pairs)[i].second;
if (dump_enabled_p ())
- {
- dump_printf (MSG_NOTE, "create runtime check for data references ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a.dr));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b.dr));
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf (MSG_NOTE,
+ "create runtime check for data references %T and %T\n",
+ DR_REF (dr_a.dr), DR_REF (dr_b.dr));
/* Create condition expression for each pair data references. */
create_intersect_range_checks (loop, &part_cond_expr, dr_a, dr_b);
return;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "need run-time check that ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, value);
- dump_printf (MSG_NOTE, " is nonzero\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "need run-time check that %T is nonzero\n",
+ value);
LOOP_VINFO_CHECK_NONZERO (loop_vinfo).safe_push (value);
}
|| STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "versioning for alias not supported for: "
- "can't determine dependence between ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- DR_REF (dra));
- dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- DR_REF (drb));
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "versioning for alias not supported for: "
+ "can't determine dependence between %T and %T\n",
+ DR_REF (dra), DR_REF (drb));
return true;
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "versioning for alias required: "
- "can't determine dependence between ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- DR_REF (dra));
- dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- DR_REF (drb));
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "versioning for alias required: "
+ "can't determine dependence between %T and %T\n",
+ DR_REF (dra), DR_REF (drb));
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
|| STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "versioning for alias not supported for: "
- "bad dist vector for ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- DR_REF (dra));
- dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- DR_REF (drb));
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "versioning for alias not supported for: "
+ "bad dist vector for %T and %T\n",
+ DR_REF (dra), DR_REF (drb));
return true;
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "versioning for alias required: "
- "bad dist vector for ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
- dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "versioning for alias required: "
+ "bad dist vector for %T and %T\n",
+ DR_REF (dra), DR_REF (drb));
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
}
if (dist == 0)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "dependence distance == 0 between ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "dependence distance == 0 between %T and %T\n",
+ DR_REF (dra), DR_REF (drb));
/* When we perform grouped accesses and perform implicit CSE
by detecting equal accesses and doing disambiguation with
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized, possible dependence "
- "between data-refs ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized, possible dependence "
+ "between data-refs %T and %T\n",
+ DR_REF (dra), DR_REF (drb));
return true;
}
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "can't determine dependence between ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
- dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "can't determine dependence between %T and %T\n",
+ DR_REF (dra), DR_REF (drb));
}
else if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "determined dependence between ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "determined dependence between %T and %T\n",
+ DR_REF (dra), DR_REF (drb));
return true;
}
{
entry = drb;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "recording new base alignment for ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, drb->base_address);
- dump_printf (MSG_NOTE, "\n");
- dump_printf_loc (MSG_NOTE, vect_location,
- " alignment: %d\n", drb->base_alignment);
- dump_printf_loc (MSG_NOTE, vect_location,
- " misalignment: %d\n", drb->base_misalignment);
- dump_printf_loc (MSG_NOTE, vect_location,
- " based on: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "recording new base alignment for %T\n"
+ " alignment: %d\n"
+ " misalignment: %d\n"
+ " based on: %G",
+ drb->base_address,
+ drb->base_alignment,
+ drb->base_misalignment,
+ stmt_info->stmt);
}
}
|| TREE_CODE (drb->step) != INTEGER_CST)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Unknown alignment for access: ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Unknown alignment for access: %T\n", ref);
return;
}
vector_alignment * BITS_PER_UNIT))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "can't force alignment of ref: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "can't force alignment of ref: %T\n", ref);
return;
}
NOTE: This is the only change to the code we make during
the analysis phase, before deciding to vectorize the loop. */
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "force alignment of %T\n", ref);
dr_info->base_decl = base;
dr_info->base_misaligned = true;
&const_misalignment))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Non-constant misalignment for access: ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Non-constant misalignment for access: %T\n", ref);
return;
}
SET_DR_MISALIGNMENT (dr_info, const_misalignment);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "misalign = %d bytes of ref ",
- DR_MISALIGNMENT (dr_info));
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "misalign = %d bytes of ref %T\n",
+ DR_MISALIGNMENT (dr_info), ref);
return;
}
"not vectorized: unsupported unaligned "
"store.");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- DR_REF (dr_info->dr));
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+ dump_printf (MSG_MISSED_OPTIMIZATION, "%T\n", DR_REF (dr_info->dr));
}
return false;
}
STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "accesses have the same alignment: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "accesses have the same alignment: %T and %T\n",
+ DR_REF (dra), DR_REF (drb));
}
if ((dr_step % type_size) != 0)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "Step ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
- dump_printf (MSG_NOTE,
- " is not a multiple of the element size for ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Step %T is not a multiple of the element size"
+ " for %T\n",
+ step, DR_REF (dr));
return false;
}
groupsize = absu_hwi (dr_step) / type_size;
DR_GROUP_SIZE (stmt_info) = groupsize;
DR_GROUP_GAP (stmt_info) = groupsize - 1;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "Detected single element interleaving ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
- dump_printf (MSG_NOTE, " step ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Detected single element interleaving %T"
+ " step %T\n",
+ DR_REF (dr), step);
return true;
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not consecutive access ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not consecutive access %G", stmt_info->stmt);
if (bb_vinfo)
{
dump_printf (MSG_NOTE, "strided store ");
else
dump_printf (MSG_NOTE, "store ");
- dump_printf (MSG_NOTE, "of size %u starting with ",
- (unsigned)groupsize);
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
+ dump_printf (MSG_NOTE, "of size %u starting with %G",
+ (unsigned)groupsize, stmt_info->stmt);
if (DR_GROUP_GAP (stmt_info) != 0)
dump_printf_loc (MSG_NOTE, vect_location,
"There is a gap of %u elements after the group\n",
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "Detected interleaving ");
- if (DR_IS_READ (dra))
- dump_printf (MSG_NOTE, "load ");
- else
- dump_printf (MSG_NOTE, "store ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ DR_IS_READ (dra)
+ ? "Detected interleaving load %T and %T\n"
+ : "Detected interleaving store %T and %T\n",
+ DR_REF (dra), DR_REF (drb));
/* Link the found element into the group list. */
if (!DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "dependence distance between ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
- dump_printf (MSG_NOTE, " is >= VF\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "dependence distance between %T and %T is >= VF\n",
+ DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));
return true;
}
static void
dump_lower_bound (dump_flags_t dump_kind, const vec_lower_bound &lower_bound)
{
- dump_printf (dump_kind, "%s (", lower_bound.unsigned_p ? "unsigned" : "abs");
- dump_generic_expr (dump_kind, TDF_SLIM, lower_bound.expr);
- dump_printf (dump_kind, ") >= ");
+ dump_printf (dump_kind, "%s (%T) >= ",
+ lower_bound.unsigned_p ? "unsigned" : "abs",
+ lower_bound.expr);
dump_dec (dump_kind, lower_bound.min_value);
}
if (!compared_objects.add (new_pair))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "checking that ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.first);
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.second);
- dump_printf (MSG_NOTE, " have different addresses\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "checking that %T and %T"
+ " have different addresses\n",
+ new_pair.first, new_pair.second);
LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).safe_push (new_pair);
}
continue;
&lower_bound)))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "no need for alias check between ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_a->dr));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_b->dr));
- dump_printf (MSG_NOTE, " when VF is 1\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "no need for alias check between "
+ "%T and %T when VF is 1\n",
+ DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
continue;
}
bool unsigned_p = dr_known_forward_stride_p (dr_info_a->dr);
if (dump_enabled_p ())
{
- dump_printf_loc (MSG_NOTE, vect_location, "no alias between ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_a->dr));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_b->dr));
- dump_printf (MSG_NOTE, " when the step ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_STEP (dr_info_a->dr));
- dump_printf (MSG_NOTE, " is outside ");
+ dump_printf_loc (MSG_NOTE, vect_location, "no alias between "
+ "%T and %T when the step %T is outside ",
+ DR_REF (dr_info_a->dr),
+ DR_REF (dr_info_b->dr),
+ DR_STEP (dr_info_a->dr));
if (unsigned_p)
dump_printf (MSG_NOTE, "[0");
else
if (res >= 0 && dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
- "can tell at compile time that ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_a->dr));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_b->dr));
+ "can tell at compile time that %T and %T",
+ DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
if (res == 0)
dump_printf (MSG_NOTE, " do not alias\n");
else
if (gimple_has_volatile_ops (stmt))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: volatile type ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: volatile type %G", stmt);
return false;
}
if (stmt_can_throw_internal (stmt))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: statement can throw an "
- "exception ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: statement can throw an exception %G",
+ stmt);
return false;
}
if (refs.length () > 1)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: more than one data ref "
- "in stmt: ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: more than one data ref "
+ "in stmt: %G", stmt);
return false;
}
&& gimple_call_internal_fn (call) != IFN_MASK_STORE))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: dr in a call ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: dr in a call %G", stmt);
return false;
}
&& DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: statement is bitfield "
- "access ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: statement is bitfield "
+ "access %G", stmt);
return false;
}
if (gatherscatter == SG_NONE)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: data ref analysis "
- "failed ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: data ref analysis "
+ "failed %G", stmt_info->stmt);
if (is_a <bb_vec_info> (vinfo))
{
/* In BB vectorization the ref can still participate
if (nested_in_vect_loop_p (loop, stmt_info))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: data ref analysis "
- "failed ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: data ref analysis "
+ "failed %G", stmt_info->stmt);
return false;
}
STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
if (base && VAR_P (base) && DECL_NONALIASED (base))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: base object not addressable "
- "for stmt: ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: base object not addressable "
+ "for stmt: %G", stmt_info->stmt);
if (is_a <bb_vec_info> (vinfo))
{
/* In BB vectorization the ref can still participate
if (nested_in_vect_loop_p (loop, stmt_info))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: not suitable for strided "
- "load ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: not suitable for strided "
+ "load %G", stmt_info->stmt);
return false;
}
STMT_VINFO_STRIDED_P (stmt_info) = true;
tree init_ref = build_fold_indirect_ref (init_addr);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "analyze in outer loop: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, init_ref);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "analyze in outer loop: %T\n", init_ref);
if (!dr_analyze_innermost (&STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info),
init_ref, loop))
return false;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "\touter base_address: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM,
- STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
- dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM,
- STMT_VINFO_DR_OFFSET (stmt_info));
- dump_printf (MSG_NOTE,
- "\n\touter constant offset from base address: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM,
- STMT_VINFO_DR_INIT (stmt_info));
- dump_printf (MSG_NOTE, "\n\touter step: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM,
- STMT_VINFO_DR_STEP (stmt_info));
- dump_printf (MSG_NOTE, "\n\touter base alignment: %d\n",
- STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info));
- dump_printf (MSG_NOTE, "\n\touter base misalignment: %d\n",
- STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info));
- dump_printf (MSG_NOTE, "\n\touter offset alignment: %d\n",
- STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info));
- dump_printf (MSG_NOTE, "\n\touter step alignment: %d\n",
- STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "\touter base_address: %T\n"
+ "\touter offset from base address: %T\n"
+ "\touter constant offset from base address: %T\n"
+ "\touter step: %T\n"
+ "\touter base alignment: %d\n\n"
+ "\touter base misalignment: %d\n"
+ "\touter offset alignment: %d\n"
+ "\touter step alignment: %d\n",
+ STMT_VINFO_DR_BASE_ADDRESS (stmt_info),
+ STMT_VINFO_DR_OFFSET (stmt_info),
+ STMT_VINFO_DR_INIT (stmt_info),
+ STMT_VINFO_DR_STEP (stmt_info),
+ STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info),
+ STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info),
+ STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info),
+ STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
}
/* Set vectype for STMT. */
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: no vectype for stmt: ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
+ "not vectorized: no vectype for stmt: %G",
+ stmt_info->stmt);
dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
scalar_type);
else
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "got vectype for stmt: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- dump_generic_expr (MSG_NOTE, TDF_SLIM,
- STMT_VINFO_VECTYPE (stmt_info));
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "got vectype for stmt: %G%T\n",
+ stmt_info->stmt, STMT_VINFO_VECTYPE (stmt_info));
}
/* Adjust the minimal vectorization factor according to the
|| !get_vectype_for_scalar_type (TREE_TYPE (gs_info.offset)))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- (gatherscatter == GATHER) ?
- "not vectorized: not suitable for gather "
- "load " :
- "not vectorized: not suitable for scatter "
- "store ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ (gatherscatter == GATHER) ?
+ "not vectorized: not suitable for gather "
+ "load %G" :
+ "not vectorized: not suitable for scatter "
+ "store %G",
+ stmt_info->stmt);
return false;
}
STMT_VINFO_GATHER_SCATTER_P (stmt_info) = gatherscatter;
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "created ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "created %T\n", addr_base);
return addr_base;
}
{
tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
dump_printf_loc (MSG_NOTE, vect_location,
- "create %s-pointer variable to type: ",
- get_tree_code_name (TREE_CODE (aggr_type)));
- dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
+ "create %s-pointer variable to type: %T",
+ get_tree_code_name (TREE_CODE (aggr_type)),
+ aggr_type);
if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
dump_printf (MSG_NOTE, " vectorizing an array ref: ");
else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
else
dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
- dump_printf (MSG_NOTE, "\n");
+ dump_printf (MSG_NOTE, "%T\n", DR_BASE_OBJECT (dr));
}
/* (1) Create the new aggregate-pointer variable.
gsi_remove (&loop_cond_gsi, true);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "New loop exit condition: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, cond_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "New loop exit condition: %G",
+ cond_stmt);
}
/* Helper routine of slpeel_tree_duplicate_loop_to_edge_cfg.
gphi *phi = gsi.phi ();
stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G",
+ phi_info->stmt);
/* Skip virtual phi's. The data dependences that are associated with
virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.
gphi *phi1 = gsi1.phi ();
stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "vect_update_ivs_after_vectorizer: phi: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vect_update_ivs_after_vectorizer: phi: %G", phi);
/* Skip reduction and virtual phis. */
if (!iv_phi_p (phi_info))
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "niters for prolog loop: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, iters);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "niters for prolog loop: %T\n", iters);
var = create_tmp_var (niters_type, "prolog_loop_niters");
iters_name = force_gimple_operand (iters, &new_stmts, false, var);
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "misalignment for fully-masked loop: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, misalign_in_elems);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "misalignment for fully-masked loop: %T\n",
+ misalign_in_elems);
LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo) = misalign_in_elems;
{
vec_info *vinfo = stmt_info->vinfo;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
+ stmt_info->stmt);
if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers))
return false;
{
stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "==> examining pattern def stmt: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
- def_stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "==> examining pattern def stmt: %G",
+ def_stmt_info->stmt);
if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true,
vf, mask_producers))
return false;
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "==> examining pattern statement: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "==> examining pattern statement: %G",
+ stmt_info->stmt);
if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers))
return false;
}
phi = si.phi ();
stmt_info = loop_vinfo->lookup_stmt (phi);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: %G",
+ phi);
gcc_assert (stmt_info);
scalar_type = TREE_TYPE (PHI_RESULT (phi));
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "get vectype for scalar type: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "get vectype for scalar type: %T\n",
+ scalar_type);
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: unsupported "
- "data-type ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- scalar_type);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: unsupported "
+ "data-type %T\n",
+ scalar_type);
return false;
}
STMT_VINFO_VECTYPE (stmt_info) = vectype;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n",
+ vectype);
if (dump_enabled_p ())
{
init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "step: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
- dump_printf (MSG_NOTE, ", init: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "step: %T, init: %T\n",
+ step_expr, init_expr);
*init = init_expr;
*step = step_expr;
stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);
/* Skip virtual phi's. The data dependences that are associated with
virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
{
STRIP_NOPS (access_fn);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "Access function of PHI: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Access function of PHI: %T\n", access_fn);
STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
= initial_condition_in_loop_num (access_fn, loop->num);
STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
tree def = PHI_RESULT (phi);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);
gcc_assert (!virtual_operand_p (def)
&& STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
stmt_info = loop_vinfo->lookup_stmt (phi);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "examining phi: %G", phi);
if (virtual_operand_p (gimple_phi_result (phi)))
continue;
if (!ok)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: relevant phi not "
- "supported: ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: relevant phi not "
+ "supported: %G", phi);
return false;
}
}
static void
report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
{
- dump_printf_loc (msg_type, vect_location, "%s", msg);
- dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
+ dump_printf_loc (msg_type, vect_location, "%s%G", msg, stmt);
}
/* DEF_STMT_INFO occurs in a loop that contains a potential reduction
&& vect_valid_reduction_input_p (def_stmt_info))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: %G",
+ next_stmt);
swap_ssa_operands (next_stmt,
gimple_assign_rhs1_ptr (next_stmt),
unsigned i;
std::pair<ssa_op_iter, use_operand_p> *x;
FOR_EACH_VEC_ELT (path, i, x)
- {
- dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
- dump_printf (MSG_NOTE, " ");
- }
+ dump_printf (MSG_NOTE, "%T ", USE_FROM_PTR (x->second));
dump_printf (MSG_NOTE, "\n");
}
if (TREE_CODE (loop_arg) != SSA_NAME)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "reduction: not ssa_name: ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "reduction: not ssa_name: %T\n", loop_arg);
return NULL;
}
else
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "reduction: unhandled reduction operation: ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- def_stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "reduction: unhandled reduction operation: %G",
+ def_stmt_info->stmt);
return NULL;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
- "reduction: multiple types: operation type: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
- dump_printf (MSG_NOTE, ", operands types: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM,
- TREE_TYPE (op1));
- dump_printf (MSG_NOTE, ",");
- dump_generic_expr (MSG_NOTE, TDF_SLIM,
- TREE_TYPE (op2));
+ "reduction: multiple types: operation type: "
+ "%T, operands types: %T,%T",
+ type, TREE_TYPE (op1), TREE_TYPE (op2));
if (op3)
- {
- dump_printf (MSG_NOTE, ",");
- dump_generic_expr (MSG_NOTE, TDF_SLIM,
- TREE_TYPE (op3));
- }
+ dump_printf (MSG_NOTE, ",%T", TREE_TYPE (op3));
if (op4)
- {
- dump_printf (MSG_NOTE, ",");
- dump_generic_expr (MSG_NOTE, TDF_SLIM,
- TREE_TYPE (op4));
- }
+ dump_printf (MSG_NOTE, ",%T", TREE_TYPE (op4));
dump_printf (MSG_NOTE, "\n");
}
add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "transform reduction: created def-use cycle: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "transform reduction: created def-use cycle: %G%G",
+ phi, SSA_NAME_DEF_STMT (def));
}
}
add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt),
loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "created double reduction phi node: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "created double reduction phi node: %G",
+ vect_phi);
vect_phi_res = PHI_RESULT (vect_phi);
STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "vector of inductions after inner-loop:");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vector of inductions after inner-loop:%G",
+ new_stmt);
}
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "transform induction: created def-use cycle: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
- SSA_NAME_DEF_STMT (vec_def), 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "transform induction: created def-use cycle: %G%G",
+ induction_phi, SSA_NAME_DEF_STMT (vec_def));
return true;
}
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "------>vectorizing statement: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "------>vectorizing statement: %G", stmt_info->stmt);
if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
vect_loop_kill_debug_uses (loop, stmt_info);
{
gphi *phi = si.phi ();
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "------>vectorizing phi: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "------>vectorizing phi: %G", phi);
stmt_info = loop_vinfo->lookup_stmt (phi);
if (!stmt_info)
continue;
/* Setup GSI_TO to the non-empty block start. */
gsi_to = gsi_start_bb (store_bb);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "Move stmt to created bb\n");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Move stmt to created bb\n%G", last);
/* Move all stored value producers if possible. */
while (!gsi_end_p (gsi))
{
/* Can move STMT1 to STORE_BB. */
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "Move stmt to created bb\n");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Move stmt to created bb\n%G", stmt1);
gsi_move_before (&gsi_from, &gsi_to);
/* Shift GSI_TO for further insertion. */
gsi_prev (&gsi_to);
vect_pattern_detected (const char *name, gimple *stmt)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "%s: detected: ", name);
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "%s: detected: %G", name, stmt);
}
/* Associate pattern statement PATTERN_STMT with ORIG_STMT_INFO and
vect_init_pattern_stmt (stmt1, orig_stmt2_info, vectype);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "Splitting pattern statement: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Splitting pattern statement: %G", stmt2_info->stmt);
/* Since STMT2_INFO is a pattern statement, we can change it
in-situ without worrying about changing the code for the
if (dump_enabled_p ())
{
- dump_printf_loc (MSG_NOTE, vect_location, "into: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
- dump_printf_loc (MSG_NOTE, vect_location, "and: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0);
+ dump_printf_loc (MSG_NOTE, vect_location, "into: %G", stmt1);
+ dump_printf_loc (MSG_NOTE, vect_location, "and: %G",
+ stmt2_info->stmt);
}
gimple_seq *def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt2_info);
return false;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "Splitting statement: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Splitting statement: %G", stmt2_info->stmt);
/* Add STMT1 as a singleton pattern definition sequence. */
gimple_seq *def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (stmt2_info);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
- "into pattern statements: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
- dump_printf_loc (MSG_NOTE, vect_location, "and: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt2, 0);
+ "into pattern statements: %G", stmt1);
+ dump_printf_loc (MSG_NOTE, vect_location, "and: %G", new_stmt2);
}
return true;
return NULL;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "demoting ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
- dump_printf (MSG_NOTE, " to ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, new_type);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "demoting %T to %T\n",
+ type, new_type);
/* Calculate the rhs operands for an operation on NEW_TYPE. */
tree ops[3] = {};
gimple_set_location (pattern_stmt, gimple_location (last_stmt));
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "created pattern stmt: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "created pattern stmt: %G", pattern_stmt);
pattern_stmt = vect_convert_output (last_stmt_info, type,
pattern_stmt, new_vectype);
gimple_set_location (average_stmt, gimple_location (last_stmt));
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "created pattern stmt: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, average_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "created pattern stmt: %G", average_stmt);
return vect_convert_output (last_stmt_info, type, average_stmt, new_vectype);
}
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "only the low %d bits of ",
- precision);
- dump_generic_expr (MSG_NOTE, TDF_SLIM, lhs);
- dump_printf (MSG_NOTE, " are significant\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "only the low %d bits of %T are significant\n",
+ precision, lhs);
stmt_info->min_output_precision = precision;
return true;
}
return;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d"
- " without loss of precision: ",
- sign == SIGNED ? "signed" : "unsigned",
- value_precision);
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d"
+ " without loss of precision: %G",
+ sign == SIGNED ? "signed" : "unsigned",
+ value_precision, stmt);
vect_set_operation_type (stmt_info, type, value_precision, sign);
vect_set_min_input_precision (stmt_info, type, value_precision);
if (operation_precision < precision)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d"
- " without affecting users: ",
- TYPE_UNSIGNED (type) ? "unsigned" : "signed",
- operation_precision);
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d"
+ " without affecting users: %G",
+ TYPE_UNSIGNED (type) ? "unsigned" : "signed",
+ operation_precision, stmt);
vect_set_operation_type (stmt_info, type, operation_precision,
TYPE_SIGN (type));
}
sequence. */
orig_pattern_stmt = orig_stmt_info->stmt;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "replacing earlier pattern ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, orig_pattern_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "replacing earlier pattern %G", orig_pattern_stmt);
/* To keep the book-keeping simple, just swap the lhs of the
old and new statements, so that the old one has a valid but
gimple_set_lhs (pattern_stmt, old_lhs);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "with ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "with %G", pattern_stmt);
/* Switch to the statement that ORIG replaces. */
orig_stmt_info = STMT_VINFO_RELATED_STMT (orig_stmt_info);
/* Found a vectorizable pattern. */
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "%s pattern recognized: ", recog_func->name);
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "%s pattern recognized: %G",
+ recog_func->name, pattern_stmt);
/* Mark the stmts that are involved in the pattern. */
vect_mark_pattern_stmts (stmt_info, pattern_stmt, pattern_vectype);
if (!vect_is_simple_use (oprnd, vinfo, &dt, &def_stmt_info))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: can't analyze def for ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: can't analyze def for %T\n",
+ oprnd);
return -1;
}
TYPE_MODE (type))))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: invalid type of def "
- "for variable-length SLP ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: invalid type of def "
+ "for variable-length SLP %T\n", oprnd);
return -1;
}
}
default:
/* FORNOW: Not supported. */
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: illegal type of def ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: illegal type of def %T\n",
+ oprnd);
return -1;
}
if (STMT_VINFO_NUM_SLP_USES (stmt_info) != 0)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: cannot swap operands of "
- "shared stmt ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: cannot swap operands of "
+ "shared stmt %G", stmt_info->stmt);
return -1;
}
gimple_op_ptr (stmt_info->stmt, op + 1));
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "swapped operands to match def types in ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "swapped operands to match def types in %G",
+ stmt_info->stmt);
}
*swap = swapped;
if (!vectype)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: unsupported data-type in ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: unsupported data-type in %G\n",
+ stmt_info->stmt);
/* Fatal mismatch. */
return false;
}
matches[i] = false;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for %G", stmt);
/* Fail to vectorize statements marked as unvectorizable. */
if (!STMT_VINFO_VECTORIZABLE (stmt_info))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: unvectorizable statement ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: unvectorizable statement %G",
+ stmt);
/* Fatal mismatch. */
matches[0] = false;
return false;
if (lhs == NULL_TREE)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: not GIMPLE_ASSIGN nor "
- "GIMPLE_CALL ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: not GIMPLE_ASSIGN nor "
+ "GIMPLE_CALL %G", stmt);
/* Fatal mismatch. */
matches[0] = false;
return false;
|| gimple_call_chain (call_stmt))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: unsupported call type ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- call_stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: unsupported call type %G",
+ call_stmt);
/* Fatal mismatch. */
matches[0] = false;
return false;
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different operation "
- "in stmt ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+ "in stmt %G", stmt);
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "original stmt ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- first_stmt_info->stmt, 0);
+ "original stmt %G", first_stmt_info->stmt);
}
/* Mismatch. */
continue;
&& !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: different shift "
- "arguments in ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: different shift "
+ "arguments in %G", stmt);
/* Mismatch. */
continue;
}
as_a <gcall *> (stmt)))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: different calls in ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: different calls in %G",
+ stmt);
/* Mismatch. */
continue;
}
if (prev_first_load != first_load)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION,
- vect_location,
- "Build SLP failed: different "
- "interleaving chains in one node ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION,
+ vect_location,
+ "Build SLP failed: different "
+ "interleaving chains in one node %G",
+ stmt);
/* Mismatch. */
continue;
}
{
/* Not grouped load. */
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: not grouped load ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: not grouped load %G", stmt);
/* FORNOW: Not grouped loads are not supported. */
/* Fatal mismatch. */
&& rhs_code != CALL_EXPR)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: operation");
- dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: operation unsupported %G",
+ stmt);
/* Fatal mismatch. */
matches[0] = false;
return false;
else
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: different"
- " operation");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: different"
+ " operation %G", stmt);
/* Mismatch. */
continue;
}
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different operation "
- "in stmt ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmts[i]->stmt, 0);
+ "in stmt %G", stmts[i]->stmt);
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "original stmt ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- first_stmt_info->stmt, 0);
+ "original stmt %G", first_stmt_info->stmt);
}
}
return false;
if (!swap_not_matching)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION,
- vect_location,
- "Build SLP failed: cannot swap "
- "operands of shared stmt ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION,
- TDF_SLIM, stmts[j]->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION,
+ vect_location,
+ "Build SLP failed: cannot swap "
+ "operands of shared stmt %G",
+ stmts[j]->stmt);
goto fail;
}
swap_not_matching = false;
SLP_TREE_DEF_TYPE (node) != vect_internal_def
? " (external)" : "");
FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt_info)
- {
- dump_printf_loc (dump_kind, loc, "\tstmt %d ", i);
- dump_gimple_stmt (dump_kind, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (dump_kind, loc, "\tstmt %d %G", i, stmt_info->stmt);
FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
vect_print_slp_tree (dump_kind, loc, child);
}
if (!vectype)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: unsupported data-type ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: unsupported data-type %T\n",
+ scalar_type);
return false;
}
if (!vect_supported_load_permutation_p (new_instance))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: unsupported load "
- "permutation ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION,
- TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: unsupported load "
+ "permutation %G", stmt_info->stmt);
vect_free_slp_instance (new_instance, false);
return false;
}
&& STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "use of SLP "
- "def in non-SLP stmt: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, use_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "use of SLP "
+ "def in non-SLP stmt: %G", use_stmt);
stype = hybrid;
}
}
&& !HYBRID_SLP_STMT (stmt_vinfo))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_vinfo->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: %G",
+ stmt_vinfo->stmt);
STMT_SLP_TYPE (stmt_vinfo) = hybrid;
}
if (def_stmt_info && PURE_SLP_STMT (def_stmt_info))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: %G",
+ def_stmt_info->stmt);
STMT_SLP_TYPE (def_stmt_info) = hybrid;
}
slp_tree node = SLP_INSTANCE_TREE (instance);
stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
dump_printf_loc (MSG_NOTE, vect_location,
- "removing SLP instance operations starting from: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
+ "removing SLP instance operations starting from: %G",
+ stmt_info->stmt);
vect_free_slp_instance (instance, false);
vinfo->slp_instances.ordered_remove (i);
cost_vec.release ();
slp_tree node = SLP_INSTANCE_TREE (instance);
stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
dump_printf_loc (MSG_NOTE, vect_location,
- "removing SLP instance operations starting from: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
+ "removing SLP instance operations starting from: %G",
+ stmt_info->stmt);
vect_free_slp_instance (instance, false);
BB_VINFO_SLP_INSTANCES (bb_vinfo).ordered_remove (i);
continue;
else
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "permutation requires at "
- "least three vectors ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "permutation requires at "
+ "least three vectors %G",
+ stmt_info->stmt);
gcc_assert (analyze_only);
return false;
}
SLP_TREE_VEC_STMTS (node).create (SLP_TREE_NUMBER_OF_VEC_STMTS (node));
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE,vect_location,
- "------>vectorizing SLP node starting from: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "------>vectorizing SLP node starting from: %G",
+ stmt_info->stmt);
/* Vectorized stmts go before the last scalar stmt which is where
all uses are ready. */
bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "mark relevant %d, live %d: ", relevant, live_p);
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "mark relevant %d, live %d: %G", relevant, live_p,
+ stmt_info->stmt);
/* If this stmt is an original stmt in a pattern, we might need to mark its
related pattern stmt instead of the original stmt. However, such stmts
{
stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? %G",
+ phi_info->stmt);
if (vect_stmt_relevant_p (phi_info, loop_vinfo, &relevant, &live_p))
vect_mark_relevant (&worklist, phi_info, relevant, live_p);
{
stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "init: stmt relevant? %G", stmt_info->stmt);
if (vect_stmt_relevant_p (stmt_info, loop_vinfo, &relevant, &live_p))
vect_mark_relevant (&worklist, stmt_info, relevant, live_p);
stmt_vec_info stmt_vinfo = worklist.pop ();
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "worklist: examine stmt: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_vinfo->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "worklist: examine stmt: %G", stmt_vinfo->stmt);
/* Examine the USEs of STMT. For each USE, mark the stmt that defines it
(DEF_STMT) as relevant/irrelevant according to the relevance property
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "created new init_stmt: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "created new init_stmt: %G", new_stmt);
}
/* Function vect_init_vector.
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "vect_get_vec_def_for_operand: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vect_get_vec_def_for_operand: %T\n", op);
stmt_vec_info def_stmt_info;
is_simple_use = vect_is_simple_use (op, loop_vinfo, &dt,
&def_stmt_info, &def_stmt);
gcc_assert (is_simple_use);
if (def_stmt && dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = %G", def_stmt);
if (dt == vect_constant_def || dt == vect_external_def)
{
stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: %G", vec_stmt);
gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt));
TYPE_VECTOR_SUBPARTS (vectype)))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "vector mask type ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
- dump_printf (MSG_MISSED_OPTIMIZATION,
- " does not match vector data type ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
- dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "vector mask type %T"
+ " does not match vector data type %T.\n",
+ mask_vectype, vectype);
+
return false;
}
if (!vectype_in)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "no vectype for scalar type ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "no vectype for scalar type %T\n", rhs_type);
return false;
}
if (!vectype_in)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "no vectype for scalar type ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "no vectype for scalar type %T\n", rhs_type);
return false;
}
&& !VECTOR_BOOLEAN_TYPE_P (vectype_in))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "can't convert between boolean and non "
- "boolean vectors");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "can't convert between boolean and non "
+ "boolean vectors %T\n", rhs_type);
return false;
}
if (!vectype)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "no vectype for scalar type ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- TREE_TYPE (op0));
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "no vectype for scalar type %T\n",
+ TREE_TYPE (op0));
return false;
}
{
gassign *stmt = as_a <gassign *> (stmt_info->stmt);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "hoisting out of the vectorized loop: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "hoisting out of the vectorized loop: %G", stmt);
scalar_dest = copy_ssa_name (scalar_dest);
tree rhs = unshare_expr (gimple_assign_rhs1 (stmt));
gsi_insert_on_edge_immediate
gimple_seq pattern_def_seq;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
+ stmt_info->stmt);
if (gimple_has_volatile_ops (stmt_info->stmt))
{
{
/* Analyze def stmt of STMT if it's a pattern stmt. */
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "==> examining pattern def statement: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
- pattern_def_stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "==> examining pattern def statement: %G",
+ pattern_def_stmt_info->stmt);
if (!vect_analyze_stmt (pattern_def_stmt_info,
need_to_vectorize, node, node_instance,
/* Analyze PATTERN_STMT instead of the original stmt. */
stmt_info = pattern_stmt_info;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "==> examining pattern statement: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "==> examining pattern statement: %G",
+ stmt_info->stmt);
}
else
{
{
/* Analyze PATTERN_STMT too. */
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "==> examining pattern statement: ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "==> examining pattern statement: %G",
+ pattern_stmt_info->stmt);
if (!vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node,
node_instance, cost_vec))
if (!ok)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: relevant stmt not ");
- dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: relevant stmt not supported: %G",
+ stmt_info->stmt);
return false;
}
&& !can_vectorize_live_stmts (stmt_info, NULL, node, NULL, cost_vec))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: live stmt not supported: ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- stmt_info->stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: live stmt not supported: %G",
+ stmt_info->stmt);
return false;
}
*vectype = STMT_VINFO_VECTYPE (def_stmt_info);
gcc_assert (*vectype != NULL_TREE);
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "vect_is_simple_use: vectype ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, *vectype);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vect_is_simple_use: vectype %T\n", *vectype);
}
else if (*dt == vect_uninitialized_def
|| *dt == vect_constant_def
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: irregular stmt.");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: irregular stmt.%G", stmt);
return false;
}
if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: vector stmt in loop:");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: vector stmt in loop:%G", stmt);
return false;
}
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "get vectype for scalar type: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "get vectype for scalar type: %T\n", scalar_type);
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: unsupported data-type ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- scalar_type);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: unsupported data-type %T\n",
+ scalar_type);
return false;
}
*stmt_vectype_out = vectype;
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", vectype);
}
/* Don't try to compute scalar types if the stmt produces a boolean
&dummy, &dummy);
}
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "get vectype for scalar type: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
- dump_printf (MSG_NOTE, "\n");
- }
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "get vectype for scalar type: %T\n", scalar_type);
nunits_vectype = get_vectype_for_scalar_type (scalar_type);
}
if (!nunits_vectype)
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: unsupported data-type ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: unsupported data-type %T\n",
+ scalar_type);
return false;
}
GET_MODE_SIZE (TYPE_MODE (nunits_vectype))))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: different sized vector "
- "types in statement, ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
- dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, nunits_vectype);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: different sized vector "
+ "types in statement, %T and %T\n",
+ vectype, nunits_vectype);
return false;
}
if (dump_enabled_p ())
{
- dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, nunits_vectype);
- dump_printf (MSG_NOTE, "\n");
+ dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n",
+ nunits_vectype);
dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype));
if (!vect_is_simple_use (rhs, stmt_info->vinfo, &dt, &vectype))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: can't compute mask type "
- "for statement, ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
- 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: can't compute mask type "
+ "for statement, %G", stmt);
return NULL_TREE;
}
TYPE_VECTOR_SUBPARTS (vectype)))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: different sized masks "
- "types in statement, ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- mask_type);
- dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- vectype);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: different sized masks "
+ "types in statement, %T and %T\n",
+ mask_type, vectype);
return NULL_TREE;
}
else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
!= VECTOR_BOOLEAN_TYPE_P (vectype))
{
if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: mixed mask and "
- "nonmask vector types in statement, ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- mask_type);
- dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
- dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
- vectype);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: mixed mask and "
+ "nonmask vector types in statement, "
+ "%T and %T\n",
+ mask_type, vectype);
return NULL_TREE;
}
}
/* No mask_type should mean loop invariant predicate.
This is probably a subject for optimization in if-conversion. */
if (!mask_type && dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: can't compute mask type "
- "for statement, ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: can't compute mask type "
+ "for statement, %G", stmt);
return mask_type;
}
if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
{
vnode->increase_alignment (alignment);
- dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
- dump_printf (MSG_NOTE, "\n");
+ dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
}
}