From 3c2a8ed0d9a56a45a67f87c5fa3a093c6852e62b Mon Sep 17 00:00:00 2001 From: David Malcolm Date: Wed, 19 Sep 2018 16:18:06 +0000 Subject: [PATCH] dump_printf: use %T and %G throughout As promised at Cauldron, this patch uses %T and %G with dump_printf and dump_printf_loc calls to eliminate calls to dump_generic_expr (MSG_*, arg, TDF_SLIM) (via %T) and dump_gimple_stmt (MSG_*, TDF_SLIM, stmt, 0) (via %G) throughout the middle-end, simplifying numerous dump callsites. A few calls to these functions didn't match the above pattern; I didn't touch these. I wasn't able to use %E anywhere. gcc/ChangeLog: * tree-data-ref.c (runtime_alias_check_p): Use formatted printing with %T in place of calls to dump_generic_expr. (prune_runtime_alias_test_list): Likewise. (create_runtime_alias_checks): Likewise. * tree-vect-data-refs.c (vect_check_nonzero_value): Likewise. (vect_analyze_data_ref_dependence): Likewise. (vect_slp_analyze_data_ref_dependence): Likewise. (vect_record_base_alignment): Likewise. Use %G in place of call to dump_gimple_stmt. (vect_compute_data_ref_alignment): Likewise. (verify_data_ref_alignment): Likewise. (vect_find_same_alignment_drs): Likewise. (vect_analyze_group_access_1): Likewise. (vect_analyze_data_ref_accesses): Likewise. (dependence_distance_ge_vf): Likewise. (dump_lower_bound): Likewise. (vect_prune_runtime_alias_test_list): Likewise. (vect_find_stmt_data_reference): Likewise. (vect_analyze_data_refs): Likewise. (vect_create_addr_base_for_vector_ref): Likewise. (vect_create_data_ref_ptr): Likewise. * tree-vect-loop-manip.c (vect_set_loop_condition): Likewise. (vect_can_advance_ivs_p): Likewise. (vect_update_ivs_after_vectorizer): Likewise. (vect_gen_prolog_loop_niters): Likewise. (vect_prepare_for_masked_peels): Likewise. * tree-vect-loop.c (vect_determine_vf_for_stmt): Likewise. (vect_determine_vectorization_factor): Likewise. (vect_is_simple_iv_evolution): Likewise. (vect_analyze_scalar_cycles_1): Likewise. 
(vect_analyze_loop_operations): Likewise. (report_vect_op): Likewise. (vect_is_slp_reduction): Likewise. (check_reduction_path): Likewise. (vect_is_simple_reduction): Likewise. (vect_create_epilog_for_reduction): Likewise. (vect_finalize_reduction): Likewise. (vectorizable_induction): Likewise. (vect_transform_loop_stmt): Likewise. (vect_transform_loop): Likewise. (optimize_mask_stores): Likewise. * tree-vect-patterns.c (vect_pattern_detected): Likewise. (vect_split_statement): Likewise. (vect_recog_over_widening_pattern): Likewise. (vect_recog_average_pattern): Likewise. (vect_determine_min_output_precision_1): Likewise. (vect_determine_precisions_from_range): Likewise. (vect_determine_precisions_from_users): Likewise. (vect_mark_pattern_stmts): Likewise. (vect_pattern_recog_1): Likewise. * tree-vect-slp.c (vect_get_and_check_slp_defs): Likewise. (vect_record_max_nunits): Likewise. (vect_build_slp_tree_1): Likewise. (vect_build_slp_tree_2): Likewise. (vect_print_slp_tree): Likewise. (vect_analyze_slp_instance): Likewise. (vect_detect_hybrid_slp_stmts): Likewise. (vect_detect_hybrid_slp_1): Likewise. (vect_slp_analyze_operations): Likewise. (vect_slp_analyze_bb_1): Likewise. (vect_transform_slp_perm_load): Likewise. (vect_schedule_slp_instance): Likewise. * tree-vect-stmts.c (vect_mark_relevant): Likewise. (vect_mark_stmts_to_be_vectorized): Likewise. (vect_init_vector_1): Likewise. (vect_get_vec_def_for_operand): Likewise. (vect_finish_stmt_generation_1): Likewise. (vect_check_load_store_mask): Likewise. (vectorizable_call): Likewise. (vectorizable_conversion): Likewise. (vectorizable_operation): Likewise. (vectorizable_load): Likewise. (vect_analyze_stmt): Likewise. (vect_is_simple_use): Likewise. (vect_get_vector_types_for_stmt): Likewise. (vect_get_mask_type_for_stmt): Likewise. * tree-vectorizer.c (increase_alignment): Likewise. 
From-SVN: r264424 --- gcc/ChangeLog | 80 ++++++ gcc/tree-data-ref.c | 48 +--- gcc/tree-vect-data-refs.c | 481 ++++++++++++------------------------- gcc/tree-vect-loop-manip.c | 36 +-- gcc/tree-vect-loop.c | 215 +++++------------ gcc/tree-vect-patterns.c | 106 +++----- gcc/tree-vect-slp.c | 242 ++++++------------- gcc/tree-vect-stmts.c | 290 +++++++--------------- gcc/tree-vectorizer.c | 4 +- 9 files changed, 522 insertions(+), 980 deletions(-) diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 6ff707401b4..cf3d66a6c03 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,83 @@ +2018-09-19 David Malcolm + + * tree-data-ref.c (runtime_alias_check_p): Use formatted printing + with %T in place of calls to dump_generic_expr. + (prune_runtime_alias_test_list): Likewise. + (create_runtime_alias_checks): Likewise. + * tree-vect-data-refs.c (vect_check_nonzero_value): Likewise. + (vect_analyze_data_ref_dependence): Likewise. + (vect_slp_analyze_data_ref_dependence): Likewise. + (vect_record_base_alignment): Likewise. Use %G in place of call + to dump_gimple_stmt. + (vect_compute_data_ref_alignment): Likewise. + (verify_data_ref_alignment): Likewise. + (vect_find_same_alignment_drs): Likewise. + (vect_analyze_group_access_1): Likewise. + (vect_analyze_data_ref_accesses): Likewise. + (dependence_distance_ge_vf): Likewise. + (dump_lower_bound): Likewise. + (vect_prune_runtime_alias_test_list): Likewise. + (vect_find_stmt_data_reference): Likewise. + (vect_analyze_data_refs): Likewise. + (vect_create_addr_base_for_vector_ref): Likewise. + (vect_create_data_ref_ptr): Likewise. + * tree-vect-loop-manip.c (vect_set_loop_condition): Likewise. + (vect_can_advance_ivs_p): Likewise. + (vect_update_ivs_after_vectorizer): Likewise. + (vect_gen_prolog_loop_niters): Likewise. + (vect_prepare_for_masked_peels): Likewise. + * tree-vect-loop.c (vect_determine_vf_for_stmt): Likewise. + (vect_determine_vectorization_factor): Likewise. + (vect_is_simple_iv_evolution): Likewise. 
+ (vect_analyze_scalar_cycles_1): Likewise. + (vect_analyze_loop_operations): Likewise. + (report_vect_op): Likewise. + (vect_is_slp_reduction): Likewise. + (check_reduction_path): Likewise. + (vect_is_simple_reduction): Likewise. + (vect_create_epilog_for_reduction): Likewise. + (vect_finalize_reduction:): Likewise. + (vectorizable_induction): Likewise. + (vect_transform_loop_stmt): Likewise. + (vect_transform_loop): Likewise. + (optimize_mask_stores): Likewise. + * tree-vect-patterns.c (vect_pattern_detected): Likewise. + (vect_split_statement): Likewise. + (vect_recog_over_widening_pattern): Likewise. + (vect_recog_average_pattern): Likewise. + (vect_determine_min_output_precision_1): Likewise. + (vect_determine_precisions_from_range): Likewise. + (vect_determine_precisions_from_users): Likewise. + (vect_mark_pattern_stmts): Likewise. + (vect_pattern_recog_1): Likewise. + * tree-vect-slp.c (vect_get_and_check_slp_defs): Likewise. + (vect_record_max_nunits): Likewise. + (vect_build_slp_tree_1): Likewise. + (vect_build_slp_tree_2): Likewise. + (vect_print_slp_tree): Likewise. + (vect_analyze_slp_instance): Likewise. + (vect_detect_hybrid_slp_stmts): Likewise. + (vect_detect_hybrid_slp_1): Likewise. + (vect_slp_analyze_operations): Likewise. + (vect_slp_analyze_bb_1): Likewise. + (vect_transform_slp_perm_load): Likewise. + (vect_schedule_slp_instance): Likewise. + * tree-vect-stmts.c (vect_mark_relevant): Likewise. + (vect_mark_stmts_to_be_vectorized): Likewise. + (vect_init_vector_1): Likewise. + (vect_get_vec_def_for_operand): Likewise. + (vect_finish_stmt_generation_1): Likewise. + (vect_check_load_store_mask): Likewise. + (vectorizable_call): Likewise. + (vectorizable_conversion): Likewise. + (vectorizable_operation): Likewise. + (vectorizable_load): Likewise. + (vect_analyze_stmt): Likewise. + (vect_is_simple_use): Likewise. + (vect_get_vector_types_for_stmt): Likewise. + (vect_get_mask_type_for_stmt): Likewise. 
+ * tree-vectorizer.c (increase_alignment): Likewise. + 2018-09-19 Andrew Stubbs * doc/rtl.texi: Adjust vec_select description. diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c index a8c6872a235..bf30a61c868 100644 --- a/gcc/tree-data-ref.c +++ b/gcc/tree-data-ref.c @@ -1322,13 +1322,9 @@ bool runtime_alias_check_p (ddr_p ddr, struct loop *loop, bool speed_p) { if (dump_enabled_p ()) - { - dump_printf (MSG_NOTE, "consider run-time aliasing test between "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr))); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr))); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf (MSG_NOTE, + "consider run-time aliasing test between %T and %T\n", + DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr))); if (!speed_p) { @@ -1469,17 +1465,9 @@ prune_runtime_alias_test_list (vec *alias_pairs, if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2) { if (dump_enabled_p ()) - { - dump_printf (MSG_NOTE, "found equal ranges "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a1->dr)); - dump_printf (MSG_NOTE, ", "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b1->dr)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a2->dr)); - dump_printf (MSG_NOTE, ", "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b2->dr)); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf (MSG_NOTE, "found equal ranges %T, %T and %T, %T\n", + DR_REF (dr_a1->dr), DR_REF (dr_b1->dr), + DR_REF (dr_a2->dr), DR_REF (dr_b2->dr)); alias_pairs->ordered_remove (i--); continue; } @@ -1576,17 +1564,9 @@ prune_runtime_alias_test_list (vec *alias_pairs, dr_a1->align = MIN (dr_a1->align, new_align); } if (dump_enabled_p ()) - { - dump_printf (MSG_NOTE, "merging ranges for "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a1->dr)); - dump_printf (MSG_NOTE, ", "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b1->dr)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr 
(MSG_NOTE, TDF_SLIM, DR_REF (dr_a2->dr)); - dump_printf (MSG_NOTE, ", "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b2->dr)); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf (MSG_NOTE, "merging ranges for %T, %T and %T, %T\n", + DR_REF (dr_a1->dr), DR_REF (dr_b1->dr), + DR_REF (dr_a2->dr), DR_REF (dr_b2->dr)); alias_pairs->ordered_remove (i); i--; } @@ -1925,13 +1905,9 @@ create_runtime_alias_checks (struct loop *loop, const dr_with_seg_len& dr_b = (*alias_pairs)[i].second; if (dump_enabled_p ()) - { - dump_printf (MSG_NOTE, "create runtime check for data references "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a.dr)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b.dr)); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf (MSG_NOTE, + "create runtime check for data references %T and %T\n", + DR_REF (dr_a.dr), DR_REF (dr_b.dr)); /* Create condition expression for each pair data references. */ create_intersect_range_checks (loop, &part_cond_expr, dr_a, dr_b); diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c index 9beb9d51f87..56b7968dab0 100644 --- a/gcc/tree-vect-data-refs.c +++ b/gcc/tree-vect-data-refs.c @@ -183,11 +183,9 @@ vect_check_nonzero_value (loop_vec_info loop_vinfo, tree value) return; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "need run-time check that "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, value); - dump_printf (MSG_NOTE, " is nonzero\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "need run-time check that %T is nonzero\n", + value); LOOP_VINFO_CHECK_NONZERO (loop_vinfo).safe_push (value); } @@ -351,32 +349,18 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr, || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "versioning for alias not supported for: " - "can't determine dependence between "); - dump_generic_expr 
(MSG_MISSED_OPTIMIZATION, TDF_SLIM, - DR_REF (dra)); - dump_printf (MSG_MISSED_OPTIMIZATION, " and "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - DR_REF (drb)); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "versioning for alias not supported for: " + "can't determine dependence between %T and %T\n", + DR_REF (dra), DR_REF (drb)); return true; } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "versioning for alias required: " - "can't determine dependence between "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - DR_REF (dra)); - dump_printf (MSG_MISSED_OPTIMIZATION, " and "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - DR_REF (drb)); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "versioning for alias required: " + "can't determine dependence between %T and %T\n", + DR_REF (dra), DR_REF (drb)); /* Add to list of ddrs that need to be tested at run-time. 
*/ return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo); @@ -399,30 +383,18 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr, || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "versioning for alias not supported for: " - "bad dist vector for "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - DR_REF (dra)); - dump_printf (MSG_MISSED_OPTIMIZATION, " and "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - DR_REF (drb)); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "versioning for alias not supported for: " + "bad dist vector for %T and %T\n", + DR_REF (dra), DR_REF (drb)); return true; } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "versioning for alias required: " - "bad dist vector for "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); - dump_printf (MSG_MISSED_OPTIMIZATION, " and "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "versioning for alias required: " + "bad dist vector for %T and %T\n", + DR_REF (dra), DR_REF (drb)); /* Add to list of ddrs that need to be tested at run-time. 
*/ return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo); } @@ -445,14 +417,9 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr, if (dist == 0) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "dependence distance == 0 between "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "dependence distance == 0 between %T and %T\n", + DR_REF (dra), DR_REF (drb)); /* When we perform grouped accesses and perform implicit CSE by detecting equal accesses and doing disambiguation with @@ -537,15 +504,10 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr, } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized, possible dependence " - "between data-refs "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized, possible dependence " + "between data-refs %T and %T\n", + DR_REF (dra), DR_REF (drb)); return true; } @@ -639,24 +601,14 @@ vect_slp_analyze_data_ref_dependence (vec_info *vinfo, if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "can't determine dependence between "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); - dump_printf (MSG_MISSED_OPTIMIZATION, " and "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "can't determine dependence between %T and %T\n", + DR_REF (dra), DR_REF (drb)); } else if 
(dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "determined dependence between "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "determined dependence between %T and %T\n", + DR_REF (dra), DR_REF (drb)); return true; } @@ -814,19 +766,15 @@ vect_record_base_alignment (stmt_vec_info stmt_info, { entry = drb; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "recording new base alignment for "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, drb->base_address); - dump_printf (MSG_NOTE, "\n"); - dump_printf_loc (MSG_NOTE, vect_location, - " alignment: %d\n", drb->base_alignment); - dump_printf_loc (MSG_NOTE, vect_location, - " misalignment: %d\n", drb->base_misalignment); - dump_printf_loc (MSG_NOTE, vect_location, - " based on: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "recording new base alignment for %T\n" + " alignment: %d\n" + " misalignment: %d\n" + " based on: %G", + drb->base_address, + drb->base_alignment, + drb->base_misalignment, + stmt_info->stmt); } } @@ -977,12 +925,8 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info) || TREE_CODE (drb->step) != INTEGER_CST) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Unknown alignment for access: "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Unknown alignment for access: %T\n", ref); return; } @@ -995,12 +939,8 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info) vector_alignment * BITS_PER_UNIT)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "can't force alignment of ref: "); - dump_generic_expr (MSG_NOTE, 
TDF_SLIM, ref); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "can't force alignment of ref: %T\n", ref); return; } @@ -1008,11 +948,8 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info) NOTE: This is the only change to the code we make during the analysis phase, before deciding to vectorize the loop. */ if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "force alignment of "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "force alignment of %T\n", ref); dr_info->base_decl = base; dr_info->base_misaligned = true; @@ -1034,25 +971,17 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info) &const_misalignment)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Non-constant misalignment for access: "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Non-constant misalignment for access: %T\n", ref); return; } SET_DR_MISALIGNMENT (dr_info, const_misalignment); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "misalign = %d bytes of ref ", - DR_MISALIGNMENT (dr_info)); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "misalign = %d bytes of ref %T\n", + DR_MISALIGNMENT (dr_info), ref); return; } @@ -1143,9 +1072,7 @@ verify_data_ref_alignment (dr_vec_info *dr_info) "not vectorized: unsupported unaligned " "store."); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - DR_REF (dr_info->dr)); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); + dump_printf (MSG_MISSED_OPTIMIZATION, "%T\n", DR_REF (dr_info->dr)); } return false; } @@ -2337,14 +2264,9 @@ vect_find_same_alignment_drs (vec_info *vinfo, 
data_dependence_relation *ddr) STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb); STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "accesses have the same alignment: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "accesses have the same alignment: %T and %T\n", + DR_REF (dra), DR_REF (drb)); } @@ -2474,15 +2396,10 @@ vect_analyze_group_access_1 (dr_vec_info *dr_info) if ((dr_step % type_size) != 0) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "Step "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, step); - dump_printf (MSG_NOTE, - " is not a multiple of the element size for "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr)); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "Step %T is not a multiple of the element size" + " for %T\n", + step, DR_REF (dr)); return false; } groupsize = absu_hwi (dr_step) / type_size; @@ -2506,25 +2423,17 @@ vect_analyze_group_access_1 (dr_vec_info *dr_info) DR_GROUP_SIZE (stmt_info) = groupsize; DR_GROUP_GAP (stmt_info) = groupsize - 1; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "Detected single element interleaving "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr)); - dump_printf (MSG_NOTE, " step "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, step); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "Detected single element interleaving %T" + " step %T\n", + DR_REF (dr), step); return true; } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not consecutive access "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, 
vect_location, + "not consecutive access %G", stmt_info->stmt); if (bb_vinfo) { @@ -2654,9 +2563,8 @@ vect_analyze_group_access_1 (dr_vec_info *dr_info) dump_printf (MSG_NOTE, "strided store "); else dump_printf (MSG_NOTE, "store "); - dump_printf (MSG_NOTE, "of size %u starting with ", - (unsigned)groupsize); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); + dump_printf (MSG_NOTE, "of size %u starting with %G", + (unsigned)groupsize, stmt_info->stmt); if (DR_GROUP_GAP (stmt_info) != 0) dump_printf_loc (MSG_NOTE, vect_location, "There is a gap of %u elements after the group\n", @@ -3049,18 +2957,11 @@ vect_analyze_data_ref_accesses (vec_info *vinfo) } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "Detected interleaving "); - if (DR_IS_READ (dra)) - dump_printf (MSG_NOTE, "load "); - else - dump_printf (MSG_NOTE, "store "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + DR_IS_READ (dra) + ? "Detected interleaving load %T and %T\n" + : "Detected interleaving store %T and %T\n", + DR_REF (dra), DR_REF (drb)); /* Link the found element into the group list. 
*/ if (!DR_GROUP_FIRST_ELEMENT (stmtinfo_a)) @@ -3236,14 +3137,9 @@ dependence_distance_ge_vf (data_dependence_relation *ddr, } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "dependence distance between "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr))); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr))); - dump_printf (MSG_NOTE, " is >= VF\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "dependence distance between %T and %T is >= VF\n", + DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr))); return true; } @@ -3253,9 +3149,9 @@ dependence_distance_ge_vf (data_dependence_relation *ddr, static void dump_lower_bound (dump_flags_t dump_kind, const vec_lower_bound &lower_bound) { - dump_printf (dump_kind, "%s (", lower_bound.unsigned_p ? "unsigned" : "abs"); - dump_generic_expr (dump_kind, TDF_SLIM, lower_bound.expr); - dump_printf (dump_kind, ") >= "); + dump_printf (dump_kind, "%s (%T) >= ", + lower_bound.unsigned_p ? 
"unsigned" : "abs", + lower_bound.expr); dump_dec (dump_kind, lower_bound.min_value); } @@ -3424,13 +3320,10 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) if (!compared_objects.add (new_pair)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "checking that "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.first); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.second); - dump_printf (MSG_NOTE, " have different addresses\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "checking that %T and %T" + " have different addresses\n", + new_pair.first, new_pair.second); LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).safe_push (new_pair); } continue; @@ -3450,14 +3343,10 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) &lower_bound))) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "no need for alias check between "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_a->dr)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_b->dr)); - dump_printf (MSG_NOTE, " when VF is 1\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "no need for alias check between " + "%T and %T when VF is 1\n", + DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr)); continue; } @@ -3475,13 +3364,11 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) bool unsigned_p = dr_known_forward_stride_p (dr_info_a->dr); if (dump_enabled_p ()) { - dump_printf_loc (MSG_NOTE, vect_location, "no alias between "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_a->dr)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_b->dr)); - dump_printf (MSG_NOTE, " when the step "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_STEP (dr_info_a->dr)); - dump_printf (MSG_NOTE, " is outside "); + dump_printf_loc (MSG_NOTE, vect_location, "no alias between " + "%T and %T when the step %T is 
outside ", + DR_REF (dr_info_a->dr), + DR_REF (dr_info_b->dr), + DR_STEP (dr_info_a->dr)); if (unsigned_p) dump_printf (MSG_NOTE, "[0"); else @@ -3553,10 +3440,8 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) if (res >= 0 && dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, - "can tell at compile time that "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_a->dr)); - dump_printf (MSG_NOTE, " and "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_info_b->dr)); + "can tell at compile time that %T and %T", + DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr)); if (res == 0) dump_printf (MSG_NOTE, " do not alias\n"); else @@ -3974,23 +3859,17 @@ vect_find_stmt_data_reference (loop_p loop, gimple *stmt, if (gimple_has_volatile_ops (stmt)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: volatile type "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: volatile type %G", stmt); return false; } if (stmt_can_throw_internal (stmt)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: statement can throw an " - "exception "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: statement can throw an exception %G", + stmt); return false; } @@ -4004,12 +3883,9 @@ vect_find_stmt_data_reference (loop_p loop, gimple *stmt, if (refs.length () > 1) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: more than one data ref " - "in stmt: "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: more than one data ref " + "in stmt: %G", stmt); return false; } @@ -4019,11 +3895,8 @@ 
vect_find_stmt_data_reference (loop_p loop, gimple *stmt, && gimple_call_internal_fn (call) != IFN_MASK_STORE)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: dr in a call "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: dr in a call %G", stmt); return false; } @@ -4032,12 +3905,9 @@ vect_find_stmt_data_reference (loop_p loop, gimple *stmt, && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1))) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: statement is bitfield " - "access "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: statement is bitfield " + "access %G", stmt); return false; } @@ -4194,13 +4064,9 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf) if (gatherscatter == SG_NONE) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: data ref analysis " - "failed "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: data ref analysis " + "failed %G", stmt_info->stmt); if (is_a (vinfo)) { /* In BB vectorization the ref can still participate @@ -4218,13 +4084,9 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf) if (nested_in_vect_loop_p (loop, stmt_info)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: data ref analysis " - "failed "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: data ref analysis " + "failed %G", stmt_info->stmt); return false; } STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = 
true; @@ -4234,13 +4096,9 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf) if (base && VAR_P (base) && DECL_NONALIASED (base)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: base object not addressable " - "for stmt: "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: base object not addressable " + "for stmt: %G", stmt_info->stmt); if (is_a (vinfo)) { /* In BB vectorization the ref can still participate @@ -4258,13 +4116,9 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf) if (nested_in_vect_loop_p (loop, stmt_info)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: not suitable for strided " - "load "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: not suitable for strided " + "load %G", stmt_info->stmt); return false; } STMT_VINFO_STRIDED_P (stmt_info) = true; @@ -4293,12 +4147,8 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf) tree init_ref = build_fold_indirect_ref (init_addr); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "analyze in outer loop: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, init_ref); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "analyze in outer loop: %T\n", init_ref); if (!dr_analyze_innermost (&STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info), init_ref, loop)) @@ -4306,30 +4156,23 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf) return false; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "\touter base_address: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, - STMT_VINFO_DR_BASE_ADDRESS (stmt_info)); - dump_printf (MSG_NOTE, "\n\touter offset from base address: "); - 
dump_generic_expr (MSG_NOTE, TDF_SLIM, - STMT_VINFO_DR_OFFSET (stmt_info)); - dump_printf (MSG_NOTE, - "\n\touter constant offset from base address: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, - STMT_VINFO_DR_INIT (stmt_info)); - dump_printf (MSG_NOTE, "\n\touter step: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, - STMT_VINFO_DR_STEP (stmt_info)); - dump_printf (MSG_NOTE, "\n\touter base alignment: %d\n", - STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info)); - dump_printf (MSG_NOTE, "\n\touter base misalignment: %d\n", - STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info)); - dump_printf (MSG_NOTE, "\n\touter offset alignment: %d\n", - STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info)); - dump_printf (MSG_NOTE, "\n\touter step alignment: %d\n", - STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info)); - } + dump_printf_loc (MSG_NOTE, vect_location, + "\touter base_address: %T\n" + "\touter offset from base address: %T\n" + "\touter constant offset from base address: %T\n" + "\touter step: %T\n" + "\touter base alignment: %d\n\n" + "\touter base misalignment: %d\n" + "\touter offset alignment: %d\n" + "\touter step alignment: %d\n", + STMT_VINFO_DR_BASE_ADDRESS (stmt_info), + STMT_VINFO_DR_OFFSET (stmt_info), + STMT_VINFO_DR_INIT (stmt_info), + STMT_VINFO_DR_STEP (stmt_info), + STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info), + STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info), + STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info), + STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info)); } /* Set vectype for STMT. 
*/ @@ -4341,9 +4184,8 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf) if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: no vectype for stmt: "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); + "not vectorized: no vectype for stmt: %G", + stmt_info->stmt); dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS, scalar_type); @@ -4362,14 +4204,9 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf) else { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "got vectype for stmt: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - dump_generic_expr (MSG_NOTE, TDF_SLIM, - STMT_VINFO_VECTYPE (stmt_info)); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "got vectype for stmt: %G%T\n", + stmt_info->stmt, STMT_VINFO_VECTYPE (stmt_info)); } /* Adjust the minimal vectorization factor according to the @@ -4386,16 +4223,13 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf) || !get_vectype_for_scalar_type (TREE_TYPE (gs_info.offset))) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - (gatherscatter == GATHER) ? - "not vectorized: not suitable for gather " - "load " : - "not vectorized: not suitable for scatter " - "store "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + (gatherscatter == GATHER) ? 
+ "not vectorized: not suitable for gather " + "load %G" : + "not vectorized: not suitable for scatter " + "store %G", + stmt_info->stmt); return false; } STMT_VINFO_GATHER_SCATTER_P (stmt_info) = gatherscatter; @@ -4610,11 +4444,7 @@ vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info, } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "created "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, "created %T\n", addr_base); return addr_base; } @@ -4728,9 +4558,9 @@ vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type, { tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr)); dump_printf_loc (MSG_NOTE, vect_location, - "create %s-pointer variable to type: ", - get_tree_code_name (TREE_CODE (aggr_type))); - dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type); + "create %s-pointer variable to type: %T", + get_tree_code_name (TREE_CODE (aggr_type)), + aggr_type); if (TREE_CODE (dr_base_type) == ARRAY_TYPE) dump_printf (MSG_NOTE, " vectorizing an array ref: "); else if (TREE_CODE (dr_base_type) == VECTOR_TYPE) @@ -4739,8 +4569,7 @@ vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type, dump_printf (MSG_NOTE, " vectorizing a record based array ref: "); else dump_printf (MSG_NOTE, " vectorizing a pointer ref: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr)); - dump_printf (MSG_NOTE, "\n"); + dump_printf (MSG_NOTE, "%T\n", DR_BASE_OBJECT (dr)); } /* (1) Create the new aggregate-pointer variable. 
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c index dd7463a9c24..a93c6eecb52 100644 --- a/gcc/tree-vect-loop-manip.c +++ b/gcc/tree-vect-loop-manip.c @@ -943,10 +943,8 @@ vect_set_loop_condition (struct loop *loop, loop_vec_info loop_vinfo, gsi_remove (&loop_cond_gsi, true); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "New loop exit condition: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, cond_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "New loop exit condition: %G", + cond_stmt); } /* Helper routine of slpeel_tree_duplicate_loop_to_edge_cfg. @@ -1383,10 +1381,8 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo) gphi *phi = gsi.phi (); stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", + phi_info->stmt); /* Skip virtual phi's. The data dependences that are associated with virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. @@ -1506,11 +1502,8 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, gphi *phi1 = gsi1.phi (); stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "vect_update_ivs_after_vectorizer: phi: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "vect_update_ivs_after_vectorizer: phi: %G", phi); /* Skip reduction and virtual phis. 
*/ if (!iv_phi_p (phi_info)) @@ -1677,12 +1670,8 @@ vect_gen_prolog_loop_niters (loop_vec_info loop_vinfo, } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "niters for prolog loop: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, iters); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "niters for prolog loop: %T\n", iters); var = create_tmp_var (niters_type, "prolog_loop_niters"); iters_name = force_gimple_operand (iters, &new_stmts, false, var); @@ -1801,12 +1790,9 @@ vect_prepare_for_masked_peels (loop_vec_info loop_vinfo) } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "misalignment for fully-masked loop: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, misalign_in_elems); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "misalignment for fully-masked loop: %T\n", + misalign_in_elems); LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo) = misalign_in_elems; diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c index 124a4be0a67..640b0b9700c 100644 --- a/gcc/tree-vect-loop.c +++ b/gcc/tree-vect-loop.c @@ -215,10 +215,8 @@ vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf, { vec_info *vinfo = stmt_info->vinfo; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G", + stmt_info->stmt); if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers)) return false; @@ -234,23 +232,18 @@ vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf, { stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si)); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "==> examining pattern def stmt: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, - def_stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "==> 
examining pattern def stmt: %G", + def_stmt_info->stmt); if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true, vf, mask_producers)) return false; } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "==> examining pattern statement: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "==> examining pattern statement: %G", + stmt_info->stmt); if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers)) return false; } @@ -309,10 +302,8 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo) phi = si.phi (); stmt_info = loop_vinfo->lookup_stmt (phi); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: %G", + phi); gcc_assert (stmt_info); @@ -323,35 +314,25 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo) scalar_type = TREE_TYPE (PHI_RESULT (phi)); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "get vectype for scalar type: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "get vectype for scalar type: %T\n", + scalar_type); vectype = get_vectype_for_scalar_type (scalar_type); if (!vectype) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: unsupported " - "data-type "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - scalar_type); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: unsupported " + "data-type %T\n", + scalar_type); return false; } STMT_VINFO_VECTYPE (stmt_info) = vectype; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); - 
dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", + vectype); if (dump_enabled_p ()) { @@ -432,13 +413,8 @@ vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init, init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb)); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "step: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr); - dump_printf (MSG_NOTE, ", init: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, "step: %T, init: %T\n", + step_expr, init_expr); *init = init_expr; *step = step_expr; @@ -525,10 +501,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop) stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi); /* Skip virtual phi's. The data dependences that are associated with virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. 
*/ @@ -543,12 +516,8 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop) { STRIP_NOPS (access_fn); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "Access function of PHI: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "Access function of PHI: %T\n", access_fn); STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo) = initial_condition_in_loop_num (access_fn, loop->num); STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) @@ -583,10 +552,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop) tree def = PHI_RESULT (phi); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi); gcc_assert (!virtual_operand_p (def) && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type); @@ -1551,10 +1517,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo) stmt_info = loop_vinfo->lookup_stmt (phi); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "examining phi: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "examining phi: %G", phi); if (virtual_operand_p (gimple_phi_result (phi))) continue; @@ -1636,12 +1599,9 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo) if (!ok) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: relevant phi not " - "supported: "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: relevant phi not " + "supported: %G", phi); return false; } } @@ -2505,8 +2465,7 @@ neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code, static void report_vect_op (dump_flags_t msg_type, gimple *stmt, const 
char *msg) { - dump_printf_loc (msg_type, vect_location, "%s", msg); - dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0); + dump_printf_loc (msg_type, vect_location, "%s%G", msg, stmt); } /* DEF_STMT_INFO occurs in a loop that contains a potential reduction @@ -2666,10 +2625,8 @@ vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi, && vect_valid_reduction_input_p (def_stmt_info)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: %G", + next_stmt); swap_ssa_operands (next_stmt, gimple_assign_rhs1_ptr (next_stmt), @@ -2801,10 +2758,7 @@ pop: unsigned i; std::pair *x; FOR_EACH_VEC_ELT (path, i, x) - { - dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second)); - dump_printf (MSG_NOTE, " "); - } + dump_printf (MSG_NOTE, "%T ", USE_FROM_PTR (x->second)); dump_printf (MSG_NOTE, "\n"); } @@ -2947,12 +2901,8 @@ vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info, if (TREE_CODE (loop_arg) != SSA_NAME) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "reduction: not ssa_name: "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "reduction: not ssa_name: %T\n", loop_arg); return NULL; } @@ -2974,12 +2924,9 @@ vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info, else { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "reduction: unhandled reduction operation: "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - def_stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "reduction: unhandled reduction operation: %G", + def_stmt_info->stmt); return NULL; } @@ -3137,27 +3084,14 @@ vect_is_simple_reduction 
(loop_vec_info loop_info, stmt_vec_info phi_info, if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, - "reduction: multiple types: operation type: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, type); - dump_printf (MSG_NOTE, ", operands types: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, - TREE_TYPE (op1)); - dump_printf (MSG_NOTE, ","); - dump_generic_expr (MSG_NOTE, TDF_SLIM, - TREE_TYPE (op2)); + "reduction: multiple types: operation type: " + "%T, operands types: %T,%T", + type, TREE_TYPE (op1), TREE_TYPE (op2)); if (op3) - { - dump_printf (MSG_NOTE, ","); - dump_generic_expr (MSG_NOTE, TDF_SLIM, - TREE_TYPE (op3)); - } + dump_printf (MSG_NOTE, ",%T", TREE_TYPE (op3)); if (op4) - { - dump_printf (MSG_NOTE, ","); - dump_generic_expr (MSG_NOTE, TDF_SLIM, - TREE_TYPE (op4)); - } + dump_printf (MSG_NOTE, ",%T", TREE_TYPE (op4)); dump_printf (MSG_NOTE, "\n"); } @@ -4582,12 +4516,9 @@ vect_create_epilog_for_reduction (vec vect_defs, add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "transform reduction: created def-use cycle: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "transform reduction: created def-use cycle: %G%G", + phi, SSA_NAME_DEF_STMT (def)); } } @@ -5703,11 +5634,9 @@ vect_finalize_reduction: add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt), loop_latch_edge (outer_loop), UNKNOWN_LOCATION); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "created double reduction phi node: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "created double reduction phi node: %G", + vect_phi); vect_phi_res = PHI_RESULT (vect_phi); @@ -7783,23 +7712,17 @@ vectorizable_induction (stmt_vec_info stmt_info, STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info; if 
(dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "vector of inductions after inner-loop:"); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "vector of inductions after inner-loop:%G", + new_stmt); } } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "transform induction: created def-use cycle: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, - SSA_NAME_DEF_STMT (vec_def), 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "transform induction: created def-use cycle: %G%G", + induction_phi, SSA_NAME_DEF_STMT (vec_def)); return true; } @@ -8231,11 +8154,8 @@ vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info, poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "------>vectorizing statement: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "------>vectorizing statement: %G", stmt_info->stmt); if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info)) vect_loop_kill_debug_uses (loop, stmt_info); @@ -8416,11 +8336,8 @@ vect_transform_loop (loop_vec_info loop_vinfo) { gphi *phi = si.phi (); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "------>vectorizing phi: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "------>vectorizing phi: %G", phi); stmt_info = loop_vinfo->lookup_stmt (phi); if (!stmt_info) continue; @@ -8790,11 +8707,8 @@ optimize_mask_stores (struct loop *loop) /* Setup GSI_TO to the non-empty block start. 
*/ gsi_to = gsi_start_bb (store_bb); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "Move stmt to created bb\n"); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "Move stmt to created bb\n%G", last); /* Move all stored value producers if possible. */ while (!gsi_end_p (gsi)) { @@ -8858,11 +8772,8 @@ optimize_mask_stores (struct loop *loop) /* Can move STMT1 to STORE_BB. */ if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "Move stmt to created bb\n"); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "Move stmt to created bb\n%G", stmt1); gsi_move_before (&gsi_from, &gsi_to); /* Shift GSI_TO for further insertion. */ gsi_prev (&gsi_to); diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c index d41c61a3908..7956c1326d3 100644 --- a/gcc/tree-vect-patterns.c +++ b/gcc/tree-vect-patterns.c @@ -88,10 +88,7 @@ static void vect_pattern_detected (const char *name, gimple *stmt) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "%s: detected: ", name); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "%s: detected: %G", name, stmt); } /* Associate pattern statement PATTERN_STMT with ORIG_STMT_INFO and @@ -639,11 +636,8 @@ vect_split_statement (stmt_vec_info stmt2_info, tree new_rhs, vect_init_pattern_stmt (stmt1, orig_stmt2_info, vectype); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "Splitting pattern statement: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "Splitting pattern statement: %G", stmt2_info->stmt); /* Since STMT2_INFO is a pattern statement, we can change it in-situ without worrying about changing the code for the @@ -652,10 +646,9 @@ vect_split_statement (stmt_vec_info stmt2_info, tree new_rhs, if (dump_enabled_p ()) { - 
dump_printf_loc (MSG_NOTE, vect_location, "into: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0); - dump_printf_loc (MSG_NOTE, vect_location, "and: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0); + dump_printf_loc (MSG_NOTE, vect_location, "into: %G", stmt1); + dump_printf_loc (MSG_NOTE, vect_location, "and: %G", + stmt2_info->stmt); } gimple_seq *def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt2_info); @@ -683,11 +676,8 @@ vect_split_statement (stmt_vec_info stmt2_info, tree new_rhs, return false; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "Splitting statement: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt2_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "Splitting statement: %G", stmt2_info->stmt); /* Add STMT1 as a singleton pattern definition sequence. */ gimple_seq *def_seq = &STMT_VINFO_PATTERN_DEF_SEQ (stmt2_info); @@ -702,10 +692,8 @@ vect_split_statement (stmt_vec_info stmt2_info, tree new_rhs, if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, - "into pattern statements: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0); - dump_printf_loc (MSG_NOTE, vect_location, "and: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt2, 0); + "into pattern statements: %G", stmt1); + dump_printf_loc (MSG_NOTE, vect_location, "and: %G", new_stmt2); } return true; @@ -1662,13 +1650,8 @@ vect_recog_over_widening_pattern (stmt_vec_info last_stmt_info, tree *type_out) return NULL; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "demoting "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, type); - dump_printf (MSG_NOTE, " to "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, new_type); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, "demoting %T to %T\n", + type, new_type); /* Calculate the rhs operands for an operation on NEW_TYPE. 
*/ tree ops[3] = {}; @@ -1684,11 +1667,8 @@ vect_recog_over_widening_pattern (stmt_vec_info last_stmt_info, tree *type_out) gimple_set_location (pattern_stmt, gimple_location (last_stmt)); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "created pattern stmt: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "created pattern stmt: %G", pattern_stmt); pattern_stmt = vect_convert_output (last_stmt_info, type, pattern_stmt, new_vectype); @@ -1831,11 +1811,8 @@ vect_recog_average_pattern (stmt_vec_info last_stmt_info, tree *type_out) gimple_set_location (average_stmt, gimple_location (last_stmt)); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "created pattern stmt: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, average_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "created pattern stmt: %G", average_stmt); return vect_convert_output (last_stmt_info, type, average_stmt, new_vectype); } @@ -4411,12 +4388,9 @@ vect_determine_min_output_precision_1 (stmt_vec_info stmt_info, tree lhs) } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "only the low %d bits of ", - precision); - dump_generic_expr (MSG_NOTE, TDF_SLIM, lhs); - dump_printf (MSG_NOTE, " are significant\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "only the low %d bits of %T are significant\n", + precision, lhs); stmt_info->min_output_precision = precision; return true; } @@ -4524,13 +4498,10 @@ vect_determine_precisions_from_range (stmt_vec_info stmt_info, gassign *stmt) return; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d" - " without loss of precision: ", - sign == SIGNED ? "signed" : "unsigned", - value_precision); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d" + " without loss of precision: %G", + sign == SIGNED ? 
"signed" : "unsigned", + value_precision, stmt); vect_set_operation_type (stmt_info, type, value_precision, sign); vect_set_min_input_precision (stmt_info, type, value_precision); @@ -4599,13 +4570,10 @@ vect_determine_precisions_from_users (stmt_vec_info stmt_info, gassign *stmt) if (operation_precision < precision) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d" - " without affecting users: ", - TYPE_UNSIGNED (type) ? "unsigned" : "signed", - operation_precision); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "can narrow to %s:%d" + " without affecting users: %G", + TYPE_UNSIGNED (type) ? "unsigned" : "signed", + operation_precision, stmt); vect_set_operation_type (stmt_info, type, operation_precision, TYPE_SIGN (type)); } @@ -4727,11 +4695,8 @@ vect_mark_pattern_stmts (stmt_vec_info orig_stmt_info, gimple *pattern_stmt, sequence. */ orig_pattern_stmt = orig_stmt_info->stmt; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "replacing earlier pattern "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, orig_pattern_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "replacing earlier pattern %G", orig_pattern_stmt); /* To keep the book-keeping simple, just swap the lhs of the old and new statements, so that the old one has a valid but @@ -4741,10 +4706,7 @@ vect_mark_pattern_stmts (stmt_vec_info orig_stmt_info, gimple *pattern_stmt, gimple_set_lhs (pattern_stmt, old_lhs); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "with "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "with %G", pattern_stmt); /* Switch to the statement that ORIG replaces. */ orig_stmt_info = STMT_VINFO_RELATED_STMT (orig_stmt_info); @@ -4830,11 +4792,9 @@ vect_pattern_recog_1 (vect_recog_func *recog_func, stmt_vec_info stmt_info) /* Found a vectorizable pattern. 
*/ if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "%s pattern recognized: ", recog_func->name); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "%s pattern recognized: %G", + recog_func->name, pattern_stmt); /* Mark the stmts that are involved in the pattern. */ vect_mark_pattern_stmts (stmt_info, pattern_stmt, pattern_vectype); diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c index 0ab7bd8086c..ae1c453c5df 100644 --- a/gcc/tree-vect-slp.c +++ b/gcc/tree-vect-slp.c @@ -368,12 +368,9 @@ again: if (!vect_is_simple_use (oprnd, vinfo, &dt, &def_stmt_info)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: can't analyze def for "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: can't analyze def for %T\n", + oprnd); return -1; } @@ -425,13 +422,9 @@ again: TYPE_MODE (type)))) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: invalid type of def " - "for variable-length SLP "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: invalid type of def " + "for variable-length SLP %T\n", oprnd); return -1; } } @@ -452,12 +445,9 @@ again: default: /* FORNOW: Not supported. 
*/ if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: illegal type of def "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: illegal type of def %T\n", + oprnd); return -1; } @@ -471,13 +461,9 @@ again: if (STMT_VINFO_NUM_SLP_USES (stmt_info) != 0) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: cannot swap operands of " - "shared stmt "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: cannot swap operands of " + "shared stmt %G", stmt_info->stmt); return -1; } @@ -513,11 +499,9 @@ again: gimple_op_ptr (stmt_info->stmt, op + 1)); } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "swapped operands to match def types in "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "swapped operands to match def types in %G", + stmt_info->stmt); } *swap = swapped; @@ -573,13 +557,9 @@ vect_record_max_nunits (stmt_vec_info stmt_info, unsigned int group_size, if (!vectype) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: unsupported data-type in "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: unsupported data-type in %G\n", + stmt_info->stmt); /* Fatal mismatch. 
*/ return false; } @@ -677,20 +657,15 @@ vect_build_slp_tree_1 (unsigned char *swap, matches[i] = false; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for %G", stmt); /* Fail to vectorize statements marked as unvectorizable. */ if (!STMT_VINFO_VECTORIZABLE (stmt_info)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: unvectorizable statement "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: unvectorizable statement %G", + stmt); /* Fatal mismatch. */ matches[0] = false; return false; @@ -700,12 +675,9 @@ vect_build_slp_tree_1 (unsigned char *swap, if (lhs == NULL_TREE) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: not GIMPLE_ASSIGN nor " - "GIMPLE_CALL "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: not GIMPLE_ASSIGN nor " + "GIMPLE_CALL %G", stmt); /* Fatal mismatch. */ matches[0] = false; return false; @@ -737,12 +709,9 @@ vect_build_slp_tree_1 (unsigned char *swap, || gimple_call_chain (call_stmt)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: unsupported call type "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - call_stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: unsupported call type %G", + call_stmt); /* Fatal mismatch. 
*/ matches[0] = false; return false; @@ -848,12 +817,9 @@ vect_build_slp_tree_1 (unsigned char *swap, { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Build SLP failed: different operation " - "in stmt "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); + "in stmt %G", stmt); dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "original stmt "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - first_stmt_info->stmt, 0); + "original stmt %G", first_stmt_info->stmt); } /* Mismatch. */ continue; @@ -863,12 +829,9 @@ vect_build_slp_tree_1 (unsigned char *swap, && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: different shift " - "arguments in "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: different shift " + "arguments in %G", stmt); /* Mismatch. */ continue; } @@ -879,12 +842,9 @@ vect_build_slp_tree_1 (unsigned char *swap, as_a (stmt))) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: different calls in "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: different calls in %G", + stmt); /* Mismatch. */ continue; } @@ -910,14 +870,11 @@ vect_build_slp_tree_1 (unsigned char *swap, if (prev_first_load != first_load) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, - vect_location, - "Build SLP failed: different " - "interleaving chains in one node "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, + vect_location, + "Build SLP failed: different " + "interleaving chains in one node %G", + stmt); /* Mismatch. 
*/ continue; } @@ -932,11 +889,8 @@ vect_build_slp_tree_1 (unsigned char *swap, { /* Not grouped load. */ if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: not grouped load "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: not grouped load %G", stmt); /* FORNOW: Not grouped loads are not supported. */ /* Fatal mismatch. */ @@ -952,12 +906,9 @@ vect_build_slp_tree_1 (unsigned char *swap, && rhs_code != CALL_EXPR) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: operation"); - dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: operation unsupported %G", + stmt); /* Fatal mismatch. */ matches[0] = false; return false; @@ -990,13 +941,9 @@ vect_build_slp_tree_1 (unsigned char *swap, else { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: different" - " operation"); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: different" + " operation %G", stmt); /* Mismatch. 
*/ continue; } @@ -1027,13 +974,9 @@ vect_build_slp_tree_1 (unsigned char *swap, { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Build SLP failed: different operation " - "in stmt "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmts[i]->stmt, 0); + "in stmt %G", stmts[i]->stmt); dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "original stmt "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - first_stmt_info->stmt, 0); + "original stmt %G", first_stmt_info->stmt); } } return false; @@ -1370,14 +1313,11 @@ vect_build_slp_tree_2 (vec_info *vinfo, if (!swap_not_matching) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, - vect_location, - "Build SLP failed: cannot swap " - "operands of shared stmt "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, - TDF_SLIM, stmts[j]->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, + vect_location, + "Build SLP failed: cannot swap " + "operands of shared stmt %G", + stmts[j]->stmt); goto fail; } swap_not_matching = false; @@ -1507,10 +1447,7 @@ vect_print_slp_tree (dump_flags_t dump_kind, dump_location_t loc, SLP_TREE_DEF_TYPE (node) != vect_internal_def ? 
" (external)" : ""); FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt_info) - { - dump_printf_loc (dump_kind, loc, "\tstmt %d ", i); - dump_gimple_stmt (dump_kind, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (dump_kind, loc, "\tstmt %d %G", i, stmt_info->stmt); FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child) vect_print_slp_tree (dump_kind, loc, child); } @@ -1901,12 +1838,9 @@ vect_analyze_slp_instance (vec_info *vinfo, if (!vectype) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: unsupported data-type "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: unsupported data-type %T\n", + scalar_type); return false; } @@ -2034,13 +1968,9 @@ vect_analyze_slp_instance (vec_info *vinfo, if (!vect_supported_load_permutation_p (new_instance)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "Build SLP failed: unsupported load " - "permutation "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, - TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: unsupported load " + "permutation %G", stmt_info->stmt); vect_free_slp_instance (new_instance, false); return false; } @@ -2276,11 +2206,8 @@ vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype) && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "use of SLP " - "def in non-SLP stmt: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, use_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "use of SLP " + "def in non-SLP stmt: %G", use_stmt); stype = hybrid; } } @@ -2290,10 +2217,8 @@ vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype) && !HYBRID_SLP_STMT (stmt_vinfo)) { if 
(dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_vinfo->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: %G", + stmt_vinfo->stmt); STMT_SLP_TYPE (stmt_vinfo) = hybrid; } @@ -2317,10 +2242,8 @@ vect_detect_hybrid_slp_1 (tree *tp, int *, void *data) if (def_stmt_info && PURE_SLP_STMT (def_stmt_info)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: %G", + def_stmt_info->stmt); STMT_SLP_TYPE (def_stmt_info) = hybrid; } @@ -2583,8 +2506,8 @@ vect_slp_analyze_operations (vec_info *vinfo) slp_tree node = SLP_INSTANCE_TREE (instance); stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0]; dump_printf_loc (MSG_NOTE, vect_location, - "removing SLP instance operations starting from: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); + "removing SLP instance operations starting from: %G", + stmt_info->stmt); vect_free_slp_instance (instance, false); vinfo->slp_instances.ordered_remove (i); cost_vec.release (); @@ -2863,8 +2786,8 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin, slp_tree node = SLP_INSTANCE_TREE (instance); stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0]; dump_printf_loc (MSG_NOTE, vect_location, - "removing SLP instance operations starting from: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); + "removing SLP instance operations starting from: %G", + stmt_info->stmt); vect_free_slp_instance (instance, false); BB_VINFO_SLP_INSTANCES (bb_vinfo).ordered_remove (i); continue; @@ -3713,13 +3636,10 @@ vect_transform_slp_perm_load (slp_tree node, vec dr_chain, else { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "permutation requires at " - "least three vectors "); - dump_gimple_stmt 
(MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "permutation requires at " + "least three vectors %G", + stmt_info->stmt); gcc_assert (analyze_only); return false; } @@ -3858,11 +3778,9 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance, SLP_TREE_VEC_STMTS (node).create (SLP_TREE_NUMBER_OF_VEC_STMTS (node)); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE,vect_location, - "------>vectorizing SLP node starting from: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "------>vectorizing SLP node starting from: %G", + stmt_info->stmt); /* Vectorized stmts go before the last scalar stmt which is where all uses are ready. */ diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c index 3b23ad5c75c..7a6efdb5117 100644 --- a/gcc/tree-vect-stmts.c +++ b/gcc/tree-vect-stmts.c @@ -202,11 +202,9 @@ vect_mark_relevant (vec *worklist, stmt_vec_info stmt_info, bool save_live_p = STMT_VINFO_LIVE_P (stmt_info); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "mark relevant %d, live %d: ", relevant, live_p); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "mark relevant %d, live %d: %G", relevant, live_p, + stmt_info->stmt); /* If this stmt is an original stmt in a pattern, we might need to mark its related pattern stmt instead of the original stmt. However, such stmts @@ -633,10 +631,8 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) { stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si)); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? 
%G", + phi_info->stmt); if (vect_stmt_relevant_p (phi_info, loop_vinfo, &relevant, &live_p)) vect_mark_relevant (&worklist, phi_info, relevant, live_p); @@ -645,10 +641,8 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) { stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si)); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "init: stmt relevant? %G", stmt_info->stmt); if (vect_stmt_relevant_p (stmt_info, loop_vinfo, &relevant, &live_p)) vect_mark_relevant (&worklist, stmt_info, relevant, live_p); @@ -663,11 +657,8 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo) stmt_vec_info stmt_vinfo = worklist.pop (); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "worklist: examine stmt: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_vinfo->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "worklist: examine stmt: %G", stmt_vinfo->stmt); /* Examine the USEs of STMT. For each USE, mark the stmt that defines it (DEF_STMT) as relevant/irrelevant according to the relevance property @@ -1383,11 +1374,8 @@ vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt, } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "created new init_stmt: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "created new init_stmt: %G", new_stmt); } /* Function vect_init_vector. 
@@ -1539,22 +1527,15 @@ vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype) loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "vect_get_vec_def_for_operand: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, op); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "vect_get_vec_def_for_operand: %T\n", op); stmt_vec_info def_stmt_info; is_simple_use = vect_is_simple_use (op, loop_vinfo, &dt, &def_stmt_info, &def_stmt); gcc_assert (is_simple_use); if (def_stmt && dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = %G", def_stmt); if (dt == vect_constant_def || dt == vect_external_def) { @@ -1725,10 +1706,7 @@ vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt) stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: %G", vec_stmt); gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt)); @@ -2535,15 +2513,11 @@ vect_check_load_store_mask (stmt_vec_info stmt_info, tree mask, TYPE_VECTOR_SUBPARTS (vectype))) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "vector mask type "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype); - dump_printf (MSG_MISSED_OPTIMIZATION, - " does not match vector data type "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); - dump_printf (MSG_MISSED_OPTIMIZATION, ".\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "vector mask type %T" + " does not match vector data type %T.\n", + mask_vectype, vectype); + return false; } @@ 
-3222,12 +3196,8 @@ vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (!vectype_in) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "no vectype for scalar type "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "no vectype for scalar type %T\n", rhs_type); return false; } @@ -4716,12 +4686,8 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (!vectype_in) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "no vectype for scalar type "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "no vectype for scalar type %T\n", rhs_type); return false; } @@ -4730,13 +4696,9 @@ vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, && !VECTOR_BOOLEAN_TYPE_P (vectype_in)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "can't convert between boolean and non " - "boolean vectors"); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "can't convert between boolean and non " + "boolean vectors %T\n", rhs_type); return false; } @@ -5893,13 +5855,9 @@ vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, if (!vectype) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "no vectype for scalar type "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - TREE_TYPE (op0)); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "no vectype for scalar type %T\n", + TREE_TYPE (op0)); 
return false; } @@ -7675,11 +7633,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, { gassign *stmt = as_a (stmt_info->stmt); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "hoisting out of the vectorized loop: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "hoisting out of the vectorized loop: %G", stmt); scalar_dest = copy_ssa_name (scalar_dest); tree rhs = unshare_expr (gimple_assign_rhs1 (stmt)); gsi_insert_on_edge_immediate @@ -9439,10 +9394,8 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, gimple_seq pattern_def_seq; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G", + stmt_info->stmt); if (gimple_has_volatile_ops (stmt_info->stmt)) { @@ -9468,12 +9421,9 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, { /* Analyze def stmt of STMT if it's a pattern stmt. */ if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "==> examining pattern def statement: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, - pattern_def_stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "==> examining pattern def statement: %G", + pattern_def_stmt_info->stmt); if (!vect_analyze_stmt (pattern_def_stmt_info, need_to_vectorize, node, node_instance, @@ -9509,11 +9459,9 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, /* Analyze PATTERN_STMT instead of the original stmt. 
*/ stmt_info = pattern_stmt_info; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "==> examining pattern statement: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "==> examining pattern statement: %G", + stmt_info->stmt); } else { @@ -9531,11 +9479,9 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, { /* Analyze PATTERN_STMT too. */ if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "==> examining pattern statement: "); - dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt_info->stmt, 0); - } + dump_printf_loc (MSG_NOTE, vect_location, + "==> examining pattern statement: %G", + pattern_stmt_info->stmt); if (!vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node, node_instance, cost_vec)) @@ -9629,13 +9575,9 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, if (!ok) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: relevant stmt not "); - dump_printf (MSG_MISSED_OPTIMIZATION, "supported: "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: relevant stmt not supported: %G", + stmt_info->stmt); return false; } @@ -9647,12 +9589,9 @@ vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, && !can_vectorize_live_stmts (stmt_info, NULL, node, NULL, cost_vec)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: live stmt not supported: "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - stmt_info->stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: live stmt not supported: %G", + stmt_info->stmt); return false; } @@ -10142,12 +10081,8 @@ vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt, *vectype = 
STMT_VINFO_VECTYPE (def_stmt_info); gcc_assert (*vectype != NULL_TREE); if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "vect_is_simple_use: vectype "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, *vectype); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "vect_is_simple_use: vectype %T\n", *vectype); } else if (*dt == vect_uninitialized_def || *dt == vect_constant_def @@ -10629,22 +10564,16 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info, } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: irregular stmt."); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: irregular stmt.%G", stmt); return false; } if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: vector stmt in loop:"); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: vector stmt in loop:%G", stmt); return false; } @@ -10682,23 +10611,15 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info, } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "get vectype for scalar type: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "get vectype for scalar type: %T\n", scalar_type); vectype = get_vectype_for_scalar_type (scalar_type); if (!vectype) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: unsupported data-type "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - scalar_type); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: 
unsupported data-type %T\n", + scalar_type); return false; } @@ -10706,11 +10627,7 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info, *stmt_vectype_out = vectype; if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", vectype); } /* Don't try to compute scalar types if the stmt produces a boolean @@ -10730,23 +10647,16 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info, &dummy, &dummy); } if (dump_enabled_p ()) - { - dump_printf_loc (MSG_NOTE, vect_location, - "get vectype for scalar type: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); - dump_printf (MSG_NOTE, "\n"); - } + dump_printf_loc (MSG_NOTE, vect_location, + "get vectype for scalar type: %T\n", scalar_type); nunits_vectype = get_vectype_for_scalar_type (scalar_type); } if (!nunits_vectype) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: unsupported data-type "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: unsupported data-type %T\n", + scalar_type); return false; } @@ -10754,23 +10664,17 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info, GET_MODE_SIZE (TYPE_MODE (nunits_vectype)))) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: different sized vector " - "types in statement, "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); - dump_printf (MSG_MISSED_OPTIMIZATION, " and "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, nunits_vectype); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: different sized vector " + "types in 
statement, %T and %T\n", + vectype, nunits_vectype); return false; } if (dump_enabled_p ()) { - dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, nunits_vectype); - dump_printf (MSG_NOTE, "\n"); + dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", + nunits_vectype); dump_printf_loc (MSG_NOTE, vect_location, "nunits = "); dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype)); @@ -10818,13 +10722,9 @@ vect_get_mask_type_for_stmt (stmt_vec_info stmt_info) if (!vect_is_simple_use (rhs, stmt_info->vinfo, &dt, &vectype)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: can't compute mask type " - "for statement, "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, - 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: can't compute mask type " + "for statement, %G", stmt); return NULL_TREE; } @@ -10840,34 +10740,21 @@ vect_get_mask_type_for_stmt (stmt_vec_info stmt_info) TYPE_VECTOR_SUBPARTS (vectype))) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: different sized masks " - "types in statement, "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - mask_type); - dump_printf (MSG_MISSED_OPTIMIZATION, " and "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - vectype); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: different sized masks " + "types in statement, %T and %T\n", + mask_type, vectype); return NULL_TREE; } else if (VECTOR_BOOLEAN_TYPE_P (mask_type) != VECTOR_BOOLEAN_TYPE_P (vectype)) { if (dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: mixed mask and " - "nonmask vector types in statement, "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - mask_type); - dump_printf 
(MSG_MISSED_OPTIMIZATION, " and "); - dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, - vectype); - dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: mixed mask and " + "nonmask vector types in statement, " + "%T and %T\n", + mask_type, vectype); return NULL_TREE; } } @@ -10884,11 +10771,8 @@ vect_get_mask_type_for_stmt (stmt_vec_info stmt_info) /* No mask_type should mean loop invariant predicate. This is probably a subject for optimization in if-conversion. */ if (!mask_type && dump_enabled_p ()) - { - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not vectorized: can't compute mask type " - "for statement, "); - dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); - } + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "not vectorized: can't compute mask type " + "for statement, %G", stmt); return mask_type; } diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c index db4fb766dc2..23bddf3f455 100644 --- a/gcc/tree-vectorizer.c +++ b/gcc/tree-vectorizer.c @@ -1425,9 +1425,7 @@ increase_alignment (void) if (alignment && vect_can_force_dr_alignment_p (decl, alignment)) { vnode->increase_alignment (alignment); - dump_printf (MSG_NOTE, "Increasing alignment of decl: "); - dump_generic_expr (MSG_NOTE, TDF_SLIM, decl); - dump_printf (MSG_NOTE, "\n"); + dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl); } } -- 2.30.2