From: Ben Elliston
Date: Tue, 28 Sep 2004 07:59:54 +0000 (+0000)
Subject: backport: basic-block.h: Include vec.h, errors.h.
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=628f6a4e7c8e168256fe257a0dfc5f4fddda900a;p=gcc.git

backport: basic-block.h: Include vec.h, errors.h.

2004-09-24  Ben Elliston
	    Steven Bosscher
	    Andrew Pinski

	Merge from edge-vector-branch:
	* basic-block.h: Include vec.h, errors.h. Instantiate a VEC(edge).
	(struct edge_def): Remove pred_next, succ_next members.
	(struct basic_block_def): Remove pred, succ members. Add preds
	and succs members of type VEC(edge).
	(FALLTHRU_EDGE): Redefine using EDGE_SUCC.
	(BRANCH_EDGE): Likewise.
	(EDGE_CRITICAL_P): Redefine using EDGE_COUNT.
	(EDGE_COUNT, EDGE_I, EDGE_PRED, EDGE_SUCC): New.
	(edge_iterator): New.
	(ei_start, ei_last, ei_end_p, ei_one_before_end_p): New.
	(ei_next, ei_prev, ei_edge, ei_safe_edge): Likewise.
	(FOR_EACH_EDGE): New.
	* bb-reorder.c (find_traces): Use FOR_EACH_EDGE and EDGE_* macros
	where applicable.
	(rotate_loop): Likewise.
	(find_traces_1_route): Likewise.
	(bb_to_key): Likewise.
	(connect_traces): Likewise.
	(copy_bb_p): Likewise.
	(find_rarely_executed_basic_blocks_and_crossing_edges): Likewise.
	(add_labels_and_missing_jumps): Likewise.
	(fix_up_fall_thru_edges): Likewise.
	(find_jump_block): Likewise.
	(fix_crossing_conditional_branches): Likewise.
	(fix_crossing_unconditional_branches): Likewise.
	(add_reg_crossing_jump_notes): Likewise.
	* bt-load.c (augment_live_range): Likewise.
	* cfg.c (clear_edges): Likewise.
	(unchecked_make_edge): Likewise.
	(cached_make_edge): Likewise.
	(make_single_succ_edge): Likewise.
	(remove_edge): Likewise.
	(redirect_edge_succ_nodup): Likewise.
	(check_bb_profile): Likewise.
	(dump_flow_info): Likewise.
	(alloc_aux_for_edges): Likewise.
	(clear_aux_for_edges): Likewise.
	(dump_cfg_bb_info): Likewise.
	* cfganal.c (forwarder_block_p): Likewise.
	(can_fallthru): Likewise.
	(could_fall_through): Likewise.
	(mark_dfs_back_edges): Likewise.
	(set_edge_can_fallthru_flag): Likewise.
	(find_unreachable_blocks): Likewise.
	(create_edge_list): Likewise.
	(verify_edge_list): Likewise.
	(add_noreturn_fake_exit_edges): Likewise.
	(connect_infinite_loops_to_exit): Likewise.
	(flow_reverse_top_sort_order_compute): Likewise.
	(flow_depth_first_order_compute): Likewise.
	(flow_preorder_transversal_compute): Likewise.
	(flow_dfs_compute_reverse_execute): Likewise.
	(dfs_enumerate_from): Likewise.
	(compute_dominance_frontiers_1): Likewise.
	* cfgbuild.c (make_edges): Likewise.
	(compute_outgoing_frequencies): Likewise.
	(find_many_sub_basic_blocks): Likewise.
	(find_sub_basic_blocks): Likewise.
	* cfgcleanup.c (try_simplify_condjump): Likewise.
	(thread_jump): Likewise.
	(try_forward_edges): Likewise.
	(merge_blocks_move): Likewise.
	(outgoing_edges_match): Likewise.
	(try_crossjump_to_edge): Likewise.
	(try_crossjump_bb): Likewise.
	(try_optimize_cfg): Likewise.
	(merge_seq_blocks): Likewise.
	* cfgexpand.c (expand_gimple_tailcall): Likewise.
	(expand_gimple_basic_block): Likewise.
	(construct_init_block): Likewise.
	(construct_exit_block): Likewise.
	* cfghooks.c (verify_flow_info): Likewise.
	(dump_bb): Likewise.
	(delete_basic_block): Likewise.
	(split_edge): Likewise.
	(merge_blocks): Likewise.
	(make_forwarder_block): Likewise.
	(tidy_fallthru_edges): Likewise.
	(can_duplicate_block_p): Likewise.
	(duplicate_block): Likewise.
	* cfglayout.c (fixup_reorder_chain): Likewise.
	(fixup_fallthru_exit_predecessor): Likewise.
	(can_copy_bbs_p): Likewise.
	(copy_bbs): Likewise.
	* cfgloop.c (flow_loops_cfg_dump): Likewise.
	(flow_loop_entry_edges_find): Likewise.
	(flow_loop_exit_edges_find): Likewise.
	(flow_loop_nodes_find): Likewise.
	(mark_single_exit_loops): Likewise.
	(flow_loop_pre_header_scan): Likewise.
	(flow_loop_pre_header_find): Likewise.
	(update_latch_info): Likewise.
	(canonicalize_loop_headers): Likewise.
	(flow_loops_find): Likewise.
	(get_loop_body_in_bfs_order): Likewise.
	(get_loop_exit_edges): Likewise.
	(num_loop_branches): Likewise.
	(verify_loop_structure): Likewise.
	(loop_latch_edge): Likewise.
	(loop_preheader_edge): Likewise.
	* cfgloopanal.c (mark_irreducible_loops): Likewise.
	(expected_loop_iterations): Likewise.
	* cfgloopmanip.c (remove_bbs): Likewise.
	(fix_bb_placement): Likewise.
	(fix_irreducible_loops): Likewise.
	(remove_path): Likewise.
	(scale_bbs_frequencies): Likewise.
	(loopify): Likewise.
	(unloop): Likewise.
	(fix_loop_placement): Likewise.
	(loop_delete_branch_edge): Likewise.
	(duplicate_loop_to_header_edge): Likewise.
	(mfb_keep_just): Likewise.
	(create_preheader): Likewise.
	(force_single_succ_latches): Likewise.
	(loop_split_edge_with): Likewise.
	(create_loop_notes): Likewise.
	* cfgrtl.c (rtl_split_block): Likewise.
	(rtl_merge_blocks): Likewise.
	(rtl_can_merge_blocks): Likewise.
	(try_redirect_by_replacing_jump): Likewise.
	(force_nonfallthru_and_redirect): Likewise.
	(rtl_tidy_fallthru_edge): Likewise.
	(commit_one_edge_insertion): Likewise.
	(commit_edge_insertions): Likewise.
	(commit_edge_insertions_watch_calls): Likewise.
	(rtl_verify_flow_info_1): Likewise.
	(rtl_verify_flow_info): Likewise.
	(purge_dead_edges): Likewise.
	(cfg_layout_redirect_edge_and_branch): Likewise.
	(cfg_layout_can_merge_blocks_p): Likewise.
	(rtl_flow_call_edges_add): Likewise.
	* cse.c (cse_cc_succs): Likewise.
	* df.c (hybrid_search): Likewise.
	* dominance.c (calc_dfs_tree_nonrec): Likewise.
	(calc_dfs_tree): Likewise.
	(calc_idoms): Likewise.
	(recount_dominator): Likewise.
	* domwalk.c (walk_dominator_tree): Likewise.
	* except.c (emit_to_new_bb_before): Likewise.
	(connect_post_landing_pads): Likewise.
	(sjlj_emit_function_enter): Likewise.
	(sjlj_emit_function_exit): Likewise.
	(finish_eh_generation): Likewise.
	* final.c (compute_alignments): Likewise.
	* flow.c (calculate_global_regs_live): Likewise.
	(initialize_uninitialized_subregs): Likewise.
	(init_propagate_block_info): Likewise.
	* function.c (thread_prologue_and_epilogue_insns): Likewise.
	* gcse.c (find_implicit_sets): Likewise.
	(bypass_block): Likewise.
	(bypass_conditional_jumps): Likewise.
	(compute_pre_data): Likewise.
	(insert_insn_end_bb): Likewise.
	(insert_store): Likewise.
	(remove_reachable_equiv_notes): Likewise.
	* global.c (global_conflicts): Likewise.
	(calculate_reg_pav): Likewise.
	* graph.c (print_rtl_graph_with_bb): Likewise.
	* ifcvt.c (mark_loop_exit_edges): Likewise.
	(merge_if_block): Likewise.
	(find_if_header): Likewise.
	(block_jumps_and_fallthru_p): Likewise.
	(find_if_block): Likewise.
	(find_cond_trap): Likewise.
	(block_has_only_trap): Likewise.
	(find_if_case_1): Likewise.
	(find_if_case_2): Likewise.
	* lambda-code.c (lambda_loopnest_to_gcc_loopnest): Likewise.
	(perfect_nestify): Likewise.
	* lcm.c (compute_antinout_edge): Likewise.
	(compute_laterin): Likewise.
	(compute_available): Likewise.
	(compute_nearerout): Likewise.
	* loop-doloop.c (doloop_modify): Likewise.
	* loop-init.c (loop_optimizer_init): Likewise.
	* loop-invariant.c (find_exits): Likewise.
	* loop-iv.c (simplify_using_initial_values): Likewise.
	(check_simple_exit): Likewise.
	(find_simple_exit): Likewise.
	* loop-unroll.c (peel_loop_completely): Likewise.
	(unroll_loop_constant_iterations): Likewise.
	(unroll_loop_runtime_iterations): Likewise.
	* loop-unswitch.c (may_unswitch_on): Likewise.
	(unswitch_loop): Likewise.
	* modulo-sched.c (generate_prolog_epilog): Likewise.
	(sms_schedule): Likewise.
	* postreload-gcse.c (eliminate_partially_redundant_load): Likewise.
	* predict.c (can_predict_insn_p): Likewise.
	(set_even_probabilities): Likewise.
	(combine_predictions_for_bb): Likewise.
	(predict_loops): Likewise.
	(estimate_probability): Likewise.
	(tree_predict_by_opcode): Likewise.
	(tree_estimate_probability): Likewise.
	(last_basic_block_p): Likewise.
	(propagate_freq): Likewise.
	(estimate_loops_at_level): Likewise.
	(estimate_bb_frequencies): Likewise.
	* profile.c (instrument_edges): Likewise.
	(get_exec_counts): Likewise.
	(compute_branch_probabilities): Likewise.
	(branch_prob): Likewise.
	* ra-build.c (live_in): Likewise.
	* ra-rewrite.c (rewrite_program2): Likewise.
	* ra.c (reg_alloc): Likewise.
	* reg-stack.c (reg_to_stack): Likewise.
	(convert_regs_entry): Likewise.
	(compensate_edge): Likewise.
	(convert_regs_1): Likewise.
	(convert_regs_2): Likewise.
	(convert_regs): Likewise.
	* regrename.c (copyprop_hardreg_forward): Likewise.
	* reload1.c (fixup_abnormal_edges): Likewise.
	* sbitmap.c (sbitmap_intersection_of_succs): Likewise.
	(sbitmap_intersection_of_preds): Likewise.
	(sbitmap_union_of_succs): Likewise.
	(sbitmap_union_of_preds): Likewise.
	* sched-ebb.c (compute_jump_reg_dependencies): Likewise.
	(fix_basic_block_boundaries): Likewise.
	(sched_ebbs): Likewise.
	* sched-rgn.c (build_control_flow): Likewise.
	(find_rgns): Likewise.
	* tracer.c (find_best_successor): Likewise.
	(find_best_predecessor): Likewise.
	(tail_duplicate): Likewise.
	* tree-cfg.c (make_edges): Likewise.
	(make_ctrl_stmt_edges): Likewise.
	(make_goto_expr_edges): Likewise.
	(tree_can_merge_blocks_p): Likewise.
	(tree_merge_blocks): Likewise.
	(cfg_remove_useless_stmts_bb): Likewise.
	(remove_phi_nodes_and_edges_for_unreachable_block): Likewise.
	(tree_block_forwards_to): Likewise.
	(cleanup_control_expr_graph): Likewise.
	(find_taken_edge): Likewise.
	(dump_cfg_stats): Likewise.
	(tree_cfg2vcg): Likewise.
	(disband_implicit_edges): Likewise.
	(tree_find_edge_insert_loc): Likewise.
	(bsi_commit_edge_inserts): Likewise.
	(tree_split_edge): Likewise.
	(tree_verify_flow_info): Likewise.
	(tree_make_forwarder_block): Likewise.
	(tree_forwarder_block_p): Likewise.
	(thread_jumps): Likewise.
	(tree_try_redirect_by_replacing_jump): Likewise.
	(tree_split_block): Likewise.
	(add_phi_args_after_copy_bb): Likewise.
	(rewrite_to_new_ssa_names_bb): Likewise.
	(dump_function_to_file): Likewise.
	(print_pred_bbs): Likewise.
	(print_loop): Likewise.
	(tree_flow_call_edges_add): Likewise.
	(split_critical_edges): Likewise.
	(execute_warn_function_return): Likewise.
	(extract_true_false_edges_from_block): Likewise.
	* tree-if-conv.c (tree_if_conversion): Likewise.
	(if_convertable_bb_p): Likewise.
	(find_phi_replacement_condition): Likewise.
	(combine_blocks): Likewise.
	* tree-into-ssa.c (compute_global_livein): Likewise.
	(ssa_mark_phi_uses): Likewise.
	(ssa_rewrite_initialize_block): Likewise.
	(rewrite_add_phi_arguments): Likewise.
	(ssa_rewrite_phi_arguments): Likewise.
	(insert_phi_nodes_for): Likewise.
	(rewrite_into_ssa): Likewise.
	(rewrite_ssa_into_ssa): Likewise.
	* tree-mudflap.c (mf_build_check_statement_for): Likewise.
	* tree-outof-ssa.c (coalesce_abnormal_edges): Likewise.
	(rewrite_trees): Likewise.
	* tree-pretty-print.c (dump_bb_header): Likewise.
	(dump_implicit_edges): Likewise.
	* tree-sra.c (insert_edge_copies): Likewise.
	(find_obviously_necessary_stmts): Likewise.
	(remove_data_stmt): Likewise.
	* tree-ssa-dom.c (thread_across_edge): Likewise.
	(dom_opt_finalize_block): Likewise.
	(single_incoming_edge_ignoring_loop_edges): Likewise.
	(record_equivalences_from_incoming_edges): Likewise.
	(cprop_into_successor_phis): Likewise.
	* tree-ssa-live.c (live_worklist): Likewise.
	(calculate_live_on_entry): Likewise.
	(calculate_live_on_exit): Likewise.
	* tree-ssa-loop-ch.c (should_duplicate_loop_header_p): Likewise.
	(copy_loop_headers): Likewise.
	* tree-ssa-loop-im.c (loop_commit_inserts): Likewise.
	(fill_always_executed_in): Likewise.
	* tree-ssa-loop-ivcanon.c (create_canonical_iv): Likewise.
	* tree-ssa-loop-ivopts.c (find_interesting_uses): Likewise.
	(compute_phi_arg_on_exit): Likewise.
	* tree-ssa-loop-manip.c (add_exit_phis_edge): Likewise.
	(get_loops_exit): Likewise.
	(split_loop_exit_edge): Likewise.
	(ip_normal_pos): Likewise.
	* tree-ssa-loop-niter.c (simplify_using_initial_conditions): Likewise.
	* tree-ssa-phiopt.c (candidate_bb_for_phi_optimization): Likewise.
	(replace_phi_with_stmt): Likewise.
	(value_replacement): Likewise.
	* tree-ssa-pre.c (compute_antic_aux): Likewise.
	(insert_aux): Likewise.
	(init_pre): Likewise.
	* tree-ssa-propagate.c (simulate_stmt): Likewise.
	(simulate_block): Likewise.
	(ssa_prop_init): Likewise.
	* tree-ssa-threadupdate.c (thread_block): Likewise.
	(create_block_for_threading): Likewise.
	(remove_last_stmt_and_useless_edges): Likewise.
	* tree-ssa.c (verify_phi_args): Likewise.
	(verify_ssa): Likewise.
	* tree-tailcall.c (independent_of_stmt_p): Likewise.
	(find_tail_calls): Likewise.
	(eliminate_tail_call): Likewise.
	(tree_optimize_tail_calls_1): Likewise.
	* tree-vectorizer.c (vect_transform_loop): Likewise.
	* var-tracking.c (prologue_stack_adjust): Likewise.
	(vt_stack_adjustments): Likewise.
	(vt_find_locations): Likewise.
	* config/frv/frv.c (frv_ifcvt_modify_tests): Likewise.
	* config/i386/i386.c (ix86_pad_returns): Likewise.
	* config/ia64/ia64.c (ia64_expand_prologue): Likewise.
	* config/rs6000/rs6000.c (rs6000_emit_prologue): Likewise.

Co-Authored-By: Andrew Pinski
Co-Authored-By: Steven Bosscher

From-SVN: r88222
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 678c005e25a..563ef3e375a 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,334 @@
+2004-09-24  Ben Elliston
+	    Steven Bosscher
+	    Andrew Pinski
+
+	Merge from edge-vector-branch:
+	* basic-block.h: Include vec.h, errors.h. Instantiate a VEC(edge).
+	(struct edge_def): Remove pred_next, succ_next members.
+	(struct basic_block_def): Remove pred, succ members. Add preds
+	and succs members of type VEC(edge).
+	(FALLTHRU_EDGE): Redefine using EDGE_SUCC.
+	(BRANCH_EDGE): Likewise.
+	(EDGE_CRITICAL_P): Redefine using EDGE_COUNT.
+	(EDGE_COUNT, EDGE_I, EDGE_PRED, EDGE_SUCC): New.
+	(edge_iterator): New.
+	(ei_start, ei_last, ei_end_p, ei_one_before_end_p): New.
+	(ei_next, ei_prev, ei_edge, ei_safe_edge): Likewise.
+	(FOR_EACH_EDGE): New.
+	* bb-reorder.c (find_traces): Use FOR_EACH_EDGE and EDGE_* macros
+	where applicable.
+	(rotate_loop): Likewise.
+	(find_traces_1_route): Likewise.
+	(bb_to_key): Likewise.
+	(connect_traces): Likewise.
+	(copy_bb_p): Likewise.
+	(find_rarely_executed_basic_blocks_and_crossing_edges): Likewise.
+	(add_labels_and_missing_jumps): Likewise.
+	(fix_up_fall_thru_edges): Likewise.
+	(find_jump_block): Likewise.
+	(fix_crossing_conditional_branches): Likewise.
+	(fix_crossing_unconditional_branches): Likewise.
+	(add_reg_crossing_jump_notes): Likewise.
+	* bt-load.c (augment_live_range): Likewise.
+ * cfg.c (clear_edges): Likewise. + (unchecked_make_edge): Likewise. + (cached_make_edge): Likewise. + (make_single_succ_edge): Likewise. + (remove_edge): Likewise. + (redirect_edge_succ_nodup): Likewise. + (check_bb_profile): Likewise. + (dump_flow_info): Likewise. + (alloc_aux_for_edges): Likewise. + (clear_aux_for_edges): Likewise. + (dump_cfg_bb_info): Likewise. + * cfganal.c (forwarder_block_p): Likewise. + (can_fallthru): Likewise. + (could_fall_through): Likewise. + (mark_dfs_back_edges): Likewise. + (set_edge_can_fallthru_flag): Likewise. + (find_unreachable_blocks): Likewise. + (create_edge_list): Likewise. + (verify_edge_list): Likewise. + (add_noreturn_fake_exit_edges): Likewise. + (connect_infinite_loops_to_exit): Likewise. + (flow_reverse_top_sort_order_compute): Likewise. + (flow_depth_first_order_compute): Likewise. + (flow_preorder_transversal_compute): Likewise. + (flow_dfs_compute_reverse_execute): Likewise. + (dfs_enumerate_from): Likewise. + (compute_dominance_frontiers_1): Likewise. + * cfgbuild.c (make_edges): Likewise. + (compute_outgoing_frequencies): Likewise. + (find_many_sub_basic_blocks): Likewise. + (find_sub_basic_blocks): Likewise. + * cfgcleanup.c (try_simplify_condjump): Likewise. + (thread_jump): Likewise. + (try_forward_edges): Likewise. + (merge_blocks_move): Likewise. + (outgoing_edges_match): Likewise. + (try_crossjump_to_edge): Likewise. + (try_crossjump_bb): Likewise. + (try_optimize_cfg): Likewise. + (merge_seq_blocks): Likewise. + * cfgexpand.c (expand_gimple_tailcall): Likewise. + (expand_gimple_basic_block): Likewise. + (construct_init_block): Likewise. + (construct_exit_block): Likewise. + * cfghooks.c (verify_flow_info): Likewise. + (dump_bb): Likewise. + (delete_basic_block): Likewise. + (split_edge): Likewise. + (merge_blocks): Likewise. + (make_forwarder_block): Likewise. + (tidy_fallthru_edges): Likewise. + (can_duplicate_block_p): Likewise. + (duplicate_block): Likewise. + * cfglayout.c (fixup_reorder_chain): Likewise. + (fixup_fallthru_exit_predecessor): Likewise. + (can_copy_bbs_p): Likewise. + (copy_bbs): Likewise. + * cfgloop.c (flow_loops_cfg_dump): Likewise. + (flow_loop_entry_edges_find): Likewise. + (flow_loop_exit_edges_find): Likewise. + (flow_loop_nodes_find): Likewise. + (mark_single_exit_loops): Likewise. + (flow_loop_pre_header_scan): Likewise. + (flow_loop_pre_header_find): Likewise. + (update_latch_info): Likewise. + (canonicalize_loop_headers): Likewise. + (flow_loops_find): Likewise. + (get_loop_body_in_bfs_order): Likewise. + (get_loop_exit_edges): Likewise. + (num_loop_branches): Likewise. + (verify_loop_structure): Likewise. + (loop_latch_edge): Likewise. + (loop_preheader_edge): Likewise. + * cfgloopanal.c (mark_irreducible_loops): Likewise. + (expected_loop_iterations): Likewise. + * cfgloopmanip.c (remove_bbs): Likewise. + (fix_bb_placement): Likewise. + (fix_irreducible_loops): Likewise. + (remove_path): Likewise. + (scale_bbs_frequencies): Likewise. + (loopify): Likewise. + (unloop): Likewise. + (fix_loop_placement): Likewise. + (loop_delete_branch_edge): Likewise. + (duplicate_loop_to_header_edge): Likewise. + (mfb_keep_just): Likewise. + (create_preheader): Likewise. + (force_single_succ_latches): Likewise. + (loop_split_edge_with): Likewise. + (create_loop_notes): Likewise. + * cfgrtl.c (rtl_split_block): Likewise. + (rtl_merge_blocks): Likewise. + (rtl_can_merge_blocks): Likewise. + (try_redirect_by_replacing_jump): Likewise. + (force_nonfallthru_and_redirect): Likewise. + (rtl_tidy_fallthru_edge): Likewise. 
+ (commit_one_edge_insertion): Likewise. + (commit_edge_insertions): Likewise. + (commit_edge_insertions_watch_calls): Likewise. + (rtl_verify_flow_info_1): Likewise. + (rtl_verify_flow_info): Likewise. + (purge_dead_edges): Likewise. + (cfg_layout_redirect_edge_and_branch): Likewise. + (cfg_layout_can_merge_blocks_p): Likewise. + (rtl_flow_call_edges_add): Likewise. + * cse.c (cse_cc_succs): Likewise. + * df.c (hybrid_search): Likewise. + * dominance.c (calc_dfs_tree_nonrec): Likewise. + (calc_dfs_tree): Likewise. + (calc_idoms): Likewise. + (recount_dominator): Likewise. + * domwalk.c (walk_dominator_tree): Likewise. + * except.c (emit_to_new_bb_before): Likewise. + (connect_post_landing_pads): Likewise. + (sjlj_emit_function_enter): Likewise. + (sjlj_emit_function_exit): Likewise. + (finish_eh_generation): Likewise. + * final.c (compute_alignments): Likewise. + * flow.c (calculate_global_regs_live): Likewise. + (initialize_uninitialized_subregs): Likewise. + (init_propagate_block_info): Likewise. + * function.c (thread_prologue_and_epilogue_insns): Likewise. + * gcse.c (find_implicit_sets): Likewise. + (bypass_block): Likewise. + (bypass_conditional_jumps): Likewise. + (compute_pre_data): Likewise. + (insert_insn_end_bb): Likewise. + (insert_store): Likewise. + (remove_reachable_equiv_notes): Likewise. + * global.c (global_conflicts): Likewise. + (calculate_reg_pav): Likewise. + * graph.c (print_rtl_graph_with_bb): Likewise. + * ifcvt.c (mark_loop_exit_edges): Likewise. + (merge_if_block): Likewise. + (find_if_header): Likewise. + (block_jumps_and_fallthru_p): Likewise. + (find_if_block): Likewise. + (find_cond_trap): Likewise. + (block_has_only_trap): Likewise. + (find_if_case1): Likewise. + (find_if_case_2): Likewise. + * lambda-code.c (lambda_loopnest_to_gcc_loopnest): Likewise. + (perfect_nestify): Likewise. + * lcm.c (compute_antinout_edge): Likewise. + (compute_laterin): Likewise. + (compute_available): Likewise. + (compute_nearerout): Likewise. + * loop-doloop.c (doloop_modify): Likewise. + * loop-init.c (loop_optimizer_init): Likewise. + * loop-invariant.c (find_exits): Likewise. + * loop-iv.c (simplify_using_initial_values): Likewise. + (check_simple_exit): Likewise. + (find_simple_exit): Likewise. + * loop-unroll.c (peel_loop_completely): Likewise. + (unroll_loop_constant_iterations): Likewise. + (unroll_loop_runtime_iterations): Likewise. + * loop-unswitch.c (may_unswitch_on): Likewise. + (unswitch_loop): Likewise. + * modulo-sched.c (generate_prolog_epilog): Likewise. + (sms_schedule): Likewise. + * postreload-gcse.c (eliminate_partially_redundant_load): + Likewise. + * predict.c (can_predict_insn_p): Likewise. + (set_even_probabilities): Likewise. + (combine_predictions_for_bb): Likewise. + (predict_loops): Likewise. + (estimate_probability): Likewise. + (tree_predict_by_opcode): Likewise. + (tree_estimate_probability): Likewise. + (last_basic_block_p): Likewise. + (propagate_freq): Likewise. + (estimate_loops_at_level): Likewise. + (estimate_bb_frequencies): Likewise. + * profile.c (instrument_edges): Likewise. + (get_exec_counts): Likewise. + (compute_branch_probabilities): Likewise. + (branch_prob): Likewise. + * ra-build.c (live_in): Likewise. + * ra-rewrite.c (rewrite_program2): Likewise. + * ra.c (reg_alloc): Likewise. + * reg-stack.c (reg_to_stack): Likewise. + (convert_regs_entry): Likewise. + (compensate_edge): Likewise. + (convert_regs_1): Likewise, + (convert_regs_2): Likewise. + (convert_regs): Likewise. + * regrename.c (copyprop_hardreg_forward): Likewise. 
+ * reload1.c (fixup_abnormal_edges): Likewise. + * sbitmap.c (sbitmap_intersection_of_succs): Likewise. + (sbitmap_insersection_of_preds): Likewise. + (sbitmap_union_of_succs): Likewise. + (sbitmap_union_of_preds): Likewise. + * sched-ebb.c (compute_jump_reg_dependencies): Likewise. + (fix_basic_block_boundaries): Likewise. + (sched_ebbs): Likewise. + * sched-rgn.c (build_control_flow): Likewise. + (find_rgns): Likewise. + * tracer.c (find_best_successor): Likewise. + (find_best_predecessor): Likewise. + (tail_duplicate): Likewise. + * tree-cfg.c (make_edges): Likewise. + (make_ctrl_stmt_edges): Likewise. + (make_goto_expr_edges): Likewise. + (tree_can_merge_blocks_p): Likewise. + (tree_merge_blocks): Likewise. + (cfg_remove_useless_stmts_bb): Likewise. + (remove_phi_nodes_and_edges_for_unreachable_block): Likewise. + (tree_block_forwards_to): Likewise. + (cleanup_control_expr_graph): Likewise. + (find_taken_edge): Likewise. + (dump_cfg_stats): Likewise. + (tree_cfg2vcg): Likewise. + (disband_implicit_edges): Likewise. + (tree_find_edge_insert_loc): Likewise. + (bsi_commit_edge_inserts): Likewise. + (tree_split_edge): Likewise. + (tree_verify_flow_info): Likewise. + (tree_make_forwarder_block): Likewise. + (tree_forwarder_block_p): Likewise. + (thread_jumps): Likewise. + (tree_try_redirect_by_replacing_jump): Likewise. + (tree_split_block): Likewise. + (add_phi_args_after_copy_bb): Likewise. + (rewrite_to_new_ssa_names_bb): Likewise. + (dump_function_to_file): Likewise. + (print_pred_bbs): Likewise. + (print_loop): Likewise. + (tree_flow_call_edges_add): Likewise. + (split_critical_edges): Likewise. + (execute_warn_function_return): Likewise. + (extract_true_false_edges_from_block): Likewise. + * tree-if-conv.c (tree_if_conversion): Likewise. + (if_convertable_bb_p): Likewise. + (find_phi_replacement_condition): Likewise. + (combine_blocks): Likewise. + * tree-into-ssa.c (compute_global_livein): Likewise. + (ssa_mark_phi_uses): Likewise. + (ssa_rewrite_initialize_block): Likewise. + (rewrite_add_phi_arguments): Likewise. + (ssa_rewrite_phi_arguments): Likewise. + (insert_phi_nodes_for): Likewise. + (rewrite_into_ssa): Likewise. + (rewrite_ssa_into_ssa): Likewise. + * tree-mudflap.c (mf_build_check_statement_for): Likewise. + * tree-outof-ssa.c (coalesce_abnormal_edges): Likewise. + (rewrite_trees): Likewise. + * tree-pretty-print.c (dump_bb_header): Likewise. + (dump_implicit_edges): Likewise. + * tree-sra.c (insert_edge_copies): Likewise. + (find_obviously_necessary_stmts): Likewise. + (remove_data_stmt): Likewise. + * tree-ssa-dom.c (thread_across_edge): Likewise. + (dom_opt_finalize_block): Likewise. + (single_incoming_edge_ignoring_loop_edges): Likewise. + (record_equivalences_from_incoming_edges): Likewise. + (cprop_into_successor_phis): Likewise. + * tree-ssa-live.c (live_worklist): Likewise. + (calculate_live_on_entry): Likewise. + (calculate_live_on_exit): Likewise. + * tree-ssa-loop-ch.c (should_duplicate_loop_header_p): Likewise. + (copy_loop_headers): Likewise. + * tree-ssa-loop-im.c (loop_commit_inserts): Likewise. + (fill_always_executed_in): Likewise. + * tree-ssa-loop-ivcanon.c (create_canonical_iv): Likewise. + * tree-ssa-loop-ivopts.c (find_interesting_uses): Likewise. + (compute_phi_arg_on_exit): Likewise. + * tree-ssa-loop-manip.c (add_exit_phis_edge): Likewise. + (get_loops_exit): Likewise. + (split_loop_exit_edge): Likewise. + (ip_normal_pos): Likewise. + * tree-ssa-loop-niter.c (simplify_using_initial_conditions): + Likewise. 
+ * tree-ssa-phiopt.c (candidate_bb_for_phi_optimization): Likewise. + (replace_phi_with_stmt): Likewise. + (value_replacement): Likewise. + * tree-ssa-pre.c (compute_antic_aux): Likewise. + (insert_aux): Likewise. + (init_pre): Likewise. + * tree-ssa-propagate.c (simulate_stmt): Likewise. + (simulate_block): Likewise. + (ssa_prop_init): Likewise. + * tree-ssa-threadupdate.c (thread_block): Likewise. + (create_block_for_threading): Likewise. + (remove_last_stmt_and_useless_edges): Likewise. + * tree-ssa.c (verify_phi_args): Likewise. + (verify_ssa): Likewise. + * tree_tailcall.c (independent_of_stmt_p): Likewise. + (find_tail_calls): Likewise. + (eliminate_tail_call): Likewise. + (tree_optimize_tail_calls_1): Likewise. + * tree-vectorizer.c (vect_transform_loop): Likewise. + * var-tracking.c (prologue_stack_adjust): Likewise. + (vt_stack_adjustments): Likewise. + (vt_find_locations): Likewise. + * config/frv/frv.c (frv_ifcvt_modify_tests): Likewise. + * config/i386/i386.c (ix86_pad_returns): Likewise. + * config/ia64/ia64.c (ia64_expand_prologue): Likewise. + * config/rs6000/rs6000.c (rs6000_emit_prologue): Likewise. + 2004-09-28 Eric Botcazou PR target/16532 diff --git a/gcc/basic-block.h b/gcc/basic-block.h index 978f313a3cc..04ddee06335 100644 --- a/gcc/basic-block.h +++ b/gcc/basic-block.h @@ -28,6 +28,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "partition.h" #include "hard-reg-set.h" #include "predict.h" +#include "vec.h" +#include "errors.h" /* Head of register set linked list. */ typedef bitmap_head regset_head; @@ -147,12 +149,8 @@ do { \ typedef HOST_WIDEST_INT gcov_type; /* Control flow edge information. */ -struct edge_def GTY((chain_next ("%h.pred_next"))) +struct edge_def GTY(()) { - /* Links through the predecessor and successor lists. */ - struct edge_def *pred_next; - struct edge_def *succ_next; - /* The two blocks at the ends of the edge. */ struct basic_block_def *src; struct basic_block_def *dest; @@ -176,6 +174,7 @@ struct edge_def GTY((chain_next ("%h.pred_next"))) }; typedef struct edge_def *edge; +DEF_VEC_GC_P(edge); #define EDGE_FALLTHRU 1 /* 'Straight line' flow */ #define EDGE_ABNORMAL 2 /* Strange flow, like computed @@ -250,8 +249,8 @@ struct basic_block_def GTY((chain_next ("%h.next_bb"), chain_prev ("%h.prev_bb") tree stmt_list; /* The edges into and out of the block. */ - edge pred; - edge succ; + VEC(edge) *preds; + VEC(edge) *succs; /* Liveness info. */ @@ -539,12 +538,12 @@ struct edge_list #define NUM_EDGES(el) ((el)->num_edges) /* BB is assumed to contain conditional jump. Return the fallthru edge. */ -#define FALLTHRU_EDGE(bb) ((bb)->succ->flags & EDGE_FALLTHRU \ - ? (bb)->succ : (bb)->succ->succ_next) +#define FALLTHRU_EDGE(bb) (EDGE_SUCC ((bb), 0)->flags & EDGE_FALLTHRU \ + ? EDGE_SUCC ((bb), 0) : EDGE_SUCC ((bb), 1)) /* BB is assumed to contain conditional jump. Return the branch edge. */ -#define BRANCH_EDGE(bb) ((bb)->succ->flags & EDGE_FALLTHRU \ - ? (bb)->succ->succ_next : (bb)->succ) +#define BRANCH_EDGE(bb) (EDGE_SUCC ((bb), 0)->flags & EDGE_FALLTHRU \ + ? EDGE_SUCC ((bb), 1) : EDGE_SUCC ((bb), 0)) /* Return expected execution frequency of the edge E. */ #define EDGE_FREQUENCY(e) (((e)->src->frequency \ @@ -553,8 +552,112 @@ struct edge_list / REG_BR_PROB_BASE) /* Return nonzero if edge is critical. 
*/ -#define EDGE_CRITICAL_P(e) ((e)->src->succ->succ_next \ - && (e)->dest->pred->pred_next) +#define EDGE_CRITICAL_P(e) (EDGE_COUNT ((e)->src->succs) >= 2 \ + && EDGE_COUNT ((e)->dest->preds) >= 2) + +#define EDGE_COUNT(ev) VEC_length (edge, (ev)) +#define EDGE_I(ev,i) VEC_index (edge, (ev), (i)) +#define EDGE_PRED(bb,i) VEC_index (edge, (bb)->preds, (i)) +#define EDGE_SUCC(bb,i) VEC_index (edge, (bb)->succs, (i)) + +/* Iterator object for edges. */ + +typedef struct { + unsigned index; + VEC(edge) *container; +} edge_iterator; + +/* Return an iterator pointing to the start of an edge vector. */ +static inline edge_iterator +ei_start (VEC(edge) *ev) +{ + edge_iterator i; + + i.index = 0; + i.container = ev; + + return i; +} + +/* Return an iterator pointing to the last element of an edge + vector. */ +static inline edge_iterator +ei_last (VEC(edge) *ev) +{ + edge_iterator i; + + i.index = EDGE_COUNT (ev) - 1; + i.container = ev; + + return i; +} + +/* Is the iterator `i' at the end of the sequence? */ +static inline bool +ei_end_p (edge_iterator i) +{ + return (i.index == EDGE_COUNT (i.container)); +} + +/* Is the iterator `i' at one position before the end of the + sequence? */ +static inline bool +ei_one_before_end_p (edge_iterator i) +{ + return (i.index + 1 == EDGE_COUNT (i.container)); +} + +/* Advance the iterator to the next element. */ +static inline void +ei_next (edge_iterator *i) +{ + gcc_assert (i->index < EDGE_COUNT (i->container)); + i->index++; +} + +/* Move the iterator to the previous element. */ +static inline void +ei_prev (edge_iterator *i) +{ + gcc_assert (i->index > 0); + i->index--; +} + +/* Return the edge pointed to by the iterator `i'. */ +static inline edge +ei_edge (edge_iterator i) +{ + return EDGE_I (i.container, i.index); +} + +/* Return an edge pointed to by the iterator. Do it safely so that + NULL is returned when the iterator is pointing at the end of the + sequence. */ +static inline edge +ei_safe_edge (edge_iterator i) +{ + return !ei_end_p (i) ? ei_edge (i) : NULL; +} + +/* This macro serves as a convenient way to iterate each edge in a + vector of predeccesor or successor edges. It must not be used when + an element might be removed during the traversal, otherwise + elements will be missed. 
Instead, use a for-loop like that shown + in the following pseudo-code: + + FOR (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) + { + IF (e != taken_edge) + ssa_remove_edge (e); + ELSE + ei_next (&ei); + } +*/ + +#define FOR_EACH_EDGE(EDGE,ITER,EDGE_VEC) \ + for ((EDGE) = NULL, (ITER) = ei_start ((EDGE_VEC)); \ + ((EDGE) = ei_safe_edge ((ITER))); \ + ei_next (&(ITER))) struct edge_list * create_edge_list (void); void free_edge_list (struct edge_list *); diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c index 11db0c58bd8..1d0b097de20 100644 --- a/gcc/bb-reorder.c +++ b/gcc/bb-reorder.c @@ -229,6 +229,7 @@ find_traces (int *n_traces, struct trace *traces) int i; int number_of_rounds; edge e; + edge_iterator ei; fibheap_t heap; /* Add one extra round of trace collection when partitioning hot/cold @@ -243,7 +244,7 @@ find_traces (int *n_traces, struct trace *traces) heap = fibheap_new (); max_entry_frequency = 0; max_entry_count = 0; - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { bbd[e->dest->index].heap = heap; bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest), @@ -311,7 +312,9 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n) do { edge e; - for (e = bb->succ; e; e = e->succ_next) + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest != EXIT_BLOCK_PTR && e->dest->rbi->visited != trace_n && (e->flags & EDGE_CAN_FALLTHRU) @@ -382,9 +385,9 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n) prev_bb->rbi->next = best_bb->rbi->next; /* Try to get rid of uncond jump to cond jump. */ - if (prev_bb->succ && !prev_bb->succ->succ_next) + if (EDGE_COUNT (prev_bb->succs) == 1) { - basic_block header = prev_bb->succ->dest; + basic_block header = EDGE_SUCC (prev_bb, 0)->dest; /* Duplicate HEADER if it is a small block containing cond jump in the end. */ @@ -392,7 +395,7 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n) && !find_reg_note (BB_END (header), REG_CROSSING_JUMP, NULL_RTX)) { - copy_bb (header, prev_bb->succ, prev_bb, trace_n); + copy_bb (header, EDGE_SUCC (prev_bb, 0), prev_bb, trace_n); } } } @@ -448,6 +451,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, struct trace *trace; edge best_edge, e; fibheapkey_t key; + edge_iterator ei; bb = fibheap_extract_min (*heap); bbd[bb->index].heap = NULL; @@ -498,7 +502,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, bb->index, *n_traces - 1); /* Select the successor that will be placed after BB. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { gcc_assert (!(e->flags & EDGE_FAKE)); @@ -537,12 +541,12 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, /* If the best destination has multiple predecessors, and can be duplicated cheaper than a jump, don't allow it to be added to a trace. We'll duplicate it when connecting traces. */ - if (best_edge && best_edge->dest->pred->pred_next + if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2 && copy_bb_p (best_edge->dest, 0)) best_edge = NULL; /* Add all non-selected successors to the heaps. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e == best_edge || e->dest == EXIT_BLOCK_PTR @@ -637,9 +641,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, /* Check whether there is another edge from BB. 
*/ edge another_edge; - for (another_edge = bb->succ; - another_edge; - another_edge = another_edge->succ_next) + FOR_EACH_EDGE (another_edge, ei, bb->succs) if (another_edge != best_edge) break; @@ -678,18 +680,17 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e != best_edge && (e->flags & EDGE_CAN_FALLTHRU) && !(e->flags & EDGE_COMPLEX) && !e->dest->rbi->visited - && !e->dest->pred->pred_next + && EDGE_COUNT (e->dest->preds) == 1 && !(e->flags & EDGE_CROSSING) - && e->dest->succ - && (e->dest->succ->flags & EDGE_CAN_FALLTHRU) - && !(e->dest->succ->flags & EDGE_COMPLEX) - && !e->dest->succ->succ_next - && e->dest->succ->dest == best_edge->dest + && EDGE_COUNT (e->dest->succs) == 1 + && (EDGE_SUCC (e->dest, 0)->flags & EDGE_CAN_FALLTHRU) + && !(EDGE_SUCC (e->dest, 0)->flags & EDGE_COMPLEX) + && EDGE_SUCC (e->dest, 0)->dest == best_edge->dest && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge)) { best_edge = e; @@ -712,7 +713,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, /* The trace is terminated so we have to recount the keys in heap (some block can have a lower key because now one of its predecessors is an end of the trace). */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR || e->dest->rbi->visited) @@ -801,7 +802,7 @@ static fibheapkey_t bb_to_key (basic_block bb) { edge e; - + edge_iterator ei; int priority = 0; /* Do not start in probably never executed blocks. */ @@ -812,7 +813,7 @@ bb_to_key (basic_block bb) /* Prefer blocks whose predecessor is an end of some trace or whose predecessor edge is EDGE_DFS_BACK. */ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0) || (e->flags & EDGE_DFS_BACK)) @@ -969,9 +970,10 @@ connect_traces (int n_traces, struct trace *traces) /* Find the predecessor traces. */ for (t2 = t; t2 > 0;) { + edge_iterator ei; best = NULL; best_len = 0; - for (e = traces[t2].first->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, traces[t2].first->preds) { int si = e->src->index; @@ -1016,9 +1018,10 @@ connect_traces (int n_traces, struct trace *traces) while (1) { /* Find the continuation of the chain. 
*/ + edge_iterator ei; best = NULL; best_len = 0; - for (e = traces[t].last->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, traces[t].last->succs) { int di = e->dest->index; @@ -1058,12 +1061,13 @@ connect_traces (int n_traces, struct trace *traces) basic_block next_bb = NULL; bool try_copy = false; - for (e = traces[t].last->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, traces[t].last->succs) if (e->dest != EXIT_BLOCK_PTR && (e->flags & EDGE_CAN_FALLTHRU) && !(e->flags & EDGE_COMPLEX) && (!best || e->probability > best->probability)) { + edge_iterator ei; edge best2 = NULL; int best2_len = 0; @@ -1079,7 +1083,7 @@ connect_traces (int n_traces, struct trace *traces) continue; } - for (e2 = e->dest->succ; e2; e2 = e2->succ_next) + FOR_EACH_EDGE (e2, ei, e->dest->succs) { int di = e2->dest->index; @@ -1177,24 +1181,17 @@ copy_bb_p (basic_block bb, int code_may_grow) int size = 0; int max_size = uncond_jump_length; rtx insn; - int n_succ; - edge e; if (!bb->frequency) return false; - if (!bb->pred || !bb->pred->pred_next) + if (EDGE_COUNT (bb->preds) < 2) return false; if (!can_duplicate_block_p (bb)) return false; /* Avoid duplicating blocks which have many successors (PR/13430). */ - n_succ = 0; - for (e = bb->succ; e; e = e->succ_next) - { - n_succ++; - if (n_succ > 8) - return false; - } + if (EDGE_COUNT (bb->succs) > 8) + return false; if (code_may_grow && maybe_hot_bb_p (bb)) max_size *= 8; @@ -1262,6 +1259,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, bool has_hot_blocks = false; edge e; int i; + edge_iterator ei; /* Mark which partition (hot/cold) each basic block belongs in. */ @@ -1281,7 +1279,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, the hot partition (if there is one). */ if (has_hot_blocks) - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) if (e->dest->index >= 0) { BB_SET_PARTITION (e->dest, BB_HOT_PARTITION); @@ -1294,7 +1292,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, if (targetm.have_named_sections) { FOR_EACH_BB (bb) - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR @@ -1384,7 +1382,7 @@ add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges) /* bb just falls through. */ { /* make sure there's only one successor */ - gcc_assert (src->succ && !src->succ->succ_next); + gcc_assert (EDGE_COUNT (src->succs) == 1); /* Find label in dest block. */ label = block_label (dest); @@ -1432,9 +1430,13 @@ fix_up_fall_thru_edges (void) FOR_EACH_BB (cur_bb) { fall_thru = NULL; - succ1 = cur_bb->succ; - if (succ1) - succ2 = succ1->succ_next; + if (EDGE_COUNT (cur_bb->succs) > 0) + succ1 = EDGE_SUCC (cur_bb, 0); + else + succ1 = NULL; + + if (EDGE_COUNT (cur_bb->succs) > 1) + succ2 = EDGE_SUCC (cur_bb, 1); else succ2 = NULL; @@ -1522,7 +1524,7 @@ fix_up_fall_thru_edges (void) partition as bb it's falling through from. 
*/ BB_COPY_PARTITION (new_bb, cur_bb); - new_bb->succ->flags |= EDGE_CROSSING; + EDGE_SUCC (new_bb, 0)->flags |= EDGE_CROSSING; } /* Add barrier after new jump */ @@ -1557,8 +1559,9 @@ find_jump_block (basic_block jump_dest) basic_block source_bb = NULL; edge e; rtx insn; + edge_iterator ei; - for (e = jump_dest->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, jump_dest->preds) if (e->flags & EDGE_CROSSING) { basic_block src = e->src; @@ -1619,11 +1622,15 @@ fix_crossing_conditional_branches (void) FOR_EACH_BB (cur_bb) { crossing_edge = NULL; - succ1 = cur_bb->succ; - if (succ1) - succ2 = succ1->succ_next; + if (EDGE_COUNT (cur_bb->succs) > 0) + succ1 = EDGE_SUCC (cur_bb, 0); + else + succ1 = NULL; + + if (EDGE_COUNT (cur_bb->succs) > 1) + succ2 = EDGE_SUCC (cur_bb, 1); else - succ2 = NULL; + succ2 = NULL; /* We already took care of fall-through edges, so only one successor can be a crossing edge. */ @@ -1738,10 +1745,10 @@ fix_crossing_conditional_branches (void) will be a successor for new_bb and a predecessor for 'dest'. */ - if (!new_bb->succ) + if (EDGE_COUNT (new_bb->succs) == 0) new_edge = make_edge (new_bb, dest, 0); else - new_edge = new_bb->succ; + new_edge = EDGE_SUCC (new_bb, 0); crossing_edge->flags &= ~EDGE_CROSSING; new_edge->flags |= EDGE_CROSSING; @@ -1769,7 +1776,7 @@ fix_crossing_unconditional_branches (void) FOR_EACH_BB (cur_bb) { last_insn = BB_END (cur_bb); - succ = cur_bb->succ; + succ = EDGE_SUCC (cur_bb, 0); /* Check to see if bb ends in a crossing (unconditional) jump. At this point, no crossing jumps should be conditional. */ @@ -1839,9 +1846,10 @@ add_reg_crossing_jump_notes (void) { basic_block bb; edge e; + edge_iterator ei; FOR_EACH_BB (bb) - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if ((e->flags & EDGE_CROSSING) && JUMP_P (BB_END (e->src))) REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, diff --git a/gcc/bt-load.c b/gcc/bt-load.c index ef4cf92864a..b0ced417518 100644 --- a/gcc/bt-load.c +++ b/gcc/bt-load.c @@ -879,6 +879,7 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, else { edge e; + edge_iterator ei; int new_block = new_bb->index; gcc_assert (dominated_by_p (CDI_DOMINATORS, head_bb, new_bb)); @@ -900,7 +901,7 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, dump_hard_reg_set (*btrs_live_in_range); fprintf (dump_file, "\n"); } - for (e = head_bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, head_bb->preds) *tos++ = e->src; } @@ -910,6 +911,7 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, if (!bitmap_bit_p (live_range, bb->index)) { edge e; + edge_iterator ei; bitmap_set_bit (live_range, bb->index); IOR_HARD_REG_SET (*btrs_live_in_range, @@ -923,7 +925,7 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, fprintf (dump_file, "\n"); } - for (e = bb->pred; e != NULL; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { basic_block pred = e->src; if (!bitmap_bit_p (live_range, pred->index)) diff --git a/gcc/cfg.c b/gcc/cfg.c index c8f1de51ae4..0669bed74c5 100644 --- a/gcc/cfg.c +++ b/gcc/cfg.c @@ -144,34 +144,20 @@ clear_edges (void) { basic_block bb; edge e; + edge_iterator ei; FOR_EACH_BB (bb) { - edge e = bb->succ; - - while (e) - { - edge next = e->succ_next; - - free_edge (e); - e = next; - } - - bb->succ = NULL; - bb->pred = NULL; - } - - e = ENTRY_BLOCK_PTR->succ; - while (e) - { - edge next = e->succ_next; - - free_edge (e); - e = next; + FOR_EACH_EDGE (e, ei, bb->succs) + 
free_edge (e); + VEC_truncate (edge, bb->succs, 0); + VEC_truncate (edge, bb->preds, 0); } - EXIT_BLOCK_PTR->pred = NULL; - ENTRY_BLOCK_PTR->succ = NULL; + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) + free_edge (e); + VEC_truncate (edge, EXIT_BLOCK_PTR->preds, 0); + VEC_truncate (edge, ENTRY_BLOCK_PTR->succs, 0); gcc_assert (!n_edges); } @@ -284,15 +270,13 @@ unchecked_make_edge (basic_block src, basic_block dst, int flags) e = ggc_alloc_cleared (sizeof (*e)); n_edges++; - e->succ_next = src->succ; - e->pred_next = dst->pred; + VEC_safe_insert (edge, src->succs, 0, e); + VEC_safe_insert (edge, dst->preds, 0, e); + e->src = src; e->dest = dst; e->flags = flags; - src->succ = e; - dst->pred = e; - return e; } @@ -304,6 +288,7 @@ cached_make_edge (sbitmap *edge_cache, basic_block src, basic_block dst, int fla { int use_edge_cache; edge e; + edge_iterator ei; /* Don't bother with edge cache for ENTRY or EXIT, if there aren't that many edges to them, or we didn't allocate memory for it. */ @@ -324,7 +309,7 @@ cached_make_edge (sbitmap *edge_cache, basic_block src, basic_block dst, int fla /* Fall through. */ case 0: - for (e = src->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, src->succs) if (e->dest == dst) { e->flags |= flags; @@ -368,30 +353,42 @@ make_single_succ_edge (basic_block src, basic_block dest, int flags) void remove_edge (edge e) { - edge last_pred = NULL; - edge last_succ = NULL; edge tmp; basic_block src, dest; + bool found = false; + edge_iterator ei; src = e->src; dest = e->dest; - for (tmp = src->succ; tmp && tmp != e; tmp = tmp->succ_next) - last_succ = tmp; - gcc_assert (tmp); - if (last_succ) - last_succ->succ_next = e->succ_next; - else - src->succ = e->succ_next; + for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); ) + { + if (tmp == e) + { + VEC_ordered_remove (edge, src->succs, ei.index); + found = true; + break; + } + else + ei_next (&ei); + } - for (tmp = dest->pred; tmp && tmp != e; tmp = tmp->pred_next) - last_pred = tmp; + gcc_assert (found); - gcc_assert (tmp); - if (last_pred) - last_pred->pred_next = e->pred_next; - else - dest->pred = e->pred_next; + found = false; + for (ei = ei_start (dest->preds); (tmp = ei_safe_edge (ei)); ) + { + if (tmp == e) + { + VEC_ordered_remove (edge, dest->preds, ei.index); + found = true; + break; + } + else + ei_next (&ei); + } + + gcc_assert (found); free_edge (e); } @@ -401,16 +398,27 @@ remove_edge (edge e) void redirect_edge_succ (edge e, basic_block new_succ) { - edge *pe; + edge tmp; + edge_iterator ei; + bool found = false; /* Disconnect the edge from the old successor block. */ - for (pe = &e->dest->pred; *pe != e; pe = &(*pe)->pred_next) - continue; - *pe = (*pe)->pred_next; + for (ei = ei_start (e->dest->preds); (tmp = ei_safe_edge (ei)); ) + { + if (tmp == e) + { + VEC_ordered_remove (edge, e->dest->preds, ei.index); + found = true; + break; + } + else + ei_next (&ei); + } + + gcc_assert (found); /* Reconnect the edge to the new successor block. */ - e->pred_next = new_succ->pred; - new_succ->pred = e; + VEC_safe_insert (edge, new_succ->preds, 0, e); e->dest = new_succ; } @@ -420,9 +428,10 @@ edge redirect_edge_succ_nodup (edge e, basic_block new_succ) { edge s; + edge_iterator ei; /* Check whether the edge is already present. 
*/ - for (s = e->src->succ; s; s = s->succ_next) + FOR_EACH_EDGE (s, ei, e->src->succs) if (s->dest == new_succ && s != e) break; @@ -447,17 +456,27 @@ redirect_edge_succ_nodup (edge e, basic_block new_succ) void redirect_edge_pred (edge e, basic_block new_pred) { - edge *pe; + edge tmp; + edge_iterator ei; + bool found = false; /* Disconnect the edge from the old predecessor block. */ - for (pe = &e->src->succ; *pe != e; pe = &(*pe)->succ_next) - continue; + for (ei = ei_start (e->src->succs); (tmp = ei_safe_edge (ei)); ) + { + if (tmp == e) + { + VEC_ordered_remove (edge, e->src->succs, ei.index); + found = true; + break; + } + else + ei_next (&ei); + } - *pe = (*pe)->succ_next; + gcc_assert (found); /* Reconnect the edge to the new predecessor block. */ - e->succ_next = new_pred->succ; - new_pred->succ = e; + VEC_safe_insert (edge, new_pred->succs, 0, e); e->src = new_pred; } @@ -482,35 +501,37 @@ check_bb_profile (basic_block bb, FILE * file) edge e; int sum = 0; gcov_type lsum; + edge_iterator ei; if (profile_status == PROFILE_ABSENT) return; if (bb != EXIT_BLOCK_PTR) { - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) sum += e->probability; - if (bb->succ && abs (sum - REG_BR_PROB_BASE) > 100) + if (EDGE_COUNT (bb->succs) && abs (sum - REG_BR_PROB_BASE) > 100) fprintf (file, "Invalid sum of outgoing probabilities %.1f%%\n", sum * 100.0 / REG_BR_PROB_BASE); lsum = 0; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) lsum += e->count; - if (bb->succ && (lsum - bb->count > 100 || lsum - bb->count < -100)) + if (EDGE_COUNT (bb->succs) + && (lsum - bb->count > 100 || lsum - bb->count < -100)) fprintf (file, "Invalid sum of outgoing counts %i, should be %i\n", (int) lsum, (int) bb->count); } if (bb != ENTRY_BLOCK_PTR) { sum = 0; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) sum += EDGE_FREQUENCY (e); if (abs (sum - bb->frequency) > 100) fprintf (file, "Invalid sum of incoming frequencies %i, should be %i\n", sum, bb->frequency); lsum = 0; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) lsum += e->count; if (lsum - bb->count > 100 || lsum - bb->count < -100) fprintf (file, "Invalid sum of incoming counts %i, should be %i\n", @@ -577,6 +598,7 @@ dump_flow_info (FILE *file) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; fprintf (file, "\nBasic block %d ", bb->index); fprintf (file, "prev %d, next %d, ", @@ -591,11 +613,11 @@ dump_flow_info (FILE *file) fprintf (file, ".\n"); fprintf (file, "Predecessors: "); - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) dump_edge_info (file, e, 0); fprintf (file, "\nSuccessors: "); - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) dump_edge_info (file, e, 1); fprintf (file, "\nRegisters live at start:"); @@ -788,8 +810,9 @@ alloc_aux_for_edges (int size) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) alloc_aux_for_edge (e, size); } } @@ -805,7 +828,8 @@ clear_aux_for_edges (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - for (e = bb->succ; e; e = e->succ_next) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->succs) e->aux = NULL; } } @@ -843,6 +867,7 @@ static void dump_cfg_bb_info (FILE *file, basic_block bb) { unsigned i; + edge_iterator ei; bool first = true; static const char * const bb_bitnames[] = { @@ -867,11 +892,11 @@ dump_cfg_bb_info 
(FILE *file, basic_block bb) fprintf (file, "\n"); fprintf (file, "Predecessors: "); - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) dump_edge_info (file, e, 0); fprintf (file, "\nSuccessors: "); - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) dump_edge_info (file, e, 1); fprintf (file, "\n\n"); } @@ -902,6 +927,7 @@ update_bb_profile_for_threading (basic_block bb, int edge_frequency, { edge c; int prob; + edge_iterator ei; bb->count -= count; if (bb->count < 0) @@ -935,12 +961,14 @@ update_bb_profile_for_threading (basic_block bb, int edge_frequency, fprintf (dump_file, "Edge frequencies of bb %i has been reset, " "frequency of block should end up being 0, it is %i\n", bb->index, bb->frequency); - bb->succ->probability = REG_BR_PROB_BASE; - for (c = bb->succ->succ_next; c; c = c->succ_next) + EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE; + ei = ei_start (bb->succs); + ei_next (&ei); + for (; (c = ei_safe_edge (ei)); ei_next (&ei)) c->probability = 0; } else - for (c = bb->succ; c; c = c->succ_next) + FOR_EACH_EDGE (c, ei, bb->succs) c->probability = ((c->probability * REG_BR_PROB_BASE) / (double) prob); if (bb != taken_edge->src) diff --git a/gcc/cfganal.c b/gcc/cfganal.c index 01f5f7d04b6..30aa5c40db3 100644 --- a/gcc/cfganal.c +++ b/gcc/cfganal.c @@ -85,7 +85,7 @@ forwarder_block_p (basic_block bb) rtx insn; if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR - || !bb->succ || bb->succ->succ_next) + || EDGE_COUNT (bb->succs) != 1) return false; for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn)) @@ -105,15 +105,16 @@ can_fallthru (basic_block src, basic_block target) rtx insn = BB_END (src); rtx insn2; edge e; + edge_iterator ei; if (target == EXIT_BLOCK_PTR) return true; if (src->next_bb != target) return 0; - for (e = src->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, src->succs) if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) - return 0; + return 0; insn2 = BB_HEAD (target); if (insn2 && !active_insn_p (insn2)) @@ -130,13 +131,14 @@ bool could_fall_through (basic_block src, basic_block target) { edge e; + edge_iterator ei; if (target == EXIT_BLOCK_PTR) return true; - for (e = src->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, src->succs) if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) - return 0; + return 0; return true; } @@ -153,7 +155,7 @@ could_fall_through (basic_block src, basic_block target) bool mark_dfs_back_edges (void) { - edge *stack; + edge_iterator *stack; int *pre; int *post; int sp; @@ -167,7 +169,7 @@ mark_dfs_back_edges (void) post = xcalloc (last_basic_block, sizeof (int)); /* Allocate stack for back-tracking up CFG. */ - stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge)); + stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator)); sp = 0; /* Allocate bitmap to track nodes that have been visited. */ @@ -177,19 +179,19 @@ mark_dfs_back_edges (void) sbitmap_zero (visited); /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; + stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs); while (sp) { - edge e; + edge_iterator ei; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; - e->flags &= ~EDGE_DFS_BACK; + ei = stack[sp - 1]; + src = ei_edge (ei)->src; + dest = ei_edge (ei)->dest; + ei_edge (ei)->flags &= ~EDGE_DFS_BACK; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! 
TEST_BIT (visited, dest->index)) @@ -198,11 +200,11 @@ mark_dfs_back_edges (void) SET_BIT (visited, dest->index); pre[dest->index] = prenum++; - if (dest->succ) + if (EDGE_COUNT (dest->succs) > 0) { /* Since the DEST node has been visited for the first time, check its successors. */ - stack[sp++] = dest->succ; + stack[sp++] = ei_start (dest->succs); } else post[dest->index] = postnum++; @@ -212,13 +214,13 @@ mark_dfs_back_edges (void) if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR && pre[src->index] >= pre[dest->index] && post[dest->index] == 0) - e->flags |= EDGE_DFS_BACK, found = true; + ei_edge (ei)->flags |= EDGE_DFS_BACK, found = true; - if (! e->succ_next && src != ENTRY_BLOCK_PTR) + if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR) post[src->index] = postnum++; - if (e->succ_next) - stack[sp - 1] = e->succ_next; + if (!ei_one_before_end_p (ei)) + ei_next (&stack[sp - 1]); else sp--; } @@ -242,8 +244,9 @@ set_edge_can_fallthru_flag (void) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { e->flags &= ~EDGE_CAN_FALLTHRU; @@ -254,15 +257,15 @@ set_edge_can_fallthru_flag (void) /* If the BB ends with an invertible condjump all (2) edges are CAN_FALLTHRU edges. */ - if (!bb->succ || !bb->succ->succ_next || bb->succ->succ_next->succ_next) + if (EDGE_COUNT (bb->succs) != 2) continue; if (!any_condjump_p (BB_END (bb))) continue; if (!invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0)) continue; invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0); - bb->succ->flags |= EDGE_CAN_FALLTHRU; - bb->succ->succ_next->flags |= EDGE_CAN_FALLTHRU; + EDGE_SUCC (bb, 0)->flags |= EDGE_CAN_FALLTHRU; + EDGE_SUCC (bb, 1)->flags |= EDGE_CAN_FALLTHRU; } } @@ -274,6 +277,7 @@ void find_unreachable_blocks (void) { edge e; + edge_iterator ei; basic_block *tos, *worklist, bb; tos = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks); @@ -287,7 +291,7 @@ find_unreachable_blocks (void) be only one. It isn't inconceivable that we might one day directly support Fortran alternate entry points. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { *tos++ = e->dest; @@ -301,7 +305,7 @@ find_unreachable_blocks (void) { basic_block b = *--tos; - for (e = b->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, b->succs) if (!(e->dest->flags & BB_REACHABLE)) { *tos++ = e->dest; @@ -333,6 +337,7 @@ create_edge_list (void) int num_edges; int block_count; basic_block bb; + edge_iterator ei; block_count = n_basic_blocks + 2; /* Include the entry and exit blocks. */ @@ -342,8 +347,7 @@ create_edge_list (void) edges on each basic block. */ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - for (e = bb->succ; e; e = e->succ_next) - num_edges++; + num_edges += EDGE_COUNT (bb->succs); } elist = xmalloc (sizeof (struct edge_list)); @@ -355,7 +359,7 @@ create_edge_list (void) /* Follow successors of blocks, and register these edges. 
*/ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) elist->index_to_edge[num_edges++] = e; return elist; @@ -408,10 +412,11 @@ verify_edge_list (FILE *f, struct edge_list *elist) int pred, succ, index; edge e; basic_block bb, p, s; + edge_iterator ei; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { pred = e->src->index; succ = e->dest->index; @@ -439,14 +444,14 @@ verify_edge_list (FILE *f, struct edge_list *elist) { int found_edge = 0; - for (e = p->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, p->succs) if (e->dest == s) { found_edge = 1; break; } - for (e = s->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, s->preds) if (e->src == p) { found_edge = 1; @@ -471,8 +476,9 @@ edge find_edge (basic_block pred, basic_block succ) { edge e; + edge_iterator ei; - for (e = pred->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, pred->succs) if (e->dest == succ) return e; @@ -537,14 +543,14 @@ static void remove_fake_predecessors (basic_block bb) { edge e; + edge_iterator ei; - for (e = bb->pred; e;) + for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ) { - edge tmp = e; - - e = e->pred_next; - if ((tmp->flags & EDGE_FAKE) == EDGE_FAKE) - remove_edge (tmp); + if ((e->flags & EDGE_FAKE) == EDGE_FAKE) + remove_edge (e); + else + ei_next (&ei); } } @@ -580,7 +586,7 @@ add_noreturn_fake_exit_edges (void) basic_block bb; FOR_EACH_BB (bb) - if (bb->succ == NULL) + if (EDGE_COUNT (bb->succs) == 0) make_single_succ_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); } @@ -626,13 +632,13 @@ connect_infinite_loops_to_exit (void) void flow_reverse_top_sort_order_compute (int *rts_order) { - edge *stack; + edge_iterator *stack; int sp; int postnum = 0; sbitmap visited; /* Allocate stack for back-tracking up CFG. */ - stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge)); + stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator)); sp = 0; /* Allocate bitmap to track nodes that have been visited. */ @@ -642,18 +648,18 @@ flow_reverse_top_sort_order_compute (int *rts_order) sbitmap_zero (visited); /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; + stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs); while (sp) { - edge e; + edge_iterator ei; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; + ei = stack[sp - 1]; + src = ei_edge (ei)->src; + dest = ei_edge (ei)->dest; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) @@ -661,20 +667,20 @@ flow_reverse_top_sort_order_compute (int *rts_order) /* Mark that we have visited the destination. */ SET_BIT (visited, dest->index); - if (dest->succ) + if (EDGE_COUNT (dest->succs) > 0) /* Since the DEST node has been visited for the first time, check its successors. */ - stack[sp++] = dest->succ; + stack[sp++] = ei_start (dest->succs); else rts_order[postnum++] = dest->index; } else { - if (! 
e->succ_next && src != ENTRY_BLOCK_PTR) + if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR) rts_order[postnum++] = src->index; - if (e->succ_next) - stack[sp - 1] = e->succ_next; + if (!ei_one_before_end_p (ei)) + ei_next (&stack[sp - 1]); else sp--; } @@ -694,14 +700,14 @@ flow_reverse_top_sort_order_compute (int *rts_order) int flow_depth_first_order_compute (int *dfs_order, int *rc_order) { - edge *stack; + edge_iterator *stack; int sp; int dfsnum = 0; int rcnum = n_basic_blocks - 1; sbitmap visited; /* Allocate stack for back-tracking up CFG. */ - stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge)); + stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator)); sp = 0; /* Allocate bitmap to track nodes that have been visited. */ @@ -711,18 +717,18 @@ flow_depth_first_order_compute (int *dfs_order, int *rc_order) sbitmap_zero (visited); /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; + stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs); while (sp) { - edge e; + edge_iterator ei; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; + ei = stack[sp - 1]; + src = ei_edge (ei)->src; + dest = ei_edge (ei)->dest; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) @@ -735,10 +741,10 @@ flow_depth_first_order_compute (int *dfs_order, int *rc_order) dfsnum++; - if (dest->succ) + if (EDGE_COUNT (dest->succs) > 0) /* Since the DEST node has been visited for the first time, check its successors. */ - stack[sp++] = dest->succ; + stack[sp++] = ei_start (dest->succs); else if (rc_order) /* There are no successors for the DEST node so assign its reverse completion number. */ @@ -746,14 +752,14 @@ flow_depth_first_order_compute (int *dfs_order, int *rc_order) } else { - if (! e->succ_next && src != ENTRY_BLOCK_PTR + if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR && rc_order) /* There are no more successors for the SRC node so assign its reverse completion number. */ rc_order[rcnum--] = src->index; - if (e->succ_next) - stack[sp - 1] = e->succ_next; + if (!ei_one_before_end_p (ei)) + ei_next (&stack[sp - 1]); else sp--; } @@ -789,8 +795,7 @@ struct dfst_node void flow_preorder_transversal_compute (int *pot_order) { - edge e; - edge *stack; + edge_iterator *stack, ei; int i; int max_successors; int sp; @@ -808,10 +813,7 @@ flow_preorder_transversal_compute (int *pot_order) FOR_EACH_BB (bb) { - max_successors = 0; - for (e = bb->succ; e; e = e->succ_next) - max_successors++; - + max_successors = EDGE_COUNT (bb->succs); dfst[bb->index].node = (max_successors ? xcalloc (max_successors, sizeof (struct dfst_node *)) : NULL); @@ -824,7 +826,7 @@ flow_preorder_transversal_compute (int *pot_order) sbitmap_zero (visited); /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; + stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs); while (sp) { @@ -832,9 +834,9 @@ flow_preorder_transversal_compute (int *pot_order) basic_block dest; /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; + ei = stack[sp - 1]; + src = ei_edge (ei)->src; + dest = ei_edge (ei)->dest; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! 
TEST_BIT (visited, dest->index)) @@ -850,14 +852,14 @@ flow_preorder_transversal_compute (int *pot_order) dfst[dest->index].up = &dfst[src->index]; } - if (dest->succ) + if (EDGE_COUNT (dest->succs) > 0) /* Since the DEST node has been visited for the first time, check its successors. */ - stack[sp++] = dest->succ; + stack[sp++] = ei_start (dest->succs); } - else if (e->succ_next) - stack[sp - 1] = e->succ_next; + else if (! ei_one_before_end_p (ei)) + ei_next (&stack[sp - 1]); else sp--; } @@ -960,13 +962,14 @@ flow_dfs_compute_reverse_execute (depth_first_search_ds data) { basic_block bb; edge e; + edge_iterator ei; while (data->sp > 0) { bb = data->stack[--data->sp]; /* Perform depth-first search on adjacent vertices. */ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (!TEST_BIT (data->visited_blocks, e->src->index - (INVALID_BLOCK + 1))) flow_dfs_compute_reverse_add_bb (data, e->src); @@ -1007,10 +1010,11 @@ dfs_enumerate_from (basic_block bb, int reverse, while (sp) { edge e; + edge_iterator ei; lbb = st[--sp]; if (reverse) { - for (e = lbb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, lbb->preds) if (!(e->src->flags & BB_VISITED) && predicate (e->src, data)) { gcc_assert (tv != rslt_max); @@ -1020,7 +1024,7 @@ dfs_enumerate_from (basic_block bb, int reverse, } else { - for (e = lbb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, lbb->succs) if (!(e->dest->flags & BB_VISITED) && predicate (e->dest, data)) { gcc_assert (tv != rslt_max); @@ -1056,6 +1060,7 @@ static void compute_dominance_frontiers_1 (bitmap *frontiers, basic_block bb, sbitmap done) { edge e; + edge_iterator ei; basic_block c; SET_BIT (done, bb->index); @@ -1072,7 +1077,7 @@ compute_dominance_frontiers_1 (bitmap *frontiers, basic_block bb, sbitmap done) } /* Find blocks conforming to rule (1) above. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -1106,7 +1111,7 @@ compute_dominance_frontiers (bitmap *frontiers) sbitmap_zero (done); - compute_dominance_frontiers_1 (frontiers, ENTRY_BLOCK_PTR->succ->dest, done); + compute_dominance_frontiers_1 (frontiers, EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest, done); sbitmap_free (done); diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c index 453e65cf495..f5bf3e839cd 100644 --- a/gcc/cfgbuild.c +++ b/gcc/cfgbuild.c @@ -251,8 +251,9 @@ make_edges (basic_block min, basic_block max, int update_p) FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb) { edge e; + edge_iterator ei; - for (e = bb->succ; e ; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest != EXIT_BLOCK_PTR) SET_BIT (edge_cache[bb->index], e->dest->index); } @@ -270,6 +271,7 @@ make_edges (basic_block min, basic_block max, int update_p) enum rtx_code code; int force_fallthru = 0; edge e; + edge_iterator ei; if (LABEL_P (BB_HEAD (bb)) && LABEL_ALT_ENTRY_P (BB_HEAD (bb))) @@ -388,7 +390,7 @@ make_edges (basic_block min, basic_block max, int update_p) /* Find out if we can drop through to the next block. 
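The depth-first walkers in cfganal.c above now stack edge_iterators rather than raw edge pointers, so backtracking resumes at the next sibling edge.  A condensed sketch of that idiom, with the surrounding bookkeeping elided (assumes the iterator API this patch introduces):

  edge_iterator *stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
  int sp = 0;

  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);
  while (sp)
    {
      edge_iterator ei = stack[sp - 1];
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR && !TEST_BIT (visited, dest->index))
        {
          SET_BIT (visited, dest->index);
          if (EDGE_COUNT (dest->succs) > 0)
            stack[sp++] = ei_start (dest->succs);   /* Descend into DEST.  */
        }
      else if (!ei_one_before_end_p (ei))
        ei_next (&stack[sp - 1]);                   /* Next sibling edge.  */
      else
        sp--;                                       /* All edges done; pop.  */
    }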
*/ insn = NEXT_INSN (insn); - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) { insn = 0; @@ -640,8 +642,9 @@ static void compute_outgoing_frequencies (basic_block b) { edge e, f; + edge_iterator ei; - if (b->succ && b->succ->succ_next && !b->succ->succ_next->succ_next) + if (EDGE_COUNT (b->succs) == 2) { rtx note = find_reg_note (BB_END (b), REG_BR_PROB, NULL); int probability; @@ -660,16 +663,16 @@ compute_outgoing_frequencies (basic_block b) } } - if (b->succ && !b->succ->succ_next) + if (EDGE_COUNT (b->succs) == 1) { - e = b->succ; + e = EDGE_SUCC (b, 0); e->probability = REG_BR_PROB_BASE; e->count = b->count; return; } guess_outgoing_edge_probabilities (b); if (b->count) - for (e = b->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, b->succs) e->count = ((b->count * e->probability + REG_BR_PROB_BASE / 2) / REG_BR_PROB_BASE); } @@ -709,6 +712,7 @@ find_many_sub_basic_blocks (sbitmap blocks) FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb) { edge e; + edge_iterator ei; if (STATE (bb) == BLOCK_ORIGINAL) continue; @@ -716,7 +720,7 @@ find_many_sub_basic_blocks (sbitmap blocks) { bb->count = 0; bb->frequency = 0; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { bb->count += e->count; bb->frequency += EDGE_FREQUENCY (e); @@ -751,12 +755,13 @@ find_sub_basic_blocks (basic_block bb) FOR_BB_BETWEEN (b, min, max->next_bb, next_bb) { edge e; + edge_iterator ei; if (b != min) { b->count = 0; b->frequency = 0; - for (e = b->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, b->preds) { b->count += e->count; b->frequency += EDGE_FREQUENCY (e); diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c index f9d06075caa..eccaab4605e 100644 --- a/gcc/cfgcleanup.c +++ b/gcc/cfgcleanup.c @@ -124,9 +124,7 @@ try_simplify_condjump (basic_block cbranch_block) rtx cbranch_insn; /* Verify that there are exactly two successors. */ - if (!cbranch_block->succ - || !cbranch_block->succ->succ_next - || cbranch_block->succ->succ_next->succ_next) + if (EDGE_COUNT (cbranch_block->succs) != 2) return false; /* Verify that we've got a normal conditional branch at the end @@ -142,11 +140,11 @@ try_simplify_condjump (basic_block cbranch_block) be the last block in the function, and must contain just the unconditional jump. */ jump_block = cbranch_fallthru_edge->dest; - if (jump_block->pred->pred_next + if (EDGE_COUNT (jump_block->preds) >= 2 || jump_block->next_bb == EXIT_BLOCK_PTR || !FORWARDER_BLOCK_P (jump_block)) return false; - jump_dest_block = jump_block->succ->dest; + jump_dest_block = EDGE_SUCC (jump_block, 0)->dest; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot @@ -290,9 +288,9 @@ thread_jump (int mode, edge e, basic_block b) /* At the moment, we do handle only conditional jumps, but later we may want to extend this code to tablejumps and others. 
*/ - if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next) + if (EDGE_COUNT (e->src->succs) != 2) return NULL; - if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next) + if (EDGE_COUNT (b->succs) != 2) { BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK); return NULL; @@ -421,7 +419,8 @@ static bool try_forward_edges (int mode, basic_block b) { bool changed = false; - edge e, next, *threaded_edges = NULL; + edge_iterator ei; + edge e, *threaded_edges = NULL; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot @@ -437,7 +436,7 @@ try_forward_edges (int mode, basic_block b) && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)) return false; - for (e = b->succ; e; e = next) + for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); ) { basic_block target, first; int counter; @@ -445,15 +444,16 @@ try_forward_edges (int mode, basic_block b) int nthreaded_edges = 0; bool may_thread = first_pass | (b->flags & BB_DIRTY); - next = e->succ_next; - /* Skip complex edges because we don't know how to update them. Still handle fallthru edges, as we can succeed to forward fallthru edge to the same place as the branch edge of conditional branch and turn conditional branch to an unconditional branch. */ if (e->flags & EDGE_COMPLEX) - continue; + { + ei_next (&ei); + continue; + } target = first = e->dest; counter = 0; @@ -480,13 +480,13 @@ try_forward_edges (int mode, basic_block b) may_thread |= target->flags & BB_DIRTY; if (FORWARDER_BLOCK_P (target) - && !(target->succ->flags & EDGE_CROSSING) - && target->succ->dest != EXIT_BLOCK_PTR) + && !(EDGE_SUCC (target, 0)->flags & EDGE_CROSSING) + && EDGE_SUCC (target, 0)->dest != EXIT_BLOCK_PTR) { /* Bypass trivial infinite loops. */ - if (target == target->succ->dest) + if (target == EDGE_SUCC (target, 0)->dest) counter = n_basic_blocks; - new_target = target->succ->dest; + new_target = EDGE_SUCC (target, 0)->dest; } /* Allow to thread only over one edge at time to simplify updating @@ -538,7 +538,7 @@ try_forward_edges (int mode, basic_block b) it must appear before the JUMP_INSN. */ if ((mode & CLEANUP_PRE_LOOP) && optimize) { - rtx insn = (target->succ->flags & EDGE_FALLTHRU + rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target))); if (!NOTE_P (insn)) @@ -597,6 +597,7 @@ try_forward_edges (int mode, basic_block b) fprintf (dump_file, "Forwarding edge %i->%i to %i failed.\n", b->index, e->dest->index, target->index); + ei_next (&ei); continue; } @@ -614,7 +615,7 @@ try_forward_edges (int mode, basic_block b) { edge t; - if (first->succ->succ_next) + if (EDGE_COUNT (first->succs) > 1) { gcc_assert (n < nthreaded_edges); t = threaded_edges [n++]; @@ -638,7 +639,7 @@ try_forward_edges (int mode, basic_block b) if (n < nthreaded_edges && first == threaded_edges [n]->src) n++; - t = first->succ; + t = EDGE_SUCC (first, 0); } t->count -= edge_count; @@ -649,7 +650,9 @@ try_forward_edges (int mode, basic_block b) while (first != target); changed = true; + continue; } + ei_next (&ei); } if (threaded_edges) @@ -837,6 +840,7 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode) edge tmp_edge, b_fallthru_edge; bool c_has_outgoing_fallthru; bool b_has_incoming_fallthru; + edge_iterator ei; /* Avoid overactive code motion, as the forwarder blocks should be eliminated by edge redirection instead. 
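Where the loop body may delete the current edge -- remove_fake_predecessors earlier and try_forward_edges above -- the patch uses the ei_safe_edge form and advances the iterator only when the edge is kept, because remove_edge shrinks the vector in place and the next edge slides into the current slot.  Sketch:

  edge e;
  edge_iterator ei;

  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      if (e->flags & EDGE_FAKE)
        remove_edge (e);     /* Do not advance: the vector shrank.  */
      else
        ei_next (&ei);       /* Edge kept: step past it.  */
    }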
One exception might have @@ -849,13 +853,13 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode) and loop notes. This is done by squeezing out all the notes and leaving them there to lie. Not ideal, but functional. */ - for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next) + FOR_EACH_EDGE (tmp_edge, ei, c->succs) if (tmp_edge->flags & EDGE_FALLTHRU) break; c_has_outgoing_fallthru = (tmp_edge != NULL); - for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next) + FOR_EACH_EDGE (tmp_edge, ei, b->preds) if (tmp_edge->flags & EDGE_FALLTHRU) break; @@ -1214,21 +1218,20 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) int nehedges1 = 0, nehedges2 = 0; edge fallthru1 = 0, fallthru2 = 0; edge e1, e2; + edge_iterator ei; /* If BB1 has only one successor, we may be looking at either an unconditional jump, or a fake edge to exit. */ - if (bb1->succ && !bb1->succ->succ_next - && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0 + if (EDGE_COUNT (bb1->succs) == 1 + && (EDGE_SUCC (bb1, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1)))) - return (bb2->succ && !bb2->succ->succ_next - && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0 + return (EDGE_COUNT (bb2->succs) == 1 + && (EDGE_SUCC (bb2, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2)))); /* Match conditional jumps - this may get tricky when fallthru and branch edges are crossed. */ - if (bb1->succ - && bb1->succ->succ_next - && !bb1->succ->succ_next->succ_next + if (EDGE_COUNT (bb1->succs) == 2 && any_condjump_p (BB_END (bb1)) && onlyjump_p (BB_END (bb1))) { @@ -1237,9 +1240,7 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) rtx set1, set2, cond1, cond2; enum rtx_code code1, code2; - if (!bb2->succ - || !bb2->succ->succ_next - || bb2->succ->succ_next->succ_next + if (EDGE_COUNT (bb2->succs) != 2 || !any_condjump_p (BB_END (bb2)) || !onlyjump_p (BB_END (bb2))) return false; @@ -1252,10 +1253,10 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) /* Get around possible forwarders on fallthru edges. Other cases should be optimized out already. */ if (FORWARDER_BLOCK_P (f1->dest)) - f1 = f1->dest->succ; + f1 = EDGE_SUCC (f1->dest, 0); if (FORWARDER_BLOCK_P (f2->dest)) - f2 = f2->dest->succ; + f2 = EDGE_SUCC (f2->dest, 0); /* To simplify use of this function, return false if there are unneeded forwarder blocks. These will get eliminated later @@ -1425,9 +1426,13 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) /* Search the outgoing edges, ensure that the counts do match, find possible fallthru and exception handling edges since these needs more validation. */ - for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2; - e1 = e1->succ_next, e2 = e2->succ_next) + if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs)) + return false; + + FOR_EACH_EDGE (e1, ei, bb1->succs) { + e2 = EDGE_SUCC (bb2, ei.index); + if (e1->flags & EDGE_EH) nehedges1++; @@ -1441,8 +1446,7 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) } /* If number of edges of various types does not match, fail. */ - if (e1 || e2 - || nehedges1 != nehedges2 + if (nehedges1 != nehedges2 || (fallthru1 != 0) != (fallthru2 != 0)) return false; @@ -1450,9 +1454,9 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) if (fallthru1) { basic_block d1 = (forwarder_block_p (fallthru1->dest) - ? fallthru1->dest->succ->dest: fallthru1->dest); + ? 
EDGE_SUCC (fallthru1->dest, 0)->dest: fallthru1->dest); basic_block d2 = (forwarder_block_p (fallthru2->dest) - ? fallthru2->dest->succ->dest: fallthru2->dest); + ? EDGE_SUCC (fallthru2->dest, 0)->dest: fallthru2->dest); if (d1 != d2) return false; @@ -1487,6 +1491,7 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) basic_block redirect_to, redirect_from, to_remove; rtx newpos1, newpos2; edge s; + edge_iterator ei; newpos1 = newpos2 = NULL_RTX; @@ -1506,15 +1511,13 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) about multiple entry or chained forwarders, as they will be optimized away. We do this to look past the unconditional jump following a conditional jump that is required due to the current CFG shape. */ - if (src1->pred - && !src1->pred->pred_next + if (EDGE_COUNT (src1->preds) == 1 && FORWARDER_BLOCK_P (src1)) - e1 = src1->pred, src1 = e1->src; + e1 = EDGE_PRED (src1, 0), src1 = e1->src; - if (src2->pred - && !src2->pred->pred_next + if (EDGE_COUNT (src2->preds) == 1 && FORWARDER_BLOCK_P (src2)) - e2 = src2->pred, src2 = e2->src; + e2 = EDGE_PRED (src2, 0), src2 = e2->src; /* Nothing to do if we reach ENTRY, or a common source block. */ if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR) @@ -1524,16 +1527,16 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) /* Seeing more than 1 forwarder blocks would confuse us later... */ if (FORWARDER_BLOCK_P (e1->dest) - && FORWARDER_BLOCK_P (e1->dest->succ->dest)) + && FORWARDER_BLOCK_P (EDGE_SUCC (e1->dest, 0)->dest)) return false; if (FORWARDER_BLOCK_P (e2->dest) - && FORWARDER_BLOCK_P (e2->dest->succ->dest)) + && FORWARDER_BLOCK_P (EDGE_SUCC (e2->dest, 0)->dest)) return false; /* Likewise with dead code (possibly newly created by the other optimizations of cfg_cleanup). */ - if (!src1->pred || !src2->pred) + if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0) return false; /* Look for the common insn sequence, part the first ... */ @@ -1606,19 +1609,20 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) redirect_to->flags |= BB_DIRTY; /* Recompute the frequencies and counts of outgoing edges. */ - for (s = redirect_to->succ; s; s = s->succ_next) + FOR_EACH_EDGE (s, ei, redirect_to->succs) { edge s2; + edge_iterator ei; basic_block d = s->dest; if (FORWARDER_BLOCK_P (d)) - d = d->succ->dest; + d = EDGE_SUCC (d, 0)->dest; - for (s2 = src1->succ; ; s2 = s2->succ_next) + FOR_EACH_EDGE (s2, ei, src1->succs) { basic_block d2 = s2->dest; if (FORWARDER_BLOCK_P (d2)) - d2 = d2->succ->dest; + d2 = EDGE_SUCC (d2, 0)->dest; if (d == d2) break; } @@ -1630,16 +1634,16 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) into infinite loop. 
*/ if (FORWARDER_BLOCK_P (s->dest)) { - s->dest->succ->count += s2->count; + EDGE_SUCC (s->dest, 0)->count += s2->count; s->dest->count += s2->count; s->dest->frequency += EDGE_FREQUENCY (s); } if (FORWARDER_BLOCK_P (s2->dest)) { - s2->dest->succ->count -= s2->count; - if (s2->dest->succ->count < 0) - s2->dest->succ->count = 0; + EDGE_SUCC (s2->dest, 0)->count -= s2->count; + if (EDGE_SUCC (s2->dest, 0)->count < 0) + EDGE_SUCC (s2->dest, 0)->count = 0; s2->dest->count -= s2->count; s2->dest->frequency -= EDGE_FREQUENCY (s); if (s2->dest->frequency < 0) @@ -1669,9 +1673,9 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) newpos1 = NEXT_INSN (newpos1); redirect_from = split_block (src1, PREV_INSN (newpos1))->src; - to_remove = redirect_from->succ->dest; + to_remove = EDGE_SUCC (redirect_from, 0)->dest; - redirect_edge_and_branch_force (redirect_from->succ, redirect_to); + redirect_edge_and_branch_force (EDGE_SUCC (redirect_from, 0), redirect_to); delete_basic_block (to_remove); update_forwarder_flag (redirect_from); @@ -1686,12 +1690,14 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) static bool try_crossjump_bb (int mode, basic_block bb) { - edge e, e2, nexte2, nexte, fallthru; + edge e, e2, fallthru; bool changed; - int n = 0, max; + unsigned max, ix, ix2; + basic_block ev, ev2; + edge_iterator ei; /* Nothing to do if there is not at least two incoming edges. */ - if (!bb->pred || !bb->pred->pred_next) + if (EDGE_COUNT (bb->preds) < 2) return false; /* If we are partitioning hot/cold basic blocks, we don't want to @@ -1705,8 +1711,8 @@ try_crossjump_bb (int mode, basic_block bb) bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ if (flag_reorder_blocks_and_partition - && (BB_PARTITION (bb->pred->src) != BB_PARTITION (bb->pred->pred_next->src) - || (bb->pred->flags & EDGE_CROSSING))) + && (BB_PARTITION (EDGE_PRED (bb, 0)->src) != BB_PARTITION (EDGE_PRED (bb, 1)->src) + || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))) return false; /* It is always cheapest to redirect a block that ends in a branch to @@ -1714,18 +1720,21 @@ try_crossjump_bb (int mode, basic_block bb) program. We'll try that combination first. */ fallthru = NULL; max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES); - for (e = bb->pred; e ; e = e->pred_next, n++) + + if (EDGE_COUNT (bb->preds) > max) + return false; + + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->flags & EDGE_FALLTHRU) - fallthru = e; - if (n > max) - return false; + fallthru = e; } changed = false; - for (e = bb->pred; e; e = nexte) + for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); ) { - nexte = e->pred_next; + e = EDGE_PRED (ev, ix); + ix++; /* As noted above, first try with the fallthru predecessor. */ if (fallthru) @@ -1744,7 +1753,8 @@ try_crossjump_bb (int mode, basic_block bb) if (try_crossjump_to_edge (mode, e, fallthru)) { changed = true; - nexte = bb->pred; + ix = 0; + ev = bb; continue; } } @@ -1761,12 +1771,13 @@ try_crossjump_bb (int mode, basic_block bb) can eliminate redundant checks of crossjump(A,B) by arbitrarily choosing to do the check from the block for which the edge in question is the first successor of A. 
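try_crossjump_bb above shows the other pattern for loops that restructure the CFG as they go: walk the predecessor vector by index and restart from the beginning once a successful crossjump has rearranged the edges.  Roughly (simplified; the real code also tracks the block through EV and handles the non-fallthru pairs):

  unsigned ix = 0;
  while (ix < EDGE_COUNT (bb->preds))
    {
      edge e = EDGE_PRED (bb, ix);
      ix++;

      if (fallthru && e != fallthru
          && try_crossjump_to_edge (mode, e, fallthru))
        ix = 0;   /* The edge vector changed underneath us; start over.  */
    }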
*/ - if (e->src->succ != e) + if (EDGE_SUCC (e->src, 0) != e) continue; - for (e2 = bb->pred; e2; e2 = nexte2) + for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); ) { - nexte2 = e2->pred_next; + e2 = EDGE_PRED (ev2, ix2); + ix2++; if (e2 == e) continue; @@ -1792,7 +1803,8 @@ try_crossjump_bb (int mode, basic_block bb) if (try_crossjump_to_edge (mode, e, e2)) { changed = true; - nexte = bb->pred; + ev2 = bb; + ix = 0; break; } } @@ -1844,7 +1856,7 @@ try_optimize_cfg (int mode) bool changed_here = false; /* Delete trivially dead basic blocks. */ - while (b->pred == NULL) + while (EDGE_COUNT (b->preds) == 0) { c = b->prev_bb; if (dump_file) @@ -1858,9 +1870,9 @@ try_optimize_cfg (int mode) } /* Remove code labels no longer used. */ - if (b->pred->pred_next == NULL - && (b->pred->flags & EDGE_FALLTHRU) - && !(b->pred->flags & EDGE_COMPLEX) + if (EDGE_COUNT (b->preds) == 1 + && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU) + && !(EDGE_PRED (b, 0)->flags & EDGE_COMPLEX) && LABEL_P (BB_HEAD (b)) /* If the previous block ends with a branch to this block, we can't delete the label. Normally this @@ -1868,10 +1880,10 @@ try_optimize_cfg (int mode) if CASE_DROPS_THRU, this can be a tablejump with some element going to the same place as the default (fallthru). */ - && (b->pred->src == ENTRY_BLOCK_PTR - || !JUMP_P (BB_END (b->pred->src)) + && (EDGE_PRED (b, 0)->src == ENTRY_BLOCK_PTR + || !JUMP_P (BB_END (EDGE_PRED (b, 0)->src)) || ! label_is_jump_target_p (BB_HEAD (b), - BB_END (b->pred->src)))) + BB_END (EDGE_PRED (b, 0)->src)))) { rtx label = BB_HEAD (b); @@ -1892,13 +1904,13 @@ try_optimize_cfg (int mode) /* If we fall through an empty block, we can remove it. */ if (!(mode & CLEANUP_CFGLAYOUT) - && b->pred->pred_next == NULL - && (b->pred->flags & EDGE_FALLTHRU) + && EDGE_COUNT (b->preds) == 1 + && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU) && !LABEL_P (BB_HEAD (b)) && FORWARDER_BLOCK_P (b) /* Note that forwarder_block_p true ensures that there is a successor for this block. */ - && (b->succ->flags & EDGE_FALLTHRU) + && (EDGE_SUCC (b, 0)->flags & EDGE_FALLTHRU) && n_basic_blocks > 1) { if (dump_file) @@ -1907,17 +1919,17 @@ try_optimize_cfg (int mode) b->index); c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb; - redirect_edge_succ_nodup (b->pred, b->succ->dest); + redirect_edge_succ_nodup (EDGE_PRED (b, 0), EDGE_SUCC (b, 0)->dest); delete_basic_block (b); changed = true; b = c; } - if ((s = b->succ) != NULL - && s->succ_next == NULL + if (EDGE_COUNT (b->succs) == 1 + && (s = EDGE_SUCC (b, 0)) && !(s->flags & EDGE_COMPLEX) && (c = s->dest) != EXIT_BLOCK_PTR - && c->pred->pred_next == NULL + && EDGE_COUNT (c->preds) == 1 && b != c) { /* When not in cfg_layout mode use code aware of reordering @@ -1959,12 +1971,11 @@ try_optimize_cfg (int mode) non-trivial jump instruction without side-effects, we can either delete the jump entirely, or replace it with a simple unconditional jump. */ - if (b->succ - && ! 
b->succ->succ_next - && b->succ->dest != EXIT_BLOCK_PTR + if (EDGE_COUNT (b->succs) == 1 + && EDGE_SUCC (b, 0)->dest != EXIT_BLOCK_PTR && onlyjump_p (BB_END (b)) && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX) - && try_redirect_by_replacing_jump (b->succ, b->succ->dest, + && try_redirect_by_replacing_jump (EDGE_SUCC (b, 0), EDGE_SUCC (b, 0)->dest, (mode & CLEANUP_CFGLAYOUT) != 0)) { update_forwarder_flag (b); @@ -2049,12 +2060,11 @@ merge_seq_blocks (void) for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; ) { - if (bb->succ - && !bb->succ->succ_next - && can_merge_blocks_p (bb, bb->succ->dest)) + if (EDGE_COUNT (bb->succs) == 1 + && can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest)) { /* Merge the blocks and retry. */ - merge_blocks (bb, bb->succ->dest); + merge_blocks (bb, EDGE_SUCC (bb, 0)->dest); changed = true; continue; } diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c index 5d6c56e9bbb..ee0d534ac55 100644 --- a/gcc/cfgexpand.c +++ b/gcc/cfgexpand.c @@ -924,6 +924,7 @@ expand_gimple_tailcall (basic_block bb, tree stmt, bool *can_fallthru) { rtx last = get_last_insn (); edge e; + edge_iterator ei; int probability; gcov_type count; @@ -948,13 +949,11 @@ expand_gimple_tailcall (basic_block bb, tree stmt, bool *can_fallthru) all edges here, or redirecting the existing fallthru edge to the exit block. */ - e = bb->succ; probability = 0; count = 0; - while (e) - { - edge next = e->succ_next; + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) + { if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH))) { if (e->dest != EXIT_BLOCK_PTR) @@ -970,8 +969,8 @@ expand_gimple_tailcall (basic_block bb, tree stmt, bool *can_fallthru) probability += e->probability; remove_edge (e); } - - e = next; + else + ei_next (&ei); } /* This is somewhat ugly: the call_expr expander often emits instructions @@ -1020,6 +1019,7 @@ expand_gimple_basic_block (basic_block bb, FILE * dump_file) tree stmt = NULL; rtx note, last; edge e; + edge_iterator ei; if (dump_file) { @@ -1050,11 +1050,8 @@ expand_gimple_basic_block (basic_block bb, FILE * dump_file) NOTE_BASIC_BLOCK (note) = bb; - e = bb->succ; - while (e) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - edge next = e->succ_next; - /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. */ e->flags &= ~EDGE_EXECUTABLE; @@ -1063,8 +1060,8 @@ expand_gimple_basic_block (basic_block bb, FILE * dump_file) rediscover them. In the future we should get this fixed properly. */ if (e->flags & EDGE_ABNORMAL) remove_edge (e); - - e = next; + else + ei_next (&ei); } for (; !bsi_end_p (bsi); bsi_next (&bsi)) @@ -1129,8 +1126,9 @@ construct_init_block (void) { basic_block init_block, first_block; edge e = NULL, e2; + edge_iterator ei; - for (e2 = ENTRY_BLOCK_PTR->succ; e2; e2 = e2->succ_next) + FOR_EACH_EDGE (e2, ei, ENTRY_BLOCK_PTR->succs) { /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. @@ -1173,7 +1171,9 @@ construct_exit_block (void) rtx head = get_last_insn (); rtx end; basic_block exit_block; - edge e, e2, next; + edge e, e2; + unsigned ix; + edge_iterator ei; /* Make sure the locus is set to the end of the function, so that epilogue line numbers and warnings are set properly. 
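Predicates that probed the list shape, like b->succ && !b->succ->succ_next, become explicit counts with indexed access, as in merge_seq_blocks and try_optimize_cfg above.  Sketch of the two common cases:

  edge e;

  /* Exactly one successor: an unconditional edge, candidate for merging.  */
  if (EDGE_COUNT (bb->succs) == 1
      && can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest))
    merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);

  /* Exactly two successors: a conditional branch; pick an edge directly.  */
  if (EDGE_COUNT (bb->succs) == 2
      && any_condjump_p (BB_END (bb)))
    e = EDGE_SUCC (bb, 1);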
*/ @@ -1199,16 +1199,21 @@ construct_exit_block (void) EXIT_BLOCK_PTR->prev_bb); exit_block->frequency = EXIT_BLOCK_PTR->frequency; exit_block->count = EXIT_BLOCK_PTR->count; - for (e = EXIT_BLOCK_PTR->pred; e; e = next) + + ix = 0; + while (ix < EDGE_COUNT (EXIT_BLOCK_PTR->preds)) { - next = e->pred_next; + e = EDGE_I (EXIT_BLOCK_PTR->preds, ix); if (!(e->flags & EDGE_ABNORMAL)) - redirect_edge_succ (e, exit_block); + redirect_edge_succ (e, exit_block); + else + ix++; } + e = make_edge (exit_block, EXIT_BLOCK_PTR, EDGE_FALLTHRU); e->probability = REG_BR_PROB_BASE; e->count = EXIT_BLOCK_PTR->count; - for (e2 = EXIT_BLOCK_PTR->pred; e2; e2 = e2->pred_next) + FOR_EACH_EDGE (e2, ei, EXIT_BLOCK_PTR->preds) if (e2 != e) { e->count -= e2->count; diff --git a/gcc/cfghooks.c b/gcc/cfghooks.c index 970dcbedda6..b3fde683cb9 100644 --- a/gcc/cfghooks.c +++ b/gcc/cfghooks.c @@ -106,6 +106,7 @@ verify_flow_info (void) { int n_fallthru = 0; edge e; + edge_iterator ei; if (bb->count < 0) { @@ -119,7 +120,7 @@ verify_flow_info (void) bb->index, bb->frequency); err = 1; } - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (last_visited [e->dest->index + 2] == bb) { @@ -165,7 +166,7 @@ verify_flow_info (void) err = 1; } - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->dest != bb) { @@ -184,11 +185,12 @@ verify_flow_info (void) /* Complete edge checksumming for ENTRY and EXIT. */ { edge e; + edge_iterator ei; - for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) edge_checksum[e->dest->index + 2] += (size_t) e; - for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) edge_checksum[e->dest->index + 2] -= (size_t) e; } @@ -221,6 +223,7 @@ void dump_bb (basic_block bb, FILE *outf, int indent) { edge e; + edge_iterator ei; char *s_indent; s_indent = alloca ((size_t) indent + 1); @@ -245,12 +248,12 @@ dump_bb (basic_block bb, FILE *outf, int indent) putc ('\n', outf); fprintf (outf, ";;%s pred: ", s_indent); - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) dump_edge_info (outf, e, 0); putc ('\n', outf); fprintf (outf, ";;%s succ: ", s_indent); - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) dump_edge_info (outf, e, 1); putc ('\n', outf); @@ -360,13 +363,13 @@ delete_basic_block (basic_block bb) /* Remove the edges into and out of this block. Note that there may indeed be edges in, if we are removing an unreachable loop. 
*/ - while (bb->pred != NULL) - remove_edge (bb->pred); - while (bb->succ != NULL) - remove_edge (bb->succ); + while (EDGE_COUNT (bb->preds) != 0) + remove_edge (EDGE_PRED (bb, 0)); + while (EDGE_COUNT (bb->succs) != 0) + remove_edge (EDGE_SUCC (bb, 0)); - bb->pred = NULL; - bb->succ = NULL; + VEC_truncate (edge, bb->preds, 0); + VEC_truncate (edge, bb->succs, 0); if (dom_computed[CDI_DOMINATORS]) delete_from_dominance_info (CDI_DOMINATORS, bb); @@ -393,11 +396,11 @@ split_edge (edge e) ret = cfg_hooks->split_edge (e); ret->count = count; ret->frequency = freq; - ret->succ->probability = REG_BR_PROB_BASE; - ret->succ->count = count; + EDGE_SUCC (ret, 0)->probability = REG_BR_PROB_BASE; + EDGE_SUCC (ret, 0)->count = count; if (dom_computed[CDI_DOMINATORS]) - set_immediate_dominator (CDI_DOMINATORS, ret, ret->pred->src); + set_immediate_dominator (CDI_DOMINATORS, ret, EDGE_PRED (ret, 0)->src); if (dom_computed[CDI_DOMINATORS] >= DOM_NO_FAST_QUERY) { @@ -410,21 +413,22 @@ split_edge (edge e) ret, provided that all other predecessors of e->dest are dominated by e->dest. */ - if (get_immediate_dominator (CDI_DOMINATORS, ret->succ->dest) - == ret->pred->src) + if (get_immediate_dominator (CDI_DOMINATORS, EDGE_SUCC (ret, 0)->dest) + == EDGE_PRED (ret, 0)->src) { - for (f = ret->succ->dest->pred; f; f = f->pred_next) + edge_iterator ei; + FOR_EACH_EDGE (f, ei, EDGE_SUCC (ret, 0)->dest->preds) { - if (f == ret->succ) + if (f == EDGE_SUCC (ret, 0)) continue; if (!dominated_by_p (CDI_DOMINATORS, f->src, - ret->succ->dest)) + EDGE_SUCC (ret, 0)->dest)) break; } if (!f) - set_immediate_dominator (CDI_DOMINATORS, ret->succ->dest, ret); + set_immediate_dominator (CDI_DOMINATORS, EDGE_SUCC (ret, 0)->dest, ret); } }; @@ -500,6 +504,7 @@ void merge_blocks (basic_block a, basic_block b) { edge e; + edge_iterator ei; if (!cfg_hooks->merge_blocks) internal_error ("%s does not support merge_blocks.", cfg_hooks->name); @@ -510,17 +515,18 @@ merge_blocks (basic_block a, basic_block b) partway though the merge of blocks for conditional_execution we'll be merging a TEST block with THEN and ELSE successors. Free the whole lot of them and hope the caller knows what they're doing. */ - while (a->succ) - remove_edge (a->succ); + + while (EDGE_COUNT (a->succs) != 0) + remove_edge (EDGE_SUCC (a, 0)); /* Adjust the edges out of B for the new owner. */ - for (e = b->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, b->succs) e->src = a; - a->succ = b->succ; + a->succs = b->succs; a->flags |= b->flags; /* B hasn't quite yet ceased to exist. Attempt to prevent mishap. */ - b->pred = b->succ = NULL; + b->preds = b->succs = NULL; a->global_live_at_end = b->global_live_at_end; if (dom_computed[CDI_DOMINATORS]) @@ -542,7 +548,8 @@ edge make_forwarder_block (basic_block bb, bool (*redirect_edge_p) (edge), void (*new_bb_cbk) (basic_block)) { - edge e, next_e, fallthru; + edge e, fallthru; + edge_iterator ei; basic_block dummy, jump; if (!cfg_hooks->make_forwarder_block) @@ -554,11 +561,13 @@ make_forwarder_block (basic_block bb, bool (*redirect_edge_p) (edge), bb = fallthru->dest; /* Redirect back edges we want to keep. */ - for (e = dummy->pred; e; e = next_e) + for (ei = ei_start (dummy->preds); (e = ei_safe_edge (ei)); ) { - next_e = e->pred_next; if (redirect_edge_p (e)) - continue; + { + ei_next (&ei); + continue; + } dummy->frequency -= EDGE_FREQUENCY (e); dummy->count -= e->count; @@ -630,12 +639,14 @@ tidy_fallthru_edges (void) merge the flags for the duplicate edges. 
So we do not want to check that the edge is not a FALLTHRU edge. */ - if ((s = b->succ) != NULL - && ! (s->flags & EDGE_COMPLEX) - && s->succ_next == NULL - && s->dest == c - && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)) - tidy_fallthru_edge (s); + if (EDGE_COUNT (b->succs) == 1) + { + s = EDGE_SUCC (b, 0); + if (! (s->flags & EDGE_COMPLEX) + && s->dest == c + && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)) + tidy_fallthru_edge (s); + } } } @@ -645,6 +656,7 @@ bool can_duplicate_block_p (basic_block bb) { edge e; + edge_iterator ei; if (!cfg_hooks->can_duplicate_block_p) internal_error ("%s does not support can_duplicate_block_p.", @@ -655,7 +667,7 @@ can_duplicate_block_p (basic_block bb) /* Duplicating fallthru block to exit would require adding a jump and splitting the real last BB. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) return false; @@ -671,6 +683,7 @@ duplicate_block (basic_block bb, edge e) edge s, n; basic_block new_bb; gcov_type new_count = e ? e->count : 0; + edge_iterator ei; if (!cfg_hooks->duplicate_block) internal_error ("%s does not support duplicate_block.", @@ -678,7 +691,7 @@ duplicate_block (basic_block bb, edge e) if (bb->count < new_count) new_count = bb->count; - gcc_assert (bb->pred); + gcc_assert (EDGE_COUNT (bb->preds) > 0); #ifdef ENABLE_CHECKING gcc_assert (can_duplicate_block_p (bb)); #endif @@ -687,7 +700,7 @@ duplicate_block (basic_block bb, edge e) new_bb->loop_depth = bb->loop_depth; new_bb->flags = bb->flags; - for (s = bb->succ; s; s = s->succ_next) + FOR_EACH_EDGE (s, ei, bb->succs) { /* Since we are creating edges from a new block to successors of another block (which therefore are known to be disjoint), there diff --git a/gcc/cfglayout.c b/gcc/cfglayout.c index bc7cec93f0a..2d88a376b2e 100644 --- a/gcc/cfglayout.c +++ b/gcc/cfglayout.c @@ -632,14 +632,16 @@ fixup_reorder_chain (void) rtx bb_end_insn; basic_block nb; basic_block old_bb; + edge_iterator ei; - if (bb->succ == NULL) + if (EDGE_COUNT (bb->succs) == 0) continue; /* Find the old fallthru edge, and another non-EH edge for a taken jump. */ e_taken = e_fall = NULL; - for (e = bb->succ; e ; e = e->succ_next) + + FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALLTHRU) e_fall = e; else if (! (e->flags & EDGE_EH)) @@ -790,11 +792,11 @@ fixup_reorder_chain (void) /* Make sure new bb is tagged for correct section (same as fall-thru source, since you cannot fall-throu across section boundaries). 
*/ - BB_COPY_PARTITION (e_fall->src, bb->pred->src); + BB_COPY_PARTITION (e_fall->src, EDGE_PRED (bb, 0)->src); if (flag_reorder_blocks_and_partition && targetm.have_named_sections) { - if (BB_PARTITION (bb->pred->src) == BB_COLD_PARTITION) + if (BB_PARTITION (EDGE_PRED (bb, 0)->src) == BB_COLD_PARTITION) { rtx new_note; rtx note = BB_HEAD (e_fall->src); @@ -810,7 +812,7 @@ fixup_reorder_chain (void) } if (JUMP_P (BB_END (bb)) && !any_condjump_p (BB_END (bb)) - && (bb->succ->flags & EDGE_CROSSING)) + && (EDGE_SUCC (bb, 0)->flags & EDGE_CROSSING)) REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb))); } @@ -860,8 +862,12 @@ fixup_reorder_chain (void) FOR_EACH_BB (bb) { edge e; - for (e = bb->succ; e && !(e->flags & EDGE_FALLTHRU); e = e->succ_next) - continue; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->flags & EDGE_FALLTHRU) + break; + if (e && !can_fallthru (e->src, e->dest)) force_nonfallthru (e); } @@ -916,6 +922,7 @@ static void fixup_fallthru_exit_predecessor (void) { edge e; + edge_iterator ei; basic_block bb = NULL; /* This transformation is not valid before reload, because we might @@ -923,7 +930,7 @@ fixup_fallthru_exit_predecessor (void) value. */ gcc_assert (reload_completed); - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) if (e->flags & EDGE_FALLTHRU) bb = e->src; @@ -1225,7 +1232,8 @@ can_copy_bbs_p (basic_block *bbs, unsigned n) for (i = 0; i < n; i++) { /* In case we should redirect abnormal edge during duplication, fail. */ - for (e = bbs[i]->succ; e; e = e->succ_next) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bbs[i]->succs) if ((e->flags & EDGE_ABNORMAL) && e->dest->rbi->duplicated) { @@ -1307,10 +1315,11 @@ copy_bbs (basic_block *bbs, unsigned n, basic_block *new_bbs, new_edges[j] = NULL; for (i = 0; i < n; i++) { + edge_iterator ei; new_bb = new_bbs[i]; bb = bbs[i]; - for (e = new_bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, new_bb->succs) { for (j = 0; j < n_edges; j++) if (edges[j] && edges[j]->src == bb && edges[j]->dest == e->dest) diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c index 01b9a4612de..a38af1643ae 100644 --- a/gcc/cfgloop.c +++ b/gcc/cfgloop.c @@ -64,9 +64,10 @@ flow_loops_cfg_dump (const struct loops *loops, FILE *file) FOR_EACH_BB (bb) { edge succ; + edge_iterator ei; fprintf (file, ";; %d succs { ", bb->index); - for (succ = bb->succ; succ; succ = succ->succ_next) + FOR_EACH_EDGE (succ, ei, bb->succs) fprintf (file, "%d ", succ->dest->index); fprintf (file, "}\n"); } @@ -242,10 +243,11 @@ static void flow_loop_entry_edges_find (struct loop *loop) { edge e; + edge_iterator ei; int num_entries; num_entries = 0; - for (e = loop->header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (flow_loop_outside_edge_p (loop, e)) num_entries++; @@ -256,7 +258,7 @@ flow_loop_entry_edges_find (struct loop *loop) loop->entry_edges = xmalloc (num_entries * sizeof (edge *)); num_entries = 0; - for (e = loop->header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (flow_loop_outside_edge_p (loop, e)) loop->entry_edges[num_entries++] = e; @@ -284,8 +286,9 @@ flow_loop_exit_edges_find (struct loop *loop) bbs = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { + edge_iterator ei; node = bbs[i]; - for (e = node->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, node->succs) { basic_block dest = e->dest; @@ -306,8 +309,9 @@ flow_loop_exit_edges_find (struct loop *loop) num_exits = 
0; for (i = 0; i < loop->num_nodes; i++) { + edge_iterator ei; node = bbs[i]; - for (e = node->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, node->succs) { basic_block dest = e->dest; @@ -348,10 +352,11 @@ flow_loop_nodes_find (basic_block header, struct loop *loop) { basic_block node; edge e; + edge_iterator ei; node = stack[--sp]; - for (e = node->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, node->preds) { basic_block ancestor = e->src; @@ -390,9 +395,10 @@ mark_single_exit_loops (struct loops *loops) FOR_EACH_BB (bb) { + edge_iterator ei; if (bb->loop_father == loops->tree_root) continue; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -407,7 +413,7 @@ mark_single_exit_loops (struct loops *loops) /* If we have already seen an exit, mark this by the edge that surely does not occur as any exit. */ if (loop->single_exit) - loop->single_exit = ENTRY_BLOCK_PTR->succ; + loop->single_exit = EDGE_SUCC (ENTRY_BLOCK_PTR, 0); else loop->single_exit = e; } @@ -420,7 +426,7 @@ mark_single_exit_loops (struct loops *loops) if (!loop) continue; - if (loop->single_exit == ENTRY_BLOCK_PTR->succ) + if (loop->single_exit == EDGE_SUCC (ENTRY_BLOCK_PTR, 0)) loop->single_exit = NULL; } @@ -448,9 +454,10 @@ flow_loop_pre_header_scan (struct loop *loop) /* Count number of edges along trace from loop header to root of pre-header extended basic block. Usually this is only one or two edges. */ - for (num = 1; ebb->pred->src != ENTRY_BLOCK_PTR && ! ebb->pred->pred_next; + for (num = 1; + EDGE_PRED (ebb, 0)->src != ENTRY_BLOCK_PTR && EDGE_COUNT (ebb->preds) == 1; num++) - ebb = ebb->pred->src; + ebb = EDGE_PRED (ebb, 0)->src; loop->pre_header_edges = xmalloc (num * sizeof (edge)); loop->num_pre_header_edges = num; @@ -458,7 +465,7 @@ flow_loop_pre_header_scan (struct loop *loop) /* Store edges in order that they are followed. The source of the first edge is the root node of the pre-header extended basic block and the destination of the last last edge is the loop header. */ - for (e = loop->entry_edges[0]; num; e = e->src->pred) + for (e = loop->entry_edges[0]; num; e = EDGE_PRED (e->src, 0)) loop->pre_header_edges[--num] = e; } @@ -470,11 +477,12 @@ flow_loop_pre_header_find (basic_block header) { basic_block pre_header; edge e; + edge_iterator ei; /* If block p is a predecessor of the header and is the only block that the header does not dominate, then it is the pre-header. */ pre_header = NULL; - for (e = header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, header->preds) { basic_block node = e->src; @@ -630,9 +638,9 @@ update_latch_info (basic_block jump) { alloc_aux_for_block (jump, sizeof (int)); HEADER_BLOCK (jump) = 0; - alloc_aux_for_edge (jump->pred, sizeof (int)); - LATCH_EDGE (jump->pred) = 0; - set_immediate_dominator (CDI_DOMINATORS, jump, jump->pred->src); + alloc_aux_for_edge (EDGE_PRED (jump, 0), sizeof (int)); + LATCH_EDGE (EDGE_PRED (jump, 0)) = 0; + set_immediate_dominator (CDI_DOMINATORS, jump, EDGE_PRED (jump, 0)->src); } /* A callback for make_forwarder block, to redirect all edges except for @@ -670,10 +678,11 @@ canonicalize_loop_headers (void) /* Split blocks so that each loop has only single latch. 
*/ FOR_EACH_BB (header) { + edge_iterator ei; int num_latches = 0; int have_abnormal_edge = 0; - for (e = header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, header->preds) { basic_block latch = e->src; @@ -693,16 +702,16 @@ canonicalize_loop_headers (void) HEADER_BLOCK (header) = num_latches; } - if (HEADER_BLOCK (ENTRY_BLOCK_PTR->succ->dest)) + if (HEADER_BLOCK (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest)) { basic_block bb; /* We could not redirect edges freely here. On the other hand, we can simply split the edge from entry block. */ - bb = split_edge (ENTRY_BLOCK_PTR->succ); + bb = split_edge (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)); - alloc_aux_for_edge (bb->succ, sizeof (int)); - LATCH_EDGE (bb->succ) = 0; + alloc_aux_for_edge (EDGE_SUCC (bb, 0), sizeof (int)); + LATCH_EDGE (EDGE_SUCC (bb, 0)) = 0; alloc_aux_for_block (bb, sizeof (int)); HEADER_BLOCK (bb) = 0; } @@ -711,6 +720,7 @@ canonicalize_loop_headers (void) { int max_freq, is_heavy; edge heavy, tmp_edge; + edge_iterator ei; if (HEADER_BLOCK (header) <= 1) continue; @@ -719,11 +729,11 @@ canonicalize_loop_headers (void) is_heavy = 1; heavy = NULL; max_freq = 0; - for (e = header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, header->preds) if (LATCH_EDGE (e) && EDGE_FREQUENCY (e) > max_freq) max_freq = EDGE_FREQUENCY (e); - for (e = header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, header->preds) if (LATCH_EDGE (e) && EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO) { @@ -817,19 +827,20 @@ flow_loops_find (struct loops *loops, int flags) num_loops = 0; FOR_EACH_BB (header) { + edge_iterator ei; int more_latches = 0; header->loop_depth = 0; /* If we have an abnormal predecessor, do not consider the loop (not worth the problems). */ - for (e = header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, header->preds) if (e->flags & EDGE_ABNORMAL) break; if (e) continue; - for (e = header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, header->preds) { basic_block latch = e->src; @@ -893,6 +904,7 @@ flow_loops_find (struct loops *loops, int flags) for (b = 0; b < n_basic_blocks; b++) { struct loop *loop; + edge_iterator ei; /* Search the nodes of the CFG in reverse completion order so that we can find outer loops first. */ @@ -908,7 +920,7 @@ flow_loops_find (struct loops *loops, int flags) num_loops++; /* Look for the latch for this header block. 
*/ - for (e = header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, header->preds) { basic_block latch = e->src; @@ -1096,6 +1108,7 @@ get_loop_body_in_bfs_order (const struct loop *loop) while (i < loop->num_nodes) { edge e; + edge_iterator ei; if (!bitmap_bit_p (visited, bb->index)) { @@ -1104,7 +1117,7 @@ get_loop_body_in_bfs_order (const struct loop *loop) blocks[i++] = bb; } - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (flow_bb_inside_loop_p (loop, e->dest)) { @@ -1132,20 +1145,21 @@ get_loop_exit_edges (const struct loop *loop, unsigned int *n_edges) edge *edges, e; unsigned i, n; basic_block * body; + edge_iterator ei; gcc_assert (loop->latch != EXIT_BLOCK_PTR); body = get_loop_body (loop); n = 0; for (i = 0; i < loop->num_nodes; i++) - for (e = body[i]->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, body[i]->succs) if (!flow_bb_inside_loop_p (loop, e->dest)) n++; edges = xmalloc (n * sizeof (edge)); *n_edges = n; n = 0; for (i = 0; i < loop->num_nodes; i++) - for (e = body[i]->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, body[i]->succs) if (!flow_bb_inside_loop_p (loop, e->dest)) edges[n++] = e; free (body); @@ -1166,7 +1180,7 @@ num_loop_branches (const struct loop *loop) body = get_loop_body (loop); n = 0; for (i = 0; i < loop->num_nodes; i++) - if (body[i]->succ && body[i]->succ->succ_next) + if (EDGE_COUNT (body[i]->succs) >= 2) n++; free (body); @@ -1317,21 +1331,19 @@ verify_loop_structure (struct loops *loops) continue; if ((loops->state & LOOPS_HAVE_PREHEADERS) - && (!loop->header->pred->pred_next - || loop->header->pred->pred_next->pred_next)) + && EDGE_COUNT (loop->header->preds) != 2) { error ("Loop %d's header does not have exactly 2 entries.", i); err = 1; } if (loops->state & LOOPS_HAVE_SIMPLE_LATCHES) { - if (!loop->latch->succ - || loop->latch->succ->succ_next) + if (EDGE_COUNT (loop->latch->succs) != 1) { error ("Loop %d's latch does not have exactly 1 successor.", i); err = 1; } - if (loop->latch->succ->dest != loop->header) + if (EDGE_SUCC (loop->latch, 0)->dest != loop->header) { error ("Loop %d's latch does not have header as successor.", i); err = 1; @@ -1362,11 +1374,12 @@ verify_loop_structure (struct loops *loops) irreds = sbitmap_alloc (last_basic_block); FOR_EACH_BB (bb) { + edge_iterator ei; if (bb->flags & BB_IRREDUCIBLE_LOOP) SET_BIT (irreds, bb->index); else RESET_BIT (irreds, bb->index); - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_IRREDUCIBLE_LOOP) e->flags |= EDGE_ALL_FLAGS + 1; } @@ -1377,6 +1390,8 @@ verify_loop_structure (struct loops *loops) /* Compare. 
*/ FOR_EACH_BB (bb) { + edge_iterator ei; + if ((bb->flags & BB_IRREDUCIBLE_LOOP) && !TEST_BIT (irreds, bb->index)) { @@ -1389,7 +1404,7 @@ verify_loop_structure (struct loops *loops) error ("Basic block %d should not be marked irreducible.", bb->index); err = 1; } - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if ((e->flags & EDGE_IRREDUCIBLE_LOOP) && !(e->flags & (EDGE_ALL_FLAGS + 1))) @@ -1417,9 +1432,10 @@ verify_loop_structure (struct loops *loops) memset (sizes, 0, sizeof (unsigned) * loops->num); FOR_EACH_BB (bb) { + edge_iterator ei; if (bb->loop_father == loops->tree_root) continue; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -1482,9 +1498,11 @@ edge loop_latch_edge (const struct loop *loop) { edge e; + edge_iterator ei; - for (e = loop->header->pred; e->src != loop->latch; e = e->pred_next) - continue; + FOR_EACH_EDGE (e, ei, loop->header->preds) + if (e->src == loop->latch) + break; return e; } @@ -1494,9 +1512,11 @@ edge loop_preheader_edge (const struct loop *loop) { edge e; + edge_iterator ei; - for (e = loop->header->pred; e->src == loop->latch; e = e->pred_next) - continue; + FOR_EACH_EDGE (e, ei, loop->header->preds) + if (e->src != loop->latch) + break; return e; } diff --git a/gcc/cfgloopanal.c b/gcc/cfgloopanal.c index 4c50bbe959c..87c85e329d8 100644 --- a/gcc/cfgloopanal.c +++ b/gcc/cfgloopanal.c @@ -267,6 +267,7 @@ mark_irreducible_loops (struct loops *loops) { basic_block act; edge e; + edge_iterator ei; int i, src, dest; struct graph *g; int *queue1 = xmalloc ((last_basic_block + loops->num) * sizeof (int)); @@ -278,7 +279,7 @@ mark_irreducible_loops (struct loops *loops) FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { act->flags &= ~BB_IRREDUCIBLE_LOOP; - for (e = act->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, act->succs) e->flags &= ~EDGE_IRREDUCIBLE_LOOP; } @@ -286,7 +287,7 @@ mark_irreducible_loops (struct loops *loops) g = new_graph (last_basic_block + loops->num); FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) - for (e = act->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, act->succs) { /* Ignore edges to exit. */ if (e->dest == EXIT_BLOCK_PTR) @@ -415,6 +416,7 @@ unsigned expected_loop_iterations (const struct loop *loop) { edge e; + edge_iterator ei; if (loop->header->count) { @@ -423,7 +425,7 @@ expected_loop_iterations (const struct loop *loop) count_in = 0; count_latch = 0; - for (e = loop->header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, loop->header->preds) if (e->src == loop->latch) count_latch = e->count; else @@ -444,7 +446,7 @@ expected_loop_iterations (const struct loop *loop) freq_in = 0; freq_latch = 0; - for (e = loop->header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, loop->header->preds) if (e->src == loop->latch) freq_latch = EDGE_FREQUENCY (e); else diff --git a/gcc/cfgloopmanip.c b/gcc/cfgloopmanip.c index 24b2399ddb8..e5ec7bddaab 100644 --- a/gcc/cfgloopmanip.c +++ b/gcc/cfgloopmanip.c @@ -96,7 +96,7 @@ remove_bbs (basic_block *bbs, int nbbs) static int find_path (edge e, basic_block **bbs) { - gcc_assert (!e->dest->pred->pred_next); + gcc_assert (EDGE_COUNT (e->dest->preds) <= 1); /* Find bbs in the path. 
*/ *bbs = xcalloc (n_basic_blocks, sizeof (basic_block)); @@ -115,9 +115,10 @@ static bool fix_bb_placement (struct loops *loops, basic_block bb) { edge e; + edge_iterator ei; struct loop *loop = loops->tree_root, *act; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -180,6 +181,7 @@ fix_bb_placements (struct loops *loops, basic_block from) while (qbeg != qend) { + edge_iterator ei; from = *qbeg; qbeg++; if (qbeg == qtop) @@ -200,7 +202,7 @@ fix_bb_placements (struct loops *loops, basic_block from) } /* Something has changed, insert predecessors into queue. */ - for (e = from->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, from->preds) { basic_block pred = e->src; struct loop *nca; @@ -262,10 +264,11 @@ fix_irreducible_loops (basic_block from) while (stack_top) { + edge_iterator ei; bb = stack[--stack_top]; RESET_BIT (on_stack, bb->index); - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (e->flags & EDGE_IRREDUCIBLE_LOOP) break; if (e) @@ -276,13 +279,10 @@ fix_irreducible_loops (basic_block from) edges = get_loop_exit_edges (bb->loop_father, &n_edges); else { - n_edges = 0; - for (e = bb->succ; e; e = e->succ_next) - n_edges++; + n_edges = EDGE_COUNT (bb->succs); edges = xmalloc (n_edges * sizeof (edge)); - n_edges = 0; - for (e = bb->succ; e; e = e->succ_next) - edges[n_edges++] = e; + FOR_EACH_EDGE (e, ei, bb->succs) + edges[ei.index] = e; } for (i = 0; i < n_edges; i++) @@ -329,8 +329,8 @@ remove_path (struct loops *loops, edge e) e, but we only have basic block dominators. This is easy to fix -- when e->dest has exactly one predecessor, this corresponds to blocks dominated by e->dest, if not, split the edge. */ - if (e->dest->pred->pred_next) - e = loop_split_edge_with (e, NULL_RTX)->pred; + if (EDGE_COUNT (e->dest->preds) > 1) + e = EDGE_PRED (loop_split_edge_with (e, NULL_RTX), 0); /* It may happen that by removing path we remove one or more loops we belong to. In this case first unloop the loops, then proceed @@ -354,8 +354,9 @@ remove_path (struct loops *loops, edge e) SET_BIT (seen, rem_bbs[i]->index); for (i = 0; i < nrem; i++) { + edge_iterator ei; bb = rem_bbs[i]; - for (ae = rem_bbs[i]->succ; ae; ae = ae->succ_next) + FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs) if (ae->dest != EXIT_BLOCK_PTR && !TEST_BIT (seen, ae->dest->index)) { SET_BIT (seen, ae->dest->index); @@ -457,9 +458,10 @@ scale_bbs_frequencies (basic_block *bbs, int nbbs, int num, int den) for (i = 0; i < nbbs; i++) { + edge_iterator ei; bbs[i]->frequency = (bbs[i]->frequency * num) / den; bbs[i]->count = RDIV (bbs[i]->count * num, den); - for (e = bbs[i]->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bbs[i]->succs) e->count = (e->count * num) /den; } } @@ -498,14 +500,15 @@ loopify (struct loops *loops, edge latch_edge, edge header_edge, int freq, prob, tot_prob; gcov_type cnt; edge e; + edge_iterator ei; loop->header = header_edge->dest; loop->latch = latch_edge->src; freq = EDGE_FREQUENCY (header_edge); cnt = header_edge->count; - prob = switch_bb->succ->probability; - tot_prob = prob + switch_bb->succ->succ_next->probability; + prob = EDGE_SUCC (switch_bb, 0)->probability; + tot_prob = prob + EDGE_SUCC (switch_bb, 1)->probability; if (tot_prob == 0) tot_prob = 1; @@ -537,7 +540,7 @@ loopify (struct loops *loops, edge latch_edge, edge header_edge, /* Fix frequencies. 
*/ switch_bb->frequency = freq; switch_bb->count = cnt; - for (e = switch_bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, switch_bb->succs) e->count = (switch_bb->count * e->probability) / REG_BR_PROB_BASE; scale_loop_frequencies (loop, prob, tot_prob); scale_loop_frequencies (succ_bb->loop_father, tot_prob - prob, tot_prob); @@ -617,7 +620,7 @@ unloop (struct loops *loops, struct loop *loop) loops->parray[loop->num] = NULL; flow_loop_free (loop); - remove_edge (latch->succ); + remove_edge (EDGE_SUCC (latch, 0)); fix_bb_placements (loops, latch); /* If the loop was inside an irreducible region, we would have to somehow @@ -642,11 +645,12 @@ fix_loop_placement (struct loop *loop) basic_block *body; unsigned i; edge e; + edge_iterator ei; struct loop *father = loop->pred[0], *act; body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) - for (e = body[i]->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, body[i]->succs) if (!flow_bb_inside_loop_p (loop, e->dest)) { act = find_common_loop (loop, e->dest->loop_father); @@ -772,16 +776,16 @@ loop_delete_branch_edge (edge e, int really_delete) int irr; edge snd; - gcc_assert (src->succ->succ_next); + gcc_assert (EDGE_COUNT (src->succs) > 1); /* Cannot handle more than two exit edges. */ - if (src->succ->succ_next->succ_next) + if (EDGE_COUNT (src->succs) > 2) return false; /* And it must be just a simple branch. */ if (!any_condjump_p (BB_END (src))) return false; - snd = e == src->succ ? src->succ->succ_next : src->succ; + snd = e == EDGE_SUCC (src, 0) ? EDGE_SUCC (src, 1) : EDGE_SUCC (src, 0); newdest = snd->dest; if (newdest == EXIT_BLOCK_PTR) return false; @@ -795,8 +799,8 @@ loop_delete_branch_edge (edge e, int really_delete) if (!redirect_edge_and_branch (e, newdest)) return false; - src->succ->flags &= ~EDGE_IRREDUCIBLE_LOOP; - src->succ->flags |= irr; + EDGE_SUCC (src, 0)->flags &= ~EDGE_IRREDUCIBLE_LOOP; + EDGE_SUCC (src, 0)->flags |= irr; return true; } @@ -1003,11 +1007,12 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops, new_bbs[i]->rbi->duplicated = 1; for (i = 0; i < n; i++) { + edge_iterator ei; new_bb = new_bbs[i]; if (new_bb->loop_father == target) new_bb->flags |= BB_IRREDUCIBLE_LOOP; - for (ae = new_bb->succ; ae; ae = ae->succ_next) + FOR_EACH_EDGE (ae, ei, new_bb->succs) if (ae->dest->rbi->duplicated && (ae->src->loop_father == target || ae->dest->loop_father == target)) @@ -1113,10 +1118,10 @@ mfb_keep_just (edge e) static void mfb_update_loops (basic_block jump) { - struct loop *loop = jump->succ->dest->loop_father; + struct loop *loop = EDGE_SUCC (jump, 0)->dest->loop_father; if (dom_computed[CDI_DOMINATORS]) - set_immediate_dominator (CDI_DOMINATORS, jump, jump->pred->src); + set_immediate_dominator (CDI_DOMINATORS, jump, EDGE_PRED (jump, 0)->src); add_bb_to_loop (jump, loop); loop->latch = jump; } @@ -1134,10 +1139,11 @@ create_preheader (struct loop *loop, int flags) struct loop *cloop, *ploop; int nentry = 0; bool irred = false; + edge_iterator ei; cloop = loop->outer; - for (e = loop->header->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (e->src == loop->latch) continue; @@ -1147,9 +1153,11 @@ create_preheader (struct loop *loop, int flags) gcc_assert (nentry); if (nentry == 1) { - for (e = loop->header->pred; e->src == loop->latch; e = e->pred_next); - if (!(flags & CP_SIMPLE_PREHEADERS) - || !e->src->succ->succ_next) + FOR_EACH_EDGE (e, ei, loop->header->preds) + if (e->src != loop->latch) + break; + + if (!(flags & 
CP_SIMPLE_PREHEADERS) || EDGE_COUNT (e->src->succs) == 1) return NULL; } @@ -1167,7 +1175,7 @@ create_preheader (struct loop *loop, int flags) /* Reorganize blocks so that the preheader is not stuck in the middle of the loop. */ - for (e = dummy->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, dummy->preds) if (e->src != loop->latch) break; move_block_after (dummy, e->src); @@ -1178,7 +1186,7 @@ create_preheader (struct loop *loop, int flags) if (irred) { dummy->flags |= BB_IRREDUCIBLE_LOOP; - dummy->succ->flags |= EDGE_IRREDUCIBLE_LOOP; + EDGE_SUCC (dummy, 0)->flags |= EDGE_IRREDUCIBLE_LOOP; } if (dump_file) @@ -1210,13 +1218,14 @@ force_single_succ_latches (struct loops *loops) for (i = 1; i < loops->num; i++) { + edge_iterator ei; loop = loops->parray[i]; - if (loop->latch != loop->header - && !loop->latch->succ->succ_next) + if (loop->latch != loop->header && EDGE_COUNT (loop->latch->succs) == 1) continue; - for (e = loop->header->pred; e->src != loop->latch; e = e->pred_next) - continue; + FOR_EACH_EDGE (e, ei, loop->header->preds) + if (e->src == loop->latch) + break; loop_split_edge_with (e, NULL_RTX); } @@ -1245,7 +1254,7 @@ loop_split_edge_with (edge e, rtx insns) add_bb_to_loop (new_bb, loop_c); new_bb->flags = insns ? BB_SUPERBLOCK : 0; - new_e = new_bb->succ; + new_e = EDGE_SUCC (new_bb, 0); if (e->flags & EDGE_IRREDUCIBLE_LOOP) { new_bb->flags |= BB_IRREDUCIBLE_LOOP; @@ -1323,9 +1332,9 @@ create_loop_notes (void) && onlyjump_p (insn)) { pbb = BLOCK_FOR_INSN (insn); - gcc_assert (pbb && pbb->succ && !pbb->succ->succ_next); + gcc_assert (pbb && EDGE_COUNT (pbb->succs) == 1); - if (!flow_bb_inside_loop_p (loop, pbb->succ->dest)) + if (!flow_bb_inside_loop_p (loop, EDGE_SUCC (pbb, 0)->dest)) insn = BB_HEAD (first[loop->num]); } else diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c index 8664f049901..bba68819a80 100644 --- a/gcc/cfgrtl.c +++ b/gcc/cfgrtl.c @@ -459,6 +459,7 @@ rtl_split_block (basic_block bb, void *insnp) basic_block new_bb; rtx insn = insnp; edge e; + edge_iterator ei; if (!insn) { @@ -482,9 +483,9 @@ rtl_split_block (basic_block bb, void *insnp) BB_END (bb) = insn; /* Redirect the outgoing edges. */ - new_bb->succ = bb->succ; - bb->succ = NULL; - for (e = new_bb->succ; e; e = e->succ_next) + new_bb->succs = bb->succs; + bb->succs = NULL; + FOR_EACH_EDGE (e, ei, new_bb->succs) e->src = new_bb; if (bb->global_live_at_start) @@ -625,10 +626,12 @@ rtl_can_merge_blocks (basic_block a,basic_block b) return false; /* There must be exactly one edge in between the blocks. */ - return (a->succ && !a->succ->succ_next && a->succ->dest == b - && !b->pred->pred_next && a != b + return (EDGE_COUNT (a->succs) == 1 + && EDGE_SUCC (a, 0)->dest == b + && EDGE_COUNT (b->preds) == 1 + && a != b /* Must be simple edge. */ - && !(a->succ->flags & EDGE_COMPLEX) + && !(EDGE_SUCC (a, 0)->flags & EDGE_COMPLEX) && a->next_bb == b && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR /* If the jump insn has side effects, @@ -668,7 +671,7 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) edge tmp; rtx set; int fallthru = 0; - + edge_iterator ei; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot @@ -686,7 +689,7 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) return NULL; /* Verify that all targets will be TARGET. 
*/ - for (tmp = src->succ; tmp; tmp = tmp->succ_next) + FOR_EACH_EDGE (tmp, ei, src->succs) if (tmp->dest != target && tmp != e) break; @@ -814,9 +817,10 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) } /* Keep only one edge out and set proper flags. */ - while (src->succ->succ_next) - remove_edge (src->succ); - e = src->succ; + while (EDGE_COUNT (src->succs) > 1) + remove_edge (e); + + e = EDGE_SUCC (src, 0); if (fallthru) e->flags = EDGE_FALLTHRU; else @@ -1040,28 +1044,37 @@ force_nonfallthru_and_redirect (edge e, basic_block target) if (e->src == ENTRY_BLOCK_PTR) { /* We can't redirect the entry block. Create an empty block - at the start of the function which we use to add the new - jump. */ - edge *pe1; - basic_block bb - = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR); - + at the start of the function which we use to add the new + jump. */ + edge tmp; + edge_iterator ei; + bool found = false; + + basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR); + /* Change the existing edge's source to be the new block, and add a new edge from the entry block to the new block. */ e->src = bb; - for (pe1 = &ENTRY_BLOCK_PTR->succ; *pe1; pe1 = &(*pe1)->succ_next) - if (*pe1 == e) - { - *pe1 = e->succ_next; - break; - } - e->succ_next = 0; - bb->succ = e; + for (ei = ei_start (ENTRY_BLOCK_PTR->succs); (tmp = ei_safe_edge (ei)); ) + { + if (tmp == e) + { + VEC_ordered_remove (edge, ENTRY_BLOCK_PTR->succs, ei.index); + found = true; + break; + } + else + ei_next (&ei); + } + + gcc_assert (found); + + VEC_safe_insert (edge, bb->succs, 0, e); make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU); } } - if (e->src->succ->succ_next || abnormal_edge_flags) + if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags) { /* Create the new structures. */ @@ -1113,7 +1126,7 @@ force_nonfallthru_and_redirect (edge e, basic_block target) } if (JUMP_P (BB_END (jump_block)) && !any_condjump_p (BB_END (jump_block)) - && (jump_block->succ->flags & EDGE_CROSSING)) + && (EDGE_SUCC (jump_block, 0)->flags & EDGE_CROSSING)) REG_NOTES (BB_END (jump_block)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (jump_block))); @@ -1193,6 +1206,12 @@ rtl_tidy_fallthru_edge (edge e) { rtx q; basic_block b = e->src, c = b->next_bb; + edge e2; + edge_iterator ei; + + FOR_EACH_EDGE (e2, ei, b->succs) + if (e == e2) + break; /* ??? In a late-running flow pass, other folks may have deleted basic blocks by nopping out blocks, leaving multiple BARRIERs between here @@ -1215,7 +1234,7 @@ rtl_tidy_fallthru_edge (edge e) if (JUMP_P (q) && onlyjump_p (q) && (any_uncondjump_p (q) - || (b->succ == e && e->succ_next == NULL))) + || (EDGE_SUCC (b, 0) == e && ei.index == EDGE_COUNT (b->succs) - 1))) { #ifdef HAVE_cc0 /* If this was a conditional jump, we need to also delete @@ -1303,8 +1322,9 @@ rtl_split_edge (edge edge_in) if ((edge_in->flags & EDGE_FALLTHRU) == 0) { edge e; + edge_iterator ei; - for (e = edge_in->dest->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, edge_in->dest->preds) if (e->flags & EDGE_FALLTHRU) break; @@ -1518,7 +1538,8 @@ commit_one_edge_insertion (edge e, int watch_calls) /* Special case -- avoid inserting code between call and storing its return value. 
*/ - if (watch_calls && (e->flags & EDGE_FALLTHRU) && !e->dest->pred->pred_next + if (watch_calls && (e->flags & EDGE_FALLTHRU) + && EDGE_COUNT (e->dest->preds) == 1 && e->src != ENTRY_BLOCK_PTR && CALL_P (BB_END (e->src))) { @@ -1538,7 +1559,7 @@ commit_one_edge_insertion (edge e, int watch_calls) { /* Figure out where to put these things. If the destination has one predecessor, insert there. Except for the exit block. */ - if (e->dest->pred->pred_next == NULL && e->dest != EXIT_BLOCK_PTR) + if (EDGE_COUNT (e->dest->preds) == 1 && e->dest != EXIT_BLOCK_PTR) { bb = e->dest; @@ -1564,7 +1585,7 @@ commit_one_edge_insertion (edge e, int watch_calls) /* If the source has one successor and the edge is not abnormal, insert there. Except for the entry block. */ else if ((e->flags & EDGE_ABNORMAL) == 0 - && e->src->succ->succ_next == NULL + && EDGE_COUNT (e->src->succs) == 1 && e->src != ENTRY_BLOCK_PTR) { bb = e->src; @@ -1619,7 +1640,7 @@ commit_one_edge_insertion (edge e, int watch_calls) NOTE_BASIC_BLOCK (new_note) = bb; if (JUMP_P (BB_END (bb)) && !any_condjump_p (BB_END (bb)) - && (bb->succ->flags & EDGE_CROSSING)) + && (EDGE_SUCC (bb, 0)->flags & EDGE_CROSSING)) REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb))); if (after == bb_note) @@ -1645,9 +1666,9 @@ commit_one_edge_insertion (edge e, int watch_calls) for the (single) epilogue, which already has a fallthru edge to EXIT. */ - e = bb->succ; + e = EDGE_SUCC (bb, 0); gcc_assert (e->dest == EXIT_BLOCK_PTR - && !e->succ_next && (e->flags & EDGE_FALLTHRU)); + && EDGE_COUNT (bb->succs) == 1 && (e->flags & EDGE_FALLTHRU)); e->flags &= ~EDGE_FALLTHRU; emit_barrier_after (last); @@ -1677,17 +1698,15 @@ commit_edge_insertions (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - edge e, next; + edge e; + edge_iterator ei; - for (e = bb->succ; e; e = next) - { - next = e->succ_next; - if (e->insns.r) - { - changed = true; - commit_one_edge_insertion (e, false); - } - } + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->insns.r) + { + changed = true; + commit_one_edge_insertion (e, false); + } } if (!changed) @@ -1724,17 +1743,15 @@ commit_edge_insertions_watch_calls (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - edge e, next; + edge e; + edge_iterator ei; - for (e = bb->succ; e; e = next) - { - next = e->succ_next; - if (e->insns.r) - { - changed = true; - commit_one_edge_insertion (e, true); - } - } + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->insns.r) + { + changed = true; + commit_one_edge_insertion (e, true); + } } if (!changed) @@ -1963,10 +1980,11 @@ rtl_verify_flow_info_1 (void) int n_fallthru = 0, n_eh = 0, n_call = 0, n_abnormal = 0, n_branch = 0; edge e, fallthru = NULL; rtx note; + edge_iterator ei; if (INSN_P (BB_END (bb)) && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX)) - && bb->succ && bb->succ->succ_next + && EDGE_COUNT (bb->succs) >= 2 && any_condjump_p (BB_END (bb))) { if (INTVAL (XEXP (note, 0)) != BRANCH_EDGE (bb)->probability @@ -1977,7 +1995,7 @@ rtl_verify_flow_info_1 (void) err = 1; } } - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) { @@ -2144,7 +2162,9 @@ rtl_verify_flow_info (void) FOR_EACH_BB_REVERSE (bb) { edge e; - for (e = bb->succ; e; e = e->succ_next) + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALLTHRU) break; if (!e) @@ -2258,9 +2278,11 @@ rtl_verify_flow_info (void) bool purge_dead_edges (basic_block bb) { - edge e, next; + 
edge e; rtx insn = BB_END (bb), note; bool purged = false; + bool found; + edge_iterator ei; /* If this instruction cannot trap, remove REG_EH_REGION notes. */ if (NONJUMP_INSN_P (insn) @@ -2275,23 +2297,31 @@ purge_dead_edges (basic_block bb) } /* Cleanup abnormal edges caused by exceptions or non-local gotos. */ - for (e = bb->succ; e; e = next) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - next = e->succ_next; if (e->flags & EDGE_EH) { if (can_throw_internal (BB_END (bb))) - continue; + { + ei_next (&ei); + continue; + } } else if (e->flags & EDGE_ABNORMAL_CALL) { if (CALL_P (BB_END (bb)) && (! (note = find_reg_note (insn, REG_EH_REGION, NULL)) || INTVAL (XEXP (note, 0)) >= 0)) - continue; + { + ei_next (&ei); + continue; + } } else - continue; + { + ei_next (&ei); + continue; + } remove_edge (e); bb->flags |= BB_DIRTY; @@ -2302,6 +2332,7 @@ purge_dead_edges (basic_block bb) { rtx note; edge b,f; + edge_iterator ei; /* We do care only about conditional jumps and simplejumps. */ if (!any_condjump_p (insn) @@ -2320,10 +2351,8 @@ purge_dead_edges (basic_block bb) remove_note (insn, note); } - for (e = bb->succ; e; e = next) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - next = e->succ_next; - /* Avoid abnormal flags to leak from computed jumps turned into simplejumps. */ @@ -2333,22 +2362,32 @@ purge_dead_edges (basic_block bb) if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn)) /* A conditional jump can fall through into the next block, so we should keep the edge. */ - continue; + { + ei_next (&ei); + continue; + } else if (e->dest != EXIT_BLOCK_PTR && BB_HEAD (e->dest) == JUMP_LABEL (insn)) /* If the destination block is the target of the jump, keep the edge. */ - continue; + { + ei_next (&ei); + continue; + } else if (e->dest == EXIT_BLOCK_PTR && returnjump_p (insn)) /* If the destination block is the exit block, and this instruction is a return, then keep the edge. */ - continue; + { + ei_next (&ei); + continue; + } else if ((e->flags & EDGE_EH) && can_throw_internal (insn)) /* Keep the edges that correspond to exceptions thrown by this instruction and rematerialize the EDGE_ABNORMAL flag we just cleared above. */ { e->flags |= EDGE_ABNORMAL; + ei_next (&ei); continue; } @@ -2358,7 +2397,7 @@ purge_dead_edges (basic_block bb) remove_edge (e); } - if (!bb->succ || !purged) + if (EDGE_COUNT (bb->succs) == 0 || !purged) return purged; if (dump_file) @@ -2368,10 +2407,10 @@ purge_dead_edges (basic_block bb) return purged; /* Redistribute probabilities. */ - if (!bb->succ->succ_next) + if (EDGE_COUNT (bb->succs) == 1) { - bb->succ->probability = REG_BR_PROB_BASE; - bb->succ->count = bb->count; + EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE; + EDGE_SUCC (bb, 0)->count = bb->count; } else { @@ -2395,8 +2434,8 @@ purge_dead_edges (basic_block bb) from non-local gotos and the like. If there were, we shouldn't have created the sibcall in the first place. Second, there should of course never have been a fallthru edge. */ - gcc_assert (bb->succ && !bb->succ->succ_next); - gcc_assert (bb->succ->flags == (EDGE_SIBCALL | EDGE_ABNORMAL)); + gcc_assert (EDGE_COUNT (bb->succs) == 1); + gcc_assert (EDGE_SUCC (bb, 0)->flags == (EDGE_SIBCALL | EDGE_ABNORMAL)); return 0; } @@ -2406,28 +2445,33 @@ purge_dead_edges (basic_block bb) as these are only created by conditional branches. If we find such an edge we know that there used to be a jump here and can then safely remove all non-fallthru edges. 
*/ - for (e = bb->succ; e && (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)); - e = e->succ_next) - ; + found = false; + FOR_EACH_EDGE (e, ei, bb->succs) + if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))) + { + found = true; + break; + } - if (!e) + if (!found) return purged; - for (e = bb->succ; e; e = next) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - next = e->succ_next; if (!(e->flags & EDGE_FALLTHRU)) { bb->flags |= BB_DIRTY; remove_edge (e); purged = true; } + else + ei_next (&ei); } - gcc_assert (bb->succ && !bb->succ->succ_next); + gcc_assert (EDGE_COUNT (bb->succs) == 1); - bb->succ->probability = REG_BR_PROB_BASE; - bb->succ->count = bb->count; + EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE; + EDGE_SUCC (bb, 0)->count = bb->count; if (dump_file) fprintf (dump_file, "Purged non-fallthru edges from bb %i\n", @@ -2543,10 +2587,28 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest) } /* In case we are redirecting fallthru edge to the branch edge of conditional jump, remove it. */ - if (src->succ->succ_next - && !src->succ->succ_next->succ_next) + if (EDGE_COUNT (src->succs) == 2) { - edge s = e->succ_next ? e->succ_next : src->succ; + bool found = false; + unsigned ix = 0; + edge tmp, s; + edge_iterator ei; + + FOR_EACH_EDGE (tmp, ei, src->succs) + if (e == tmp) + { + found = true; + ix = ei.index; + break; + } + + gcc_assert (found); + + if (EDGE_COUNT (src->succs) > (ix + 1)) + s = EDGE_SUCC (src, ix + 1); + else + s = EDGE_SUCC (src, 0); + if (s->dest == dest && any_condjump_p (BB_END (src)) && onlyjump_p (BB_END (src))) @@ -2680,10 +2742,12 @@ cfg_layout_can_merge_blocks_p (basic_block a, basic_block b) return false; /* There must be exactly one edge in between the blocks. */ - return (a->succ && !a->succ->succ_next && a->succ->dest == b - && !b->pred->pred_next && a != b + return (EDGE_COUNT (a->succs) == 1 + && EDGE_SUCC (a, 0)->dest == b + && EDGE_COUNT (b->preds) == 1 + && a != b /* Must be simple edge. */ - && !(a->succ->flags & EDGE_COMPLEX) + && !(EDGE_SUCC (a, 0)->flags & EDGE_COMPLEX) && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR /* If the jump insn has side effects, we can't kill the edge. */ @@ -2707,7 +2771,7 @@ cfg_layout_merge_blocks (basic_block a, basic_block b) /* We should have fallthru edge in a, or we can do dummy redirection to get it cleaned up. */ if (JUMP_P (BB_END (a))) - try_redirect_by_replacing_jump (a->succ, b, true); + try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true); gcc_assert (!JUMP_P (BB_END (a))); /* Possible line number notes should appear in between. 
*/ @@ -2906,8 +2970,9 @@ rtl_flow_call_edges_add (sbitmap blocks) if (need_fake_edge_p (insn)) { edge e; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest == EXIT_BLOCK_PTR) { insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e); @@ -2955,8 +3020,11 @@ rtl_flow_call_edges_add (sbitmap blocks) #ifdef ENABLE_CHECKING if (split_at_insn == BB_END (bb)) - for (e = bb->succ; e; e = e->succ_next) - gcc_assert (e->dest != EXIT_BLOCK_PTR); + { + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->succs) + gcc_assert (e->dest != EXIT_BLOCK_PTR); + } #endif /* Note that the following may create a new basic block diff --git a/gcc/config/frv/frv.c b/gcc/config/frv/frv.c index b4021c7706f..9862013ec45 100644 --- a/gcc/config/frv/frv.c +++ b/gcc/config/frv/frv.c @@ -6715,7 +6715,7 @@ frv_ifcvt_modify_tests (ce_if_block_t *ce_info, rtx *p_true, rtx *p_false) while (multiple_test_bb != test_bb) { bb[num_bb++] = multiple_test_bb; - multiple_test_bb = multiple_test_bb->pred->src; + multiple_test_bb = EDGE_PRED (multiple_test_bb, 0)->src; } } diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c index b5a33fc01ba..540d390d50c 100644 --- a/gcc/config/i386/i386.c +++ b/gcc/config/i386/i386.c @@ -14909,46 +14909,49 @@ static void ix86_pad_returns (void) { edge e; + edge_iterator ei; - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) - { - basic_block bb = e->src; - rtx ret = BB_END (bb); - rtx prev; - bool replace = false; - - if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN - || !maybe_hot_bb_p (bb)) - continue; - for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev)) - if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL) - break; - if (prev && GET_CODE (prev) == CODE_LABEL) - { - edge e; - for (e = bb->pred; e; e = e->pred_next) - if (EDGE_FREQUENCY (e) && e->src->index >= 0 - && !(e->flags & EDGE_FALLTHRU)) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) + { + basic_block bb = e->src; + rtx ret = BB_END (bb); + rtx prev; + bool replace = false; + + if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN + || !maybe_hot_bb_p (bb)) + continue; + for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev)) + if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL) + break; + if (prev && GET_CODE (prev) == CODE_LABEL) + { + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->preds) + if (EDGE_FREQUENCY (e) && e->src->index >= 0 + && !(e->flags & EDGE_FALLTHRU)) + replace = true; + } + if (!replace) + { + prev = prev_active_insn (ret); + if (prev + && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev)) + || GET_CODE (prev) == CALL_INSN)) replace = true; - } - if (!replace) - { - prev = prev_active_insn (ret); - if (prev - && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev)) - || GET_CODE (prev) == CALL_INSN)) - replace = true; - /* Empty functions get branch mispredict even when the jump destination - is not visible to us. */ - if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED) - replace = true; - } - if (replace) - { - emit_insn_before (gen_return_internal_long (), ret); - delete_insn (ret); - } - } + /* Empty functions get branch mispredict even when the jump destination + is not visible to us. */ + if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED) + replace = true; + } + if (replace) + { + emit_insn_before (gen_return_internal_long (), ret); + delete_insn (ret); + } + } } /* Implement machine specific optimizations. 
We implement padding of returns diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c index 871c52de502..6cb3b6a5652 100644 --- a/gcc/config/ia64/ia64.c +++ b/gcc/config/ia64/ia64.c @@ -2034,7 +2034,7 @@ ia64_expand_prologue (void) { edge e; - for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) if ((e->flags & EDGE_FAKE) == 0 && (e->flags & EDGE_FALLTHRU) != 0) break; diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c index 602498bd9da..f12f7b35941 100644 --- a/gcc/config/rs6000/rs6000.c +++ b/gcc/config/rs6000/rs6000.c @@ -13555,7 +13555,7 @@ rs6000_emit_prologue (void) && DEFAULT_ABI != ABI_AIX && flag_pic && ! info->lr_save_p - && EXIT_BLOCK_PTR->pred != NULL); + && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0); if (save_LR_around_toc_setup) { rtx lr = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM); diff --git a/gcc/cse.c b/gcc/cse.c index b7da8a34978..ca577c5de6b 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -7392,6 +7392,7 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode) rtx last_insns[2]; unsigned int i; rtx newreg; + edge_iterator ei; /* We expect to have two successors. Look at both before picking the final mode for the comparison. If we have more successors @@ -7402,7 +7403,7 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode) found_equiv = false; mode = GET_MODE (cc_src); insn_count = 0; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { rtx insn; rtx end; @@ -7410,8 +7411,7 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode) if (e->flags & EDGE_COMPLEX) continue; - if (! e->dest->pred - || e->dest->pred->pred_next + if (EDGE_COUNT (e->dest->preds) != 1 || e->dest == EXIT_BLOCK_PTR) continue; diff --git a/gcc/df.c b/gcc/df.c index 2a59ca42b2d..ee10362660d 100644 --- a/gcc/df.c +++ b/gcc/df.c @@ -3796,18 +3796,19 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, int changed; int i = bb->index; edge e; + edge_iterator ei; SET_BIT (visited, bb->index); gcc_assert (TEST_BIT (pending, bb->index)); RESET_BIT (pending, i); -#define HS(E_ANTI, E_ANTI_NEXT, E_ANTI_BB, E_ANTI_START_BB, IN_SET, \ - E, E_NEXT, E_BB, E_START_BB, OUT_SET) \ +#define HS(E_ANTI, E_ANTI_BB, E_ANTI_START_BB, IN_SET, \ + E, E_BB, E_START_BB, OUT_SET) \ do \ { \ /* Calculate of predecessor_outs. 
*/ \ bitmap_zero (IN_SET[i]); \ - for (e = bb->E_ANTI; e; e = e->E_ANTI_NEXT) \ + FOR_EACH_EDGE (e, ei, bb->E_ANTI) \ { \ if (e->E_ANTI_BB == E_ANTI_START_BB) \ continue; \ @@ -3827,7 +3828,7 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, if (!changed) \ break; \ \ - for (e = bb->E; e; e = e->E_NEXT) \ + FOR_EACH_EDGE (e, ei, bb->E) \ { \ if (e->E_BB == E_START_BB || e->E_BB->index == i) \ continue; \ @@ -3838,7 +3839,7 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, SET_BIT (pending, e->E_BB->index); \ } \ \ - for (e = bb->E; e; e = e->E_NEXT) \ + FOR_EACH_EDGE (e, ei, bb->E) \ { \ if (e->E_BB == E_START_BB || e->E_BB->index == i) \ continue; \ @@ -3852,11 +3853,11 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, } while (0) if (dataflow->dir == DF_FORWARD) - HS (pred, pred_next, src, ENTRY_BLOCK_PTR, dataflow->in, - succ, succ_next, dest, EXIT_BLOCK_PTR, dataflow->out); + HS (preds, src, ENTRY_BLOCK_PTR, dataflow->in, + succs, dest, EXIT_BLOCK_PTR, dataflow->out); else - HS (succ, succ_next, dest, EXIT_BLOCK_PTR, dataflow->out, - pred, pred_next, src, ENTRY_BLOCK_PTR, dataflow->in); + HS (succs, dest, EXIT_BLOCK_PTR, dataflow->out, + preds, src, ENTRY_BLOCK_PTR, dataflow->in); } /* This function will perform iterative bitvector dataflow described by diff --git a/gcc/dominance.c b/gcc/dominance.c index bbb0b21484b..680c4561c9d 100644 --- a/gcc/dominance.c +++ b/gcc/dominance.c @@ -206,7 +206,8 @@ calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, /* We call this _only_ if bb is not already visited. */ edge e; TBB child_i, my_i = 0; - edge *stack; + edge_iterator *stack; + edge_iterator ei, einext; int sp; /* Start block (ENTRY_BLOCK_PTR for forward problem, EXIT_BLOCK for backward problem). */ @@ -214,19 +215,19 @@ calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, /* Ending block. */ basic_block ex_block; - stack = xmalloc ((n_basic_blocks + 3) * sizeof (edge)); + stack = xmalloc ((n_basic_blocks + 3) * sizeof (edge_iterator)); sp = 0; /* Initialize our border blocks, and the first edge. */ if (reverse) { - e = bb->pred; + ei = ei_start (bb->preds); en_block = EXIT_BLOCK_PTR; ex_block = ENTRY_BLOCK_PTR; } else { - e = bb->succ; + ei = ei_start (bb->succs); en_block = ENTRY_BLOCK_PTR; ex_block = EXIT_BLOCK_PTR; } @@ -238,9 +239,9 @@ calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, /* This loop traverses edges e in depth first manner, and fills the stack. */ - while (e) + while (!ei_end_p (ei)) { - edge e_next; + e = ei_edge (ei); /* Deduce from E the current and the next block (BB and BN), and the next edge. */ @@ -253,22 +254,22 @@ calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, with the next edge out of the current node. */ if (bn == ex_block || di->dfs_order[bn->index]) { - e = e->pred_next; + ei_next (&ei); continue; } bb = e->dest; - e_next = bn->pred; + einext = ei_start (bn->preds); } else { bn = e->dest; if (bn == ex_block || di->dfs_order[bn->index]) { - e = e->succ_next; + ei_next (&ei); continue; } bb = e->src; - e_next = bn->succ; + einext = ei_start (bn->succs); } gcc_assert (bn != en_block); @@ -283,13 +284,13 @@ calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, di->dfs_parent[child_i] = my_i; /* Save the current point in the CFG on the stack, and recurse. */ - stack[sp++] = e; - e = e_next; + stack[sp++] = ei; + ei = einext; } if (!sp) break; - e = stack[--sp]; + ei = stack[--sp]; /* OK. The edge-list was exhausted, meaning normally we would end the recursion. 
After returning from the recursive call, @@ -300,10 +301,7 @@ calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, the block not yet completed (the parent of the one above) in e->src. This could be used e.g. for computing the number of descendants or the tree depth. */ - if (reverse) - e = e->pred_next; - else - e = e->succ_next; + ei_next (&ei); } free (stack); } @@ -341,7 +339,7 @@ calc_dfs_tree (struct dom_info *di, enum cdi_direction reverse) FOR_EACH_BB_REVERSE (b) { - if (b->succ) + if (EDGE_COUNT (b->succs) > 0) { if (di->dfs_order[b->index] == 0) saw_unconnected = true; @@ -478,6 +476,8 @@ calc_idoms (struct dom_info *di, enum cdi_direction reverse) { TBB v, w, k, par; basic_block en_block; + edge_iterator ei, einext; + if (reverse) en_block = EXIT_BLOCK_PTR; else @@ -488,43 +488,38 @@ calc_idoms (struct dom_info *di, enum cdi_direction reverse) while (v > 1) { basic_block bb = di->dfs_to_bb[v]; - edge e, e_next; + edge e; par = di->dfs_parent[v]; k = v; + + ei = (reverse) ? ei_start (bb->succs) : ei_start (bb->preds); + if (reverse) { - e = bb->succ; - /* If this block has a fake edge to exit, process that first. */ if (bitmap_bit_p (di->fake_exit_edge, bb->index)) { - e_next = e; + einext = ei; + einext.index = 0; goto do_fake_exit_edge; } } - else - e = bb->pred; /* Search all direct predecessors for the smallest node with a path to them. That way we have the smallest node with also a path to us only over nodes behind us. In effect we search for our semidominator. */ - for (; e ; e = e_next) + while (!ei_end_p (ei)) { TBB k1; basic_block b; - if (reverse) - { - b = e->dest; - e_next = e->succ_next; - } - else - { - b = e->src; - e_next = e->pred_next; - } + e = ei_edge (ei); + b = (reverse) ? e->dest : e->src; + einext = ei; + ei_next (&einext); + if (b == en_block) { do_fake_exit_edge: @@ -539,6 +534,8 @@ calc_idoms (struct dom_info *di, enum cdi_direction reverse) k1 = di->key[eval (di, k1)]; if (k1 < k) k = k1; + + ei = einext; } di->key[v] = k; @@ -870,12 +867,13 @@ recount_dominator (enum cdi_direction dir, basic_block bb) { basic_block dom_bb = NULL; edge e; + edge_iterator ei; gcc_assert (dom_computed[dir]); if (dir == CDI_DOMINATORS) { - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { /* Ignore the predecessors that either are not reachable from the entry block, or whose dominator was not determined yet. */ @@ -888,7 +886,7 @@ recount_dominator (enum cdi_direction dir, basic_block bb) } else { - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!dominated_by_p (dir, e->dest, bb)) dom_bb = nearest_common_dominator (dir, dom_bb, e->dest); diff --git a/gcc/domwalk.c b/gcc/domwalk.c index 7ac7ecf4f3e..15b1dff82db 100644 --- a/gcc/domwalk.c +++ b/gcc/domwalk.c @@ -201,7 +201,7 @@ walk_dominator_tree (struct dom_walk_data *walk_data, basic_block bb) { /* The destination block may have become unreachable, in which case there's no point in optimizing it. */ - if (dest->pred) + if (EDGE_COUNT (dest->preds) > 0) walk_dominator_tree (walk_data, dest); } diff --git a/gcc/except.c b/gcc/except.c index 6b3a1e2f2ae..f6d6dd1666c 100644 --- a/gcc/except.c +++ b/gcc/except.c @@ -1449,13 +1449,16 @@ emit_to_new_bb_before (rtx seq, rtx insn) rtx last; basic_block bb; edge e; + edge_iterator ei; /* If there happens to be an fallthru edge (possibly created by cleanup_cfg call), we don't want it to go into newly created landing pad or other EH construct. 
*/ - for (e = BLOCK_FOR_INSN (insn)->pred; e; e = e->pred_next) + for (ei = ei_start (BLOCK_FOR_INSN (insn)->preds); (e = ei_safe_edge (ei)); ) if (e->flags & EDGE_FALLTHRU) force_nonfallthru (e); + else + ei_next (&ei); last = emit_insn_before (seq, insn); if (BARRIER_P (last)) last = PREV_INSN (last); @@ -1623,8 +1626,8 @@ connect_post_landing_pads (void) emit_jump (outer->post_landing_pad); src = BLOCK_FOR_INSN (region->resume); dest = BLOCK_FOR_INSN (outer->post_landing_pad); - while (src->succ) - remove_edge (src->succ); + while (EDGE_COUNT (src->succs) > 0) + remove_edge (EDGE_SUCC (src, 0)); e = make_edge (src, dest, 0); e->probability = REG_BR_PROB_BASE; e->count = src->count; @@ -1991,10 +1994,10 @@ sjlj_emit_function_enter (rtx dispatch_label) || NOTE_LINE_NUMBER (fn_begin) == NOTE_INSN_BASIC_BLOCK)) break; if (NOTE_LINE_NUMBER (fn_begin) == NOTE_INSN_FUNCTION_BEG) - insert_insn_on_edge (seq, ENTRY_BLOCK_PTR->succ); + insert_insn_on_edge (seq, EDGE_SUCC (ENTRY_BLOCK_PTR, 0)); else { - rtx last = BB_END (ENTRY_BLOCK_PTR->succ->dest); + rtx last = BB_END (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest); for (; ; fn_begin = NEXT_INSN (fn_begin)) if ((NOTE_P (fn_begin) && NOTE_LINE_NUMBER (fn_begin) == NOTE_INSN_FUNCTION_BEG) @@ -2018,6 +2021,7 @@ sjlj_emit_function_exit (void) { rtx seq; edge e; + edge_iterator ei; start_sequence (); @@ -2031,7 +2035,7 @@ sjlj_emit_function_exit (void) post-dominates all can_throw_internal instructions. This is the last possible moment. */ - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) if (e->flags & EDGE_FALLTHRU) break; if (e) @@ -2198,16 +2202,18 @@ finish_eh_generation (void) commit_edge_insertions (); FOR_EACH_BB (bb) { - edge e, next; + edge e; + edge_iterator ei; bool eh = false; - for (e = bb->succ; e; e = next) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - next = e->succ_next; if (e->flags & EDGE_EH) { remove_edge (e); eh = true; } + else + ei_next (&ei); } if (eh) rtl_make_eh_edge (NULL, bb, BB_END (bb)); diff --git a/gcc/final.c b/gcc/final.c index 04787e46d1c..493f63ddfeb 100644 --- a/gcc/final.c +++ b/gcc/final.c @@ -677,6 +677,7 @@ compute_alignments (void) rtx label = BB_HEAD (bb); int fallthru_frequency = 0, branch_frequency = 0, has_fallthru = 0; edge e; + edge_iterator ei; if (!LABEL_P (label) || probably_never_executed_bb_p (bb)) @@ -684,7 +685,7 @@ compute_alignments (void) max_log = LABEL_ALIGN (label); max_skip = LABEL_ALIGN_MAX_SKIP; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->flags & EDGE_FALLTHRU) has_fallthru = 1, fallthru_frequency += EDGE_FREQUENCY (e); diff --git a/gcc/flow.c b/gcc/flow.c index 1cabde3c874..4914d72a4a0 100644 --- a/gcc/flow.c +++ b/gcc/flow.c @@ -1091,6 +1091,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) int rescan, changed; basic_block bb; edge e; + edge_iterator ei; bb = *qhead++; if (qhead == qend) @@ -1100,8 +1101,8 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) /* Begin by propagating live_at_start from the successor blocks. */ CLEAR_REG_SET (new_live_at_end); - if (bb->succ) - for (e = bb->succ; e; e = e->succ_next) + if (EDGE_COUNT (bb->succs) > 0) + FOR_EACH_EDGE (e, ei, bb->succs) { basic_block sb = e->dest; @@ -1257,7 +1258,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) /* Queue all predecessors of BB so that we may re-examine their live_at_end. 
*/ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { basic_block pb = e->src; if (pb->aux == NULL) @@ -1362,8 +1363,9 @@ initialize_uninitialized_subregs (void) edge e; int reg, did_something = 0; find_regno_partial_param param; + edge_iterator ei; - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { basic_block bb = e->dest; regset map = bb->global_live_at_start; @@ -1827,19 +1829,19 @@ init_propagate_block_info (basic_block bb, regset live, regset local_set, int i; /* Identify the successor blocks. */ - bb_true = bb->succ->dest; - if (bb->succ->succ_next != NULL) + bb_true = EDGE_SUCC (bb, 0)->dest; + if (EDGE_COUNT (bb->succs) > 1) { - bb_false = bb->succ->succ_next->dest; + bb_false = EDGE_SUCC (bb, 1)->dest; - if (bb->succ->flags & EDGE_FALLTHRU) + if (EDGE_SUCC (bb, 0)->flags & EDGE_FALLTHRU) { basic_block t = bb_false; bb_false = bb_true; bb_true = t; } else - gcc_assert (bb->succ->succ_next->flags & EDGE_FALLTHRU); + gcc_assert (EDGE_SUCC (bb, 1)->flags & EDGE_FALLTHRU); } else { @@ -1921,9 +1923,9 @@ init_propagate_block_info (basic_block bb, regset live, regset local_set, && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))) && (flags & PROP_SCAN_DEAD_STORES) - && (bb->succ == NULL - || (bb->succ->succ_next == NULL - && bb->succ->dest == EXIT_BLOCK_PTR + && (EDGE_COUNT (bb->succs) == 0 + || (EDGE_COUNT (bb->succs) == 1 + && EDGE_SUCC (bb, 0)->dest == EXIT_BLOCK_PTR && ! current_function_calls_eh_return))) { rtx insn, set; diff --git a/gcc/function.c b/gcc/function.c index a7613e13908..0545b05a5cd 100644 --- a/gcc/function.c +++ b/gcc/function.c @@ -4956,6 +4956,7 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) #if defined (HAVE_epilogue) || defined(HAVE_return) rtx epilogue_end = NULL_RTX; #endif + edge_iterator ei; #ifdef HAVE_prologue if (HAVE_prologue) @@ -4975,16 +4976,16 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) /* Can't deal with multiple successors of the entry block at the moment. Function should always have at least one entry point. */ - gcc_assert (ENTRY_BLOCK_PTR->succ && !ENTRY_BLOCK_PTR->succ->succ_next); + gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1); - insert_insn_on_edge (seq, ENTRY_BLOCK_PTR->succ); + insert_insn_on_edge (seq, EDGE_SUCC (ENTRY_BLOCK_PTR, 0)); inserted = 1; } #endif /* If the exit block has no non-fake predecessors, we don't need an epilogue. */ - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) if ((e->flags & EDGE_FAKE) == 0) break; if (e == NULL) @@ -5000,10 +5001,9 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) emit (conditional) return instructions. 
*/ basic_block last; - edge e_next; rtx label; - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) if (e->flags & EDGE_FALLTHRU) break; if (e == NULL) @@ -5021,6 +5021,7 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) if (BB_HEAD (last) == label && LABEL_P (label)) { + edge_iterator ei2; rtx epilogue_line_note = NULL_RTX; /* Locate the line number associated with the closing brace, @@ -5034,18 +5035,23 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) break; } - for (e = last->pred; e; e = e_next) + for (ei2 = ei_start (last->preds); (e = ei_safe_edge (ei2)); ) { basic_block bb = e->src; rtx jump; - e_next = e->pred_next; if (bb == ENTRY_BLOCK_PTR) - continue; + { + ei_next (&ei2); + continue; + } jump = BB_END (bb); if (!JUMP_P (jump) || JUMP_LABEL (jump) != label) - continue; + { + ei_next (&ei2); + continue; + } /* If we have an unconditional jump, we can replace that with a simple return instruction. */ @@ -5060,16 +5066,25 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) else if (condjump_p (jump)) { if (! redirect_jump (jump, 0, 0)) - continue; + { + ei_next (&ei2); + continue; + } /* If this block has only one successor, it both jumps and falls through to the fallthru block, so we can't delete the edge. */ - if (bb->succ->succ_next == NULL) - continue; + if (EDGE_COUNT (bb->succs) == 1) + { + ei_next (&ei2); + continue; + } } else - continue; + { + ei_next (&ei2); + continue; + } /* Fix up the CFG for the successful change we just made. */ redirect_edge_succ (e, EXIT_BLOCK_PTR); @@ -5081,7 +5096,7 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) emit_barrier_after (BB_END (last)); emit_return_into_block (last, epilogue_line_note); epilogue_end = BB_END (last); - last->succ->flags &= ~EDGE_FALLTHRU; + EDGE_SUCC (last, 0)->flags &= ~EDGE_FALLTHRU; goto epilogue_done; } } @@ -5091,7 +5106,7 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) There really shouldn't be a mixture -- either all should have been converted or none, however... */ - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) if (e->flags & EDGE_FALLTHRU) break; if (e == NULL) @@ -5152,7 +5167,7 @@ epilogue_done: #ifdef HAVE_sibcall_epilogue /* Emit sibling epilogues before any sibling call sites. */ - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + for (ei = ei_start (EXIT_BLOCK_PTR->preds); (e = ei_safe_edge (ei)); ) { basic_block bb = e->src; rtx insn = BB_END (bb); @@ -5161,7 +5176,10 @@ epilogue_done: if (!CALL_P (insn) || ! SIBLING_CALL_P (insn)) - continue; + { + ei_next (&ei); + continue; + } start_sequence (); emit_insn (gen_sibcall_epilogue ()); @@ -5176,6 +5194,7 @@ epilogue_done: i = PREV_INSN (insn); newinsn = emit_insn_before (seq, insn); + ei_next (&ei); } #endif diff --git a/gcc/gcse.c b/gcc/gcse.c index daac742b8c6..9250f128d1f 100644 --- a/gcc/gcse.c +++ b/gcc/gcse.c @@ -3400,7 +3400,7 @@ find_implicit_sets (void) count = 0; FOR_EACH_BB (bb) /* Check for more than one successor. */ - if (bb->succ && bb->succ->succ_next) + if (EDGE_COUNT (bb->succs) > 1) { cond = fis_get_condition (BB_END (bb)); @@ -3413,7 +3413,7 @@ find_implicit_sets (void) dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest : FALLTHRU_EDGE (bb)->dest; - if (dest && ! 
dest->pred->pred_next + if (dest && EDGE_COUNT (dest->preds) == 1 && dest != EXIT_BLOCK_PTR) { new = gen_rtx_SET (VOIDmode, XEXP (cond, 0), @@ -3570,9 +3570,11 @@ static int bypass_block (basic_block bb, rtx setcc, rtx jump) { rtx insn, note; - edge e, enext, edest; + edge e, edest; int i, change; int may_be_loop_header; + unsigned removed_p; + edge_iterator ei; insn = (setcc != NULL) ? setcc : jump; @@ -3584,7 +3586,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) find_used_regs (&XEXP (note, 0), NULL); may_be_loop_header = false; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (e->flags & EDGE_DFS_BACK) { may_be_loop_header = true; @@ -3592,22 +3594,32 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) } change = 0; - for (e = bb->pred; e; e = enext) + for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ) { - enext = e->pred_next; + removed_p = 0; + if (e->flags & EDGE_COMPLEX) - continue; + { + ei_next (&ei); + continue; + } /* We can't redirect edges from new basic blocks. */ if (e->src->index >= bypass_last_basic_block) - continue; + { + ei_next (&ei); + continue; + } /* The irreducible loops created by redirecting of edges entering the loop from outside would decrease effectiveness of some of the following optimizations, so prevent this. */ if (may_be_loop_header && !(e->flags & EDGE_DFS_BACK)) - continue; + { + ei_next (&ei); + continue; + } for (i = 0; i < reg_use_count; i++) { @@ -3651,9 +3663,11 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) } else if (GET_CODE (new) == LABEL_REF) { + edge_iterator ei2; + dest = BLOCK_FOR_INSN (XEXP (new, 0)); /* Don't bypass edges containing instructions. */ - for (edest = bb->succ; edest; edest = edest->succ_next) + FOR_EACH_EDGE (edest, ei2, bb->succs) if (edest->dest == dest && edest->insns.r) { dest = NULL; @@ -3670,7 +3684,9 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))) { edge e2; - for (e2 = e->src->succ; e2; e2 = e2->succ_next) + edge_iterator ei2; + + FOR_EACH_EDGE (e2, ei2, e->src->succs) if (e2->dest == dest) { dest = NULL; @@ -3704,9 +3720,12 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) e->src->index, old_dest->index, dest->index); } change = 1; + removed_p = 1; break; } } + if (!removed_p) + ei_next (&ei); } return change; } @@ -3739,7 +3758,7 @@ bypass_conditional_jumps (void) EXIT_BLOCK_PTR, next_bb) { /* Check for more than one predecessor. */ - if (bb->pred && bb->pred->pred_next) + if (EDGE_COUNT (bb->preds) > 1) { setcc = NULL_RTX; for (insn = BB_HEAD (bb); @@ -3886,12 +3905,13 @@ compute_pre_data (void) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; /* If the current block is the destination of an abnormal edge, we kill all trapping expressions because we won't be able to properly place the instruction on the edge. So make them neither anticipatable nor transparent. This is fairly conservative. 
*/ - for (e = bb->pred; e ; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (e->flags & EDGE_ABNORMAL) { sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr); @@ -3931,8 +3951,9 @@ static int pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited) { edge pred; - - for (pred = bb->pred; pred != NULL; pred = pred->pred_next) + edge_iterator ei; + + FOR_EACH_EDGE (pred, ei, bb->preds) { basic_block pred_bb = pred->src; @@ -4051,7 +4072,8 @@ insert_insn_end_bb (struct expr *expr, basic_block bb, int pre) if (JUMP_P (insn) || (NONJUMP_INSN_P (insn) - && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL)))) + && (EDGE_COUNT (bb->succs) > 1 + || EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL))) { #ifdef HAVE_cc0 rtx note; @@ -4092,7 +4114,7 @@ insert_insn_end_bb (struct expr *expr, basic_block bb, int pre) /* Likewise if the last insn is a call, as will happen in the presence of exception handling. */ else if (CALL_P (insn) - && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL))) + && (EDGE_COUNT (bb->succs) > 1 || EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL)) { /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers, we search backward and place the instructions before the first @@ -4810,6 +4832,7 @@ static int hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited) { edge pred; + edge_iterator ei; int visited_allocated_locally = 0; @@ -4819,7 +4842,7 @@ hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, visited = xcalloc (last_basic_block, 1); } - for (pred = bb->pred; pred != NULL; pred = pred->pred_next) + FOR_EACH_EDGE (pred, ei, bb->preds) { basic_block pred_bb = pred->src; @@ -6188,6 +6211,7 @@ insert_store (struct ls_expr * expr, edge e) rtx reg, insn; basic_block bb; edge tmp; + edge_iterator ei; /* We did all the deleted before this insert, so if we didn't delete a store, then we haven't set the reaching reg yet either. */ @@ -6204,7 +6228,7 @@ insert_store (struct ls_expr * expr, edge e) insert it at the start of the BB, and reset the insert bits on the other edges so we don't try to insert it on the other edges. */ bb = e->dest; - for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next) + FOR_EACH_EDGE (tmp, ei, e->dest->preds) if (!(tmp->flags & EDGE_FAKE)) { int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest); @@ -6218,7 +6242,7 @@ insert_store (struct ls_expr * expr, edge e) insertion vector for these edges, and insert at the start of the BB. */ if (!tmp && bb != EXIT_BLOCK_PTR) { - for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next) + FOR_EACH_EDGE (tmp, ei, e->dest->preds) { int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest); RESET_BIT (pre_insert_map[index], expr->index); @@ -6256,33 +6280,40 @@ insert_store (struct ls_expr * expr, edge e) static void remove_reachable_equiv_notes (basic_block bb, struct ls_expr *smexpr) { - edge *stack = xmalloc (sizeof (edge) * n_basic_blocks), act; + edge_iterator *stack, ei; + int sp; + edge act; sbitmap visited = sbitmap_alloc (last_basic_block); - int stack_top = 0; rtx last, insn, note; rtx mem = smexpr->pattern; + stack = xmalloc (sizeof (edge_iterator) * n_basic_blocks); + sp = 0; + ei = ei_start (bb->succs); + sbitmap_zero (visited); - act = bb->succ; + act = (EDGE_COUNT (ei.container) > 0 ? 
EDGE_I (ei.container, 0) : NULL); while (1) { if (!act) { - if (!stack_top) + if (!sp) { free (stack); sbitmap_free (visited); return; } - act = stack[--stack_top]; + act = ei_edge (stack[--sp]); } bb = act->dest; if (bb == EXIT_BLOCK_PTR || TEST_BIT (visited, bb->index)) { - act = act->succ_next; + if (!ei_end_p (ei)) + ei_next (&ei); + act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL; continue; } SET_BIT (visited, bb->index); @@ -6310,12 +6341,17 @@ remove_reachable_equiv_notes (basic_block bb, struct ls_expr *smexpr) INSN_UID (insn)); remove_note (insn, note); } - act = act->succ_next; - if (bb->succ) + + if (!ei_end_p (ei)) + ei_next (&ei); + act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL; + + if (EDGE_COUNT (bb->succs) > 0) { if (act) - stack[stack_top++] = act; - act = bb->succ; + stack[sp++] = ei; + ei = ei_start (bb->succs); + act = (EDGE_COUNT (ei.container) > 0 ? EDGE_I (ei.container, 0) : NULL); } } } diff --git a/gcc/global.c b/gcc/global.c index 4398e30b377..424bdad73f5 100644 --- a/gcc/global.c +++ b/gcc/global.c @@ -748,8 +748,9 @@ global_conflicts (void) regs live across such edges. */ { edge e; + edge_iterator ei; - for (e = b->pred; e ; e = e->pred_next) + FOR_EACH_EDGE (e, ei, b->preds) if (e->flags & EDGE_ABNORMAL) break; @@ -2339,12 +2340,14 @@ calculate_reg_pav (void) sbitmap_zero (wset); for (i = 0; i < nel; i++) { + edge_iterator ei; + bb = bb_array [i]; changed_p = 0; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) changed_p = modify_bb_reg_pav (bb, e->src, changed_p); if (changed_p) - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { succ = e->dest; if (succ->index != EXIT_BLOCK && !TEST_BIT (wset, succ->index)) diff --git a/gcc/graph.c b/gcc/graph.c index 8fa2e4fe2db..cd232db985a 100644 --- a/gcc/graph.c +++ b/gcc/graph.c @@ -308,6 +308,7 @@ print_rtl_graph_with_bb (const char *base, rtx rtx_first) if ((i = end[INSN_UID (tmp_rtx)]) >= 0) { edge e; + edge_iterator ei; bb = BASIC_BLOCK (i); @@ -316,7 +317,7 @@ print_rtl_graph_with_bb (const char *base, rtx rtx_first) /* Now specify the edges to all the successors of this basic block. */ - for (e = bb->succ; e ; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest != EXIT_BLOCK_PTR) { diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c index c7c053113f4..709a6756f4c 100644 --- a/gcc/ifcvt.c +++ b/gcc/ifcvt.c @@ -65,8 +65,8 @@ #define MAX_CONDITIONAL_EXECUTE (BRANCH_COST + 1) #endif -#define NULL_EDGE ((struct edge_def *)NULL) -#define NULL_BLOCK ((struct basic_block_def *)NULL) +#define NULL_EDGE ((edge) NULL) +#define NULL_BLOCK ((basic_block) NULL) /* # of IF-THEN or IF-THEN-ELSE blocks we looked at */ static int num_possible_if_blocks; @@ -126,7 +126,8 @@ mark_loop_exit_edges (void) { FOR_EACH_BB (bb) { - for (e = bb->succ; e; e = e->succ_next) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->succs) { if (find_common_loop (bb->loop_father, e->dest->loop_father) != bb->loop_father) @@ -249,11 +250,11 @@ static basic_block block_fallthru (basic_block bb) { edge e; + edge_iterator ei; - for (e = bb->succ; - e != NULL_EDGE && (e->flags & EDGE_FALLTHRU) == 0; - e = e->succ_next) - ; + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->flags & EDGE_FALLTHRU) + break; return (e) ? e->dest : NULL_BLOCK; } @@ -2226,7 +2227,7 @@ merge_if_block (struct ce_if_block * ce_info) /* The outgoing edge for the current COMBO block should already be correct. Verify this. 
*/ - if (combo_bb->succ == NULL_EDGE) + if (EDGE_COUNT (combo_bb->succs) == 0) { if (find_reg_note (last, REG_NORETURN, NULL)) ; @@ -2242,11 +2243,11 @@ merge_if_block (struct ce_if_block * ce_info) blocks taking us to our final destination. */ else if (JUMP_P (last)) ; - else if (combo_bb->succ->dest == EXIT_BLOCK_PTR + else if (EDGE_SUCC (combo_bb, 0)->dest == EXIT_BLOCK_PTR && CALL_P (last) && SIBLING_CALL_P (last)) ; - else if ((combo_bb->succ->flags & EDGE_EH) + else if ((EDGE_SUCC (combo_bb, 0)->flags & EDGE_EH) && can_throw_internal (last)) ; else @@ -2259,8 +2260,7 @@ merge_if_block (struct ce_if_block * ce_info) is more than one remaining edge, it must come from elsewhere. There may be zero incoming edges if the THEN block didn't actually join back up (as with a call to abort). */ - else if ((join_bb->pred == NULL - || join_bb->pred->pred_next == NULL) + else if (EDGE_COUNT (join_bb->preds) < 2 && join_bb != EXIT_BLOCK_PTR) { /* We can merge the JOIN. */ @@ -2277,13 +2277,13 @@ merge_if_block (struct ce_if_block * ce_info) /* The outgoing edge for the current COMBO block should already be correct. Verify this. */ - if (combo_bb->succ->succ_next != NULL_EDGE - || combo_bb->succ->dest != join_bb) + if (EDGE_COUNT (combo_bb->succs) > 1 + || EDGE_SUCC (combo_bb, 0)->dest != join_bb) abort (); /* Remove the jump and cruft from the end of the COMBO block. */ if (join_bb != EXIT_BLOCK_PTR) - tidy_fallthru_edge (combo_bb->succ); + tidy_fallthru_edge (EDGE_SUCC (combo_bb, 0)); } num_updated_if_blocks++; @@ -2302,11 +2302,12 @@ find_if_header (basic_block test_bb, int pass) edge else_edge; /* The kind of block we're looking for has exactly two successors. */ - if ((then_edge = test_bb->succ) == NULL_EDGE - || (else_edge = then_edge->succ_next) == NULL_EDGE - || else_edge->succ_next != NULL_EDGE) + if (EDGE_COUNT (test_bb->succs) != 2) return NULL; + then_edge = EDGE_SUCC (test_bb, 0); + else_edge = EDGE_SUCC (test_bb, 1); + /* Neither edge should be abnormal. */ if ((then_edge->flags & EDGE_COMPLEX) || (else_edge->flags & EDGE_COMPLEX)) @@ -2378,17 +2379,16 @@ block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb) rtx insn; rtx end; int n_insns = 0; + edge_iterator ei; if (!cur_bb || !target_bb) return -1; /* If no edges, obviously it doesn't jump or fallthru. */ - if (cur_bb->succ == NULL_EDGE) + if (EDGE_COUNT (cur_bb->succs) == 0) return FALSE; - for (cur_edge = cur_bb->succ; - cur_edge != NULL_EDGE; - cur_edge = cur_edge->succ_next) + FOR_EACH_EDGE (cur_edge, ei, cur_bb->succs) { if (cur_edge->flags & EDGE_COMPLEX) /* Anything complex isn't what we want. */ @@ -2445,12 +2445,11 @@ find_if_block (struct ce_if_block * ce_info) basic_block then_bb = ce_info->then_bb; basic_block else_bb = ce_info->else_bb; basic_block join_bb = NULL_BLOCK; - edge then_succ = then_bb->succ; - edge else_succ = else_bb->succ; int then_predecessors; int else_predecessors; edge cur_edge; basic_block next; + edge_iterator ei; ce_info->last_test_bb = test_bb; @@ -2458,11 +2457,10 @@ find_if_block (struct ce_if_block * ce_info) were && tests (which jump to the else block) or || tests (which jump to the then block). 
*/ if (HAVE_conditional_execution && reload_completed - && test_bb->pred != NULL_EDGE - && test_bb->pred->pred_next == NULL_EDGE - && test_bb->pred->flags == EDGE_FALLTHRU) + && EDGE_COUNT (test_bb->preds) == 1 + && EDGE_PRED (test_bb, 0)->flags == EDGE_FALLTHRU) { - basic_block bb = test_bb->pred->src; + basic_block bb = EDGE_PRED (test_bb, 0)->src; basic_block target_bb; int max_insns = MAX_CONDITIONAL_EXECUTE; int n_insns; @@ -2495,10 +2493,10 @@ find_if_block (struct ce_if_block * ce_info) total_insns += n_insns; blocks++; - if (bb->pred == NULL_EDGE || bb->pred->pred_next != NULL_EDGE) + if (EDGE_COUNT (bb->preds) != 1) break; - bb = bb->pred->src; + bb = EDGE_PRED (bb, 0)->src; n_insns = block_jumps_and_fallthru_p (bb, target_bb); } while (n_insns >= 0 && (total_insns + n_insns) <= max_insns); @@ -2515,9 +2513,7 @@ find_if_block (struct ce_if_block * ce_info) /* Count the number of edges the THEN and ELSE blocks have. */ then_predecessors = 0; - for (cur_edge = then_bb->pred; - cur_edge != NULL_EDGE; - cur_edge = cur_edge->pred_next) + FOR_EACH_EDGE (cur_edge, ei, then_bb->preds) { then_predecessors++; if (cur_edge->flags & EDGE_COMPLEX) @@ -2525,9 +2521,7 @@ find_if_block (struct ce_if_block * ce_info) } else_predecessors = 0; - for (cur_edge = else_bb->pred; - cur_edge != NULL_EDGE; - cur_edge = cur_edge->pred_next) + FOR_EACH_EDGE (cur_edge, ei, else_bb->preds) { else_predecessors++; if (cur_edge->flags & EDGE_COMPLEX) @@ -2540,9 +2534,9 @@ find_if_block (struct ce_if_block * ce_info) return FALSE; /* The THEN block of an IF-THEN combo must have zero or one successors. */ - if (then_succ != NULL_EDGE - && (then_succ->succ_next != NULL_EDGE - || (then_succ->flags & EDGE_COMPLEX) + if (EDGE_COUNT (then_bb->succs) > 0 + && (EDGE_COUNT (then_bb->succs) > 1 + || (EDGE_SUCC (then_bb, 0)->flags & EDGE_COMPLEX) || (flow2_completed && tablejump_p (BB_END (then_bb), NULL, NULL)))) return FALSE; @@ -2552,9 +2546,9 @@ find_if_block (struct ce_if_block * ce_info) Check for the last insn of the THEN block being an indirect jump, which is listed as not having any successors, but confuses the rest of the CE code processing. ??? we should fix this in the future. */ - if (then_succ == NULL) + if (EDGE_COUNT (then_bb->succs) == 0) { - if (else_bb->pred->pred_next == NULL_EDGE) + if (EDGE_COUNT (else_bb->preds) == 1) { rtx last_insn = BB_END (then_bb); @@ -2577,7 +2571,7 @@ find_if_block (struct ce_if_block * ce_info) /* If the THEN block's successor is the other edge out of the TEST block, then we have an IF-THEN combo without an ELSE. */ - else if (then_succ->dest == else_bb) + else if (EDGE_SUCC (then_bb, 0)->dest == else_bb) { join_bb = else_bb; else_bb = NULL_BLOCK; @@ -2586,13 +2580,12 @@ find_if_block (struct ce_if_block * ce_info) /* If the THEN and ELSE block meet in a subsequent block, and the ELSE has exactly one predecessor and one successor, and the outgoing edge is not complex, then we have an IF-THEN-ELSE combo. */ - else if (else_succ != NULL_EDGE - && then_succ->dest == else_succ->dest - && else_bb->pred->pred_next == NULL_EDGE - && else_succ->succ_next == NULL_EDGE - && ! (else_succ->flags & EDGE_COMPLEX) + else if (EDGE_COUNT (else_bb->succs) == 1 + && EDGE_SUCC (then_bb, 0)->dest == EDGE_SUCC (else_bb, 0)->dest + && EDGE_COUNT (else_bb->preds) == 1 + && ! (EDGE_SUCC (else_bb, 0)->flags & EDGE_COMPLEX) && ! 
(flow2_completed && tablejump_p (BB_END (else_bb), NULL, NULL))) - join_bb = else_succ->dest; + join_bb = EDGE_SUCC (else_bb, 0)->dest; /* Otherwise it is not an IF-THEN or IF-THEN-ELSE combination. */ else @@ -2726,7 +2719,7 @@ find_cond_trap (basic_block test_bb, edge then_edge, edge else_edge) /* Delete the trap block if possible. */ remove_edge (trap_bb == then_bb ? then_edge : else_edge); - if (trap_bb->pred == NULL) + if (EDGE_COUNT (trap_bb->preds) == 0) delete_basic_block (trap_bb); /* If the non-trap block and the test are now adjacent, merge them. @@ -2771,7 +2764,7 @@ block_has_only_trap (basic_block bb) return NULL_RTX; /* The block must have no successors. */ - if (bb->succ) + if (EDGE_COUNT (bb->succs) > 0) return NULL_RTX; /* The only instruction in the THEN block must be the trap. */ @@ -2866,7 +2859,6 @@ find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge) { basic_block then_bb = then_edge->dest; basic_block else_bb = else_edge->dest, new_bb; - edge then_succ = then_bb->succ; int then_bb_index, bb_cost; /* If we are partitioning hot/cold basic blocks, we don't want to @@ -2888,15 +2880,15 @@ find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge) return FALSE; /* THEN has one successor. */ - if (!then_succ || then_succ->succ_next != NULL) + if (EDGE_COUNT (then_bb->succs) != 1) return FALSE; /* THEN does not fall through, but is not strange either. */ - if (then_succ->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)) + if (EDGE_SUCC (then_bb, 0)->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)) return FALSE; /* THEN has one predecessor. */ - if (then_bb->pred->pred_next != NULL) + if (EDGE_COUNT (then_bb->preds) != 1) return FALSE; /* THEN must do something. */ @@ -2916,7 +2908,7 @@ find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge) /* Registers set are dead, or are predicable. */ if (! dead_or_predicable (test_bb, then_bb, else_bb, - then_bb->succ->dest, 1)) + EDGE_SUCC (then_bb, 0)->dest, 1)) return FALSE; /* Conversion went ok, including moving the insns and fixing up the @@ -2957,7 +2949,7 @@ find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge) { basic_block then_bb = then_edge->dest; basic_block else_bb = else_edge->dest; - edge else_succ = else_bb->succ; + edge else_succ; int bb_cost; rtx note; @@ -2980,15 +2972,17 @@ find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge) return FALSE; /* ELSE has one successor. */ - if (!else_succ || else_succ->succ_next != NULL) + if (EDGE_COUNT (else_bb->succs) != 1) return FALSE; + else + else_succ = EDGE_SUCC (else_bb, 0); /* ELSE outgoing edge is not complex. */ if (else_succ->flags & EDGE_COMPLEX) return FALSE; /* ELSE has one predecessor. */ - if (else_bb->pred->pred_next != NULL) + if (EDGE_COUNT (else_bb->preds) != 1) return FALSE; /* THEN is not EXIT. */ diff --git a/gcc/lambda-code.c b/gcc/lambda-code.c index c1eb476c665..cc6c9bcd04d 100644 --- a/gcc/lambda-code.c +++ b/gcc/lambda-code.c @@ -1868,7 +1868,7 @@ lambda_loopnest_to_gcc_loopnest (struct loop *old_loopnest, /* Create the new iv, and insert it's increment on the latch block. 
*/ - bb = temp->latch->pred->src; + bb = EDGE_PRED (temp->latch, 0)->src; bsi = bsi_last (bb); create_iv (newlowerbound, build_int_cst (integer_type_node, LL_STEP (newloop)), @@ -2282,7 +2282,7 @@ perfect_nestify (struct loops *loops, VEC_safe_push (tree, phis, PHI_ARG_DEF (phi, 0)); mark_for_rewrite (PHI_RESULT (phi)); } - e = redirect_edge_and_branch (preheaderbb->succ, headerbb); + e = redirect_edge_and_branch (EDGE_SUCC (preheaderbb, 0), headerbb); unmark_all_for_rewrite (); bb_ann (olddest)->phi_nodes = NULL; /* Add back the old exit phis. */ @@ -2294,7 +2294,7 @@ perfect_nestify (struct loops *loops, phiname = VEC_pop (tree, phis); phi = create_phi_node (phiname, preheaderbb); - add_phi_arg (&phi, def, preheaderbb->pred); + add_phi_arg (&phi, def, EDGE_PRED (preheaderbb, 0)); } nestify_update_pending_stmts (e); @@ -2332,7 +2332,7 @@ perfect_nestify (struct loops *loops, /* Create the new iv. */ ivvar = create_tmp_var (integer_type_node, "perfectiv"); add_referenced_tmp_var (ivvar); - bsi = bsi_last (newloop->latch->pred->src); + bsi = bsi_last (EDGE_PRED (newloop->latch, 0)->src); create_iv (VEC_index (tree, lbounds, 0), build_int_cst (integer_type_node, VEC_index (int, steps, 0)), diff --git a/gcc/lcm.c b/gcc/lcm.c index 3432332a06a..c3e5f93313a 100644 --- a/gcc/lcm.c +++ b/gcc/lcm.c @@ -102,6 +102,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, edge e; basic_block *worklist, *qin, *qout, *qend; unsigned int qlen; + edge_iterator ei; /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is @@ -126,7 +127,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, /* Mark blocks which are predecessors of the exit block so that we can easily identify them below. */ - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) e->src->aux = EXIT_BLOCK_PTR; /* Iterate until the worklist is empty. */ @@ -157,7 +158,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, /* If the in state of this block changed, then we need to add the predecessors of this block to the worklist if they are not already on the worklist. */ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (!e->src->aux && e->src != ENTRY_BLOCK_PTR) { *qin++ = e->src; @@ -251,6 +252,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, edge e; basic_block *worklist, *qin, *qout, *qend, bb; unsigned int qlen; + edge_iterator ei; num_edges = NUM_EDGES (edge_list); @@ -280,7 +282,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, do not want to be overly optimistic. Consider an outgoing edge from the entry block. That edge should always have a LATER value the same as EARLIEST for that edge. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) sbitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]); /* Add all the blocks to the worklist. This prevents an early exit from @@ -310,12 +312,12 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, /* Compute the intersection of LATERIN for each incoming edge to B. */ sbitmap_ones (laterin[bb->index]); - for (e = bb->pred; e != NULL; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) sbitmap_a_and_b (laterin[bb->index], laterin[bb->index], later[(size_t)e->aux]); /* Calculate LATER for all outgoing edges. 
*/ - for (e = bb->succ; e != NULL; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (sbitmap_union_of_diff_cg (later[(size_t) e->aux], earliest[(size_t) e->aux], laterin[e->src->index], @@ -336,7 +338,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, for the EXIT block. We allocated an extra entry in the LATERIN array for just this purpose. */ sbitmap_ones (laterin[last_basic_block]); - for (e = EXIT_BLOCK_PTR->pred; e != NULL; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) sbitmap_a_and_b (laterin[last_basic_block], laterin[last_basic_block], later[(size_t) e->aux]); @@ -478,6 +480,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, edge e; basic_block *worklist, *qin, *qout, *qend, bb; unsigned int qlen; + edge_iterator ei; /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is @@ -501,7 +504,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, /* Mark blocks which are successors of the entry block so that we can easily identify them below. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) e->dest->aux = ENTRY_BLOCK_PTR; /* Iterate until the worklist is empty. */ @@ -534,7 +537,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, /* If the out state of this block changed, then we need to add the successors of this block to the worklist if they are not already on the worklist. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR) { *qin++ = e->dest; @@ -604,6 +607,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, int num_edges, i; edge e; basic_block *worklist, *tos, bb; + edge_iterator ei; num_edges = NUM_EDGES (edge_list); @@ -624,7 +628,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, do not want to be overly optimistic. Consider an incoming edge to the exit block. That edge should always have a NEARER value the same as FARTHEST for that edge. */ - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) sbitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]); /* Add all the blocks to the worklist. This prevents an early exit @@ -644,12 +648,12 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, /* Compute the intersection of NEARER for each outgoing edge from B. */ sbitmap_ones (nearerout[bb->index]); - for (e = bb->succ; e != NULL; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) sbitmap_a_and_b (nearerout[bb->index], nearerout[bb->index], nearer[(size_t) e->aux]); /* Calculate NEARER for all incoming edges. */ - for (e = bb->pred; e != NULL; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (sbitmap_union_of_diff_cg (nearer[(size_t) e->aux], farthest[(size_t) e->aux], nearerout[e->dest->index], @@ -667,7 +671,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, for the ENTRY block. We allocated an extra entry in the NEAREROUT array for just this purpose. 
*/ sbitmap_ones (nearerout[last_basic_block]); - for (e = ENTRY_BLOCK_PTR->succ; e != NULL; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) sbitmap_a_and_b (nearerout[last_basic_block], nearerout[last_basic_block], nearer[(size_t) e->aux]); @@ -912,8 +916,9 @@ static void make_preds_opaque (basic_block b, int j) { edge e; + edge_iterator ei; - for (e = b->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, b->preds) { basic_block pb = e->src; diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c index 7b4f4d42b5f..1b5ca4dc6d2 100644 --- a/gcc/loop-doloop.c +++ b/gcc/loop-doloop.c @@ -358,11 +358,11 @@ doloop_modify (struct loop *loop, struct niter_desc *desc, /* Expand the condition testing the assumptions and if it does not pass, reset the count register to 0. */ add_test (XEXP (ass, 0), preheader, set_zero); - preheader->succ->flags &= ~EDGE_FALLTHRU; - cnt = preheader->succ->count; - preheader->succ->probability = 0; - preheader->succ->count = 0; - irr = preheader->succ->flags & EDGE_IRREDUCIBLE_LOOP; + EDGE_SUCC (preheader, 0)->flags &= ~EDGE_FALLTHRU; + cnt = EDGE_SUCC (preheader, 0)->count; + EDGE_SUCC (preheader, 0)->probability = 0; + EDGE_SUCC (preheader, 0)->count = 0; + irr = EDGE_SUCC (preheader, 0)->flags & EDGE_IRREDUCIBLE_LOOP; te = make_edge (preheader, new_preheader, EDGE_FALLTHRU | irr); te->probability = REG_BR_PROB_BASE; te->count = cnt; @@ -374,7 +374,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc, for (ass = XEXP (ass, 1); ass; ass = XEXP (ass, 1)) { bb = loop_split_edge_with (te, NULL_RTX); - te = bb->succ; + te = EDGE_SUCC (bb, 0); add_test (XEXP (ass, 0), bb, set_zero); make_edge (bb, set_zero, irr); } diff --git a/gcc/loop-init.c b/gcc/loop-init.c index 35fa12ea3e6..8db45ec0e40 100644 --- a/gcc/loop-init.c +++ b/gcc/loop-init.c @@ -35,6 +35,7 @@ loop_optimizer_init (FILE *dumpfile) { struct loops *loops = xcalloc (1, sizeof (struct loops)); edge e; + edge_iterator ei; static bool first_time = true; if (first_time) @@ -45,9 +46,12 @@ loop_optimizer_init (FILE *dumpfile) /* Avoid annoying special cases of edges going to exit block. */ - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) - if ((e->flags & EDGE_FALLTHRU) && e->src->succ->succ_next) + + for (ei = ei_start (EXIT_BLOCK_PTR->preds); (e = ei_safe_edge (ei)); ) + if ((e->flags & EDGE_FALLTHRU) && EDGE_COUNT (e->src->succs) > 1) split_edge (e); + else + ei_next (&ei); /* Find the loops. */ diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c index e0824e03ef5..21dc8b88144 100644 --- a/gcc/loop-invariant.c +++ b/gcc/loop-invariant.c @@ -219,6 +219,7 @@ find_exits (struct loop *loop, basic_block *body, bitmap may_exit, bitmap has_exit) { unsigned i; + edge_iterator ei; edge e; struct loop *outermost_exit = loop, *aexit; bool has_call = false; @@ -239,7 +240,7 @@ find_exits (struct loop *loop, basic_block *body, } } - for (e = body[i]->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, body[i]->succs) { if (flow_bb_inside_loop_p (loop, e->dest)) continue; diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c index ed06d1dbb1e..e4d757a0798 100644 --- a/gcc/loop-iv.c +++ b/gcc/loop-iv.c @@ -1782,6 +1782,8 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr) while (1) { + basic_block tmp_bb; + insn = BB_END (e->src); if (any_condjump_p (insn)) { @@ -1813,8 +1815,12 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr) } } - e = e->src->pred; - if (e->pred_next + /* This is a bit subtle. 
Store away e->src in tmp_bb, since we + modify `e' and this can invalidate the subsequent count of + e->src's predecessors by looking at the wrong block. */ + tmp_bb = e->src; + e = EDGE_PRED (tmp_bb, 0); + if (EDGE_COUNT (tmp_bb->preds) > 1 || e->src == ENTRY_BLOCK_PTR) break; } @@ -2493,7 +2499,7 @@ check_simple_exit (struct loop *loop, edge e, struct niter_desc *desc) { basic_block exit_bb; rtx condition, at; - edge ei; + edge ein; exit_bb = e->src; desc->simple_p = false; @@ -2510,18 +2516,18 @@ check_simple_exit (struct loop *loop, edge e, struct niter_desc *desc) if (!any_condjump_p (BB_END (exit_bb))) return; - ei = exit_bb->succ; - if (ei == e) - ei = ei->succ_next; + ein = EDGE_SUCC (exit_bb, 0); + if (ein == e) + ein = EDGE_SUCC (exit_bb, 1); desc->out_edge = e; - desc->in_edge = ei; + desc->in_edge = ein; /* Test whether the condition is suitable. */ - if (!(condition = get_condition (BB_END (ei->src), &at, false, false))) + if (!(condition = get_condition (BB_END (ein->src), &at, false, false))) return; - if (ei->flags & EDGE_FALLTHRU) + if (ein->flags & EDGE_FALLTHRU) { condition = reversed_condition (condition); if (!condition) @@ -2543,13 +2549,14 @@ find_simple_exit (struct loop *loop, struct niter_desc *desc) edge e; struct niter_desc act; bool any = false; + edge_iterator ei; desc->simple_p = false; body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { - for (e = body[i]->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, body[i]->succs) { if (flow_bb_inside_loop_p (loop, e->dest)) continue; diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c index f0e95dc7622..6ea0987e97e 100644 --- a/gcc/loop-unroll.c +++ b/gcc/loop-unroll.c @@ -454,7 +454,7 @@ peel_loop_completely (struct loops *loops, struct loop *loop) sbitmap wont_exit; unsigned HOST_WIDE_INT npeel; unsigned n_remove_edges, i; - edge *remove_edges, ei; + edge *remove_edges, ein; struct niter_desc *desc = get_simple_loop_desc (loop); struct split_ivs_info *si_info = NULL; @@ -495,12 +495,12 @@ peel_loop_completely (struct loops *loops, struct loop *loop) free (remove_edges); } - ei = desc->in_edge; + ein = desc->in_edge; free_simple_loop_desc (loop); /* Now remove the unreachable part of the last iteration and cancel the loop. */ - remove_path (loops, ei); + remove_path (loops, ein); if (dump_file) fprintf (dump_file, ";; Peeled loop completely, %d times\n", (int) npeel); @@ -748,15 +748,15 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) basic_block exit_block = desc->in_edge->src->rbi->copy; /* Find a new in and out edge; they are in the last copy we have made. 
*/ - if (exit_block->succ->dest == desc->out_edge->dest) + if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest) { - desc->out_edge = exit_block->succ; - desc->in_edge = exit_block->succ->succ_next; + desc->out_edge = EDGE_SUCC (exit_block, 0); + desc->in_edge = EDGE_SUCC (exit_block, 1); } else { - desc->out_edge = exit_block->succ->succ_next; - desc->in_edge = exit_block->succ; + desc->out_edge = EDGE_SUCC (exit_block, 1); + desc->in_edge = EDGE_SUCC (exit_block, 0); } } @@ -1008,11 +1008,11 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ, block_label (preheader), p, NULL_RTX); - swtch = loop_split_edge_with (swtch->pred, branch_code); + swtch = loop_split_edge_with (EDGE_PRED (swtch, 0), branch_code); set_immediate_dominator (CDI_DOMINATORS, preheader, swtch); - swtch->succ->probability = REG_BR_PROB_BASE - p; + EDGE_SUCC (swtch, 0)->probability = REG_BR_PROB_BASE - p; e = make_edge (swtch, preheader, - swtch->succ->flags & EDGE_IRREDUCIBLE_LOOP); + EDGE_SUCC (swtch, 0)->flags & EDGE_IRREDUCIBLE_LOOP); e->probability = p; } @@ -1025,11 +1025,11 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ, block_label (preheader), p, NULL_RTX); - swtch = loop_split_edge_with (swtch->succ, branch_code); + swtch = loop_split_edge_with (EDGE_SUCC (swtch, 0), branch_code); set_immediate_dominator (CDI_DOMINATORS, preheader, swtch); - swtch->succ->probability = REG_BR_PROB_BASE - p; + EDGE_SUCC (swtch, 0)->probability = REG_BR_PROB_BASE - p; e = make_edge (swtch, preheader, - swtch->succ->flags & EDGE_IRREDUCIBLE_LOOP); + EDGE_SUCC (swtch, 0)->flags & EDGE_IRREDUCIBLE_LOOP); e->probability = p; } @@ -1061,15 +1061,15 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) basic_block exit_block = desc->in_edge->src->rbi->copy; /* Find a new in and out edge; they are in the last copy we have made. */ - if (exit_block->succ->dest == desc->out_edge->dest) + if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest) { - desc->out_edge = exit_block->succ; - desc->in_edge = exit_block->succ->succ_next; + desc->out_edge = EDGE_SUCC (exit_block, 0); + desc->in_edge = EDGE_SUCC (exit_block, 1); } else { - desc->out_edge = exit_block->succ->succ_next; - desc->in_edge = exit_block->succ; + desc->out_edge = EDGE_SUCC (exit_block, 1); + desc->in_edge = EDGE_SUCC (exit_block, 0); } } diff --git a/gcc/loop-unswitch.c b/gcc/loop-unswitch.c index 2eb3396a759..49608151ff7 100644 --- a/gcc/loop-unswitch.c +++ b/gcc/loop-unswitch.c @@ -180,14 +180,14 @@ may_unswitch_on (basic_block bb, struct loop *loop, rtx *cinsn) enum machine_mode mode; /* BB must end in a simple conditional jump. */ - if (!bb->succ || !bb->succ->succ_next || bb->succ->succ_next->succ_next) + if (EDGE_COUNT (bb->succs) != 2) return NULL_RTX; if (!any_condjump_p (BB_END (bb))) return NULL_RTX; /* With branches inside loop. */ - if (!flow_bb_inside_loop_p (loop, bb->succ->dest) - || !flow_bb_inside_loop_p (loop, bb->succ->succ_next->dest)) + if (!flow_bb_inside_loop_p (loop, EDGE_SUCC (bb, 0)->dest) + || !flow_bb_inside_loop_p (loop, EDGE_SUCC (bb, 1)->dest)) return NULL_RTX; /* It must be executed just once each iteration (because otherwise we @@ -414,16 +414,15 @@ unswitch_loop (struct loops *loops, struct loop *loop, basic_block unswitch_on, /* Some sanity checking. 
*/ if (!flow_bb_inside_loop_p (loop, unswitch_on)) abort (); - if (!unswitch_on->succ || !unswitch_on->succ->succ_next || - unswitch_on->succ->succ_next->succ_next) + if (EDGE_COUNT (unswitch_on->succs) != 2) abort (); if (!just_once_each_iteration_p (loop, unswitch_on)) abort (); if (loop->inner) abort (); - if (!flow_bb_inside_loop_p (loop, unswitch_on->succ->dest)) + if (!flow_bb_inside_loop_p (loop, EDGE_SUCC (unswitch_on, 0)->dest)) abort (); - if (!flow_bb_inside_loop_p (loop, unswitch_on->succ->succ_next->dest)) + if (!flow_bb_inside_loop_p (loop, EDGE_SUCC (unswitch_on, 1)->dest)) abort (); entry = loop_preheader_edge (loop); @@ -444,7 +443,7 @@ unswitch_loop (struct loops *loops, struct loop *loop, basic_block unswitch_on, unswitch_on_alt = unswitch_on->rbi->copy; true_edge = BRANCH_EDGE (unswitch_on_alt); false_edge = FALLTHRU_EDGE (unswitch_on); - latch_edge = loop->latch->rbi->copy->succ; + latch_edge = EDGE_SUCC (loop->latch->rbi->copy, 0); /* Create a block with the condition. */ prob = true_edge->probability; @@ -463,19 +462,19 @@ unswitch_loop (struct loops *loops, struct loop *loop, basic_block unswitch_on, if (irred_flag) { switch_bb->flags |= BB_IRREDUCIBLE_LOOP; - switch_bb->succ->flags |= EDGE_IRREDUCIBLE_LOOP; - switch_bb->succ->succ_next->flags |= EDGE_IRREDUCIBLE_LOOP; + EDGE_SUCC (switch_bb, 0)->flags |= EDGE_IRREDUCIBLE_LOOP; + EDGE_SUCC (switch_bb, 1)->flags |= EDGE_IRREDUCIBLE_LOOP; } else { switch_bb->flags &= ~BB_IRREDUCIBLE_LOOP; - switch_bb->succ->flags &= ~EDGE_IRREDUCIBLE_LOOP; - switch_bb->succ->succ_next->flags &= ~EDGE_IRREDUCIBLE_LOOP; + EDGE_SUCC (switch_bb, 0)->flags &= ~EDGE_IRREDUCIBLE_LOOP; + EDGE_SUCC (switch_bb, 1)->flags &= ~EDGE_IRREDUCIBLE_LOOP; } /* Loopify from the copy of LOOP body, constructing the new loop. */ nloop = loopify (loops, latch_edge, - loop->header->rbi->copy->pred, switch_bb, true); + EDGE_PRED (loop->header->rbi->copy, 0), switch_bb, true); /* Remove branches that are now unreachable in new loops. */ remove_path (loops, true_edge); diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c index 1207c31aa4f..ea68597cb6c 100644 --- a/gcc/modulo-sched.c +++ b/gcc/modulo-sched.c @@ -671,9 +671,9 @@ generate_prolog_epilog (partial_schedule_ptr ps, rtx orig_loop_beg, rtx orig_loop_bct = NULL_RTX; /* Loop header edge. */ - e = ps->g->bb->pred; + e = EDGE_PRED (ps->g->bb, 0); if (e->src == ps->g->bb) - e = e->pred_next; + e = EDGE_PRED (ps->g->bb, 1); /* Generate the prolog, inserting its insns on the loop-entry edge. */ start_sequence (); @@ -726,9 +726,9 @@ generate_prolog_epilog (partial_schedule_ptr ps, rtx orig_loop_beg, loop_exit_label_insn = emit_label (loop_exit_label); } - e = ps->g->bb->succ; + e = EDGE_SUCC (ps->g->bb, 0); if (e->dest == ps->g->bb) - e = e->succ_next; + e = EDGE_SUCC (ps->g->bb, 1); e->insns.r = get_insns (); end_sequence (); @@ -742,7 +742,7 @@ generate_prolog_epilog (partial_schedule_ptr ps, rtx orig_loop_beg, basic_block epilog_bb = BLOCK_FOR_INSN (last_epilog_insn); basic_block precond_bb = BLOCK_FOR_INSN (precond_jump); basic_block orig_loop_bb = BLOCK_FOR_INSN (precond_exit_label_insn); - edge epilog_exit_edge = epilog_bb->succ; + edge epilog_exit_edge = EDGE_SUCC (epilog_bb, 0); /* Do loop preconditioning to take care of cases were the loop count is less than the stage count. Update the CFG properly. */ @@ -851,28 +851,25 @@ sms_schedule (FILE *dump_file) continue; /* Check if bb has two successors, one being itself. 
*/ - e = bb->succ; - if (!e || !e->succ_next || e->succ_next->succ_next) + if (EDGE_COUNT (bb->succs) != 2) continue; - if (e->dest != bb && e->succ_next->dest != bb) + if (EDGE_SUCC (bb, 0)->dest != bb && EDGE_SUCC (bb, 1)->dest != bb) continue; - if ((e->flags & EDGE_COMPLEX) - || (e->succ_next->flags & EDGE_COMPLEX)) + if ((EDGE_SUCC (bb, 0)->flags & EDGE_COMPLEX) + || (EDGE_SUCC (bb, 1)->flags & EDGE_COMPLEX)) continue; /* Check if bb has two predecessors, one being itself. */ - /* In view of above tests, suffices to check e->pred_next->pred_next? */ - e = bb->pred; - if (!e || !e->pred_next || e->pred_next->pred_next) + if (EDGE_COUNT (bb->preds) != 2) continue; - if (e->src != bb && e->pred_next->src != bb) + if (EDGE_PRED (bb, 0)->src != bb && EDGE_PRED (bb, 1)->src != bb) continue; - if ((e->flags & EDGE_COMPLEX) - || (e->pred_next->flags & EDGE_COMPLEX)) + if ((EDGE_PRED (bb, 0)->flags & EDGE_COMPLEX) + || (EDGE_PRED (bb, 1)->flags & EDGE_COMPLEX)) continue; /* For debugging. */ @@ -884,9 +881,9 @@ sms_schedule (FILE *dump_file) } get_block_head_tail (bb->index, &head, &tail); - pre_header_edge = bb->pred; - if (bb->pred->src != bb) - pre_header_edge = bb->pred->pred_next; + pre_header_edge = EDGE_PRED (bb, 0); + if (EDGE_PRED (bb, 0)->src != bb) + pre_header_edge = EDGE_PRED (bb, 1); /* Perfrom SMS only on loops that their average count is above threshold. */ if (bb->count < pre_header_edge->count * SMS_LOOP_AVERAGE_COUNT_THRESHOLD) @@ -926,9 +923,9 @@ sms_schedule (FILE *dump_file) if ( !(count_reg = doloop_register_get (tail, &comp))) continue; - e = bb->pred; + e = EDGE_PRED (bb, 0); if (e->src == bb) - pre_header = e->pred_next->src; + pre_header = EDGE_PRED (bb, 1)->src; else pre_header = e->src; @@ -987,9 +984,9 @@ sms_schedule (FILE *dump_file) get_block_head_tail (g->bb->index, &head, &tail); - pre_header_edge = g->bb->pred; - if (g->bb->pred->src != g->bb) - pre_header_edge = g->bb->pred->pred_next; + pre_header_edge = EDGE_PRED (g->bb, 0); + if (EDGE_PRED (g->bb, 0)->src != g->bb) + pre_header_edge = EDGE_PRED (g->bb, 1); if (stats_file) { diff --git a/gcc/postreload-gcse.c b/gcc/postreload-gcse.c index 0238acefccf..144cc5f67b6 100644 --- a/gcc/postreload-gcse.c +++ b/gcc/postreload-gcse.c @@ -968,11 +968,12 @@ static bool bb_has_well_behaved_predecessors (basic_block bb) { edge pred; + edge_iterator ei; - if (! bb->pred) + if (EDGE_COUNT (bb->preds) == 0) return false; - for (pred = bb->pred; pred != NULL; pred = pred->pred_next) + FOR_EACH_EDGE (pred, ei, bb->preds) { if ((pred->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (pred)) return false; @@ -1023,6 +1024,7 @@ eliminate_partially_redundant_load (basic_block bb, rtx insn, int npred_ok = 0; gcov_type ok_count = 0; /* Redundant load execution count. */ gcov_type critical_count = 0; /* Execution count of critical edges. */ + edge_iterator ei; /* The execution count of the loads to be added to make the load fully redundant. */ @@ -1038,7 +1040,7 @@ eliminate_partially_redundant_load (basic_block bb, rtx insn, return; /* Check potential for replacing load with copy for predecessors. 
*/ - for (pred = bb->pred; pred; pred = pred->pred_next) + FOR_EACH_EDGE (pred, ei, bb->preds) { rtx next_pred_bb_end; diff --git a/gcc/predict.c b/gcc/predict.c index 18b6b90814c..8611f30d8aa 100644 --- a/gcc/predict.c +++ b/gcc/predict.c @@ -248,7 +248,7 @@ can_predict_insn_p (rtx insn) { return (JUMP_P (insn) && any_condjump_p (insn) - && BLOCK_FOR_INSN (insn)->succ->succ_next); + && EDGE_COUNT (BLOCK_FOR_INSN (insn)->succs) >= 2); } /* Predict edge E by given predictor if possible. */ @@ -287,13 +287,15 @@ static void dump_prediction (FILE *file, enum br_predictor predictor, int probability, basic_block bb, int used) { - edge e = bb->succ; + edge e; + edge_iterator ei; if (!file) return; - while (e && (e->flags & EDGE_FALLTHRU)) - e = e->succ_next; + FOR_EACH_EDGE (e, ei, bb->succs) + if (! (e->flags & EDGE_FALLTHRU)) + break; fprintf (file, " %s heuristics%s: %.1f%%", predictor_info[predictor].name, @@ -321,11 +323,12 @@ set_even_probabilities (basic_block bb) { int nedges = 0; edge e; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!(e->flags & (EDGE_EH | EDGE_FAKE))) nedges ++; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!(e->flags & (EDGE_EH | EDGE_FAKE))) e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges; else @@ -430,14 +433,14 @@ combine_predictions_for_insn (rtx insn, basic_block bb) /* Save the prediction into CFG in case we are seeing non-degenerated conditional jump. */ - if (bb->succ->succ_next) + if (EDGE_COUNT (bb->succs) > 1) { BRANCH_EDGE (bb)->probability = combined_probability; FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - combined_probability; } } - else if (bb->succ->succ_next) + else if (EDGE_COUNT (bb->succs) > 1) { int prob = INTVAL (XEXP (prob_note, 0)); @@ -445,7 +448,7 @@ combine_predictions_for_insn (rtx insn, basic_block bb) FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - prob; } else - bb->succ->probability = REG_BR_PROB_BASE; + EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE; } /* Combine predictions into single probability and store them into CFG. @@ -463,11 +466,12 @@ combine_predictions_for_bb (FILE *file, basic_block bb) struct edge_prediction *pred; int nedges = 0; edge e, first = NULL, second = NULL; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!(e->flags & (EDGE_EH | EDGE_FAKE))) { - nedges ++; + nedges ++; if (first && !second) second = e; if (!first) @@ -547,7 +551,7 @@ combine_predictions_for_bb (FILE *file, basic_block bb) int predictor = pred->predictor; int probability = pred->probability; - if (pred->edge != bb->succ) + if (pred->edge != EDGE_SUCC (bb, 0)) probability = REG_BR_PROB_BASE - probability; dump_prediction (file, predictor, probability, bb, !first_match || best_predictor == predictor); @@ -651,6 +655,7 @@ predict_loops (struct loops *loops_info, bool rtlsimpleloops) { int header_found = 0; edge e; + edge_iterator ei; bb = bbs[j]; @@ -664,7 +669,7 @@ predict_loops (struct loops *loops_info, bool rtlsimpleloops) /* Loop branch heuristics - predict an edge back to a loop's head as taken. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest == loop->header && e->src == loop->latch) { @@ -675,7 +680,7 @@ predict_loops (struct loops *loops_info, bool rtlsimpleloops) /* Loop exit heuristics - predict an edge exiting the loop if the conditional has no loop header successors as not taken. 
*/ if (!header_found) - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest->index < 0 || !flow_bb_inside_loop_p (loop, e->dest)) predict_edge @@ -814,18 +819,19 @@ estimate_probability (struct loops *loops_info) { rtx last_insn = BB_END (bb); edge e; + edge_iterator ei; if (! can_predict_insn_p (last_insn)) continue; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { /* Predict early returns to be probable, as we've already taken care for error returns and other are often used for fast paths trought function. */ if ((e->dest == EXIT_BLOCK_PTR - || (e->dest->succ && !e->dest->succ->succ_next - && e->dest->succ->dest == EXIT_BLOCK_PTR)) + || (EDGE_COUNT (e->dest->succs) == 1 + && EDGE_SUCC (e->dest, 0)->dest == EXIT_BLOCK_PTR)) && !predicted_by_p (bb, PRED_NULL_RETURN) && !predicted_by_p (bb, PRED_CONST_RETURN) && !predicted_by_p (bb, PRED_NEGATIVE_RETURN) @@ -1021,12 +1027,13 @@ tree_predict_by_opcode (basic_block bb) tree type; tree val; bitmap visited; + edge_iterator ei; if (!stmt || TREE_CODE (stmt) != COND_EXPR) return; - for (then_edge = bb->succ; then_edge; then_edge = then_edge->succ_next) + FOR_EACH_EDGE (then_edge, ei, bb->succs) if (then_edge->flags & EDGE_TRUE_VALUE) - break; + break; cond = TREE_OPERAND (stmt, 0); if (!COMPARISON_CLASS_P (cond)) return; @@ -1180,8 +1187,9 @@ apply_return_prediction (int *heads) int phi_num_args, i; enum br_predictor pred; enum prediction direction; + edge_iterator ei; - for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { return_stmt = last_stmt (e->src); if (TREE_CODE (return_stmt) == RETURN_EXPR) @@ -1297,19 +1305,21 @@ tree_estimate_probability (void) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { /* Predict early returns to be probable, as we've already taken care for error returns and other cases are often used for fast paths trought function. 
*/ if (e->dest == EXIT_BLOCK_PTR && TREE_CODE (last_stmt (bb)) == RETURN_EXPR - && bb->pred && bb->pred->pred_next) + && EDGE_COUNT (bb->preds) > 1) { edge e1; + edge_iterator ei1; - for (e1 = bb->pred; e1; e1 = e1->pred_next) + FOR_EACH_EDGE (e1, ei1, bb->preds) if (!predicted_by_p (e1->src, PRED_NULL_RETURN) && !predicted_by_p (e1->src, PRED_CONST_RETURN) && !predicted_by_p (e1->src, PRED_NEGATIVE_RETURN) @@ -1447,8 +1457,8 @@ last_basic_block_p (basic_block bb) return (bb->next_bb == EXIT_BLOCK_PTR || (bb->next_bb->next_bb == EXIT_BLOCK_PTR - && bb->succ && !bb->succ->succ_next - && bb->succ->dest->next_bb == EXIT_BLOCK_PTR)); + && EDGE_COUNT (bb->succs) == 1 + && EDGE_SUCC (bb, 0)->dest->next_bb == EXIT_BLOCK_PTR)); } /* Sets branch probabilities according to PREDiction and @@ -1462,6 +1472,7 @@ predict_paths_leading_to (basic_block bb, int *heads, enum br_predictor pred, enum prediction taken) { edge e; + edge_iterator ei; int y; if (heads[bb->index] < 0) @@ -1501,7 +1512,7 @@ predict_paths_leading_to (basic_block bb, int *heads, enum br_predictor pred, if (y == last_basic_block) return; - for (e = BASIC_BLOCK (y)->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (y)->succs) if (e->dest->index >= 0 && dominated_by_p (CDI_POST_DOMINATORS, e->dest, bb)) predict_edge_def (e, pred, taken); @@ -1557,9 +1568,10 @@ propagate_freq (struct loop *loop) { if (BLOCK_INFO (bb)->tovisit) { + edge_iterator ei; int count = 0; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) count++; else if (BLOCK_INFO (e->src)->tovisit @@ -1575,6 +1587,7 @@ propagate_freq (struct loop *loop) last = head; for (bb = head; bb; bb = nextbb) { + edge_iterator ei; sreal cyclic_probability, frequency; memcpy (&cyclic_probability, &real_zero, sizeof (real_zero)); @@ -1587,12 +1600,12 @@ propagate_freq (struct loop *loop) if (bb != head) { #ifdef ENABLE_CHECKING - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) abort (); #endif - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (EDGE_INFO (e)->back_edge) { sreal_add (&cyclic_probability, &cyclic_probability, @@ -1637,15 +1650,15 @@ propagate_freq (struct loop *loop) BLOCK_INFO (bb)->tovisit = 0; /* Compute back edge frequencies. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest == head) { sreal tmp; - + /* EDGE_INFO (e)->back_edge_prob - = ((e->probability * BLOCK_INFO (bb)->frequency) - / REG_BR_PROB_BASE); */ - + = ((e->probability * BLOCK_INFO (bb)->frequency) + / REG_BR_PROB_BASE); */ + sreal_init (&tmp, e->probability, 0); sreal_mul (&tmp, &tmp, &BLOCK_INFO (bb)->frequency); sreal_mul (&EDGE_INFO (e)->back_edge_prob, @@ -1653,7 +1666,7 @@ propagate_freq (struct loop *loop) } /* Propagate to successor blocks. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!(e->flags & EDGE_DFS_BACK) && BLOCK_INFO (e->dest)->npredecessors) { @@ -1664,10 +1677,10 @@ propagate_freq (struct loop *loop) nextbb = e->dest; else BLOCK_INFO (last)->next = e->dest; - + last = e->dest; } - } + } } } @@ -1686,7 +1699,8 @@ estimate_loops_at_level (struct loop *first_loop) estimate_loops_at_level (loop->inner); - if (loop->latch->succ) /* Do not do this for dummy function loop. */ + /* Do not do this for dummy function loop. 
*/ + if (EDGE_COUNT (loop->latch->succs) > 0) { /* Find current loop back edge and mark it. */ e = loop_latch_edge (loop); @@ -1787,7 +1801,7 @@ estimate_bb_frequencies (struct loops *loops) mark_dfs_back_edges (); - ENTRY_BLOCK_PTR->succ->probability = REG_BR_PROB_BASE; + EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->probability = REG_BR_PROB_BASE; /* Set up block info for each basic block. */ alloc_aux_for_blocks (sizeof (struct block_info_def)); @@ -1795,9 +1809,10 @@ estimate_bb_frequencies (struct loops *loops) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; + edge_iterator ei; BLOCK_INFO (bb)->tovisit = 0; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { sreal_init (&EDGE_INFO (e)->back_edge_prob, e->probability, 0); sreal_mul (&EDGE_INFO (e)->back_edge_prob, diff --git a/gcc/profile.c b/gcc/profile.c index d7e6f58104d..2bad285e1d9 100644 --- a/gcc/profile.c +++ b/gcc/profile.c @@ -142,8 +142,9 @@ instrument_edges (struct edge_list *el) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { struct edge_info *inf = EDGE_INFO (e); @@ -239,7 +240,9 @@ get_exec_counts (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; - for (e = bb->succ; e; e = e->succ_next) + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree) num_edges++; } @@ -295,11 +298,12 @@ compute_branch_probabilities (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!EDGE_INFO (e)->ignore) BB_INFO (bb)->succ_count++; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (!EDGE_INFO (e)->ignore) BB_INFO (bb)->pred_count++; } @@ -317,7 +321,9 @@ compute_branch_probabilities (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; - for (e = bb->succ; e; e = e->succ_next) + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree) { num_edges++; @@ -380,9 +386,10 @@ compute_branch_probabilities (void) if (bi->succ_count == 0) { edge e; + edge_iterator ei; gcov_type total = 0; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) total += e->count; bb->count = total; bi->count_valid = 1; @@ -391,9 +398,10 @@ compute_branch_probabilities (void) else if (bi->pred_count == 0) { edge e; + edge_iterator ei; gcov_type total = 0; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) total += e->count; bb->count = total; bi->count_valid = 1; @@ -405,15 +413,16 @@ compute_branch_probabilities (void) if (bi->succ_count == 1) { edge e; + edge_iterator ei; gcov_type total = 0; /* One of the counts will be invalid, but it is zero, so adding it in also doesn't hurt. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) total += e->count; /* Seedgeh for the invalid edge, and set its count. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (! EDGE_INFO (e)->count_valid && ! EDGE_INFO (e)->ignore) break; @@ -432,15 +441,16 @@ compute_branch_probabilities (void) if (bi->pred_count == 1) { edge e; + edge_iterator ei; gcov_type total = 0; /* One of the counts will be invalid, but it is zero, so adding it in also doesn't hurt. 
*/ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) total += e->count; /* Search for the invalid edge, and set its count. */ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (!EDGE_INFO (e)->count_valid && !EDGE_INFO (e)->ignore) break; @@ -485,6 +495,7 @@ compute_branch_probabilities (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; + edge_iterator ei; rtx note; if (bb->count < 0) @@ -493,7 +504,7 @@ compute_branch_probabilities (void) bb->index, (int)bb->count); bb->count = 0; } - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { /* Function may return twice in the cased the called function is setjmp or calls fork, but we can't represent this by extra @@ -518,11 +529,11 @@ compute_branch_probabilities (void) } if (bb->count) { - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) e->probability = (e->count * REG_BR_PROB_BASE + bb->count / 2) / bb->count; if (bb->index >= 0 && block_ends_with_condjump_p (bb) - && bb->succ->succ_next) + && EDGE_COUNT (bb->succs) >= 2) { int prob; edge e; @@ -530,9 +541,9 @@ compute_branch_probabilities (void) /* Find the branch edge. It is possible that we do have fake edges here. */ - for (e = bb->succ; e->flags & (EDGE_FAKE | EDGE_FALLTHRU); - e = e->succ_next) - continue; /* Loop body has been intentionally left blank. */ + FOR_EACH_EDGE (e, ei, bb->succs) + if (!(e->flags & (EDGE_FAKE | EDGE_FALLTHRU))) + break; prob = e->probability; index = prob * 20 / REG_BR_PROB_BASE; @@ -561,7 +572,7 @@ compute_branch_probabilities (void) tree based profile guessing put into code. */ else if (profile_status == PROFILE_ABSENT && !ir_type () - && bb->succ && bb->succ->succ_next + && EDGE_COUNT (bb->succs) > 1 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, 0))) { int prob = INTVAL (XEXP (note, 0)); @@ -578,12 +589,12 @@ compute_branch_probabilities (void) { int total = 0; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE))) total ++; if (total) { - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE))) e->probability = REG_BR_PROB_BASE / total; else @@ -591,14 +602,13 @@ compute_branch_probabilities (void) } else { - for (e = bb->succ; e; e = e->succ_next) - total ++; - for (e = bb->succ; e; e = e->succ_next) + total += EDGE_COUNT (bb->succs); + FOR_EACH_EDGE (e, ei, bb->succs) e->probability = REG_BR_PROB_BASE / total; } if (bb->index >= 0 && block_ends_with_condjump_p (bb) - && bb->succ->succ_next) + && EDGE_COUNT (bb->succs) >= 2) num_branches++, num_never_executed; } } @@ -789,6 +799,7 @@ branch_prob (void) int need_exit_edge = 0, need_entry_edge = 0; int have_exit_edge = 0, have_entry_edge = 0; edge e; + edge_iterator ei; /* Functions returning multiple times are not handled by extra edges. Instead we simply allow negative counts on edges from exit to the @@ -796,7 +807,7 @@ branch_prob (void) with the extra edges because that would result in flowgraph that needs to have fake edges outside the spanning tree. 
*/ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL)) && e->dest != EXIT_BLOCK_PTR) @@ -804,7 +815,7 @@ branch_prob (void) if (e->dest == EXIT_BLOCK_PTR) have_exit_edge = 1; } - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL)) && e->src != ENTRY_BLOCK_PTR) @@ -915,11 +926,12 @@ branch_prob (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; + edge_iterator ei; offset = gcov_write_tag (GCOV_TAG_ARCS); gcov_write_unsigned (BB_TO_GCOV_INDEX (bb)); - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { struct edge_info *i = EDGE_INFO (e); if (!i->ignore) @@ -1037,10 +1049,10 @@ branch_prob (void) /* Notice GOTO expressions we eliminated while constructing the CFG. */ - if (bb->succ && !bb->succ->succ_next && bb->succ->goto_locus) + if (EDGE_COUNT (bb->succs) == 1 && EDGE_SUCC (bb, 0)->goto_locus) { /* ??? source_locus type is marked deprecated in input.h. */ - source_locus curr_location = bb->succ->goto_locus; + source_locus curr_location = EDGE_SUCC (bb, 0)->goto_locus; /* ??? The FILE/LINE API is inconsistent for these cases. */ #ifdef USE_MAPPED_LOCATION output_location (LOCATION_FILE (curr_location), diff --git a/gcc/ra-build.c b/gcc/ra-build.c index 69f6aaa5872..b66e0972c7d 100644 --- a/gcc/ra-build.c +++ b/gcc/ra-build.c @@ -924,6 +924,7 @@ live_in (struct df *df, struct curr_use *use, rtx insn) are allowed. */ while (1) { + unsigned int i; int uid = INSN_UID (insn); basic_block bb = BLOCK_FOR_INSN (insn); number_seen[uid]++; @@ -940,7 +941,7 @@ live_in (struct df *df, struct curr_use *use, rtx insn) edge e; unsigned HOST_WIDE_INT undef = use->undefined; struct ra_bb_info *info = (struct ra_bb_info *) bb->aux; - if ((e = bb->pred) == NULL) + if (EDGE_COUNT (bb->preds) == 0) return; /* We now check, if we already traversed the predecessors of this block for the current pass and the current set of undefined @@ -952,8 +953,9 @@ live_in (struct df *df, struct curr_use *use, rtx insn) info->pass = loc_vpass; info->undefined = undef; /* All but the last predecessor are handled recursively. 
*/ - for (; e->pred_next; e = e->pred_next) + for (e = NULL, i = 0; i < EDGE_COUNT (bb->preds) - 1; i++) { + e = EDGE_PRED (bb, i); insn = live_in_edge (df, use, e); if (insn) live_in (df, use, insn); diff --git a/gcc/ra-rewrite.c b/gcc/ra-rewrite.c index 23c26ba9c3f..fa00e370648 100644 --- a/gcc/ra-rewrite.c +++ b/gcc/ra-rewrite.c @@ -1350,13 +1350,17 @@ rewrite_program2 (bitmap new_deaths) int in_ir = 0; edge e; int num = 0; + edge_iterator ei; bitmap_iterator bi; HARD_REG_SET cum_colors, colors; CLEAR_HARD_REG_SET (cum_colors); - for (e = bb->pred; e && num < 5; e = e->pred_next, num++) + FOR_EACH_EDGE (e, ei, bb->preds) { int j; + + if (num >= 5) + break; CLEAR_HARD_REG_SET (colors); EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j, bi) { @@ -1366,6 +1370,7 @@ rewrite_program2 (bitmap new_deaths) update_spill_colors (&colors, web, 1); } IOR_HARD_REG_SET (cum_colors, colors); + num++; } if (num == 5) in_ir = 1; diff --git a/gcc/ra.c b/gcc/ra.c index a821623ba52..0b84dfc90b0 100644 --- a/gcc/ra.c +++ b/gcc/ra.c @@ -682,7 +682,9 @@ reg_alloc (void) if (last) { edge e; - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { basic_block bb = e->src; last = BB_END (bb); diff --git a/gcc/recog.c b/gcc/recog.c index 7d6ce19c943..61e1186d07a 100644 --- a/gcc/recog.c +++ b/gcc/recog.c @@ -3116,9 +3116,9 @@ peephole2_optimize (FILE *dump_file ATTRIBUTE_UNUSED) if (note || (was_call && nonlocal_goto_handler_labels)) { edge eh_edge; + edge_iterator ei; - for (eh_edge = bb->succ; eh_edge - ; eh_edge = eh_edge->succ_next) + FOR_EACH_EDGE (eh_edge, ei, bb->succs) if (eh_edge->flags & (EDGE_EH | EDGE_ABNORMAL_CALL)) break; diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c index ee9c1e90402..b13753863a4 100644 --- a/gcc/reg-stack.c +++ b/gcc/reg-stack.c @@ -442,7 +442,9 @@ reg_to_stack (FILE *file) FOR_EACH_BB_REVERSE (bb) { edge e; - for (e = bb->pred; e; e = e->pred_next) + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->preds) if (!(e->flags & EDGE_DFS_BACK) && e->src != ENTRY_BLOCK_PTR) BLOCK_INFO (bb)->predecessors++; @@ -2528,6 +2530,7 @@ convert_regs_entry (void) { int inserted = 0; edge e; + edge_iterator ei; basic_block block; FOR_EACH_BB_REVERSE (block) @@ -2557,7 +2560,7 @@ convert_regs_entry (void) Note that we are inserting converted code here. This code is never seen by the convert_regs pass. */ - for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { basic_block block = e->dest; block_info bi = BLOCK_INFO (block); @@ -2717,7 +2720,7 @@ compensate_edge (edge e, FILE *file) instead of to the edge, because emit_swap can do minimal insn scheduling. We can do this when there is only one edge out, and it is not abnormal. */ - else if (block->succ->succ_next == NULL && !(e->flags & EDGE_ABNORMAL)) + else if (EDGE_COUNT (block->succs) == 1 && !(e->flags & EDGE_ABNORMAL)) { /* change_stack kills values in regstack. */ tmpstack = regstack; @@ -2764,6 +2767,7 @@ convert_regs_1 (FILE *file, basic_block block) rtx insn, next; edge e, beste = NULL; bool control_flow_insn_deleted = false; + edge_iterator ei; inserted = 0; deleted = 0; @@ -2774,7 +2778,7 @@ convert_regs_1 (FILE *file, basic_block block) if multiple such exists, take one with largest count, prefer critical one (as splitting critical edges is more expensive), or one with lowest index, to avoid random changes with different orders of the edges. 
*/ - for (e = block->pred; e ; e = e->pred_next) + FOR_EACH_EDGE (e, ei, block->preds) { if (e->flags & EDGE_DFS_BACK) ; @@ -2923,7 +2927,7 @@ convert_regs_1 (FILE *file, basic_block block) bi->stack_out = regstack; /* Compensate the back edges, as those wasn't visited yet. */ - for (e = block->succ; e ; e = e->succ_next) + FOR_EACH_EDGE (e, ei, block->succs) { if (e->flags & EDGE_DFS_BACK || (e->dest == EXIT_BLOCK_PTR)) @@ -2933,7 +2937,7 @@ convert_regs_1 (FILE *file, basic_block block) inserted |= compensate_edge (e, file); } } - for (e = block->pred; e ; e = e->pred_next) + FOR_EACH_EDGE (e, ei, block->preds) { if (e != beste && !(e->flags & EDGE_DFS_BACK) && e->src != ENTRY_BLOCK_PTR) @@ -2967,6 +2971,7 @@ convert_regs_2 (FILE *file, basic_block block) do { edge e; + edge_iterator ei; block = *--sp; @@ -2983,12 +2988,12 @@ convert_regs_2 (FILE *file, basic_block block) stack the successor in all cases and hand over the task of fixing up the discrepancy to convert_regs_1. */ - for (e = block->succ; e ; e = e->succ_next) + FOR_EACH_EDGE (e, ei, block->succs) if (! (e->flags & EDGE_DFS_BACK)) { BLOCK_INFO (e->dest)->predecessors--; if (!BLOCK_INFO (e->dest)->predecessors) - *sp++ = e->dest; + *sp++ = e->dest; } inserted |= convert_regs_1 (file, block); @@ -3009,6 +3014,7 @@ convert_regs (FILE *file) int inserted; basic_block b; edge e; + edge_iterator ei; /* Initialize uninitialized registers on function entry. */ inserted = convert_regs_entry (); @@ -3022,7 +3028,7 @@ convert_regs (FILE *file) prevent double fxch that often appears at the head of a loop. */ /* Process all blocks reachable from all entry points. */ - for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) inserted |= convert_regs_2 (file, e->dest); /* ??? Process all unreachable blocks. Though there's no excuse diff --git a/gcc/regrename.c b/gcc/regrename.c index c2e773d875f..330ed3b4284 100644 --- a/gcc/regrename.c +++ b/gcc/regrename.c @@ -1750,14 +1750,13 @@ copyprop_hardreg_forward (void) processed, begin with the value data that was live at the end of the predecessor block. */ /* ??? Ought to use more intelligent queuing of blocks. */ - if (bb->pred) - for (bbp = bb; bbp && bbp != bb->pred->src; bbp = bbp->prev_bb); - if (bb->pred - && ! bb->pred->pred_next - && ! (bb->pred->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) - && bb->pred->src != ENTRY_BLOCK_PTR + if (EDGE_COUNT (bb->preds) > 0) + for (bbp = bb; bbp && bbp != EDGE_PRED (bb, 0)->src; bbp = bbp->prev_bb); + if (EDGE_COUNT (bb->preds) == 1 + && ! (EDGE_PRED (bb, 0)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) + && EDGE_PRED (bb, 0)->src != ENTRY_BLOCK_PTR && bbp) - all_vd[bb->index] = all_vd[bb->pred->src->index]; + all_vd[bb->index] = all_vd[EDGE_PRED (bb, 0)->src->index]; else init_value_data (all_vd + bb->index); diff --git a/gcc/reload1.c b/gcc/reload1.c index 052acc092ff..3a8c19e9711 100644 --- a/gcc/reload1.c +++ b/gcc/reload1.c @@ -8033,10 +8033,11 @@ fixup_abnormal_edges (void) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; /* Look for cases we are interested in - calls or instructions causing exceptions. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_ABNORMAL_CALL) break; @@ -8049,7 +8050,7 @@ fixup_abnormal_edges (void) { rtx insn = BB_END (bb), stop = NEXT_INSN (BB_END (bb)); rtx next; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALLTHRU) break; /* Get past the new insns generated. 
Allow notes, as the insns may diff --git a/gcc/sbitmap.c b/gcc/sbitmap.c index f1d9c86ee30..554a4258076 100644 --- a/gcc/sbitmap.c +++ b/gcc/sbitmap.c @@ -514,12 +514,14 @@ sbitmap_intersection_of_succs (sbitmap dst, sbitmap *src, int bb) basic_block b = BASIC_BLOCK (bb); unsigned int set_size = dst->size; edge e; + unsigned ix; - for (e = b->succ; e != 0; e = e->succ_next) + for (e = NULL, ix = 0; ix < EDGE_COUNT (b->succs); ix++) { + e = EDGE_SUCC (b, ix); if (e->dest == EXIT_BLOCK_PTR) continue; - + sbitmap_copy (dst, src[e->dest->index]); break; } @@ -527,11 +529,12 @@ sbitmap_intersection_of_succs (sbitmap dst, sbitmap *src, int bb) if (e == 0) sbitmap_ones (dst); else - for (e = e->succ_next; e != 0; e = e->succ_next) + for (++ix; ix < EDGE_COUNT (b->succs); ix++) { unsigned int i; sbitmap_ptr p, r; + e = EDGE_SUCC (b, ix); if (e->dest == EXIT_BLOCK_PTR) continue; @@ -551,9 +554,11 @@ sbitmap_intersection_of_preds (sbitmap dst, sbitmap *src, int bb) basic_block b = BASIC_BLOCK (bb); unsigned int set_size = dst->size; edge e; + unsigned ix; - for (e = b->pred; e != 0; e = e->pred_next) + for (e = NULL, ix = 0; ix < EDGE_COUNT (b->preds); ix++) { + e = EDGE_PRED (b, ix); if (e->src == ENTRY_BLOCK_PTR) continue; @@ -564,11 +569,12 @@ sbitmap_intersection_of_preds (sbitmap dst, sbitmap *src, int bb) if (e == 0) sbitmap_ones (dst); else - for (e = e->pred_next; e != 0; e = e->pred_next) + for (++ix; ix < EDGE_COUNT (b->preds); ix++) { unsigned int i; sbitmap_ptr p, r; + e = EDGE_PRED (b, ix); if (e->src == ENTRY_BLOCK_PTR) continue; @@ -588,9 +594,11 @@ sbitmap_union_of_succs (sbitmap dst, sbitmap *src, int bb) basic_block b = BASIC_BLOCK (bb); unsigned int set_size = dst->size; edge e; + unsigned ix; - for (e = b->succ; e != 0; e = e->succ_next) + for (ix = 0; ix < EDGE_COUNT (b->succs); ix++) { + e = EDGE_SUCC (b, ix); if (e->dest == EXIT_BLOCK_PTR) continue; @@ -598,14 +606,15 @@ sbitmap_union_of_succs (sbitmap dst, sbitmap *src, int bb) break; } - if (e == 0) + if (ix == EDGE_COUNT (b->succs)) sbitmap_zero (dst); else - for (e = e->succ_next; e != 0; e = e->succ_next) + for (ix++; ix < EDGE_COUNT (b->succs); ix++) { unsigned int i; sbitmap_ptr p, r; + e = EDGE_SUCC (b, ix); if (e->dest == EXIT_BLOCK_PTR) continue; @@ -625,8 +634,9 @@ sbitmap_union_of_preds (sbitmap dst, sbitmap *src, int bb) basic_block b = BASIC_BLOCK (bb); unsigned int set_size = dst->size; edge e; + unsigned ix; - for (e = b->pred; e != 0; e = e->pred_next) + for (e = NULL, ix = 0; ix < EDGE_COUNT (b->preds); ix++) { if (e->src== ENTRY_BLOCK_PTR) continue; @@ -635,14 +645,15 @@ sbitmap_union_of_preds (sbitmap dst, sbitmap *src, int bb) break; } - if (e == 0) + if (ix == EDGE_COUNT (b->preds)) sbitmap_zero (dst); else - for (e = e->pred_next; e != 0; e = e->pred_next) + for (ix++; ix < EDGE_COUNT (b->preds); ix++) { unsigned int i; sbitmap_ptr p, r; + e = EDGE_PRED (b, ix); if (e->src == ENTRY_BLOCK_PTR) continue; diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c index 2344d1e9de4..286d047a245 100644 --- a/gcc/sched-ebb.c +++ b/gcc/sched-ebb.c @@ -175,7 +175,9 @@ compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used, { basic_block b = BLOCK_FOR_INSN (insn); edge e; - for (e = b->succ; e; e = e->succ_next) + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, b->succs) if (e->flags & EDGE_FALLTHRU) /* The jump may be a by-product of a branch that has been merged in the main codepath after being conditionalized. 
Therefore @@ -280,6 +282,7 @@ fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head, { edge f; rtx h; + edge_iterator ei; /* An obscure special case, where we do have partially dead instruction scheduled after last control flow instruction. @@ -291,9 +294,10 @@ fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head, A safer solution can be to bring the code into sequence, do the split and re-emit it back in case this will ever trigger problem. */ - f = bb->prev_bb->succ; - while (f && !(f->flags & EDGE_FALLTHRU)) - f = f->succ_next; + + FOR_EACH_EDGE (f, ei, bb->prev_bb->succs) + if (f->flags & EDGE_FALLTHRU) + break; if (f) { @@ -588,11 +592,12 @@ schedule_ebbs (FILE *dump_file) for (;;) { edge e; + edge_iterator ei; tail = BB_END (bb); if (bb->next_bb == EXIT_BLOCK_PTR || LABEL_P (BB_HEAD (bb->next_bb))) break; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if ((e->flags & EDGE_FALLTHRU) != 0) break; if (! e) diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c index 4da38c530c6..7fe01978fdb 100644 --- a/gcc/sched-rgn.c +++ b/gcc/sched-rgn.c @@ -390,9 +390,9 @@ build_control_flow (struct edge_list *edge_list) unreachable = 0; FOR_EACH_BB (b) { - if (b->pred == NULL - || (b->pred->src == b - && b->pred->pred_next == NULL)) + if (EDGE_COUNT (b->preds) == 0 + || (EDGE_PRED (b, 0)->src == b + && EDGE_COUNT (b->preds) == 1)) unreachable = 1; } @@ -615,7 +615,7 @@ find_rgns (struct edge_list *edge_list) char no_loops = 1; int node, child, loop_head, i, head, tail; int count = 0, sp, idx = 0; - int current_edge = out_edges[ENTRY_BLOCK_PTR->succ->dest->index]; + int current_edge = out_edges[EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest->index]; int num_bbs, num_insns, unreachable; int too_large_failure; basic_block bb; @@ -802,6 +802,7 @@ find_rgns (struct edge_list *edge_list) if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index)) { edge e; + edge_iterator ei; basic_block jbb; /* Now check that the loop is reducible. We do this separate @@ -842,7 +843,7 @@ find_rgns (struct edge_list *edge_list) /* Decrease degree of all I's successors for topological ordering. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest != EXIT_BLOCK_PTR) --degree[e->dest->index]; @@ -860,9 +861,8 @@ find_rgns (struct edge_list *edge_list) FOR_EACH_BB (jbb) /* Leaf nodes have only a single successor which must be EXIT_BLOCK. 
*/ - if (jbb->succ - && jbb->succ->dest == EXIT_BLOCK_PTR - && jbb->succ->succ_next == NULL) + if (EDGE_COUNT (jbb->succs) == 1 + && EDGE_SUCC (jbb, 0)->dest == EXIT_BLOCK_PTR) { queue[++tail] = jbb->index; SET_BIT (in_queue, jbb->index); @@ -878,7 +878,7 @@ find_rgns (struct edge_list *edge_list) { edge e; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->src == ENTRY_BLOCK_PTR) continue; @@ -935,7 +935,7 @@ find_rgns (struct edge_list *edge_list) edge e; child = queue[++head]; - for (e = BASIC_BLOCK (child)->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->preds) { node = e->src->index; @@ -990,9 +990,7 @@ find_rgns (struct edge_list *edge_list) CONTAINING_RGN (child) = nr_regions; queue[head] = queue[tail--]; - for (e = BASIC_BLOCK (child)->succ; - e; - e = e->succ_next) + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs) if (e->dest != EXIT_BLOCK_PTR) --degree[e->dest->index]; } diff --git a/gcc/tracer.c b/gcc/tracer.c index 4fcedca153a..968d093b723 100644 --- a/gcc/tracer.c +++ b/gcc/tracer.c @@ -118,8 +118,9 @@ find_best_successor (basic_block bb) { edge e; edge best = NULL; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!best || better_p (e, best)) best = e; if (!best || ignore_bb_p (best->dest)) @@ -136,8 +137,9 @@ find_best_predecessor (basic_block bb) { edge e; edge best = NULL; + edge_iterator ei; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (!best || better_p (e, best)) best = e; if (!best || ignore_bb_p (best->src)) @@ -269,14 +271,17 @@ tail_duplicate (void) blocks[bb2->index] = NULL; } traced_insns += bb2->frequency * counts [bb2->index]; - if (bb2->pred && bb2->pred->pred_next + if (EDGE_COUNT (bb2->preds) > 1 && can_duplicate_block_p (bb2)) { - edge e = bb2->pred; + edge e; + edge_iterator ei; basic_block old = bb2; - while (e->src != bb) - e = e->pred_next; + FOR_EACH_EDGE (e, ei, bb2->preds) + if (e->src == bb) + break; + nduplicated += counts [bb2->index]; bb2 = duplicate_block (bb2, e); @@ -319,18 +324,19 @@ tail_duplicate (void) static void layout_superblocks (void) { - basic_block end = ENTRY_BLOCK_PTR->succ->dest; - basic_block bb = ENTRY_BLOCK_PTR->succ->dest->next_bb; + basic_block end = EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest; + basic_block bb = EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest->next_bb; while (bb != EXIT_BLOCK_PTR) { + edge_iterator ei; edge e, best = NULL; while (end->rbi->next) end = end->rbi->next; - for (e = end->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, end->succs) if (e->dest != EXIT_BLOCK_PTR - && e->dest != ENTRY_BLOCK_PTR->succ->dest + && e->dest != EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest && !e->dest->rbi->visited && (!best || EDGE_FREQUENCY (e) > EDGE_FREQUENCY (best))) best = e; diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c index 6f5284ec10c..db0d1b518cf 100644 --- a/gcc/tree-cfg.c +++ b/gcc/tree-cfg.c @@ -441,7 +441,7 @@ make_edges (void) /* Finally, if no edges were created above, this is a regular basic block that only needs a fallthru edge. */ - if (bb->succ == NULL) + if (EDGE_COUNT (bb->succs) == 0) make_edge (bb, bb->next_bb, EDGE_FALLTHRU); } @@ -483,7 +483,7 @@ make_ctrl_stmt_edges (basic_block bb) case RESX_EXPR: make_eh_edges (last); /* Yet another NORETURN hack. */ - if (bb->succ == NULL) + if (EDGE_COUNT (bb->succs) == 0) make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); break; @@ -698,7 +698,7 @@ make_goto_expr_edges (basic_block bb) } /* Degenerate case of computed goto with no labels. 
*/ - if (!for_call && !bb->succ) + if (!for_call && EDGE_COUNT (bb->succs) == 0) make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); } @@ -1009,20 +1009,19 @@ tree_can_merge_blocks_p (basic_block a, basic_block b) tree stmt; block_stmt_iterator bsi; - if (!a->succ - || a->succ->succ_next) + if (EDGE_COUNT (a->succs) != 1) return false; - if (a->succ->flags & EDGE_ABNORMAL) + if (EDGE_SUCC (a, 0)->flags & EDGE_ABNORMAL) return false; - if (a->succ->dest != b) + if (EDGE_SUCC (a, 0)->dest != b) return false; if (b == EXIT_BLOCK_PTR) return false; - if (b->pred->pred_next) + if (EDGE_COUNT (b->preds) > 1) return false; /* If A ends by a statement causing exceptions or something similar, we @@ -1069,7 +1068,7 @@ tree_merge_blocks (basic_block a, basic_block b) /* Ensure that B follows A. */ move_block_after (b, a); - gcc_assert (a->succ->flags & EDGE_FALLTHRU); + gcc_assert (EDGE_SUCC (a, 0)->flags & EDGE_FALLTHRU); gcc_assert (!last_stmt (a) || !stmt_ends_bb_p (last_stmt (a))); /* Remove labels from B and set bb_for_stmt to A for other statements. */ @@ -1650,17 +1649,16 @@ cfg_remove_useless_stmts_bb (basic_block bb) /* Check whether we come here from a condition, and if so, get the condition. */ - if (!bb->pred - || bb->pred->pred_next - || !(bb->pred->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) + if (EDGE_COUNT (bb->preds) != 1 + || !(EDGE_PRED (bb, 0)->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) return; - cond = COND_EXPR_COND (last_stmt (bb->pred->src)); + cond = COND_EXPR_COND (last_stmt (EDGE_PRED (bb, 0)->src)); if (TREE_CODE (cond) == VAR_DECL || TREE_CODE (cond) == PARM_DECL) { var = cond; - val = (bb->pred->flags & EDGE_FALSE_VALUE + val = (EDGE_PRED (bb, 0)->flags & EDGE_FALSE_VALUE ? boolean_false_node : boolean_true_node); } else if (TREE_CODE (cond) == TRUTH_NOT_EXPR @@ -1668,12 +1666,12 @@ cfg_remove_useless_stmts_bb (basic_block bb) || TREE_CODE (TREE_OPERAND (cond, 0)) == PARM_DECL)) { var = TREE_OPERAND (cond, 0); - val = (bb->pred->flags & EDGE_FALSE_VALUE + val = (EDGE_PRED (bb, 0)->flags & EDGE_FALSE_VALUE ? boolean_true_node : boolean_false_node); } else { - if (bb->pred->flags & EDGE_FALSE_VALUE) + if (EDGE_PRED (bb, 0)->flags & EDGE_FALSE_VALUE) cond = invert_truthvalue (cond); if (TREE_CODE (cond) == EQ_EXPR && (TREE_CODE (TREE_OPERAND (cond, 0)) == VAR_DECL @@ -1776,8 +1774,8 @@ remove_phi_nodes_and_edges_for_unreachable_block (basic_block bb) } /* Remove edges to BB's successors. */ - while (bb->succ != NULL) - ssa_remove_edge (bb->succ); + while (EDGE_COUNT (bb->succs) > 0) + ssa_remove_edge (EDGE_SUCC (bb, 0)); } @@ -1859,12 +1857,11 @@ tree_block_forwards_to (basic_block bb) single successor has phi nodes. */ if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR - || !bb->succ - || bb->succ->succ_next - || bb->succ->dest == EXIT_BLOCK_PTR - || (bb->succ->flags & EDGE_ABNORMAL) != 0 + || EDGE_COUNT (bb->succs) != 1 + || EDGE_SUCC (bb, 0)->dest == EXIT_BLOCK_PTR + || (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) != 0 || phi_nodes (bb) - || phi_nodes (bb->succ->dest)) + || phi_nodes (EDGE_SUCC (bb, 0)->dest)) return NULL; /* Walk past any labels at the start of this block. */ @@ -1882,11 +1879,11 @@ tree_block_forwards_to (basic_block bb) edge dest; /* Recursive call to pick up chains of forwarding blocks. */ - dest = tree_block_forwards_to (bb->succ->dest); + dest = tree_block_forwards_to (EDGE_SUCC (bb, 0)->dest); - /* If none found, we forward to bb->succ at minimum. */ + /* If none found, we forward to bb->succs[0] at minimum. 
*/ if (!dest) - dest = bb->succ; + dest = EDGE_SUCC (bb, 0); ann->forwardable = 1; return dest; @@ -1933,9 +1930,10 @@ cleanup_control_expr_graph (basic_block bb, block_stmt_iterator bsi) bool retval = false; tree expr = bsi_stmt (bsi), val; - if (bb->succ->succ_next) + if (EDGE_COUNT (bb->succs) > 1) { - edge e, next; + edge e; + edge_iterator ei; switch (TREE_CODE (expr)) { @@ -1958,9 +1956,8 @@ cleanup_control_expr_graph (basic_block bb, block_stmt_iterator bsi) return false; /* Remove all the edges except the one that is always executed. */ - for (e = bb->succ; e; e = next) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - next = e->succ_next; if (e != taken_edge) { taken_edge->probability += e->probability; @@ -1968,12 +1965,14 @@ cleanup_control_expr_graph (basic_block bb, block_stmt_iterator bsi) ssa_remove_edge (e); retval = true; } + else + ei_next (&ei); } if (taken_edge->probability > REG_BR_PROB_BASE) taken_edge->probability = REG_BR_PROB_BASE; } else - taken_edge = bb->succ; + taken_edge = EDGE_SUCC (bb, 0); bsi_remove (&bsi); taken_edge->flags = EDGE_FALLTHRU; @@ -2016,7 +2015,7 @@ find_taken_edge (basic_block bb, tree val) if (TREE_CODE (stmt) == SWITCH_EXPR) return find_taken_edge_switch_expr (bb, val); - return bb->succ; + return EDGE_SUCC (bb, 0); } @@ -2248,11 +2247,7 @@ dump_cfg_stats (FILE *file) n_edges = 0; FOR_EACH_BB (bb) - { - edge e; - for (e = bb->succ; e; e = e->succ_next) - n_edges++; - } + n_edges += EDGE_COUNT (bb->succs); size = n_edges * sizeof (struct edge_def); total += size; fprintf (file, fmt_str_1, "Edges", n_edges, SCALE (size), LABEL (size)); @@ -2294,6 +2289,7 @@ static void tree_cfg2vcg (FILE *file) { edge e; + edge_iterator ei; basic_block bb; const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); @@ -2304,7 +2300,7 @@ tree_cfg2vcg (FILE *file) fprintf (file, "node: { title: \"EXIT\" label: \"EXIT\" }\n"); /* Write blocks and edges. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { fprintf (file, "edge: { sourcename: \"ENTRY\" targetname: \"%d\"", e->dest->index); @@ -2349,7 +2345,7 @@ tree_cfg2vcg (FILE *file) bb->index, bb->index, head_name, head_line, end_name, end_line); - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) fprintf (file, "edge: { sourcename: \"%d\" targetname: \"EXIT\"", bb->index); @@ -2497,6 +2493,7 @@ disband_implicit_edges (void) basic_block bb; block_stmt_iterator last; edge e; + edge_iterator ei; tree stmt, label; FOR_EACH_BB (bb) @@ -2510,7 +2507,7 @@ disband_implicit_edges (void) from cfg_remove_useless_stmts here since it violates the invariants for tree--cfg correspondence and thus fits better here where we do it anyway. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest != bb->next_bb) continue; @@ -2531,15 +2528,14 @@ disband_implicit_edges (void) { /* Remove the RETURN_EXPR if we may fall though to the exit instead. 
*/ - gcc_assert (bb->succ); - gcc_assert (!bb->succ->succ_next); - gcc_assert (bb->succ->dest == EXIT_BLOCK_PTR); + gcc_assert (EDGE_COUNT (bb->succs) == 1); + gcc_assert (EDGE_SUCC (bb, 0)->dest == EXIT_BLOCK_PTR); if (bb->next_bb == EXIT_BLOCK_PTR && !TREE_OPERAND (stmt, 0)) { bsi_remove (&last); - bb->succ->flags |= EDGE_FALLTHRU; + EDGE_SUCC (bb, 0)->flags |= EDGE_FALLTHRU; } continue; } @@ -2550,7 +2546,7 @@ disband_implicit_edges (void) continue; /* Find a fallthru edge and emit the goto if necessary. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALLTHRU) break; @@ -2834,7 +2830,7 @@ tree_find_edge_insert_loc (edge e, block_stmt_iterator *bsi, would have to examine the PHIs to prove that none of them used the value set by the statement we want to insert on E. That hardly seems worth the effort. */ - if (dest->pred->pred_next == NULL + if (EDGE_COUNT (dest->preds) == 1 && ! phi_nodes (dest) && dest != EXIT_BLOCK_PTR) { @@ -2866,7 +2862,7 @@ tree_find_edge_insert_loc (edge e, block_stmt_iterator *bsi, Except for the entry block. */ src = e->src; if ((e->flags & EDGE_ABNORMAL) == 0 - && src->succ->succ_next == NULL + && EDGE_COUNT (src->succs) == 1 && src != ENTRY_BLOCK_PTR) { *bsi = bsi_last (src); @@ -2897,7 +2893,7 @@ tree_find_edge_insert_loc (edge e, block_stmt_iterator *bsi, dest = split_edge (e); if (new_bb) *new_bb = dest; - e = dest->pred; + e = EDGE_PRED (dest, 0); goto restart; } @@ -2914,13 +2910,14 @@ bsi_commit_edge_inserts (int *new_blocks) basic_block bb; edge e; int blocks; + edge_iterator ei; blocks = n_basic_blocks; - bsi_commit_edge_inserts_1 (ENTRY_BLOCK_PTR->succ); + bsi_commit_edge_inserts_1 (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)); FOR_EACH_BB (bb) - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) bsi_commit_edge_inserts_1 (e); if (new_blocks) @@ -2990,6 +2987,7 @@ tree_split_edge (edge edge_in) edge new_edge, e; tree phi; int i, num_elem; + edge_iterator ei; /* Abnormal edges cannot be split. */ gcc_assert (!(edge_in->flags & EDGE_ABNORMAL)); @@ -3000,7 +2998,7 @@ tree_split_edge (edge edge_in) /* Place the new block in the block list. Try to keep the new block near its "logical" location. This is of most help to humans looking at debugging dumps. */ - for (e = dest->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, dest->preds) if (e->src->next_bb == dest) break; if (!e) @@ -3424,6 +3422,7 @@ tree_verify_flow_info (void) block_stmt_iterator bsi; tree stmt; edge e; + edge_iterator ei; if (ENTRY_BLOCK_PTR->stmt_list) { @@ -3437,7 +3436,7 @@ tree_verify_flow_info (void) err = 1; } - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) if (e->flags & EDGE_FALLTHRU) { error ("Fallthru to exit from bb %d\n", e->src->index); @@ -3503,7 +3502,7 @@ tree_verify_flow_info (void) if (is_ctrl_stmt (stmt)) { - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALLTHRU) { error ("Fallthru edge after a control statement in bb %d \n", @@ -3532,7 +3531,7 @@ tree_verify_flow_info (void) || !(false_edge->flags & EDGE_FALSE_VALUE) || (true_edge->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL)) || (false_edge->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL)) - || bb->succ->succ_next->succ_next) + || EDGE_COUNT (bb->succs) >= 3) { error ("Wrong outgoing edge flags at end of bb %d\n", bb->index); @@ -3567,7 +3566,7 @@ tree_verify_flow_info (void) { /* FIXME. 
We should double check that the labels in the destination blocks have their address taken. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if ((e->flags & (EDGE_FALLTHRU | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)) || !(e->flags & EDGE_ABNORMAL)) @@ -3580,14 +3579,14 @@ tree_verify_flow_info (void) break; case RETURN_EXPR: - if (!bb->succ || bb->succ->succ_next - || (bb->succ->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL + if (EDGE_COUNT (bb->succs) != 1 + || (EDGE_SUCC (bb, 0)->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) { error ("Wrong outgoing edge flags at end of bb %d\n", bb->index); err = 1; } - if (bb->succ->dest != EXIT_BLOCK_PTR) + if (EDGE_SUCC (bb, 0)->dest != EXIT_BLOCK_PTR) { error ("Return edge does not point to exit in bb %d\n", bb->index); @@ -3643,7 +3642,7 @@ tree_verify_flow_info (void) err = 1; } - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!e->dest->aux) { @@ -3675,7 +3674,7 @@ tree_verify_flow_info (void) } } - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) e->dest->aux = (void *)0; } @@ -3697,13 +3696,14 @@ static void tree_make_forwarder_block (edge fallthru) { edge e; + edge_iterator ei; basic_block dummy, bb; tree phi, new_phi, var, prev, next; dummy = fallthru->src; bb = fallthru->dest; - if (!bb->pred->pred_next) + if (EDGE_COUNT (bb->preds) == 1) return; /* If we redirected a branch we must create new phi nodes at the @@ -3728,7 +3728,7 @@ tree_make_forwarder_block (edge fallthru) set_phi_nodes (bb, prev); /* Add the arguments we have stored on edges. */ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e == fallthru) continue; @@ -3752,6 +3752,7 @@ tree_forwarder_block_p (basic_block bb) { block_stmt_iterator bsi; edge e; + edge_iterator ei; /* If we have already determined that this block is not forwardable, then no further checks are necessary. */ @@ -3760,10 +3761,9 @@ tree_forwarder_block_p (basic_block bb) /* BB must have a single outgoing normal edge. Otherwise it can not be a forwarder block. */ - if (!bb->succ - || bb->succ->succ_next - || bb->succ->dest == EXIT_BLOCK_PTR - || (bb->succ->flags & EDGE_ABNORMAL) + if (EDGE_COUNT (bb->succs) != 1 + || EDGE_SUCC (bb, 0)->dest == EXIT_BLOCK_PTR + || (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) || bb == ENTRY_BLOCK_PTR) { bb_ann (bb)->forwardable = 0; @@ -3771,7 +3771,7 @@ tree_forwarder_block_p (basic_block bb) } /* Successors of the entry block are not forwarders. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) if (e->dest == bb) { bb_ann (bb)->forwardable = 0; @@ -3818,9 +3818,9 @@ tree_forwarder_block_p (basic_block bb) static bool thread_jumps (void) { - edge e, next, last, old; + edge e, last, old; basic_block bb, dest, tmp, old_dest, curr, dom; - tree phi; + tree phi; int arg; bool retval = false; @@ -3829,8 +3829,10 @@ thread_jumps (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { + edge_iterator ei; + /* Don't waste time on unreachable blocks. */ - if (!bb->pred) + if (EDGE_COUNT (bb->preds) == 0) continue; /* Nor on forwarders. */ @@ -3844,17 +3846,19 @@ thread_jumps (void) /* Examine each of our block's successors to see if it is forwardable. 
*/ - for (e = bb->succ; e; e = next) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { int freq; gcov_type count; - next = e->succ_next; /* If the edge is abnormal or its destination is not forwardable, then there's nothing to do. */ if ((e->flags & EDGE_ABNORMAL) || !tree_forwarder_block_p (e->dest)) - continue; + { + ei_next (&ei); + continue; + } count = e->count; freq = EDGE_FREQUENCY (e); @@ -3862,19 +3866,19 @@ thread_jumps (void) /* Now walk through as many forwarder block as possible to find the ultimate destination we want to thread our jump to. */ - last = e->dest->succ; + last = EDGE_SUCC (e->dest, 0); bb_ann (e->dest)->forwardable = 0; - for (dest = e->dest->succ->dest; + for (dest = EDGE_SUCC (e->dest, 0)->dest; tree_forwarder_block_p (dest); - last = dest->succ, - dest = dest->succ->dest) + last = EDGE_SUCC (dest, 0), + dest = EDGE_SUCC (dest, 0)->dest) { /* An infinite loop detected. We redirect the edge anyway, so that the loop is shrunk into single basic block. */ if (!bb_ann (dest)->forwardable) break; - if (dest->succ->dest == EXIT_BLOCK_PTR) + if (EDGE_SUCC (dest, 0)->dest == EXIT_BLOCK_PTR) break; bb_ann (dest)->forwardable = 0; @@ -3883,11 +3887,14 @@ thread_jumps (void) /* Reset the forwardable marks to 1. */ for (tmp = e->dest; tmp != dest; - tmp = tmp->succ->dest) + tmp = EDGE_SUCC (tmp, 0)->dest) bb_ann (tmp)->forwardable = 1; if (dest == e->dest) - continue; + { + ei_next (&ei); + continue; + } old = find_edge (bb, dest); if (old) @@ -3903,7 +3910,10 @@ thread_jumps (void) /* That might mean that no forwarding at all is possible. */ if (dest == e->dest) - continue; + { + ei_next (&ei); + continue; + } old = find_edge (bb, dest); } @@ -3916,7 +3926,7 @@ thread_jumps (void) /* Update the profile. */ if (profile_status != PROFILE_ABSENT) - for (curr = old_dest; curr != dest; curr = curr->succ->dest) + for (curr = old_dest; curr != dest; curr = EDGE_SUCC (curr, 0)->dest) { curr->frequency -= freq; if (curr->frequency < 0) @@ -3924,9 +3934,9 @@ thread_jumps (void) curr->count -= count; if (curr->count < 0) curr->count = 0; - curr->succ->count -= count; - if (curr->succ->count < 0) - curr->succ->count = 0; + EDGE_SUCC (curr, 0)->count -= count; + if (EDGE_SUCC (curr, 0)->count < 0) + EDGE_SUCC (curr, 0)->count = 0; } if (!old) @@ -3951,9 +3961,9 @@ thread_jumps (void) become unreachable). */ for (; old_dest != dest; old_dest = tmp) { - tmp = old_dest->succ->dest; + tmp = EDGE_SUCC (old_dest, 0)->dest; - if (old_dest->pred) + if (EDGE_COUNT (old_dest->preds) > 0) break; delete_basic_block (old_dest); @@ -3974,7 +3984,7 @@ thread_jumps (void) for (; old_dest != dest; old_dest = tmp) { - tmp = old_dest->succ->dest; + tmp = EDGE_SUCC (old_dest, 0)->dest; if (get_immediate_dominator (CDI_DOMINATORS, tmp) == old_dest && !dominated_by_p (CDI_DOMINATORS, bb, tmp)) @@ -4042,9 +4052,10 @@ tree_try_redirect_by_replacing_jump (edge e, basic_block target) edge tmp; block_stmt_iterator b; tree stmt; + edge_iterator ei; /* Verify that all targets will be TARGET. */ - for (tmp = src->succ; tmp; tmp = tmp->succ_next) + FOR_EACH_EDGE (tmp, ei, src->succs) if (tmp->dest != target && tmp != e) break; @@ -4166,13 +4177,14 @@ tree_split_block (basic_block bb, void *stmt) tree act; basic_block new_bb; edge e; + edge_iterator ei; new_bb = create_empty_bb (bb); /* Redirect the outgoing edges. 
*/ - new_bb->succ = bb->succ; - bb->succ = NULL; - for (e = new_bb->succ; e; e = e->succ_next) + new_bb->succs = bb->succs; + bb->succs = NULL; + FOR_EACH_EDGE (e, ei, new_bb->succs) e->src = new_bb; if (stmt && TREE_CODE ((tree) stmt) == LABEL_EXPR) @@ -4289,11 +4301,12 @@ add_phi_args_after_copy_bb (basic_block bb_copy) { basic_block bb, dest; edge e, e_copy; + edge_iterator ei; tree phi, phi_copy, phi_next, def; bb = bb_copy->rbi->original; - for (e_copy = bb_copy->succ; e_copy; e_copy = e_copy->succ_next) + FOR_EACH_EDGE (e_copy, ei, bb_copy->succs) { if (!phi_nodes (e_copy->dest)) continue; @@ -4309,7 +4322,7 @@ add_phi_args_after_copy_bb (basic_block bb_copy) /* During loop unrolling the target of the latch edge is copied. In this case we are not looking for edge to dest, but to duplicated block whose original was dest. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest->rbi->duplicated && e->dest->rbi->original == dest) break; @@ -4453,6 +4466,7 @@ rewrite_to_new_ssa_names_bb (basic_block bb, htab_t map) { unsigned i; edge e; + edge_iterator ei; tree phi, stmt; block_stmt_iterator bsi; use_optype uses; @@ -4462,7 +4476,7 @@ rewrite_to_new_ssa_names_bb (basic_block bb, htab_t map) v_must_def_optype v_must_defs; stmt_ann_t ann; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (e->flags & EDGE_ABNORMAL) break; @@ -4506,7 +4520,7 @@ rewrite_to_new_ssa_names_bb (basic_block bb, htab_t map) (V_MUST_DEF_OP_PTR (v_must_defs, i), stmt, map); } - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) for (phi = phi_nodes (e->dest); phi; phi = TREE_CHAIN (phi)) { rewrite_to_new_ssa_names_use @@ -4771,43 +4785,33 @@ dump_function_to_file (tree fn, FILE *file, int flags) /* Pretty print of the loops intermediate representation. */ static void print_loop (FILE *, struct loop *, int); -static void print_pred_bbs (FILE *, edge); -static void print_succ_bbs (FILE *, edge); +static void print_pred_bbs (FILE *, basic_block bb); +static void print_succ_bbs (FILE *, basic_block bb); /* Print the predecessors indexes of edge E on FILE. */ static void -print_pred_bbs (FILE *file, edge e) +print_pred_bbs (FILE *file, basic_block bb) { - if (e == NULL) - return; - - else if (e->pred_next == NULL) + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->preds) fprintf (file, "bb_%d", e->src->index); - - else - { - fprintf (file, "bb_%d, ", e->src->index); - print_pred_bbs (file, e->pred_next); - } } /* Print the successors indexes of edge E on FILE. */ static void -print_succ_bbs (FILE *file, edge e) +print_succ_bbs (FILE *file, basic_block bb) { - if (e == NULL) - return; - else if (e->succ_next == NULL) - fprintf (file, "bb_%d", e->dest->index); - else - { - fprintf (file, "bb_%d, ", e->dest->index); - print_succ_bbs (file, e->succ_next); - } + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) + fprintf (file, "bb_%d", e->src->index); } @@ -4836,9 +4840,9 @@ print_loop (FILE *file, struct loop *loop, int indent) { /* Print the basic_block's header. */ fprintf (file, "%s bb_%d (preds = {", s_indent, bb->index); - print_pred_bbs (file, bb->pred); + print_pred_bbs (file, bb); fprintf (file, "}, succs = {"); - print_succ_bbs (file, bb->succ); + print_succ_bbs (file, bb); fprintf (file, "})\n"); /* Print the basic_block's body. */ @@ -4966,6 +4970,7 @@ tree_flow_call_edges_add (sbitmap blocks) Handle this by adding a dummy instruction in a new last basic block. 
*/ if (check_last_block) { + edge_iterator ei; basic_block bb = EXIT_BLOCK_PTR->prev_bb; block_stmt_iterator bsi = bsi_last (bb); tree t = NULL_TREE; @@ -4976,7 +4981,7 @@ tree_flow_call_edges_add (sbitmap blocks) { edge e; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest == EXIT_BLOCK_PTR) { bsi_insert_on_edge (e, build_empty_stmt ()); @@ -5018,8 +5023,11 @@ tree_flow_call_edges_add (sbitmap blocks) mark that edge as fake and remove it later. */ #ifdef ENABLE_CHECKING if (stmt == last_stmt) - for (e = bb->succ; e; e = e->succ_next) - gcc_assert (e->dest != EXIT_BLOCK_PTR); + { + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->succs) + gcc_assert (e->dest != EXIT_BLOCK_PTR); + } #endif /* Note that the following may create a new basic block @@ -5048,20 +5056,22 @@ bool tree_purge_dead_eh_edges (basic_block bb) { bool changed = false; - edge e, next; + edge e; + edge_iterator ei; tree stmt = last_stmt (bb); if (stmt && tree_can_throw_internal (stmt)) return false; - for (e = bb->succ; e ; e = next) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - next = e->succ_next; if (e->flags & EDGE_EH) { ssa_remove_edge (e); changed = true; } + else + ei_next (&ei); } return changed; @@ -5114,10 +5124,11 @@ split_critical_edges (void) { basic_block bb; edge e; + edge_iterator ei; FOR_ALL_BB (bb) { - for (e = bb->succ; e ; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL)) { split_edge (e); @@ -5227,10 +5238,11 @@ execute_warn_function_return (void) #endif tree last; edge e; + edge_iterator ei; if (warn_missing_noreturn && !TREE_THIS_VOLATILE (cfun->decl) - && EXIT_BLOCK_PTR->pred == NULL + && EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 0 && !lang_hooks.function.missing_noreturn_ok_p (cfun->decl)) warning ("%Jfunction might be possible candidate for " "attribute %", @@ -5238,14 +5250,14 @@ execute_warn_function_return (void) /* If we have a path to EXIT, then we do return. */ if (TREE_THIS_VOLATILE (cfun->decl) - && EXIT_BLOCK_PTR->pred != NULL) + && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0) { #ifdef USE_MAPPED_LOCATION location = UNKNOWN_LOCATION; #else locus = NULL; #endif - for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { last = last_stmt (e->src); if (TREE_CODE (last) == RETURN_EXPR @@ -5270,10 +5282,10 @@ execute_warn_function_return (void) /* If we see "return;" in some basic block, then we do reach the end without returning a value. */ else if (warn_return_type - && EXIT_BLOCK_PTR->pred != NULL + && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0 && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl)))) { - for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { tree last = last_stmt (e->src); if (TREE_CODE (last) == RETURN_EXPR @@ -5307,17 +5319,17 @@ extract_true_false_edges_from_block (basic_block b, edge *true_edge, edge *false_edge) { - edge e = b->succ; + edge e = EDGE_SUCC (b, 0); if (e->flags & EDGE_TRUE_VALUE) { *true_edge = e; - *false_edge = e->succ_next; + *false_edge = EDGE_SUCC (b, 1); } else { *false_edge = e; - *true_edge = e->succ_next; + *true_edge = EDGE_SUCC (b, 1); } } diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c index 3ee16739cf8..88696465856 100644 --- a/gcc/tree-if-conv.c +++ b/gcc/tree-if-conv.c @@ -187,9 +187,9 @@ tree_if_conversion (struct loop *loop, bool for_vectorizer) /* If current bb has only one successor, then consider it as an unconditional goto. 
*/ - if (bb->succ && !bb->succ->succ_next) + if (EDGE_COUNT (bb->succs) == 1) { - basic_block bb_n = bb->succ->dest; + basic_block bb_n = EDGE_SUCC (bb, 0)->dest; if (cond != NULL_TREE) add_to_predicate_list (bb_n, cond); cond = NULL_TREE; @@ -472,6 +472,7 @@ static bool if_convertable_bb_p (struct loop *loop, basic_block bb, bool exit_bb_seen) { edge e; + edge_iterator ei; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "----------[%d]-------------\n", bb->index); @@ -493,7 +494,7 @@ if_convertable_bb_p (struct loop *loop, basic_block bb, bool exit_bb_seen) } /* Be less adventurous and handle only normal edges. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP)) { @@ -524,6 +525,7 @@ if_convertable_loop_p (struct loop *loop, bool for_vectorizer ATTRIBUTE_UNUSED) block_stmt_iterator itr; unsigned int i; edge e; + edge_iterator ei; bool exit_bb_seen = false; /* Handle only inner most loop. */ @@ -556,7 +558,7 @@ if_convertable_loop_p (struct loop *loop, bool for_vectorizer ATTRIBUTE_UNUSED) /* If one of the loop header's edge is exit edge then do not apply if-conversion. */ - for (e = loop->header->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, loop->header->succs) if ( e->flags & EDGE_LOOP_EXIT) return false; @@ -679,11 +681,12 @@ find_phi_replacement_condition (basic_block bb, tree *cond, basic_block p2 = NULL; basic_block true_bb = NULL; tree tmp_cond; + edge_iterator ei; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { if (p1 == NULL) - p1 = e->src; + p1 = e->src; else { gcc_assert (!p2); @@ -869,6 +872,7 @@ combine_blocks (struct loop *loop) if (bb == exit_bb) { edge new_e; + edge_iterator ei; /* Connect this node with loop header. */ new_e = make_edge (ifc_bbs[0], bb, EDGE_FALLTHRU); @@ -877,7 +881,7 @@ combine_blocks (struct loop *loop) if (exit_bb != loop->latch) { /* Redirect non-exit edge to loop->latch. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!(e->flags & EDGE_LOOP_EXIT)) { redirect_edge_and_branch (e, loop->latch); @@ -888,10 +892,10 @@ combine_blocks (struct loop *loop) } /* It is time to remove this basic block. First remove edges. */ - while (bb->succ != NULL) - ssa_remove_edge (bb->succ); - while (bb->pred != NULL) - ssa_remove_edge (bb->pred); + while (EDGE_COUNT (bb->succs) > 0) + ssa_remove_edge (EDGE_SUCC (bb, 0)); + while (EDGE_COUNT (bb->preds) > 0) + ssa_remove_edge (EDGE_PRED (bb, 0)); /* Remove labels and make stmts member of loop->header. 
*/ for (bsi = bsi_start (bb); !bsi_end_p (bsi); ) @@ -970,7 +974,8 @@ static bool pred_blocks_visited_p (basic_block bb, bitmap *visited) { edge e; - for (e = bb->pred; e; e = e->pred_next) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->preds) if (!bitmap_bit_p (*visited, e->src->index)) return false; @@ -1041,11 +1046,15 @@ static bool bb_with_exit_edge_p (basic_block bb) { edge e; + edge_iterator ei; bool exit_edge_found = false; - for (e = bb->succ; e && !exit_edge_found ; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_LOOP_EXIT) - exit_edge_found = true; + { + exit_edge_found = true; + break; + } return exit_edge_found; } diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c index 696bdfdc1d1..ec51faaa3cb 100644 --- a/gcc/tree-into-ssa.c +++ b/gcc/tree-into-ssa.c @@ -245,12 +245,13 @@ compute_global_livein (bitmap livein, bitmap def_blocks) while (tos != worklist) { edge e; + edge_iterator ei; /* Pull a block off the worklist. */ bb = *--tos; /* For each predecessor block. */ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { basic_block pred = e->src; int pred_index = pred->index; @@ -320,8 +321,9 @@ ssa_mark_phi_uses (struct dom_walk_data *walk_data, basic_block bb) edge e; tree phi, use; unsigned uid; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -730,6 +732,7 @@ ssa_rewrite_initialize_block (struct dom_walk_data *walk_data, basic_block bb) sbitmap names_to_rename = walk_data->global_data; edge e; bool abnormal_phi; + edge_iterator ei; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\n\nRenaming block #%d\n\n", bb->index); @@ -737,7 +740,7 @@ ssa_rewrite_initialize_block (struct dom_walk_data *walk_data, basic_block bb) /* Mark the unwind point for this block. */ VARRAY_PUSH_TREE (block_defs_stack, NULL_TREE); - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (e->flags & EDGE_ABNORMAL) break; abnormal_phi = (e != NULL); @@ -774,8 +777,9 @@ rewrite_add_phi_arguments (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED, basic_block bb) { edge e; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { tree phi; @@ -803,8 +807,9 @@ ssa_rewrite_phi_arguments (struct dom_walk_data *walk_data, basic_block bb) edge e; sbitmap names_to_rename = walk_data->global_data; use_operand_p op; + edge_iterator ei; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { tree phi; @@ -1033,7 +1038,8 @@ insert_phi_nodes_for (tree var, bitmap *dfs, varray_type *work_stack) /* If we are rewriting ssa names, add also the phi arguments. 
*/ if (TREE_CODE (var) == SSA_NAME) { - for (e = bb->pred; e; e = e->pred_next) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->preds) add_phi_arg (&phi, var, e); } } @@ -1445,13 +1451,7 @@ rewrite_into_ssa (bool all) dfs = (bitmap *) xmalloc (last_basic_block * sizeof (bitmap *)); FOR_EACH_BB (bb) { - edge e; - int count = 0; - - for (e = bb->pred; e; e = e->pred_next) - count++; - - bb_ann (bb)->num_preds = count; + bb_ann (bb)->num_preds = EDGE_COUNT (bb->preds); dfs[bb->index] = BITMAP_XMALLOC (); } @@ -1580,13 +1580,7 @@ rewrite_ssa_into_ssa (void) dfs = (bitmap *) xmalloc (last_basic_block * sizeof (bitmap *)); FOR_EACH_BB (bb) { - edge e; - int count = 0; - - for (e = bb->pred; e; e = e->pred_next) - count++; - - bb_ann (bb)->num_preds = count; + bb_ann (bb)->num_preds = EDGE_COUNT (bb->preds); dfs[bb->index] = BITMAP_XMALLOC (); } diff --git a/gcc/tree-mudflap.c b/gcc/tree-mudflap.c index 659a5cc0f06..a61a7ad935c 100644 --- a/gcc/tree-mudflap.c +++ b/gcc/tree-mudflap.c @@ -538,7 +538,7 @@ mf_build_check_statement_for (tree addr, tree size, /* We expect that the conditional jump we will construct will not be taken very often as it basically is an exception condition. */ - predict_edge_def (then_bb->pred, PRED_MUDFLAP, NOT_TAKEN); + predict_edge_def (EDGE_PRED (then_bb, 0), PRED_MUDFLAP, NOT_TAKEN); /* Mark the pseudo-fallthrough edge from cond_bb to join_bb. */ e = find_edge (cond_bb, join_bb); diff --git a/gcc/tree-outof-ssa.c b/gcc/tree-outof-ssa.c index e23f0a182b6..4d9986d1953 100644 --- a/gcc/tree-outof-ssa.c +++ b/gcc/tree-outof-ssa.c @@ -581,13 +581,14 @@ coalesce_abnormal_edges (var_map map, conflict_graph graph, root_var_p rv) edge e; tree phi, var, tmp; int x, y; + edge_iterator ei; /* Code cannot be inserted on abnormal edges. Look for all abnormal edges, and coalesce any PHI results with their arguments across that edge. 
*/ FOR_EACH_BB (bb) - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest != EXIT_BLOCK_PTR && e->flags & EDGE_ABNORMAL) for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi)) { @@ -1930,7 +1931,8 @@ rewrite_trees (var_map map, tree *values) phi = phi_nodes (bb); if (phi) { - for (e = bb->pred; e; e = e->pred_next) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->preds) eliminate_phi (e, phi_arg_from_edge (phi, e), g); } } diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c index d21f197af27..17770cd35c8 100644 --- a/gcc/tree-pretty-print.c +++ b/gcc/tree-pretty-print.c @@ -2151,6 +2151,7 @@ dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; tree stmt; + edge_iterator ei; if (flags & TDF_BLOCKS) { @@ -2174,8 +2175,8 @@ dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags) pp_string (buffer, "# PRED:"); pp_write_text_to_stream (buffer); - for (e = bb->pred; e; e = e->pred_next) - if (flags & TDF_SLIM) + FOR_EACH_EDGE (e, ei, bb->preds) + if (flags & TDF_SLIM) { pp_string (buffer, " "); if (e->src == ENTRY_BLOCK_PTR) @@ -2210,11 +2211,12 @@ static void dump_bb_end (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; + edge_iterator ei; INDENT (indent); pp_string (buffer, "# SUCC:"); pp_write_text_to_stream (buffer); - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (flags & TDF_SLIM) { pp_string (buffer, " "); @@ -2280,10 +2282,11 @@ dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; + edge_iterator ei; /* If there is a fallthru edge, we may need to add an artificial goto to the dump. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALLTHRU) break; if (e && e->dest != bb->next_bb) diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c index 53c125c698d..c6e1c4ed872 100644 --- a/gcc/tree-sra.c +++ b/gcc/tree-sra.c @@ -1639,10 +1639,11 @@ void insert_edge_copies (tree stmt, basic_block bb) { edge e; + edge_iterator ei; bool first_copy; first_copy = true; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { /* We don't need to insert copies on abnormal edges. The value of the scalar replacement is not guaranteed to diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c index dfa719a446e..735feb18f72 100644 --- a/gcc/tree-ssa-dce.c +++ b/gcc/tree-ssa-dce.c @@ -504,7 +504,8 @@ find_obviously_necessary_stmts (struct edge_list *el) and we currently do not have a means to recognize the finite ones. */ FOR_EACH_BB (bb) { - for (e = bb->succ; e; e = e->succ_next) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_DFS_BACK) mark_control_dependent_edges_necessary (e->dest, el); } @@ -731,7 +732,6 @@ remove_dead_stmt (block_stmt_iterator *i, basic_block bb) if (is_ctrl_stmt (t)) { basic_block post_dom_bb; - edge e; /* The post dominance info has to be up-to-date. */ gcc_assert (dom_computed[CDI_POST_DOMINATORS] == DOM_OK); /* Get the immediate post dominator of bb. */ @@ -746,30 +746,26 @@ remove_dead_stmt (block_stmt_iterator *i, basic_block bb) } /* Redirect the first edge out of BB to reach POST_DOM_BB. 
*/ - redirect_edge_and_branch (bb->succ, post_dom_bb); - PENDING_STMT (bb->succ) = NULL; - bb->succ->probability = REG_BR_PROB_BASE; - bb->succ->count = bb->count; + redirect_edge_and_branch (EDGE_SUCC (bb, 0), post_dom_bb); + PENDING_STMT (EDGE_SUCC (bb, 0)) = NULL; + EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE; + EDGE_SUCC (bb, 0)->count = bb->count; /* The edge is no longer associated with a conditional, so it does not have TRUE/FALSE flags. */ - bb->succ->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); + EDGE_SUCC (bb, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); /* If the edge reaches any block other than the exit, then it is a fallthru edge; if it reaches the exit, then it is not a fallthru edge. */ if (post_dom_bb != EXIT_BLOCK_PTR) - bb->succ->flags |= EDGE_FALLTHRU; + EDGE_SUCC (bb, 0)->flags |= EDGE_FALLTHRU; else - bb->succ->flags &= ~EDGE_FALLTHRU; + EDGE_SUCC (bb, 0)->flags &= ~EDGE_FALLTHRU; /* Remove the remaining the outgoing edges. */ - for (e = bb->succ->succ_next; e != NULL;) - { - edge tmp = e; - e = e->succ_next; - remove_edge (tmp); - } + while (EDGE_COUNT (bb->succs) != 1) + remove_edge (EDGE_SUCC (bb, 1)); } bsi_remove (i); diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c index 8bce1c3a9aa..1e6830bb54b 100644 --- a/gcc/tree-ssa-dom.c +++ b/gcc/tree-ssa-dom.c @@ -591,6 +591,7 @@ thread_across_edge (struct dom_walk_data *walk_data, edge e) { tree cond, cached_lhs; edge e1; + edge_iterator ei; /* Do not forward entry edges into the loop. In the case loop has multiple entry edges we may end up in constructing irreducible @@ -599,7 +600,7 @@ thread_across_edge (struct dom_walk_data *walk_data, edge e) edges forward to the same destination block. */ if (!e->flags & EDGE_DFS_BACK) { - for (e1 = e->dest->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e1, ei, e->dest->preds) if (e1->flags & EDGE_DFS_BACK) break; if (e1) @@ -879,24 +880,21 @@ dom_opt_finalize_block (struct dom_walk_data *walk_data, basic_block bb) the edge from BB through its successor. Do this before we remove entries from our equivalence tables. */ - if (bb->succ - && ! bb->succ->succ_next - && (bb->succ->flags & EDGE_ABNORMAL) == 0 - && (get_immediate_dominator (CDI_DOMINATORS, bb->succ->dest) != bb - || phi_nodes (bb->succ->dest))) + if (EDGE_COUNT (bb->succs) == 1 + && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0 + && (get_immediate_dominator (CDI_DOMINATORS, EDGE_SUCC (bb, 0)->dest) != bb + || phi_nodes (EDGE_SUCC (bb, 0)->dest))) { - thread_across_edge (walk_data, bb->succ); + thread_across_edge (walk_data, EDGE_SUCC (bb, 0)); } else if ((last = last_stmt (bb)) && TREE_CODE (last) == COND_EXPR && (COMPARISON_CLASS_P (COND_EXPR_COND (last)) || TREE_CODE (COND_EXPR_COND (last)) == SSA_NAME) - && bb->succ - && (bb->succ->flags & EDGE_ABNORMAL) == 0 - && bb->succ->succ_next - && (bb->succ->succ_next->flags & EDGE_ABNORMAL) == 0 - && ! bb->succ->succ_next->succ_next) + && EDGE_COUNT (bb->succs) == 2 + && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0 + && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0) { edge true_edge, false_edge; tree cond, inverted = NULL; @@ -1111,8 +1109,9 @@ single_incoming_edge_ignoring_loop_edges (basic_block bb) { edge retval = NULL; edge e; + edge_iterator ei; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { /* A loop back edge can be identified by the destination of the edge dominating the source of the edge. 
*/ @@ -1161,7 +1160,7 @@ record_equivalences_from_incoming_edge (struct dom_walk_data *walk_data ATTRIBUT /* If we have a single predecessor (ignoring loop backedges), then extract EDGE_FLAGS from the single incoming edge. Otherwise just return as there is nothing to do. */ - if (bb->pred + if (EDGE_COUNT (bb->preds) >= 1 && parent_block_last_stmt) { edge e = single_incoming_edge_ignoring_loop_edges (bb); @@ -1192,7 +1191,7 @@ record_equivalences_from_incoming_edge (struct dom_walk_data *walk_data ATTRIBUT /* Similarly when the parent block ended in a SWITCH_EXPR. We can only know the value of the switch's condition if the dominator parent is also the only predecessor of this block. */ - else if (bb->pred->src == parent + else if (EDGE_PRED (bb, 0)->src == parent && TREE_CODE (parent_block_last_stmt) == SWITCH_EXPR) { tree switch_cond = SWITCH_COND (parent_block_last_stmt); @@ -2185,10 +2184,11 @@ static void cprop_into_successor_phis (basic_block bb, bitmap nonzero_vars) { edge e; + edge_iterator ei; /* This can get rather expensive if the implementation is naive in how it finds the phi alternative associated with a particular edge. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { tree phi; int phi_num_args; diff --git a/gcc/tree-ssa-live.c b/gcc/tree-ssa-live.c index 41b970f1720..d716a7e76ae 100644 --- a/gcc/tree-ssa-live.c +++ b/gcc/tree-ssa-live.c @@ -488,6 +488,7 @@ live_worklist (tree_live_info_p live, varray_type stack, int i) basic_block def_bb = NULL; edge e; var_map map = live->map; + edge_iterator ei; bitmap_iterator bi; var = partition_to_var (map, i); @@ -504,15 +505,15 @@ live_worklist (tree_live_info_p live, varray_type stack, int i) b = VARRAY_TOP_INT (stack); VARRAY_POP (stack); - for (e = BASIC_BLOCK (b)->pred; e; e = e->pred_next) - if (e->src != ENTRY_BLOCK_PTR) + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (b)->preds) + if (e->src != ENTRY_BLOCK_PTR) { /* Its not live on entry to the block its defined in. 
*/ if (e->src == def_bb) continue; if (!bitmap_bit_p (live->livein[i], e->src->index)) { - bitmap_set_bit (live->livein[i], e->src->index); + bitmap_set_bit (live->livein[i], e->src->index); VARRAY_PUSH_INT (stack, e->src->index); } } @@ -570,7 +571,7 @@ calculate_live_on_entry (var_map map) #ifdef ENABLE_CHECKING int num; #endif - + edge_iterator ei; saw_def = BITMAP_XMALLOC (); @@ -642,7 +643,7 @@ calculate_live_on_entry (var_map map) bb = ENTRY_BLOCK_PTR; num = 0; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { int entry_block = e->dest->index; if (e->dest == EXIT_BLOCK_PTR) @@ -765,7 +766,8 @@ calculate_live_on_exit (tree_live_info_p liveinfo) on_entry = live_entry_blocks (liveinfo, i); EXECUTE_IF_SET_IN_BITMAP (on_entry, 0, b, bi) { - for (e = BASIC_BLOCK(b)->pred; e; e = e->pred_next) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (b)->preds) if (e->src != ENTRY_BLOCK_PTR) bitmap_set_bit (on_exit[e->src->index], i); } diff --git a/gcc/tree-ssa-loop-ch.c b/gcc/tree-ssa-loop-ch.c index 4d25d0f5b31..ecdf6f2cc2c 100644 --- a/gcc/tree-ssa-loop-ch.c +++ b/gcc/tree-ssa-loop-ch.c @@ -59,19 +59,16 @@ should_duplicate_loop_header_p (basic_block header, struct loop *loop, if (header->aux) return false; - gcc_assert (header->succ); - if (!header->succ->succ_next) + gcc_assert (EDGE_COUNT (header->succs) > 0); + if (EDGE_COUNT (header->succs) == 1) return false; - if (header->succ->succ_next->succ_next) - return false; - if (flow_bb_inside_loop_p (loop, header->succ->dest) - && flow_bb_inside_loop_p (loop, header->succ->succ_next->dest)) + if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest) + && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest)) return false; /* If this is not the original loop header, we want it to have just one predecessor in order to match the && pattern. */ - if (header != loop->header - && header->pred->pred_next) + if (header != loop->header && EDGE_COUNT (header->preds) >= 2) return false; last = last_stmt (header); @@ -176,10 +173,10 @@ copy_loop_headers (void) { /* Find a successor of header that is inside a loop; i.e. the new header after the condition is copied. */ - if (flow_bb_inside_loop_p (loop, header->succ->dest)) - exit = header->succ; + if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest)) + exit = EDGE_SUCC (header, 0); else - exit = header->succ->succ_next; + exit = EDGE_SUCC (header, 1); bbs[n_bbs++] = header; header = exit->dest; } @@ -194,8 +191,8 @@ copy_loop_headers (void) /* Ensure that the header will have just the latch as a predecessor inside the loop. 
*/ - if (exit->dest->pred->pred_next) - exit = loop_split_edge_with (exit, NULL)->succ; + if (EDGE_COUNT (exit->dest->preds) > 1) + exit = EDGE_SUCC (loop_split_edge_with (exit, NULL), 0); if (!tree_duplicate_sese_region (loop_preheader_edge (loop), exit, bbs, n_bbs, NULL)) diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c index 0330a278009..c7a60768ee9 100644 --- a/gcc/tree-ssa-loop-im.c +++ b/gcc/tree-ssa-loop-im.c @@ -603,8 +603,8 @@ loop_commit_inserts (void) { bb = BASIC_BLOCK (i); add_bb_to_loop (bb, - find_common_loop (bb->succ->dest->loop_father, - bb->pred->src->loop_father)); + find_common_loop (EDGE_SUCC (bb, 0)->dest->loop_father, + EDGE_PRED (bb, 0)->src->loop_father)); } } @@ -1316,6 +1316,7 @@ fill_always_executed_in (struct loop *loop, sbitmap contains_call) for (i = 0; i < loop->num_nodes; i++) { + edge_iterator ei; bb = bbs[i]; if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb)) @@ -1324,7 +1325,7 @@ fill_always_executed_in (struct loop *loop, sbitmap contains_call) if (TEST_BIT (contains_call, bb->index)) break; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!flow_bb_inside_loop_p (loop, e->dest)) break; if (e) diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c index 733cdd2e76b..66fee03b1a1 100644 --- a/gcc/tree-ssa-loop-ivcanon.c +++ b/gcc/tree-ssa-loop-ivcanon.c @@ -74,9 +74,9 @@ create_canonical_iv (struct loop *loop, edge exit, tree niter) } cond = last_stmt (exit->src); - in = exit->src->succ; + in = EDGE_SUCC (exit->src, 0); if (in == exit) - in = in->succ_next; + in = EDGE_SUCC (exit->src, 1); /* Note that we do not need to worry about overflows, since type of niter is always unsigned and all comparisons are diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c index e33d57209c3..33c275fda3b 100644 --- a/gcc/tree-ssa-loop-ivopts.c +++ b/gcc/tree-ssa-loop-ivopts.c @@ -1510,9 +1510,10 @@ find_interesting_uses (struct ivopts_data *data) for (i = 0; i < data->current_loop->num_nodes; i++) { + edge_iterator ei; bb = body[i]; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest != EXIT_BLOCK_PTR && !flow_bb_inside_loop_p (data->current_loop, e->dest)) find_interesting_uses_outside (data, e); @@ -4128,7 +4129,7 @@ compute_phi_arg_on_exit (edge exit, tree stmts, tree op) block_stmt_iterator bsi; tree phi, stmt, def, next; - if (exit->dest->pred->pred_next) + if (EDGE_COUNT (exit->dest->preds) > 1) split_loop_exit_edge (exit); if (TREE_CODE (stmts) == STATEMENT_LIST) diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c index 78572580af7..25c366c1284 100644 --- a/gcc/tree-ssa-loop-manip.c +++ b/gcc/tree-ssa-loop-manip.c @@ -123,10 +123,11 @@ add_exit_phis_edge (basic_block exit, tree use) basic_block def_bb = bb_for_stmt (def_stmt); struct loop *def_loop; edge e; + edge_iterator ei; /* Check that some of the edges entering the EXIT block exits a loop in that USE is defined. 
*/ - for (e = exit->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, exit->preds) { def_loop = find_common_loop (def_bb->loop_father, e->src->loop_father); if (!flow_bb_inside_loop_p (def_loop, e->dest)) @@ -138,7 +139,7 @@ add_exit_phis_edge (basic_block exit, tree use) phi = create_phi_node (use, exit); - for (e = exit->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, exit->preds) add_phi_arg (&phi, use, e); SSA_NAME_DEF_STMT (use) = def_stmt; @@ -192,10 +193,11 @@ get_loops_exits (void) bitmap exits = BITMAP_XMALLOC (); basic_block bb; edge e; + edge_iterator ei; FOR_EACH_BB (bb) { - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (e->src != ENTRY_BLOCK_PTR && !flow_bb_inside_loop_p (e->src->loop_father, bb)) { @@ -404,7 +406,7 @@ split_loop_exit_edge (edge exit) for (phi = phi_nodes (dest); phi; phi = TREE_CHAIN (phi)) { - op_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, bb->succ); + op_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, EDGE_SUCC (bb, 0)); name = USE_FROM_PTR (op_p); @@ -468,17 +470,17 @@ ip_normal_pos (struct loop *loop) basic_block bb; edge exit; - if (loop->latch->pred->pred_next) + if (EDGE_COUNT (loop->latch->preds) > 1) return NULL; - bb = loop->latch->pred->src; + bb = EDGE_PRED (loop->latch, 0)->src; last = last_stmt (bb); if (TREE_CODE (last) != COND_EXPR) return NULL; - exit = bb->succ; + exit = EDGE_SUCC (bb, 0); if (exit->dest == loop->latch) - exit = exit->succ_next; + exit = EDGE_SUCC (bb, 1); if (flow_bb_inside_loop_p (loop, exit->dest)) return NULL; @@ -732,7 +734,7 @@ lv_adjust_loop_entry_edge (basic_block first_head, /* Adjust edges appropriately to connect new head with first head as well as second head. */ - e0 = new_head->succ; + e0 = EDGE_SUCC (new_head, 0); e0->flags &= ~EDGE_FALLTHRU; e0->flags |= EDGE_FALSE_VALUE; e1 = make_edge (new_head, first_head, EDGE_TRUE_VALUE); @@ -816,10 +818,10 @@ tree_ssa_loop_version (struct loops *loops, struct loop * loop, *condition_bb = lv_adjust_loop_entry_edge (first_head, second_head, entry, cond_expr); - latch_edge = loop->latch->rbi->copy->succ; + latch_edge = EDGE_SUCC (loop->latch->rbi->copy, 0); nloop = loopify (loops, latch_edge, - loop->header->rbi->copy->pred, + EDGE_PRED (loop->header->rbi->copy, 0), *condition_bb, false /* Do not redirect all edges. */); @@ -839,7 +841,7 @@ tree_ssa_loop_version (struct loops *loops, struct loop * loop, (*condition_bb)->flags |= BB_IRREDUCIBLE_LOOP; loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP; loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP; - (*condition_bb)->pred->flags |= EDGE_IRREDUCIBLE_LOOP; + EDGE_PRED ((*condition_bb), 0)->flags |= EDGE_IRREDUCIBLE_LOOP; } /* At this point condition_bb is loop predheader with two successors, diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c index 686545bf26f..381456cc8d8 100644 --- a/gcc/tree-ssa-loop-niter.c +++ b/gcc/tree-ssa-loop-niter.c @@ -566,8 +566,8 @@ simplify_using_initial_conditions (struct loop *loop, tree expr, bb != ENTRY_BLOCK_PTR; bb = get_immediate_dominator (CDI_DOMINATORS, bb)) { - e = bb->pred; - if (e->pred_next) + e = EDGE_PRED (bb, 0); + if (EDGE_COUNT (bb->preds) > 1) continue; if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) diff --git a/gcc/tree-ssa-loop-unswitch.c b/gcc/tree-ssa-loop-unswitch.c index f10407538c2..d4ab19263ae 100644 --- a/gcc/tree-ssa-loop-unswitch.c +++ b/gcc/tree-ssa-loop-unswitch.c @@ -172,10 +172,10 @@ simplify_using_entry_checks (struct loop *loop, tree cond) ? 
boolean_true_node : boolean_false_node); - if (e->src->pred->pred_next) + if (EDGE_COUNT (e->src->preds) > 1) return cond; - e = e->src->pred; + e = EDGE_PRED (e->src, 0); if (e->src == ENTRY_BLOCK_PTR) return cond; } @@ -283,9 +283,7 @@ tree_unswitch_loop (struct loops *loops, struct loop *loop, /* Some sanity checking. */ gcc_assert (flow_bb_inside_loop_p (loop, unswitch_on)); - gcc_assert (unswitch_on->succ != NULL); - gcc_assert (unswitch_on->succ->succ_next != NULL); - gcc_assert (unswitch_on->succ->succ_next->succ_next == NULL); + gcc_assert (EDGE_COUNT (unswitch_on->succs) == 2); gcc_assert (loop->inner == NULL); return tree_ssa_loop_version (loops, loop, unshare_expr (cond), diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c index e61e736f5a3..7c38b8aae98 100644 --- a/gcc/tree-ssa-phiopt.c +++ b/gcc/tree-ssa-phiopt.c @@ -185,39 +185,35 @@ candidate_bb_for_phi_optimization (basic_block bb, /* One of the alternatives must come from a block ending with a COND_EXPR. */ - last0 = last_stmt (bb->pred->src); - last1 = last_stmt (bb->pred->pred_next->src); + last0 = last_stmt (EDGE_PRED (bb, 0)->src); + last1 = last_stmt (EDGE_PRED (bb, 1)->src); if (last0 && TREE_CODE (last0) == COND_EXPR) { - cond_block = bb->pred->src; - other_block = bb->pred->pred_next->src; + cond_block = EDGE_PRED (bb, 0)->src; + other_block = EDGE_PRED (bb, 1)->src; } else if (last1 && TREE_CODE (last1) == COND_EXPR) { - other_block = bb->pred->src; - cond_block = bb->pred->pred_next->src; + other_block = EDGE_PRED (bb, 0)->src; + cond_block = EDGE_PRED (bb, 1)->src; } else return false; /* COND_BLOCK must have precisely two successors. We indirectly verify that those successors are BB and OTHER_BLOCK. */ - if (!cond_block->succ - || !cond_block->succ->succ_next - || cond_block->succ->succ_next->succ_next - || (cond_block->succ->flags & EDGE_ABNORMAL) != 0 - || (cond_block->succ->succ_next->flags & EDGE_ABNORMAL) != 0) + if (EDGE_COUNT (cond_block->succs) != 2 + || (EDGE_SUCC (cond_block, 0)->flags & EDGE_ABNORMAL) != 0 + || (EDGE_SUCC (cond_block, 1)->flags & EDGE_ABNORMAL) != 0) return false; /* OTHER_BLOCK must have a single predecessor which is COND_BLOCK, OTHER_BLOCK must have a single successor which is BB and OTHER_BLOCK must have no PHI nodes. */ - if (!other_block->pred - || other_block->pred->src != cond_block - || other_block->pred->pred_next - || !other_block->succ - || other_block->succ->dest != bb - || other_block->succ->succ_next + if (EDGE_COUNT (other_block->preds) != 1 + || EDGE_PRED (other_block, 0)->src != cond_block + || EDGE_COUNT (other_block->succs) != 1 + || EDGE_SUCC (other_block, 0)->dest != bb || phi_nodes (other_block)) return false; @@ -252,20 +248,20 @@ replace_phi_with_stmt (block_stmt_iterator bsi, basic_block bb, bb_ann (bb)->phi_nodes = NULL; /* Remove the empty basic block. 
*/ - if (cond_block->succ->dest == bb) + if (EDGE_SUCC (cond_block, 0)->dest == bb) { - cond_block->succ->flags |= EDGE_FALLTHRU; - cond_block->succ->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); + EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU; + EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); - block_to_remove = cond_block->succ->succ_next->dest; + block_to_remove = EDGE_SUCC (cond_block, 1)->dest; } else { - cond_block->succ->succ_next->flags |= EDGE_FALLTHRU; - cond_block->succ->succ_next->flags + EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU; + EDGE_SUCC (cond_block, 1)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); - block_to_remove = cond_block->succ->dest; + block_to_remove = EDGE_SUCC (cond_block, 0)->dest; } delete_basic_block (block_to_remove); @@ -477,7 +473,7 @@ value_replacement (basic_block bb, tree phi, tree arg0, tree arg1) edge from OTHER_BLOCK which reaches BB and represents the desired path from COND_BLOCK. */ if (e->dest == other_block) - e = e->dest->succ; + e = EDGE_SUCC (e->dest, 0); /* Now we know the incoming edge to BB that has the argument for the RHS of our new assignment statement. */ diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c index 92932b63def..b0809d14b73 100644 --- a/gcc/tree-ssa-pre.c +++ b/gcc/tree-ssa-pre.c @@ -1137,12 +1137,13 @@ compute_antic_aux (basic_block block) setting the BB_VISITED flag. */ if (! (block->flags & BB_VISITED)) { - for (e = block->pred; e; e = e->pred_next) - if (e->flags & EDGE_ABNORMAL) - { - block->flags |= BB_VISITED; - break; - } + edge_iterator ei; + FOR_EACH_EDGE (e, ei, block->preds) + if (e->flags & EDGE_ABNORMAL) + { + block->flags |= BB_VISITED; + break; + } } if (block->flags & BB_VISITED) { @@ -1157,14 +1158,14 @@ compute_antic_aux (basic_block block) /* If the block has no successors, ANTIC_OUT is empty, because it is the exit block. */ - if (block->succ == NULL); + if (EDGE_COUNT (block->succs) == 0); /* If we have one successor, we could have some phi nodes to translate through. */ - else if (block->succ->succ_next == NULL) + else if (EDGE_COUNT (block->succs) == 1) { - phi_translate_set (ANTIC_OUT, ANTIC_IN(block->succ->dest), - block, block->succ->dest); + phi_translate_set (ANTIC_OUT, ANTIC_IN(EDGE_SUCC (block, 0)->dest), + block, EDGE_SUCC (block, 0)->dest); } /* If we have multiple successors, we take the intersection of all of them. 
*/ @@ -1174,14 +1175,11 @@ compute_antic_aux (basic_block block) edge e; size_t i; basic_block bprime, first; + edge_iterator ei; worklist = VEC_alloc (basic_block, 2); - e = block->succ; - while (e) - { - VEC_safe_push (basic_block, worklist, e->dest); - e = e->succ_next; - } + FOR_EACH_EDGE (e, ei, block->succs) + VEC_safe_push (basic_block, worklist, e->dest); first = VEC_index (basic_block, worklist, 0); set_copy (ANTIC_OUT, ANTIC_IN (first)); @@ -1426,7 +1424,7 @@ insert_aux (basic_block block) bitmap_insert_into_set (NEW_SETS (block), ssa_name (i)); bitmap_value_replace_in_set (AVAIL_OUT (block), ssa_name (i)); } - if (block->pred->pred_next) + if (EDGE_COUNT (block->preds) > 1) { value_set_node_t node; for (node = ANTIC_IN (block)->head; @@ -1445,6 +1443,7 @@ insert_aux (basic_block block) edge pred; basic_block bprime; tree eprime; + edge_iterator ei; val = get_value_handle (node->expr); if (bitmap_set_contains_value (PHI_GEN (block), val)) @@ -1455,11 +1454,9 @@ insert_aux (basic_block block) fprintf (dump_file, "Found fully redundant value\n"); continue; } - + avail = xcalloc (last_basic_block, sizeof (tree)); - for (pred = block->pred; - pred; - pred = pred->pred_next) + FOR_EACH_EDGE (pred, ei, block->preds) { tree vprime; tree edoubleprime; @@ -1520,7 +1517,7 @@ insert_aux (basic_block block) partially redundant. */ if (!cant_insert && !all_same && by_some) { - tree type = TREE_TYPE (avail[block->pred->src->index]); + tree type = TREE_TYPE (avail[EDGE_PRED (block, 0)->src->index]); tree temp; if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -1530,9 +1527,7 @@ insert_aux (basic_block block) } /* Make the necessary insertions. */ - for (pred = block->pred; - pred; - pred = pred->pred_next) + FOR_EACH_EDGE (pred, ei, block->preds) { tree stmts = alloc_stmt_list (); tree builtexpr; @@ -1547,7 +1542,7 @@ insert_aux (basic_block block) bsi_insert_on_edge (pred, stmts); avail[bprime->index] = builtexpr; } - } + } /* Now build a phi for the new variable. */ temp = create_tmp_var (type, "prephitmp"); add_referenced_tmp_var (temp); @@ -1562,9 +1557,7 @@ insert_aux (basic_block block) #endif bitmap_value_replace_in_set (AVAIL_OUT (block), PHI_RESULT (temp)); - for (pred = block->pred; - pred; - pred = pred->pred_next) + FOR_EACH_EDGE (pred, ei, block->preds) { add_phi_arg (&temp, avail[pred->src->index], pred); @@ -1926,9 +1919,9 @@ init_pre (void) ENTRY_BLOCK_PTR (FIXME, if ENTRY_BLOCK_PTR had an index number different than -1 we wouldn't have to hack this. tree-ssa-dce.c needs a similar change). */ - if (ENTRY_BLOCK_PTR->succ->dest->pred->pred_next) - if (!(ENTRY_BLOCK_PTR->succ->flags & EDGE_ABNORMAL)) - split_edge (ENTRY_BLOCK_PTR->succ); + if (EDGE_COUNT (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest->preds) > 1) + if (!(EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->flags & EDGE_ABNORMAL)) + split_edge (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)); FOR_ALL_BB (bb) bb->aux = xcalloc (1, sizeof (struct bb_value_sets)); diff --git a/gcc/tree-ssa-propagate.c b/gcc/tree-ssa-propagate.c index f86f13b7fdc..e93b9c1ca1f 100644 --- a/gcc/tree-ssa-propagate.c +++ b/gcc/tree-ssa-propagate.c @@ -318,8 +318,9 @@ simulate_stmt (tree stmt) if (stmt_ends_bb_p (stmt)) { edge e; + edge_iterator ei; basic_block bb = bb_for_stmt (stmt); - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) add_control_edge (e); } } @@ -406,6 +407,7 @@ simulate_block (basic_block block) block_stmt_iterator j; unsigned int normal_edge_count; edge e, normal_edge; + edge_iterator ei; /* Note that we have simulated this block. 
*/ SET_BIT (executable_blocks, block->index); @@ -434,7 +436,7 @@ simulate_block (basic_block block) worklist. */ normal_edge_count = 0; normal_edge = NULL; - for (e = block->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, block->succs) { if (e->flags & EDGE_ABNORMAL) add_control_edge (e); @@ -457,6 +459,7 @@ static void ssa_prop_init (void) { edge e; + edge_iterator ei; basic_block bb; /* Worklists of SSA edges. */ @@ -482,13 +485,13 @@ ssa_prop_init (void) for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si)) STMT_IN_SSA_EDGE_WORKLIST (bsi_stmt (si)) = 0; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) e->flags &= ~EDGE_EXECUTABLE; } /* Seed the algorithm by adding the successors of the entry block to the edge worklist. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { if (e->dest != EXIT_BLOCK_PTR) { diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c index a76ce67633a..a1f6cda098c 100644 --- a/gcc/tree-ssa-threadupdate.c +++ b/gcc/tree-ssa-threadupdate.c @@ -141,7 +141,8 @@ static void remove_last_stmt_and_useless_edges (basic_block bb, basic_block dest_bb) { block_stmt_iterator bsi; - edge e, next; + edge e; + edge_iterator ei; bsi = bsi_last (bb); @@ -150,19 +151,18 @@ remove_last_stmt_and_useless_edges (basic_block bb, basic_block dest_bb) bsi_remove (&bsi); - next = NULL; - for (e = bb->succ; e; e = next) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - next = e->succ_next; - if (e->dest != dest_bb) ssa_remove_edge (e); + else + ei_next (&ei); } /* BB now has a single outgoing edge. We need to update the flags for that single outgoing edge. */ - bb->succ->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); - bb->succ->flags |= EDGE_FALLTHRU; + EDGE_SUCC (bb, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); + EDGE_SUCC (bb, 0)->flags |= EDGE_FALLTHRU; } /* Create a duplicate of BB which only reaches the destination of the edge @@ -173,6 +173,7 @@ create_block_for_threading (basic_block bb, struct redirection_data *rd) { tree phi; edge e; + edge_iterator ei; /* We can use the generic block duplication code and simply remove the stuff we do not need. */ @@ -188,18 +189,19 @@ create_block_for_threading (basic_block bb, struct redirection_data *rd) specialized block copier. */ remove_last_stmt_and_useless_edges (rd->dup_block, rd->outgoing_edge->dest); - for (e = rd->dup_block->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, rd->dup_block->succs) e->count = 0; /* If there are any PHI nodes at the destination of the outgoing edge from the duplicate block, then we will need to add a new argument to them. The argument should have the same value as the argument associated with the outgoing edge stored in RD. */ - for (phi = phi_nodes (rd->dup_block->succ->dest); phi; + for (phi = phi_nodes (EDGE_SUCC (rd->dup_block, 0)->dest); phi; phi = PHI_CHAIN (phi)) { int indx = phi_arg_from_edge (phi, rd->outgoing_edge); - add_phi_arg (&phi, PHI_ARG_DEF_TREE (phi, indx), rd->dup_block->succ); + add_phi_arg (&phi, PHI_ARG_DEF_TREE (phi, indx), + EDGE_SUCC (rd->dup_block, 0)); } } @@ -238,10 +240,7 @@ thread_block (basic_block bb) /* E is an incoming edge into BB that we may or may not want to redirect to a duplicate of BB. */ edge e; - - /* The next edge in a predecessor list. Used in loops where E->pred_next - may change within the loop. */ - edge next; + edge_iterator ei; /* ALL indicates whether or not all incoming edges into BB should be threaded to a duplicate of BB. 
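The remove_last_stmt_and_useless_edges hunk above shows the idiom for deleting edges while iterating: FOR_EACH_EDGE cannot be used once the vector is being mutated, so the loop is written with ei_start/ei_safe_edge and the iterator is advanced only when the current edge is kept. A sketch of the same idiom, assuming the edge-vector API from this merge (the helper is hypothetical):

static void
remove_succs_not_to (basic_block bb, basic_block keep_dest)
{
  edge e;
  edge_iterator ei;

  /* ei_safe_edge returns NULL once the iterator runs off the end, so it
     doubles as the loop condition.  After ssa_remove_edge a different
     edge occupies the current slot and is examined next; only a kept
     edge advances the iterator.  */
  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != keep_dest)
        ssa_remove_edge (e);
      else
        ei_next (&ei);
    }
}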
*/ @@ -254,7 +253,7 @@ thread_block (basic_block bb) /* Look at each incoming edge into BB. Record each unique outgoing edge that we want to thread an incoming edge to. Also note if all incoming edges are threaded or not. */ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { if (!e->aux) { @@ -312,16 +311,16 @@ thread_block (basic_block bb) If this turns out to be a performance problem, then we could create a list of incoming edges associated with each entry in REDIRECTION_DATA and walk over that list of edges instead. */ - next = NULL; - for (e = bb->pred; e; e = next) + for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ) { edge new_dest = e->aux; - next = e->pred_next; - /* E was not threaded, then there is nothing to do. */ if (!new_dest) - continue; + { + ei_next (&ei); + continue; + } /* Go ahead and clear E->aux. It's not needed anymore and failure to clear it will cause all kinds of unpleasant problems later. */ @@ -373,7 +372,8 @@ thread_block (basic_block bb) if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Threaded jump %d --> %d to %d\n", - bb->pred->src->index, bb->index, bb->succ->dest->index); + EDGE_PRED (bb, 0)->src->index, bb->index, + EDGE_SUCC (bb, 0)->dest->index); remove_last_stmt_and_useless_edges (bb, rd->outgoing_edge->dest); } diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c index 56c9c8f6be4..5b9b3ccd185 100644 --- a/gcc/tree-ssa.c +++ b/gcc/tree-ssa.c @@ -278,9 +278,10 @@ verify_phi_args (tree phi, basic_block bb, basic_block *definition_block) edge e; bool err = false; int i, phi_num_args = PHI_NUM_ARGS (phi); + edge_iterator ei; /* Mark all the incoming edges. */ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) e->aux = (void *) 1; for (i = 0; i < phi_num_args; i++) @@ -326,7 +327,7 @@ verify_phi_args (tree phi, basic_block bb, basic_block *definition_block) e->aux = (void *) 2; } - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->aux != (void *) 2) { @@ -580,10 +581,11 @@ verify_ssa (void) { edge e; tree phi; + edge_iterator ei; block_stmt_iterator bsi; /* Make sure that all edges have a clear 'aux' field. */ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->aux) { @@ -631,7 +633,7 @@ verify_ssa (void) /* Verify the uses in arguments of PHI nodes at the exits from the block. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi)) { diff --git a/gcc/tree-tailcall.c b/gcc/tree-tailcall.c index 6f3fdcaaf93..a6c44933f79 100644 --- a/gcc/tree-tailcall.c +++ b/gcc/tree-tailcall.c @@ -190,6 +190,7 @@ independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi) { basic_block bb, call_bb, at_bb; edge e; + edge_iterator ei; if (is_gimple_min_invariant (expr)) return expr; @@ -200,7 +201,7 @@ independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi) /* Mark the blocks in the chain leading to the end. 
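The verify_phi_args hunks above run several FOR_EACH_EDGE loops over the same block with one edge_iterator; each use of the macro restarts the iterator from the vector it is handed, so a single "ei" declared at function scope serves every loop. A small sketch under the same assumptions (the helper and the aux marking scheme are illustrative):

static void
mark_incoming_edges (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* First pass: mark every incoming edge, as verify_phi_args does
     before matching PHI arguments against them.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    e->aux = (void *) 1;

  /* Second pass: the same iterator variable is reused; FOR_EACH_EDGE
     restarts it from bb->preds.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    gcc_assert (e->aux != NULL);
}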
*/ at_bb = bb_for_stmt (at); call_bb = bb_for_stmt (bsi_stmt (bsi)); - for (bb = call_bb; bb != at_bb; bb = bb->succ->dest) + for (bb = call_bb; bb != at_bb; bb = EDGE_SUCC (bb, 0)->dest) bb->aux = &bb->aux; bb->aux = &bb->aux; @@ -230,7 +231,7 @@ independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi) break; } - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (e->src->aux) break; gcc_assert (e); @@ -244,7 +245,7 @@ independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi) } /* Unmark the blocks. */ - for (bb = call_bb; bb != at_bb; bb = bb->succ->dest) + for (bb = call_bb; bb != at_bb; bb = EDGE_SUCC (bb, 0)->dest) bb->aux = NULL; bb->aux = NULL; @@ -371,7 +372,7 @@ find_tail_calls (basic_block bb, struct tailcall **ret) basic_block abb; stmt_ann_t ann; - if (bb->succ->succ_next) + if (EDGE_COUNT (bb->succs) > 1) return; for (bsi = bsi_last (bb); !bsi_end_p (bsi); bsi_prev (&bsi)) @@ -412,8 +413,9 @@ find_tail_calls (basic_block bb, struct tailcall **ret) if (bsi_end_p (bsi)) { + edge_iterator ei; /* Recurse to the predecessors. */ - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) find_tail_calls (e->src, ret); return; @@ -458,8 +460,8 @@ find_tail_calls (basic_block bb, struct tailcall **ret) while (bsi_end_p (absi)) { - ass_var = propagate_through_phis (ass_var, abb->succ); - abb = abb->succ->dest; + ass_var = propagate_through_phis (ass_var, EDGE_SUCC (abb, 0)); + abb = EDGE_SUCC (abb, 0)->dest; absi = bsi_start (abb); } @@ -677,7 +679,7 @@ eliminate_tail_call (struct tailcall *t) if (TREE_CODE (stmt) == MODIFY_EXPR) stmt = TREE_OPERAND (stmt, 1); - first = ENTRY_BLOCK_PTR->succ->dest; + first = EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest; /* Remove the code after call_bsi that will become unreachable. The possibly unreachable code in other blocks is removed later in @@ -697,7 +699,7 @@ eliminate_tail_call (struct tailcall *t) } /* Replace the call by a jump to the start of function. */ - e = redirect_edge_and_branch (t->call_block->succ, first); + e = redirect_edge_and_branch (EDGE_SUCC (t->call_block, 0), first); gcc_assert (e); PENDING_STMT (e) = NULL_TREE; @@ -752,12 +754,12 @@ eliminate_tail_call (struct tailcall *t) var_ann (param)->default_def = new_name; phi = create_phi_node (name, first); SSA_NAME_DEF_STMT (name) = phi; - add_phi_arg (&phi, new_name, ENTRY_BLOCK_PTR->succ); + add_phi_arg (&phi, new_name, EDGE_SUCC (ENTRY_BLOCK_PTR, 0)); /* For all calls the same set of variables should be clobbered. This means that there always should be the appropriate phi node except for the first time we eliminate the call. */ - gcc_assert (!first->pred->pred_next->pred_next); + gcc_assert (EDGE_COUNT (first->preds) <= 2); } add_phi_arg (&phi, V_MAY_DEF_OP (v_may_defs, i), e); @@ -819,15 +821,16 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) bool phis_constructed = false; struct tailcall *tailcalls = NULL, *act, *next; bool changed = false; - basic_block first = ENTRY_BLOCK_PTR->succ->dest; + basic_block first = EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest; tree stmt, param, ret_type, tmp, phi; + edge_iterator ei; if (!suitable_for_tail_opt_p ()) return; if (opt_tailcalls) opt_tailcalls = suitable_for_tail_call_opt_p (); - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { /* Only traverse the normal exits, i.e. those that end with return statement. 
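The tree-tailcall.c hunks walk chains of blocks that are known to have exactly one successor (the EDGE_COUNT (bb->succs) > 1 early return guarantees it), so bb->succ->dest becomes EDGE_SUCC (bb, 0)->dest. A hypothetical helper showing the chain walk under the same assumptions:

static int
count_blocks_on_path (basic_block from, basic_block to)
{
  basic_block bb;
  int n = 0;

  /* Follow the unique outgoing edge of each block until TO is reached;
     every block on the path is assumed to have exactly one successor.  */
  for (bb = from; bb != to; bb = EDGE_SUCC (bb, 0)->dest)
    n++;

  return n;
}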
*/ @@ -848,8 +851,8 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) if (!phis_constructed) { /* Ensure that there is only one predecessor of the block. */ - if (first->pred->pred_next) - first = split_edge (ENTRY_BLOCK_PTR->succ); + if (EDGE_COUNT (first->preds) > 1) + first = split_edge (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)); /* Copy the args if needed. */ for (param = DECL_ARGUMENTS (current_function_decl); @@ -868,7 +871,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) var_ann (param)->default_def = new_name; phi = create_phi_node (name, first); SSA_NAME_DEF_STMT (name) = phi; - add_phi_arg (&phi, new_name, first->pred); + add_phi_arg (&phi, new_name, EDGE_PRED (first, 0)); } phis_constructed = true; } @@ -881,7 +884,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) add_referenced_tmp_var (tmp); phi = create_phi_node (tmp, first); - add_phi_arg (&phi, build_int_cst (ret_type, 0), first->pred); + add_phi_arg (&phi, build_int_cst (ret_type, 0), EDGE_PRED (first, 0)); a_acc = PHI_RESULT (phi); } @@ -893,7 +896,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) add_referenced_tmp_var (tmp); phi = create_phi_node (tmp, first); - add_phi_arg (&phi, build_int_cst (ret_type, 1), first->pred); + add_phi_arg (&phi, build_int_cst (ret_type, 1), EDGE_PRED (first, 0)); m_acc = PHI_RESULT (phi); } } @@ -908,7 +911,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) if (a_acc || m_acc) { /* Modify the remaining return statements. */ - for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { stmt = last_stmt (e->src); diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c index c98d7046b8d..4493da19c52 100644 --- a/gcc/tree-vectorizer.c +++ b/gcc/tree-vectorizer.c @@ -1876,8 +1876,7 @@ vect_transform_loop (loop_vec_info loop_vinfo, /* 1) Make sure the loop header has exactly two entries 2) Make sure we have a preheader basic block. */ - gcc_assert (loop->header->pred->pred_next); - gcc_assert (!loop->header->pred->pred_next->pred_next); + gcc_assert (EDGE_COUNT (loop->header->preds) == 2); loop_split_edge_with (loop_preheader_edge (loop), NULL); diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c index 6e95e502941..72cb81d927e 100644 --- a/gcc/var-tracking.c +++ b/gcc/var-tracking.c @@ -526,7 +526,7 @@ prologue_stack_adjust (void) static bool vt_stack_adjustments (void) { - edge *stack; + edge_iterator *stack; int sp; /* Initialize entry block. */ @@ -534,22 +534,22 @@ vt_stack_adjustments (void) VTI (ENTRY_BLOCK_PTR)->out.stack_adjust = frame_stack_adjust; /* Allocate stack for back-tracking up CFG. */ - stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge)); + stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator)); sp = 0; /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; + stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs); while (sp) { - edge e; + edge_iterator ei; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; + ei = stack[sp - 1]; + src = ei_edge (ei)->src; + dest = ei_edge (ei)->dest; /* Check if the edge destination has been visited yet. */ if (!VTI (dest)->visited) @@ -558,10 +558,10 @@ vt_stack_adjustments (void) VTI (dest)->in.stack_adjust = VTI (src)->out.stack_adjust; bb_stack_adjust_offset (dest); - if (dest->succ) + if (EDGE_COUNT (dest->succs) > 0) /* Since the DEST node has been visited for the first time, check its successors. 
*/ - stack[sp++] = dest->succ; + stack[sp++] = ei_start (dest->succs); } else { @@ -572,9 +572,9 @@ vt_stack_adjustments (void) return false; } - if (e->succ_next) + if (! ei_one_before_end_p (ei)) /* Go to the next edge. */ - stack[sp - 1] = e->succ_next; + ei_next (&stack[sp - 1]); else /* Return to previous level if there are no more edges. */ sp--; @@ -1723,12 +1723,13 @@ vt_find_locations (void) if (!TEST_BIT (visited, bb->index)) { bool changed; + edge_iterator ei; SET_BIT (visited, bb->index); /* Calculate the IN set as union of predecessor OUT sets. */ dataflow_set_clear (&VTI (bb)->in); - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) { dataflow_set_union (&VTI (bb)->in, &VTI (e->src)->out); } @@ -1736,7 +1737,7 @@ vt_find_locations (void) changed = compute_bb_dataflow (bb); if (changed) { - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue;
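The var-tracking.c conversion above is the most structural one in this series: the explicit depth-first walk keeps a stack of edge_iterators instead of a stack of edges, pushing ei_start (dest->succs) to descend and using ei_one_before_end_p/ei_next to step to the next sibling edge. A condensed sketch of that traversal shape, assuming the same API and, like the original, that the entry block has at least one successor (the function name and purpose are hypothetical):

static void
mark_reachable_blocks (void)
{
  edge_iterator *stack;
  int sp = 0;

  /* One iterator per nesting level; as in vt_stack_adjustments,
     n_basic_blocks + 1 slots are enough.  */
  stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
  stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);

  while (sp)
    {
      edge_iterator ei = stack[sp - 1];
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR && !(dest->flags & BB_VISITED))
        {
          dest->flags |= BB_VISITED;
          if (EDGE_COUNT (dest->succs) > 0)
            {
              /* First visit: descend into DEST's successors.  The edge we
                 came in on stays current and is advanced on the way back.  */
              stack[sp++] = ei_start (dest->succs);
              continue;
            }
        }

      if (!ei_one_before_end_p (ei))
        /* More sibling edges at this level.  */
        ei_next (&stack[sp - 1]);
      else
        /* All edges at this level are done; return to the previous one.  */
        sp--;
    }

  free (stack);
}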