function from rs6000-logue.c back to rs6000.c.
* config/rs6000/rs6000.c (create_TOC_reference): Likewise.
+2019-07-09 Martin Sebor <msebor@redhat.com>
+
+ PR c++/61339
+ * auto-profile.c: Change class-key of PODs to struct and others
+ to class.
+ * basic-block.h: Same.
+ * bitmap.c (bitmap_alloc): Same.
+ * bitmap.h: Same.
+ * builtins.c (expand_builtin_prefetch): Same.
+ (expand_builtin_interclass_mathfn): Same.
+ (expand_builtin_strlen): Same.
+ (expand_builtin_mempcpy_args): Same.
+ (expand_cmpstr): Same.
+ (expand_builtin___clear_cache): Same.
+ (expand_ifn_atomic_bit_test_and): Same.
+ (expand_builtin_thread_pointer): Same.
+ (expand_builtin_set_thread_pointer): Same.
+ * caller-save.c (setup_save_areas): Same.
+ (replace_reg_with_saved_mem): Same.
+ (insert_restore): Same.
+ (insert_save): Same.
+ (add_used_regs): Same.
+ * cfg.c (get_bb_copy): Same.
+ (set_loop_copy): Same.
+ * cfg.h: Same.
+ * cfganal.h: Same.
+ * cfgexpand.c (alloc_stack_frame_space): Same.
+ (add_stack_var): Same.
+ (add_stack_var_conflict): Same.
+ (add_scope_conflicts_1): Same.
+ (update_alias_info_with_stack_vars): Same.
+ (expand_used_vars): Same.
+ * cfghooks.c (redirect_edge_and_branch_force): Same.
+ (delete_basic_block): Same.
+ (split_edge): Same.
+ (make_forwarder_block): Same.
+ (force_nonfallthru): Same.
+ (duplicate_block): Same.
+ (lv_flush_pending_stmts): Same.
+ * cfghooks.h: Same.
+ * cfgloop.c (flow_loops_cfg_dump): Same.
+ (flow_loop_nested_p): Same.
+ (superloop_at_depth): Same.
+ (get_loop_latch_edges): Same.
+ (flow_loop_dump): Same.
+ (flow_loops_dump): Same.
+ (flow_loops_free): Same.
+ (flow_loop_nodes_find): Same.
+ (establish_preds): Same.
+ (flow_loop_tree_node_add): Same.
+ (flow_loop_tree_node_remove): Same.
+ (flow_loops_find): Same.
+ (find_subloop_latch_edge_by_profile): Same.
+ (find_subloop_latch_edge_by_ivs): Same.
+ (mfb_redirect_edges_in_set): Same.
+ (form_subloop): Same.
+ (merge_latch_edges): Same.
+ (disambiguate_multiple_latches): Same.
+ (disambiguate_loops_with_multiple_latches): Same.
+ (flow_bb_inside_loop_p): Same.
+ (glb_enum_p): Same.
+ (get_loop_body_with_size): Same.
+ (get_loop_body): Same.
+ (fill_sons_in_loop): Same.
+ (get_loop_body_in_dom_order): Same.
+ (get_loop_body_in_custom_order): Same.
+ (release_recorded_exits): Same.
+ (get_loop_exit_edges): Same.
+ (num_loop_branches): Same.
+ (remove_bb_from_loops): Same.
+ (find_common_loop): Same.
+ (delete_loop): Same.
+ (cancel_loop): Same.
+ (verify_loop_structure): Same.
+ (loop_preheader_edge): Same.
+ (loop_exit_edge_p): Same.
+ (single_exit): Same.
+ (loop_exits_to_bb_p): Same.
+ (loop_exits_from_bb_p): Same.
+ (get_loop_location): Same.
+ (record_niter_bound): Same.
+ (get_estimated_loop_iterations_int): Same.
+ (max_stmt_executions_int): Same.
+ (likely_max_stmt_executions_int): Same.
+ (get_estimated_loop_iterations): Same.
+ (get_max_loop_iterations): Same.
+ (get_max_loop_iterations_int): Same.
+ (get_likely_max_loop_iterations): Same.
+ * cfgloop.h (simple_loop_desc): Same.
+ (get_loop): Same.
+ (loop_depth): Same.
+ (loop_outer): Same.
+ (loop_iterator::next): Same.
+ (loop_outermost): Same.
+ * cfgloopanal.c (mark_irreducible_loops): Same.
+ (num_loop_insns): Same.
+ (average_num_loop_insns): Same.
+ (expected_loop_iterations_unbounded): Same.
+ (expected_loop_iterations): Same.
+ (mark_loop_exit_edges): Same.
+ (single_likely_exit): Same.
+ * cfgloopmanip.c (fix_bb_placement): Same.
+ (fix_bb_placements): Same.
+ (remove_path): Same.
+ (place_new_loop): Same.
+ (add_loop): Same.
+ (scale_loop_frequencies): Same.
+ (scale_loop_profile): Same.
+ (create_empty_if_region_on_edge): Same.
+ (create_empty_loop_on_edge): Same.
+ (loopify): Same.
+ (unloop): Same.
+ (fix_loop_placements): Same.
+ (copy_loop_info): Same.
+ (duplicate_loop): Same.
+ (duplicate_subloops): Same.
+ (loop_redirect_edge): Same.
+ (can_duplicate_loop_p): Same.
+ (duplicate_loop_to_header_edge): Same.
+ (mfb_keep_just): Same.
+ (has_preds_from_loop): Same.
+ (create_preheader): Same.
+ (create_preheaders): Same.
+ (lv_adjust_loop_entry_edge): Same.
+ (loop_version): Same.
+ * cfgloopmanip.h: Same.
+ * cgraph.h: Same.
+ * cgraphbuild.c: Same.
+ * combine.c (make_extraction): Same.
+ * config/i386/i386-features.c: Same.
+ * config/i386/i386-features.h: Same.
+ * config/i386/i386.c (ix86_emit_outlined_ms2sysv_save): Same.
+ (ix86_emit_outlined_ms2sysv_restore): Same.
+ (ix86_noce_conversion_profitable_p): Same.
+ (ix86_init_cost): Same.
+ (ix86_simd_clone_usable): Same.
+ * configure.ac: Same.
+ * coretypes.h: Same.
+ * data-streamer-in.c (string_for_index): Same.
+ (streamer_read_indexed_string): Same.
+ (streamer_read_string): Same.
+ (bp_unpack_indexed_string): Same.
+ (bp_unpack_string): Same.
+ (streamer_read_uhwi): Same.
+ (streamer_read_hwi): Same.
+ (streamer_read_gcov_count): Same.
+ (streamer_read_wide_int): Same.
+ * data-streamer.h (streamer_write_bitpack): Same.
+ (bp_unpack_value): Same.
+ (streamer_write_char_stream): Same.
+ (streamer_write_hwi_in_range): Same.
+ (streamer_write_record_start): Same.
+ * ddg.c (create_ddg_dep_from_intra_loop_link): Same.
+ (add_cross_iteration_register_deps): Same.
+ (build_intra_loop_deps): Same.
+ * df-core.c (df_analyze): Same.
+ (loop_post_order_compute): Same.
+ (loop_inverted_post_order_compute): Same.
+ * df-problems.c (df_rd_alloc): Same.
+ (df_rd_simulate_one_insn): Same.
+ (df_rd_local_compute): Same.
+ (df_rd_init_solution): Same.
+ (df_rd_confluence_n): Same.
+ (df_rd_transfer_function): Same.
+ (df_rd_free): Same.
+ (df_rd_dump_defs_set): Same.
+ (df_rd_top_dump): Same.
+ (df_lr_alloc): Same.
+ (df_lr_reset): Same.
+ (df_lr_local_compute): Same.
+ (df_lr_init): Same.
+ (df_lr_confluence_n): Same.
+ (df_lr_free): Same.
+ (df_lr_top_dump): Same.
+ (df_lr_verify_transfer_functions): Same.
+ (df_live_alloc): Same.
+ (df_live_reset): Same.
+ (df_live_init): Same.
+ (df_live_confluence_n): Same.
+ (df_live_finalize): Same.
+ (df_live_free): Same.
+ (df_live_top_dump): Same.
+ (df_live_verify_transfer_functions): Same.
+ (df_mir_alloc): Same.
+ (df_mir_reset): Same.
+ (df_mir_init): Same.
+ (df_mir_confluence_n): Same.
+ (df_mir_free): Same.
+ (df_mir_top_dump): Same.
+ (df_word_lr_alloc): Same.
+ (df_word_lr_reset): Same.
+ (df_word_lr_init): Same.
+ (df_word_lr_confluence_n): Same.
+ (df_word_lr_free): Same.
+ (df_word_lr_top_dump): Same.
+ (df_md_alloc): Same.
+ (df_md_simulate_one_insn): Same.
+ (df_md_reset): Same.
+ (df_md_init): Same.
+ (df_md_free): Same.
+ (df_md_top_dump): Same.
+ * df-scan.c (df_insn_delete): Same.
+ (df_insn_rescan): Same.
+ (df_notes_rescan): Same.
+ (df_sort_and_compress_mws): Same.
+ (df_install_mws): Same.
+ (df_refs_add_to_chains): Same.
+ (df_ref_create_structure): Same.
+ (df_ref_record): Same.
+ (df_def_record_1): Same.
+ (df_find_hard_reg_defs): Same.
+ (df_uses_record): Same.
+ (df_get_conditional_uses): Same.
+ (df_get_call_refs): Same.
+ (df_recompute_luids): Same.
+ (df_get_entry_block_def_set): Same.
+ (df_entry_block_defs_collect): Same.
+ (df_get_exit_block_use_set): Same.
+ (df_exit_block_uses_collect): Same.
+ (df_mws_verify): Same.
+ (df_bb_verify): Same.
+ * df.h (df_scan_get_bb_info): Same.
+ * doc/tm.texi: Same.
+ * dse.c (record_store): Same.
+ * dumpfile.h: Same.
+ * emit-rtl.c (const_fixed_hasher::equal): Same.
+ (set_mem_attributes_minus_bitpos): Same.
+ (change_address): Same.
+ (adjust_address_1): Same.
+ (offset_address): Same.
+ * emit-rtl.h: Same.
+ * except.c (dw2_build_landing_pads): Same.
+ (sjlj_emit_dispatch_table): Same.
+ * explow.c (allocate_dynamic_stack_space): Same.
+ (emit_stack_probe): Same.
+ (probe_stack_range): Same.
+ * expmed.c (store_bit_field_using_insv): Same.
+ (store_bit_field_1): Same.
+ (store_integral_bit_field): Same.
+ (extract_bit_field_using_extv): Same.
+ (extract_bit_field_1): Same.
+ (emit_cstore): Same.
+ * expr.c (emit_block_move_via_cpymem): Same.
+ (expand_cmpstrn_or_cmpmem): Same.
+ (set_storage_via_setmem): Same.
+ (emit_single_push_insn_1): Same.
+ (expand_assignment): Same.
+ (store_constructor): Same.
+ (expand_expr_real_2): Same.
+ (expand_expr_real_1): Same.
+ (try_casesi): Same.
+ * flags.h: Same.
+ * function.c (try_fit_stack_local): Same.
+ (assign_stack_local_1): Same.
+ (assign_stack_local): Same.
+ (cut_slot_from_list): Same.
+ (insert_slot_to_list): Same.
+ (max_slot_level): Same.
+ (move_slot_to_level): Same.
+ (temp_address_hasher::equal): Same.
+ (remove_unused_temp_slot_addresses): Same.
+ (assign_temp): Same.
+ (combine_temp_slots): Same.
+ (update_temp_slot_address): Same.
+ (preserve_temp_slots): Same.
+ * function.h: Same.
+ * fwprop.c: Same.
+ * gcc-rich-location.h: Same.
+ * gcov.c: Same.
+ * genattrtab.c (check_attr_test): Same.
+ (check_attr_value): Same.
+ (convert_set_attr_alternative): Same.
+ (convert_set_attr): Same.
+ (check_defs): Same.
+ (copy_boolean): Same.
+ (get_attr_value): Same.
+ (expand_delays): Same.
+ (make_length_attrs): Same.
+ (min_fn): Same.
+ (make_alternative_compare): Same.
+ (simplify_test_exp): Same.
+ (tests_attr_p): Same.
+ (get_attr_order): Same.
+ (clear_struct_flag): Same.
+ (gen_attr): Same.
+ (compares_alternatives_p): Same.
+ (gen_insn): Same.
+ (gen_delay): Same.
+ (find_attrs_to_cache): Same.
+ (write_test_expr): Same.
+ (walk_attr_value): Same.
+ (write_attr_get): Same.
+ (eliminate_known_true): Same.
+ (write_insn_cases): Same.
+ (write_attr_case): Same.
+ (write_attr_valueq): Same.
+ (write_attr_value): Same.
+ (write_dummy_eligible_delay): Same.
+ (next_comma_elt): Same.
+ (find_attr): Same.
+ (make_internal_attr): Same.
+ (copy_rtx_unchanging): Same.
+ (gen_insn_reserv): Same.
+ (check_tune_attr): Same.
+ (make_automaton_attrs): Same.
+ (handle_arg): Same.
+ * genextract.c (gen_insn): Same.
+ (VEC_char_to_string): Same.
+ * genmatch.c (print_operand): Same.
+ (lower): Same.
+ (parser::parse_operation): Same.
+ (parser::parse_capture): Same.
+ (parser::parse_c_expr): Same.
+ (parser::parse_simplify): Same.
+ (main): Same.
+ * genoutput.c (output_operand_data): Same.
+ (output_get_insn_name): Same.
+ (compare_operands): Same.
+ (place_operands): Same.
+ (process_template): Same.
+ (validate_insn_alternatives): Same.
+ (validate_insn_operands): Same.
+ (gen_expand): Same.
+ (note_constraint): Same.
+ * genpreds.c (write_one_predicate_function): Same.
+ (add_constraint): Same.
+ (process_define_register_constraint): Same.
+ (write_lookup_constraint_1): Same.
+ (write_lookup_constraint_array): Same.
+ (write_insn_constraint_len): Same.
+ (write_reg_class_for_constraint_1): Same.
+ (write_constraint_satisfied_p_array): Same.
+ * genrecog.c (optimize_subroutine_group): Same.
+ * gensupport.c (process_define_predicate): Same.
+ (queue_pattern): Same.
+ (remove_from_queue): Same.
+ (process_rtx): Same.
+ (is_predicable): Same.
+ (change_subst_attribute): Same.
+ (subst_pattern_match): Same.
+ (alter_constraints): Same.
+ (alter_attrs_for_insn): Same.
+ (shift_output_template): Same.
+ (alter_output_for_subst_insn): Same.
+ (process_one_cond_exec): Same.
+ (subst_dup): Same.
+ (process_define_cond_exec): Same.
+ (mnemonic_htab_callback): Same.
+ (gen_mnemonic_attr): Same.
+ (read_md_rtx): Same.
+ * ggc-page.c: Same.
+ * gimple-loop-interchange.cc (dump_reduction): Same.
+ (dump_induction): Same.
+ (loop_cand::~loop_cand): Same.
+ (free_data_refs_with_aux): Same.
+ (tree_loop_interchange::interchange_loops): Same.
+ (tree_loop_interchange::map_inductions_to_loop): Same.
+ (tree_loop_interchange::move_code_to_inner_loop): Same.
+ (compute_access_stride): Same.
+ (compute_access_strides): Same.
+ (proper_loop_form_for_interchange): Same.
+ (tree_loop_interchange_compute_ddrs): Same.
+ (prune_datarefs_not_in_loop): Same.
+ (prepare_data_references): Same.
+ (pass_linterchange::execute): Same.
+ * gimple-loop-jam.c (bb_prevents_fusion_p): Same.
+ (unroll_jam_possible_p): Same.
+ (fuse_loops): Same.
+ (adjust_unroll_factor): Same.
+ (tree_loop_unroll_and_jam): Same.
+ * gimple-loop-versioning.cc (loop_versioning::~loop_versioning): Same.
+ (loop_versioning::expensive_stmt_p): Same.
+ (loop_versioning::version_for_unity): Same.
+ (loop_versioning::dump_inner_likelihood): Same.
+ (loop_versioning::find_per_loop_multiplication): Same.
+ (loop_versioning::analyze_term_using_scevs): Same.
+ (loop_versioning::record_address_fragment): Same.
+ (loop_versioning::analyze_expr): Same.
+ (loop_versioning::analyze_blocks): Same.
+ (loop_versioning::prune_conditions): Same.
+ (loop_versioning::merge_loop_info): Same.
+ (loop_versioning::add_loop_to_queue): Same.
+ (loop_versioning::decide_whether_loop_is_versionable): Same.
+ (loop_versioning::make_versioning_decisions): Same.
+ (loop_versioning::implement_versioning_decisions): Same.
+ * gimple-ssa-evrp-analyze.c
+ (evrp_range_analyzer::record_ranges_from_phis): Same.
+ * gimple-ssa-store-merging.c (split_store::split_store): Same.
+ (count_multiple_uses): Same.
+ (split_group): Same.
+ (imm_store_chain_info::output_merged_store): Same.
+ (pass_store_merging::process_store): Same.
+ * gimple-ssa-strength-reduction.c (slsr_process_phi): Same.
+ * gimple-ssa-warn-alloca.c (adjusted_warn_limit): Same.
+ (is_max): Same.
+ (alloca_call_type): Same.
+ (pass_walloca::execute): Same.
+ * gimple-streamer-in.c (input_phi): Same.
+ (input_gimple_stmt): Same.
+ * gimple-streamer.h: Same.
+ * godump.c (go_force_record_alignment): Same.
+ (go_format_type): Same.
+ (go_output_type): Same.
+ (go_output_fndecl): Same.
+ (go_output_typedef): Same.
+ (keyword_hash_init): Same.
+ (find_dummy_types): Same.
+ * graph.c (draw_cfg_nodes_no_loops): Same.
+ (draw_cfg_nodes_for_loop): Same.
+ * hard-reg-set.h (hard_reg_set_iter_next): Same.
+ * hsa-brig.c: Same.
+ * hsa-common.h (hsa_internal_fn_hasher::equal): Same.
+ * hsa-dump.c (dump_hsa_cfun): Same.
+ * hsa-gen.c (gen_function_def_parameters): Same.
+ * hsa-regalloc.c (dump_hsa_cfun_regalloc): Same.
+ * input.c (dump_line_table_statistics): Same.
+ (test_lexer): Same.
+ * input.h: Same.
+ * internal-fn.c (get_multi_vector_move): Same.
+ (expand_load_lanes_optab_fn): Same.
+ (expand_GOMP_SIMT_ENTER_ALLOC): Same.
+ (expand_GOMP_SIMT_EXIT): Same.
+ (expand_GOMP_SIMT_LAST_LANE): Same.
+ (expand_GOMP_SIMT_ORDERED_PRED): Same.
+ (expand_GOMP_SIMT_VOTE_ANY): Same.
+ (expand_GOMP_SIMT_XCHG_BFLY): Same.
+ (expand_GOMP_SIMT_XCHG_IDX): Same.
+ (expand_addsub_overflow): Same.
+ (expand_neg_overflow): Same.
+ (expand_mul_overflow): Same.
+ (expand_call_mem_ref): Same.
+ (expand_mask_load_optab_fn): Same.
+ (expand_scatter_store_optab_fn): Same.
+ (expand_gather_load_optab_fn): Same.
+ * ipa-cp.c (ipa_get_parm_lattices): Same.
+ (print_all_lattices): Same.
+ (ignore_edge_p): Same.
+ (build_toporder_info): Same.
+ (free_toporder_info): Same.
+ (push_node_to_stack): Same.
+ (ipcp_lattice<valtype>::set_contains_variable): Same.
+ (set_agg_lats_to_bottom): Same.
+ (ipcp_bits_lattice::meet_with): Same.
+ (set_single_call_flag): Same.
+ (initialize_node_lattices): Same.
+ (ipa_get_jf_ancestor_result): Same.
+ (ipcp_verify_propagated_values): Same.
+ (propagate_scalar_across_jump_function): Same.
+ (propagate_context_across_jump_function): Same.
+ (propagate_bits_across_jump_function): Same.
+ (ipa_vr_operation_and_type_effects): Same.
+ (propagate_vr_across_jump_function): Same.
+ (set_check_aggs_by_ref): Same.
+ (set_chain_of_aglats_contains_variable): Same.
+ (merge_aggregate_lattices): Same.
+ (agg_pass_through_permissible_p): Same.
+ (propagate_aggs_across_jump_function): Same.
+ (call_passes_through_thunk_p): Same.
+ (propagate_constants_across_call): Same.
+ (devirtualization_time_bonus): Same.
+ (good_cloning_opportunity_p): Same.
+ (context_independent_aggregate_values): Same.
+ (gather_context_independent_values): Same.
+ (perform_estimation_of_a_value): Same.
+ (estimate_local_effects): Same.
+ (value_topo_info<valtype>::add_val): Same.
+ (add_all_node_vals_to_toposort): Same.
+ (value_topo_info<valtype>::propagate_effects): Same.
+ (ipcp_propagate_stage): Same.
+ (ipcp_discover_new_direct_edges): Same.
+ (same_node_or_its_all_contexts_clone_p): Same.
+ (cgraph_edge_brings_value_p): Same.
+ (gather_edges_for_value): Same.
+ (create_specialized_node): Same.
+ (find_more_scalar_values_for_callers_subset): Same.
+ (find_more_contexts_for_caller_subset): Same.
+ (copy_plats_to_inter): Same.
+ (intersect_aggregates_with_edge): Same.
+ (find_aggregate_values_for_callers_subset): Same.
+ (cgraph_edge_brings_all_agg_vals_for_node): Same.
+ (decide_about_value): Same.
+ (decide_whether_version_node): Same.
+ (spread_undeadness): Same.
+ (identify_dead_nodes): Same.
+ (ipcp_store_vr_results): Same.
+ * ipa-devirt.c (final_warning_record::grow_type_warnings): Same.
+ * ipa-fnsummary.c (ipa_fn_summary::account_size_time): Same.
+ (redirect_to_unreachable): Same.
+ (edge_set_predicate): Same.
+ (evaluate_conditions_for_known_args): Same.
+ (evaluate_properties_for_edge): Same.
+ (ipa_fn_summary_t::duplicate): Same.
+ (ipa_call_summary_t::duplicate): Same.
+ (dump_ipa_call_summary): Same.
+ (ipa_dump_fn_summary): Same.
+ (eliminated_by_inlining_prob): Same.
+ (set_cond_stmt_execution_predicate): Same.
+ (set_switch_stmt_execution_predicate): Same.
+ (compute_bb_predicates): Same.
+ (will_be_nonconstant_expr_predicate): Same.
+ (phi_result_unknown_predicate): Same.
+ (analyze_function_body): Same.
+ (compute_fn_summary): Same.
+ (estimate_edge_devirt_benefit): Same.
+ (estimate_edge_size_and_time): Same.
+ (estimate_calls_size_and_time): Same.
+ (estimate_node_size_and_time): Same.
+ (remap_edge_change_prob): Same.
+ (remap_edge_summaries): Same.
+ (ipa_merge_fn_summary_after_inlining): Same.
+ (ipa_fn_summary_generate): Same.
+ (inline_read_section): Same.
+ (ipa_fn_summary_read): Same.
+ (ipa_fn_summary_write): Same.
+ * ipa-fnsummary.h: Same.
+ * ipa-hsa.c (ipa_hsa_read_section): Same.
+ * ipa-icf-gimple.c (func_checker::compare_loops): Same.
+ * ipa-icf.c (sem_function::param_used_p): Same.
+ * ipa-inline-analysis.c (do_estimate_edge_time): Same.
+ * ipa-inline.c (edge_badness): Same.
+ (inline_small_functions): Same.
+ * ipa-polymorphic-call.c
+ (ipa_polymorphic_call_context::stream_out): Same.
+ * ipa-predicate.c (predicate::remap_after_duplication): Same.
+ (predicate::remap_after_inlining): Same.
+ (predicate::stream_out): Same.
+ * ipa-predicate.h: Same.
+ * ipa-profile.c (ipa_profile_read_summary): Same.
+ * ipa-prop.c (ipa_get_param_decl_index_1): Same.
+ (count_formal_params): Same.
+ (ipa_dump_param): Same.
+ (ipa_alloc_node_params): Same.
+ (ipa_print_node_jump_functions_for_edge): Same.
+ (ipa_print_node_jump_functions): Same.
+ (ipa_load_from_parm_agg): Same.
+ (get_ancestor_addr_info): Same.
+ (ipa_compute_jump_functions_for_edge): Same.
+ (ipa_analyze_virtual_call_uses): Same.
+ (ipa_analyze_stmt_uses): Same.
+ (ipa_analyze_params_uses_in_bb): Same.
+ (update_jump_functions_after_inlining): Same.
+ (try_decrement_rdesc_refcount): Same.
+ (ipa_impossible_devirt_target): Same.
+ (update_indirect_edges_after_inlining): Same.
+ (combine_controlled_uses_counters): Same.
+ (ipa_edge_args_sum_t::duplicate): Same.
+ (ipa_write_jump_function): Same.
+ (ipa_write_indirect_edge_info): Same.
+ (ipa_write_node_info): Same.
+ (ipa_read_edge_info): Same.
+ (ipa_prop_read_section): Same.
+ (read_replacements_section): Same.
+ * ipa-prop.h (ipa_get_param_count): Same.
+ (ipa_get_param): Same.
+ (ipa_get_type): Same.
+ (ipa_get_param_move_cost): Same.
+ (ipa_set_param_used): Same.
+ (ipa_get_controlled_uses): Same.
+ (ipa_set_controlled_uses): Same.
+ (ipa_get_cs_argument_count): Same.
+ * ipa-pure-const.c (analyze_function): Same.
+ (pure_const_read_summary): Same.
+ * ipa-ref.h: Same.
+ * ipa-reference.c (ipa_reference_read_optimization_summary): Same.
+ * ipa-split.c (test_nonssa_use): Same.
+ (dump_split_point): Same.
+ (dominated_by_forbidden): Same.
+ (split_part_set_ssa_name_p): Same.
+ (find_split_points): Same.
+ * ira-build.c (finish_loop_tree_nodes): Same.
+ (low_pressure_loop_node_p): Same.
+ * ira-color.c (ira_reuse_stack_slot): Same.
+ * ira-int.h: Same.
+ * ira.c (setup_reg_equiv): Same.
+ (print_insn_chain): Same.
+ (ira): Same.
+ * loop-doloop.c (doloop_condition_get): Same.
+ (add_test): Same.
+ (record_reg_sets): Same.
+ (doloop_optimize): Same.
+ * loop-init.c (loop_optimizer_init): Same.
+ (fix_loop_structure): Same.
+ * loop-invariant.c (merge_identical_invariants): Same.
+ (compute_always_reached): Same.
+ (find_exits): Same.
+ (may_assign_reg_p): Same.
+ (find_invariants_bb): Same.
+ (find_invariants_body): Same.
+ (replace_uses): Same.
+ (can_move_invariant_reg): Same.
+ (free_inv_motion_data): Same.
+ (move_single_loop_invariants): Same.
+ (change_pressure): Same.
+ (mark_ref_regs): Same.
+ (calculate_loop_reg_pressure): Same.
+ * loop-iv.c (biv_entry_hasher::equal): Same.
+ (iv_extend_to_rtx_code): Same.
+ (check_iv_ref_table_size): Same.
+ (clear_iv_info): Same.
+ (latch_dominating_def): Same.
+ (iv_get_reaching_def): Same.
+ (iv_constant): Same.
+ (iv_subreg): Same.
+ (iv_extend): Same.
+ (iv_neg): Same.
+ (iv_add): Same.
+ (iv_mult): Same.
+ (get_biv_step): Same.
+ (record_iv): Same.
+ (analyzed_for_bivness_p): Same.
+ (record_biv): Same.
+ (iv_analyze_biv): Same.
+ (iv_analyze_expr): Same.
+ (iv_analyze_def): Same.
+ (iv_analyze_op): Same.
+ (iv_analyze): Same.
+ (iv_analyze_result): Same.
+ (biv_p): Same.
+ (eliminate_implied_conditions): Same.
+ (simplify_using_initial_values): Same.
+ (shorten_into_mode): Same.
+ (canonicalize_iv_subregs): Same.
+ (determine_max_iter): Same.
+ (check_simple_exit): Same.
+ (find_simple_exit): Same.
+ (get_simple_loop_desc): Same.
+ * loop-unroll.c (report_unroll): Same.
+ (decide_unrolling): Same.
+ (unroll_loops): Same.
+ (loop_exit_at_end_p): Same.
+ (decide_unroll_constant_iterations): Same.
+ (unroll_loop_constant_iterations): Same.
+ (compare_and_jump_seq): Same.
+ (unroll_loop_runtime_iterations): Same.
+ (decide_unroll_stupid): Same.
+ (unroll_loop_stupid): Same.
+ (referenced_in_one_insn_in_loop_p): Same.
+ (reset_debug_uses_in_loop): Same.
+ (analyze_iv_to_split_insn): Same.
+ * lra-eliminations.c (lra_debug_elim_table): Same.
+ (setup_can_eliminate): Same.
+ (form_sum): Same.
+ (lra_get_elimination_hard_regno): Same.
+ (lra_eliminate_regs_1): Same.
+ (eliminate_regs_in_insn): Same.
+ (update_reg_eliminate): Same.
+ (init_elimination): Same.
+ (lra_eliminate): Same.
+ * lra-int.h: Same.
+ * lra-lives.c (initiate_live_solver): Same.
+ * lra-remat.c (create_remat_bb_data): Same.
+ * lra-spills.c (lra_spill): Same.
+ * lra.c (lra_set_insn_recog_data): Same.
+ (lra_set_used_insn_alternative_by_uid): Same.
+ (init_reg_info): Same.
+ (expand_reg_info): Same.
+ * lto-cgraph.c (output_symtab): Same.
+ (read_identifier): Same.
+ (get_alias_symbol): Same.
+ (input_node): Same.
+ (input_varpool_node): Same.
+ (input_ref): Same.
+ (input_edge): Same.
+ (input_cgraph_1): Same.
+ (input_refs): Same.
+ (input_symtab): Same.
+ (input_offload_tables): Same.
+ (output_cgraph_opt_summary): Same.
+ (input_edge_opt_summary): Same.
+ (input_cgraph_opt_section): Same.
+ * lto-section-in.c (lto_free_raw_section_data): Same.
+ (lto_create_simple_input_block): Same.
+ (lto_free_function_in_decl_state_for_node): Same.
+ * lto-streamer-in.c (lto_tag_check_set): Same.
+ (lto_location_cache::revert_location_cache): Same.
+ (lto_location_cache::input_location): Same.
+ (lto_input_location): Same.
+ (stream_input_location_now): Same.
+ (lto_input_tree_ref): Same.
+ (lto_input_eh_catch_list): Same.
+ (input_eh_region): Same.
+ (lto_init_eh): Same.
+ (make_new_block): Same.
+ (input_cfg): Same.
+ (fixup_call_stmt_edges): Same.
+ (input_struct_function_base): Same.
+ (input_function): Same.
+ (lto_read_body_or_constructor): Same.
+ (lto_read_tree_1): Same.
+ (lto_read_tree): Same.
+ (lto_input_scc): Same.
+ (lto_input_tree_1): Same.
+ (lto_input_toplevel_asms): Same.
+ (lto_input_mode_table): Same.
+ (lto_reader_init): Same.
+ (lto_data_in_create): Same.
+ * lto-streamer-out.c (output_cfg): Same.
+ * lto-streamer.h: Same.
+ * modulo-sched.c (duplicate_insns_of_cycles): Same.
+ (generate_prolog_epilog): Same.
+ (mark_loop_unsched): Same.
+ (dump_insn_location): Same.
+ (loop_canon_p): Same.
+ (sms_schedule): Same.
+ * omp-expand.c (expand_omp_for_ordered_loops): Same.
+ (expand_omp_for_generic): Same.
+ (expand_omp_for_static_nochunk): Same.
+ (expand_omp_for_static_chunk): Same.
+ (expand_omp_simd): Same.
+ (expand_omp_taskloop_for_inner): Same.
+ (expand_oacc_for): Same.
+ (expand_omp_atomic_pipeline): Same.
+ (mark_loops_in_oacc_kernels_region): Same.
+ * omp-offload.c (oacc_xform_loop): Same.
+ * omp-simd-clone.c (simd_clone_adjust): Same.
+ * optabs-query.c (get_traditional_extraction_insn): Same.
+ * optabs.c (expand_vector_broadcast): Same.
+ (expand_binop_directly): Same.
+ (expand_twoval_unop): Same.
+ (expand_twoval_binop): Same.
+ (expand_unop_direct): Same.
+ (emit_indirect_jump): Same.
+ (emit_conditional_move): Same.
+ (emit_conditional_neg_or_complement): Same.
+ (emit_conditional_add): Same.
+ (vector_compare_rtx): Same.
+ (expand_vec_perm_1): Same.
+ (expand_vec_perm_const): Same.
+ (expand_vec_cond_expr): Same.
+ (expand_vec_series_expr): Same.
+ (maybe_emit_atomic_exchange): Same.
+ (maybe_emit_sync_lock_test_and_set): Same.
+ (expand_atomic_compare_and_swap): Same.
+ (expand_atomic_load): Same.
+ (expand_atomic_store): Same.
+ (maybe_emit_op): Same.
+ (valid_multiword_target_p): Same.
+ (create_integer_operand): Same.
+ (maybe_legitimize_operand_same_code): Same.
+ (maybe_legitimize_operand): Same.
+ (create_convert_operand_from_type): Same.
+ (can_reuse_operands_p): Same.
+ (maybe_legitimize_operands): Same.
+ (maybe_gen_insn): Same.
+ (maybe_expand_insn): Same.
+ (maybe_expand_jump_insn): Same.
+ (expand_insn): Same.
+ * optabs.h (create_expand_operand): Same.
+ (create_fixed_operand): Same.
+ (create_output_operand): Same.
+ (create_input_operand): Same.
+ (create_convert_operand_to): Same.
+ (create_convert_operand_from): Same.
+ * optinfo.h: Same.
+ * poly-int.h: Same.
+ * predict.c (optimize_insn_for_speed_p): Same.
+ (optimize_loop_for_size_p): Same.
+ (optimize_loop_for_speed_p): Same.
+ (optimize_loop_nest_for_speed_p): Same.
+ (get_base_value): Same.
+ (predicted_by_loop_heuristics_p): Same.
+ (predict_extra_loop_exits): Same.
+ (predict_loops): Same.
+ (predict_paths_for_bb): Same.
+ (predict_paths_leading_to): Same.
+ (propagate_freq): Same.
+ (pass_profile::execute): Same.
+ * predict.h: Same.
+ * profile-count.c (profile_count::differs_from_p): Same.
+ (profile_probability::differs_lot_from_p): Same.
+ * profile-count.h: Same.
+ * profile.c (branch_prob): Same.
+ * regrename.c (free_chain_data): Same.
+ (mark_conflict): Same.
+ (create_new_chain): Same.
+ (merge_overlapping_regs): Same.
+ (init_rename_info): Same.
+ (merge_chains): Same.
+ (regrename_analyze): Same.
+ (regrename_do_replace): Same.
+ (scan_rtx_reg): Same.
+ (record_out_operands): Same.
+ (build_def_use): Same.
+ * regrename.h: Same.
+ * reload.h: Same.
+ * reload1.c (init_reload): Same.
+ (maybe_fix_stack_asms): Same.
+ (copy_reloads): Same.
+ (count_pseudo): Same.
+ (count_spilled_pseudo): Same.
+ (find_reg): Same.
+ (find_reload_regs): Same.
+ (select_reload_regs): Same.
+ (spill_hard_reg): Same.
+ (fixup_eh_region_note): Same.
+ (set_reload_reg): Same.
+ (allocate_reload_reg): Same.
+ (compute_reload_subreg_offset): Same.
+ (reload_adjust_reg_for_icode): Same.
+ (emit_input_reload_insns): Same.
+ (emit_output_reload_insns): Same.
+ (do_input_reload): Same.
+ (inherit_piecemeal_p): Same.
+ * rtl.h: Same.
+ * sanopt.c (maybe_get_dominating_check): Same.
+ (maybe_optimize_ubsan_ptr_ifn): Same.
+ (can_remove_asan_check): Same.
+ (maybe_optimize_asan_check_ifn): Same.
+ (sanopt_optimize_walker): Same.
+ * sched-deps.c (add_dependence_list): Same.
+ (chain_to_prev_insn): Same.
+ (add_insn_mem_dependence): Same.
+ (create_insn_reg_set): Same.
+ (maybe_extend_reg_info_p): Same.
+ (sched_analyze_reg): Same.
+ (sched_analyze_1): Same.
+ (get_implicit_reg_pending_clobbers): Same.
+ (chain_to_prev_insn_p): Same.
+ (deps_analyze_insn): Same.
+ (deps_start_bb): Same.
+ (sched_free_deps): Same.
+ (init_deps): Same.
+ (init_deps_reg_last): Same.
+ (free_deps): Same.
+ * sched-ebb.c: Same.
+ * sched-int.h: Same.
+ * sched-rgn.c (add_branch_dependences): Same.
+ (concat_insn_mem_list): Same.
+ (deps_join): Same.
+ (sched_rgn_compute_dependencies): Same.
+ * sel-sched-ir.c (reset_target_context): Same.
+ (copy_deps_context): Same.
+ (init_id_from_df): Same.
+ (has_dependence_p): Same.
+ (change_loops_latches): Same.
+ (bb_top_order_comparator): Same.
+ (make_region_from_loop_preheader): Same.
+ (sel_init_pipelining): Same.
+ (get_loop_nest_for_rgn): Same.
+ (make_regions_from_the_rest): Same.
+ (sel_is_loop_preheader_p): Same.
+ * sel-sched-ir.h (inner_loop_header_p): Same.
+ (get_all_loop_exits): Same.
+ * selftest.h: Same.
+ * sese.c (sese_build_liveouts): Same.
+ (sese_insert_phis_for_liveouts): Same.
+ * sese.h (defined_in_sese_p): Same.
+ * sreal.c (sreal::stream_out): Same.
+ * sreal.h: Same.
+ * streamer-hooks.h: Same.
+ * target-globals.c (save_target_globals): Same.
+ * target-globals.h: Same.
+ * target.def: Same.
+ * target.h: Same.
+ * targhooks.c (default_has_ifunc_p): Same.
+ (default_empty_mask_is_expensive): Same.
+ (default_init_cost): Same.
+ * targhooks.h: Same.
+ * toplev.c: Same.
+ * tree-affine.c (aff_combination_mult): Same.
+ (aff_combination_expand): Same.
+ (aff_combination_constant_multiple_p): Same.
+ * tree-affine.h: Same.
+ * tree-cfg.c (build_gimple_cfg): Same.
+ (replace_loop_annotate_in_block): Same.
+ (replace_uses_by): Same.
+ (remove_bb): Same.
+ (dump_cfg_stats): Same.
+ (gimple_duplicate_sese_region): Same.
+ (gimple_duplicate_sese_tail): Same.
+ (move_block_to_fn): Same.
+ (replace_block_vars_by_duplicates): Same.
+ (move_sese_region_to_fn): Same.
+ (print_loops_bb): Same.
+ (print_loop): Same.
+ (print_loops): Same.
+ (debug): Same.
+ (debug_loops): Same.
+ * tree-cfg.h: Same.
+ * tree-chrec.c (chrec_fold_plus_poly_poly): Same.
+ (chrec_fold_multiply_poly_poly): Same.
+ (chrec_evaluate): Same.
+ (chrec_component_in_loop_num): Same.
+ (reset_evolution_in_loop): Same.
+ (is_multivariate_chrec): Same.
+ (chrec_contains_symbols): Same.
+ (nb_vars_in_chrec): Same.
+ (chrec_convert_1): Same.
+ (chrec_convert_aggressive): Same.
+ * tree-chrec.h: Same.
+ * tree-core.h: Same.
+ * tree-data-ref.c (dump_data_dependence_relation): Same.
+ (canonicalize_base_object_address): Same.
+ (data_ref_compare_tree): Same.
+ (prune_runtime_alias_test_list): Same.
+ (get_segment_min_max): Same.
+ (create_intersect_range_checks): Same.
+ (conflict_fn_no_dependence): Same.
+ (object_address_invariant_in_loop_p): Same.
+ (analyze_ziv_subscript): Same.
+ (analyze_siv_subscript_cst_affine): Same.
+ (analyze_miv_subscript): Same.
+ (analyze_overlapping_iterations): Same.
+ (build_classic_dist_vector_1): Same.
+ (add_other_self_distances): Same.
+ (same_access_functions): Same.
+ (build_classic_dir_vector): Same.
+ (subscript_dependence_tester_1): Same.
+ (subscript_dependence_tester): Same.
+ (access_functions_are_affine_or_constant_p): Same.
+ (get_references_in_stmt): Same.
+ (loop_nest_has_data_refs): Same.
+ (graphite_find_data_references_in_stmt): Same.
+ (find_data_references_in_bb): Same.
+ (get_base_for_alignment): Same.
+ (find_loop_nest_1): Same.
+ (find_loop_nest): Same.
+ * tree-data-ref.h (dr_alignment): Same.
+ (ddr_dependence_level): Same.
+ * tree-if-conv.c (fold_build_cond_expr): Same.
+ (add_to_predicate_list): Same.
+ (add_to_dst_predicate_list): Same.
+ (phi_convertible_by_degenerating_args): Same.
+ (idx_within_array_bound): Same.
+ (all_preds_critical_p): Same.
+ (pred_blocks_visited_p): Same.
+ (predicate_bbs): Same.
+ (build_region): Same.
+ (if_convertible_loop_p_1): Same.
+ (is_cond_scalar_reduction): Same.
+ (predicate_scalar_phi): Same.
+ (remove_conditions_and_labels): Same.
+ (combine_blocks): Same.
+ (version_loop_for_if_conversion): Same.
+ (versionable_outer_loop_p): Same.
+ (ifcvt_local_dce): Same.
+ (tree_if_conversion): Same.
+ (pass_if_conversion::gate): Same.
+ * tree-if-conv.h: Same.
+ * tree-inline.c (maybe_move_debug_stmts_to_successors): Same.
+ * tree-loop-distribution.c (bb_top_order_cmp): Same.
+ (free_rdg): Same.
+ (stmt_has_scalar_dependences_outside_loop): Same.
+ (copy_loop_before): Same.
+ (create_bb_after_loop): Same.
+ (const_with_all_bytes_same): Same.
+ (generate_memset_builtin): Same.
+ (generate_memcpy_builtin): Same.
+ (destroy_loop): Same.
+ (build_rdg_partition_for_vertex): Same.
+ (compute_access_range): Same.
+ (data_ref_segment_size): Same.
+ (latch_dominated_by_data_ref): Same.
+ (compute_alias_check_pairs): Same.
+ (fuse_memset_builtins): Same.
+ (finalize_partitions): Same.
+ (find_seed_stmts_for_distribution): Same.
+ (prepare_perfect_loop_nest): Same.
+ * tree-parloops.c (lambda_transform_legal_p): Same.
+ (loop_parallel_p): Same.
+ (reduc_stmt_res): Same.
+ (add_field_for_name): Same.
+ (create_call_for_reduction_1): Same.
+ (replace_uses_in_bb_by): Same.
+ (transform_to_exit_first_loop_alt): Same.
+ (try_transform_to_exit_first_loop_alt): Same.
+ (transform_to_exit_first_loop): Same.
+ (num_phis): Same.
+ (gen_parallel_loop): Same.
+ (gather_scalar_reductions): Same.
+ (get_omp_data_i_param): Same.
+ (try_create_reduction_list): Same.
+ (oacc_entry_exit_single_gang): Same.
+ (parallelize_loops): Same.
+ * tree-pass.h: Same.
+ * tree-predcom.c (determine_offset): Same.
+ (last_always_executed_block): Same.
+ (split_data_refs_to_components): Same.
+ (suitable_component_p): Same.
+ (valid_initializer_p): Same.
+ (find_looparound_phi): Same.
+ (insert_looparound_copy): Same.
+ (add_looparound_copies): Same.
+ (determine_roots_comp): Same.
+ (predcom_tmp_var): Same.
+ (initialize_root_vars): Same.
+ (initialize_root_vars_store_elim_1): Same.
+ (initialize_root_vars_store_elim_2): Same.
+ (finalize_eliminated_stores): Same.
+ (initialize_root_vars_lm): Same.
+ (remove_stmt): Same.
+ (determine_unroll_factor): Same.
+ (execute_pred_commoning_cbck): Same.
+ (base_names_in_chain_on): Same.
+ (combine_chains): Same.
+ (pcom_stmt_dominates_stmt_p): Same.
+ (try_combine_chains): Same.
+ (prepare_initializers_chain_store_elim): Same.
+ (prepare_initializers_chain): Same.
+ (prepare_initializers): Same.
+ (prepare_finalizers_chain): Same.
+ (prepare_finalizers): Same.
+ (insert_init_seqs): Same.
+ * tree-scalar-evolution.c (loop_phi_node_p): Same.
+ (compute_overall_effect_of_inner_loop): Same.
+ (add_to_evolution_1): Same.
+ (add_to_evolution): Same.
+ (follow_ssa_edge_binary): Same.
+ (follow_ssa_edge_expr): Same.
+ (backedge_phi_arg_p): Same.
+ (follow_ssa_edge_in_condition_phi_branch): Same.
+ (follow_ssa_edge_in_condition_phi): Same.
+ (follow_ssa_edge_inner_loop_phi): Same.
+ (follow_ssa_edge): Same.
+ (analyze_evolution_in_loop): Same.
+ (analyze_initial_condition): Same.
+ (interpret_loop_phi): Same.
+ (interpret_condition_phi): Same.
+ (interpret_rhs_expr): Same.
+ (interpret_expr): Same.
+ (interpret_gimple_assign): Same.
+ (analyze_scalar_evolution_1): Same.
+ (analyze_scalar_evolution): Same.
+ (analyze_scalar_evolution_for_address_of): Same.
+ (get_instantiated_value_entry): Same.
+ (loop_closed_phi_def): Same.
+ (instantiate_scev_name): Same.
+ (instantiate_scev_poly): Same.
+ (instantiate_scev_binary): Same.
+ (instantiate_scev_convert): Same.
+ (instantiate_scev_not): Same.
+ (instantiate_scev_r): Same.
+ (instantiate_scev): Same.
+ (resolve_mixers): Same.
+ (initialize_scalar_evolutions_analyzer): Same.
+ (scev_reset_htab): Same.
+ (scev_reset): Same.
+ (derive_simple_iv_with_niters): Same.
+ (simple_iv_with_niters): Same.
+ (expression_expensive_p): Same.
+ (final_value_replacement_loop): Same.
+ * tree-scalar-evolution.h (block_before_loop): Same.
+ * tree-ssa-address.h: Same.
+ * tree-ssa-dce.c (find_obviously_necessary_stmts): Same.
+ * tree-ssa-dom.c (edge_info::record_simple_equiv): Same.
+ (record_edge_info): Same.
+ * tree-ssa-live.c (var_map_base_fini): Same.
+ (remove_unused_locals): Same.
+ * tree-ssa-live.h: Same.
+ * tree-ssa-loop-ch.c (should_duplicate_loop_header_p): Same.
+ (pass_ch_vect::execute): Same.
+ (pass_ch::process_loop_p): Same.
+ * tree-ssa-loop-im.c (mem_ref_hasher::hash): Same.
+ (movement_possibility): Same.
+ (outermost_invariant_loop): Same.
+ (stmt_cost): Same.
+ (determine_max_movement): Same.
+ (invariantness_dom_walker::before_dom_children): Same.
+ (move_computations): Same.
+ (may_move_till): Same.
+ (force_move_till_op): Same.
+ (force_move_till): Same.
+ (memref_free): Same.
+ (record_mem_ref_loc): Same.
+ (set_ref_stored_in_loop): Same.
+ (mark_ref_stored): Same.
+ (sort_bbs_in_loop_postorder_cmp): Same.
+ (sort_locs_in_loop_postorder_cmp): Same.
+ (analyze_memory_references): Same.
+ (mem_refs_may_alias_p): Same.
+ (find_ref_loc_in_loop_cmp): Same.
+ (rewrite_mem_ref_loc::operator): Same.
+ (first_mem_ref_loc_1::operator): Same.
+ (sm_set_flag_if_changed::operator): Same.
+ (execute_sm_if_changed_flag_set): Same.
+ (execute_sm): Same.
+ (hoist_memory_references): Same.
+ (ref_always_accessed::operator): Same.
+ (refs_independent_p): Same.
+ (record_dep_loop): Same.
+ (ref_indep_loop_p_1): Same.
+ (ref_indep_loop_p): Same.
+ (can_sm_ref_p): Same.
+ (find_refs_for_sm): Same.
+ (loop_suitable_for_sm): Same.
+ (store_motion_loop): Same.
+ (store_motion): Same.
+ (fill_always_executed_in): Same.
+ * tree-ssa-loop-ivcanon.c (constant_after_peeling): Same.
+ (estimated_unrolled_size): Same.
+ (loop_edge_to_cancel): Same.
+ (remove_exits_and_undefined_stmts): Same.
+ (remove_redundant_iv_tests): Same.
+ (unloop_loops): Same.
+ (estimated_peeled_sequence_size): Same.
+ (try_peel_loop): Same.
+ (canonicalize_loop_induction_variables): Same.
+ (canonicalize_induction_variables): Same.
+ * tree-ssa-loop-ivopts.c (iv_inv_expr_hasher::equal): Same.
+ (name_info): Same.
+ (stmt_after_inc_pos): Same.
+ (contains_abnormal_ssa_name_p): Same.
+ (niter_for_exit): Same.
+ (find_bivs): Same.
+ (mark_bivs): Same.
+ (find_givs_in_bb): Same.
+ (find_induction_variables): Same.
+ (find_interesting_uses_cond): Same.
+ (outermost_invariant_loop_for_expr): Same.
+ (idx_find_step): Same.
+ (add_candidate_1): Same.
+ (add_iv_candidate_derived_from_uses): Same.
+ (alloc_use_cost_map): Same.
+ (prepare_decl_rtl): Same.
+ (generic_predict_doloop_p): Same.
+ (computation_cost): Same.
+ (determine_common_wider_type): Same.
+ (get_computation_aff_1): Same.
+ (get_use_type): Same.
+ (determine_group_iv_cost_address): Same.
+ (iv_period): Same.
+ (difference_cannot_overflow_p): Same.
+ (may_eliminate_iv): Same.
+ (determine_set_costs): Same.
+ (cheaper_cost_pair): Same.
+ (compare_cost_pair): Same.
+ (iv_ca_cand_for_group): Same.
+ (iv_ca_recount_cost): Same.
+ (iv_ca_set_remove_invs): Same.
+ (iv_ca_set_no_cp): Same.
+ (iv_ca_set_add_invs): Same.
+ (iv_ca_set_cp): Same.
+ (iv_ca_add_group): Same.
+ (iv_ca_cost): Same.
+ (iv_ca_compare_deps): Same.
+ (iv_ca_delta_reverse): Same.
+ (iv_ca_delta_commit): Same.
+ (iv_ca_cand_used_p): Same.
+ (iv_ca_delta_free): Same.
+ (iv_ca_new): Same.
+ (iv_ca_free): Same.
+ (iv_ca_dump): Same.
+ (iv_ca_extend): Same.
+ (iv_ca_narrow): Same.
+ (iv_ca_prune): Same.
+ (cheaper_cost_with_cand): Same.
+ (iv_ca_replace): Same.
+ (try_add_cand_for): Same.
+ (get_initial_solution): Same.
+ (try_improve_iv_set): Same.
+ (find_optimal_iv_set_1): Same.
+ (create_new_iv): Same.
+ (rewrite_use_compare): Same.
+ (remove_unused_ivs): Same.
+ (determine_scaling_factor): Same.
+ * tree-ssa-loop-ivopts.h: Same.
+ * tree-ssa-loop-manip.c (create_iv): Same.
+ (compute_live_loop_exits): Same.
+ (add_exit_phi): Same.
+ (add_exit_phis): Same.
+ (find_uses_to_rename_use): Same.
+ (find_uses_to_rename_def): Same.
+ (find_uses_to_rename_in_loop): Same.
+ (rewrite_into_loop_closed_ssa): Same.
+ (check_loop_closed_ssa_bb): Same.
+ (split_loop_exit_edge): Same.
+ (ip_end_pos): Same.
+ (ip_normal_pos): Same.
+ (copy_phi_node_args): Same.
+ (gimple_duplicate_loop_to_header_edge): Same.
+ (can_unroll_loop_p): Same.
+ (determine_exit_conditions): Same.
+ (scale_dominated_blocks_in_loop): Same.
+ (niter_for_unrolled_loop): Same.
+ (tree_transform_and_unroll_loop): Same.
+ (rewrite_all_phi_nodes_with_iv): Same.
+ * tree-ssa-loop-manip.h: Same.
+ * tree-ssa-loop-niter.c (number_of_iterations_ne_max): Same.
+ (number_of_iterations_ne): Same.
+ (assert_no_overflow_lt): Same.
+ (assert_loop_rolls_lt): Same.
+ (number_of_iterations_lt): Same.
+ (adjust_cond_for_loop_until_wrap): Same.
+ (tree_simplify_using_condition): Same.
+ (simplify_using_initial_conditions): Same.
+ (simplify_using_outer_evolutions): Same.
+ (loop_only_exit_p): Same.
+ (ssa_defined_by_minus_one_stmt_p): Same.
+ (number_of_iterations_popcount): Same.
+ (number_of_iterations_exit): Same.
+ (find_loop_niter): Same.
+ (finite_loop_p): Same.
+ (chain_of_csts_start): Same.
+ (get_val_for): Same.
+ (loop_niter_by_eval): Same.
+ (derive_constant_upper_bound_ops): Same.
+ (do_warn_aggressive_loop_optimizations): Same.
+ (record_estimate): Same.
+ (get_cst_init_from_scev): Same.
+ (record_nonwrapping_iv): Same.
+ (idx_infer_loop_bounds): Same.
+ (infer_loop_bounds_from_ref): Same.
+ (infer_loop_bounds_from_array): Same.
+ (infer_loop_bounds_from_pointer_arith): Same.
+ (infer_loop_bounds_from_signedness): Same.
+ (bound_index): Same.
+ (discover_iteration_bound_by_body_walk): Same.
+ (maybe_lower_iteration_bound): Same.
+ (estimate_numbers_of_iterations): Same.
+ (estimated_loop_iterations): Same.
+ (estimated_loop_iterations_int): Same.
+ (max_loop_iterations): Same.
+ (max_loop_iterations_int): Same.
+ (likely_max_loop_iterations): Same.
+ (likely_max_loop_iterations_int): Same.
+ (estimated_stmt_executions_int): Same.
+ (max_stmt_executions): Same.
+ (likely_max_stmt_executions): Same.
+ (estimated_stmt_executions): Same.
+ (stmt_dominates_stmt_p): Same.
+ (nowrap_type_p): Same.
+ (loop_exits_before_overflow): Same.
+ (scev_var_range_cant_overflow): Same.
+ (scev_probably_wraps_p): Same.
+ (free_numbers_of_iterations_estimates): Same.
+ * tree-ssa-loop-niter.h: Same.
+ * tree-ssa-loop-prefetch.c (release_mem_refs): Same.
+ (idx_analyze_ref): Same.
+ (analyze_ref): Same.
+ (gather_memory_references_ref): Same.
+ (mark_nontemporal_store): Same.
+ (emit_mfence_after_loop): Same.
+ (may_use_storent_in_loop_p): Same.
+ (mark_nontemporal_stores): Same.
+ (should_unroll_loop_p): Same.
+ (volume_of_dist_vector): Same.
+ (add_subscript_strides): Same.
+ (self_reuse_distance): Same.
+ (insn_to_prefetch_ratio_too_small_p): Same.
+ * tree-ssa-loop-split.c (split_at_bb_p): Same.
+ (patch_loop_exit): Same.
+ (find_or_create_guard_phi): Same.
+ (easy_exit_values): Same.
+ (connect_loop_phis): Same.
+ (connect_loops): Same.
+ (compute_new_first_bound): Same.
+ (split_loop): Same.
+ (tree_ssa_split_loops): Same.
+ * tree-ssa-loop-unswitch.c (tree_ssa_unswitch_loops): Same.
+ (is_maybe_undefined): Same.
+ (tree_may_unswitch_on): Same.
+ (simplify_using_entry_checks): Same.
+ (tree_unswitch_single_loop): Same.
+ (tree_unswitch_loop): Same.
+ (tree_unswitch_outer_loop): Same.
+ (empty_bb_without_guard_p): Same.
+ (used_outside_loop_p): Same.
+ (get_vop_from_header): Same.
+ (hoist_guard): Same.
+ * tree-ssa-loop.c (gate_oacc_kernels): Same.
+ (get_lsm_tmp_name): Same.
+ * tree-ssa-loop.h: Same.
+ * tree-ssa-reassoc.c (add_repeat_to_ops_vec): Same.
+ (build_and_add_sum): Same.
+ (no_side_effect_bb): Same.
+ (get_ops): Same.
+ (linearize_expr): Same.
+ (should_break_up_subtract): Same.
+ (linearize_expr_tree): Same.
+ * tree-ssa-scopedtables.c: Same.
+ * tree-ssa-scopedtables.h: Same.
+ * tree-ssa-structalias.c (condense_visit): Same.
+ (label_visit): Same.
+ (dump_pred_graph): Same.
+ (perform_var_substitution): Same.
+ (move_complex_constraints): Same.
+ (remove_preds_and_fake_succs): Same.
+ * tree-ssa-threadupdate.c (dbds_continue_enumeration_p): Same.
+ (determine_bb_domination_status): Same.
+ (duplicate_thread_path): Same.
+ (thread_through_all_blocks): Same.
+ * tree-ssa-threadupdate.h: Same.
+ * tree-streamer-in.c (streamer_read_string_cst): Same.
+ (input_identifier): Same.
+ (unpack_ts_type_common_value_fields): Same.
+ (unpack_ts_block_value_fields): Same.
+ (unpack_ts_translation_unit_decl_value_fields): Same.
+ (unpack_ts_omp_clause_value_fields): Same.
+ (streamer_read_tree_bitfields): Same.
+ (streamer_alloc_tree): Same.
+ (lto_input_ts_common_tree_pointers): Same.
+ (lto_input_ts_vector_tree_pointers): Same.
+ (lto_input_ts_poly_tree_pointers): Same.
+ (lto_input_ts_complex_tree_pointers): Same.
+ (lto_input_ts_decl_minimal_tree_pointers): Same.
+ (lto_input_ts_decl_common_tree_pointers): Same.
+ (lto_input_ts_decl_non_common_tree_pointers): Same.
+ (lto_input_ts_decl_with_vis_tree_pointers): Same.
+ (lto_input_ts_field_decl_tree_pointers): Same.
+ (lto_input_ts_function_decl_tree_pointers): Same.
+ (lto_input_ts_type_common_tree_pointers): Same.
+ (lto_input_ts_type_non_common_tree_pointers): Same.
+ (lto_input_ts_list_tree_pointers): Same.
+ (lto_input_ts_vec_tree_pointers): Same.
+ (lto_input_ts_exp_tree_pointers): Same.
+ (lto_input_ts_block_tree_pointers): Same.
+ (lto_input_ts_binfo_tree_pointers): Same.
+ (lto_input_ts_constructor_tree_pointers): Same.
+ (lto_input_ts_omp_clause_tree_pointers): Same.
+ (streamer_read_tree_body): Same.
+ * tree-streamer.h: Same.
+ * tree-switch-conversion.c (bit_test_cluster::is_beneficial): Same.
+ * tree-vect-data-refs.c (vect_get_smallest_scalar_type): Same.
+ (vect_analyze_possibly_independent_ddr): Same.
+ (vect_analyze_data_ref_dependence): Same.
+ (vect_compute_data_ref_alignment): Same.
+ (vect_enhance_data_refs_alignment): Same.
+ (vect_analyze_data_ref_access): Same.
+ (vect_check_gather_scatter): Same.
+ (vect_find_stmt_data_reference): Same.
+ (vect_create_addr_base_for_vector_ref): Same.
+ (vect_setup_realignment): Same.
+ (vect_supportable_dr_alignment): Same.
+ * tree-vect-loop-manip.c (rename_variables_in_bb): Same.
+ (adjust_phi_and_debug_stmts): Same.
+ (vect_set_loop_mask): Same.
+ (add_preheader_seq): Same.
+ (vect_maybe_permute_loop_masks): Same.
+ (vect_set_loop_masks_directly): Same.
+ (vect_set_loop_condition_masked): Same.
+ (vect_set_loop_condition_unmasked): Same.
+ (slpeel_duplicate_current_defs_from_edges): Same.
+ (slpeel_add_loop_guard): Same.
+ (slpeel_can_duplicate_loop_p): Same.
+ (create_lcssa_for_virtual_phi): Same.
+ (iv_phi_p): Same.
+ (vect_update_ivs_after_vectorizer): Same.
+ (vect_gen_vector_loop_niters_mult_vf): Same.
+ (slpeel_update_phi_nodes_for_loops): Same.
+ (slpeel_update_phi_nodes_for_guard1): Same.
+ (find_guard_arg): Same.
+ (slpeel_update_phi_nodes_for_guard2): Same.
+ (slpeel_update_phi_nodes_for_lcssa): Same.
+ (vect_do_peeling): Same.
+ (vect_create_cond_for_alias_checks): Same.
+ (vect_loop_versioning): Same.
+ * tree-vect-loop.c (vect_determine_vf_for_stmt): Same.
+ (vect_inner_phi_in_double_reduction_p): Same.
+ (vect_analyze_scalar_cycles_1): Same.
+ (vect_fixup_scalar_cycles_with_patterns): Same.
+ (vect_get_loop_niters): Same.
+ (bb_in_loop_p): Same.
+ (vect_get_max_nscalars_per_iter): Same.
+ (vect_verify_full_masking): Same.
+ (vect_compute_single_scalar_iteration_cost): Same.
+ (vect_analyze_loop_form_1): Same.
+ (vect_analyze_loop_form): Same.
+ (vect_active_double_reduction_p): Same.
+ (vect_analyze_loop_operations): Same.
+ (neutral_op_for_slp_reduction): Same.
+ (vect_is_simple_reduction): Same.
+ (vect_model_reduction_cost): Same.
+ (get_initial_def_for_reduction): Same.
+ (get_initial_defs_for_reduction): Same.
+ (vect_create_epilog_for_reduction): Same.
+ (vectorize_fold_left_reduction): Same.
+ (vectorizable_reduction): Same.
+ (vectorizable_induction): Same.
+ (vectorizable_live_operation): Same.
+ (loop_niters_no_overflow): Same.
+ (vect_get_loop_mask): Same.
+ (vect_transform_loop_stmt): Same.
+ (vect_transform_loop): Same.
+ * tree-vect-patterns.c (vect_reassociating_reduction_p): Same.
+ (vect_determine_precisions): Same.
+ (vect_pattern_recog_1): Same.
+ * tree-vect-slp.c (vect_analyze_slp_instance): Same.
+ * tree-vect-stmts.c (stmt_vectype): Same.
+ (process_use): Same.
+ (vect_init_vector_1): Same.
+ (vect_truncate_gather_scatter_offset): Same.
+ (get_group_load_store_type): Same.
+ (vect_build_gather_load_calls): Same.
+ (vect_get_strided_load_store_ops): Same.
+ (vectorizable_simd_clone_call): Same.
+ (vectorizable_store): Same.
+ (permute_vec_elements): Same.
+ (vectorizable_load): Same.
+ (vect_transform_stmt): Same.
+ (supportable_widening_operation): Same.
+ * tree-vectorizer.c (vec_info::replace_stmt): Same.
+ (vec_info::free_stmt_vec_info): Same.
+ (vect_free_loop_info_assumptions): Same.
+ (vect_loop_vectorized_call): Same.
+ (set_uid_loop_bbs): Same.
+ (vectorize_loops): Same.
+ * tree-vectorizer.h (STMT_VINFO_BB_VINFO): Same.
+ * tree.c (add_tree_to_fld_list): Same.
+ (fld_type_variant_equal_p): Same.
+ (fld_decl_context): Same.
+ (fld_incomplete_type_of): Same.
+ (free_lang_data_in_binfo): Same.
+ (need_assembler_name_p): Same.
+ (find_decls_types_r): Same.
+ (get_eh_types_for_runtime): Same.
+ (find_decls_types_in_eh_region): Same.
+ (find_decls_types_in_node): Same.
+ (assign_assembler_name_if_needed): Same.
+ * value-prof.c (stream_out_histogram_value): Same.
+ * value-prof.h: Same.
+ * var-tracking.c (use_narrower_mode): Same.
+ (prepare_call_arguments): Same.
+ (vt_expand_loc_callback): Same.
+ (resolve_expansions_pending_recursion): Same.
+ (vt_expand_loc): Same.
+ * varasm.c (const_hash_1): Same.
+ (compare_constant): Same.
+ (tree_output_constant_def): Same.
+ (simplify_subtraction): Same.
+ (get_pool_constant): Same.
+ (output_constant_pool_2): Same.
+ (output_constant_pool_1): Same.
+ (mark_constants_in_pattern): Same.
+ (mark_constant_pool): Same.
+ (get_section_anchor): Same.
+ * vr-values.c (compare_range_with_value): Same.
+ (vr_values::extract_range_from_phi_node): Same.
+ * vr-values.h: Same.
+ * web.c (unionfind_union): Same.
+ * wide-int.h: Same.
+
2019-07-09 Martin Sebor <msebor@redhat.com>

	PR c++/61339
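The hunks that follow apply the convention named in the entry above: PODs keep
the struct key, other types use class, and every redeclaration or
elaborated-type-specifier (including casts) uses the same key as the type's
definition, as the class-key mismatch warning proposed in PR c++/61339
requires.  A minimal sketch of the mismatch being cleaned up (illustrative
only, not taken from the patch):

    class edge_info { /* ... */ };          /* definition uses 'class' */
    struct edge_info;                       /* mismatched redeclaration: warned */
    void *p = 0;
    edge_info *q = (class edge_info *) p;   /* the key in a cast must match too */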
/* Intermediate edge info used when propagating AutoFDO profile information.
   We can't use edge->count () directly since it's computed from the edge's
   probability, and the probability is not yet decided during propagation.  */
-#define AFDO_EINFO(e) ((struct edge_info *) e->aux)
+#define AFDO_EINFO(e) ((class edge_info *) e->aux)
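A sketch of how the cached info is read back during propagation; the member
names are assumptions for illustration, not necessarily the real edge_info
interface:

    if (AFDO_EINFO (e) != NULL && AFDO_EINFO (e)->is_annotated ())
      total += AFDO_EINFO (e)->get_count ();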
class edge_info
{
public:
PTR GTY ((skip (""))) aux;
/* Innermost loop containing the block. */
- struct loop *loop_father;
+ class loop *loop_father;
/* The dominance and postdominance information node. */
struct et_node * GTY ((skip (""))) dom[2];
bit_obstack = &bitmap_default_obstack;
map = bit_obstack->heads;
if (map)
- bit_obstack->heads = (struct bitmap_head *) map->first;
+ bit_obstack->heads = (class bitmap_head *) map->first;
else
map = XOBNEW (&bit_obstack->obstack, bitmap_head);
bitmap_initialize (map, bit_obstack PASS_MEM_STAT);
/* Obstack for allocating bitmaps and elements from. */
struct bitmap_obstack {
struct bitmap_element *elements;
- struct bitmap_head *heads;
+ bitmap_head *heads;
struct obstack obstack;
};
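Note the second fix visible in this hunk: the heads member loses its class-key
altogether.  Once bitmap_head has been declared, no elaborated-type-specifier
is needed at the use site, and omitting the key sidesteps the mismatch
entirely.  Sketch (illustrative):

    class bitmap_head;        /* the type is declared as a class ... */
    struct bitmap_obstack {
      bitmap_head *heads;     /* ... but no key is needed at the use site */
    };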
if (targetm.have_prefetch ())
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_address_operand (&ops[0], op0);
create_integer_operand (&ops[1], INTVAL (op1));
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[1];
+ class expand_operand ops[1];
rtx_insn *last = get_last_insn ();
tree orig_arg = arg;
if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
return NULL_RTX;
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx pat;
tree len;
tree src = CALL_EXPR_ARG (exp, 0);
static rtx
expand_movstr (tree dest, tree src, rtx target, memop_ret retmode)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
rtx dest_mem;
rtx src_mem;
if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
target = NULL_RTX;
- struct expand_operand ops[4];
+ class expand_operand ops[4];
create_output_operand (&ops[0], target, insn_mode);
create_fixed_operand (&ops[1], arg1_rtx);
create_fixed_operand (&ops[2], arg2_rtx);
if (targetm.have_clear_cache ())
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
begin = CALL_EXPR_ARG (exp, 0);
begin_rtx = expand_expr (begin, NULL_RTX, Pmode, EXPAND_NORMAL);
machine_mode mode = TYPE_MODE (TREE_TYPE (flag));
enum rtx_code code;
optab optab;
- struct expand_operand ops[5];
+ class expand_operand ops[5];
gcc_assert (flag_inline_atomics);
icode = direct_optab_handler (get_thread_pointer_optab, Pmode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand op;
+ class expand_operand op;
/* If the target is not suitable then create a new target. */
if (target == NULL_RTX
|| !REG_P (target)
icode = direct_optab_handler (set_thread_pointer_optab, Pmode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand op;
+ class expand_operand op;
rtx val = expand_expr (CALL_EXPR_ARG (exp, 0), NULL_RTX,
Pmode, EXPAND_NORMAL);
create_input_operand (&op, val, Pmode);
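The builtins.c hunks above are one mechanical change repeated: expand_operand
is defined with the class key (in optabs.h, after this patch), so local
declarations spell it class expand_operand to match.  As with bitmap_head, the
key on a local declaration could equally be dropped; sketch (illustrative):

    class expand_operand ops[3];   /* matches the definition's class-key */
    expand_operand op;             /* equally valid: no key at all */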
+2019-07-09 Martin Sebor <msebor@redhat.com>
+
+ PR c++/61339
+ * c-opts.c (handle_deferred_opts): Change class-key of PODs to struct
+ and others to class.
+ * c-pretty-print.h: Same.
+
2019-07-09 Martin Sebor <msebor@redhat.com>

	PR c++/61339
if (!deps_seen)
return;
- struct mkdeps *deps = cpp_get_deps (parse_in);
+ mkdeps *deps = cpp_get_deps (parse_in);
for (size_t i = 0; i < deferred_count; i++)
{
/* The data type used to bundle information necessary for pretty-printing
a C or C++ entity. */
-struct c_pretty_printer;
+class c_pretty_printer;
/* The type of a C pretty-printer 'member' function. */
typedef void (*c_pretty_print_fn) (c_pretty_printer *, tree);
+2019-07-09 Martin Sebor <msebor@redhat.com>
+
+ PR c++/61339
+ * c-decl.c (xref_tag): Change class-key of PODs to struct and others
+ to class.
+ (field_decl_cmp): Same.
+ * c-parser.c (c_parser_struct_or_union_specifier): Same.
+ * c-tree.h: Same.
+ * gimple-parser.c (c_parser_gimple_compound_statement): Same.
+
2019-07-09 Martin Sebor <msebor@redhat.com>

	PR c++/61339
/* Information for the struct or union currently being parsed, or
NULL if not parsing a struct or union. */
-static struct c_struct_parse_info *struct_parse_info;
+static class c_struct_parse_info *struct_parse_info;
/* Forward declarations. */
static tree lookup_name_in_scope (tree, struct c_scope *);
tree
start_struct (location_t loc, enum tree_code code, tree name,
- struct c_struct_parse_info **enclosing_struct_parse_info)
+ class c_struct_parse_info **enclosing_struct_parse_info)
{
/* If there is already a tag defined at this scope
(as a forward reference), just return it. */
tree
finish_struct (location_t loc, tree t, tree fieldlist, tree attributes,
- struct c_struct_parse_info *enclosing_struct_parse_info)
+ class c_struct_parse_info *enclosing_struct_parse_info)
{
tree x;
bool toplevel = file_scope == current_scope;
{
/* Parse a struct or union definition. Start the scope of the
tag before parsing components. */
- struct c_struct_parse_info *struct_info;
+ class c_struct_parse_info *struct_info;
tree type = start_struct (struct_loc, code, ident, &struct_info);
tree postfix_attrs;
/* We chain the components in reverse order, then put them in
/* in c-decl.c */
struct c_spot_bindings;
-struct c_struct_parse_info;
+class c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
- struct c_struct_parse_info *);
+ class c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
- struct c_struct_parse_info **);
+ class c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern void temp_store_parm_decls (tree, tree);
profile_probability::always ());
/* We leave the proper setting to fixup. */
- struct loop *loop_father = loops_for_fn (cfun)->tree_root;
+ class loop *loop_father = loops_for_fn (cfun)->tree_root;
/* If the new block is a loop header, allocate a loop
struct. Fixup will take care of proper placement within
the loop tree. */
}
else
{
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->num = is_loop_header_of;
loop->header = bb;
vec_safe_grow_cleared (loops_for_fn (cfun)->larray,
static void mark_referenced_regs (rtx *, refmarker_fn *mark, void *mark_arg);
static refmarker_fn mark_reg_as_referenced;
static refmarker_fn replace_reg_with_saved_mem;
-static int insert_save (struct insn_chain *, int, HARD_REG_SET *,
+static int insert_save (class insn_chain *, int, HARD_REG_SET *,
machine_mode *);
-static int insert_restore (struct insn_chain *, int, int, int,
+static int insert_restore (class insn_chain *, int, int, int,
machine_mode *);
-static struct insn_chain *insert_one_insn (struct insn_chain *, int, int,
+static class insn_chain *insert_one_insn (class insn_chain *, int, int,
rtx);
static void add_stored_regs (rtx, const_rtx, void *);
HARD_REG_SET hard_regs_used;
struct saved_hard_reg *saved_reg;
rtx_insn *insn;
- struct insn_chain *chain, *next;
+ class insn_chain *chain, *next;
unsigned int regno;
HARD_REG_SET hard_regs_to_save, used_regs, this_insn_sets;
reg_set_iterator rsi;
void
save_call_clobbered_regs (void)
{
- struct insn_chain *chain, *next, *last = NULL;
+ class insn_chain *chain, *next, *last = NULL;
machine_mode save_mode [FIRST_PSEUDO_REGISTER];
/* Computed in mark_set_regs, holds all registers set by the current
Return the extra number of registers saved. */
static int
-insert_restore (struct insn_chain *chain, int before_p, int regno,
+insert_restore (class insn_chain *chain, int before_p, int regno,
int maxrestore, machine_mode *save_mode)
{
int i, k;
rtx pat = NULL_RTX;
int code;
unsigned int numregs = 0;
- struct insn_chain *new_chain;
+ class insn_chain *new_chain;
rtx mem;
/* A common failure mode if register status is not correct in the
/* Like insert_restore above, but save registers instead. */
static int
-insert_save (struct insn_chain *chain, int regno,
+insert_save (class insn_chain *chain, int regno,
HARD_REG_SET *to_save, machine_mode *save_mode)
{
int i;
rtx pat = NULL_RTX;
int code;
unsigned int numregs = 0;
- struct insn_chain *new_chain;
+ class insn_chain *new_chain;
rtx mem;
/* A common failure mode if register status is not correct in the
}
/* Emit a new caller-save insn and set the code. */
-static struct insn_chain *
-insert_one_insn (struct insn_chain *chain, int before_p, int code, rtx pat)
+static class insn_chain *
+insert_one_insn (class insn_chain *chain, int before_p, int code, rtx pat)
{
rtx_insn *insn = chain->insn;
- struct insn_chain *new_chain;
+ class insn_chain *new_chain;
/* If INSN references CC0, put our insns in front of the insn that sets
CC0. This is always safe, since the only way we could be passed an
initialized so passes not needing this don't need to care. */
void
-set_loop_copy (struct loop *loop, struct loop *copy)
+set_loop_copy (class loop *loop, class loop *copy)
{
if (!copy)
copy_original_table_clear (loop_copy, loop->num);
/* Get the copy of LOOP. */
-struct loop *
-get_loop_copy (struct loop *loop)
+class loop *
+get_loop_copy (class loop *loop)
{
struct htab_bb_copy_original_entry *entry;
struct htab_bb_copy_original_entry key;
extern basic_block get_bb_original (basic_block);
extern void set_bb_copy (basic_block, basic_block);
extern basic_block get_bb_copy (basic_block);
-void set_loop_copy (struct loop *, struct loop *);
-struct loop *get_loop_copy (struct loop *);
+void set_loop_copy (class loop *, class loop *);
+class loop *get_loop_copy (class loop *);
/* Generic RAII class to allocate a bit from storage of integer type T.
The allocated bit is accessible as mask with the single bit set
extern int dfs_enumerate_from (basic_block, int,
bool (*)(const_basic_block, const void *),
basic_block *, int, const void *);
-extern void compute_dominance_frontiers (struct bitmap_head *);
-extern bitmap compute_idf (bitmap, struct bitmap_head *);
+extern void compute_dominance_frontiers (class bitmap_head *);
+extern bitmap compute_idf (bitmap, class bitmap_head *);
extern void bitmap_intersection_of_succs (sbitmap, sbitmap *, basic_block);
extern void bitmap_intersection_of_preds (sbitmap, sbitmap *, basic_block);
extern void bitmap_union_of_succs (sbitmap, sbitmap *, basic_block);
#define EOC ((size_t)-1)
/* We have an array of such objects while deciding allocation. */
-static struct stack_var *stack_vars;
+static class stack_var *stack_vars;
static size_t stack_vars_alloc;
static size_t stack_vars_num;
static hash_map<tree, size_t> *decl_to_stack_part;
static void
add_stack_var (tree decl, bool really_expand)
{
- struct stack_var *v;
+ class stack_var *v;
if (stack_vars_num >= stack_vars_alloc)
{
else
stack_vars_alloc = 32;
stack_vars
- = XRESIZEVEC (struct stack_var, stack_vars, stack_vars_alloc);
+ = XRESIZEVEC (class stack_var, stack_vars, stack_vars_alloc);
}
if (!decl_to_stack_part)
decl_to_stack_part = new hash_map<tree, size_t>;
static void
add_stack_var_conflict (size_t x, size_t y)
{
- struct stack_var *a = &stack_vars[x];
- struct stack_var *b = &stack_vars[y];
+ class stack_var *a = &stack_vars[x];
+ class stack_var *b = &stack_vars[y];
if (x == y)
return;
if (!a->conflicts)
static bool
stack_var_conflict_p (size_t x, size_t y)
{
- struct stack_var *a = &stack_vars[x];
- struct stack_var *b = &stack_vars[y];
+ class stack_var *a = &stack_vars[x];
+ class stack_var *b = &stack_vars[y];
if (x == y)
return false;
/* Partitions containing an SSA name result from gimple registers
unsigned i;
EXECUTE_IF_SET_IN_BITMAP (work, 0, i, bi)
{
- struct stack_var *a = &stack_vars[i];
+ class stack_var *a = &stack_vars[i];
if (!a->conflicts)
a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
bitmap_ior_into (a->conflicts, work);
static void
union_stack_vars (size_t a, size_t b)
{
- struct stack_var *vb = &stack_vars[b];
+ class stack_var *vb = &stack_vars[b];
bitmap_iterator bi;
unsigned u;
with that location. */
static void
-expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
+expand_stack_vars (bool (*pred) (size_t), class stack_vars_data *data)
{
size_t si, i, j, n = stack_vars_num;
poly_uint64 large_size = 0, large_alloc = 0;
/* Assign rtl to each variable based on these partitions. */
if (stack_vars_num > 0)
{
- struct stack_vars_data data;
+ class stack_vars_data data;
data.asan_base = NULL_RTX;
data.asan_alignb = 0;
{
if (ret != NULL)
{
- struct loop *loop
+ class loop *loop
= find_common_loop (single_pred (ret)->loop_father,
single_succ (ret)->loop_father);
add_bb_to_loop (ret, loop);
if (current_loops != NULL)
{
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
/* If we remove the header or the latch of a loop, mark the loop for
removal. */
profile_count count = e->count ();
edge f;
bool irr = (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
- struct loop *loop;
+ class loop *loop;
basic_block src = e->src, dest = e->dest;
if (!cfg_hooks->split_edge)
edge e, fallthru;
edge_iterator ei;
basic_block dummy, jump;
- struct loop *loop, *ploop, *cloop;
+ class loop *loop, *ploop, *cloop;
if (!cfg_hooks->make_forwarder_block)
internal_error ("%s does not support make_forwarder_block",
{
basic_block pred = single_pred (ret);
basic_block succ = single_succ (ret);
- struct loop *loop
+ class loop *loop
= find_common_loop (pred->loop_father, succ->loop_father);
rescan_loop_exit (e, false, true);
add_bb_to_loop (ret, loop);
of BB if the loop is not being copied. */
if (current_loops != NULL)
{
- struct loop *cloop = bb->loop_father;
- struct loop *copy = get_loop_copy (cloop);
+ class loop *cloop = bb->loop_father;
+ class loop *copy = get_loop_copy (cloop);
/* If we copied the loop header block but not the loop,
we have created a loop with multiple entries. Ditch the loop,
add the new block to the outer loop and arrange for a fixup. */
a need to call the tree_duplicate_loop_to_header_edge rather
than duplicate_loop_to_header_edge when we are in tree mode. */
bool
-cfg_hook_duplicate_loop_to_header_edge (struct loop *loop, edge e,
+cfg_hook_duplicate_loop_to_header_edge (class loop *loop, edge e,
unsigned int ndupl,
sbitmap wont_exit, edge orig,
vec<edge> *to_remove,
void
copy_bbs (basic_block *bbs, unsigned n, basic_block *new_bbs,
edge *edges, unsigned num_edges, edge *new_edges,
- struct loop *base, basic_block after, bool update_dominance)
+ class loop *base, basic_block after, bool update_dominance)
{
unsigned i, j;
basic_block bb, new_bb, dom_bb;
/* A hook for duplicating loop in CFG, currently this is used
in loop versioning. */
- bool (*cfg_hook_duplicate_loop_to_header_edge) (struct loop *, edge,
+ bool (*cfg_hook_duplicate_loop_to_header_edge) (class loop *, edge,
unsigned, sbitmap,
edge, vec<edge> *,
int);
extern int flow_call_edges_add (sbitmap);
extern void execute_on_growing_pred (edge);
extern void execute_on_shrinking_pred (edge);
-extern bool cfg_hook_duplicate_loop_to_header_edge (struct loop *loop, edge,
+extern bool cfg_hook_duplicate_loop_to_header_edge (class loop *loop, edge,
unsigned int ndupl,
sbitmap wont_exit,
edge orig,
extern bool can_copy_bbs_p (basic_block *, unsigned);
extern void copy_bbs (basic_block *, unsigned, basic_block *,
- edge *, unsigned, edge *, struct loop *,
+ edge *, unsigned, edge *, class loop *,
basic_block, bool);
void profile_record_check_consistency (profile_record *);
/* Return nonzero if the nodes of LOOP are a subset of OUTER. */
bool
-flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
+flow_loop_nested_p (const class loop *outer, const class loop *loop)
{
unsigned odepth = loop_depth (outer);
/* Returns the loop such that LOOP is nested DEPTH (indexed from zero)
loops within LOOP. */
-struct loop *
-superloop_at_depth (struct loop *loop, unsigned depth)
+class loop *
+superloop_at_depth (class loop *loop, unsigned depth)
{
unsigned ldepth = loop_depth (loop);
/* Returns the list of the latch edges of LOOP. */
static vec<edge>
-get_loop_latch_edges (const struct loop *loop)
+get_loop_latch_edges (const class loop *loop)
{
edge_iterator ei;
edge e;
using auxiliary dump callback function LOOP_DUMP_AUX if non-null. */
void
-flow_loop_dump (const struct loop *loop, FILE *file,
- void (*loop_dump_aux) (const struct loop *, FILE *, int),
+flow_loop_dump (const class loop *loop, FILE *file,
+ void (*loop_dump_aux) (const class loop *, FILE *, int),
int verbose)
{
basic_block *bbs;
using auxiliary dump callback function LOOP_DUMP_AUX if non-null. */
void
-flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
+flow_loops_dump (FILE *file, void (*loop_dump_aux) (const class loop *, FILE *, int), int verbose)
{
- struct loop *loop;
+ class loop *loop;
if (!current_loops || ! file)
return;
/* Free data allocated for LOOP. */
void
-flow_loop_free (struct loop *loop)
+flow_loop_free (class loop *loop)
{
struct loop_exit *exit, *next;
Return the number of nodes within the loop. */
int
-flow_loop_nodes_find (basic_block header, struct loop *loop)
+flow_loop_nodes_find (basic_block header, class loop *loop)
{
vec<basic_block> stack = vNULL;
int num_nodes = 1;
superloop is FATHER. */
static void
-establish_preds (struct loop *loop, struct loop *father)
+establish_preds (class loop *loop, class loop *father)
{
loop_p ploop;
unsigned depth = loop_depth (father) + 1;
of FATHER's siblings. */
void
-flow_loop_tree_node_add (struct loop *father, struct loop *loop,
- struct loop *after)
+flow_loop_tree_node_add (class loop *father, class loop *loop,
+ class loop *after)
{
if (after)
{
/* Remove LOOP from the loop hierarchy tree. */
void
-flow_loop_tree_node_remove (struct loop *loop)
+flow_loop_tree_node_remove (class loop *loop)
{
- struct loop *prev, *father;
+ class loop *prev, *father;
father = loop_outer (loop);
/* Allocates and returns new loop structure. */
-struct loop *
+class loop *
alloc_loop (void)
{
- struct loop *loop = ggc_cleared_alloc<struct loop> ();
+ class loop *loop = ggc_cleared_alloc<class loop> ();
loop->exits = ggc_cleared_alloc<loop_exit> ();
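/* The exit list is circular; an empty list is represented by the
   sentinel pointing back to itself.  */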
loop->exits->next = loop->exits->prev = loop->exits;
init_loops_structure (struct function *fn,
struct loops *loops, unsigned num_loops)
{
- struct loop *root;
+ class loop *root;
memset (loops, 0, sizeof *loops);
vec_alloc (loops->larray, num_loops);
basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
if (bb_loop_header_p (header))
{
- struct loop *loop;
+ class loop *loop;
/* The current active loop tree has valid loop-fathers for
header blocks. */
and assign basic-block ownership. */
for (i = 0; i < larray.length (); ++i)
{
- struct loop *loop = larray[i];
+ class loop *loop = larray[i];
basic_block header = loop->header;
edge_iterator ei;
edge e;
static int
sort_sibling_loops_cmp (const void *la_, const void *lb_)
{
- const struct loop *la = *(const struct loop * const *)la_;
- const struct loop *lb = *(const struct loop * const *)lb_;
+ const class loop *la = *(const class loop * const *)la_;
+ const class loop *lb = *(const class loop * const *)lb_;
return (sort_sibling_loops_cmp_rpo[la->header->index]
- sort_sibling_loops_cmp_rpo[lb->header->index]);
}
another edge. */
static edge
-find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
+find_subloop_latch_edge_by_ivs (class loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
{
edge e, latch = latches[0];
unsigned i;
returns NULL. */
static edge
-find_subloop_latch_edge (struct loop *loop)
+find_subloop_latch_edge (class loop *loop)
{
vec<edge> latches = get_loop_latch_edges (loop);
edge latch = NULL;
/* Creates a subloop of LOOP with latch edge LATCH. */
static void
-form_subloop (struct loop *loop, edge latch)
+form_subloop (class loop *loop, edge latch)
{
edge_iterator ei;
edge e, new_entry;
- struct loop *new_loop;
+ class loop *new_loop;
mfb_reis_set = new hash_set<edge>;
FOR_EACH_EDGE (e, ei, loop->header->preds)
a new latch of LOOP. */
static void
-merge_latch_edges (struct loop *loop)
+merge_latch_edges (class loop *loop)
{
vec<edge> latches = get_loop_latch_edges (loop);
edge latch, e;
loops with single latch edge. */
static void
-disambiguate_multiple_latches (struct loop *loop)
+disambiguate_multiple_latches (class loop *loop)
{
edge e;
void
disambiguate_loops_with_multiple_latches (void)
{
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, 0)
{
/* Return nonzero if basic block BB belongs to LOOP. */
bool
-flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
+flow_bb_inside_loop_p (const class loop *loop, const_basic_block bb)
{
- struct loop *source_loop;
+ class loop *source_loop;
if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
|| bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
static bool
glb_enum_p (const_basic_block bb, const void *glb_loop)
{
- const struct loop *const loop = (const struct loop *) glb_loop;
+ const class loop *const loop = (const class loop *) glb_loop;
return (bb != loop->header
&& dominated_by_p (CDI_DOMINATORS, bb, loop->header));
}
returned. */
unsigned
-get_loop_body_with_size (const struct loop *loop, basic_block *body,
+get_loop_body_with_size (const class loop *loop, basic_block *body,
unsigned max_size)
{
return dfs_enumerate_from (loop->header, 1, glb_enum_p,
header != latch, latch is the first block. */
basic_block *
-get_loop_body (const struct loop *loop)
+get_loop_body (const class loop *loop)
{
basic_block *body, bb;
unsigned tv = 0;
array TOVISIT from index *TV. */
static void
-fill_sons_in_loop (const struct loop *loop, basic_block bb,
+fill_sons_in_loop (const class loop *loop, basic_block bb,
basic_block *tovisit, int *tv)
{
basic_block son, postpone = NULL;
the latch, then only blocks dominated by s are after it. */
basic_block *
-get_loop_body_in_dom_order (const struct loop *loop)
+get_loop_body_in_dom_order (const class loop *loop)
{
basic_block *tovisit;
int tv;
/* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
basic_block *
-get_loop_body_in_custom_order (const struct loop *loop,
+get_loop_body_in_custom_order (const class loop *loop,
int (*bb_comparator) (const void *, const void *))
{
basic_block *bbs = get_loop_body (loop);
/* Get body of a LOOP in breadth first sort order. */
basic_block *
-get_loop_body_in_bfs_order (const struct loop *loop)
+get_loop_body_in_bfs_order (const class loop *loop)
{
basic_block *blocks;
basic_block bb;
rescan_loop_exit (edge e, bool new_edge, bool removed)
{
struct loop_exit *exits = NULL, *exit;
- struct loop *aloop, *cloop;
+ class loop *aloop, *cloop;
if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
return;
/* Returns the list of the exit edges of a LOOP. */
vec<edge>
-get_loop_exit_edges (const struct loop *loop)
+get_loop_exit_edges (const class loop *loop)
{
vec<edge> edges = vNULL;
edge e;
/* Counts the number of conditional branches inside LOOP. */
unsigned
-num_loop_branches (const struct loop *loop)
+num_loop_branches (const class loop *loop)
{
unsigned i, n;
basic_block * body;
/* Adds basic block BB to LOOP. */
void
-add_bb_to_loop (basic_block bb, struct loop *loop)
+add_bb_to_loop (basic_block bb, class loop *loop)
{
unsigned i;
loop_p ploop;
remove_bb_from_loops (basic_block bb)
{
unsigned i;
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
loop_p ploop;
edge_iterator ei;
edge e;
}
/* Finds nearest common ancestor in loop tree for given loops. */
-struct loop *
-find_common_loop (struct loop *loop_s, struct loop *loop_d)
+class loop *
+find_common_loop (class loop *loop_s, class loop *loop_d)
{
unsigned sdepth, ddepth;
/* Removes LOOP from structures and frees its data. */
void
-delete_loop (struct loop *loop)
+delete_loop (class loop *loop)
{
/* Remove the loop from structure. */
flow_loop_tree_node_remove (loop);
/* Cancels the LOOP; it must be innermost one. */
static void
-cancel_loop (struct loop *loop)
+cancel_loop (class loop *loop)
{
basic_block *bbs;
unsigned i;
- struct loop *outer = loop_outer (loop);
+ class loop *outer = loop_outer (loop);
gcc_assert (!loop->inner);
/* Cancels LOOP and all its subloops. */
void
-cancel_loop_tree (struct loop *loop)
+cancel_loop_tree (class loop *loop)
{
while (loop->inner)
cancel_loop_tree (loop->inner);
{
unsigned *sizes, i, j;
basic_block bb, *bbs;
- struct loop *loop;
+ class loop *loop;
int err = 0;
edge e;
unsigned num = number_of_loops (cfun);
/* Returns latch edge of LOOP. */
edge
-loop_latch_edge (const struct loop *loop)
+loop_latch_edge (const class loop *loop)
{
return find_edge (loop->latch, loop->header);
}
/* Returns preheader edge of LOOP. */
edge
-loop_preheader_edge (const struct loop *loop)
+loop_preheader_edge (const class loop *loop)
{
edge e;
edge_iterator ei;
/* Returns true if E is an exit of LOOP. */
bool
-loop_exit_edge_p (const struct loop *loop, const_edge e)
+loop_exit_edge_p (const class loop *loop, const_edge e)
{
return (flow_bb_inside_loop_p (loop, e->src)
&& !flow_bb_inside_loop_p (loop, e->dest));
is always returned. */
edge
-single_exit (const struct loop *loop)
+single_exit (const class loop *loop)
{
struct loop_exit *exit = loop->exits->next;
/* Returns true when BB has an incoming edge exiting LOOP. */
bool
-loop_exits_to_bb_p (struct loop *loop, basic_block bb)
+loop_exits_to_bb_p (class loop *loop, basic_block bb)
{
edge e;
edge_iterator ei;
/* Returns true when BB has an outgoing edge exiting LOOP. */
bool
-loop_exits_from_bb_p (struct loop *loop, basic_block bb)
+loop_exits_from_bb_p (class loop *loop, basic_block bb)
{
edge e;
edge_iterator ei;
/* Return location corresponding to the loop control condition if possible. */
dump_user_location_t
-get_loop_location (struct loop *loop)
+get_loop_location (class loop *loop)
{
rtx_insn *insn = NULL;
- struct niter_desc *desc = NULL;
+ class niter_desc *desc = NULL;
edge exit;
/* For a for or while loop, we would like to return the location
I_BOUND times. */
void
-record_niter_bound (struct loop *loop, const widest_int &i_bound,
+record_niter_bound (class loop *loop, const widest_int &i_bound,
bool realistic, bool upper)
{
/* Update the bounds only when there is no previous estimation, or when the
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-get_estimated_loop_iterations_int (struct loop *loop)
+get_estimated_loop_iterations_int (class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
the number of executions of the latch by one. */
HOST_WIDE_INT
-max_stmt_executions_int (struct loop *loop)
+max_stmt_executions_int (class loop *loop)
{
HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
HOST_WIDE_INT snit;
the number of executions of the latch by one. */
HOST_WIDE_INT
-likely_max_stmt_executions_int (struct loop *loop)
+likely_max_stmt_executions_int (class loop *loop)
{
HOST_WIDE_INT nit = get_likely_max_loop_iterations_int (loop);
HOST_WIDE_INT snit;
returns true. */
bool
-get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
+get_estimated_loop_iterations (class loop *loop, widest_int *nit)
{
/* Even if the bound is not recorded, possibly we can derive one from
profile. */
false, otherwise returns true. */
bool
-get_max_loop_iterations (const struct loop *loop, widest_int *nit)
+get_max_loop_iterations (const class loop *loop, widest_int *nit)
{
if (!loop->any_upper_bound)
return false;
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-get_max_loop_iterations_int (const struct loop *loop)
+get_max_loop_iterations_int (const class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
false, otherwise returns true. */
bool
-get_likely_max_loop_iterations (struct loop *loop, widest_int *nit)
+get_likely_max_loop_iterations (class loop *loop, widest_int *nit)
{
if (!loop->any_likely_upper_bound)
return false;
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-get_likely_max_loop_iterations_int (struct loop *loop)
+get_likely_max_loop_iterations_int (class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
bool is_exit;
/* The next bound in the list. */
- struct nb_iter_bound *next;
+ class nb_iter_bound *next;
};
/* Description of the loop exit. */
static void remove (loop_exit *);
};
-typedef struct loop *loop_p;
+typedef class loop *loop_p;
/* An integer estimation of the number of iterations. Estimate_state
describes the state of the estimation. */
vec<loop_p, va_gc> *superloops;
/* The first inner (child) loop or NULL if innermost loop. */
- struct loop *inner;
+ class loop *inner;
/* Link to the next (sibling) loop. */
- struct loop *next;
+ class loop *next;
/* Auxiliary info specific to a pass. */
PTR GTY ((skip (""))) aux;
int orig_loop_num;
/* Upper bound on number of iterations of a loop. */
- struct nb_iter_bound *bounds;
+ class nb_iter_bound *bounds;
/* Non-overflow control ivs of a loop. */
struct control_iv *control_ivs;
struct loop_exit *exits;
/* Number of iteration analysis data for RTL. */
- struct niter_desc *simple_loop_desc;
+ class niter_desc *simple_loop_desc;
/* For sanity checking during loop fixup we record here the former
loop header for loops marked for removal. Note that this prevents
/* Set C to the LOOP constraint. */
static inline void
-loop_constraint_set (struct loop *loop, unsigned c)
+loop_constraint_set (class loop *loop, unsigned c)
{
loop->constraints |= c;
}
/* Clear C from the LOOP constraint. */
static inline void
-loop_constraint_clear (struct loop *loop, unsigned c)
+loop_constraint_clear (class loop *loop, unsigned c)
{
loop->constraints &= ~c;
}
/* Check if C is set in the LOOP constraint. */
static inline bool
-loop_constraint_set_p (struct loop *loop, unsigned c)
+loop_constraint_set_p (class loop *loop, unsigned c)
{
return (loop->constraints & c) == c;
}
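As a usage note, the three accessors above are plain bit operations over loop->constraints; a self-contained sketch (the flag value is made up, not a GCC constraint constant):

/* Standalone illustration of the set/clear/test pattern; LOOP_C_EXAMPLE
   is hypothetical.  */
#include <cassert>

int
main ()
{
  unsigned constraints = 0;                /* stands in for loop->constraints */
  const unsigned LOOP_C_EXAMPLE = 1u << 0;

  constraints |= LOOP_C_EXAMPLE;           /* loop_constraint_set */
  assert ((constraints & LOOP_C_EXAMPLE) == LOOP_C_EXAMPLE);
                                           /* ... loop_constraint_set_p */
  constraints &= ~LOOP_C_EXAMPLE;          /* loop_constraint_clear */
  assert (!(constraints & LOOP_C_EXAMPLE));
  return 0;
}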
hash_table<loop_exit_hasher> *GTY(()) exits;
/* Pointer to root of loop hierarchy tree. */
- struct loop *tree_root;
+ class loop *tree_root;
};
/* Loop recognition. */
extern void disambiguate_loops_with_multiple_latches (void);
extern void flow_loops_free (struct loops *);
extern void flow_loops_dump (FILE *,
- void (*)(const struct loop *, FILE *, int), int);
-extern void flow_loop_dump (const struct loop *, FILE *,
- void (*)(const struct loop *, FILE *, int), int);
-struct loop *alloc_loop (void);
-extern void flow_loop_free (struct loop *);
-int flow_loop_nodes_find (basic_block, struct loop *);
+ void (*)(const class loop *, FILE *, int), int);
+extern void flow_loop_dump (const class loop *, FILE *,
+ void (*)(const class loop *, FILE *, int), int);
+class loop *alloc_loop (void);
+extern void flow_loop_free (class loop *);
+int flow_loop_nodes_find (basic_block, class loop *);
unsigned fix_loop_structure (bitmap changed_bbs);
bool mark_irreducible_loops (void);
void release_recorded_exits (function *);
void sort_sibling_loops (function *);
/* Loop data structure manipulation/querying. */
-extern void flow_loop_tree_node_add (struct loop *, struct loop *,
- struct loop * = NULL);
-extern void flow_loop_tree_node_remove (struct loop *);
-extern bool flow_loop_nested_p (const struct loop *, const struct loop *);
-extern bool flow_bb_inside_loop_p (const struct loop *, const_basic_block);
-extern struct loop * find_common_loop (struct loop *, struct loop *);
-struct loop *superloop_at_depth (struct loop *, unsigned);
+extern void flow_loop_tree_node_add (class loop *, class loop *,
+ class loop * = NULL);
+extern void flow_loop_tree_node_remove (class loop *);
+extern bool flow_loop_nested_p (const class loop *, const class loop *);
+extern bool flow_bb_inside_loop_p (const class loop *, const_basic_block);
+extern class loop * find_common_loop (class loop *, class loop *);
+class loop *superloop_at_depth (class loop *, unsigned);
struct eni_weights;
-extern int num_loop_insns (const struct loop *);
-extern int average_num_loop_insns (const struct loop *);
-extern unsigned get_loop_level (const struct loop *);
-extern bool loop_exit_edge_p (const struct loop *, const_edge);
-extern bool loop_exits_to_bb_p (struct loop *, basic_block);
-extern bool loop_exits_from_bb_p (struct loop *, basic_block);
+extern int num_loop_insns (const class loop *);
+extern int average_num_loop_insns (const class loop *);
+extern unsigned get_loop_level (const class loop *);
+extern bool loop_exit_edge_p (const class loop *, const_edge);
+extern bool loop_exits_to_bb_p (class loop *, basic_block);
+extern bool loop_exits_from_bb_p (class loop *, basic_block);
extern void mark_loop_exit_edges (void);
-extern dump_user_location_t get_loop_location (struct loop *loop);
+extern dump_user_location_t get_loop_location (class loop *loop);
/* Loops & cfg manipulation. */
-extern basic_block *get_loop_body (const struct loop *);
-extern unsigned get_loop_body_with_size (const struct loop *, basic_block *,
+extern basic_block *get_loop_body (const class loop *);
+extern unsigned get_loop_body_with_size (const class loop *, basic_block *,
unsigned);
-extern basic_block *get_loop_body_in_dom_order (const struct loop *);
-extern basic_block *get_loop_body_in_bfs_order (const struct loop *);
-extern basic_block *get_loop_body_in_custom_order (const struct loop *,
+extern basic_block *get_loop_body_in_dom_order (const class loop *);
+extern basic_block *get_loop_body_in_bfs_order (const class loop *);
+extern basic_block *get_loop_body_in_custom_order (const class loop *,
int (*) (const void *, const void *));
-extern vec<edge> get_loop_exit_edges (const struct loop *);
-extern edge single_exit (const struct loop *);
-extern edge single_likely_exit (struct loop *loop);
-extern unsigned num_loop_branches (const struct loop *);
+extern vec<edge> get_loop_exit_edges (const class loop *);
+extern edge single_exit (const class loop *);
+extern edge single_likely_exit (class loop *loop);
+extern unsigned num_loop_branches (const class loop *);
-extern edge loop_preheader_edge (const struct loop *);
-extern edge loop_latch_edge (const struct loop *);
+extern edge loop_preheader_edge (const class loop *);
+extern edge loop_latch_edge (const class loop *);
-extern void add_bb_to_loop (basic_block, struct loop *);
+extern void add_bb_to_loop (basic_block, class loop *);
extern void remove_bb_from_loops (basic_block);
-extern void cancel_loop_tree (struct loop *);
-extern void delete_loop (struct loop *);
+extern void cancel_loop_tree (class loop *);
+extern void delete_loop (class loop *);
extern void verify_loop_structure (void);
/* Loop analysis. */
-extern bool just_once_each_iteration_p (const struct loop *, const_basic_block);
-gcov_type expected_loop_iterations_unbounded (const struct loop *,
+extern bool just_once_each_iteration_p (const class loop *, const_basic_block);
+gcov_type expected_loop_iterations_unbounded (const class loop *,
bool *read_profile_p = NULL, bool by_profile_only = false);
-extern unsigned expected_loop_iterations (struct loop *);
+extern unsigned expected_loop_iterations (class loop *);
extern rtx doloop_condition_get (rtx_insn *);
void mark_loop_for_removal (loop_p);
rtx niter_expr;
};
-extern void iv_analysis_loop_init (struct loop *);
-extern bool iv_analyze (rtx_insn *, scalar_int_mode, rtx, struct rtx_iv *);
-extern bool iv_analyze_result (rtx_insn *, rtx, struct rtx_iv *);
+extern void iv_analysis_loop_init (class loop *);
+extern bool iv_analyze (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);
+extern bool iv_analyze_result (rtx_insn *, rtx, class rtx_iv *);
extern bool iv_analyze_expr (rtx_insn *, scalar_int_mode, rtx,
- struct rtx_iv *);
-extern rtx get_iv_value (struct rtx_iv *, rtx);
+ class rtx_iv *);
+extern rtx get_iv_value (class rtx_iv *, rtx);
extern bool biv_p (rtx_insn *, scalar_int_mode, rtx);
-extern void find_simple_exit (struct loop *, struct niter_desc *);
+extern void find_simple_exit (class loop *, class niter_desc *);
extern void iv_analysis_done (void);
-extern struct niter_desc *get_simple_loop_desc (struct loop *loop);
-extern void free_simple_loop_desc (struct loop *loop);
+extern class niter_desc *get_simple_loop_desc (class loop *loop);
+extern void free_simple_loop_desc (class loop *loop);
-static inline struct niter_desc *
-simple_loop_desc (struct loop *loop)
+static inline class niter_desc *
+simple_loop_desc (class loop *loop)
{
return loop->simple_loop_desc;
}
/* Returns the loop with index NUM from FN's loop tree. */
-static inline struct loop *
+static inline class loop *
get_loop (struct function *fn, unsigned num)
{
return (*loops_for_fn (fn)->larray)[num];
/* Returns the number of superloops of LOOP. */
static inline unsigned
-loop_depth (const struct loop *loop)
+loop_depth (const class loop *loop)
{
return vec_safe_length (loop->superloops);
}
/* Returns the immediate superloop of LOOP, or NULL if LOOP is the outermost
loop. */
-static inline struct loop *
-loop_outer (const struct loop *loop)
+static inline class loop *
+loop_outer (const class loop *loop)
{
unsigned n = vec_safe_length (loop->superloops);
/* Returns true if LOOP has at least one exit edge. */
static inline bool
-loop_has_exit_edges (const struct loop *loop)
+loop_has_exit_edges (const class loop *loop)
{
return loop->exits->next->e != NULL;
}
inline
loop_iterator::loop_iterator (function *fn, loop_p *loop, unsigned flags)
{
- struct loop *aloop;
+ class loop *aloop;
unsigned i;
int mn;
extern void doloop_optimize_loops (void);
extern void move_loop_invariants (void);
-extern vec<basic_block> get_loop_hot_path (const struct loop *loop);
+extern vec<basic_block> get_loop_hot_path (const class loop *loop);
/* Returns the outermost loop of the loop nest that contains LOOP.  */
-static inline struct loop *
-loop_outermost (struct loop *loop)
+static inline class loop *
+loop_outermost (class loop *loop)
{
unsigned n = vec_safe_length (loop->superloops);
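/* superloops[0] is the function's root pseudo-loop, so the outermost
   real loop sits at index 1.  */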
return (*loop->superloops)[1];
}
-extern void record_niter_bound (struct loop *, const widest_int &, bool, bool);
-extern HOST_WIDE_INT get_estimated_loop_iterations_int (struct loop *);
-extern HOST_WIDE_INT get_max_loop_iterations_int (const struct loop *);
-extern HOST_WIDE_INT get_likely_max_loop_iterations_int (struct loop *);
-extern bool get_estimated_loop_iterations (struct loop *loop, widest_int *nit);
-extern bool get_max_loop_iterations (const struct loop *loop, widest_int *nit);
-extern bool get_likely_max_loop_iterations (struct loop *loop, widest_int *nit);
+extern void record_niter_bound (class loop *, const widest_int &, bool, bool);
+extern HOST_WIDE_INT get_estimated_loop_iterations_int (class loop *);
+extern HOST_WIDE_INT get_max_loop_iterations_int (const class loop *);
+extern HOST_WIDE_INT get_likely_max_loop_iterations_int (class loop *);
+extern bool get_estimated_loop_iterations (class loop *loop, widest_int *nit);
+extern bool get_max_loop_iterations (const class loop *loop, widest_int *nit);
+extern bool get_likely_max_loop_iterations (class loop *loop, widest_int *nit);
extern int bb_loop_depth (const_basic_block);
/* Converts VAL to widest_int. */
/* Checks whether BB is executed exactly once in each LOOP iteration. */
bool
-just_once_each_iteration_p (const struct loop *loop, const_basic_block bb)
+just_once_each_iteration_p (const class loop *loop, const_basic_block bb)
{
/* It must be executed at least once each iteration. */
if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
unsigned depth;
struct graph *g;
int num = number_of_loops (cfun);
- struct loop *cloop;
+ class loop *cloop;
bool irred_loop_found = false;
int i;
/* Counts number of insns inside LOOP. */
int
-num_loop_insns (const struct loop *loop)
+num_loop_insns (const class loop *loop)
{
basic_block *bbs, bb;
unsigned i, ninsns = 0;
/* Counts number of insns executed on average per iteration LOOP. */
int
-average_num_loop_insns (const struct loop *loop)
+average_num_loop_insns (const class loop *loop)
{
basic_block *bbs, bb;
unsigned i, binsns;
return -1 in those scenarios. */
gcov_type
-expected_loop_iterations_unbounded (const struct loop *loop,
+expected_loop_iterations_unbounded (const class loop *loop,
bool *read_profile_p,
bool by_profile_only)
{
by REG_BR_PROB_BASE. */
unsigned
-expected_loop_iterations (struct loop *loop)
+expected_loop_iterations (class loop *loop)
{
gcov_type expected = expected_loop_iterations_unbounded (loop);
return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
/* Returns the maximum level of nesting of subloops of LOOP. */
unsigned
-get_loop_level (const struct loop *loop)
+get_loop_level (const class loop *loop)
{
- const struct loop *ploop;
+ const class loop *ploop;
unsigned mx = 0, l;
for (ploop = loop->inner; ploop; ploop = ploop->next)
to noreturn call. */
edge
-single_likely_exit (struct loop *loop)
+single_likely_exit (class loop *loop)
{
edge found = single_exit (loop);
vec<edge> exits;
header != latch, latch is the first block. */
vec<basic_block>
-get_loop_hot_path (const struct loop *loop)
+get_loop_hot_path (const class loop *loop)
{
basic_block bb = loop->header;
vec<basic_block> path = vNULL;
#include "tree-ssa-loop-manip.h"
#include "dumpfile.h"
-static void copy_loops_to (struct loop **, int,
- struct loop *);
+static void copy_loops_to (class loop **, int,
+ class loop *);
static void loop_redirect_edge (edge, basic_block);
static void remove_bbs (basic_block *, int);
static bool rpe_enum_p (const_basic_block, const void *);
static int find_path (edge, basic_block **);
-static void fix_loop_placements (struct loop *, bool *);
+static void fix_loop_placements (class loop *, bool *);
static bool fix_bb_placement (basic_block);
static void fix_bb_placements (basic_block, bool *, bitmap);
{
edge e;
edge_iterator ei;
- struct loop *loop = current_loops->tree_root, *act;
+ class loop *loop = current_loops->tree_root, *act;
FOR_EACH_EDGE (e, ei, bb->succs)
{
invalidate the information about irreducible regions. */
static bool
-fix_loop_placement (struct loop *loop, bool *irred_invalidated)
+fix_loop_placement (class loop *loop, bool *irred_invalidated)
{
unsigned i;
edge e;
vec<edge> exits = get_loop_exit_edges (loop);
- struct loop *father = current_loops->tree_root, *act;
+ class loop *father = current_loops->tree_root, *act;
bool ret = false;
FOR_EACH_VEC_ELT (exits, i, e)
bitmap loop_closed_ssa_invalidated)
{
basic_block *queue, *qtop, *qbeg, *qend;
- struct loop *base_loop, *target_loop;
+ class loop *base_loop, *target_loop;
edge e;
/* We pass through blocks back-reachable from FROM, testing whether some
FOR_EACH_EDGE (e, ei, from->preds)
{
basic_block pred = e->src;
- struct loop *nca;
+ class loop *nca;
if (e->flags & EDGE_IRREDUCIBLE_LOOP)
*irred_invalidated = true;
int i, nrem, n_bord_bbs;
bool local_irred_invalidated = false;
edge_iterator ei;
- struct loop *l, *f;
+ class loop *l, *f;
if (! irred_invalidated)
irred_invalidated = &local_irred_invalidated;
/* Creates place for a new LOOP in loops structure of FN. */
void
-place_new_loop (struct function *fn, struct loop *loop)
+place_new_loop (struct function *fn, class loop *loop)
{
loop->num = number_of_loops (fn);
vec_safe_push (loops_for_fn (fn)->larray, loop);
outer. */
void
-add_loop (struct loop *loop, struct loop *outer)
+add_loop (class loop *loop, class loop *outer)
{
basic_block *bbs;
int i, n;
- struct loop *subloop;
+ class loop *subloop;
edge e;
edge_iterator ei;
/* Scale profile of loop by P. */
void
-scale_loop_frequencies (struct loop *loop, profile_probability p)
+scale_loop_frequencies (class loop *loop, profile_probability p)
{
basic_block *bbs;
they need to be scaled synchronously. */
void
-scale_loop_profile (struct loop *loop, profile_probability p,
+scale_loop_profile (class loop *loop, profile_probability p,
gcov_type iteration_bound)
{
edge e, preheader_e;
/* Recompute dominance information for basic blocks outside LOOP. */
static void
-update_dominators_in_loop (struct loop *loop)
+update_dominators_in_loop (class loop *loop)
{
vec<basic_block> dom_bbs = vNULL;
basic_block *body;
should be used only when the UPPER_BOUND expression is a loop
invariant. */
-struct loop *
+class loop *
create_empty_loop_on_edge (edge entry_edge,
tree initial_value,
tree stride, tree upper_bound,
tree iv,
tree *iv_before,
tree *iv_after,
- struct loop *outer)
+ class loop *outer)
{
basic_block loop_header, loop_latch, succ_bb, pred_bb;
- struct loop *loop;
+ class loop *loop;
gimple_stmt_iterator gsi;
gimple_seq stmts;
gcond *cond_expr;
Returns the newly created loop. Frequencies and counts in the new loop
are scaled by FALSE_SCALE and in the old one by TRUE_SCALE. */
-struct loop *
+class loop *
loopify (edge latch_edge, edge header_edge,
basic_block switch_bb, edge true_edge, edge false_edge,
bool redirect_all_edges, profile_probability true_scale,
{
basic_block succ_bb = latch_edge->dest;
basic_block pred_bb = header_edge->src;
- struct loop *loop = alloc_loop ();
- struct loop *outer = loop_outer (succ_bb->loop_father);
+ class loop *loop = alloc_loop ();
+ class loop *outer = loop_outer (succ_bb->loop_father);
profile_count cnt;
loop->header = header_edge->dest;
basic blocks that had a non-trivial update on their loop_father.  */
void
-unloop (struct loop *loop, bool *irred_invalidated,
+unloop (class loop *loop, bool *irred_invalidated,
bitmap loop_closed_ssa_invalidated)
{
basic_block *body;
- struct loop *ploop;
+ class loop *ploop;
unsigned i, n;
basic_block latch = loop->latch;
bool dummy = false;
invalidate the information about irreducible regions. */
static void
-fix_loop_placements (struct loop *loop, bool *irred_invalidated)
+fix_loop_placements (class loop *loop, bool *irred_invalidated)
{
- struct loop *outer;
+ class loop *outer;
while (loop_outer (loop))
{
the loop into its duplicate. */
void
-copy_loop_info (struct loop *loop, struct loop *target)
+copy_loop_info (class loop *loop, class loop *target)
{
gcc_checking_assert (!target->any_upper_bound && !target->any_estimate);
target->any_upper_bound = loop->any_upper_bound;
created loop into loops structure. If AFTER is non-null
the new loop is added at AFTER->next, otherwise in front of TARGET's
sibling list. */
-struct loop *
-duplicate_loop (struct loop *loop, struct loop *target, struct loop *after)
+class loop *
+duplicate_loop (class loop *loop, class loop *target, class loop *after)
{
- struct loop *cloop;
+ class loop *cloop;
cloop = alloc_loop ();
place_new_loop (cfun, cloop);
newly created loops into loop tree at the end of TARGET's sibling
list in the original order. */
void
-duplicate_subloops (struct loop *loop, struct loop *target)
+duplicate_subloops (class loop *loop, class loop *target)
{
- struct loop *aloop, *cloop, *tail;
+ class loop *aloop, *cloop, *tail;
for (tail = target->inner; tail && tail->next; tail = tail->next)
;
into TARGET loop, placing newly created loops into loop tree adding
them to TARGET's sibling list at the end in order. */
static void
-copy_loops_to (struct loop **copied_loops, int n, struct loop *target)
+copy_loops_to (class loop **copied_loops, int n, class loop *target)
{
- struct loop *aloop, *tail;
+ class loop *aloop, *tail;
int i;
for (tail = target->inner; tail && tail->next; tail = tail->next)
/* Check whether LOOP's body can be duplicated. */
bool
-can_duplicate_loop_p (const struct loop *loop)
+can_duplicate_loop_p (const class loop *loop)
{
int ret;
basic_block *bbs = get_loop_body (loop);
impossible. */
bool
-duplicate_loop_to_header_edge (struct loop *loop, edge e,
+duplicate_loop_to_header_edge (class loop *loop, edge e,
unsigned int ndupl, sbitmap wont_exit,
edge orig, vec<edge> *to_remove,
int flags)
{
- struct loop *target, *aloop;
- struct loop **orig_loops;
+ class loop *target, *aloop;
+ class loop **orig_loops;
unsigned n_orig_loops;
basic_block header = loop->header, latch = loop->latch;
basic_block *new_bbs, *bbs, *first_active;
n_orig_loops = 0;
for (aloop = loop->inner; aloop; aloop = aloop->next)
n_orig_loops++;
- orig_loops = XNEWVEC (struct loop *, n_orig_loops);
+ orig_loops = XNEWVEC (class loop *, n_orig_loops);
for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
orig_loops[i] = aloop;
/* True when a candidate preheader BLOCK has predecessors from LOOP. */
static bool
-has_preds_from_loop (basic_block block, struct loop *loop)
+has_preds_from_loop (basic_block block, class loop *loop)
{
edge e;
edge_iterator ei;
The function also updates dominators. */
basic_block
-create_preheader (struct loop *loop, int flags)
+create_preheader (class loop *loop, int flags)
{
edge e;
basic_block dummy;
void
create_preheaders (int flags)
{
- struct loop *loop;
+ class loop *loop;
if (!current_loops)
return;
void
force_single_succ_latches (void)
{
- struct loop *loop;
+ class loop *loop;
edge e;
FOR_EACH_LOOP (loop, 0)
If PLACE_AFTER is true, we place the new loop after LOOP in the
instruction stream, otherwise it is placed before LOOP. */
-struct loop *
-loop_version (struct loop *loop,
+class loop *
+loop_version (class loop *loop,
void *cond_expr, basic_block *condition_bb,
profile_probability then_prob, profile_probability else_prob,
profile_probability then_scale, profile_probability else_scale,
basic_block first_head, second_head;
edge entry, latch_edge, true_edge, false_edge;
int irred_flag;
- struct loop *nloop;
+ class loop *nloop;
basic_block cond_bb;
/* Record entry and latch edges for the loop.  */
extern edge mfb_kj_edge;
extern bool remove_path (edge, bool * = NULL, bitmap = NULL);
-extern void place_new_loop (struct function *, struct loop *);
-extern void add_loop (struct loop *, struct loop *);
-extern void scale_loop_frequencies (struct loop *, profile_probability);
-extern void scale_loop_profile (struct loop *, profile_probability, gcov_type);
+extern void place_new_loop (struct function *, class loop *);
+extern void add_loop (class loop *, class loop *);
+extern void scale_loop_frequencies (class loop *, profile_probability);
+extern void scale_loop_profile (class loop *, profile_probability, gcov_type);
extern edge create_empty_if_region_on_edge (edge, tree);
-extern struct loop *create_empty_loop_on_edge (edge, tree, tree, tree, tree,
- tree *, tree *, struct loop *);
-extern struct loop *loopify (edge, edge,
+extern class loop *create_empty_loop_on_edge (edge, tree, tree, tree, tree,
+ tree *, tree *, class loop *);
+extern class loop *loopify (edge, edge,
basic_block, edge, edge, bool,
profile_probability, profile_probability);
-extern void unloop (struct loop *, bool *, bitmap);
-extern void copy_loop_info (struct loop *loop, struct loop *target);
-extern struct loop * duplicate_loop (struct loop *, struct loop *,
- struct loop * = NULL);
-extern void duplicate_subloops (struct loop *, struct loop *);
-extern bool can_duplicate_loop_p (const struct loop *loop);
-extern bool duplicate_loop_to_header_edge (struct loop *, edge,
+extern void unloop (class loop *, bool *, bitmap);
+extern void copy_loop_info (class loop *loop, class loop *target);
+extern class loop * duplicate_loop (class loop *, class loop *,
+ class loop * = NULL);
+extern void duplicate_subloops (class loop *, class loop *);
+extern bool can_duplicate_loop_p (const class loop *loop);
+extern bool duplicate_loop_to_header_edge (class loop *, edge,
unsigned, sbitmap, edge,
vec<edge> *, int);
extern bool mfb_keep_just (edge);
-basic_block create_preheader (struct loop *, int);
+basic_block create_preheader (class loop *, int);
extern void create_preheaders (int);
extern void force_single_succ_latches (void);
-struct loop * loop_version (struct loop *, void *,
+class loop * loop_version (class loop *, void *,
basic_block *,
profile_probability, profile_probability,
profile_probability, profile_probability, bool);
/* The cgraph data structure.
Each function decl has assigned cgraph_node listing callees and callers. */
-class GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
+struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
{
-public:
friend class symbol_table;
/* Remove the node from cgraph and all inline clones inlined into it.
typedef cgraph_node_set_def *cgraph_node_set;
typedef struct varpool_node_set_def *varpool_node_set;
-class varpool_node;
+struct varpool_node;
/* A varpool node set is a collection of varpool nodes. A varpool node
can appear in multiple sets. */
/* LTO streaming. */
void stream_out (struct output_block *) const;
- void stream_in (struct lto_input_block *, struct data_in *data_in);
+ void stream_in (class lto_input_block *, class data_in *data_in);
private:
bool combine_speculation_with (tree, HOST_WIDE_INT, bool, tree);
for_user)) cgraph_edge
{
public:
- friend class cgraph_node;
+ friend struct cgraph_node;
friend class symbol_table;
/* Remove the edge in the cgraph. */
class GTY((tag ("SYMTAB"))) symbol_table
{
public:
- friend class symtab_node;
- friend class cgraph_node;
- friend class cgraph_edge;
+ friend struct symtab_node;
+ friend struct cgraph_node;
+ friend struct cgraph_edge;
symbol_table (): cgraph_max_uid (1), cgraph_max_summary_id (0),
edges_max_uid (1), edges_max_summary_id (0)
struct record_reference_ctx
{
bool only_vars;
- class varpool_node *varpool_node;
+ struct varpool_node *varpool_node;
};
/* Walk tree and record all calls and references to functions/variables.
For memory, assume that the desired extraction_mode and pos_mode
are the same as for a register operation, since at present we don't
have named patterns for aligned memory structures. */
- struct extraction_insn insn;
+ class extraction_insn insn;
unsigned int inner_size;
if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
&& get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
/* Return an appropriate const instance of xlogue_layout based upon values
in cfun->machine and crtl. */
-const struct xlogue_layout &
+const class xlogue_layout &
xlogue_layout::get_instance ()
{
enum xlogue_stub_sets stub_set;
return STUB_INDEX_OFFSET + m_stack_align_off_in;
}
- static const struct xlogue_layout &get_instance ();
+ static const class xlogue_layout &get_instance ();
static unsigned count_stub_managed_regs ();
static bool is_stub_managed_reg (unsigned regno, unsigned count);
rtx_insn *insn;
rtx sym, addr;
rtx rax = gen_rtx_REG (word_mode, AX_REG);
- const struct xlogue_layout &xlogue = xlogue_layout::get_instance ();
+ const class xlogue_layout &xlogue = xlogue_layout::get_instance ();
/* AL should only be live with sysv_abi. */
gcc_assert (!ix86_eax_live_at_start_p ());
rtx sym, tmp;
rtx rsi = gen_rtx_REG (word_mode, SI_REG);
rtx r10 = NULL_RTX;
- const struct xlogue_layout &xlogue = xlogue_layout::get_instance ();
+ const class xlogue_layout &xlogue = xlogue_layout::get_instance ();
HOST_WIDE_INT stub_ptr_offset = xlogue.get_stub_ptr_offset ();
HOST_WIDE_INT rsi_offset = frame.stack_realign_offset + stub_ptr_offset;
rtx rsi_frame_load = NULL_RTX;
/* Implement targetm.vectorize.init_cost. */
static void *
-ix86_init_cost (struct loop *)
+ix86_init_cost (class loop *)
{
unsigned *cost = XNEWVEC (unsigned, 3);
cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
static unsigned
ix86_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
- struct _stmt_vec_info *stmt_info, int misalign,
+ class _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where)
{
unsigned *cost = (unsigned *) data;
(value 32 is used) as a heuristic. */
static unsigned
-ix86_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
+ix86_loop_unroll_adjust (unsigned nunroll, class loop *loop)
{
basic_block *bbs;
rtx_insn *insn;
AS_IF([test $enable_build_format_warnings = no],
[wf_opt=-Wno-format],[wf_opt=])
ACX_PROG_CXX_WARNING_OPTS(
- m4_quote(m4_do([-W -Wall -Wno-narrowing -Wwrite-strings ],
+ m4_quote(m4_do([-W -Wall -Wclass-is-pod -Wmismatched-tags ],
+ [-Wno-narrowing -Wstruct-not-pod -Wwrite-strings ],
[-Wcast-qual -Wno-error=format-diag $wf_opt])),
[loose_warn])
ACX_PROG_CC_WARNING_OPTS(
typedef uint64_t gcov_type_unsigned;
struct bitmap_obstack;
-struct bitmap_head;
-typedef struct bitmap_head *bitmap;
-typedef const struct bitmap_head *const_bitmap;
+class bitmap_head;
+typedef class bitmap_head *bitmap;
+typedef const class bitmap_head *const_bitmap;
struct simple_bitmap_def;
typedef struct simple_bitmap_def *sbitmap;
typedef const struct simple_bitmap_def *const_sbitmap;
typedef opt_mode<scalar_mode> opt_scalar_mode;
typedef opt_mode<scalar_int_mode> opt_scalar_int_mode;
typedef opt_mode<scalar_float_mode> opt_scalar_float_mode;
-template<typename> class pod_mode;
+template<typename> struct pod_mode;
typedef pod_mode<scalar_mode> scalar_mode_pod;
typedef pod_mode<scalar_int_mode> scalar_int_mode_pod;
typedef pod_mode<fixed_size_mode> fixed_size_mode_pod;
/* Subclasses of rtx_def, using indentation to show the class
hierarchy, along with the relevant invariant.
Where possible, keep this list in the same order as in rtl.def. */
-class rtx_def;
- class rtx_expr_list; /* GET_CODE (X) == EXPR_LIST */
- class rtx_insn_list; /* GET_CODE (X) == INSN_LIST */
- class rtx_sequence; /* GET_CODE (X) == SEQUENCE */
- class rtx_insn;
- class rtx_debug_insn; /* DEBUG_INSN_P (X) */
- class rtx_nonjump_insn; /* NONJUMP_INSN_P (X) */
- class rtx_jump_insn; /* JUMP_P (X) */
- class rtx_call_insn; /* CALL_P (X) */
- class rtx_jump_table_data; /* JUMP_TABLE_DATA_P (X) */
- class rtx_barrier; /* BARRIER_P (X) */
- class rtx_code_label; /* LABEL_P (X) */
- class rtx_note; /* NOTE_P (X) */
+struct rtx_def;
+ struct rtx_expr_list; /* GET_CODE (X) == EXPR_LIST */
+ struct rtx_insn_list; /* GET_CODE (X) == INSN_LIST */
+ struct rtx_sequence; /* GET_CODE (X) == SEQUENCE */
+ struct rtx_insn;
+ struct rtx_debug_insn; /* DEBUG_INSN_P (X) */
+ struct rtx_nonjump_insn; /* NONJUMP_INSN_P (X) */
+ struct rtx_jump_insn; /* JUMP_P (X) */
+ struct rtx_call_insn; /* CALL_P (X) */
+ struct rtx_jump_table_data; /* JUMP_TABLE_DATA_P (X) */
+ struct rtx_barrier; /* BARRIER_P (X) */
+ struct rtx_code_label; /* LABEL_P (X) */
+ struct rtx_note; /* NOTE_P (X) */
struct rtvec_def;
typedef struct rtvec_def *rtvec;
/* Subclasses of symtab_node, using indentation to show the class
hierarchy. */
-class symtab_node;
+struct symtab_node;
struct cgraph_node;
- class varpool_node;
+ struct varpool_node;
union section;
typedef union section section;
struct cl_decoded_option;
struct cl_option_handlers;
struct diagnostic_context;
-struct pretty_printer;
+class pretty_printer;
/* Address space number for named address space support. */
typedef unsigned char addr_space_t;
set yet). */
typedef int alias_set_type;
-struct edge_def;
-typedef struct edge_def *edge;
-typedef const struct edge_def *const_edge;
+class edge_def;
+typedef class edge_def *edge;
+typedef const class edge_def *const_edge;
struct basic_block_def;
typedef struct basic_block_def *basic_block;
typedef const struct basic_block_def *const_basic_block;
+2019-07-09 Martin Sebor <msebor@redhat.com>
+
+ PR c++/61339
+ * cp-tree.h: Change class-key of PODs to struct and others to class.
+ * search.c: Same.
+ * semantics.c (finalize_nrv_r): Same.
+
2019-07-09 Martin Sebor <msebor@redhat.com>
PR c++/61339
lookup_member_fuzzy (tree xbasetype, tree name, bool want_type_p)
{
tree type = NULL_TREE, basetype_path = NULL_TREE;
- struct lookup_field_fuzzy_info lffi (want_type_p);
+ class lookup_field_fuzzy_info lffi (want_type_p);
/* rval_binfo is the binfo associated with the found member, note,
this can be set with useful information, even when rval is not
static tree
finalize_nrv_r (tree* tp, int* walk_subtrees, void* data)
{
- struct nrv_data *dp = (struct nrv_data *)data;
+ class nrv_data *dp = (class nrv_data *)data;
tree_node **slot;
/* No need to walk into types. There wouldn't be any need to walk into
void
finalize_nrv (tree *tp, tree var, tree result)
{
- struct nrv_data data;
+ class nrv_data data;
/* Copy name from VAR to RESULT. */
DECL_NAME (result) = DECL_NAME (var);
IB. Write the length to RLEN. */
static const char *
-string_for_index (struct data_in *data_in, unsigned int loc, unsigned int *rlen)
+string_for_index (class data_in *data_in, unsigned int loc, unsigned int *rlen)
{
unsigned int len;
const char *result;
IB. Write the length to RLEN. */
const char *
-streamer_read_indexed_string (struct data_in *data_in,
- struct lto_input_block *ib, unsigned int *rlen)
+streamer_read_indexed_string (class data_in *data_in,
+ class lto_input_block *ib, unsigned int *rlen)
{
return string_for_index (data_in, streamer_read_uhwi (ib), rlen);
}
/* Read a NULL terminated string from the string table in DATA_IN. */
const char *
-streamer_read_string (struct data_in *data_in, struct lto_input_block *ib)
+streamer_read_string (class data_in *data_in, class lto_input_block *ib)
{
unsigned int len;
const char *ptr;
Write the length to RLEN. */
const char *
-bp_unpack_indexed_string (struct data_in *data_in,
+bp_unpack_indexed_string (class data_in *data_in,
struct bitpack_d *bp, unsigned int *rlen)
{
return string_for_index (data_in, bp_unpack_var_len_unsigned (bp), rlen);
/* Read a NULL terminated string from the string table in DATA_IN. */
const char *
-bp_unpack_string (struct data_in *data_in, struct bitpack_d *bp)
+bp_unpack_string (class data_in *data_in, struct bitpack_d *bp)
{
unsigned int len;
const char *ptr;
/* Read an unsigned HOST_WIDE_INT number from IB. */
unsigned HOST_WIDE_INT
-streamer_read_uhwi (struct lto_input_block *ib)
+streamer_read_uhwi (class lto_input_block *ib)
{
unsigned HOST_WIDE_INT result;
int shift;
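The elided body decodes a variable-length integer. For orientation, a self-contained sketch of the assumed LEB128-style scheme (7 payload bits per byte, high bit as continuation marker) -- an illustration, not the GCC implementation:

#include <cstdint>
#include <cstddef>

/* Decode one unsigned LEB128-style integer from BUF, advancing *POS.  */
static uint64_t
read_uleb (const unsigned char *buf, size_t *pos)
{
  uint64_t result = 0;
  int shift = 0;
  unsigned char byte;
  do
    {
      byte = buf[(*pos)++];
      result |= (uint64_t) (byte & 0x7f) << shift;
      shift += 7;
    }
  while (byte & 0x80);
  return result;
}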
/* Read a HOST_WIDE_INT number from IB. */
HOST_WIDE_INT
-streamer_read_hwi (struct lto_input_block *ib)
+streamer_read_hwi (class lto_input_block *ib)
{
HOST_WIDE_INT result = 0;
int shift = 0;
/* Read gcov_type value from IB. */
gcov_type
-streamer_read_gcov_count (struct lto_input_block *ib)
+streamer_read_gcov_count (class lto_input_block *ib)
{
gcov_type ret = streamer_read_hwi (ib);
return ret;
input block IB. */
wide_int
-streamer_read_wide_int (struct lto_input_block *ib)
+streamer_read_wide_int (class lto_input_block *ib)
{
HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
int i;
input block IB. */
widest_int
-streamer_read_widest_int (struct lto_input_block *ib)
+streamer_read_widest_int (class lto_input_block *ib)
{
HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
int i;
void streamer_write_widest_int (struct output_block *, const widest_int &);
/* In data-streamer-in.c */
-const char *streamer_read_string (struct data_in *, struct lto_input_block *);
-const char *streamer_read_indexed_string (struct data_in *,
- struct lto_input_block *,
+const char *streamer_read_string (class data_in *, class lto_input_block *);
+const char *streamer_read_indexed_string (class data_in *,
+ class lto_input_block *,
unsigned int *);
-const char *bp_unpack_indexed_string (struct data_in *, struct bitpack_d *,
+const char *bp_unpack_indexed_string (class data_in *, struct bitpack_d *,
unsigned int *);
-const char *bp_unpack_string (struct data_in *, struct bitpack_d *);
-unsigned HOST_WIDE_INT streamer_read_uhwi (struct lto_input_block *);
-HOST_WIDE_INT streamer_read_hwi (struct lto_input_block *);
-gcov_type streamer_read_gcov_count (struct lto_input_block *);
-wide_int streamer_read_wide_int (struct lto_input_block *);
-widest_int streamer_read_widest_int (struct lto_input_block *);
+const char *bp_unpack_string (class data_in *, struct bitpack_d *);
+unsigned HOST_WIDE_INT streamer_read_uhwi (class lto_input_block *);
+HOST_WIDE_INT streamer_read_hwi (class lto_input_block *);
+gcov_type streamer_read_gcov_count (class lto_input_block *);
+wide_int streamer_read_wide_int (class lto_input_block *);
+widest_int streamer_read_widest_int (class lto_input_block *);
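[Editorial note: as a quick orientation to this reader API, here is a minimal usage sketch. It assumes IB and DATA_IN were already set up by the LTO input machinery; the section layout is not shown here.]

    /* Read a length-prefixed string followed by an unsigned count.  */
    unsigned int len;
    const char *name = streamer_read_indexed_string (data_in, ib, &len);
    unsigned HOST_WIDE_INT count = streamer_read_uhwi (ib);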
/* Returns a new bit-packing context for bit-packing into S. */
static inline struct bitpack_d
/* Returns a new bit-packing context for bit-unpacking from IB. */
static inline struct bitpack_d
-streamer_read_bitpack (struct lto_input_block *ib)
+streamer_read_bitpack (class lto_input_block *ib)
{
struct bitpack_d bp;
bp.word = streamer_read_uhwi (ib);
if (pos + nbits > BITS_PER_BITPACK_WORD)
{
bp->word = val
- = streamer_read_uhwi ((struct lto_input_block *)bp->stream);
+ = streamer_read_uhwi ((class lto_input_block *)bp->stream);
bp->pos = nbits;
return val & mask;
}
/* Read byte from the input block. */
static inline unsigned char
-streamer_read_uchar (struct lto_input_block *ib)
+streamer_read_uchar (class lto_input_block *ib)
{
if (ib->p >= ib->len)
lto_section_overrun (ib);
to be compile time constant. PURPOSE is used for error reporting. */
static inline HOST_WIDE_INT
-streamer_read_hwi_in_range (struct lto_input_block *ib,
+streamer_read_hwi_in_range (class lto_input_block *ib,
const char *purpose,
HOST_WIDE_INT min,
HOST_WIDE_INT max)
/* Return the next tag in the input block IB. */
static inline enum LTO_tags
-streamer_read_record_start (struct lto_input_block *ib)
+streamer_read_record_start (class lto_input_block *ib)
{
return streamer_read_enum (ib, LTO_tags, LTO_NUM_TAGS);
}
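[Editorial note: callers typically dispatch on the returned tag; a hedged sketch follows. LTO_null is the null tag from lto-streamer.h, and the early return is illustrative only.]

    enum LTO_tags tag = streamer_read_record_start (ib);
    if (tag == LTO_null)
      return;  /* Nothing follows in this record.  */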
{
int regno = REGNO (SET_DEST (set));
df_ref first_def;
- struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (g->bb);
+ class df_rd_bb_info *bb_info = DF_RD_BB_INFO (g->bb);
first_def = df_bb_regno_first_def_find (g->bb, regno);
gcc_assert (first_def);
if (flag_checking && DF_REF_ID (last_def) != DF_REF_ID (first_def))
{
- struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (g->bb);
+ class df_rd_bb_info *bb_info = DF_RD_BB_INFO (g->bb);
gcc_assert (!bitmap_bit_p (&bb_info->gen, DF_REF_ID (first_def)));
}
build_inter_loop_deps (ddg_ptr g)
{
unsigned rd_num;
- struct df_rd_bb_info *rd_bb_info;
+ class df_rd_bb_info *rd_bb_info;
bitmap_iterator bi;
rd_bb_info = DF_RD_BB_INFO (g->bb);
{
int i;
/* Hold the dependency analysis state during dependency calculations. */
- struct deps_desc tmp_deps;
+ class deps_desc tmp_deps;
rtx_insn *head, *tail;
/* Build the dependence information, using the sched_analyze function. */
Functions to create, destroy and manipulate an instance of df.
----------------------------------------------------------------------------*/
-struct df_d *df;
+class df_d *df;
/* Add PROBLEM (and any dependent problems) to the DF instance. */
rest_of_handle_df_initialize (void)
{
gcc_assert (!df);
- df = XCNEW (struct df_d);
+ df = XCNEW (class df_d);
df->changeable_flags = 0;
bitmap_obstack_initialize (&df_bitmap_obstack);
Returns the number of blocks which is always loop->num_nodes. */
static int
-loop_post_order_compute (int *post_order, struct loop *loop)
+loop_post_order_compute (int *post_order, class loop *loop)
{
edge_iterator *stack;
int sp;
by LOOP. Returns the number of blocks which is always loop->num_nodes. */
static void
-loop_inverted_post_order_compute (vec<int> *post_order, struct loop *loop)
+loop_inverted_post_order_compute (vec<int> *post_order, class loop *loop)
{
basic_block bb;
edge_iterator *stack;
/* Analyze dataflow info for the basic blocks contained in LOOP. */
void
-df_analyze_loop (struct loop *loop)
+df_analyze_loop (class loop *loop)
{
free (df->postorder);
df_rd_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_rd_bb_info *bb_info = (struct df_rd_bb_info *) vbb_info;
+ class df_rd_bb_info *bb_info = (class df_rd_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->kill);
{
unsigned int bb_index;
bitmap_iterator bi;
- struct df_rd_problem_data *problem_data;
+ class df_rd_problem_data *problem_data;
if (df_rd->problem_data)
{
- problem_data = (struct df_rd_problem_data *) df_rd->problem_data;
+ problem_data = (class df_rd_problem_data *) df_rd->problem_data;
bitmap_clear (&problem_data->sparse_invalidated_by_call);
bitmap_clear (&problem_data->dense_invalidated_by_call);
}
else
{
- problem_data = XNEW (struct df_rd_problem_data);
+ problem_data = XNEW (class df_rd_problem_data);
df_rd->problem_data = problem_data;
bitmap_obstack_initialize (&problem_data->rd_bitmaps);
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->kill.obstack)
of kill sets. */
static void
-df_rd_bb_local_compute_process_def (struct df_rd_bb_info *bb_info,
+df_rd_bb_local_compute_process_def (class df_rd_bb_info *bb_info,
df_ref def,
int top_flag)
{
df_rd_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
rtx_insn *insn;
bitmap_clear (&seen_in_block);
unsigned int bb_index;
bitmap_iterator bi;
unsigned int regno;
- struct df_rd_problem_data *problem_data
- = (struct df_rd_problem_data *) df_rd->problem_data;
+ class df_rd_problem_data *problem_data
+ = (class df_rd_problem_data *) df_rd->problem_data;
bitmap sparse_invalidated = &problem_data->sparse_invalidated_by_call;
bitmap dense_invalidated = &problem_data->dense_invalidated_by_call;
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
bitmap_copy (&bb_info->out, &bb_info->gen);
bitmap_clear (&bb_info->in);
if (e->flags & EDGE_EH)
{
- struct df_rd_problem_data *problem_data
- = (struct df_rd_problem_data *) df_rd->problem_data;
+ class df_rd_problem_data *problem_data
+ = (class df_rd_problem_data *) df_rd->problem_data;
bitmap sparse_invalidated = &problem_data->sparse_invalidated_by_call;
bitmap dense_invalidated = &problem_data->dense_invalidated_by_call;
bitmap_iterator bi;
static bool
df_rd_transfer_function (int bb_index)
{
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
unsigned int regno;
bitmap_iterator bi;
bitmap in = &bb_info->in;
changed = bitmap_ior_and_compl (out, gen, in, kill);
else
{
- struct df_rd_problem_data *problem_data;
+ class df_rd_problem_data *problem_data;
bitmap_head tmp;
/* Note that TMP is _not_ a temporary bitmap if we end up replacing
OUT with TMP. Therefore, allocate TMP in the RD bitmaps obstack. */
- problem_data = (struct df_rd_problem_data *) df_rd->problem_data;
+ problem_data = (class df_rd_problem_data *) df_rd->problem_data;
bitmap_initialize (&tmp, &problem_data->rd_bitmaps);
bitmap_and_compl (&tmp, in, kill);
basic block, and mask out DEFs of registers that are not live.
Computing the mask looks costly, but the benefit of the pruning
outweighs the cost. */
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
bitmap regs_live_out = &df_lr_get_bb_info (bb_index)->out;
bitmap live_defs = BITMAP_ALLOC (&df_bitmap_obstack);
unsigned int regno;
static void
df_rd_free (void)
{
- struct df_rd_problem_data *problem_data
- = (struct df_rd_problem_data *) df_rd->problem_data;
+ class df_rd_problem_data *problem_data
+ = (class df_rd_problem_data *) df_rd->problem_data;
if (problem_data)
{
static void
df_rd_start_dump (FILE *file)
{
- struct df_rd_problem_data *problem_data
- = (struct df_rd_problem_data *) df_rd->problem_data;
+ class df_rd_problem_data *problem_data
+ = (class df_rd_problem_data *) df_rd->problem_data;
unsigned int m = DF_REG_SIZE (df);
unsigned int regno;
static void
df_rd_top_dump (basic_block bb, FILE *file)
{
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb->index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb->index);
if (!bb_info)
return;
static void
df_rd_bottom_dump (basic_block bb, FILE *file)
{
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb->index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb->index);
if (!bb_info)
return;
NULL, /* Incremental solution verify start. */
NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- sizeof (struct df_rd_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_rd_bb_info),/* Size of entry of block_info array. */
TV_DF_RD, /* Timing variable. */
true /* Reset blocks on dropping out of blocks_to_analyze. */
};
df_lr_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_lr_bb_info *bb_info = (struct df_lr_bb_info *) vbb_info;
+ class df_lr_bb_info *bb_info = (class df_lr_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->use);
EXECUTE_IF_SET_IN_BITMAP (df_lr->out_of_date_transfer_functions, 0, bb_index, bi)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->use.obstack)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
gcc_assert (bb_info);
bitmap_clear (&bb_info->in);
bitmap_clear (&bb_info->out);
df_lr_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
rtx_insn *insn;
df_ref def, use;
{
/* The exit block is special for this problem and its bits are
computed from thin air. */
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (EXIT_BLOCK);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (EXIT_BLOCK);
bitmap_copy (&bb_info->use, df->exit_block_uses);
}
else
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
bitmap_copy (&bb_info->in, &bb_info->use);
bitmap_clear (&bb_info->out);
}
static bool
df_lr_transfer_function (int bb_index)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
bitmap use = &bb_info->use;
static void
df_lr_top_dump (basic_block bb, FILE *file)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
struct df_lr_problem_data *problem_data;
if (!bb_info)
return;
static void
df_lr_bottom_dump (basic_block bb, FILE *file)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
struct df_lr_problem_data *problem_data;
if (!bb_info)
return;
df_lr_verify_solution_start,/* Incremental solution verify start. */
df_lr_verify_solution_end, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- sizeof (struct df_lr_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_lr_bb_info),/* Size of entry of block_info array. */
TV_DF_LR, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
FOR_ALL_BB_FN (bb, cfun)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
bitmap_set_bit (&all_blocks, bb->index);
if (bb_info)
df_live_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_live_bb_info *bb_info = (struct df_live_bb_info *) vbb_info;
+ class df_live_bb_info *bb_info = (class df_live_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->gen);
EXECUTE_IF_SET_IN_BITMAP (df_live->out_of_date_transfer_functions, 0, bb_index, bi)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->kill.obstack)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
gcc_assert (bb_info);
bitmap_clear (&bb_info->in);
bitmap_clear (&bb_info->out);
df_live_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
rtx_insn *insn;
df_ref def;
int luid = 0;
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
- struct df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
/* No register may reach a location where it is not used. Thus
we trim the rr result to the places where it is used. */
static bool
df_live_transfer_function (int bb_index)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
- struct df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
bitmap gen = &bb_info->gen;
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
- struct df_live_bb_info *bb_live_info = df_live_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
+ class df_live_bb_info *bb_live_info = df_live_get_bb_info (bb_index);
/* No register may reach a location where it is not used. Thus
we trim the rr result to the places where it is used. */
static void
df_live_top_dump (basic_block bb, FILE *file)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
struct df_live_problem_data *problem_data;
if (!bb_info)
static void
df_live_bottom_dump (basic_block bb, FILE *file)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
struct df_live_problem_data *problem_data;
if (!bb_info)
df_live_verify_solution_start,/* Incremental solution verify start. */
df_live_verify_solution_end, /* Incremental solution verify end. */
&problem_LR, /* Dependent problem. */
- sizeof (struct df_live_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_live_bb_info),/* Size of entry of block_info array. */
TV_DF_LIVE, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
FOR_ALL_BB_FN (bb, cfun)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
bitmap_set_bit (&all_blocks, bb->index);
if (bb_info)
df_mir_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_mir_bb_info *bb_info = (struct df_mir_bb_info *) vbb_info;
+ class df_mir_bb_info *bb_info = (class df_mir_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->gen);
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->kill.obstack)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
gcc_assert (bb_info);
df_mir_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
rtx_insn *insn;
int luid = 0;
static void
df_mir_confluence_0 (basic_block bb)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
bitmap_clear (&bb_info->in);
}
static bool
df_mir_transfer_function (int bb_index)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
bitmap gen = &bb_info->gen;
static void
df_mir_top_dump (basic_block bb, FILE *file)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
if (!bb_info)
return;
static void
df_mir_bottom_dump (basic_block bb, FILE *file)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
if (!bb_info)
return;
df_mir_verify_solution_start, /* Incremental solution verify start. */
df_mir_verify_solution_end, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- sizeof (struct df_mir_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_mir_bb_info),/* Size of entry of block_info array. */
TV_DF_MIR, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
df_chain_create_bb (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
rtx_insn *insn;
bitmap_head cpy;
df_word_lr_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_word_lr_bb_info *bb_info = (struct df_word_lr_bb_info *) vbb_info;
+ class df_word_lr_bb_info *bb_info = (class df_word_lr_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->use);
EXECUTE_IF_SET_IN_BITMAP (df_word_lr->out_of_date_transfer_functions, 0, bb_index, bi)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->use.obstack)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
gcc_assert (bb_info);
bitmap_clear (&bb_info->in);
bitmap_clear (&bb_info->out);
df_word_lr_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
rtx_insn *insn;
df_ref def, use;
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
bitmap_copy (&bb_info->in, &bb_info->use);
bitmap_clear (&bb_info->out);
}
static bool
df_word_lr_transfer_function (int bb_index)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
bitmap use = &bb_info->use;
static void
df_word_lr_top_dump (basic_block bb, FILE *file)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb->index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb->index);
if (!bb_info)
return;
static void
df_word_lr_bottom_dump (basic_block bb, FILE *file)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb->index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb->index);
if (!bb_info)
return;
NULL, /* Incremental solution verify start. */
NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- sizeof (struct df_word_lr_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_word_lr_bb_info),/* Size of entry of block_info array. */
TV_DF_WORD_LR, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
df_md_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_md_bb_info *bb_info = (struct df_md_bb_info *) vbb_info;
+ class df_md_bb_info *bb_info = (class df_md_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->kill);
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->init.obstack)
{
}
static void
-df_md_bb_local_compute_process_def (struct df_md_bb_info *bb_info,
+df_md_bb_local_compute_process_def (class df_md_bb_info *bb_info,
df_ref def,
int top_flag)
{
df_md_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
rtx_insn *insn;
/* Artificials are only hard regs. */
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
gcc_assert (bb_info);
bitmap_clear (&bb_info->in);
bitmap_clear (&bb_info->out);
df_md_transfer_function (int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
bitmap gen = &bb_info->gen;
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
bitmap_copy (&bb_info->in, &bb_info->init);
df_md_transfer_function (bb_index);
static void
df_md_confluence_0 (basic_block bb)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
bitmap_copy (&bb_info->in, &bb_info->init);
}
static void
df_md_top_dump (basic_block bb, FILE *file)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
if (!bb_info)
return;
static void
df_md_bottom_dump (basic_block bb, FILE *file)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
if (!bb_info)
return;
NULL, /* Incremental solution verify start. */
NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- sizeof (struct df_md_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_md_bb_info),/* Size of entry of block_info array. */
TV_DF_MD, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
auto_vec<df_mw_hardreg *, 32> mw_vec;
};
-static void df_ref_record (enum df_ref_class, struct df_collection_rec *,
+static void df_ref_record (enum df_ref_class, class df_collection_rec *,
rtx, rtx *,
basic_block, struct df_insn_info *,
enum df_ref_type, int ref_flags);
-static void df_def_record_1 (struct df_collection_rec *, rtx *,
+static void df_def_record_1 (class df_collection_rec *, rtx *,
basic_block, struct df_insn_info *,
int ref_flags);
-static void df_defs_record (struct df_collection_rec *, rtx,
+static void df_defs_record (class df_collection_rec *, rtx,
basic_block, struct df_insn_info *,
int ref_flags);
-static void df_uses_record (struct df_collection_rec *,
+static void df_uses_record (class df_collection_rec *,
rtx *, enum df_ref_type,
basic_block, struct df_insn_info *,
int ref_flags);
static void df_install_ref_incremental (df_ref);
-static void df_insn_refs_collect (struct df_collection_rec*,
+static void df_insn_refs_collect (class df_collection_rec*,
basic_block, struct df_insn_info *);
-static void df_canonize_collection_rec (struct df_collection_rec *);
+static void df_canonize_collection_rec (class df_collection_rec *);
static void df_get_regular_block_artificial_uses (bitmap);
static void df_get_eh_block_artificial_uses (bitmap);
static void df_ref_chain_delete_du_chain (df_ref);
static void df_ref_chain_delete (df_ref);
-static void df_refs_add_to_chains (struct df_collection_rec *,
+static void df_refs_add_to_chains (class df_collection_rec *,
basic_block, rtx_insn *, unsigned int);
-static bool df_insn_refs_verify (struct df_collection_rec *, basic_block,
+static bool df_insn_refs_verify (class df_collection_rec *, basic_block,
rtx_insn *, bool);
-static void df_entry_block_defs_collect (struct df_collection_rec *, bitmap);
-static void df_exit_block_uses_collect (struct df_collection_rec *, bitmap);
+static void df_entry_block_defs_collect (class df_collection_rec *, bitmap);
+static void df_exit_block_uses_collect (class df_collection_rec *, bitmap);
static void df_install_ref (df_ref, struct df_reg_info *,
struct df_ref_info *, bool);
/* Free all of the refs and the mw_hardregs in COLLECTION_REC. */
static void
-df_free_collection_rec (struct df_collection_rec *collection_rec)
+df_free_collection_rec (class df_collection_rec *collection_rec)
{
unsigned int ix;
struct df_scan_problem_data *problem_data
unsigned int uid = INSN_UID (insn);
struct df_insn_info *insn_info = NULL;
basic_block bb = BLOCK_FOR_INSN (insn);
- struct df_collection_rec collection_rec;
+ class df_collection_rec collection_rec;
if ((!df) || (!INSN_P (insn)))
return false;
{
basic_block bb = BLOCK_FOR_INSN (insn);
rtx note;
- struct df_collection_rec collection_rec;
+ class df_collection_rec collection_rec;
unsigned int i;
df_mw_hardreg_chain_delete_eq_uses (insn_info);
/* Sort and remove duplicates from the COLLECTION_REC. */
static void
-df_canonize_collection_rec (struct df_collection_rec *collection_rec)
+df_canonize_collection_rec (class df_collection_rec *collection_rec)
{
df_sort_and_compress_refs (&collection_rec->def_vec);
df_sort_and_compress_refs (&collection_rec->use_vec);
chains and update other necessary information. */
static void
-df_refs_add_to_chains (struct df_collection_rec *collection_rec,
+df_refs_add_to_chains (class df_collection_rec *collection_rec,
basic_block bb, rtx_insn *insn, unsigned int flags)
{
if (insn)
static df_ref
df_ref_create_structure (enum df_ref_class cl,
- struct df_collection_rec *collection_rec,
+ class df_collection_rec *collection_rec,
rtx reg, rtx *loc,
basic_block bb, struct df_insn_info *info,
enum df_ref_type ref_type,
static void
df_ref_record (enum df_ref_class cl,
- struct df_collection_rec *collection_rec,
+ class df_collection_rec *collection_rec,
rtx reg, rtx *loc,
basic_block bb, struct df_insn_info *insn_info,
enum df_ref_type ref_type,
Any change here has to be matched in df_find_hard_reg_defs_1. */
static void
-df_def_record_1 (struct df_collection_rec *collection_rec,
+df_def_record_1 (class df_collection_rec *collection_rec,
rtx *loc, basic_block bb, struct df_insn_info *insn_info,
int flags)
{
here has to be matched in df_find_hard_reg_defs. */
static void
-df_defs_record (struct df_collection_rec *collection_rec,
+df_defs_record (class df_collection_rec *collection_rec,
rtx x, basic_block bb, struct df_insn_info *insn_info,
int flags)
{
/* Process all the registers used in the rtx at address LOC. */
static void
-df_uses_record (struct df_collection_rec *collection_rec,
+df_uses_record (class df_collection_rec *collection_rec,
rtx *loc, enum df_ref_type ref_type,
basic_block bb, struct df_insn_info *insn_info,
int flags)
/* For all DF_REF_CONDITIONAL defs, add corresponding uses. */
static void
-df_get_conditional_uses (struct df_collection_rec *collection_rec)
+df_get_conditional_uses (class df_collection_rec *collection_rec)
{
unsigned int ix;
df_ref ref;
/* Get call's extra defs and uses (track caller-saved registers). */
static void
-df_get_call_refs (struct df_collection_rec *collection_rec,
+df_get_call_refs (class df_collection_rec *collection_rec,
basic_block bb,
struct df_insn_info *insn_info,
int flags)
and reg chains. */
static void
-df_insn_refs_collect (struct df_collection_rec *collection_rec,
+df_insn_refs_collect (class df_collection_rec *collection_rec,
basic_block bb, struct df_insn_info *insn_info)
{
rtx note;
to COLLECTION_REC. */
static void
-df_bb_refs_collect (struct df_collection_rec *collection_rec, basic_block bb)
+df_bb_refs_collect (class df_collection_rec *collection_rec, basic_block bb)
{
collection_rec->def_vec.truncate (0);
collection_rec->use_vec.truncate (0);
reference to include. */
static void
-df_entry_block_defs_collect (struct df_collection_rec *collection_rec,
+df_entry_block_defs_collect (class df_collection_rec *collection_rec,
bitmap entry_block_defs)
{
unsigned int i;
static void
df_record_entry_block_defs (bitmap entry_block_defs)
{
- struct df_collection_rec collection_rec;
+ class df_collection_rec collection_rec;
df_entry_block_defs_collect (&collection_rec, entry_block_defs);
/* Process bb_refs chain */
It uses df->exit_block_uses to determine which registers to include. */
static void
-df_exit_block_uses_collect (struct df_collection_rec *collection_rec, bitmap exit_block_uses)
+df_exit_block_uses_collect (class df_collection_rec *collection_rec, bitmap exit_block_uses)
{
unsigned int i;
bitmap_iterator bi;
static void
df_record_exit_block_uses (bitmap exit_block_uses)
{
- struct df_collection_rec collection_rec;
+ class df_collection_rec collection_rec;
df_exit_block_uses_collect (&collection_rec, exit_block_uses);
/* Process bb_refs chain */
If ABORT_IF_FAIL is set, this function never returns false. */
static bool
-df_insn_refs_verify (struct df_collection_rec *collection_rec,
+df_insn_refs_verify (class df_collection_rec *collection_rec,
basic_block bb,
rtx_insn *insn,
bool abort_if_fail)
{
rtx_insn *insn;
struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb->index);
- struct df_collection_rec collection_rec;
+ class df_collection_rec collection_rec;
gcc_assert (bb_info);
#include "timevar.h"
struct dataflow;
-struct df_d;
+class df_d;
struct df_problem;
struct df_link;
struct df_insn_info;
/* This is used for debugging and for the dumpers to find the latest
instance so that the df info can be added to the dumps. This
should not be used by regular code. */
-extern struct df_d *df;
+extern class df_d *df;
#define df_scan (df->problems_by_index[DF_SCAN])
#define df_rd (df->problems_by_index[DF_RD])
#define df_lr (df->problems_by_index[DF_LR])
extern void df_finish_pass (bool);
extern void df_analyze_problem (struct dataflow *, bitmap, int *, int);
extern void df_analyze ();
-extern void df_analyze_loop (struct loop *);
+extern void df_analyze_loop (class loop *);
extern int df_get_n_blocks (enum df_flow_dir);
extern int *df_get_postorder (enum df_flow_dir);
extern void df_simple_dataflow (enum df_flow_dir, df_init_function,
return NULL;
}
-static inline struct df_rd_bb_info *
+static inline class df_rd_bb_info *
df_rd_get_bb_info (unsigned int index)
{
if (index < df_rd->block_info_size)
- return &((struct df_rd_bb_info *) df_rd->block_info)[index];
+ return &((class df_rd_bb_info *) df_rd->block_info)[index];
else
return NULL;
}
-static inline struct df_lr_bb_info *
+static inline class df_lr_bb_info *
df_lr_get_bb_info (unsigned int index)
{
if (index < df_lr->block_info_size)
- return &((struct df_lr_bb_info *) df_lr->block_info)[index];
+ return &((class df_lr_bb_info *) df_lr->block_info)[index];
else
return NULL;
}
-static inline struct df_md_bb_info *
+static inline class df_md_bb_info *
df_md_get_bb_info (unsigned int index)
{
if (index < df_md->block_info_size)
- return &((struct df_md_bb_info *) df_md->block_info)[index];
+ return &((class df_md_bb_info *) df_md->block_info)[index];
else
return NULL;
}
-static inline struct df_live_bb_info *
+static inline class df_live_bb_info *
df_live_get_bb_info (unsigned int index)
{
if (index < df_live->block_info_size)
- return &((struct df_live_bb_info *) df_live->block_info)[index];
+ return &((class df_live_bb_info *) df_live->block_info)[index];
else
return NULL;
}
-static inline struct df_word_lr_bb_info *
+static inline class df_word_lr_bb_info *
df_word_lr_get_bb_info (unsigned int index)
{
if (index < df_word_lr->block_info_size)
- return &((struct df_word_lr_bb_info *) df_word_lr->block_info)[index];
+ return &((class df_word_lr_bb_info *) df_word_lr->block_info)[index];
else
return NULL;
}
-static inline struct df_mir_bb_info *
+static inline class df_mir_bb_info *
df_mir_get_bb_info (unsigned int index)
{
if (index < df_mir->block_info_size)
- return &((struct df_mir_bb_info *) df_mir->block_info)[index];
+ return &((class df_mir_bb_info *) df_mir->block_info)[index];
else
return NULL;
}
hook returns true for both @code{ptr_mode} and @code{Pmode}.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_REF_MAY_ALIAS_ERRNO (struct ao_ref *@var{ref})
+@deftypefn {Target Hook} bool TARGET_REF_MAY_ALIAS_ERRNO (ao_ref *@var{ref})
Define this to return nonzero if the memory reference @var{ref}
may alias with the system C library errno location. The default
version of this hook assumes the system C library errno location
is either a declaration of type int or accessed by dereferencing
a pointer to int.
@end deftypefn
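[Editorial note: a sketch of how a backend might override this hook. The function name is hypothetical and the test is oversimplified; the real default also inspects the reference's base declaration.]

    /* Hypothetical override: only int-sized accesses can alias errno.  */
    static bool
    mytarget_ref_may_alias_errno (ao_ref *ref)
    {
      return known_eq (ref->size, INT_TYPE_SIZE);
    }
    #undef TARGET_REF_MAY_ALIAS_ERRNO
    #define TARGET_REF_MAY_ALIAS_ERRNO mytarget_ref_may_alias_errno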
all zeros. GCC can then try to branch around the instruction instead.
@end deftypefn
-@deftypefn {Target Hook} {void *} TARGET_VECTORIZE_INIT_COST (struct loop *@var{loop_info})
+@deftypefn {Target Hook} {void *} TARGET_VECTORIZE_INIT_COST (class loop *@var{loop_info})
This hook should initialize target-specific data structures in
preparation for modeling the costs of vectorizing a loop or basic
block. The default allocates three unsigned integers for
accumulating costs for the prologue, body, and epilogue of the
loop or basic block. If @var{loop_info} is non-NULL, it identifies
the loop being vectorized; otherwise a single block is being
vectorized.
@end deftypefn
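[Editorial note: a sketch matching the documented default behaviour, not the exact library code; the function name is hypothetical.]

    static void *
    mytarget_init_cost (class loop *loop_info ATTRIBUTE_UNUSED)
    {
      /* Three accumulators: prologue, body, epilogue.  */
      unsigned *cost = XNEWVEC (unsigned, 3);
      cost[0] = cost[1] = cost[2] = 0;
      return cost;
    }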
-@deftypefn {Target Hook} unsigned TARGET_VECTORIZE_ADD_STMT_COST (void *@var{data}, int @var{count}, enum vect_cost_for_stmt @var{kind}, struct _stmt_vec_info *@var{stmt_info}, int @var{misalign}, enum vect_cost_model_location @var{where})
+@deftypefn {Target Hook} unsigned TARGET_VECTORIZE_ADD_STMT_COST (void *@var{data}, int @var{count}, enum vect_cost_for_stmt @var{kind}, class _stmt_vec_info *@var{stmt_info}, int @var{misalign}, enum vect_cost_model_location @var{where})
This hook should update the target-specific @var{data} in response
to adding @var{count} copies of the given @var{kind} of statement
to a loop or basic block. The default adds the builtin vectorizer
cost for the copies of the statement to the accumulator specified
by @var{where} (the prologue, body, or epilogue), and returns the
amount added. The return value should be viewed as a tentative
cost that may later be revised.
@end deftypefn
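[Editorial note: continuing the sketch above, an override of the documented shape accumulates into the slot selected by @var{where}. The per-@var{kind} cost lookup is elided; the flat per-copy cost is a placeholder.]

    static unsigned
    mytarget_add_stmt_cost (void *data, int count,
                            enum vect_cost_for_stmt kind ATTRIBUTE_UNUSED,
                            class _stmt_vec_info *stmt_info ATTRIBUTE_UNUSED,
                            int misalign ATTRIBUTE_UNUSED,
                            enum vect_cost_model_location where)
    {
      unsigned *cost = (unsigned *) data;
      unsigned retval = (unsigned) count;  /* Placeholder for KIND's cost.  */
      cost[where] += retval;
      return retval;
    }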
body must be generated.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_PREDICT_DOLOOP_P (struct loop *@var{loop})
+@deftypefn {Target Hook} bool TARGET_PREDICT_DOLOOP_P (class loop *@var{loop})
Return true if we can predict it is possible to use a low-overhead loop
for a particular loop. The parameter @var{loop} is a pointer to the loop.
This target hook is required only when the target supports low-overhead
@var{bit_code} is @code{AND} or @code{IOR}, which is the op on the compares.
@end deftypefn
-@deftypefn {Target Hook} unsigned TARGET_LOOP_UNROLL_ADJUST (unsigned @var{nunroll}, struct loop *@var{loop})
+@deftypefn {Target Hook} unsigned TARGET_LOOP_UNROLL_ADJUST (unsigned @var{nunroll}, class loop *@var{loop})
This target hook returns a new value for the number of times @var{loop}
should be unrolled. The parameter @var{nunroll} is the number of times
the loop is to be unrolled. The parameter @var{loop} is a pointer to
} positions_needed;
/* The next store info for this insn. */
- struct store_info *next;
+ class store_info *next;
/* The right hand side of the store. This is used if there is a
subsequent reload of the mem's address somewhere later in the
rtx mem;
/* The next read_info for this insn. */
- struct read_info_type *next;
+ class read_info_type *next;
};
-typedef struct read_info_type *read_info_t;
+typedef class read_info_type *read_info_t;
static object_allocator<read_info_type> read_info_type_pool ("read_info_pool");
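[Editorial note: instances come from this pool through object_allocator's allocate/remove pair; a minimal sketch of the pattern.]

    read_info_t r = read_info_type_pool.allocate ();
    /* ... initialize and link R into the insn's read chain ...  */
    read_info_type_pool.remove (r);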
while (ptr)
{
insn_info_t next = ptr->next_local_store;
- struct store_info *s_info = ptr->store_rec;
+ class store_info *s_info = ptr->store_rec;
bool del = true;
/* Skip the clobbers. We delete the active insn if this insn
/* In cfghooks.c */
extern void dump_bb (FILE *, basic_block, int, dump_flags_t);
-struct opt_pass;
+class opt_pass;
namespace gcc {
/* Return true if the given memory attributes are equal. */
bool
-mem_attrs_eq_p (const struct mem_attrs *p, const struct mem_attrs *q)
+mem_attrs_eq_p (const class mem_attrs *p, const class mem_attrs *q)
{
if (p == q)
return true;
{
poly_int64 apply_bitpos = 0;
tree type;
- struct mem_attrs attrs, *defattrs, *refattrs;
+ class mem_attrs attrs, *defattrs, *refattrs;
addr_space_t as;
/* It can happen that type_for_mode was given a mode for which there
{
rtx new_rtx = change_address_1 (memref, mode, addr, 1, false);
machine_mode mmode = GET_MODE (new_rtx);
- struct mem_attrs *defattrs;
+ class mem_attrs *defattrs;
mem_attrs attrs (*get_mem_attrs (memref));
defattrs = mode_mem_attrs[(int) mmode];
rtx addr = XEXP (memref, 0);
rtx new_rtx;
scalar_int_mode address_mode;
- struct mem_attrs attrs (*get_mem_attrs (memref)), *defattrs;
+ class mem_attrs attrs (*get_mem_attrs (memref)), *defattrs;
unsigned HOST_WIDE_INT max_align;
#ifdef POINTERS_EXTEND_UNSIGNED
scalar_int_mode pointer_mode
{
rtx new_rtx, addr = XEXP (memref, 0);
machine_mode address_mode;
- struct mem_attrs *defattrs;
+ class mem_attrs *defattrs;
mem_attrs attrs (*get_mem_attrs (memref));
address_mode = get_address_mode (memref);
#ifndef GCC_EMIT_RTL_H
#define GCC_EMIT_RTL_H
-struct temp_slot;
-typedef struct temp_slot *temp_slot_p;
+class temp_slot;
+typedef class temp_slot *temp_slot_p;
/* Information maintained about the RTL representation of incoming arguments. */
struct GTY(()) incoming_args {
vec<rtx, va_gc> *x_stack_slot_list;
/* List of empty areas in the stack frame. */
- struct frame_space *frame_space_list;
+ class frame_space *frame_space_list;
/* Place after which to insert the tail_recursion_label if we need one. */
rtx_note *x_stack_check_probe_note;
vec<temp_slot_p, va_gc> *x_used_temp_slots;
/* List of available temp slots. */
- struct temp_slot *x_avail_temp_slots;
+ class temp_slot *x_avail_temp_slots;
/* Current nesting level for temporaries. */
int x_temp_slot_level;
#define crtl (&x_rtl)
/* Return whether two MEM_ATTRs are equal. */
-bool mem_attrs_eq_p (const struct mem_attrs *, const struct mem_attrs *);
+bool mem_attrs_eq_p (const class mem_attrs *, const class mem_attrs *);
/* Set the alias set of MEM to SET. */
extern void set_mem_alias_set (rtx, alias_set_type);
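[Editorial note: a hedged usage sketch, where X and Y stand for two arbitrary MEM rtxes.]

    if (MEM_P (x) && MEM_P (y)
        && mem_attrs_eq_p (get_mem_attrs (x), get_mem_attrs (y)))
      /* X and Y carry identical alias-set, size and alignment data.  */
      ;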
make_single_succ_edge (bb, bb->next_bb, e_flags);
if (current_loops)
{
- struct loop *loop = bb->next_bb->loop_father;
+ class loop *loop = bb->next_bb->loop_father;
/* If we created a pre-header block, add the new block to the
outer loop, otherwise to the loop itself. */
if (bb->next_bb == loop->header)
make_single_succ_edge (bb, bb->next_bb, EDGE_FALLTHRU);
if (current_loops)
{
- struct loop *loop = bb->next_bb->loop_father;
+ class loop *loop = bb->next_bb->loop_father;
/* If we created a pre-header block, add the new block to the
outer loop, otherwise to the loop itself. */
if (bb->next_bb == loop->header)
make_single_succ_edge (bb, bb->next_bb, EDGE_FALLTHRU);
if (current_loops)
{
- struct loop *loop = bb->next_bb->loop_father;
+ class loop *loop = bb->next_bb->loop_father;
/* If we created a pre-header block, add the new block to the
outer loop, otherwise to the loop itself. */
if (bb->next_bb == loop->header)
stack pointer, such as acquiring the space by calling malloc(). */
if (targetm.have_allocate_stack ())
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
/* We don't have to check against the predicate for operand 0 since
TARGET is known to be a pseudo of the proper mode, which must
be valid for the operand. */
{
if (targetm.have_probe_stack_address ())
{
- struct expand_operand ops[1];
+ class expand_operand ops[1];
insn_code icode = targetm.code_for_probe_stack_address;
create_address_operand (ops, address);
maybe_legitimize_operands (icode, 0, 1, ops);
/* Next see if we have an insn to check the stack. */
else if (targetm.have_check_stack ())
{
- struct expand_operand ops[1];
+ class expand_operand ops[1];
rtx addr = memory_address (Pmode,
gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
stack_pointer_rtx,
unsigned HOST_WIDE_INT bitnum,
rtx value, scalar_int_mode value_mode)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx value1;
rtx xop0 = op0;
rtx_insn *last = get_last_insn ();
&& known_eq (bitsize, GET_MODE_BITSIZE (innermode))
&& multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
enum insn_code icode = optab_handler (vec_set_optab, outermode);
create_fixed_operand (&ops[0], op0);
&& known_eq (bitsize, GET_MODE_BITSIZE (fieldmode))
&& optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
rtx arg0 = op0;
unsigned HOST_WIDE_INT subreg_off;
int unsignedp, rtx target,
machine_mode mode, machine_mode tmode)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx spec_target = target;
rtx spec_target_subreg = 0;
scalar_int_mode ext_mode = extv->field_mode;
!= CODE_FOR_nothing)
&& multiple_p (bitnum, GET_MODE_BITSIZE (tmode), &pos))
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
machine_mode outermode = new_mode;
machine_mode innermode = tmode;
enum insn_code icode
&& known_eq (bitsize, GET_MODE_BITSIZE (innermode))
&& multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, innermode);
ops[0].target = 1;
int unsignedp, rtx x, rtx y, int normalizep,
machine_mode target_mode)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx op0, comparison, subtarget;
rtx_insn *last;
scalar_int_mode result_mode = targetm.cstore_mode (icode);
|| max_size <= (GET_MODE_MASK (mode) >> 1)
|| GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
{
- struct expand_operand ops[9];
+ class expand_operand ops[9];
unsigned int nops;
/* ??? When called via emit_block_move_for_call, it'd be
if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
target = NULL_RTX;
- struct expand_operand ops[5];
+ class expand_operand ops[5];
create_output_operand (&ops[0], target, insn_mode);
create_fixed_operand (&ops[1], arg1_rtx);
create_fixed_operand (&ops[2], arg2_rtx);
|| max_size <= (GET_MODE_MASK (mode) >> 1)
|| GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
{
- struct expand_operand ops[9];
+ class expand_operand ops[9];
unsigned int nops;
nops = insn_data[(int) code].n_generator_args;
icode = optab_handler (push_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[1];
+ class expand_operand ops[1];
create_input_operand (&ops[0], x, mode);
if (maybe_expand_insn (icode, 1, ops))
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_fixed_operand (&ops[0], mem);
create_input_operand (&ops[1], reg, mode);
bool
emit_storent_insn (rtx to, rtx from)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
machine_mode mode = GET_MODE (to);
enum insn_code code = optab_handler (storent_optab, mode);
!= CODE_FOR_nothing)
&& (elt = uniform_vector_p (exp)))
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], expand_normal (elt), eltmode);
expand_insn (icode, 2, ops);
&& mode == TYPE_MODE (TREE_TYPE (treeop0))
&& SCALAR_INT_MODE_P (mode))
{
- struct expand_operand eops[4];
+ class expand_operand eops[4];
machine_mode imode = TYPE_MODE (TREE_TYPE (treeop0));
expand_operands (treeop0, treeop1,
subtarget, &op0, &op1, EXPAND_NORMAL);
&& ((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing))
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
/* We've already validated the memory, and we're creating a
new pseudo destination. The predicates really can't fail,
if ((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
/* We've already validated the memory, and we're creating a
new pseudo destination. The predicates really can't fail,
rtx table_label, rtx default_label, rtx fallback_label,
profile_probability default_probability)
{
- struct expand_operand ops[5];
+ class expand_operand ops[5];
scalar_int_mode index_mode = SImode;
rtx op1, op2, index;
enum excess_precision x_flag_excess_precision;
};
-extern struct target_flag_state default_target_flag_state;
+extern class target_flag_state default_target_flag_state;
#if SWITCHABLE_TARGET
-extern struct target_flag_state *this_target_flag_state;
+extern class target_flag_state *this_target_flag_state;
#else
#define this_target_flag_state (&default_target_flag_state)
#endif
/* Forward declarations. */
-static struct temp_slot *find_temp_slot_from_address (rtx);
+static class temp_slot *find_temp_slot_from_address (rtx);
static void pad_to_arg_alignment (struct args_size *, int, struct args_size *);
static void pad_below (struct args_size *, machine_mode, tree);
static void reorder_blocks_1 (rtx_insn *, tree, vec<tree> *);
static void
add_frame_space (poly_int64 start, poly_int64 end)
{
- struct frame_space *space = ggc_alloc<frame_space> ();
+ class frame_space *space = ggc_alloc<frame_space> ();
space->next = crtl->frame_space_list;
crtl->frame_space_list = space;
space->start = start;
{
if (kind & ASLK_RECORD_PAD)
{
- struct frame_space **psp;
+ class frame_space **psp;
for (psp = &crtl->frame_space_list; *psp; psp = &(*psp)->next)
{
- struct frame_space *space = *psp;
+ class frame_space *space = *psp;
if (!try_fit_stack_local (space->start, space->length, size,
alignment, &slot_offset))
continue;
class GTY(()) temp_slot {
public:
/* Points to next temporary slot. */
- struct temp_slot *next;
+ class temp_slot *next;
/* Points to previous temporary slot. */
- struct temp_slot *prev;
+ class temp_slot *prev;
/* The rtx used to reference the slot. */
rtx slot;
/* The size, in units, of the slot. */
struct GTY((for_user)) temp_slot_address_entry {
hashval_t hash;
rtx address;
- struct temp_slot *temp_slot;
+ class temp_slot *temp_slot;
};
struct temp_address_hasher : ggc_ptr_hash<temp_slot_address_entry>
/* Removes temporary slot TEMP from LIST. */
static void
-cut_slot_from_list (struct temp_slot *temp, struct temp_slot **list)
+cut_slot_from_list (class temp_slot *temp, class temp_slot **list)
{
if (temp->next)
temp->next->prev = temp->prev;
/* Inserts temporary slot TEMP to LIST. */
static void
-insert_slot_to_list (struct temp_slot *temp, struct temp_slot **list)
+insert_slot_to_list (class temp_slot *temp, class temp_slot **list)
{
temp->next = *list;
if (*list)
/* Returns the list of used temp slots at LEVEL. */
-static struct temp_slot **
+static class temp_slot **
temp_slots_at_level (int level)
{
if (level >= (int) vec_safe_length (used_temp_slots))
/* Moves temporary slot TEMP to LEVEL. */
static void
-move_slot_to_level (struct temp_slot *temp, int level)
+move_slot_to_level (class temp_slot *temp, int level)
{
cut_slot_from_list (temp, temp_slots_at_level (temp->level));
insert_slot_to_list (temp, temp_slots_at_level (level));
/* Make temporary slot TEMP available. */
static void
-make_slot_available (struct temp_slot *temp)
+make_slot_available (class temp_slot *temp)
{
cut_slot_from_list (temp, temp_slots_at_level (temp->level));
insert_slot_to_list (temp, &avail_temp_slots);
/* Add ADDRESS as an alias of TEMP_SLOT to the address -> temp slot mapping. */
static void
-insert_temp_slot_address (rtx address, struct temp_slot *temp_slot)
+insert_temp_slot_address (rtx address, class temp_slot *temp_slot)
{
struct temp_slot_address_entry *t = ggc_alloc<temp_slot_address_entry> ();
t->address = address;
/* Find the temp slot corresponding to the object at address X. */
-static struct temp_slot *
+static class temp_slot *
find_temp_slot_from_address (rtx x)
{
- struct temp_slot *p;
+ class temp_slot *p;
struct temp_slot_address_entry tmp, *t;
/* First try the easy way:
assign_stack_temp_for_type (machine_mode mode, poly_int64 size, tree type)
{
unsigned int align;
- struct temp_slot *p, *best_p = 0, *selected = NULL, **pp;
+ class temp_slot *p, *best_p = 0, *selected = NULL, **pp;
rtx slot;
gcc_assert (known_size_p (size));
static void
combine_temp_slots (void)
{
- struct temp_slot *p, *q, *next, *next_q;
+ class temp_slot *p, *q, *next, *next_q;
int num_slots;
/* We can't combine slots, because the information about which slot
void
update_temp_slot_address (rtx old_rtx, rtx new_rtx)
{
- struct temp_slot *p;
+ class temp_slot *p;
if (rtx_equal_p (old_rtx, new_rtx))
return;
void
preserve_temp_slots (rtx x)
{
- struct temp_slot *p = 0, *next;
+ class temp_slot *p = 0, *next;
if (x == 0)
return;
void
free_temp_slots (void)
{
- struct temp_slot *p, *next;
+ class temp_slot *p, *next;
bool some_available = false;
for (p = *temp_slots_at_level (temp_slot_level); p; p = next)
class GTY(()) frame_space
{
public:
- struct frame_space *next;
+ class frame_space *next;
poly_int64 start;
poly_int64 length;
char *pass_startwith;
/* The stack usage of this function. */
- struct stack_usage *su;
+ class stack_usage *su;
/* Value histograms attached to particular statements. */
htab_t GTY((skip)) value_histograms;
single_def_use_dom_walker::before_dom_children (basic_block bb)
{
int bb_index = bb->index;
- struct df_md_bb_info *md_bb_info = df_md_get_bb_info (bb_index);
- struct df_lr_bb_info *lr_bb_info = df_lr_get_bb_info (bb_index);
+ class df_md_bb_info *md_bb_info = df_md_get_bb_info (bb_index);
+ class df_lr_bb_info *lr_bb_info = df_lr_get_bb_info (bb_index);
rtx_insn *insn;
bitmap_copy (local_md, &md_bb_info->in);
tree m_other_expr;
};
-struct op_location_t;
+class op_location_t;
/* A subclass of rich_location for showing problems with binary operations.
/* This is the size of the buffer used to read in source file lines. */
-struct function_info;
-struct block_info;
-struct source_info;
+class function_info;
+class block_info;
+class source_info;
/* Describes an arc between two basic blocks. */
struct arc_info
{
/* source and destination blocks. */
- struct block_info *src;
- struct block_info *dst;
+ class block_info *src;
+ class block_info *dst;
/* transition counts. */
gcov_type count;
/* Temporary chain for solving graph, and for chaining blocks on one
line. */
- struct block_info *chain;
+ class block_info *chain;
};
vector<line_info> lines;
/* Next function. */
- struct function_info *next;
+ class function_info *next;
/* Get demangled name of a function. The demangled name
is converted when it is used for the first time. */
class insn_def
{
public:
- struct insn_def *next; /* Next insn in chain. */
+ class insn_def *next; /* Next insn in chain. */
rtx def; /* The DEFINE_... */
int insn_code; /* Instruction number. */
int insn_index; /* Expression number in file, for errors. */
struct insn_ent
{
struct insn_ent *next; /* Next in chain. */
- struct insn_def *def; /* Instruction definition. */
+ class insn_def *def; /* Instruction definition. */
};
/* Each value of an attribute (either constant or computed) is assigned a
public:
char *name; /* Name of attribute. */
const char *enum_name; /* Enum name for DEFINE_ENUM_NAME. */
- struct attr_desc *next; /* Next attribute. */
+ class attr_desc *next; /* Next attribute. */
struct attr_value *first_value; /* First value of this attribute. */
struct attr_value *default_val; /* Default value for this attribute. */
file_location loc; /* Where in the .md files it occurs. */
{
public:
rtx def; /* DEFINE_DELAY expression. */
- struct delay_desc *next; /* Next DEFINE_DELAY. */
+ class delay_desc *next; /* Next DEFINE_DELAY. */
file_location loc; /* Where in the .md files it occurs. */
int num; /* Number of DEFINE_DELAY, starting at 1. */
};
{
struct attr_value *av;
struct insn_ent *ie;
- struct attr_desc *attr;
+ class attr_desc *attr;
struct attr_value_list *next;
};
/* This one is indexed by the first character of the attribute name. */
#define MAX_ATTRS_INDEX 256
-static struct attr_desc *attrs[MAX_ATTRS_INDEX];
-static struct insn_def *defs;
-static struct delay_desc *delays;
+static class attr_desc *attrs[MAX_ATTRS_INDEX];
+static class insn_def *defs;
+static class delay_desc *delays;
struct attr_value_list **insn_code_values;
/* Other variables. */
static char *attr_printf (unsigned int, const char *, ...)
ATTRIBUTE_PRINTF_2;
static rtx make_numeric_value (int);
-static struct attr_desc *find_attr (const char **, int);
+static class attr_desc *find_attr (const char **, int);
static rtx mk_attr_alt (alternative_mask);
static char *next_comma_elt (const char **);
static rtx insert_right_side (enum rtx_code, rtx, rtx, int, int);
static bool attr_alt_subset_p (rtx, rtx);
static bool attr_alt_subset_of_compl_p (rtx, rtx);
static void clear_struct_flag (rtx);
-static void write_attr_valueq (FILE *, struct attr_desc *, const char *);
-static struct attr_value *find_most_used (struct attr_desc *);
-static void write_attr_set (FILE *, struct attr_desc *, int, rtx,
+static void write_attr_valueq (FILE *, class attr_desc *, const char *);
+static struct attr_value *find_most_used (class attr_desc *);
+static void write_attr_set (FILE *, class attr_desc *, int, rtx,
const char *, const char *, rtx,
int, int, unsigned int);
-static void write_attr_case (FILE *, struct attr_desc *,
+static void write_attr_case (FILE *, class attr_desc *,
struct attr_value *,
int, const char *, const char *, int, rtx);
-static void write_attr_value (FILE *, struct attr_desc *, rtx);
+static void write_attr_value (FILE *, class attr_desc *, rtx);
static void write_upcase (FILE *, const char *);
static void write_indent (FILE *, int);
static rtx identity_fn (rtx);
Return a perhaps modified replacement expression for the value. */
static rtx
-check_attr_value (file_location loc, rtx exp, struct attr_desc *attr)
+check_attr_value (file_location loc, rtx exp, class attr_desc *attr)
{
struct attr_value *av;
const char *p;
case ATTR:
{
- struct attr_desc *attr2 = find_attr (&XSTR (exp, 0), 0);
+ class attr_desc *attr2 = find_attr (&XSTR (exp, 0), 0);
if (attr2 == NULL)
error_at (loc, "unknown attribute `%s' in ATTR",
XSTR (exp, 0));
It becomes a COND with each test being (eq_attr "alternative" "n") */
static rtx
-convert_set_attr_alternative (rtx exp, struct insn_def *id)
+convert_set_attr_alternative (rtx exp, class insn_def *id)
{
int num_alt = id->num_alternatives;
rtx condexp;
list of values is given, convert to SET_ATTR_ALTERNATIVE first. */
static rtx
-convert_set_attr (rtx exp, struct insn_def *id)
+convert_set_attr (rtx exp, class insn_def *id)
{
rtx newexp;
const char *name_ptr;
static void
check_defs (void)
{
- struct insn_def *id;
- struct attr_desc *attr;
+ class insn_def *id;
+ class attr_desc *attr;
int i;
rtx value;
value. LOC is the location to use for error reporting. */
static rtx
-make_canonical (file_location loc, struct attr_desc *attr, rtx exp)
+make_canonical (file_location loc, class attr_desc *attr, rtx exp)
{
int i;
rtx newexp;
alternatives. LOC is the location to use for error reporting. */
static struct attr_value *
-get_attr_value (file_location loc, rtx value, struct attr_desc *attr,
+get_attr_value (file_location loc, rtx value, class attr_desc *attr,
int insn_code)
{
struct attr_value *av;
static void
expand_delays (void)
{
- struct delay_desc *delay;
+ class delay_desc *delay;
rtx condexp;
rtx newexp;
int i;
the attribute. */
static void
-fill_attr (struct attr_desc *attr)
+fill_attr (class attr_desc *attr)
{
struct attr_value *av;
struct insn_ent *ie;
- struct insn_def *id;
+ class insn_def *id;
int i;
rtx value;
static rtx (*const address_fn[]) (rtx)
= {max_fn, min_fn, one_fn, identity_fn};
size_t i;
- struct attr_desc *length_attr, *new_attr;
+ class attr_desc *length_attr, *new_attr;
struct attr_value *av, *new_av;
struct insn_ent *ie, *new_ie;
static void
write_length_unit_log (FILE *outf)
{
- struct attr_desc *length_attr = find_attr (&length_str, 0);
+ class attr_desc *length_attr = find_attr (&length_str, 0);
struct attr_value *av;
struct insn_ent *ie;
unsigned int length_unit_log, length_or;
corresponding to INSN_CODE and INSN_INDEX. */
static rtx
-evaluate_eq_attr (rtx exp, struct attr_desc *attr, rtx value,
+evaluate_eq_attr (rtx exp, class attr_desc *attr, rtx value,
int insn_code, int insn_index)
{
rtx orexp, andexp;
simplify_test_exp (rtx exp, int insn_code, int insn_index)
{
rtx left, right;
- struct attr_desc *attr;
+ class attr_desc *attr;
struct attr_value *av;
struct insn_ent *ie;
struct attr_value_list *iv;
otherwise return 0. */
static int
-tests_attr_p (rtx p, struct attr_desc *attr)
+tests_attr_p (rtx p, class attr_desc *attr)
{
const char *fmt;
int i, ie, j, je;
attr_desc pointers), and return the size of that array. */
static int
-get_attr_order (struct attr_desc ***ret)
+get_attr_order (class attr_desc ***ret)
{
int i, j;
int num = 0;
- struct attr_desc *attr;
- struct attr_desc **all, **sorted;
+ class attr_desc *attr;
+ class attr_desc **all, **sorted;
char *handled;
for (i = 0; i < MAX_ATTRS_INDEX; i++)
for (attr = attrs[i]; attr; attr = attr->next)
num++;
- all = XNEWVEC (struct attr_desc *, num);
- sorted = XNEWVEC (struct attr_desc *, num);
+ all = XNEWVEC (class attr_desc *, num);
+ sorted = XNEWVEC (class attr_desc *, num);
handled = XCNEWVEC (char, num);
num = 0;
for (i = 0; i < MAX_ATTRS_INDEX; i++)
if (DEBUG)
for (j = 0; j < num; j++)
{
- struct attr_desc *attr2;
+ class attr_desc *attr2;
struct attr_value *av;
attr = sorted[j];
static void
optimize_attrs (int num_insn_codes)
{
- struct attr_desc *attr;
+ class attr_desc *attr;
struct attr_value *av;
struct insn_ent *ie;
rtx newexp;
int i;
struct attr_value_list *ivbuf;
struct attr_value_list *iv;
- struct attr_desc **topsort;
+ class attr_desc **topsort;
int topnum;
/* For each insn code, make a list of all the insn_ent's for it,
/* Add attribute value NAME to the beginning of ATTR's list. */
static void
-add_attr_value (struct attr_desc *attr, const char *name)
+add_attr_value (class attr_desc *attr, const char *name)
{
struct attr_value *av;
{
struct enum_type *et;
struct enum_value *ev;
- struct attr_desc *attr;
+ class attr_desc *attr;
const char *name_ptr;
char *p;
rtx def = info->def;
static void
gen_insn (md_rtx_info *info)
{
- struct insn_def *id;
+ class insn_def *id;
rtx def = info->def;
- id = oballoc (struct insn_def);
+ id = oballoc (class insn_def);
id->next = defs;
defs = id;
id->def = def;
static void
gen_delay (md_rtx_info *info)
{
- struct delay_desc *delay;
+ class delay_desc *delay;
int i;
rtx def = info->def;
have_annul_false = 1;
}
- delay = oballoc (struct delay_desc);
+ delay = oballoc (class delay_desc);
delay->def = def;
delay->num = ++num_delays;
delay->next = delays;
{
int i;
const char *name;
- struct attr_desc *attr;
+ class attr_desc *attr;
if (exp == NULL)
return;
{
int comparison_operator = 0;
RTX_CODE code;
- struct attr_desc *attr;
+ class attr_desc *attr;
if (emit_parens)
fprintf (outf, "(");
/* Write out a function to obtain the attribute for a given INSN. */
static void
-write_attr_get (FILE *outf, struct attr_desc *attr)
+write_attr_get (FILE *outf, class attr_desc *attr)
{
struct attr_value *av, *common_av;
int i, j;
if ((attrs_seen_more_than_once & (1U << i)) != 0)
{
const char *name = cached_attrs[i];
- struct attr_desc *cached_attr;
+ class attr_desc *cached_attr;
if (i != j)
cached_attrs[j] = name;
cached_attr = find_attr (&name, 0);
and ";"). */
static void
-write_attr_set (FILE *outf, struct attr_desc *attr, int indent, rtx value,
+write_attr_set (FILE *outf, class attr_desc *attr, int indent, rtx value,
const char *prefix, const char *suffix, rtx known_true,
int insn_code, int insn_index, unsigned int attrs_cached)
{
/* Write out the computation for one attribute value. */
static void
-write_attr_case (FILE *outf, struct attr_desc *attr, struct attr_value *av,
+write_attr_case (FILE *outf, class attr_desc *attr, struct attr_value *av,
int write_case_lines, const char *prefix, const char *suffix,
int indent, rtx known_true)
{
/* Utilities to write in various forms. */
static void
-write_attr_valueq (FILE *outf, struct attr_desc *attr, const char *s)
+write_attr_valueq (FILE *outf, class attr_desc *attr, const char *s)
{
if (attr->is_numeric)
{
}
static void
-write_attr_value (FILE *outf, struct attr_desc *attr, rtx value)
+write_attr_value (FILE *outf, class attr_desc *attr, rtx value)
{
int op;
case ATTR:
{
- struct attr_desc *attr2 = find_attr (&XSTR (value, 0), 0);
+ class attr_desc *attr2 = find_attr (&XSTR (value, 0), 0);
if (attr->enum_name)
fprintf (outf, "(enum %s)", attr->enum_name);
else if (!attr->is_numeric)
static void
write_eligible_delay (FILE *outf, const char *kind)
{
- struct delay_desc *delay;
+ class delay_desc *delay;
int max_slots;
char str[50];
const char *pstr;
- struct attr_desc *attr;
+ class attr_desc *attr;
struct attr_value *av, *common_av;
int i;
return attr_string (start, *pstr - start);
}
-/* Return a `struct attr_desc' pointer for a given named attribute. If CREATE
+/* Return a `class attr_desc' pointer for a given named attribute. If CREATE
is nonzero, build a new attribute, if one does not exist. *NAME_P is
replaced by a pointer to a canonical copy of the string. */
-static struct attr_desc *
+static class attr_desc *
find_attr (const char **name_p, int create)
{
- struct attr_desc *attr;
+ class attr_desc *attr;
int index;
const char *name = *name_p;
if (! create)
return NULL;
- attr = oballoc (struct attr_desc);
+ attr = oballoc (class attr_desc);
attr->name = DEF_ATTR_STRING (name);
attr->enum_name = 0;
attr->first_value = attr->default_val = NULL;
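For context: attr_desc is evidently defined with the `class' key (hence the
substitutions throughout these hunks), so its declarations must use the same
key to stay consistent. A minimal self-contained C++ sketch of the pattern
being normalized, with illustrative names that are not GCC code:

  class widget { public: int id; };   /* type defined with 'class' */

  class widget *lookup (int);         /* declaration agrees with definition */
  struct widget *lookup2 (int);       /* still valid C++ (both keys name the
                                         same type), but the mismatched
                                         class-key is what a warning such as
                                         -Wmismatched-tags reports */

Both declarations denote one type; only the consistency of the class-key in
the elaborated-type-specifier differs.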
static void
make_internal_attr (const char *name, rtx value, int special)
{
- struct attr_desc *attr;
+ class attr_desc *attr;
attr = find_attr (&name, 1);
gcc_assert (!attr->default_val);
/* Find the most used value of an attribute. */
static struct attr_value *
-find_most_used (struct attr_desc *attr)
+find_most_used (class attr_desc *attr)
{
struct attr_value *av;
struct attr_value *most_used;
static void
write_const_num_delay_slots (FILE *outf)
{
- struct attr_desc *attr = find_attr (&num_delay_slots_str, 0);
+ class attr_desc *attr = find_attr (&num_delay_slots_str, 0);
struct attr_value *av;
if (attr)
struct insn_reserv *decl = oballoc (struct insn_reserv);
rtx def = info->def;
- struct attr_desc attr = { };
+ class attr_desc attr = { };
attr.name = DEF_ATTR_STRING (XSTR (def, 0));
attr.loc = info->loc;
/* Try to find a const attribute (usually cpu or tune) that is used
in all define_insn_reservation conditions. */
-static struct attr_desc *
+static class attr_desc *
find_tune_attr (rtx exp)
{
- struct attr_desc *attr;
+ class attr_desc *attr;
switch (GET_CODE (exp))
{
int i;
struct insn_reserv *decl;
rtx code_exp, lats_exp, byps_exp;
- struct attr_desc *tune_attr;
+ class attr_desc *tune_attr;
if (n_insn_reservs == 0)
return;
int
main (int argc, const char **argv)
{
- struct attr_desc *attr;
- struct insn_def *id;
+ class attr_desc *attr;
+ class insn_def *id;
int i;
progname = "genattrtab";
};
/* Forward declarations. */
-static void walk_rtx (md_rtx_info *, rtx, struct accum_extract *);
+static void walk_rtx (md_rtx_info *, rtx, class accum_extract *);
#define UPPER_OFFSET ('A' - ('z' - 'a' + 1))
in ACC. */
static void
push_pathstr_operand (int operand, bool is_vector,
- struct accum_extract *acc)
+ class accum_extract *acc)
{
if (is_vector && 'a' + operand > 'z')
acc->pathstr.safe_push (operand + UPPER_OFFSET);
unsigned int op_count, dup_count, j;
struct extraction *p;
struct code_ptr *link;
- struct accum_extract acc;
+ class accum_extract acc;
/* Walk the insn's pattern, remembering at all times the path
down to the walking point. */
}
static void
-walk_rtx (md_rtx_info *info, rtx x, struct accum_extract *acc)
+walk_rtx (md_rtx_info *info, rtx x, class accum_extract *acc)
{
RTX_CODE code;
int i, len;
/* libcpp helpers. */
-static struct line_maps *line_table;
+static class line_maps *line_table;
/* The rich_location class within libcpp requires a way to expand
location_t instances, and relies on the client code
unsigned int fn;
};
-struct simplify;
+class simplify;
/* Identifier that maps to a user-defined predicate. */
/* The AST produced by parsing of the pattern definitions. */
-struct dt_operand;
-struct capture_info;
+class dt_operand;
+class capture_info;
/* The base class for operands. */
produced when the pattern applies in the leaves.
For a (match ...) the leaves are either empty if it is a simple predicate
or the single expression specifying the matched operands. */
- struct operand *result;
+ class operand *result;
/* Collected 'for' expression operators that have to be replaced
in the lowering phase. */
vec<vec<user_id *> > for_vec;
}
DEBUG_FUNCTION void
-print_matches (struct simplify *s, FILE *f = stderr)
+print_matches (class simplify *s, FILE *f = stderr)
{
fprintf (f, "for expression: ");
print_operand (s->match, f);
matching code. It represents the 'match' expression of all
simplifies and has those as its leaves. */
-struct dt_simplify;
+class dt_simplify;
/* A hash-map collecting semantically equivalent leaves in the decision
tree for splitting out to separate functions. */
public:
dt_node *root;
- void insert (struct simplify *, unsigned);
+ void insert (class simplify *, unsigned);
void gen (FILE *f, bool gimple);
void print (FILE *f = stderr);
/* Insert S into the decision tree. */
void
-decision_tree::insert (struct simplify *s, unsigned pattern_no)
+decision_tree::insert (class simplify *s, unsigned pattern_no)
{
current_id = s->id;
dt_operand **indexes = XCNEWVEC (dt_operand *, s->capture_max + 1);
/* Parse a capture.
capture = '@'<number> */
-struct operand *
+class operand *
parser::parse_capture (operand *op, bool require_existing)
{
location_t src_loc = eat_token (CPP_ATSIGN)->src_loc;
/* Parse an expression
expr = '(' <operation>[capture][flag][type] <operand>... ')' */
-struct operand *
+class operand *
parser::parse_expr ()
{
const cpp_token *token = peek ();
a standalone capture.
op = predicate | expr | c_expr | capture */
-struct operand *
+class operand *
parser::parse_op ()
{
const cpp_token *token = peek ();
- struct operand *op = NULL;
+ class operand *op = NULL;
if (token->type == CPP_OPEN_PAREN)
{
eat_token (CPP_OPEN_PAREN);
const cpp_token *loc = peek ();
parsing_match_operand = true;
- struct operand *match = parse_op ();
+ class operand *match = parse_op ();
finish_match_operand (match);
parsing_match_operand = false;
if (match->type == operand::OP_CAPTURE && !matcher)
}
}
- line_table = XCNEW (struct line_maps);
+ line_table = XCNEW (class line_maps);
linemap_init (line_table, 0);
line_table->reallocator = xrealloc;
line_table->round_alloc_size = round_alloc_size;
class data
{
public:
- struct data *next;
+ class data *next;
const char *name;
const char *template_code;
file_location loc;
};
/* This variable points to the first link in the insn chain. */
-static struct data *idata;
+static class data *idata;
/* This variable points to the end of the insn chain. This is where
everything relevant from the machine description is appended. */
-static struct data **idata_end;
+static class data **idata_end;
\f
static void output_prologue (void);
static void output_operand_data (void);
static void output_insn_data (void);
static void output_get_insn_name (void);
-static void scan_operands (struct data *, rtx, int, int);
+static void scan_operands (class data *, rtx, int, int);
static int compare_operands (struct operand_data *,
struct operand_data *);
-static void place_operands (struct data *);
-static void process_template (struct data *, const char *);
-static void validate_insn_alternatives (struct data *);
-static void validate_insn_operands (struct data *);
+static void place_operands (class data *);
+static void process_template (class data *, const char *);
+static void validate_insn_alternatives (class data *);
+static void validate_insn_operands (class data *);
class constraint_data
{
public:
- struct constraint_data *next_this_letter;
+ class constraint_data *next_this_letter;
file_location loc;
unsigned int namelen;
char name[1];
are handled outside the define*_constraint mechanism. */
static const char indep_constraints[] = ",=+%*?!^$#&g";
-static struct constraint_data *
+static class constraint_data *
constraints_by_letter_table[1 << CHAR_BIT];
static int mdep_constraint_len (const char *, file_location, int);
static void
output_insn_data (void)
{
- struct data *d;
+ class data *d;
int name_offset = 0;
int next_name_offset;
const char * last_name = 0;
const char * next_name = 0;
- struct data *n;
+ class data *n;
for (n = idata, next_name_offset = 1; n; n = n->next, next_name_offset++)
if (n->name)
THIS_STRICT_LOW is nonzero if the containing rtx was a STRICT_LOW_PART. */
static void
-scan_operands (struct data *d, rtx part, int this_address_p,
+scan_operands (class data *d, rtx part, int this_address_p,
int this_strict_low)
{
int i, j;
find a subsequence that is the same, or allocate a new one at the end. */
static void
-place_operands (struct data *d)
+place_operands (class data *d)
{
struct operand_data *od, *od2;
int i;
templates, or C code to generate the assembler code template. */
static void
-process_template (struct data *d, const char *template_code)
+process_template (class data *d, const char *template_code)
{
const char *cp;
int i;
/* Check insn D for consistency in number of constraint alternatives. */
static void
-validate_insn_alternatives (struct data *d)
+validate_insn_alternatives (class data *d)
{
int n = 0, start;
/* Verify that there are no gaps in operand numbers for INSNs. */
static void
-validate_insn_operands (struct data *d)
+validate_insn_operands (class data *d)
{
int i;
}
static void
-validate_optab_operands (struct data *d)
+validate_optab_operands (class data *d)
{
if (!d->name || d->name[0] == '\0' || d->name[0] == '*')
return;
static void
init_insn_for_nothing (void)
{
- idata = XCNEW (struct data);
+ idata = XCNEW (class data);
new (idata) data ();
idata->name = "*placeholder_for_nothing";
idata->loc = file_location ("<internal>", 0, 0);
{
rtx exp = info->def;
const char *name = XSTR (exp, 0);
- struct constraint_data **iter, **slot, *new_cdata;
+ class constraint_data **iter, **slot, *new_cdata;
if (strcmp (name, "TARGET_MEM_CONSTRAINT") == 0)
name = general_mem;
return;
}
}
- new_cdata = XNEWVAR (struct constraint_data,
- sizeof (struct constraint_data) + namelen);
+ new_cdata = XNEWVAR (class constraint_data,
+ sizeof (class constraint_data) + namelen);
new (new_cdata) constraint_data ();
strcpy (CONST_CAST (char *, new_cdata->name), name);
new_cdata->namelen = namelen;
static int
mdep_constraint_len (const char *s, file_location loc, int opno)
{
- struct constraint_data *p;
+ class constraint_data *p;
p = constraints_by_letter_table[(unsigned int)s[0]];
class constraint_data
{
public:
- struct constraint_data *next_this_letter;
- struct constraint_data *next_textual;
+ class constraint_data *next_this_letter;
+ class constraint_data *next_textual;
const char *name;
const char *c_name; /* same as .name unless mangling is necessary */
file_location loc; /* location of definition */
/* Overview of all constraints beginning with a given letter. */
-static struct constraint_data *
+static class constraint_data *
constraints_by_letter_table[1<<CHAR_BIT];
/* For looking up all the constraints in the order that they appeared
in the machine description. */
-static struct constraint_data *first_constraint;
-static struct constraint_data **last_constraint_ptr = &first_constraint;
+static class constraint_data *first_constraint;
+static class constraint_data **last_constraint_ptr = &first_constraint;
#define FOR_ALL_CONSTRAINTS(iter_) \
for (iter_ = first_constraint; iter_; iter_ = iter_->next_textual)
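The declarations above implement a small two-index scheme: an array of chains
bucketed by the constraint name's first byte (next_this_letter) plus a global
list in definition order (next_textual). A minimal sketch of the per-letter
lookup side, assuming plain strcmp name comparison (illustrative, not GCC
code):

  #include <climits>
  #include <cstring>

  struct cdata
  {
    const char *name;
    cdata *next_this_letter;  /* constraints sharing a leading byte */
  };

  static cdata *by_letter[1 << CHAR_BIT];

  static cdata *
  lookup_constraint (const char *name)
  {
    for (cdata *p = by_letter[(unsigned char) name[0]]; p;
         p = p->next_this_letter)
      if (strcmp (p->name, name) == 0)
        return p;
    return nullptr;
  }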
rtx exp, bool is_memory, bool is_special_memory,
bool is_address, file_location loc)
{
- struct constraint_data *c, **iter, **slot;
+ class constraint_data *c, **iter, **slot;
const char *p;
bool need_mangled_name = false;
bool is_const_int;
}
- c = XOBNEW (rtl_obstack, struct constraint_data);
+ c = XOBNEW (rtl_obstack, class constraint_data);
c->name = name;
c->c_name = need_mangled_name ? mangle (name) : name;
c->loc = loc;
static void
choose_enum_order (void)
{
- struct constraint_data *c;
+ class constraint_data *c;
enum_order = XNEWVEC (const constraint_data *, num_constraints);
unsigned int next = 0;
for (i = 0; i < ARRAY_SIZE (constraints_by_letter_table); i++)
{
- struct constraint_data *c = constraints_by_letter_table[i];
+ class constraint_data *c = constraints_by_letter_table[i];
if (!c)
continue;
{
if (i != 0)
printf (",\n ");
- struct constraint_data *c = constraints_by_letter_table[i];
+ class constraint_data *c = constraints_by_letter_table[i];
if (!c)
printf ("CONSTRAINT__UNKNOWN");
else if (c->namelen == 1)
for (i = 0; i < ARRAY_SIZE (constraints_by_letter_table); i++)
{
- struct constraint_data *c = constraints_by_letter_table[i];
+ class constraint_data *c = constraints_by_letter_table[i];
if (!c
|| c->namelen == 1)
/* Constraints with multiple characters should have the same
length. */
{
- struct constraint_data *c2 = c->next_this_letter;
+ class constraint_data *c2 = c->next_this_letter;
size_t len = c->namelen;
while (c2)
{
static void
write_reg_class_for_constraint_1 (void)
{
- struct constraint_data *c;
+ class constraint_data *c;
puts ("enum reg_class\n"
"reg_class_for_constraint_1 (enum constraint_num c)\n"
static void
write_tm_constrs_h (void)
{
- struct constraint_data *c;
+ class constraint_data *c;
printf ("\
/* Generated automatically by the program '%s'\n\
static void
write_insn_const_int_ok_for_constraint (void)
{
- struct constraint_data *c;
+ class constraint_data *c;
puts ("bool\n"
"insn_const_int_ok_for_constraint (HOST_WIDE_INT ival, "
return first == last ? first : 0;
}
\f
-struct state;
+class state;
/* Describes a possible successful return from a routine. */
struct acceptance_type
return !operator == (a, b);
}
-struct decision;
+class decision;
/* Represents a transition between states, dependent on the result of
a test T. */
st.longest_backtrack, st.longest_backtrack_code);
}
-struct merge_pattern_info;
+class merge_pattern_info;
/* Represents a transition from one pattern to another. */
class merge_pattern_transition
public:
rtx data;
file_location loc;
- struct queue_elem *next;
+ class queue_elem *next;
/* In a DEFINE_INSN that came from a DEFINE_INSN_AND_SPLIT or
DEFINE_INSN_AND_REWRITE, SPLIT points to the generated DEFINE_SPLIT. */
- struct queue_elem *split;
+ class queue_elem *split;
};
#define MNEMONIC_ATTR_NAME "mnemonic"
#define MNEMONIC_HTAB_SIZE 1024
-static struct queue_elem *define_attr_queue;
-static struct queue_elem **define_attr_tail = &define_attr_queue;
-static struct queue_elem *define_pred_queue;
-static struct queue_elem **define_pred_tail = &define_pred_queue;
-static struct queue_elem *define_insn_queue;
-static struct queue_elem **define_insn_tail = &define_insn_queue;
-static struct queue_elem *define_cond_exec_queue;
-static struct queue_elem **define_cond_exec_tail = &define_cond_exec_queue;
-static struct queue_elem *define_subst_queue;
-static struct queue_elem **define_subst_tail = &define_subst_queue;
-static struct queue_elem *other_queue;
-static struct queue_elem **other_tail = &other_queue;
-static struct queue_elem *define_subst_attr_queue;
-static struct queue_elem **define_subst_attr_tail = &define_subst_attr_queue;
+static class queue_elem *define_attr_queue;
+static class queue_elem **define_attr_tail = &define_attr_queue;
+static class queue_elem *define_pred_queue;
+static class queue_elem **define_pred_tail = &define_pred_queue;
+static class queue_elem *define_insn_queue;
+static class queue_elem **define_insn_tail = &define_insn_queue;
+static class queue_elem *define_cond_exec_queue;
+static class queue_elem **define_cond_exec_tail = &define_cond_exec_queue;
+static class queue_elem *define_subst_queue;
+static class queue_elem **define_subst_tail = &define_subst_queue;
+static class queue_elem *other_queue;
+static class queue_elem **other_tail = &other_queue;
+static class queue_elem *define_subst_attr_queue;
+static class queue_elem **define_subst_attr_tail = &define_subst_attr_queue;
/* Mapping from DEFINE_* rtxes to their location in the source file. */
static hash_map <rtx, file_location> *rtx_locs;
static void remove_constraints (rtx);
-static int is_predicable (struct queue_elem *);
+static int is_predicable (class queue_elem *);
static void identify_predicable_attribute (void);
static int n_alternatives (const char *);
static void collect_insn_data (rtx, int *, int *);
-static const char *alter_test_for_insn (struct queue_elem *,
- struct queue_elem *);
+static const char *alter_test_for_insn (class queue_elem *,
+ class queue_elem *);
static char *shift_output_template (char *, const char *, int);
-static const char *alter_output_for_insn (struct queue_elem *,
- struct queue_elem *,
+static const char *alter_output_for_insn (class queue_elem *,
+ class queue_elem *,
int, int);
-static void process_one_cond_exec (struct queue_elem *);
+static void process_one_cond_exec (class queue_elem *);
static void process_define_cond_exec (void);
static void init_predicate_table (void);
static void record_insn_name (int, const char *);
-static bool has_subst_attribute (struct queue_elem *, struct queue_elem *);
+static bool has_subst_attribute (class queue_elem *, class queue_elem *);
static const char * alter_output_for_subst_insn (rtx, int);
-static void alter_attrs_for_subst_insn (struct queue_elem *, int);
-static void process_substs_on_one_elem (struct queue_elem *,
- struct queue_elem *);
+static void alter_attrs_for_subst_insn (class queue_elem *, int);
+static void process_substs_on_one_elem (class queue_elem *,
+ class queue_elem *);
static rtx subst_dup (rtx, int, int);
static void process_define_subst (void);
/* Queue PATTERN on LIST_TAIL. Return the address of the new queue
element. */
-static struct queue_elem *
-queue_pattern (rtx pattern, struct queue_elem ***list_tail,
+static class queue_elem *
+queue_pattern (rtx pattern, class queue_elem ***list_tail,
file_location loc)
{
- struct queue_elem *e = XNEW (struct queue_elem);
+ class queue_elem *e = XNEW (class queue_elem);
e->data = pattern;
e->loc = loc;
e->next = NULL;
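The queue_elem *** parameter is the classic tail-pointer append: each queue
keeps a head pointer and a pointer to its last `next' slot (see the
define_*_queue/define_*_tail pairs above), so appending is O(1) with no
special case for an empty list. A minimal sketch of the idiom with a
hypothetical elem type (not GCC code):

  struct elem { elem *next; };

  static elem *queue_head;
  static elem **queue_tail = &queue_head;

  static elem *
  enqueue (elem *e, elem ***list_tail)
  {
    e->next = nullptr;
    **list_tail = e;        /* hook the element onto the end */
    *list_tail = &e->next;  /* advance the caller's tail pointer */
    return e;
  }

  /* usage: enqueue (new elem, &queue_tail); */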
/* Remove element ELEM from QUEUE. */
static void
-remove_from_queue (struct queue_elem *elem, struct queue_elem **queue)
+remove_from_queue (class queue_elem *elem, class queue_elem **queue)
{
- struct queue_elem *prev, *e;
+ class queue_elem *prev, *e;
prev = NULL;
for (e = *queue; e ; e = e->next)
{
static void
add_define_attr (const char *name)
{
- struct queue_elem *e = XNEW (struct queue_elem);
+ class queue_elem *e = XNEW (class queue_elem);
rtx t1 = rtx_alloc (DEFINE_ATTR);
XSTR (t1, 0) = name;
XSTR (t1, 1) = "no,yes";
rtx split;
rtvec attr;
int i;
- struct queue_elem *insn_elem;
- struct queue_elem *split_elem;
+ class queue_elem *insn_elem;
+ class queue_elem *split_elem;
int split_code = (GET_CODE (desc) == DEFINE_INSN_AND_REWRITE ? 5 : 6);
/* Create a split with values from the insn_and_split. */
a DEFINE_INSN. */
static int
-is_predicable (struct queue_elem *elem)
+is_predicable (class queue_elem *elem)
{
rtvec vec = XVEC (elem->data, 4);
const char *value;
/* Find attribute SUBST in ELEM and assign NEW_VALUE to it. */
static void
-change_subst_attribute (struct queue_elem *elem,
- struct queue_elem *subst_elem,
+change_subst_attribute (class queue_elem *elem,
+ class queue_elem *subst_elem,
const char *new_value)
{
rtvec attrs_vec = XVEC (elem->data, 4);
words, we suppose the default value of the attribute to be 'no' since it is
always generated automatically in read-rtl.c. */
static bool
-has_subst_attribute (struct queue_elem *elem, struct queue_elem *subst_elem)
+has_subst_attribute (class queue_elem *elem, class queue_elem *subst_elem)
{
rtvec attrs_vec = XVEC (elem->data, 4);
const char *value, *subst_name = XSTR (subst_elem->data, 0);
static void
identify_predicable_attribute (void)
{
- struct queue_elem *elem;
+ class queue_elem *elem;
char *p_true, *p_false;
const char *value;
}
static const char *
-alter_test_for_insn (struct queue_elem *ce_elem,
- struct queue_elem *insn_elem)
+alter_test_for_insn (class queue_elem *ce_elem,
+ class queue_elem *insn_elem)
{
return rtx_reader_ptr->join_c_conditions (XSTR (ce_elem->data, 1),
XSTR (insn_elem->data, 2));
if (!global_changes_made)
{
- struct queue_elem *elem;
+ class queue_elem *elem;
global_changes_made = true;
add_define_attr ("ce_enabled");
ELEM is a queue element, containing our rtl-template,
N_DUP - multiplication factor. */
static void
-alter_attrs_for_subst_insn (struct queue_elem * elem, int n_dup)
+alter_attrs_for_subst_insn (class queue_elem * elem, int n_dup)
{
rtvec vec = XVEC (elem->data, 4);
int num_elem;
}
static const char *
-alter_output_for_insn (struct queue_elem *ce_elem,
- struct queue_elem *insn_elem,
+alter_output_for_insn (class queue_elem *ce_elem,
+ class queue_elem *insn_elem,
int alt, int max_op)
{
const char *ce_out, *insn_out;
/* Replicate insns as appropriate for the given DEFINE_COND_EXEC. */
static void
-process_one_cond_exec (struct queue_elem *ce_elem)
+process_one_cond_exec (class queue_elem *ce_elem)
{
- struct queue_elem *insn_elem;
+ class queue_elem *insn_elem;
for (insn_elem = define_insn_queue; insn_elem ; insn_elem = insn_elem->next)
{
int alternatives, max_operand;
was applied, ELEM would be deleted. */
static void
-process_substs_on_one_elem (struct queue_elem *elem,
- struct queue_elem *queue)
+process_substs_on_one_elem (class queue_elem *elem,
+ class queue_elem *queue)
{
- struct queue_elem *subst_elem;
+ class queue_elem *subst_elem;
int i, j, patterns_match;
for (subst_elem = define_subst_queue;
static void
process_define_cond_exec (void)
{
- struct queue_elem *elem;
+ class queue_elem *elem;
identify_predicable_attribute ();
if (have_error)
static void
process_define_subst (void)
{
- struct queue_elem *elem, *elem_attr;
+ class queue_elem *elem, *elem_attr;
/* Check if each define_subst has corresponding define_subst_attr. */
for (elem = define_subst_queue; elem ; elem = elem->next)
static void
gen_mnemonic_attr (void)
{
- struct queue_elem *elem;
+ class queue_elem *elem;
rtx mnemonic_attr = NULL;
htab_t mnemonic_htab;
const char *str, *p;
static void
check_define_attr_duplicates ()
{
- struct queue_elem *elem;
+ class queue_elem *elem;
htab_t attr_htab;
char * attr_name;
void **slot;
to use elided pattern numbers for anything. */
do
{
- struct queue_elem **queue, *elem;
+ class queue_elem **queue, *elem;
/* Read all patterns from a given queue before moving on to the next. */
if (define_attr_queue != NULL)
sizeof (struct function),
sizeof (struct basic_block_def),
sizeof (struct cgraph_node),
- sizeof (struct loop),
+ sizeof (class loop),
};
/* The total number of orders. */
/* Dump LOOP's induction IV. */
static void
-dump_induction (struct loop *loop, induction_p iv)
+dump_induction (class loop *loop, induction_p iv)
{
fprintf (dump_file, " Induction: ");
print_generic_expr (dump_file, iv->var, TDF_SLIM);
class loop_cand
{
public:
- loop_cand (struct loop *, struct loop *);
+ loop_cand (class loop *, class loop *);
~loop_cand ();
reduction_p find_reduction_by_stmt (gimple *);
void undo_simple_reduction (reduction_p, bitmap);
/* The loop itself. */
- struct loop *m_loop;
+ class loop *m_loop;
/* The outer loop for interchange. It is equal to loop if this loop_cand
itself represents the outer loop. */
- struct loop *m_outer;
+ class loop *m_outer;
/* Vector of induction variables in loop. */
vec<induction_p> m_inductions;
/* Vector of reduction variables in loop. */
/* Constructor. */
-loop_cand::loop_cand (struct loop *loop, struct loop *outer)
+loop_cand::loop_cand (class loop *loop, class loop *outer)
: m_loop (loop), m_outer (outer), m_exit (single_exit (loop)),
m_bbs (get_loop_body (loop)), m_num_stmts (0), m_const_init_reduc (0)
{
/* Return single use stmt of VAR in LOOP, otherwise return NULL. */
static gimple *
-single_use_in_loop (tree var, struct loop *loop)
+single_use_in_loop (tree var, class loop *loop)
{
gimple *stmt, *res = NULL;
use_operand_p use_p;
class tree_loop_interchange
{
public:
- tree_loop_interchange (vec<struct loop *> loop_nest)
+ tree_loop_interchange (vec<class loop *> loop_nest)
: m_loop_nest (loop_nest), m_niters_iv_var (NULL_TREE),
m_dce_seeds (BITMAP_ALLOC (NULL)) { }
~tree_loop_interchange () { BITMAP_FREE (m_dce_seeds); }
bool valid_data_dependences (unsigned, unsigned, vec<ddr_p>);
void interchange_loops (loop_cand &, loop_cand &);
void map_inductions_to_loop (loop_cand &, loop_cand &);
- void move_code_to_inner_loop (struct loop *, struct loop *, basic_block *);
+ void move_code_to_inner_loop (class loop *, class loop *, basic_block *);
/* The whole loop nest in which interchange is ongoing. */
- vec<struct loop *> m_loop_nest;
+ vec<class loop *> m_loop_nest;
/* We create new IV which is only used in loop's exit condition check.
In case of 3-level loop nest interchange, when we interchange the
innermost two loops, new IV created in the middle level loop does
}
/* Prepare niters for both loops. */
- struct loop *loop_nest = m_loop_nest[0];
+ class loop *loop_nest = m_loop_nest[0];
edge instantiate_below = loop_preheader_edge (loop_nest);
gsi = gsi_last_bb (loop_preheader_edge (loop_nest)->src);
i_niters = number_of_latch_executions (iloop.m_loop);
/* Move stmts of outer loop to inner loop. */
void
-tree_loop_interchange::move_code_to_inner_loop (struct loop *outer,
- struct loop *inner,
+tree_loop_interchange::move_code_to_inner_loop (class loop *outer,
+ class loop *inner,
basic_block *outer_bbs)
{
basic_block oloop_exit_bb = single_exit (outer)->src;
arr[i][j - 1][k] = 0; */
static void
-compute_access_stride (struct loop *loop_nest, struct loop *loop,
+compute_access_stride (class loop *loop_nest, class loop *loop,
data_reference_p dr)
{
vec<tree> *strides = new vec<tree> ();
if (! chrec_contains_undetermined (scev))
{
tree sl = scev;
- struct loop *expected = loop;
+ class loop *expected = loop;
while (TREE_CODE (sl) == POLYNOMIAL_CHREC)
{
- struct loop *sl_loop = get_chrec_loop (sl);
+ class loop *sl_loop = get_chrec_loop (sl);
while (sl_loop != expected)
{
strides->safe_push (size_int (0));
all data references. If access strides cannot be computed at least
for two levels of loop for any data reference, it returns NULL. */
-static struct loop *
-compute_access_strides (struct loop *loop_nest, struct loop *loop,
+static class loop *
+compute_access_strides (class loop *loop_nest, class loop *loop,
vec<data_reference_p> datarefs)
{
unsigned i, j, num_loops = (unsigned) -1;
of loops that isn't in current LOOP_NEST. */
static void
-prune_access_strides_not_in_loop (struct loop *loop_nest,
- struct loop *innermost,
+prune_access_strides_not_in_loop (class loop *loop_nest,
+ class loop *innermost,
vec<data_reference_p> datarefs)
{
data_reference_p dr;
nest with LOOP. */
static bool
-proper_loop_form_for_interchange (struct loop *loop, struct loop **min_outer)
+proper_loop_form_for_interchange (class loop *loop, class loop **min_outer)
{
edge e0, e1, exit;
should be interchanged by looking into all DATAREFS. */
static bool
-should_interchange_loop_nest (struct loop *loop_nest, struct loop *innermost,
+should_interchange_loop_nest (class loop *loop_nest, class loop *innermost,
vec<data_reference_p> datarefs)
{
unsigned idx = loop_depth (innermost) - loop_depth (loop_nest);
gcc_assert (idx > 0);
/* Check if any two adjacent loops should be interchanged. */
- for (struct loop *loop = innermost;
+ for (class loop *loop = innermost;
loop != loop_nest; loop = loop_outer (loop), idx--)
if (should_interchange_loops (idx, idx - 1, datarefs, 0, 0,
loop == innermost, false))
vec<ddr_p> *ddrs)
{
struct data_reference *a, *b;
- struct loop *innermost = loop_nest.last ();
+ class loop *innermost = loop_nest.last ();
for (unsigned i = 0; datarefs.iterate (i, &a); ++i)
{
/* Prune DATAREFS by removing any data reference not inside of LOOP. */
static inline void
-prune_datarefs_not_in_loop (struct loop *loop, vec<data_reference_p> datarefs)
+prune_datarefs_not_in_loop (class loop *loop, vec<data_reference_p> datarefs)
{
unsigned i, j;
struct data_reference *dr;
inner loop of that basic block's father loop. On success, return the
outer loop of the result loop nest. */
-static struct loop *
-prepare_data_references (struct loop *loop, vec<data_reference_p> *datarefs)
+static class loop *
+prepare_data_references (class loop *loop, vec<data_reference_p> *datarefs)
{
- struct loop *loop_nest = loop;
+ class loop *loop_nest = loop;
vec<data_reference_p> *bb_refs;
basic_block bb, *bbs = get_loop_body_in_dom_order (loop);
in interchange. */
static bool
-prepare_perfect_loop_nest (struct loop *loop, vec<loop_p> *loop_nest,
+prepare_perfect_loop_nest (class loop *loop, vec<loop_p> *loop_nest,
vec<data_reference_p> *datarefs, vec<ddr_p> *ddrs)
{
- struct loop *start_loop = NULL, *innermost = loop;
- struct loop *outermost = loops_for_fn (cfun)->tree_root;
+ class loop *start_loop = NULL, *innermost = loop;
+ class loop *outermost = loops_for_fn (cfun)->tree_root;
/* Find loop nest from the innermost loop. The outermost is the innermost
outer*/
return 0;
bool changed_p = false;
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
{
vec<loop_p> loop_nest = vNULL;
to the OLD loop or the outer loop of OLD now is inside LOOP. */
static void
-merge_loop_tree (struct loop *loop, struct loop *old)
+merge_loop_tree (class loop *loop, class loop *old)
{
basic_block *bbs;
int i, n;
- struct loop *subloop;
+ class loop *subloop;
edge e;
edge_iterator ei;
If so return true, otherwise return false. */
static bool
-unroll_jam_possible_p (struct loop *outer, struct loop *loop)
+unroll_jam_possible_p (class loop *outer, class loop *loop)
{
basic_block *bbs;
int i, n;
- struct tree_niter_desc niter;
+ class tree_niter_desc niter;
/* When fusing the loops we skip the latch block
of the first one, so it mustn't have any effects to
be in appropriate form. */
static void
-fuse_loops (struct loop *loop)
+fuse_loops (class loop *loop)
{
- struct loop *next = loop->next;
+ class loop *next = loop->next;
while (next)
{
merge_loop_tree (loop, next);
gcc_assert (!next->num_nodes);
- struct loop *ln = next->next;
+ class loop *ln = next->next;
delete_loop (next);
next = ln;
}
static unsigned int
tree_loop_unroll_and_jam (void)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
gcc_assert (scev_initialized_p ());
/* Go through all innermost loops. */
FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
{
- struct loop *outer = loop_outer (loop);
+ class loop *outer = loop_outer (loop);
if (loop_depth (loop) < 2
|| optimize_loop_nest_for_size_p (outer))
vec<data_reference_p> datarefs;
vec<ddr_p> dependences;
unsigned unroll_factor, profit_unroll, removed;
- struct tree_niter_desc desc;
+ class tree_niter_desc desc;
bool unroll = false;
auto_vec<loop_p, 3> loop_nest;
/* The loop containing STMT (cached for convenience). If multiple
statements share the same address, they all belong to this loop. */
- struct loop *loop;
+ class loop *loop;
/* A decomposition of the calculation into a sum of terms plus an
optional base. When BASE is provided, it is never an SSA name.
/* The outermost loop that can handle all the version checks
described below. */
- struct loop *outermost;
+ class loop *outermost;
/* The first entry in the list of blocks that belong to this loop
(and not to subloops). m_next_block_in_loop provides the chain
/* If versioning succeeds, this points the version of the loop that
assumes the version conditions holds. */
- struct loop *optimized_loop;
+ class loop *optimized_loop;
};
/* The main pass structure. */
loop_info &m_li;
};
- loop_info &get_loop_info (struct loop *loop) { return m_loops[loop->num]; }
+ loop_info &get_loop_info (class loop *loop) { return m_loops[loop->num]; }
- unsigned int max_insns_for_loop (struct loop *);
+ unsigned int max_insns_for_loop (class loop *);
bool expensive_stmt_p (gimple *);
void version_for_unity (gimple *, tree);
inner_likelihood get_inner_likelihood (tree, unsigned HOST_WIDE_INT);
void dump_inner_likelihood (address_info &, address_term_info &);
void analyze_stride (address_info &, address_term_info &,
- tree, struct loop *);
+ tree, class loop *);
bool find_per_loop_multiplication (address_info &, address_term_info &);
bool analyze_term_using_scevs (address_info &, address_term_info &);
void analyze_arbitrary_term (address_info &, address_term_info &);
bool analyze_block (basic_block);
bool analyze_blocks ();
- void prune_loop_conditions (struct loop *, vr_values *);
+ void prune_loop_conditions (class loop *, vr_values *);
bool prune_conditions ();
- void merge_loop_info (struct loop *, struct loop *);
- void add_loop_to_queue (struct loop *);
- bool decide_whether_loop_is_versionable (struct loop *);
+ void merge_loop_info (class loop *, class loop *);
+ void add_loop_to_queue (class loop *);
+ bool decide_whether_loop_is_versionable (class loop *);
bool make_versioning_decisions ();
- bool version_loop (struct loop *);
+ bool version_loop (class loop *);
void implement_versioning_decisions ();
/* The function we're optimizing. */
auto_vec<basic_block> m_next_block_in_loop;
/* The list of loops that we've decided to version. */
- auto_vec<struct loop *> m_loops_to_version;
+ auto_vec<class loop *> m_loops_to_version;
/* A table of addresses in the current loop, keyed off their values
but not their offsets. */
interchange or outer-loop vectorization). */
unsigned int
-loop_versioning::max_insns_for_loop (struct loop *loop)
+loop_versioning::max_insns_for_loop (class loop *loop)
{
return (loop->inner
? PARAM_VALUE (PARAM_LOOP_VERSIONING_MAX_OUTER_INSNS)
void
loop_versioning::version_for_unity (gimple *stmt, tree name)
{
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
loop_info &li = get_loop_info (loop);
if (bitmap_set_bit (&li.unity_names, SSA_NAME_VERSION (name)))
/* This is the first time we've wanted to version LOOP for NAME.
Keep track of the outermost loop that can handle all versioning
checks in LI. */
- struct loop *outermost
+ class loop *outermost
= outermost_invariant_loop_for_expr (loop, name);
if (loop_depth (li.outermost) < loop_depth (outermost))
li.outermost = outermost;
void
loop_versioning::analyze_stride (address_info &address,
address_term_info &term,
- tree stride, struct loop *op_loop)
+ tree stride, class loop *op_loop)
{
term.stride = stride;
if (!mult || gimple_assign_rhs_code (mult) != MULT_EXPR)
return false;
- struct loop *mult_loop = loop_containing_stmt (mult);
+ class loop *mult_loop = loop_containing_stmt (mult);
if (!loop_outer (mult_loop))
return false;
if (!setter)
return false;
- struct loop *wrt_loop = loop_containing_stmt (setter);
+ class loop *wrt_loop = loop_containing_stmt (setter);
if (!loop_outer (wrt_loop))
return false;
/* Quick exit if no part of the address is calculated in STMT's loop,
since such addresses have no versioning opportunities. */
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
if (expr_invariant_in_loop_p (loop, expr))
return;
bool
loop_versioning::analyze_block (basic_block bb)
{
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
loop_info &li = get_loop_info (loop);
for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
gsi_next (&gsi))
versioning at that level could be useful in some cases. */
get_loop_info (get_loop (m_fn, 0)).rejected_p = true;
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
loop_info &linfo = get_loop_info (loop);
/* See whether an inner loop prevents versioning of this loop. */
if (!linfo.rejected_p)
- for (struct loop *inner = loop->inner; inner; inner = inner->next)
+ for (class loop *inner = loop->inner; inner; inner = inner->next)
if (get_loop_info (inner).rejected_p)
{
linfo.rejected_p = true;
LOOP. */
void
-loop_versioning::prune_loop_conditions (struct loop *loop, vr_values *vrs)
+loop_versioning::prune_loop_conditions (class loop *loop, vr_values *vrs)
{
loop_info &li = get_loop_info (loop);
OUTER. */
void
-loop_versioning::merge_loop_info (struct loop *outer, struct loop *inner)
+loop_versioning::merge_loop_info (class loop *outer, class loop *inner)
{
loop_info &inner_li = get_loop_info (inner);
loop_info &outer_li = get_loop_info (outer);
/* Add LOOP to the queue of loops to version. */
void
-loop_versioning::add_loop_to_queue (struct loop *loop)
+loop_versioning::add_loop_to_queue (class loop *loop)
{
loop_info &li = get_loop_info (loop);
We have already made this decision for all inner loops of LOOP. */
bool
-loop_versioning::decide_whether_loop_is_versionable (struct loop *loop)
+loop_versioning::decide_whether_loop_is_versionable (class loop *loop)
{
loop_info &li = get_loop_info (loop);
return false;
/* Examine the decisions made for inner loops. */
- for (struct loop *inner = loop->inner; inner; inner = inner->next)
+ for (class loop *inner = loop->inner; inner; inner = inner->next)
{
loop_info &inner_li = get_loop_info (inner);
if (inner_li.rejected_p)
}
/* Hoist all version checks from subloops to this loop. */
- for (struct loop *subloop = loop->inner; subloop; subloop = subloop->next)
+ for (class loop *subloop = loop->inner; subloop; subloop = subloop->next)
merge_loop_info (loop, subloop);
return true;
AUTO_DUMP_SCOPE ("make_versioning_decisions",
dump_user_location_t::from_function_decl (m_fn->decl));
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
loop_info &linfo = get_loop_info (loop);
/* We can't version this loop, so individually version any
subloops that would benefit and haven't been versioned yet. */
linfo.rejected_p = true;
- for (struct loop *subloop = loop->inner; subloop;
+ for (class loop *subloop = loop->inner; subloop;
subloop = subloop->next)
if (get_loop_info (subloop).worth_versioning_p ())
add_loop_to_queue (subloop);
cached in the associated loop_info. Return true on success. */
bool
-loop_versioning::version_loop (struct loop *loop)
+loop_versioning::version_loop (class loop *loop)
{
loop_info &li = get_loop_info (loop);
user-facing at this point. */
bool any_succeeded_p = false;
- struct loop *loop;
+ class loop *loop;
unsigned int i;
FOR_EACH_VEC_ELT (m_loops_to_version, i, loop)
if (version_loop (loop))
use PHI arg ranges which may still be UNDEFINED but have
to use VARYING for them. But we can still resort to
SCEV for loop header PHIs. */
- struct loop *l;
+ class loop *l;
if (scev_initialized_p ()
&& interesting
&& (l = loop_containing_stmt (phi))
virtual unsigned int execute (function *);
private:
- hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores;
+ hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
/* Form a doubly-linked stack of the elements of m_stores, so that
we can iterate over them in a predictable way. Using this order
if there is exactly one original store in the range. */
static store_immediate_info *
-find_constituent_stores (struct merged_store_group *group,
+find_constituent_stores (class merged_store_group *group,
vec<store_immediate_info *> *stores,
unsigned int *first,
unsigned HOST_WIDE_INT bitpos,
static unsigned int
split_group (merged_store_group *group, bool allow_unaligned_store,
bool allow_unaligned_load, bool bzero_first,
- vec<struct split_store *> *split_stores,
+ vec<split_store *> *split_stores,
unsigned *total_orig,
unsigned *total_new)
{
if (align_bitpos)
align = least_bit_hwi (align_bitpos);
bytepos = group->start / BITS_PER_UNIT;
- struct split_store *store
+ split_store *store
= new split_store (bytepos, group->width, align);
unsigned int first = 0;
find_constituent_stores (group, &store->orig_stores,
ret = 1;
if (split_stores)
{
- struct split_store *store
+ split_store *store
= new split_store (bytepos, group->stores[0]->bitsize, align_base);
store->orig_stores.safe_push (group->stores[0]);
store->orig = true;
if (split_stores)
{
- struct split_store *store
+ split_store *store
= new split_store (try_pos, try_size, align);
info = find_constituent_stores (group, &store->orig_stores,
&first, try_bitpos, try_size);
if (total_orig)
{
unsigned int i;
- struct split_store *store;
+ split_store *store;
/* If we are reusing some original stores and any of the
original SSA_NAMEs had multiple uses, we need to subtract
those now before we add the new ones. */
if (orig_num_stmts < 2)
return false;
- auto_vec<struct split_store *, 32> split_stores;
+ auto_vec<class split_store *, 32> split_stores;
bool allow_unaligned_store
= !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
bool allow_unaligned_load = allow_unaligned_store;
if (!ins_stmt)
memset (&n, 0, sizeof (n));
- struct imm_store_chain_info **chain_info = NULL;
+ class imm_store_chain_info **chain_info = NULL;
if (base_addr)
chain_info = m_stores.get (base_addr);
/* Store aliases any existing chain? */
terminate_all_aliasing_chains (NULL, stmt);
/* Start a new chain. */
- struct imm_store_chain_info *new_chain
+ class imm_store_chain_info *new_chain
= new imm_store_chain_info (m_stores_head, base_addr);
info = new store_immediate_info (const_bitsize, const_bitpos,
const_bitregion_start,
tree cached_basis;
};
-typedef struct slsr_cand_d slsr_cand, *slsr_cand_t;
-typedef const struct slsr_cand_d *const_slsr_cand_t;
+typedef class slsr_cand_d slsr_cand, *slsr_cand_t;
+typedef const class slsr_cand_d *const_slsr_cand_t;
/* Pointers to candidates are chained together as part of a mapping
from base expressions to the candidates that use them. */
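As described, the mapping pairs a hash table keyed on the base expression
with an intrusive chain threaded through the candidates themselves. A
minimal sketch of that shape, with std::string standing in for the base
expression (illustrative only, not the pass's real types):

  #include <string>
  #include <unordered_map>

  struct cand
  {
    std::string base;                    /* stand-in for the base expr */
    cand *next_with_same_base = nullptr;
  };

  static std::unordered_map<std::string, cand *> base_to_cands;

  static void
  record_cand (cand *c)
  {
    cand *&head = base_to_cands[c->base];
    c->next_with_same_base = head;       /* prepend to the per-base chain */
    head = c;
  }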
basic_block init_bb;
};
-typedef struct incr_info_d incr_info, *incr_info_t;
+typedef class incr_info_d incr_info, *incr_info_t;
/* Candidates are maintained in a vector. If candidate X dominates
candidate Y, then X appears before Y in the vector; but the
unsigned i;
tree arg0_base = NULL_TREE, base_type;
slsr_cand_t c;
- struct loop *cand_loop = gimple_bb (phi)->loop_father;
+ class loop *cand_loop = gimple_bb (phi)->loop_father;
unsigned savings = 0;
/* A CAND_PHI requires each of its arguments to have the same
// MAX_SIZE is WARN_ALLOCA= adjusted for VLAs. It is the maximum size
// in bytes we allow for arg.
-static struct alloca_type_and_limit
+static class alloca_type_and_limit
alloca_call_type_by_arg (tree arg, tree arg_casted, edge e,
unsigned HOST_WIDE_INT max_size)
{
// type to an unsigned type, set *INVALID_CASTED_TYPE to the
// problematic signed type.
-static struct alloca_type_and_limit
+static class alloca_type_and_limit
alloca_call_type (gimple *stmt, bool is_vla, tree *invalid_casted_type)
{
gcc_assert (gimple_alloca_call_p (stmt));
// If we couldn't find anything, try a few heuristics for things we
// can easily determine. Check these misc cases but only accept
// them if all predecessors have a known bound.
- struct alloca_type_and_limit ret = alloca_type_and_limit (ALLOCA_OK);
+ class alloca_type_and_limit ret = alloca_type_and_limit (ALLOCA_OK);
FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->preds)
{
gcc_assert (!len_casted || TYPE_UNSIGNED (TREE_TYPE (len_casted)));
continue;
tree invalid_casted_type = NULL;
- struct alloca_type_and_limit t
+ class alloca_type_and_limit t
= alloca_call_type (stmt, is_vla, &invalid_casted_type);
unsigned HOST_WIDE_INT adjusted_alloca_limit
the file being read. IB is the input block to use for reading. */
static gphi *
-input_phi (struct lto_input_block *ib, basic_block bb, struct data_in *data_in,
+input_phi (class lto_input_block *ib, basic_block bb, class data_in *data_in,
struct function *fn)
{
unsigned HOST_WIDE_INT ix;
descriptors in DATA_IN. */
static gimple *
-input_gimple_stmt (struct lto_input_block *ib, struct data_in *data_in,
+input_gimple_stmt (class lto_input_block *ib, class data_in *data_in,
enum LTO_tags tag)
{
gimple *stmt;
FN is the function being processed. */
void
-input_bb (struct lto_input_block *ib, enum LTO_tags tag,
- struct data_in *data_in, struct function *fn,
+input_bb (class lto_input_block *ib, enum LTO_tags tag,
+ class data_in *data_in, struct function *fn,
int count_materialization_scale)
{
unsigned int index;
#include "tree-streamer.h"
/* In gimple-streamer-in.c */
-void input_bb (struct lto_input_block *, enum LTO_tags, struct data_in *,
+void input_bb (class lto_input_block *, enum LTO_tags, class data_in *,
struct function *, int);
/* In gimple-streamer-out.c */
calls from go_format_type() itself. */
static bool
-go_format_type (struct godump_container *container, tree type,
+go_format_type (class godump_container *container, tree type,
bool use_type_name, bool is_func_ok, unsigned int *p_art_i,
bool is_anon_record_or_union)
{
it. */
static void
-go_output_type (struct godump_container *container)
+go_output_type (class godump_container *container)
{
struct obstack *ob;
/* Output a function declaration. */
static void
-go_output_fndecl (struct godump_container *container, tree decl)
+go_output_fndecl (class godump_container *container, tree decl)
{
if (!go_format_type (container, TREE_TYPE (decl), false, true, NULL, false))
fprintf (go_dump_file, "// ");
/* Output a typedef or something like a struct definition. */
static void
-go_output_typedef (struct godump_container *container, tree decl)
+go_output_typedef (class godump_container *container, tree decl)
{
/* If we have an enum type, output the enum constants
separately. */
/* Output a variable. */
static void
-go_output_var (struct godump_container *container, tree decl)
+go_output_var (class godump_container *container, tree decl)
{
bool is_valid;
tree type_name;
};
static void
-keyword_hash_init (struct godump_container *container)
+keyword_hash_init (class godump_container *container)
{
size_t i;
size_t count = sizeof (keywords) / sizeof (keywords[0]);
bool
find_dummy_types (const char *const &ptr, godump_container *adata)
{
- struct godump_container *data = (struct godump_container *) adata;
+ class godump_container *data = (class godump_container *) adata;
const char *type = (const char *) ptr;
void **slot;
void **islot;
static void
go_finish (const char *filename)
{
- struct godump_container container;
+ class godump_container container;
unsigned int ix;
tree decl;
static void
draw_cfg_nodes_for_loop (pretty_printer *pp, int funcdef_no,
- struct loop *loop)
+ class loop *loop)
{
basic_block *body;
unsigned int i;
fillcolors[(loop_depth (loop) - 1) % 3],
loop->num);
- for (struct loop *inner = loop->inner; inner; inner = inner->next)
+ for (class loop *inner = loop->inner; inner; inner = inner->next)
draw_cfg_nodes_for_loop (pp, funcdef_no, inner);
if (loop->header == NULL)
extern char global_regs[FIRST_PSEUDO_REGISTER];
-struct simplifiable_subreg;
-struct subreg_shape;
+class simplifiable_subreg;
+class subreg_shape;
struct simplifiable_subregs_hasher : nofree_ptr_hash <simplifiable_subreg>
{
/* Structure representing a BRIG section, holding and writing its data. */
-class hsa_brig_section
+struct hsa_brig_section
{
-public:
/* Section name that will be output to the BRIG. */
const char *section_name;
/* Size in bytes of all data stored in the section. */
Return the offset of the directive. */
static unsigned
-emit_directive_variable (struct hsa_symbol *symbol)
+emit_directive_variable (class hsa_symbol *symbol)
{
struct BrigDirectiveVariable dirvar;
unsigned name_offset;
static inline hsa_bb *
hsa_bb_for_bb (basic_block bb)
{
- return (struct hsa_bb *) bb->aux;
+ return (class hsa_bb *) bb->aux;
}
/* Class for hashing local hsa_symbols. */
hash_map <tree, hsa_symbol *> m_string_constants_map;
/* Vector of pointers to spill symbols. */
- vec <struct hsa_symbol *> m_spill_symbols;
+ vec <class hsa_symbol *> m_spill_symbols;
/* Vector of pointers to global variables and transformed string constants
that are used by the function. */
- vec <struct hsa_symbol *> m_global_symbols;
+ vec <class hsa_symbol *> m_global_symbols;
/* Private function artificial variables. */
- vec <struct hsa_symbol *> m_private_variables;
+ vec <class hsa_symbol *> m_private_variables;
/* Vector of called function declarations. */
vec <tree> m_called_functions;
}
/* in hsa-common.c */
-extern struct hsa_function_representation *hsa_cfun;
+extern class hsa_function_representation *hsa_cfun;
extern hash_map <tree, vec <const char *> *> *hsa_decl_kernel_dependencies;
extern hsa_summary_t *hsa_summaries;
extern hsa_symbol *hsa_num_threads;
FOR_ALL_BB_FN (bb, cfun)
{
- hsa_bb *hbb = (struct hsa_bb *) bb->aux;
+ hsa_bb *hbb = (class hsa_bb *) bb->aux;
dump_hsa_bb (f, hbb);
}
}
for (parm = DECL_ARGUMENTS (cfun->decl); parm;
parm = DECL_CHAIN (parm))
{
- struct hsa_symbol **slot;
+ class hsa_symbol **slot;
hsa_symbol *arg
= new hsa_symbol (BRIG_TYPE_NONE, hsa_cfun->m_kern_p
if (!VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl))))
{
- struct hsa_symbol **slot;
+ class hsa_symbol **slot;
hsa_cfun->m_output_arg = new hsa_symbol (BRIG_TYPE_NONE, BRIG_SEGMENT_ARG,
BRIG_LINKAGE_FUNCTION);
FOR_ALL_BB_FN (bb, cfun)
{
- hsa_bb *hbb = (struct hsa_bb *) bb->aux;
+ hsa_bb *hbb = (class hsa_bb *) bb->aux;
bitmap_print (dump_file, hbb->m_livein, "m_livein ", "\n");
dump_hsa_bb (f, hbb);
bitmap_print (dump_file, hbb->m_liveout, "m_liveout ", "\n");
location_t input_location = UNKNOWN_LOCATION;
-struct line_maps *line_table;
+class line_maps *line_table;
/* A stashed copy of "line_table" for use by selftest::line_table_test.
This needs to be a global so that it can be a GC root, and thus
prevent the stashed copy from being garbage-collected if the GC runs
during a line_table_test. */
-struct line_maps *saved_line_table;
+class line_maps *saved_line_table;
static fcache *fcache_tab;
static const size_t fcache_tab_size = 16;
/* Get location one beyond the final location in ordinary map IDX. */
static location_t
-get_end_location (struct line_maps *set, unsigned int idx)
+get_end_location (class line_maps *set, unsigned int idx)
{
if (idx == LINEMAPS_ORDINARY_USED (set) - 1)
return set->highest_location;
/* Forward decls. */
-struct lexer_test;
+class lexer_test;
class lexer_test_options;
/* A class for specifying options of a lexer_test.
#include "line-map.h"
-extern GTY(()) struct line_maps *line_table;
-extern GTY(()) struct line_maps *saved_line_table;
+extern GTY(()) class line_maps *line_table;
+extern GTY(()) class line_maps *saved_line_table;
/* A value which will never be used to represent a real location. */
#define UNKNOWN_LOCATION ((location_t) 0)
static void
expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
tree type, lhs, rhs;
rtx target, mem;
static void
expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
tree type, lhs, rhs;
rtx target, reg;
target = gen_reg_rtx (Pmode);
rtx size = expand_normal (gimple_call_arg (stmt, 0));
rtx align = expand_normal (gimple_call_arg (stmt, 1));
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, Pmode);
create_input_operand (&ops[1], size, Pmode);
create_input_operand (&ops[2], align, Pmode);
{
gcc_checking_assert (!gimple_call_lhs (stmt));
rtx arg = expand_normal (gimple_call_arg (stmt, 0));
- struct expand_operand ops[1];
+ class expand_operand ops[1];
create_input_operand (&ops[0], arg, Pmode);
gcc_assert (targetm.have_omp_simt_exit ());
expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
rtx cond = expand_normal (gimple_call_arg (stmt, 0));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], cond, mode);
gcc_assert (targetm.have_omp_simt_last_lane ());
rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], ctr, mode);
gcc_assert (targetm.have_omp_simt_ordered ());
rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
rtx cond = expand_normal (gimple_call_arg (stmt, 0));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], cond, mode);
gcc_assert (targetm.have_omp_simt_vote_any ());
rtx src = expand_normal (gimple_call_arg (stmt, 0));
rtx idx = expand_normal (gimple_call_arg (stmt, 1));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], src, mode);
create_input_operand (&ops[2], idx, SImode);
rtx src = expand_normal (gimple_call_arg (stmt, 0));
rtx idx = expand_normal (gimple_call_arg (stmt, 1));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], src, mode);
create_input_operand (&ops[2], idx, SImode);
: usubv4_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
: subv4_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
enum insn_code icode = optab_handler (negv3_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
}
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
static void
expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
tree type, lhs, rhs, maskt;
rtx mem, target, mask;
insn_code icode;
static void
expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
tree type, lhs, rhs, maskt;
rtx mem, reg, mask;
insn_code icode;
HOST_WIDE_INT scale_int = tree_to_shwi (scale);
rtx rhs_rtx = expand_normal (rhs);
- struct expand_operand ops[6];
+ class expand_operand ops[6];
int i = 0;
create_address_operand (&ops[i++], base_rtx);
create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
HOST_WIDE_INT scale_int = tree_to_shwi (scale);
int i = 0;
- struct expand_operand ops[6];
+ class expand_operand ops[6];
create_output_operand (&ops[i++], lhs_rtx, TYPE_MODE (TREE_TYPE (lhs)));
create_address_operand (&ops[i++], base_rtx);
create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
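All of the expand_* hunks above share one optab-expansion idiom: declare a small array of expand_operand, fill it with the create_*_operand helpers, then emit the chosen instruction. A condensed sketch of that shape, assembled from the surrounding hunks rather than copied from any one function (icode stands for an insn_code obtained earlier, e.g. from optab_handler):

  class expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);  /* operand 0: destination */
  create_input_operand (&ops[1], src, mode);      /* operand 1: source value */
  expand_insn (icode, 2, ops);                    /* emit the pattern */

The declaration sites change mechanically from struct to class because that is the key carried by the definition of expand_operand.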
/* Return the param lattices structure corresponding to the Ith formal
parameter of the function described by INFO. */
-static inline struct ipcp_param_lattices *
-ipa_get_parm_lattices (struct ipa_node_params *info, int i)
+static inline class ipcp_param_lattices *
+ipa_get_parm_lattices (class ipa_node_params *info, int i)
{
gcc_assert (i >= 0 && i < ipa_get_param_count (info));
gcc_checking_assert (!info->ipcp_orig_node);
/* Return the lattice corresponding to the scalar value of the Ith formal
parameter of the function described by INFO. */
static inline ipcp_lattice<tree> *
-ipa_get_scalar_lat (struct ipa_node_params *info, int i)
+ipa_get_scalar_lat (class ipa_node_params *info, int i)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
return &plats->itself;
}
/* Return the lattice corresponding to the scalar value of the Ith formal
parameter of the function described by INFO. */
static inline ipcp_lattice<ipa_polymorphic_call_context> *
-ipa_get_poly_ctx_lat (struct ipa_node_params *info, int i)
+ipa_get_poly_ctx_lat (class ipa_node_params *info, int i)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
return &plats->ctxlat;
}
fprintf (f, "\nLattices:\n");
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
{
- struct ipa_node_params *info;
+ class ipa_node_params *info;
info = IPA_NODE_REF (node);
/* Skip constprop clones since we don't make lattices for them. */
for (i = 0; i < count; i++)
{
struct ipcp_agg_lattice *aglat;
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
fprintf (f, " param [%d]: ", i);
plats->itself.print (f, dump_sources, dump_benefits);
fprintf (f, " ctxs: ");
static void
determine_versionability (struct cgraph_node *node,
- struct ipa_node_params *info)
+ class ipa_node_params *info)
{
const char *reason = NULL;
/* Allocate the arrays in TOPO and topologically sort the nodes into order. */
static void
-build_toporder_info (struct ipa_topo_info *topo)
+build_toporder_info (class ipa_topo_info *topo)
{
topo->order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
topo->stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
TOPO. */
static void
-free_toporder_info (struct ipa_topo_info *topo)
+free_toporder_info (class ipa_topo_info *topo)
{
ipa_free_postorder_info ();
free (topo->order);
/* Add NODE to the stack in TOPO, unless it is already there. */
static inline void
-push_node_to_stack (struct ipa_topo_info *topo, struct cgraph_node *node)
+push_node_to_stack (class ipa_topo_info *topo, struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
if (info->node_enqueued)
return;
info->node_enqueued = 1;
is empty. */
static struct cgraph_node *
-pop_node_from_stack (struct ipa_topo_info *topo)
+pop_node_from_stack (class ipa_topo_info *topo)
{
if (topo->stack_top)
{
not previously set as such. */
static inline bool
-set_agg_lats_to_bottom (struct ipcp_param_lattices *plats)
+set_agg_lats_to_bottom (class ipcp_param_lattices *plats)
{
bool ret = !plats->aggs_bottom;
plats->aggs_bottom = true;
return true if they were not previously marked as such. */
static inline bool
-set_agg_lats_contain_variable (struct ipcp_param_lattices *plats)
+set_agg_lats_contain_variable (class ipcp_param_lattices *plats)
{
bool ret = !plats->aggs_contain_variable;
plats->aggs_contain_variable = true;
return true if any of them has not been marked as such so far. */
static inline bool
-set_all_contains_variable (struct ipcp_param_lattices *plats)
+set_all_contains_variable (class ipcp_param_lattices *plats)
{
bool ret;
ret = plats->itself.set_contains_variable ();
static void
initialize_node_lattices (struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
struct cgraph_edge *ie;
bool disable = false, variable = false;
int i;
for (i = 0; i < ipa_get_param_count (info); i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
plats->m_value_range.init ();
}
{
for (i = 0; i < ipa_get_param_count (info); i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
if (disable)
{
plats->itself.set_to_bottom ();
passed. */
tree
-ipa_value_from_jfunc (struct ipa_node_params *info, struct ipa_jump_func *jfunc,
+ipa_value_from_jfunc (class ipa_node_params *info, struct ipa_jump_func *jfunc,
tree parm_type)
{
if (jfunc->type == IPA_JF_CONST)
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
for (i = 0; i < count; i++)
else if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
ipcp_lattice<tree> *src_lat;
int src_idx;
bool ret;
if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx;
ipcp_lattice<ipa_polymorphic_call_context> *src_lat;
enum availability availability;
cgraph_node *callee = cs->callee->function_symbol (&availability);
- struct ipa_node_params *callee_info = IPA_NODE_REF (callee);
+ class ipa_node_params *callee_info = IPA_NODE_REF (callee);
tree parm_type = ipa_get_type (callee_info, idx);
/* For K&R C programs, ipa_get_type() could return NULL_TREE. Avoid the
if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
tree operand = NULL_TREE;
enum tree_code code;
unsigned src_idx;
operand = build_int_cstu (size_type_node, offset);
}
- struct ipcp_param_lattices *src_lats
+ class ipcp_param_lattices *src_lats
= ipa_get_parm_lattices (caller_info, src_idx);
/* Try to propagate bits if src_lattice is bottom, but jfunc is known.
static bool
propagate_vr_across_jump_function (cgraph_edge *cs, ipa_jump_func *jfunc,
- struct ipcp_param_lattices *dest_plats,
+ class ipcp_param_lattices *dest_plats,
tree param_type)
{
ipcp_vr_lattice *dest_lat = &dest_plats->m_value_range;
if (TREE_CODE_CLASS (operation) == tcc_unary)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
tree operand_type = ipa_get_type (caller_info, src_idx);
- struct ipcp_param_lattices *src_lats
+ class ipcp_param_lattices *src_lats
= ipa_get_parm_lattices (caller_info, src_idx);
if (src_lats->m_value_range.bottom_p ())
aggs_by_ref to NEW_AGGS_BY_REF. */
static bool
-set_check_aggs_by_ref (struct ipcp_param_lattices *dest_plats,
+set_check_aggs_by_ref (class ipcp_param_lattices *dest_plats,
bool new_aggs_by_ref)
{
if (dest_plats->aggs)
true. */
static bool
-merge_agg_lats_step (struct ipcp_param_lattices *dest_plats,
+merge_agg_lats_step (class ipcp_param_lattices *dest_plats,
HOST_WIDE_INT offset, HOST_WIDE_INT val_size,
struct ipcp_agg_lattice ***aglat,
bool pre_existing, bool *change)
static bool
merge_aggregate_lattices (struct cgraph_edge *cs,
- struct ipcp_param_lattices *dest_plats,
- struct ipcp_param_lattices *src_plats,
+ class ipcp_param_lattices *dest_plats,
+ class ipcp_param_lattices *src_plats,
int src_idx, HOST_WIDE_INT offset_delta)
{
bool pre_existing = dest_plats->aggs != NULL;
rules about propagating values passed by reference. */
static bool
-agg_pass_through_permissible_p (struct ipcp_param_lattices *src_plats,
+agg_pass_through_permissible_p (class ipcp_param_lattices *src_plats,
struct ipa_jump_func *jfunc)
{
return src_plats->aggs
static bool
propagate_aggs_across_jump_function (struct cgraph_edge *cs,
struct ipa_jump_func *jfunc,
- struct ipcp_param_lattices *dest_plats)
+ class ipcp_param_lattices *dest_plats)
{
bool ret = false;
if (jfunc->type == IPA_JF_PASS_THROUGH
&& ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
- struct ipcp_param_lattices *src_plats;
+ class ipcp_param_lattices *src_plats;
src_plats = ipa_get_parm_lattices (caller_info, src_idx);
if (agg_pass_through_permissible_p (src_plats, jfunc))
else if (jfunc->type == IPA_JF_ANCESTOR
&& ipa_get_jf_ancestor_agg_preserved (jfunc))
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
- struct ipcp_param_lattices *src_plats;
+ class ipcp_param_lattices *src_plats;
src_plats = ipa_get_parm_lattices (caller_info, src_idx);
if (src_plats->aggs && src_plats->aggs_by_ref)
static bool
propagate_constants_across_call (struct cgraph_edge *cs)
{
- struct ipa_node_params *callee_info;
+ class ipa_node_params *callee_info;
enum availability availability;
cgraph_node *callee;
- struct ipa_edge_args *args;
+ class ipa_edge_args *args;
bool ret = false;
int i, args_count, parms_count;
for (; (i < args_count) && (i < parms_count); i++)
{
struct ipa_jump_func *jump_func = ipa_get_ith_jump_func (args, i);
- struct ipcp_param_lattices *dest_plats;
+ class ipcp_param_lattices *dest_plats;
tree param_type = ipa_get_type (callee_info, i);
dest_plats = ipa_get_parm_lattices (callee_info, i);
for (ie = node->indirect_calls; ie; ie = ie->next_callee)
{
struct cgraph_node *callee;
- struct ipa_fn_summary *isummary;
+ class ipa_fn_summary *isummary;
enum availability avail;
tree target;
bool speculative;
gcc_assert (size_cost > 0);
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
if (max_count > profile_count::zero ())
{
int factor = RDIV (count_sum.probability_in
vector. Return NULL if there are none. */
static vec<ipa_agg_jf_item, va_gc> *
-context_independent_aggregate_values (struct ipcp_param_lattices *plats)
+context_independent_aggregate_values (class ipcp_param_lattices *plats)
{
vec<ipa_agg_jf_item, va_gc> *res = NULL;
it. */
static bool
-gather_context_independent_values (struct ipa_node_params *info,
+gather_context_independent_values (class ipa_node_params *info,
vec<tree> *known_csts,
vec<ipa_polymorphic_call_context>
*known_contexts,
for (i = 0; i < count; i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
ipcp_lattice<tree> *lat = &plats->itself;
if (lat->is_single_const ())
static void
estimate_local_effects (struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
vec<tree> known_csts;
vec<ipa_polymorphic_call_context> known_contexts;
for (i = 0; i < count; i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
ipcp_lattice<tree> *lat = &plats->itself;
ipcp_value<tree> *val;
for (i = 0; i < count; i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
if (!plats->virt_call)
continue;
for (i = 0; i < count; i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
struct ipa_agg_jump_function *ajf;
struct ipcp_agg_lattice *aglat;
static void
add_all_node_vals_to_toposort (cgraph_node *node, ipa_topo_info *topo)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
for (i = 0; i < count; i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
ipcp_lattice<tree> *lat = &plats->itself;
struct ipcp_agg_lattice *aglat;
connected components. */
static void
-propagate_constants_topo (struct ipa_topo_info *topo)
+propagate_constants_topo (class ipa_topo_info *topo)
{
int i;
summaries interprocedurally. */
static void
-ipcp_propagate_stage (struct ipa_topo_info *topo)
+ipcp_propagate_stage (class ipa_topo_info *topo)
{
struct cgraph_node *node;
FOR_EACH_DEFINED_FUNCTION (node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
determine_versionability (node, info);
if (node->has_gimple_body_p ())
{
- info->lattices = XCNEWVEC (struct ipcp_param_lattices,
+ info->lattices = XCNEWVEC (class ipcp_param_lattices,
ipa_get_param_count (info));
initialize_node_lattices (node);
}
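XCNEWVEC only uses its type argument inside a cast and a sizeof, so the class-key change expands to identical code; it is updated purely so the tag agrees with the definition. From libiberty.h, quoted from memory and therefore approximate:

  #define XCNEWVEC(T, N)  ((T *) xcalloc ((N), sizeof (T)))

  /* XCNEWVEC (class ipcp_param_lattices, n) thus expands to
     ((class ipcp_param_lattices *) xcalloc (n, sizeof (class ipcp_param_lattices))).  */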
if (cs && !agg_contents && !polymorphic)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int c = ipa_get_controlled_uses (info, param_index);
if (c != IPA_UNDESCRIBED_USE)
{
if (node == dest)
return true;
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
return info->is_all_contexts_clone && info->ipcp_orig_node == dest;
}
cgraph_edge_brings_value_p (cgraph_edge *cs, ipcp_value_source<tree> *src,
cgraph_node *dest, ipcp_value<tree> *dest_val)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
enum availability availability;
cgraph_node *real_dest = cs->callee->function_symbol (&availability);
return true;
struct ipcp_agg_lattice *aglat;
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
src->index);
if (src->offset == -1)
return (plats->itself.is_single_const ()
cgraph_node *dest,
ipcp_value<ipa_polymorphic_call_context> *)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
cgraph_node *real_dest = cs->callee->function_symbol ();
if (!same_node_or_its_all_contexts_clone_p (real_dest, dest)
&& values_equal_for_ipcp_p (src->val->value,
caller_info->known_contexts[src->index]);
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
src->index);
return plats->ctxlat.is_single_const ()
&& values_equal_for_ipcp_p (src->val->value,
Return it or NULL if for some reason it cannot be created. */
static struct ipa_replace_map *
-get_replacement_map (struct ipa_node_params *info, tree value, int parm_num)
+get_replacement_map (class ipa_node_params *info, tree value, int parm_num)
{
struct ipa_replace_map *replace_map;
struct ipa_agg_replacement_value *aggvals,
vec<cgraph_edge *> callers)
{
- struct ipa_node_params *new_info, *info = IPA_NODE_REF (node);
+ class ipa_node_params *new_info, *info = IPA_NODE_REF (node);
vec<ipa_replace_map *, va_gc> *replace_trees = NULL;
struct ipa_agg_replacement_value *av;
struct cgraph_node *new_node;
vec<tree> known_csts,
vec<cgraph_edge *> callers)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
for (i = 0; i < count; i++)
offsets (minus OFFSET) of lattices that contain only a single value. */
static vec<ipa_agg_jf_item>
-copy_plats_to_inter (struct ipcp_param_lattices *plats, HOST_WIDE_INT offset)
+copy_plats_to_inter (class ipcp_param_lattices *plats, HOST_WIDE_INT offset)
{
vec<ipa_agg_jf_item> res = vNULL;
subtracting OFFSET). */
static void
-intersect_with_plats (struct ipcp_param_lattices *plats,
+intersect_with_plats (class ipcp_param_lattices *plats,
vec<ipa_agg_jf_item> *inter,
HOST_WIDE_INT offset)
{
if (jfunc->type == IPA_JF_PASS_THROUGH
&& ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
if (caller_info->ipcp_orig_node)
{
struct cgraph_node *orig_node = caller_info->ipcp_orig_node;
- struct ipcp_param_lattices *orig_plats;
+ class ipcp_param_lattices *orig_plats;
orig_plats = ipa_get_parm_lattices (IPA_NODE_REF (orig_node),
src_idx);
if (agg_pass_through_permissible_p (orig_plats, jfunc))
}
else
{
- struct ipcp_param_lattices *src_plats;
+ class ipcp_param_lattices *src_plats;
src_plats = ipa_get_parm_lattices (caller_info, src_idx);
if (agg_pass_through_permissible_p (src_plats, jfunc))
{
else if (jfunc->type == IPA_JF_ANCESTOR
&& ipa_get_jf_ancestor_agg_preserved (jfunc))
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
- struct ipcp_param_lattices *src_plats;
+ class ipcp_param_lattices *src_plats;
HOST_WIDE_INT delta = ipa_get_jf_ancestor_offset (jfunc);
if (caller_info->ipcp_orig_node)
find_aggregate_values_for_callers_subset (struct cgraph_node *node,
vec<cgraph_edge *> callers)
{
- struct ipa_node_params *dest_info = IPA_NODE_REF (node);
+ class ipa_node_params *dest_info = IPA_NODE_REF (node);
struct ipa_agg_replacement_value *res;
struct ipa_agg_replacement_value **tail = &res;
struct cgraph_edge *cs;
struct cgraph_edge *cs;
vec<ipa_agg_jf_item> inter = vNULL;
struct ipa_agg_jf_item *item;
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (dest_info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (dest_info, i);
int j;
/* Among other things, the following check should deal with all by_ref
cgraph_edge_brings_all_scalars_for_node (struct cgraph_edge *cs,
struct cgraph_node *node)
{
- struct ipa_node_params *dest_info = IPA_NODE_REF (node);
+ class ipa_node_params *dest_info = IPA_NODE_REF (node);
int count = ipa_get_param_count (dest_info);
- struct ipa_node_params *caller_info;
- struct ipa_edge_args *args;
+ class ipa_node_params *caller_info;
+ class ipa_edge_args *args;
int i;
caller_info = IPA_NODE_REF (cs->caller);
cgraph_edge_brings_all_agg_vals_for_node (struct cgraph_edge *cs,
struct cgraph_node *node)
{
- struct ipa_node_params *orig_node_info;
+ class ipa_node_params *orig_node_info;
struct ipa_agg_replacement_value *aggval;
int i, ec, count;
for (i = 0; i < count; i++)
{
static vec<ipa_agg_jf_item> values = vec<ipa_agg_jf_item>();
- struct ipcp_param_lattices *plats;
+ class ipcp_param_lattices *plats;
bool interesting = false;
for (struct ipa_agg_replacement_value *av = aggval; av; av = av->next)
if (aggval->index == i)
static bool
decide_whether_version_node (struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
vec<tree> known_csts;
vec<ipa_polymorphic_call_context> known_contexts;
for (i = 0; i < count;i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
ipcp_lattice<tree> *lat = &plats->itself;
ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
if (ipa_edge_within_scc (cs))
{
struct cgraph_node *callee;
- struct ipa_node_params *info;
+ class ipa_node_params *info;
callee = cs->callee->function_symbol (NULL);
info = IPA_NODE_REF (callee);
TOPO and make specialized clones if deemed beneficial. */
static void
-ipcp_decision_stage (struct ipa_topo_info *topo)
+ipcp_decision_stage (class ipa_topo_info *topo)
{
int i;
static unsigned int
ipcp_driver (void)
{
- struct ipa_topo_info topo;
+ class ipa_topo_info topo;
if (edge_clone_summaries == NULL)
edge_clone_summaries = new edge_clone_summary_t (symtab);
}
}
-struct final_warning_record *final_warning_records;
+class final_warning_record *final_warning_records;
/* Return vector containing possible targets of polymorphic call of type
OTR_TYPE calling method OTR_TOKEN within type of OTR_OUTER_TYPE and OFFSET.
}
if (!found)
{
- struct size_time_entry new_entry;
+ class size_time_entry new_entry;
new_entry.size = size;
new_entry.time = time;
new_entry.exec_predicate = exec_pred;
e->make_direct (target);
else
e->redirect_callee (target);
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
e->inline_failed = CIF_UNREACHABLE;
e->count = profile_count::zero ();
es->call_stmt_size = 0;
&& (!e->speculative || e->callee))
e = redirect_to_unreachable (e);
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
if (predicate && *predicate != true)
{
if (!es->predicate)
{
clause_t clause = inline_p ? 0 : 1 << predicate::not_inlined_condition;
clause_t nonspec_clause = 1 << predicate::not_inlined_condition;
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (node);
int i;
struct condition *c;
vec<ipa_agg_jump_function_p> *known_aggs_ptr)
{
struct cgraph_node *callee = e->callee->ultimate_alias_target ();
- struct ipa_fn_summary *info = ipa_fn_summaries->get (callee);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (callee);
vec<tree> known_vals = vNULL;
vec<ipa_agg_jump_function_p> known_aggs = vNULL;
&& !e->call_stmt_cannot_inline_p
&& ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
{
- struct ipa_node_params *caller_parms_info, *callee_pi;
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_node_params *caller_parms_info, *callee_pi;
+ class ipa_edge_args *args = IPA_EDGE_REF (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
int i, count = ipa_get_cs_argument_count (args);
if (e->caller->global.inlined_to)
{
vec<size_time_entry, va_gc> *entry = info->size_time_table;
/* Use SRC parm info since it may not be copied yet. */
- struct ipa_node_params *parms_info = IPA_NODE_REF (src);
+ class ipa_node_params *parms_info = IPA_NODE_REF (src);
vec<tree> known_vals = vNULL;
int count = ipa_get_param_count (parms_info);
int i, j;
for (edge = dst->callees; edge; edge = next)
{
predicate new_predicate;
- struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get_create (edge);
next = edge->next_callee;
if (!edge->inline_failed)
for (edge = dst->indirect_calls; edge; edge = next)
{
predicate new_predicate;
- struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get_create (edge);
next = edge->next_callee;
gcc_checking_assert (edge->inline_failed);
void
ipa_call_summary_t::duplicate (struct cgraph_edge *src,
struct cgraph_edge *dst,
- struct ipa_call_summary *srcinfo,
- struct ipa_call_summary *info)
+ class ipa_call_summary *srcinfo,
+ class ipa_call_summary *info)
{
new (info) ipa_call_summary (*srcinfo);
info->predicate = NULL;
static void
dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
- struct ipa_fn_summary *info)
+ class ipa_fn_summary *info)
{
struct cgraph_edge *edge;
for (edge = node->callees; edge; edge = edge->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
int i;
}
for (edge = node->indirect_calls; edge; edge = edge->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
fprintf (f, "%*sindirect call loop depth:%2i freq:%4.2f size:%2i"
" time: %2i",
indent, "",
{
if (node->definition)
{
- struct ipa_fn_summary *s = ipa_fn_summaries->get (node);
+ class ipa_fn_summary *s = ipa_fn_summaries->get (node);
if (s != NULL)
{
size_time_entry *e;
static void
set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
- struct ipa_fn_summary *summary,
+ class ipa_fn_summary *summary,
basic_block bb)
{
gimple *last;
static void
set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
- struct ipa_fn_summary *summary,
+ class ipa_fn_summary *summary,
basic_block bb)
{
gimple *lastg;
unshare_expr_without_location (max));
p = p1 & p2;
}
- *(struct predicate *) e->aux
- = p.or_with (summary->conds, *(struct predicate *) e->aux);
+ *(class predicate *) e->aux
+ = p.or_with (summary->conds, *(class predicate *) e->aux);
}
}
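Casts are one more context where a class-key can appear: the per-edge predicates here live behind the generic void *aux pointer, so they are recovered through an elaborated-type cast, and either key would name the same type. A standalone sketch with hypothetical names:

  class pred { public: bool value; };

  static bool
  demo (void *aux)
  {
    /* aux is assumed to point at a pred stored there earlier.  */
    return ((class pred *) aux)->value;   /* same as (pred *) aux */
  }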
static void
compute_bb_predicates (struct ipa_func_body_info *fbi,
struct cgraph_node *node,
- struct ipa_fn_summary *summary)
+ class ipa_fn_summary *summary)
{
struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
bool done = false;
predicate this_bb_predicate
= *(predicate *) e->src->aux;
if (e->aux)
- this_bb_predicate &= (*(struct predicate *) e->aux);
+ this_bb_predicate &= (*(class predicate *) e->aux);
p = p.or_with (summary->conds, this_bb_predicate);
if (p == true)
break;
static predicate
will_be_nonconstant_expr_predicate (ipa_func_body_info *fbi,
- struct ipa_fn_summary *summary,
+ class ipa_fn_summary *summary,
tree expr,
vec<predicate> nonconstant_names)
{
static predicate
will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
- struct ipa_fn_summary *summary,
+ class ipa_fn_summary *summary,
gimple *stmt,
vec<predicate> nonconstant_names)
{
static basic_block
get_minimal_bb (basic_block init_bb, basic_block use_bb)
{
- struct loop *l = find_common_loop (init_bb->loop_father, use_bb->loop_father);
+ class loop *l = find_common_loop (init_bb->loop_father, use_bb->loop_father);
if (l && l->header->count < init_bb->count)
return l->header;
return init_bb;
NONCONSTANT_NAMES, if possible. */
static void
-predicate_for_phi_result (struct ipa_fn_summary *summary, gphi *phi,
+predicate_for_phi_result (class ipa_fn_summary *summary, gphi *phi,
predicate *p,
vec<predicate> nonconstant_names)
{
basic_block bb;
struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
sreal freq;
- struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
predicate bb_predicate;
struct ipa_func_body_info fbi;
vec<predicate> nonconstant_names = vNULL;
if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
- struct predicate p = bb_predicate & will_be_nonconstant;
+ class predicate p = bb_predicate & will_be_nonconstant;
/* We can ignore statement when we proved it is never going
to happen, but we cannot do that for call statements
if (nonconstant_names.exists () && !early)
{
- struct loop *loop;
+ class loop *loop;
predicate loop_iterations = true;
predicate loop_stride = true;
vec<edge> exits;
edge ex;
unsigned int j;
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
bb_predicate = *(predicate *) loop->header->aux;
exits = get_loop_exit_edges (loop);
{
HOST_WIDE_INT self_stack_size;
struct cgraph_edge *e;
- struct ipa_fn_summary *info;
+ class ipa_fn_summary *info;
gcc_assert (!node->global.inlined_to);
{
tree target;
struct cgraph_node *callee;
- struct ipa_fn_summary *isummary;
+ class ipa_fn_summary *isummary;
enum availability avail;
bool speculative;
vec<ipa_agg_jump_function_p> known_aggs,
ipa_hints *hints)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
int call_size = es->call_stmt_size;
int call_time = es->call_stmt_time;
int cur_size;
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
+ class ipa_call_summary *es = ipa_call_summaries->get_create (e);
/* Do not care about zero sized builtins. */
if (e->inline_failed && !es->call_stmt_size)
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
+ class ipa_call_summary *es = ipa_call_summaries->get_create (e);
if (!es->predicate
|| es->predicate->evaluate (possible_truths))
estimate_edge_size_and_time (e, size,
vec<inline_param_summary>
inline_param_summary)
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
size_time_entry *e;
int size = 0;
sreal time = 0;
if (ipa_node_params_sum)
{
int i;
- struct ipa_edge_args *args = IPA_EDGE_REF (edge);
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
- struct ipa_call_summary *inlined_es
+ class ipa_edge_args *args = IPA_EDGE_REF (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *inlined_es
= ipa_call_summaries->get (inlined_edge);
if (es->param.length () == 0)
static void
remap_edge_summaries (struct cgraph_edge *inlined_edge,
struct cgraph_node *node,
- struct ipa_fn_summary *info,
- struct ipa_fn_summary *callee_info,
+ class ipa_fn_summary *info,
+ class ipa_fn_summary *callee_info,
vec<int> operand_map,
vec<int> offset_map,
clause_t possible_truths,
struct cgraph_edge *e, *next;
for (e = node->callees; e; e = next)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
predicate p;
next = e->next_callee;
}
for (e = node->indirect_calls; e; e = next)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
predicate p;
next = e->next_callee;
/* Same as remap_predicate, but sets the result into hint *HINT. */
static void
-remap_hint_predicate (struct ipa_fn_summary *info,
- struct ipa_fn_summary *callee_info,
+remap_hint_predicate (class ipa_fn_summary *info,
+ class ipa_fn_summary *callee_info,
predicate **hint,
vec<int> operand_map,
vec<int> offset_map,
ipa_fn_summary *callee_info = ipa_fn_summaries->get (edge->callee);
struct cgraph_node *to = (edge->caller->global.inlined_to
? edge->caller->global.inlined_to : edge->caller);
- struct ipa_fn_summary *info = ipa_fn_summaries->get (to);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (to);
clause_t clause = 0; /* not_inline is known to be false. */
size_time_entry *e;
vec<int> operand_map = vNULL;
int i;
predicate toplev_predicate;
predicate true_p = true;
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
if (es->predicate)
toplev_predicate = *es->predicate;
evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL, NULL);
if (ipa_node_params_sum && callee_info->conds)
{
- struct ipa_edge_args *args = IPA_EDGE_REF (edge);
+ class ipa_edge_args *args = IPA_EDGE_REF (edge);
int count = ipa_get_cs_argument_count (args);
int i;
void
ipa_update_overall_fn_summary (struct cgraph_node *node)
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
size_time_entry *e;
int i;
/* Read inline summary for edge E from IB. */
static void
-read_ipa_call_summary (struct lto_input_block *ib, struct cgraph_edge *e,
+read_ipa_call_summary (class lto_input_block *ib, struct cgraph_edge *e,
bool prevails)
{
- struct ipa_call_summary *es = prevails
+ class ipa_call_summary *es = prevails
? ipa_call_summaries->get_create (e) : NULL;
predicate p;
int length, i;
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i, count2, j;
unsigned int f_count;
{
unsigned int index;
struct cgraph_node *node;
- struct ipa_fn_summary *info;
+ class ipa_fn_summary *info;
lto_symtab_encoder_t encoder;
struct bitpack_d bp;
struct cgraph_edge *e;
gcc_assert (!info || !info->size_time_table);
for (j = 0; j < count2; j++)
{
- struct size_time_entry e;
+ class size_time_entry e;
e.size = streamer_read_uhwi (&ib);
e.time = sreal::stream_in (&ib);
static void
write_ipa_call_summary (struct output_block *ob, struct cgraph_edge *e)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
int i;
streamer_write_uhwi (ob, es->call_stmt_size);
cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
if (cnode && cnode->definition && !cnode->alias)
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get (cnode);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (cnode);
struct bitpack_d bp;
struct cgraph_edge *edge;
int i;
static ipa_fn_summary_t *create_ggc (symbol_table *symtab)
{
- struct ipa_fn_summary_t *summary = new (ggc_alloc <ipa_fn_summary_t> ())
+ class ipa_fn_summary_t *summary = new (ggc_alloc <ipa_fn_summary_t> ())
ipa_fn_summary_t (symtab);
summary->disable_insertion_hook ();
return summary;
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i;
unsigned int count;
if ((bb1->loop_father == NULL) != (bb2->loop_father == NULL))
return return_false ();
- struct loop *l1 = bb1->loop_father;
- struct loop *l2 = bb2->loop_father;
+ class loop *l1 = bb1->loop_father;
+ class loop *l2 = bb2->loop_father;
if (l1 == NULL)
return true;
if (ipa_node_params_sum == NULL)
return true;
- struct ipa_node_params *parms_info = IPA_NODE_REF (get_node ());
+ class ipa_node_params *parms_info = IPA_NODE_REF (get_node ());
if (vec_safe_length (parms_info->descriptors) <= i)
return true;
vec<tree> known_vals;
vec<ipa_polymorphic_call_context> known_contexts;
vec<ipa_agg_jump_function_p> known_aggs;
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
int min_size;
callee = edge->callee->ultimate_alias_target ();
estimate_size_after_inlining (struct cgraph_node *node,
struct cgraph_edge *edge)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
ipa_fn_summary *s = ipa_fn_summaries->get (node);
if (!es->predicate || *es->predicate != false)
{
estimate_growth (struct cgraph_node *node)
{
struct growth_data d = { node, false, false, 0 };
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (node);
node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
int growth;
sreal edge_time, unspec_edge_time;
struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
- struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
+ class ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
ipa_hints hints;
cgraph_node *caller = (edge->caller->global.inlined_to
? edge->caller->global.inlined_to
&& (node->has_gimple_body_p () || node->thunk.thunk_p)
&& opt_for_fn (node->decl, optimize))
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (node);
struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
/* Do not account external functions, they will be optimized out
/* Stream in the context from IB and DATA_IN. */
void
-ipa_polymorphic_call_context::stream_in (struct lto_input_block *ib,
- struct data_in *data_in)
+ipa_polymorphic_call_context::stream_in (class lto_input_block *ib,
+ class data_in *data_in)
{
struct bitpack_d bp = streamer_read_bitpack (ib);
for other purposes). */
predicate
-predicate::remap_after_inlining (struct ipa_fn_summary *info,
- struct ipa_fn_summary *callee_info,
+predicate::remap_after_inlining (class ipa_fn_summary *info,
+ class ipa_fn_summary *callee_info,
vec<int> operand_map,
vec<int> offset_map,
clause_t possible_truths,
/* Read predicate from IB. */
void
-predicate::stream_in (struct lto_input_block *ib)
+predicate::stream_in (class lto_input_block *ib)
{
clause_t clause;
int k = 0;
It can be NULL, which means this is not a load from an aggregate. */
predicate
-add_condition (struct ipa_fn_summary *summary, int operand_num,
+add_condition (class ipa_fn_summary *summary, int operand_num,
HOST_WIDE_INT size, struct agg_position_info *aggpos,
enum tree_code code, tree val)
{
predicate remap_after_duplication (clause_t);
/* Return predicate equal to THIS after inlining. */
- predicate remap_after_inlining (struct ipa_fn_summary *,
- struct ipa_fn_summary *,
+ predicate remap_after_inlining (class ipa_fn_summary *,
+ class ipa_fn_summary *,
vec<int>, vec<int>, clause_t, const predicate &);
- void stream_in (struct lto_input_block *);
+ void stream_in (class lto_input_block *);
void stream_out (struct output_block *);
private:
};
void dump_condition (FILE *f, conditions conditions, int cond);
-predicate add_condition (struct ipa_fn_summary *summary, int operand_num,
+predicate add_condition (class ipa_fn_summary *summary, int operand_num,
HOST_WIDE_INT size, struct agg_position_info *aggpos,
enum tree_code code, tree val);
{
const char *data;
size_t len;
- struct lto_input_block *ib
+ class lto_input_block *ib
= lto_create_simple_input_block (file_data,
LTO_section_ipa_profile,
&data, &len);
to INFO. */
int
-ipa_get_param_decl_index (struct ipa_node_params *info, tree ptree)
+ipa_get_param_decl_index (class ipa_node_params *info, tree ptree)
{
return ipa_get_param_decl_index_1 (info->descriptors, ptree);
}
using ipa_initialize_node_params. */
void
-ipa_dump_param (FILE *file, struct ipa_node_params *info, int i)
+ipa_dump_param (FILE *file, class ipa_node_params *info, int i)
{
fprintf (file, "param #%i", i);
if ((*info->descriptors)[i].decl_or_type)
static bool
ipa_alloc_node_params (struct cgraph_node *node, int param_count)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
if (!info->descriptors && param_count)
{
void
ipa_initialize_node_params (struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
if (!info->descriptors
&& ipa_alloc_node_params (node, count_formal_params (node->decl)))
}
}
- struct ipa_polymorphic_call_context *ctx
+ class ipa_polymorphic_call_context *ctx
= ipa_get_ith_polymorhic_call_context (IPA_EDGE_REF (cs), i);
if (ctx && !ctx->useless_p ())
{
for (cs = node->indirect_calls; cs; cs = cs->next_callee)
{
- struct cgraph_indirect_call_info *ii;
+ class cgraph_indirect_call_info *ii;
if (!ipa_edge_args_info_available_for_edge_p (cs))
continue;
static void
compute_complex_assign_jump_func (struct ipa_func_body_info *fbi,
- struct ipa_node_params *info,
+ class ipa_node_params *info,
struct ipa_jump_func *jfunc,
gcall *call, gimple *stmt, tree name,
tree param_type)
static void
compute_complex_ancestor_jump_func (struct ipa_func_body_info *fbi,
- struct ipa_node_params *info,
+ class ipa_node_params *info,
struct ipa_jump_func *jfunc,
gcall *call, gphi *phi)
{
ipa_compute_jump_functions_for_edge (struct ipa_func_body_info *fbi,
struct cgraph_edge *cs)
{
- struct ipa_node_params *info = IPA_NODE_REF (cs->caller);
- struct ipa_edge_args *args = IPA_EDGE_REF (cs);
+ class ipa_node_params *info = IPA_NODE_REF (cs->caller);
+ class ipa_edge_args *args = IPA_EDGE_REF (cs);
gcall *call = cs->call_stmt;
int n, arg_num = gimple_call_num_args (call);
bool useful_context = false;
if (flag_devirtualize && POINTER_TYPE_P (TREE_TYPE (arg)))
{
tree instance;
- struct ipa_polymorphic_call_context context (cs->caller->decl,
+ class ipa_polymorphic_call_context context (cs->caller->decl,
arg, cs->call_stmt,
&instance);
context.get_dynamic_type (instance, arg, NULL, cs->call_stmt,
ipa_analyze_indirect_call_uses (struct ipa_func_body_info *fbi, gcall *call,
tree target)
{
- struct ipa_node_params *info = fbi->info;
+ class ipa_node_params *info = fbi->info;
HOST_WIDE_INT offset;
bool by_ref;
if (TREE_CODE (obj) != SSA_NAME)
return;
- struct ipa_node_params *info = fbi->info;
+ class ipa_node_params *info = fbi->info;
if (SSA_NAME_IS_DEFAULT_DEF (obj))
{
struct ipa_jump_func jfunc;
}
struct cgraph_edge *cs = ipa_note_param_call (fbi->node, index, call);
- struct cgraph_indirect_call_info *ii = cs->indirect_info;
+ class cgraph_indirect_call_info *ii = cs->indirect_info;
ii->offset = anc_offset;
ii->otr_token = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (target));
ii->otr_type = obj_type_ref_class (target);
static bool
visit_ref_for_mod_analysis (gimple *, tree op, tree, void *data)
{
- struct ipa_node_params *info = (struct ipa_node_params *) data;
+ class ipa_node_params *info = (class ipa_node_params *) data;
op = get_base_address (op);
if (op
static void
ipa_analyze_controlled_uses (struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
for (int i = 0; i < ipa_get_param_count (info); i++)
{
ipa_analyze_node (struct cgraph_node *node)
{
struct ipa_func_body_info fbi;
- struct ipa_node_params *info;
+ class ipa_node_params *info;
ipa_check_create_node_params ();
ipa_check_create_edge_args ();
update_jump_functions_after_inlining (struct cgraph_edge *cs,
struct cgraph_edge *e)
{
- struct ipa_edge_args *top = IPA_EDGE_REF (cs);
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
+ class ipa_edge_args *top = IPA_EDGE_REF (cs);
+ class ipa_edge_args *args = IPA_EDGE_REF (e);
int count = ipa_get_cs_argument_count (args);
int i;
for (i = 0; i < count; i++)
{
struct ipa_jump_func *dst = ipa_get_ith_jump_func (args, i);
- struct ipa_polymorphic_call_context *dst_ctx
+ class ipa_polymorphic_call_context *dst_ctx
= ipa_get_ith_polymorhic_call_context (args, i);
if (dst->type == IPA_JF_ANCESTOR)
{
struct ipa_jump_func *src;
int dst_fid = dst->value.ancestor.formal_id;
- struct ipa_polymorphic_call_context *src_ctx
+ class ipa_polymorphic_call_context *src_ctx
= ipa_get_ith_polymorhic_call_context (top, dst_fid);
/* Variable number of arguments can cause havoc if we try to access
if (src_ctx && !src_ctx->useless_p ())
{
- struct ipa_polymorphic_call_context ctx = *src_ctx;
+ class ipa_polymorphic_call_context ctx = *src_ctx;
/* TODO: Make type preserved safe WRT contexts. */
if (!ipa_get_jf_ancestor_type_preserved (dst))
int dst_fid = dst->value.pass_through.formal_id;
src = ipa_get_ith_jump_func (top, dst_fid);
bool dst_agg_p = ipa_get_jf_pass_through_agg_preserved (dst);
- struct ipa_polymorphic_call_context *src_ctx
+ class ipa_polymorphic_call_context *src_ctx
= ipa_get_ith_polymorhic_call_context (top, dst_fid);
if (src_ctx && !src_ctx->useless_p ())
{
- struct ipa_polymorphic_call_context ctx = *src_ctx;
+ class ipa_polymorphic_call_context ctx = *src_ctx;
/* TODO: Make type preserved safe WRT contexts. */
if (!ipa_get_jf_pass_through_type_preserved (dst))
static struct cgraph_edge *
try_make_edge_direct_simple_call (struct cgraph_edge *ie,
struct ipa_jump_func *jfunc, tree target_type,
- struct ipa_node_params *new_root_info)
+ class ipa_node_params *new_root_info)
{
struct cgraph_edge *cs;
tree target;
static struct cgraph_edge *
try_make_edge_direct_virtual_call (struct cgraph_edge *ie,
struct ipa_jump_func *jfunc,
- struct ipa_polymorphic_call_context ctx)
+ class ipa_polymorphic_call_context ctx)
{
tree target = NULL;
bool speculative = false;
struct cgraph_node *node,
vec<cgraph_edge *> *new_edges)
{
- struct ipa_edge_args *top;
+ class ipa_edge_args *top;
struct cgraph_edge *ie, *next_ie, *new_direct_edge;
- struct ipa_node_params *new_root_info, *inlined_node_info;
+ class ipa_node_params *new_root_info, *inlined_node_info;
bool res = false;
ipa_check_create_edge_args ();
for (ie = node->indirect_calls; ie; ie = next_ie)
{
- struct cgraph_indirect_call_info *ici = ie->indirect_info;
+ class cgraph_indirect_call_info *ici = ie->indirect_info;
struct ipa_jump_func *jfunc;
int param_index;
cgraph_node *spec_target = NULL;
static void
propagate_controlled_uses (struct cgraph_edge *cs)
{
- struct ipa_edge_args *args = IPA_EDGE_REF (cs);
+ class ipa_edge_args *args = IPA_EDGE_REF (cs);
struct cgraph_node *new_root = cs->caller->global.inlined_to
? cs->caller->global.inlined_to : cs->caller;
- struct ipa_node_params *new_root_info = IPA_NODE_REF (new_root);
- struct ipa_node_params *old_root_info = IPA_NODE_REF (cs->callee);
+ class ipa_node_params *new_root_info = IPA_NODE_REF (new_root);
+ class ipa_node_params *old_root_info = IPA_NODE_REF (cs->callee);
int count, i;
count = MIN (ipa_get_cs_argument_count (args),
{
struct cgraph_node *inline_root = dst->caller->global.inlined_to
? dst->caller->global.inlined_to : dst->caller;
- struct ipa_node_params *root_info = IPA_NODE_REF (inline_root);
+ class ipa_node_params *root_info = IPA_NODE_REF (inline_root);
int idx = ipa_get_jf_pass_through_formal_id (dst_jf);
int c = ipa_get_controlled_uses (root_info, idx);
ipa_print_node_params (FILE *f, struct cgraph_node *node)
{
int i, count;
- struct ipa_node_params *info;
+ class ipa_node_params *info;
if (!node->definition)
return;
/* Read in jump function JUMP_FUNC from IB. */
static void
-ipa_read_jump_function (struct lto_input_block *ib,
+ipa_read_jump_function (class lto_input_block *ib,
struct ipa_jump_func *jump_func,
struct cgraph_edge *cs,
- struct data_in *data_in,
+ class data_in *data_in,
bool prevails)
{
enum jump_func_type jftype;
ipa_write_indirect_edge_info (struct output_block *ob,
struct cgraph_edge *cs)
{
- struct cgraph_indirect_call_info *ii = cs->indirect_info;
+ class cgraph_indirect_call_info *ii = cs->indirect_info;
struct bitpack_d bp;
streamer_write_hwi (ob, ii->param_index);
relevant to indirect inlining from IB. */
static void
-ipa_read_indirect_edge_info (struct lto_input_block *ib,
- struct data_in *data_in,
+ipa_read_indirect_edge_info (class lto_input_block *ib,
+ class data_in *data_in,
struct cgraph_edge *cs)
{
- struct cgraph_indirect_call_info *ii = cs->indirect_info;
+ class cgraph_indirect_call_info *ii = cs->indirect_info;
struct bitpack_d bp;
ii->param_index = (int) streamer_read_hwi (ib);
{
int node_ref;
lto_symtab_encoder_t encoder;
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int j;
struct cgraph_edge *e;
struct bitpack_d bp;
}
for (e = node->callees; e; e = e->next_callee)
{
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
+ class ipa_edge_args *args = IPA_EDGE_REF (e);
streamer_write_uhwi (ob,
ipa_get_cs_argument_count (args) * 2
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
+ class ipa_edge_args *args = IPA_EDGE_REF (e);
streamer_write_uhwi (ob,
ipa_get_cs_argument_count (args) * 2
/* Stream in edge E from IB. */
static void
-ipa_read_edge_info (struct lto_input_block *ib,
- struct data_in *data_in,
+ipa_read_edge_info (class lto_input_block *ib,
+ class data_in *data_in,
struct cgraph_edge *e, bool prevails)
{
int count = streamer_read_uhwi (ib);
return;
if (prevails && e->possibly_call_in_translation_unit_p ())
{
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
+ class ipa_edge_args *args = IPA_EDGE_REF (e);
vec_safe_grow_cleared (args->jump_functions, count);
if (contexts_computed)
vec_safe_grow_cleared (args->polymorphic_call_contexts, count);
data_in, prevails);
if (contexts_computed)
{
- struct ipa_polymorphic_call_context ctx;
+ class ipa_polymorphic_call_context ctx;
ctx.stream_in (ib, data_in);
}
}
/* Stream in NODE info from IB. */
static void
-ipa_read_node_info (struct lto_input_block *ib, struct cgraph_node *node,
- struct data_in *data_in)
+ipa_read_node_info (class lto_input_block *ib, struct cgraph_node *node,
+ class data_in *data_in)
{
int k;
struct cgraph_edge *e;
struct bitpack_d bp;
bool prevails = node->prevailing_p ();
- struct ipa_node_params *info = prevails ? IPA_NODE_REF (node) : NULL;
+ class ipa_node_params *info = prevails ? IPA_NODE_REF (node) : NULL;
int param_count = streamer_read_uhwi (ib);
if (prevails)
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i;
unsigned int count;
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i;
unsigned int count;
/* Information about zero/non-zero bits. The pointed to structure is shared
between different jump functions. Use ipa_set_jfunc_bits to set this
field. */
- struct ipa_bits *bits;
+ class ipa_bits *bits;
/* Information about value range, containing valid data only when vr_known is
true. The pointed to structure is shared between different jump
functions. Use ipa_set_jfunc_vr to set this field. */
- struct value_range_base *m_vr;
+ class value_range_base *m_vr;
enum jump_func_type type;
/* Represents a value of a jump function. pass_through is used only in jump
vec<ipa_param_descriptor, va_gc> *descriptors;
/* Pointer to an array of structures describing individual formal
parameters. */
- struct ipcp_param_lattices * GTY((skip)) lattices;
+ class ipcp_param_lattices * GTY((skip)) lattices;
/* Only for versioned nodes this field would not be NULL,
it points to the node that IPA cp cloned from. */
struct cgraph_node * GTY((skip)) ipcp_orig_node;
cgraph_node *node;
/* Its info. */
- struct ipa_node_params *info;
+ class ipa_node_params *info;
/* Information about individual BBs. */
vec<ipa_bb_info> bb_infos;
/* Return the number of formal parameters. */
static inline int
-ipa_get_param_count (struct ipa_node_params *info)
+ipa_get_param_count (class ipa_node_params *info)
{
return vec_safe_length (info->descriptors);
}
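ipa_get_param_count is a thin wrapper over vec_safe_length, which treats a NULL vector pointer as an empty vector; that is why none of these accessors null-check descriptors before asking for its length. Roughly, from vec.h (cited from memory):

  template<typename T, typename A>
  inline unsigned
  vec_safe_length (const vec<T, A, vl_embed> *v)
  {
    return v ? v->length () : 0;   /* NULL counts as empty */
  }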
WPA. */
static inline tree
-ipa_get_param (struct ipa_node_params *info, int i)
+ipa_get_param (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
gcc_checking_assert (!flag_wpa);
to INFO if it is known or NULL if not. */
static inline tree
-ipa_get_type (struct ipa_node_params *info, int i)
+ipa_get_type (class ipa_node_params *info, int i)
{
if (vec_safe_length (info->descriptors) <= (unsigned) i)
return NULL;
to INFO. */
static inline int
-ipa_get_param_move_cost (struct ipa_node_params *info, int i)
+ipa_get_param_move_cost (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
return (*info->descriptors)[i].move_cost;
associated with INFO to VAL. */
static inline void
-ipa_set_param_used (struct ipa_node_params *info, int i, bool val)
+ipa_set_param_used (class ipa_node_params *info, int i, bool val)
{
gcc_checking_assert (info->descriptors);
(*info->descriptors)[i].used = val;
IPA_UNDESCRIBED_USE if there is a use that is not described by these
structures. */
static inline int
-ipa_get_controlled_uses (struct ipa_node_params *info, int i)
+ipa_get_controlled_uses (class ipa_node_params *info, int i)
{
/* FIXME: introducing speculation causes out of bounds access here. */
if (vec_safe_length (info->descriptors) > (unsigned)i)
/* Set the controlled counter of a given parameter. */
static inline void
-ipa_set_controlled_uses (struct ipa_node_params *info, int i, int val)
+ipa_set_controlled_uses (class ipa_node_params *info, int i, int val)
{
gcc_checking_assert (info->descriptors);
(*info->descriptors)[i].controlled_uses = val;
function associated with INFO. */
static inline bool
-ipa_is_param_used (struct ipa_node_params *info, int i)
+ipa_is_param_used (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
return (*info->descriptors)[i].used;
/* Return the number of actual arguments. */
static inline int
-ipa_get_cs_argument_count (struct ipa_edge_args *args)
+ipa_get_cs_argument_count (class ipa_edge_args *args)
{
return vec_safe_length (args->jump_functions);
}
ipa_compute_jump_functions. */
static inline struct ipa_jump_func *
-ipa_get_ith_jump_func (struct ipa_edge_args *args, int i)
+ipa_get_ith_jump_func (class ipa_edge_args *args, int i)
{
return &(*args->jump_functions)[i];
}
/* Returns a pointer to the polymorphic call context for the ith argument.
NULL if contexts are not computed. */
-static inline struct ipa_polymorphic_call_context *
-ipa_get_ith_polymorhic_call_context (struct ipa_edge_args *args, int i)
+static inline class ipa_polymorphic_call_context *
+ipa_get_ith_polymorhic_call_context (class ipa_edge_args *args, int i)
{
if (!args->polymorphic_call_contexts)
return NULL;
ipcp_poly_ctx_values_pool;
template <typename valtype>
-class ipcp_value_source;
+struct ipcp_value_source;
extern object_allocator<ipcp_value_source<tree> > ipcp_sources_pool;
-class ipcp_agg_lattice;
+struct ipcp_agg_lattice;
extern object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
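Note that the change runs in the opposite direction here: ipcp_value_source and ipcp_agg_lattice are PODs, so under the rule stated in the ChangeLog their forward declarations take the struct key, while types with real encapsulation take class. A hypothetical sketch of the convention:

  struct point { int x, y; };   /* plain data, everything public: struct */

  class counter                 /* invariant guarded by access control: class */
  {
  public:
    void bump () { ++m_n; }
  private:
    int m_n = 0;
  };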
void ipa_prop_read_jump_functions (void);
void ipcp_write_transformation_summaries (void);
void ipcp_read_transformation_summaries (void);
-int ipa_get_param_decl_index (struct ipa_node_params *, tree);
-tree ipa_value_from_jfunc (struct ipa_node_params *info,
+int ipa_get_param_decl_index (class ipa_node_params *, tree);
+tree ipa_value_from_jfunc (class ipa_node_params *info,
struct ipa_jump_func *jfunc, tree type);
unsigned int ipcp_transform_function (struct cgraph_node *node);
ipa_polymorphic_call_context ipa_context_from_jfunc (ipa_node_params *,
cgraph_edge *,
int,
ipa_jump_func *);
-void ipa_dump_param (FILE *, struct ipa_node_params *info, int i);
+void ipa_dump_param (FILE *, class ipa_node_params *info, int i);
void ipa_release_body_info (struct ipa_func_body_info *);
tree ipa_get_callee_param_type (struct cgraph_edge *e, int i);
enum malloc_state_e malloc_state;
};
-typedef struct funct_state_d * funct_state;
+typedef class funct_state_d * funct_state;
/* The storage of the funct_state is abstracted because there is the
possibility that it may be desirable to move this to the cgraph
funct_state l;
basic_block this_block;
- l = XCNEW (struct funct_state_d);
+ l = XCNEW (class funct_state_d);
l->pure_const_state = IPA_CONST;
l->state_previously_known = IPA_NEITHER;
l->looping_previously_known = true;
}
else
{
- struct loop *loop;
+ class loop *loop;
scev_initialize ();
FOR_EACH_LOOP (loop, 0)
if (!finite_loop_p (loop))
{
const char *data;
size_t len;
- struct lto_input_block *ib
+ class lto_input_block *ib
= lto_create_simple_input_block (file_data,
LTO_section_ipa_pure_const,
&data, &len);
#define GCC_IPA_REF_H
struct cgraph_node;
-class varpool_node;
-class symtab_node;
+struct varpool_node;
+struct symtab_node;
/* How the reference is done. */
{
const char *data;
size_t len;
- struct lto_input_block *ib
+ class lto_input_block *ib
= lto_create_simple_input_block (file_data,
LTO_section_ipa_reference,
&data, &len);
/* Best split point found. */
-struct split_point best_split_point;
+class split_point best_split_point;
/* Set of basic blocks that are not allowed to dominate a split point. */
/* Dump split point CURRENT. */
static void
-dump_split_point (FILE * file, struct split_point *current)
+dump_split_point (FILE * file, class split_point *current)
{
fprintf (file,
"Split point at BB %i\n"
Parameters are the same as for consider_split. */
static bool
-verify_non_ssa_vars (struct split_point *current, bitmap non_ssa_vars,
+verify_non_ssa_vars (class split_point *current, bitmap non_ssa_vars,
basic_block return_bb)
{
bitmap seen = BITMAP_ALLOC (NULL);
/* For a given split point CURRENT and return block RETURN_BB return 1
if ssa name VAL is set by split part and 0 otherwise. */
static bool
-split_part_set_ssa_name_p (tree val, struct split_point *current,
+split_part_set_ssa_name_p (tree val, class split_point *current,
basic_block return_bb)
{
if (TREE_CODE (val) != SSA_NAME)
See if we can split function here. */
static void
-consider_split (struct split_point *current, bitmap non_ssa_vars,
+consider_split (class split_point *current, bitmap non_ssa_vars,
basic_block return_bb)
{
tree parm;
stack_entry first;
vec<stack_entry> stack = vNULL;
basic_block bb;
- struct split_point current;
+ class split_point current;
current.header_time = overall_time;
current.header_size = overall_size;
/* Split function at SPLIT_POINT. */
static void
-split_function (basic_block return_bb, struct split_point *split_point,
+split_function (basic_block return_bb, class split_point *split_point,
bool add_tsan_func_exit)
{
vec<tree> args_to_pass = vNULL;
loop designating the whole function when CFG loops are not
built. */
static void
-add_loop_to_tree (struct loop *loop)
+add_loop_to_tree (class loop *loop)
{
int loop_num;
- struct loop *parent;
+ class loop *parent;
ira_loop_tree_node_t loop_node, parent_node;
/* We cannot use loop node access macros here because of potential
form_loop_tree (void)
{
basic_block bb;
- struct loop *parent;
+ class loop *parent;
ira_loop_tree_node_t bb_node, loop_node;
/* We cannot use loop/bb node access macros because of potential
form a region from such a loop if the target uses stack registers
because reg-stack.c cannot deal with such edges. */
static bool
-loop_with_complex_edge_p (struct loop *loop)
+loop_with_complex_edge_p (class loop *loop)
{
int i;
edge_iterator ei;
ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
rtx x;
bitmap_iterator bi;
- struct ira_spilled_reg_stack_slot *slot = NULL;
+ class ira_spilled_reg_stack_slot *slot = NULL;
ira_assert (! ira_use_lra_p);
void
ira_mark_new_stack_slot (rtx x, int regno, poly_uint64 total_size)
{
- struct ira_spilled_reg_stack_slot *slot;
+ class ira_spilled_reg_stack_slot *slot;
int slot_num;
ira_allocno_t allocno;
/* The node represents a basic block if children == NULL. */
basic_block bb; /* NULL for loop. */
/* NULL for BB or for loop tree root if we did not build CFG loop tree. */
- struct loop *loop;
+ class loop *loop;
/* NEXT/SUBLOOP_NEXT is the next node/loop-node of the same parent.
SUBLOOP_NEXT is always NULL for BBs. */
ira_loop_tree_node_t subloop_next, next;
/* The following array contains info about spilled pseudo-registers
stack slots used in current function so far. */
-extern struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
+extern class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
/* Correspondingly overall cost of the allocation, cost of the
allocnos assigned to hard-registers, cost of the allocnos assigned
bool x_ira_prohibited_mode_move_regs_initialized_p;
};
-extern struct target_ira_int default_target_ira_int;
+extern class target_ira_int default_target_ira_int;
#if SWITCHABLE_TARGET
-extern struct target_ira_int *this_target_ira_int;
+extern class target_ira_int *this_target_ira_int;
#else
#define this_target_ira_int (&default_target_ira_int)
#endif
#include "print-rtl.h"
struct target_ira default_target_ira;
-struct target_ira_int default_target_ira_int;
+class target_ira_int default_target_ira_int;
#if SWITCHABLE_TARGET
struct target_ira *this_target_ira = &default_target_ira;
-struct target_ira_int *this_target_ira_int = &default_target_ira_int;
+class target_ira_int *this_target_ira_int = &default_target_ira_int;
#endif
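The extern declarations in the ira-int.h hunk and the definitions in the ira.c hunk above change in lockstep, since an object declaration is one more place an elaborated type specifier can appear. A sketch with hypothetical names:

  class target_state { public: int n_regs; };
  extern class target_state default_state;  /* header-style declaration  */
  class target_state default_state;         /* matching definition  */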
/* A modified value of flag `-fira-verbose' used internally. */
/* The following array contains info about spilled pseudo-registers
stack slots used in current function so far. */
-struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
+class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
/* Correspondingly overall cost of the allocation, overall cost before
reload, cost of the allocnos assigned to hard-registers, cost of
/* Print chain C to FILE. */
static void
-print_insn_chain (FILE *file, struct insn_chain *c)
+print_insn_chain (FILE *file, class insn_chain *c)
{
fprintf (file, "insn=%d, ", INSN_UID (c->insn));
bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
static void
print_insn_chains (FILE *file)
{
- struct insn_chain *c;
+ class insn_chain *c;
for (c = reload_insn_chain; c ; c = c->next)
print_insn_chain (file, c);
}
build_insn_chain (void)
{
unsigned int i;
- struct insn_chain **p = &reload_insn_chain;
+ class insn_chain **p = &reload_insn_chain;
basic_block bb;
- struct insn_chain *c = NULL;
- struct insn_chain *next = NULL;
+ class insn_chain *c = NULL;
+ class insn_chain *next = NULL;
auto_bitmap live_relevant_regs;
auto_bitmap elim_regset;
/* live_subregs is a vector used to keep accurate information about
{
ira_spilled_reg_stack_slots_num = 0;
ira_spilled_reg_stack_slots
- = ((struct ira_spilled_reg_stack_slot *)
+ = ((class ira_spilled_reg_stack_slot *)
ira_allocate (max_regno
- * sizeof (struct ira_spilled_reg_stack_slot)));
+ * sizeof (class ira_spilled_reg_stack_slot)));
memset ((void *)ira_spilled_reg_stack_slots, 0,
- max_regno * sizeof (struct ira_spilled_reg_stack_slot));
+ max_regno * sizeof (class ira_spilled_reg_stack_slot));
}
}
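In the allocation just above, the key is also rewritten inside sizeof and the cast; neither the computed size nor the layout changes, because both keys denote one and the same type. A compile-time sketch with a hypothetical type:

  class stack_slot { public: int num; };
  /* One type either way; -Wmismatched-tags flags only the spelling.  */
  static_assert (sizeof (struct stack_slot) == sizeof (class stack_slot),
                 "same type, same size");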
allocate_initial_values ();
describes the number of iterations of the loop. */
static bool
-doloop_valid_p (struct loop *loop, struct niter_desc *desc)
+doloop_valid_p (class loop *loop, class niter_desc *desc)
{
basic_block *body = get_loop_body (loop), bb;
rtx_insn *insn;
DOLOOP_SEQ. COUNT is the number of iterations of the LOOP. */
static void
-doloop_modify (struct loop *loop, struct niter_desc *desc,
+doloop_modify (class loop *loop, class niter_desc *desc,
rtx_insn *doloop_seq, rtx condition, rtx count)
{
rtx counter_reg;
modified. */
static bool
-doloop_optimize (struct loop *loop)
+doloop_optimize (class loop *loop)
{
scalar_int_mode mode;
rtx doloop_reg;
unsigned level;
HOST_WIDE_INT est_niter;
int max_cost;
- struct niter_desc *desc;
+ class niter_desc *desc;
unsigned word_mode_size;
unsigned HOST_WIDE_INT word_mode_max;
int entered_at_top;
void
doloop_optimize_loops (void)
{
- struct loop *loop;
+ class loop *loop;
if (optimize == 1)
{
void
loop_optimizer_finalize (struct function *fn)
{
- struct loop *loop;
+ class loop *loop;
basic_block bb;
timevar_push (TV_LOOP_FINI);
{
basic_block bb;
int record_exits = 0;
- struct loop *loop;
+ class loop *loop;
unsigned old_nloops, i;
timevar_push (TV_LOOP_INIT);
while (loop->inner)
{
- struct loop *ploop = loop->inner;
+ class loop *ploop = loop->inner;
flow_loop_tree_node_remove (ploop);
flow_loop_tree_node_add (loop_outer (loop), ploop);
}
class loop_data
{
public:
- struct loop *outermost_exit; /* The outermost exit of the loop. */
+ class loop *outermost_exit; /* The outermost exit of the loop. */
bool has_call; /* True if the loop contains a call. */
/* Maximal register pressure inside loop for given register class
(defined only for the pressure classes). */
bitmap_head regs_live;
};
-#define LOOP_DATA(LOOP) ((struct loop_data *) (LOOP)->aux)
+#define LOOP_DATA(LOOP) ((class loop_data *) (LOOP)->aux)
/* The description of a use. */
};
/* Currently processed loop. */
-static struct loop *curr_loop;
+static class loop *curr_loop;
/* Table of invariants indexed by the df_ref uid field. */
get_loop_body_in_dom_order. */
static void
-compute_always_reached (struct loop *loop, basic_block *body,
+compute_always_reached (class loop *loop, basic_block *body,
bitmap may_exit, bitmap always_reached)
{
unsigned i;
additionally mark blocks that may exit due to a call. */
static void
-find_exits (struct loop *loop, basic_block *body,
+find_exits (class loop *loop, basic_block *body,
bitmap may_exit, bitmap has_exit)
{
unsigned i;
edge_iterator ei;
edge e;
- struct loop *outermost_exit = loop, *aexit;
+ class loop *outermost_exit = loop, *aexit;
bool has_call = false;
rtx_insn *insn;
if (loop->aux == NULL)
{
- loop->aux = xcalloc (1, sizeof (struct loop_data));
+ loop->aux = xcalloc (1, sizeof (class loop_data));
bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
}
BODY. */
static void
-find_defs (struct loop *loop)
+find_defs (class loop *loop)
{
if (dump_file)
{
ends due to a function call. */
static void
-find_invariants_body (struct loop *loop, basic_block *body,
+find_invariants_body (class loop *loop, basic_block *body,
bitmap always_reached, bitmap always_executed)
{
unsigned i;
/* Finds invariants in LOOP. */
static void
-find_invariants (struct loop *loop)
+find_invariants (class loop *loop)
{
auto_bitmap may_exit;
auto_bitmap always_reached;
the block preceding its header. */
static bool
-can_move_invariant_reg (struct loop *loop, struct invariant *inv, rtx reg)
+can_move_invariant_reg (class loop *loop, struct invariant *inv, rtx reg)
{
df_ref def, use;
unsigned int dest_regno, defs_in_loop_count = 0;
otherwise. */
static bool
-move_invariant_reg (struct loop *loop, unsigned invno)
+move_invariant_reg (class loop *loop, unsigned invno)
{
struct invariant *inv = invariants[invno];
struct invariant *repr = invariants[inv->eqto];
in TEMPORARY_REGS. */
static void
-move_invariants (struct loop *loop)
+move_invariants (class loop *loop)
{
struct invariant *inv;
unsigned i;
/* Move the invariants out of the LOOP. */
static void
-move_single_loop_invariants (struct loop *loop)
+move_single_loop_invariants (class loop *loop)
{
init_inv_motion_data ();
/* Releases the auxiliary data for LOOP. */
static void
-free_loop_data (struct loop *loop)
+free_loop_data (class loop *loop)
{
- struct loop_data *data = LOOP_DATA (loop);
+ class loop_data *data = LOOP_DATA (loop);
if (!data)
return;
static void
mark_regno_live (int regno)
{
- struct loop *loop;
+ class loop *loop;
for (loop = curr_loop;
loop != current_loops->tree_root;
code = GET_CODE (x);
if (code == REG)
{
- struct loop *loop;
+ class loop *loop;
for (loop = curr_loop;
loop != current_loops->tree_root;
basic_block bb;
rtx_insn *insn;
rtx link;
- struct loop *loop, *parent;
+ class loop *loop, *parent;
FOR_EACH_LOOP (loop, 0)
if (loop->aux == NULL)
{
- loop->aux = xcalloc (1, sizeof (struct loop_data));
+ loop->aux = xcalloc (1, sizeof (class loop_data));
bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
}
void
move_loop_invariants (void)
{
- struct loop *loop;
+ class loop *loop;
if (optimize == 1)
df_live_add_problem ();
{
public:
unsigned regno; /* The register of the biv. */
- struct rtx_iv iv; /* Value of the biv. */
+ class rtx_iv iv; /* Value of the biv. */
};
static bool clean_slate = true;
static unsigned int iv_ref_table_size = 0;
/* Table of rtx_ivs indexed by the df_ref uid field. */
-static struct rtx_iv ** iv_ref_table;
+static class rtx_iv ** iv_ref_table;
/* Induction variable stored at the reference. */
#define DF_REF_IV(REF) iv_ref_table[DF_REF_ID (REF)]
/* The current loop. */
-static struct loop *current_loop;
+static class loop *current_loop;
/* Hashtable helper. */
static hash_table<biv_entry_hasher> *bivs;
-static bool iv_analyze_op (rtx_insn *, scalar_int_mode, rtx, struct rtx_iv *);
+static bool iv_analyze_op (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);
/* Return the RTX code corresponding to the IV extend code EXTEND. */
static inline enum rtx_code
/* Dumps information about IV to FILE. */
-extern void dump_iv_info (FILE *, struct rtx_iv *);
+extern void dump_iv_info (FILE *, class rtx_iv *);
void
-dump_iv_info (FILE *file, struct rtx_iv *iv)
+dump_iv_info (FILE *file, class rtx_iv *iv)
{
if (!iv->base)
{
if (iv_ref_table_size < DF_DEFS_TABLE_SIZE ())
{
unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
- iv_ref_table = XRESIZEVEC (struct rtx_iv *, iv_ref_table, new_size);
+ iv_ref_table = XRESIZEVEC (class rtx_iv *, iv_ref_table, new_size);
memset (&iv_ref_table[iv_ref_table_size], 0,
- (new_size - iv_ref_table_size) * sizeof (struct rtx_iv *));
+ (new_size - iv_ref_table_size) * sizeof (class rtx_iv *));
iv_ref_table_size = new_size;
}
}
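Because the two keys denote the same type, the signature rewrites throughout this file (iv_constant, iv_analyze_op, and the rest) introduce no overloads and change no linkage; a declaration and its definition may even disagree on the key and still name one function. Sketch on a hypothetical type:

  class iv_like;
  static bool analyze (struct iv_like *);  /* old spelling of the declaration  */
  static bool analyze (class iv_like *p)   /* same function, new spelling  */
  { return p != 0; }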
clear_iv_info (void)
{
unsigned i, n_defs = DF_DEFS_TABLE_SIZE ();
- struct rtx_iv *iv;
+ class rtx_iv *iv;
check_iv_ref_table_size ();
for (i = 0; i < n_defs; i++)
/* Prepare the data for an induction variable analysis of a LOOP. */
void
-iv_analysis_loop_init (struct loop *loop)
+iv_analysis_loop_init (class loop *loop)
{
current_loop = loop;
{
df_ref single_rd = NULL, adef;
unsigned regno = REGNO (reg);
- struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);
+ class df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);
for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = DF_REF_NEXT_REG (adef))
{
consistency with other iv manipulation functions that may fail). */
static bool
-iv_constant (struct rtx_iv *iv, scalar_int_mode mode, rtx cst)
+iv_constant (class rtx_iv *iv, scalar_int_mode mode, rtx cst)
{
iv->mode = mode;
iv->base = cst;
/* Evaluates application of subreg to MODE on IV. */
static bool
-iv_subreg (struct rtx_iv *iv, scalar_int_mode mode)
+iv_subreg (class rtx_iv *iv, scalar_int_mode mode)
{
/* If iv is invariant, just calculate the new value. */
if (iv->step == const0_rtx
/* Evaluates application of EXTEND to MODE on IV. */
static bool
-iv_extend (struct rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
+iv_extend (class rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
{
/* If iv is invariant, just calculate the new value. */
if (iv->step == const0_rtx
/* Evaluates negation of IV. */
static bool
-iv_neg (struct rtx_iv *iv)
+iv_neg (class rtx_iv *iv)
{
if (iv->extend == IV_UNKNOWN_EXTEND)
{
/* Evaluates addition or subtraction (according to OP) of IV1 to IV0. */
static bool
-iv_add (struct rtx_iv *iv0, struct rtx_iv *iv1, enum rtx_code op)
+iv_add (class rtx_iv *iv0, class rtx_iv *iv1, enum rtx_code op)
{
scalar_int_mode mode;
rtx arg;
/* Evaluates multiplication of IV by constant CST. */
static bool
-iv_mult (struct rtx_iv *iv, rtx mby)
+iv_mult (class rtx_iv *iv, rtx mby)
{
scalar_int_mode mode = iv->extend_mode;
/* Evaluates shift of IV by constant CST. */
static bool
-iv_shift (struct rtx_iv *iv, rtx mby)
+iv_shift (class rtx_iv *iv, rtx mby)
{
scalar_int_mode mode = iv->extend_mode;
/* Records information that DEF is induction variable IV. */
static void
-record_iv (df_ref def, struct rtx_iv *iv)
+record_iv (df_ref def, class rtx_iv *iv)
{
- struct rtx_iv *recorded_iv = XNEW (struct rtx_iv);
+ class rtx_iv *recorded_iv = XNEW (class rtx_iv);
*recorded_iv = *iv;
check_iv_ref_table_size ();
IV and return true. Otherwise return false. */
static bool
-analyzed_for_bivness_p (rtx def, struct rtx_iv *iv)
+analyzed_for_bivness_p (rtx def, class rtx_iv *iv)
{
- struct biv_entry *biv = bivs->find_with_hash (def, REGNO (def));
+ class biv_entry *biv = bivs->find_with_hash (def, REGNO (def));
if (!biv)
return false;
}
static void
-record_biv (rtx def, struct rtx_iv *iv)
+record_biv (rtx def, class rtx_iv *iv)
{
- struct biv_entry *biv = XNEW (struct biv_entry);
+ class biv_entry *biv = XNEW (class biv_entry);
biv_entry **slot = bivs->find_slot_with_hash (def, REGNO (def), INSERT);
biv->regno = REGNO (def);
to *IV. OUTER_MODE is the mode of DEF. */
static bool
-iv_analyze_biv (scalar_int_mode outer_mode, rtx def, struct rtx_iv *iv)
+iv_analyze_biv (scalar_int_mode outer_mode, rtx def, class rtx_iv *iv)
{
rtx inner_step, outer_step;
scalar_int_mode inner_mode;
bool
iv_analyze_expr (rtx_insn *insn, scalar_int_mode mode, rtx rhs,
- struct rtx_iv *iv)
+ class rtx_iv *iv)
{
rtx mby = NULL_RTX;
rtx op0 = NULL_RTX, op1 = NULL_RTX;
- struct rtx_iv iv0, iv1;
+ class rtx_iv iv0, iv1;
enum rtx_code code = GET_CODE (rhs);
scalar_int_mode omode = mode;
/* Analyzes iv DEF and stores the result to *IV. */
static bool
-iv_analyze_def (df_ref def, struct rtx_iv *iv)
+iv_analyze_def (df_ref def, class rtx_iv *iv)
{
rtx_insn *insn = DF_REF_INSN (def);
rtx reg = DF_REF_REG (def);
mode of OP. */
static bool
-iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, struct rtx_iv *iv)
+iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, class rtx_iv *iv)
{
df_ref def = NULL;
enum iv_grd_result res;
mode of VAL. */
bool
-iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, struct rtx_iv *iv)
+iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, class rtx_iv *iv)
{
rtx reg;
/* Analyzes definition of DEF in INSN and stores the result to IV. */
bool
-iv_analyze_result (rtx_insn *insn, rtx def, struct rtx_iv *iv)
+iv_analyze_result (rtx_insn *insn, rtx def, class rtx_iv *iv)
{
df_ref adef;
bool
biv_p (rtx_insn *insn, scalar_int_mode mode, rtx reg)
{
- struct rtx_iv iv;
+ class rtx_iv iv;
df_ref def, last_def;
if (!simple_reg_p (reg))
/* Calculates value of IV at ITERATION-th iteration. */
rtx
-get_iv_value (struct rtx_iv *iv, rtx iteration)
+get_iv_value (class rtx_iv *iv, rtx iteration)
{
rtx val;
is a list, its elements are assumed to be combined using OP. */
static void
-simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
+simplify_using_initial_values (class loop *loop, enum rtx_code op, rtx *expr)
{
bool expression_valid;
rtx head, tail, last_valid_expr;
is SIGNED_P to DESC. */
static void
-shorten_into_mode (struct rtx_iv *iv, scalar_int_mode mode,
- enum rtx_code cond, bool signed_p, struct niter_desc *desc)
+shorten_into_mode (class rtx_iv *iv, scalar_int_mode mode,
+ enum rtx_code cond, bool signed_p, class niter_desc *desc)
{
rtx mmin, mmax, cond_over, cond_under;
some assumptions to DESC). */
static bool
-canonicalize_iv_subregs (struct rtx_iv *iv0, struct rtx_iv *iv1,
- enum rtx_code cond, struct niter_desc *desc)
+canonicalize_iv_subregs (class rtx_iv *iv0, class rtx_iv *iv1,
+ enum rtx_code cond, class niter_desc *desc)
{
scalar_int_mode comp_mode;
bool signed_p;
expression for the number of iterations, before we tried to simplify it. */
static uint64_t
-determine_max_iter (struct loop *loop, struct niter_desc *desc, rtx old_niter)
+determine_max_iter (class loop *loop, class niter_desc *desc, rtx old_niter)
{
rtx niter = desc->niter_expr;
rtx mmin, mmax, cmp;
(basically its rtl version), complicated by things like subregs. */
static void
-iv_number_of_iterations (struct loop *loop, rtx_insn *insn, rtx condition,
- struct niter_desc *desc)
+iv_number_of_iterations (class loop *loop, rtx_insn *insn, rtx condition,
+ class niter_desc *desc)
{
rtx op0, op1, delta, step, bound, may_xform, tmp, tmp0, tmp1;
- struct rtx_iv iv0, iv1;
+ class rtx_iv iv0, iv1;
rtx assumption, may_not_xform;
enum rtx_code cond;
machine_mode nonvoid_mode;
into DESC. */
static void
-check_simple_exit (struct loop *loop, edge e, struct niter_desc *desc)
+check_simple_exit (class loop *loop, edge e, class niter_desc *desc)
{
basic_block exit_bb;
rtx condition;
/* Finds a simple exit of LOOP and stores its description into DESC. */
void
-find_simple_exit (struct loop *loop, struct niter_desc *desc)
+find_simple_exit (class loop *loop, class niter_desc *desc)
{
unsigned i;
basic_block *body;
edge e;
- struct niter_desc act;
+ class niter_desc act;
bool any = false;
edge_iterator ei;
/* Creates a simple loop description of LOOP if it was not computed
already. */
-struct niter_desc *
-get_simple_loop_desc (struct loop *loop)
+class niter_desc *
+get_simple_loop_desc (class loop *loop)
{
- struct niter_desc *desc = simple_loop_desc (loop);
+ class niter_desc *desc = simple_loop_desc (loop);
if (desc)
return desc;
/* Releases simple loop description for LOOP. */
void
-free_simple_loop_desc (struct loop *loop)
+free_simple_loop_desc (class loop *loop)
{
- struct niter_desc *desc = simple_loop_desc (loop);
+ class niter_desc *desc = simple_loop_desc (loop);
if (!desc)
return;
basic_block loop_preheader; /* The loop preheader basic block. */
};
-static void decide_unroll_stupid (struct loop *, int);
-static void decide_unroll_constant_iterations (struct loop *, int);
-static void decide_unroll_runtime_iterations (struct loop *, int);
-static void unroll_loop_stupid (struct loop *);
+static void decide_unroll_stupid (class loop *, int);
+static void decide_unroll_constant_iterations (class loop *, int);
+static void decide_unroll_runtime_iterations (class loop *, int);
+static void unroll_loop_stupid (class loop *);
static void decide_unrolling (int);
-static void unroll_loop_constant_iterations (struct loop *);
-static void unroll_loop_runtime_iterations (struct loop *);
-static struct opt_info *analyze_insns_in_loop (struct loop *);
+static void unroll_loop_constant_iterations (class loop *);
+static void unroll_loop_runtime_iterations (class loop *);
+static struct opt_info *analyze_insns_in_loop (class loop *);
static void opt_info_start_duplication (struct opt_info *);
static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool);
static void free_opt_info (struct opt_info *);
-static struct var_to_expand *analyze_insn_to_expand_var (struct loop*, rtx_insn *);
-static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx, int *);
+static struct var_to_expand *analyze_insn_to_expand_var (class loop*, rtx_insn *);
+static bool referenced_in_one_insn_in_loop_p (class loop *, rtx, int *);
static struct iv_to_split *analyze_iv_to_split_insn (rtx_insn *);
static void expand_var_during_unrolling (struct var_to_expand *, rtx_insn *);
static void insert_var_expansion_initialization (struct var_to_expand *,
appropriate given the dump or -fopt-info settings. */
static void
-report_unroll (struct loop *loop, dump_location_t locus)
+report_unroll (class loop *loop, dump_location_t locus)
{
dump_flags_t report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS;
static void
decide_unrolling (int flags)
{
- struct loop *loop;
+ class loop *loop;
/* Scan the loops, inner ones first. */
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
void
unroll_loops (int flags)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
/* Now decide rest of unrolling. */
/* Check whether exit of the LOOP is at the end of loop body. */
static bool
-loop_exit_at_end_p (struct loop *loop)
+loop_exit_at_end_p (class loop *loop)
{
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
rtx_insn *insn;
/* We should never have a conditional in the latch block. */
and how much. */
static void
-decide_unroll_constant_iterations (struct loop *loop, int flags)
+decide_unroll_constant_iterations (class loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
- struct niter_desc *desc;
+ class niter_desc *desc;
widest_int iterations;
/* If we were not asked to unroll this loop, just return silently. */
}
*/
static void
-unroll_loop_constant_iterations (struct loop *loop)
+unroll_loop_constant_iterations (class loop *loop)
{
unsigned HOST_WIDE_INT niter;
unsigned exit_mod;
unsigned i;
edge e;
unsigned max_unroll = loop->lpt_decision.times;
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
bool exit_at_end = loop_exit_at_end_p (loop);
struct opt_info *opt_info = NULL;
bool ok;
/* Decide whether to unroll LOOP iterating a runtime-computable number of times
and how much. */
static void
-decide_unroll_runtime_iterations (struct loop *loop, int flags)
+decide_unroll_runtime_iterations (class loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, i;
- struct niter_desc *desc;
+ class niter_desc *desc;
widest_int iterations;
/* If we were not asked to unroll this loop, just return silently. */
}
*/
static void
-unroll_loop_runtime_iterations (struct loop *loop)
+unroll_loop_runtime_iterations (class loop *loop)
{
rtx old_niter, niter, tmp;
rtx_insn *init_code, *branch_code;
edge e;
bool extra_zero_check, last_may_exit;
unsigned max_unroll = loop->lpt_decision.times;
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
bool exit_at_end = loop_exit_at_end_p (loop);
struct opt_info *opt_info = NULL;
bool ok;
/* Decide whether to unroll LOOP stupidly and how much. */
static void
-decide_unroll_stupid (struct loop *loop, int flags)
+decide_unroll_stupid (class loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, i;
- struct niter_desc *desc;
+ class niter_desc *desc;
widest_int iterations;
/* If we were not asked to unroll this loop, just return silently. */
}
*/
static void
-unroll_loop_stupid (struct loop *loop)
+unroll_loop_stupid (class loop *loop)
{
unsigned nunroll = loop->lpt_decision.times;
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
bool ok;
variable. */
static bool
-referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg,
+referenced_in_one_insn_in_loop_p (class loop *loop, rtx reg,
int *debug_uses)
{
basic_block *body, bb;
/* Reset the DEBUG_USES debug insns in LOOP that reference REG. */
static void
-reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses)
+reset_debug_uses_in_loop (class loop *loop, rtx reg, int debug_uses)
{
basic_block *body, bb;
unsigned i;
*/
static struct var_to_expand *
-analyze_insn_to_expand_var (struct loop *loop, rtx_insn *insn)
+analyze_insn_to_expand_var (class loop *loop, rtx_insn *insn)
{
rtx set, dest, src;
struct var_to_expand *ves;
analyze_iv_to_split_insn (rtx_insn *insn)
{
rtx set, dest;
- struct rtx_iv iv;
+ class rtx_iv iv;
struct iv_to_split *ivts;
scalar_int_mode mode;
bool ok;
is undefined for the return value. */
static struct opt_info *
-analyze_insns_in_loop (struct loop *loop)
+analyze_insns_in_loop (class loop *loop)
{
basic_block *body, bb;
unsigned i;
of eliminating a register in favor of another. If there is more
than one way of eliminating a particular register, the most
preferred should be specified first. */
-static struct lra_elim_table *reg_eliminate = 0;
+static class lra_elim_table *reg_eliminate = 0;
/* This is an intermediate structure to initialize the table. It has
exactly the members provided by ELIMINABLE_REGS. */
static void
print_elim_table (FILE *f)
{
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
{
VALUE. Setup FRAME_POINTER_NEEDED if elimination from frame
pointer to stack pointer is not possible anymore. */
static void
-setup_can_eliminate (struct lra_elim_table *ep, bool value)
+setup_can_eliminate (class lra_elim_table *ep, bool value)
{
ep->can_eliminate = ep->prev_can_eliminate = value;
if (! value
or NULL if none. The elimination table may contain more than
one elimination for the same hard register, but this map specifies
the one that we are currently using. */
-static struct lra_elim_table *elimination_map[FIRST_PSEUDO_REGISTER];
+static class lra_elim_table *elimination_map[FIRST_PSEUDO_REGISTER];
/* When an eliminable hard register becomes not eliminable, we use the
following special structure to restore original offsets for the
register. */
-static struct lra_elim_table self_elim_table;
+static class lra_elim_table self_elim_table;
/* Offsets should be used to restore original offsets for eliminable
hard register which just became not eliminable. Zero,
setup_elimination_map (void)
{
int i;
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
elimination_map[i] = NULL;
int
lra_get_elimination_hard_regno (int hard_regno)
{
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
if (hard_regno < 0 || hard_regno >= FIRST_PSEUDO_REGISTER)
return hard_regno;
/* Return elimination which will be used for hard reg REG, NULL
otherwise. */
-static struct lra_elim_table *
+static class lra_elim_table *
get_elimination (rtx reg)
{
int hard_regno;
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
lra_assert (REG_P (reg));
if ((hard_regno = REGNO (reg)) < 0 || hard_regno >= FIRST_PSEUDO_REGISTER)
poly_int64 update_sp_offset, bool full_p)
{
enum rtx_code code = GET_CODE (x);
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
rtx new_rtx;
int i, j;
const char *fmt;
mark_not_eliminable (rtx x, machine_mode mem_mode)
{
enum rtx_code code = GET_CODE (x);
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
int i, j;
const char *fmt;
poly_int64 offset = 0;
int i;
rtx substed_operand[MAX_RECOG_OPERANDS];
rtx orig_operand[MAX_RECOG_OPERANDS];
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
rtx plus_src, plus_cst_src;
lra_insn_recog_data_t id;
struct lra_static_insn_data *static_id;
update_reg_eliminate (bitmap insns_with_changed_offsets)
{
bool prev, result;
- struct lra_elim_table *ep, *ep1;
+ class lra_elim_table *ep, *ep1;
HARD_REG_SET temp_hard_reg_set;
targetm.compute_frame_layout ();
static void
init_elim_table (void)
{
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
bool value_p;
const struct elim_table_1 *ep1;
if (!reg_eliminate)
- reg_eliminate = XCNEWVEC (struct lra_elim_table, NUM_ELIMINABLE_REGS);
+ reg_eliminate = XCNEWVEC (class lra_elim_table, NUM_ELIMINABLE_REGS);
memset (self_elim_offsets, 0, sizeof (self_elim_offsets));
/* Initialize member values which will never be changed. */
bool stop_to_sp_elimination_p;
basic_block bb;
rtx_insn *insn;
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
init_elim_table ();
FOR_EACH_BB_FN (bb, cfun)
lra_eliminate_reg_if_possible (rtx *loc)
{
int regno;
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
lra_assert (REG_P (*loc));
if ((regno = REGNO (*loc)) >= FIRST_PSEUDO_REGISTER
unsigned int uid;
bitmap_head insns_with_changed_offsets;
bitmap_iterator bi;
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
gcc_assert (! final_p || ! first_p);
};
/* References to the common info about each register. */
-extern struct lra_reg *lra_reg_info;
+extern class lra_reg *lra_reg_info;
extern HARD_REG_SET hard_regs_spilled_into;
struct lra_insn_reg *regs;
};
-typedef struct lra_insn_recog_data *lra_insn_recog_data_t;
+typedef class lra_insn_recog_data *lra_insn_recog_data_t;
/* Whether the clobber is used temporary in LRA. */
#define LRA_TEMP_CLOBBER_P(x) \
};
/* Array for all BB data. Indexed by the corresponding BB index. */
-typedef struct bb_data_pseudos *bb_data_t;
+typedef class bb_data_pseudos *bb_data_t;
/* All basic block data are referred through the following array. */
static bb_data_t bb_data;
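A typedef like bb_data_t above can carry the key inside the alias, so it follows the same matching rule. Sketch on a hypothetical type:

  class bb_record { public: int bb_index; };
  typedef class bb_record *bb_record_t;  /* key matches the definition  */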
{
bitmap_initialize (&all_hard_regs_bitmap, &reg_obstack);
bitmap_set_range (&all_hard_regs_bitmap, 0, FIRST_PSEUDO_REGISTER);
- bb_data = XNEWVEC (struct bb_data_pseudos, last_basic_block_for_fn (cfun));
+ bb_data = XNEWVEC (class bb_data_pseudos, last_basic_block_for_fn (cfun));
bitmap_initialize (&all_blocks, &reg_obstack);
basic_block bb;
};
/* Array for all BB data. Indexed by the corresponding BB index. */
-typedef struct remat_bb_data *remat_bb_data_t;
+typedef class remat_bb_data *remat_bb_data_t;
/* Basic blocks for data flow problems -- all blocks except the special
ones. */
basic_block bb;
remat_bb_data_t bb_info;
- remat_bb_data = XNEWVEC (struct remat_bb_data,
+ remat_bb_data = XNEWVEC (class remat_bb_data,
last_basic_block_for_fn (cfun));
FOR_ALL_BB_FN (bb, cfun)
{
/* Array containing info about the stack slots. The array element is
indexed by the stack slot number in the range [0..slots_num). */
-static struct slot *slots;
+static class slot *slots;
/* The number of the stack slots currently existing. */
static int slots_num;
spill_hard_reg[i] = NULL_RTX;
pseudo_slots[i].mem = NULL_RTX;
}
- slots = XNEWVEC (struct slot, regs_num);
+ slots = XNEWVEC (class slot, regs_num);
/* Sort regnos according to their usage frequencies. */
qsort (pseudo_regnos, n, sizeof (int), regno_freq_compare);
n = assign_spill_hard_regs (pseudo_regnos, n);
/* It might be a new simple insn which is not recognized yet. */
INSN_CODE (insn) = icode = recog_memoized (insn);
}
- data = XNEW (struct lra_insn_recog_data);
+ data = XNEW (class lra_insn_recog_data);
lra_insn_recog_data[uid] = data;
data->insn = insn;
data->used_insn_alternative = LRA_UNKNOWN_ALT;
/* The size of the following array. */
static int reg_info_size;
/* Common info about each register. */
-struct lra_reg *lra_reg_info;
+class lra_reg *lra_reg_info;
HARD_REG_SET hard_regs_spilled_into;
last_reg_value = 0;
reg_info_size = max_reg_num () * 3 / 2 + 1;
- lra_reg_info = XNEWVEC (struct lra_reg, reg_info_size);
+ lra_reg_info = XNEWVEC (class lra_reg, reg_info_size);
for (i = 0; i < reg_info_size; i++)
initialize_lra_reg_info_element (i);
copy_vec.truncate (0);
if (reg_info_size > max_reg_num ())
return;
reg_info_size = max_reg_num () * 3 / 2 + 1;
- lra_reg_info = XRESIZEVEC (struct lra_reg, lra_reg_info, reg_info_size);
+ lra_reg_info = XRESIZEVEC (class lra_reg, lra_reg_info, reg_info_size);
for (i = old; i < reg_info_size; i++)
initialize_lra_reg_info_element (i);
}
/* Return identifier encoded in IB as a plain string. */
static tree
-read_identifier (struct lto_input_block *ib)
+read_identifier (class lto_input_block *ib)
{
unsigned int len = strnlen (ib->data + ib->p, ib->len - ib->p - 1);
tree id;
/* Return string encoded in IB, NULL if string is empty. */
static const char *
-read_string (struct lto_input_block *ib)
+read_string (class lto_input_block *ib)
{
unsigned int len = strnlen (ib->data + ib->p, ib->len - ib->p - 1);
const char *str;
static struct cgraph_node *
input_node (struct lto_file_decl_data *file_data,
- struct lto_input_block *ib,
+ class lto_input_block *ib,
enum LTO_symtab_tags tag,
vec<symtab_node *> nodes)
{
static varpool_node *
input_varpool_node (struct lto_file_decl_data *file_data,
- struct lto_input_block *ib)
+ class lto_input_block *ib)
{
int decl_index;
tree var_decl;
Return the node read or overwritten. */
static void
-input_ref (struct lto_input_block *ib,
+input_ref (class lto_input_block *ib,
symtab_node *referring_node,
vec<symtab_node *> nodes)
{
indirect_unknown_callee set). */
static void
-input_edge (struct lto_input_block *ib, vec<symtab_node *> nodes,
+input_edge (class lto_input_block *ib, vec<symtab_node *> nodes,
bool indirect)
{
struct cgraph_node *caller, *callee;
static vec<symtab_node *>
input_cgraph_1 (struct lto_file_decl_data *file_data,
- struct lto_input_block *ib)
+ class lto_input_block *ib)
{
enum LTO_symtab_tags tag;
vec<symtab_node *> nodes = vNULL;
/* Input ipa_refs. */
static void
-input_refs (struct lto_input_block *ib,
+input_refs (class lto_input_block *ib,
vec<symtab_node *> nodes)
{
int count;
/* Input profile_info from IB. */
static void
-input_profile_summary (struct lto_input_block *ib,
+input_profile_summary (class lto_input_block *ib,
struct lto_file_decl_data *file_data)
{
unsigned int runs = streamer_read_uhwi (ib);
{
const char *data;
size_t len;
- struct lto_input_block *ib;
+ class lto_input_block *ib;
vec<symtab_node *> nodes;
ib = lto_create_simple_input_block (file_data, LTO_section_symtab_nodes,
{
const char *data;
size_t len;
- struct lto_input_block *ib
+ class lto_input_block *ib
= lto_create_simple_input_block (file_data, LTO_section_offload_table,
&data, &len);
if (!ib)
static void
input_edge_opt_summary (struct cgraph_edge *edge ATTRIBUTE_UNUSED,
- struct lto_input_block *ib_main ATTRIBUTE_UNUSED)
+ class lto_input_block *ib_main ATTRIBUTE_UNUSED)
{
}
static void
input_node_opt_summary (struct cgraph_node *node,
- struct lto_input_block *ib_main,
- struct data_in *data_in)
+ class lto_input_block *ib_main,
+ class data_in *data_in)
{
int i;
int count;
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i;
unsigned int count;
raw pointer to the section is returned in DATAR and LEN. These are
used to free the section. Return NULL if the section is not present. */
-struct lto_input_block *
+class lto_input_block *
lto_create_simple_input_block (struct lto_file_decl_data *file_data,
enum lto_section_type section_type,
const char **datar, size_t *len)
void
lto_destroy_simple_input_block (struct lto_file_decl_data *file_data,
enum lto_section_type section_type,
- struct lto_input_block *ib,
+ class lto_input_block *ib,
const char *data, size_t len)
{
delete ib;
/* Report a read past the end of the section. */
void
-lto_section_overrun (struct lto_input_block *ib)
+lto_section_overrun (class lto_input_block *ib)
{
fatal_error (input_location, "bytecode stream: trying to read %d bytes "
"after the end of the input buffer", ib->p - ib->len);
/* Read LENGTH bytes from STREAM to ADDR. */
void
-lto_input_data_block (struct lto_input_block *ib, void *addr, size_t length)
+lto_input_data_block (class lto_input_block *ib, void *addr, size_t length)
{
size_t i;
unsigned char *const buffer = (unsigned char *) addr;
void
lto_location_cache::input_location (location_t *loc, struct bitpack_d *bp,
- struct data_in *data_in)
+ class data_in *data_in)
{
static const char *stream_file;
static int stream_line;
void
lto_input_location (location_t *loc, struct bitpack_d *bp,
- struct data_in *data_in)
+ class data_in *data_in)
{
data_in->location_cache.input_location (loc, bp, data_in);
}
discarded. */
location_t
-stream_input_location_now (struct bitpack_d *bp, struct data_in *data_in)
+stream_input_location_now (struct bitpack_d *bp, class data_in *data_in)
{
location_t loc;
stream_input_location (&loc, bp, data_in);
function scope for the read tree. */
tree
-lto_input_tree_ref (struct lto_input_block *ib, struct data_in *data_in,
+lto_input_tree_ref (class lto_input_block *ib, class data_in *data_in,
struct function *fn, enum LTO_tags tag)
{
unsigned HOST_WIDE_INT ix_u;
block IB, using descriptors in DATA_IN. */
static struct eh_catch_d *
-lto_input_eh_catch_list (struct lto_input_block *ib, struct data_in *data_in,
+lto_input_eh_catch_list (class lto_input_block *ib, class data_in *data_in,
eh_catch *last_p)
{
eh_catch first;
in DATA_IN. */
static eh_region
-input_eh_region (struct lto_input_block *ib, struct data_in *data_in, int ix)
+input_eh_region (class lto_input_block *ib, class data_in *data_in, int ix)
{
enum LTO_tags tag;
eh_region r;
in DATA_IN. */
static eh_landing_pad
-input_eh_lp (struct lto_input_block *ib, struct data_in *data_in, int ix)
+input_eh_lp (class lto_input_block *ib, class data_in *data_in, int ix)
{
enum LTO_tags tag;
eh_landing_pad lp;
in DATA_IN. */
static void
-input_eh_regions (struct lto_input_block *ib, struct data_in *data_in,
+input_eh_regions (class lto_input_block *ib, class data_in *data_in,
struct function *fn)
{
HOST_WIDE_INT i, root_region, len;
/* Read the CFG for function FN from input block IB. */
static void
-input_cfg (struct lto_input_block *ib, struct data_in *data_in,
+input_cfg (class lto_input_block *ib, class data_in *data_in,
struct function *fn)
{
unsigned int bb_count;
continue;
}
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->header = BASIC_BLOCK_FOR_FN (fn, header_index);
loop->header->loop_father = loop;
block IB. */
static void
-input_ssa_names (struct lto_input_block *ib, struct data_in *data_in,
+input_ssa_names (class lto_input_block *ib, class data_in *data_in,
struct function *fn)
{
unsigned int i, size;
using input block IB. */
static void
-input_struct_function_base (struct function *fn, struct data_in *data_in,
- struct lto_input_block *ib)
+input_struct_function_base (struct function *fn, class data_in *data_in,
+ class lto_input_block *ib)
{
struct bitpack_d bp;
int len;
/* Read the body of function FN_DECL from DATA_IN using input block IB. */
static void
-input_function (tree fn_decl, struct data_in *data_in,
- struct lto_input_block *ib, struct lto_input_block *ib_cfg)
+input_function (tree fn_decl, class data_in *data_in,
+ class lto_input_block *ib, class lto_input_block *ib_cfg)
{
struct function *fn;
enum LTO_tags tag;
/* Read the body of function FN_DECL from DATA_IN using input block IB. */
static void
-input_constructor (tree var, struct data_in *data_in,
- struct lto_input_block *ib)
+input_constructor (tree var, class data_in *data_in,
+ class lto_input_block *ib)
{
DECL_INITIAL (var) = stream_read_tree (ib, data_in);
}
const char *data, enum lto_section_type section_type)
{
const struct lto_function_header *header;
- struct data_in *data_in;
+ class data_in *data_in;
int cfg_offset;
int main_offset;
int string_offset;
input block IB using the per-file context in DATA_IN. */
static void
-lto_read_tree_1 (struct lto_input_block *ib, struct data_in *data_in, tree expr)
+lto_read_tree_1 (class lto_input_block *ib, class data_in *data_in, tree expr)
{
/* Read all the bitfield values in EXPR. Note that for LTO, we
only write language-independent bitfields, so no more unpacking is
input block IB using the per-file context in DATA_IN. */
static tree
-lto_read_tree (struct lto_input_block *ib, struct data_in *data_in,
+lto_read_tree (class lto_input_block *ib, class data_in *data_in,
enum LTO_tags tag, hashval_t hash)
{
/* Instantiate a new tree node. */
following in the IB, DATA_IN stream. */
hashval_t
-lto_input_scc (struct lto_input_block *ib, struct data_in *data_in,
+lto_input_scc (class lto_input_block *ib, class data_in *data_in,
unsigned *len, unsigned *entry_len)
{
/* A blob of unnamed tree nodes, fill the cache from it and
to previously read nodes. */
tree
-lto_input_tree_1 (struct lto_input_block *ib, struct data_in *data_in,
+lto_input_tree_1 (class lto_input_block *ib, class data_in *data_in,
enum LTO_tags tag, hashval_t hash)
{
tree result;
}
tree
-lto_input_tree (struct lto_input_block *ib, struct data_in *data_in)
+lto_input_tree (class lto_input_block *ib, class data_in *data_in)
{
enum LTO_tags tag;
const struct lto_simple_header_with_strings *header
= (const struct lto_simple_header_with_strings *) data;
int string_offset;
- struct data_in *data_in;
+ class data_in *data_in;
tree str;
if (! data)
const struct lto_simple_header_with_strings *header
= (const struct lto_simple_header_with_strings *) data;
int string_offset;
- struct data_in *data_in;
+ class data_in *data_in;
string_offset = sizeof (*header) + header->main_size;
lto_input_block ib (data + sizeof (*header), header->main_size, NULL);
table to use with LEN strings. RESOLUTIONS is the vector of linker
resolutions (NULL if not using a linker plugin). */
-struct data_in *
+class data_in *
lto_data_in_create (struct lto_file_decl_data *file_data, const char *strings,
unsigned len,
vec<ld_plugin_symbol_resolution_t> resolutions)
{
- struct data_in *data_in = new (struct data_in);
+ class data_in *data_in = new (class data_in);
data_in->file_data = file_data;
data_in->strings = strings;
data_in->strings_len = len;
/* Remove DATA_IN. */
void
-lto_data_in_delete (struct data_in *data_in)
+lto_data_in_delete (class data_in *data_in)
{
data_in->globals_resolution.release ();
streamer_tree_cache_delete (data_in->reader_cache);
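lto_data_in_create above shows one more spot where the key occurs: the parenthesized type-id of a new-expression. A sketch on a hypothetical type:

  class data_buf { public: unsigned len; };
  void
  demo ()
  {
    data_buf *p = new (class data_buf);  /* same object as 'new data_buf'  */
    delete p;
  }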
/* Output each loop, skipping the tree root which has number zero. */
for (unsigned i = 1; i < number_of_loops (fn); ++i)
{
- struct loop *loop = get_loop (fn, i);
+ class loop *loop = get_loop (fn, i);
/* Write the index of the loop header. That's enough to rebuild
the loop tree on the reader side. Stream -1 for an unused
/* Tree merging did succeed; throw away recent changes. */
void revert_location_cache ();
void input_location (location_t *loc, struct bitpack_d *bp,
- struct data_in *data_in);
+ class data_in *data_in);
lto_location_cache ()
: loc_cache (), accepted_length (0), current_file (NULL), current_line (0),
current_col (0), current_sysp (false), current_loc (UNKNOWN_LOCATION)
/* In lto-section-in.c */
-extern struct lto_input_block * lto_create_simple_input_block (
+extern class lto_input_block * lto_create_simple_input_block (
struct lto_file_decl_data *,
enum lto_section_type, const char **, size_t *);
extern void
lto_destroy_simple_input_block (struct lto_file_decl_data *,
enum lto_section_type,
- struct lto_input_block *, const char *, size_t);
+ class lto_input_block *, const char *, size_t);
extern void lto_set_in_hooks (struct lto_file_decl_data **,
lto_get_section_data_f *,
lto_free_section_data_f *);
struct lto_file_decl_data *, tree);
extern void lto_free_function_in_decl_state (struct lto_in_decl_state *);
extern void lto_free_function_in_decl_state_for_node (symtab_node *);
-extern void lto_section_overrun (struct lto_input_block *) ATTRIBUTE_NORETURN;
+extern void lto_section_overrun (class lto_input_block *) ATTRIBUTE_NORETURN;
extern void lto_value_range_error (const char *,
HOST_WIDE_INT, HOST_WIDE_INT,
HOST_WIDE_INT) ATTRIBUTE_NORETURN;
const char *);
extern void lto_input_toplevel_asms (struct lto_file_decl_data *, int);
extern void lto_input_mode_table (struct lto_file_decl_data *);
-extern struct data_in *lto_data_in_create (struct lto_file_decl_data *,
+extern class data_in *lto_data_in_create (struct lto_file_decl_data *,
const char *, unsigned,
vec<ld_plugin_symbol_resolution_t> );
-extern void lto_data_in_delete (struct data_in *);
-extern void lto_input_data_block (struct lto_input_block *, void *, size_t);
-void lto_input_location (location_t *, struct bitpack_d *, struct data_in *);
+extern void lto_data_in_delete (class data_in *);
+extern void lto_input_data_block (class lto_input_block *, void *, size_t);
+void lto_input_location (location_t *, struct bitpack_d *, class data_in *);
location_t stream_input_location_now (struct bitpack_d *bp,
- struct data_in *data);
-tree lto_input_tree_ref (struct lto_input_block *, struct data_in *,
+ class data_in *data);
+tree lto_input_tree_ref (class lto_input_block *, class data_in *,
struct function *, enum LTO_tags);
void lto_tag_check_set (enum LTO_tags, int, ...);
void lto_init_eh (void);
-hashval_t lto_input_scc (struct lto_input_block *, struct data_in *,
+hashval_t lto_input_scc (class lto_input_block *, class data_in *,
unsigned *, unsigned *);
-tree lto_input_tree_1 (struct lto_input_block *, struct data_in *,
+tree lto_input_tree_1 (class lto_input_block *, class data_in *,
enum LTO_tags, hashval_t hash);
-tree lto_input_tree (struct lto_input_block *, struct data_in *);
+tree lto_input_tree (class lto_input_block *, class data_in *);
/* In lto-streamer-out.c */
void cl_target_option_stream_out (struct output_block *, struct bitpack_d *,
struct cl_target_option *);
-void cl_target_option_stream_in (struct data_in *,
+void cl_target_option_stream_in (class data_in *,
struct bitpack_d *,
struct cl_target_option *);
void cl_optimization_stream_out (struct output_block *,
struct bitpack_d *, struct cl_optimization *);
-void cl_optimization_stream_in (struct data_in *,
+void cl_optimization_stream_in (class data_in *,
struct bitpack_d *, struct cl_optimization *);
+2019-07-09 Martin Sebor <msebor@redhat.com>
+
+ PR c++/61339
+ * lto-common.c (lto_splay_tree_new): Change class-key of PODs
+ to struct and others to class.
+ (mentions_vars_p): Same.
+ (register_resolution): Same.
+ (lto_register_var_decl_in_symtab): Same.
+ (lto_register_function_decl_in_symtab): Same.
+ (cmp_tree): Same.
+ (lto_read_decls): Same.
+
2019-07-09 Martin Sebor <msebor@redhat.com>
PR c++/61339
input. */
static const uint32_t *
-lto_read_in_decl_state (struct data_in *data_in, const uint32_t *data,
+lto_read_in_decl_state (class data_in *data_in, const uint32_t *data,
struct lto_in_decl_state *state)
{
uint32_t ix;
/* Return the resolution for the decl with index INDEX from DATA_IN. */
static enum ld_plugin_symbol_resolution
-get_resolution (struct data_in *data_in, unsigned index)
+get_resolution (class data_in *data_in, unsigned index)
{
if (data_in->globals_resolution.exists ())
{
different files. */
static void
-lto_register_var_decl_in_symtab (struct data_in *data_in, tree decl,
+lto_register_var_decl_in_symtab (class data_in *data_in, tree decl,
unsigned ix)
{
tree context;
file being read. */
static void
-lto_register_function_decl_in_symtab (struct data_in *data_in, tree decl,
+lto_register_function_decl_in_symtab (class data_in *data_in, tree decl,
unsigned ix)
{
/* If this variable has already been declared, queue the
/* Check if T is a decl and needs to register its resolution info. */
static void
-lto_maybe_register_decl (struct data_in *data_in, tree t, unsigned ix)
+lto_maybe_register_decl (class data_in *data_in, tree t, unsigned ix)
{
if (TREE_CODE (t) == VAR_DECL)
lto_register_var_decl_in_symtab (data_in, t, ix);
that was successful, otherwise return false. */
static bool
-unify_scc (struct data_in *data_in, unsigned from,
+unify_scc (class data_in *data_in, unsigned from,
unsigned len, unsigned scc_entry_len, hashval_t scc_hash)
{
bool unified_p = false;
const int decl_offset = sizeof (struct lto_decl_header);
const int main_offset = decl_offset + header->decl_state_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i;
const uint32_t *data_ptr, *data_end;
uint32_t num_decl_states;
static void set_node_sched_params (ddg_ptr);
static partial_schedule_ptr sms_schedule_by_order (ddg_ptr, int, int, int *);
static void permute_partial_schedule (partial_schedule_ptr, rtx_insn *);
-static void generate_prolog_epilog (partial_schedule_ptr, struct loop *,
+static void generate_prolog_epilog (partial_schedule_ptr, class loop *,
rtx, rtx);
static int calculate_stage_count (partial_schedule_ptr, int);
static void calculate_must_precede_follow (ddg_node_ptr, int, int,
/* Generate the instructions (including reg_moves) for prolog & epilog. */
static void
-generate_prolog_epilog (partial_schedule_ptr ps, struct loop *loop,
+generate_prolog_epilog (partial_schedule_ptr ps, class loop *loop,
rtx count_reg, rtx count_init)
{
int i;
/* Mark LOOP as software pipelined so the later
scheduling passes don't touch it. */
static void
-mark_loop_unsched (struct loop *loop)
+mark_loop_unsched (class loop *loop)
{
unsigned i;
basic_block *bbs = get_loop_body (loop);
/* Return true if all the BBs of the loop are empty except the
loop header. */
static bool
-loop_single_full_bb_p (struct loop *loop)
+loop_single_full_bb_p (class loop *loop)
{
unsigned i;
basic_block *bbs = get_loop_body (loop);
/* Return true if the loop is in its canonical form and false if not.
i.e. it satisfies SIMPLE_SMS_LOOP_P and has one preheader block and a single exit. */
static bool
-loop_canon_p (struct loop *loop)
+loop_canon_p (class loop *loop)
{
if (loop->inner || !loop_outer (loop))
make it one by splitting the first entry edge and
redirecting the others to the new BB. */
static void
-canon_loop (struct loop *loop)
+canon_loop (class loop *loop)
{
edge e;
edge_iterator i;
int maxii, max_asap;
partial_schedule_ptr ps;
basic_block bb = NULL;
- struct loop *loop;
+ class loop *loop;
basic_block condition_bb = NULL;
edge latch_edge;
HOST_WIDE_INT trip_count, max_trip_count;
if (e2)
{
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->header = new_header;
loop->latch = e2->src;
add_loop (loop, body_bb->loop_father);
/* We enter expand_omp_for_generic with a loop. This original loop may
have its own loop struct, or it may be part of an outer loop struct
(which may be the fake loop). */
- struct loop *outer_loop = entry_bb->loop_father;
+ class loop *outer_loop = entry_bb->loop_father;
bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop;
add_bb_to_loop (l2_bb, outer_loop);
/* We've added a new loop around the original loop. Allocate the
corresponding loop struct. */
- struct loop *new_loop = alloc_loop ();
+ class loop *new_loop = alloc_loop ();
new_loop->header = l0_bb;
new_loop->latch = l2_bb;
add_loop (new_loop, outer_loop);
if (!orig_loop_has_loop_struct
&& !gimple_omp_for_combined_p (fd->for_stmt))
{
- struct loop *orig_loop = alloc_loop ();
+ class loop *orig_loop = alloc_loop ();
orig_loop->header = l1_bb;
/* The loop may have multiple latches. */
add_loop (orig_loop, new_loop);
set_immediate_dominator (CDI_DOMINATORS, exit3_bb, exit_bb);
}
- struct loop *loop = body_bb->loop_father;
+ class loop *loop = body_bb->loop_father;
if (loop != entry_bb->loop_father)
{
gcc_assert (broken_loop || loop->header == body_bb);
if (!broken_loop)
{
- struct loop *loop = body_bb->loop_father;
- struct loop *trip_loop = alloc_loop ();
+ class loop *loop = body_bb->loop_father;
+ class loop *trip_loop = alloc_loop ();
trip_loop->header = iter_part_bb;
trip_loop->latch = trip_update_bb;
add_loop (trip_loop, iter_part_bb->loop_father);
if (!broken_loop)
{
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->header = l1_bb;
loop->latch = cont_bb;
add_loop (loop, l1_bb->loop_father);
if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
{
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->header = body_bb;
if (collapse_bb == NULL)
loop->latch = cont_bb;
{
/* We now have one, two or three nested loops. Update the loop
structures. */
- struct loop *parent = entry_bb->loop_father;
- struct loop *body = body_bb->loop_father;
+ class loop *parent = entry_bb->loop_father;
+ class loop *body = body_bb->loop_father;
if (chunking)
{
- struct loop *chunk_loop = alloc_loop ();
+ class loop *chunk_loop = alloc_loop ();
chunk_loop->header = head_bb;
chunk_loop->latch = bottom_bb;
add_loop (chunk_loop, parent);
if (parent)
{
- struct loop *body_loop = alloc_loop ();
+ class loop *body_loop = alloc_loop ();
body_loop->header = body_bb;
body_loop->latch = cont_bb;
add_loop (body_loop, parent);
if (fd->tiling)
{
/* Insert tiling's element loop. */
- struct loop *inner_loop = alloc_loop ();
+ class loop *inner_loop = alloc_loop ();
inner_loop->header = elem_body_bb;
inner_loop->latch = elem_cont_bb;
add_loop (inner_loop, body_loop);
/* Remove GIMPLE_OMP_ATOMIC_STORE. */
gsi_remove (&si, true);
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->header = loop_header;
loop->latch = store_bb;
add_loop (loop, loop_header->loop_father);
mark_loops_in_oacc_kernels_region (basic_block region_entry,
basic_block region_exit)
{
- struct loop *outer = region_entry->loop_father;
+ class loop *outer = region_entry->loop_father;
gcc_assert (region_exit == NULL || outer == region_exit->loop_father);
/* Don't parallelize the kernels region if it contains more than one outer
loop. */
unsigned int nr_outer_loops = 0;
- struct loop *single_outer = NULL;
- for (struct loop *loop = outer->inner; loop != NULL; loop = loop->next)
+ class loop *single_outer = NULL;
+ for (class loop *loop = outer->inner; loop != NULL; loop = loop->next)
{
gcc_assert (loop_outer (loop) == outer);
if (nr_outer_loops != 1)
return;
- for (struct loop *loop = single_outer->inner;
+ for (class loop *loop = single_outer->inner;
loop != NULL;
loop = loop->inner)
if (loop->next)
return;
/* Mark the loops in the region. */
- for (struct loop *loop = single_outer; loop != NULL; loop = loop->inner)
+ for (class loop *loop = single_outer; loop != NULL; loop = loop->inner)
loop->in_oacc_kernels_region = true;
}
|| !global_options_set.x_flag_tree_loop_vectorize))
{
basic_block bb = gsi_bb (gsi);
- struct loop *parent = bb->loop_father;
- struct loop *body = parent->inner;
+ class loop *parent = bb->loop_father;
+ class loop *body = parent->inner;
parent->force_vectorize = true;
parent->safelen = INT_MAX;
gimple *g;
basic_block incr_bb = NULL;
- struct loop *loop = NULL;
+ class loop *loop = NULL;
/* Create a new BB right before the original exit BB, to hold the
iteration increment and the condition/branch. */
POS_OP is the operand number of the bit position. */
static bool
-get_optab_extraction_insn (struct extraction_insn *insn,
+get_optab_extraction_insn (class extraction_insn *insn,
enum extraction_type type,
machine_mode mode, direct_optab reg_optab,
direct_optab misalign_optab, int pos_op)
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
rtx target, int unsignedp)
{
- struct expand_operand eops[4];
+ class expand_operand eops[4];
tree oprnd0, oprnd1, oprnd2;
machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
optab widen_pattern_optab;
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
rtx op1, rtx op2, rtx target, int unsignedp)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
enum insn_code icode = optab_handler (ternary_optab, mode);
gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
insn_code icode = optab_handler (vec_duplicate_optab, vmode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], NULL_RTX, vmode);
create_input_operand (&ops[1], op, GET_MODE (op));
expand_insn (icode, 2, ops);
machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
machine_mode mode0, mode1, tmp_mode;
- struct expand_operand ops[3];
+ class expand_operand ops[3];
bool commutative_p;
rtx_insn *pat;
rtx xop0 = op0, xop1 = op1;
if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
enum insn_code icode = optab_handler (unoptab, mode);
create_fixed_operand (&ops[0], targ0);
if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
enum insn_code icode = optab_handler (binoptab, mode);
machine_mode mode0 = insn_data[icode].operand[1].mode;
machine_mode mode1 = insn_data[icode].operand[2].mode;
{
if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
enum insn_code icode = optab_handler (unoptab, mode);
rtx_insn *last = get_last_insn ();
rtx_insn *pat;
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
enum rtx_code code)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
rtx_insn *pat;
create_output_operand (&ops[0], target, GET_MODE (target));
sorry ("indirect jumps are not available on this target");
else
{
- struct expand_operand ops[1];
+ class expand_operand ops[1];
create_address_operand (&ops[0], loc);
expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
emit_barrier ();
OPTAB_WIDEN, &comparison, &cmpmode);
if (comparison)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], comparison);
target = gen_reg_rtx (mode);
rtx_insn *last = get_last_insn ();
- struct expand_operand ops[4];
+ class expand_operand ops[4];
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], cond);
&comparison, &cmode);
if (comparison)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], comparison);
tree t_op0, tree t_op1, bool unsignedp,
enum insn_code icode, unsigned int opno)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
rtx rtx_op0, rtx_op1;
machine_mode m0, m1;
enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
{
machine_mode tmode = GET_MODE (target);
machine_mode smode = GET_MODE (sel);
- struct expand_operand ops[4];
+ class expand_operand ops[4];
gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
|| mode_for_int_vector (tmode).require () == smode);
rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices, shift_optab);
if (shift_amt)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
if (shift_code != CODE_FOR_nothing)
{
create_output_operand (&ops[0], target, mode);
expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
rtx target)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
machine_mode mode = TYPE_MODE (vec_cond_type);
machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
rtx target)
{
- struct expand_operand ops[6];
+ class expand_operand ops[6];
enum insn_code icode;
rtx comparison, rtx_op1, rtx_op2;
machine_mode mode = TYPE_MODE (vec_cond_type);
rtx
expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
enum insn_code icode;
machine_mode emode = GET_MODE_INNER (vmode);
rtx
expand_vec_cmp_expr (tree type, tree exp, rtx target)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
enum insn_code icode;
rtx comparison;
machine_mode mask_mode = TYPE_MODE (type);
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
rtx target, bool uns_p)
{
- struct expand_operand eops[3];
+ class expand_operand eops[3];
enum insn_code icode;
int method, i;
machine_mode wmode;
icode = direct_optab_handler (atomic_exchange_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], mem);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], mem);
create_input_operand (&ops[2], val, mode);
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
machine_mode pat_bool_mode;
- struct expand_operand ops[3];
+ class expand_operand ops[3];
if (!targetm.have_atomic_test_and_set ())
return NULL_RTX;
enum memmodel fail_model)
{
machine_mode mode = GET_MODE (mem);
- struct expand_operand ops[8];
+ class expand_operand ops[8];
enum insn_code icode;
rtx target_oval, target_bool = NULL_RTX;
rtx libfunc;
icode = direct_optab_handler (atomic_load_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
rtx_insn *last = get_last_insn ();
if (is_mm_seq_cst (model))
expand_memory_blockage ();
{
machine_mode mode = GET_MODE (mem);
enum insn_code icode;
- struct expand_operand ops[3];
+ class expand_operand ops[3];
/* If the target supports the store directly, great. */
icode = direct_optab_handler (atomic_store_optab, mode);
rtx val, bool use_memmodel, enum memmodel model, bool after)
{
machine_mode mode = GET_MODE (mem);
- struct expand_operand ops[4];
+ class expand_operand ops[4];
enum insn_code icode;
int op_counter = 0;
int num_ops;
of that rtx if so. */
void
-create_integer_operand (struct expand_operand *op, poly_int64 intval)
+create_integer_operand (class expand_operand *op, poly_int64 intval)
{
create_expand_operand (op, EXPAND_INTEGER,
gen_int_mode (intval, MAX_MODE_INT),
static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
- struct expand_operand *op)
+ class expand_operand *op)
{
/* See if the operand matches in its current form. */
if (insn_operand_matches (icode, opno, op->value))
static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
- struct expand_operand *op)
+ class expand_operand *op)
{
machine_mode mode, imode;
bool old_volatile_ok, result;
TYPE is the type of VALUE. */
void
-create_convert_operand_from_type (struct expand_operand *op,
+create_convert_operand_from_type (class expand_operand *op,
rtx value, tree type)
{
create_convert_operand_from (op, value, TYPE_MODE (type),
static inline bool
can_reuse_operands_p (enum insn_code icode,
unsigned int opno1, unsigned int opno2,
- const struct expand_operand *op1,
- const struct expand_operand *op2)
+ const class expand_operand *op1,
+ const class expand_operand *op2)
{
/* Check requirements that are common to all types. */
if (op1->type != op2->type
bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
- unsigned int nops, struct expand_operand *ops)
+ unsigned int nops, class expand_operand *ops)
{
rtx_insn *last = get_last_insn ();
rtx *orig_values = XALLOCAVEC (rtx, nops);
rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops)
+ class expand_operand *ops)
{
gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
if (!maybe_legitimize_operands (icode, 0, nops, ops))
bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops)
+ class expand_operand *ops)
{
rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
if (pat)
bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops)
+ class expand_operand *ops)
{
rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
if (pat)
void
expand_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops)
+ class expand_operand *ops)
{
if (!maybe_expand_insn (icode, nops, ops))
gcc_unreachable ();
void
expand_jump_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops)
+ class expand_operand *ops)
{
if (!maybe_expand_jump_insn (icode, nops, ops))
gcc_unreachable ();
to their default values. */
static inline void
-create_expand_operand (struct expand_operand *op,
+create_expand_operand (class expand_operand *op,
enum expand_operand_type type,
rtx value, machine_mode mode,
bool unsigned_p, poly_int64 int_value = 0)
/* Make OP describe an operand that must use rtx X, even if X is volatile. */
static inline void
-create_fixed_operand (struct expand_operand *op, rtx x)
+create_fixed_operand (class expand_operand *op, rtx x)
{
create_expand_operand (op, EXPAND_FIXED, x, VOIDmode, false);
}
be ignored in that case. */
static inline void
-create_output_operand (struct expand_operand *op, rtx x,
+create_output_operand (class expand_operand *op, rtx x,
machine_mode mode)
{
create_expand_operand (op, EXPAND_OUTPUT, x, mode, false);
as an operand. */
static inline void
-create_input_operand (struct expand_operand *op, rtx value,
+create_input_operand (class expand_operand *op, rtx value,
machine_mode mode)
{
create_expand_operand (op, EXPAND_INPUT, value, mode, false);
to mode MODE. UNSIGNED_P says whether VALUE is unsigned. */
static inline void
-create_convert_operand_to (struct expand_operand *op, rtx value,
+create_convert_operand_to (class expand_operand *op, rtx value,
machine_mode mode, bool unsigned_p)
{
create_expand_operand (op, EXPAND_CONVERT_TO, value, mode, unsigned_p);
UNSIGNED_P says whether VALUE is unsigned. */
static inline void
-create_convert_operand_from (struct expand_operand *op, rtx value,
+create_convert_operand_from (class expand_operand *op, rtx value,
machine_mode mode, bool unsigned_p)
{
create_expand_operand (op, EXPAND_CONVERT_FROM, value, mode, unsigned_p);
of the address, but it may need to be converted to Pmode first. */
static inline void
-create_address_operand (struct expand_operand *op, rtx value)
+create_address_operand (class expand_operand *op, rtx value)
{
create_expand_operand (op, EXPAND_ADDRESS, value, Pmode, false);
}
-extern void create_integer_operand (struct expand_operand *, poly_int64);
+extern void create_integer_operand (class expand_operand *, poly_int64);
/* Passed to expand_simple_binop and expand_binop to say which options
to try to use if the requested operation can't be open-coded on the
extern bool insn_operand_matches (enum insn_code icode, unsigned int opno,
rtx operand);
extern bool valid_multiword_target_p (rtx);
-extern void create_convert_operand_from_type (struct expand_operand *op,
+extern void create_convert_operand_from_type (class expand_operand *op,
rtx value, tree type);
extern bool maybe_legitimize_operands (enum insn_code icode,
unsigned int opno, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern rtx_insn *maybe_gen_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern bool maybe_expand_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern bool maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern void expand_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern void expand_jump_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern enum rtx_code get_rtx_code (enum tree_code tcode, bool unsignedp);
/* Forward decls. */
-struct opt_pass;
+class opt_pass;
class optinfo_item;
/* Return true if any of the active optinfo destinations make use
#ifndef HAVE_POLY_INT_H
#define HAVE_POLY_INT_H
-template<unsigned int N, typename T> class poly_int_pod;
+template<unsigned int N, typename T> struct poly_int_pod;
template<unsigned int N, typename T> class poly_int;
/* poly_coeff_traits<T> describes the properties of a poly_int
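
Note the direction reverses for poly_int_pod: it is a plain-data type defined with the struct key, so here it is the forward declaration that changes, from class back to struct. The warning is symmetric, as in this sketch (hypothetical names, same hedges as above):

  // Symmetric case: the definition uses 'struct', so declarations must too.
  template<unsigned int N, typename T> struct pod_box;   // OK: matches
  // template<unsigned int N, typename T> class pod_box; // would warn

  template<unsigned int N, typename T> struct pod_box
  {
    T coeffs[N];
  };

  int main ()
  {
    pod_box<2, int> b = {{1, 2}};
    return b.coeffs[0] + b.coeffs[1] == 3 ? 0 : 1;
  }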
enum predictor_reason, edge);
static void predict_paths_leading_to (basic_block, enum br_predictor,
enum prediction,
- struct loop *in_loop = NULL);
+ class loop *in_loop = NULL);
static void predict_paths_leading_to_edge (edge, enum br_predictor,
enum prediction,
- struct loop *in_loop = NULL);
+ class loop *in_loop = NULL);
static bool can_predict_insn_p (const rtx_insn *);
static HOST_WIDE_INT get_predictor_value (br_predictor, HOST_WIDE_INT);
static void determine_unlikely_bbs ();
/* Return TRUE when LOOP should be optimized for size. */
bool
-optimize_loop_for_size_p (struct loop *loop)
+optimize_loop_for_size_p (class loop *loop)
{
return optimize_bb_for_size_p (loop->header);
}
/* Return TRUE when LOOP should be optimized for speed. */
bool
-optimize_loop_for_speed_p (struct loop *loop)
+optimize_loop_for_speed_p (class loop *loop)
{
return optimize_bb_for_speed_p (loop->header);
}
/* Return TRUE when LOOP nest should be optimized for speed. */
bool
-optimize_loop_nest_for_speed_p (struct loop *loop)
+optimize_loop_nest_for_speed_p (class loop *loop)
{
- struct loop *l = loop;
+ class loop *l = loop;
if (optimize_loop_for_speed_p (loop))
return true;
l = loop->inner;
/* Return TRUE when LOOP nest should be optimized for size. */
bool
-optimize_loop_nest_for_size_p (struct loop *loop)
+optimize_loop_nest_for_size_p (class loop *loop)
{
return !optimize_loop_nest_for_speed_p (loop);
}
   Otherwise return false and set LOOP_INVARIANT to NULL. */
static bool
-is_comparison_with_loop_invariant_p (gcond *stmt, struct loop *loop,
+is_comparison_with_loop_invariant_p (gcond *stmt, class loop *loop,
tree *loop_invariant,
enum tree_code *compare_code,
tree *loop_step,
In this loop, we will predict the branch inside the loop to be taken. */
static void
-predict_iv_comparison (struct loop *loop, basic_block bb,
+predict_iv_comparison (class loop *loop, basic_block bb,
tree loop_bound_var,
tree loop_iv_base_var,
enum tree_code loop_bound_code,
static void
predict_loops (void)
{
- struct loop *loop;
+ class loop *loop;
basic_block bb;
- hash_set <struct loop *> with_recursion(10);
+ hash_set <class loop *> with_recursion(10);
FOR_EACH_BB_FN (bb, cfun)
{
basic_block bb, *bbs;
unsigned j, n_exits = 0;
vec<edge> exits;
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
edge ex;
- struct nb_iter_bound *nb_iter;
+ class nb_iter_bound *nb_iter;
enum tree_code loop_bound_code = ERROR_MARK;
tree loop_bound_step = NULL;
tree loop_bound_var = NULL;
predict_paths_for_bb (basic_block cur, basic_block bb,
enum br_predictor pred,
enum prediction taken,
- bitmap visited, struct loop *in_loop = NULL)
+ bitmap visited, class loop *in_loop = NULL)
{
edge e;
edge_iterator ei;
static void
predict_paths_leading_to (basic_block bb, enum br_predictor pred,
- enum prediction taken, struct loop *in_loop)
+ enum prediction taken, class loop *in_loop)
{
predict_paths_for_bb (bb, bb, pred, taken, auto_bitmap (), in_loop);
}
static void
predict_paths_leading_to_edge (edge e, enum br_predictor pred,
- enum prediction taken, struct loop *in_loop)
+ enum prediction taken, class loop *in_loop)
{
bool has_nonloop_edge = false;
edge_iterator ei;
/* Estimate frequencies in loops at same nest level. */
static void
-estimate_loops_at_level (struct loop *first_loop)
+estimate_loops_at_level (class loop *first_loop)
{
- struct loop *loop;
+ class loop *loop;
for (loop = first_loop; loop; loop = loop->next)
{
profile_status_for_fn (fun) = PROFILE_GUESSED;
if (dump_file && (dump_flags & TDF_DETAILS))
{
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
if (loop->header->count.initialized_p ())
fprintf (dump_file, "Loop got predicted %d to iterate %i times.\n",
extern bool optimize_edge_for_speed_p (edge);
extern bool optimize_insn_for_size_p (void);
extern bool optimize_insn_for_speed_p (void);
-extern bool optimize_loop_for_size_p (struct loop *);
-extern bool optimize_loop_for_speed_p (struct loop *);
-extern bool optimize_loop_nest_for_speed_p (struct loop *);
-extern bool optimize_loop_nest_for_size_p (struct loop *);
+extern bool optimize_loop_for_size_p (class loop *);
+extern bool optimize_loop_for_speed_p (class loop *);
+extern bool optimize_loop_nest_for_speed_p (class loop *);
+extern bool optimize_loop_nest_for_size_p (class loop *);
extern bool predictable_edge_p (edge);
extern void rtl_profile_for_bb (basic_block);
extern void rtl_profile_for_edge (edge);
/* Stream THIS from IB. */
profile_count
-profile_count::stream_in (struct lto_input_block *ib)
+profile_count::stream_in (class lto_input_block *ib)
{
profile_count ret;
ret.m_val = streamer_read_gcov_count (ib);
/* Stream THIS from IB. */
profile_probability
-profile_probability::stream_in (struct lto_input_block *ib)
+profile_probability::stream_in (class lto_input_block *ib)
{
profile_probability ret;
ret.m_val = streamer_read_uhwi (ib);
#define GCC_PROFILE_COUNT_H
struct function;
-class profile_count;
+struct profile_count;
/* Quality of the profile count. Because gengtype does not support enums
   inside classes, this is in the global namespace. */
uint32_t m_val : 29;
enum profile_quality m_quality : 3;
- friend class profile_count;
+ friend struct profile_count;
public:
profile_probability (): m_val (uninitialized_probability),
m_quality (GUESSED)
profile_count count2) const;
/* LTO streaming support. */
- static profile_probability stream_in (struct lto_input_block *);
+ static profile_probability stream_in (class lto_input_block *);
void stream_out (struct output_block *);
void stream_out (struct lto_output_stream *);
};
profile_quality quality = PRECISE);
/* LTO streaming support. */
- static profile_count stream_in (struct lto_input_block *);
+ static profile_count stream_in (class lto_input_block *);
void stream_out (struct output_block *);
void stream_out (struct lto_output_stream *);
};
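
Friend declarations carry a class-key as well, so they fall under the same rule: profile_count's definition evidently uses the struct key after this change, and both the forward declaration and the friend declaration above follow it. A sketch of the friend case (hypothetical names, hedged as before):

  // Friend declarations also use an elaborated-type-specifier.
  struct count;                 // defined with 'struct' below

  class probability
  {
    unsigned m_val = 3;
    friend struct count;        // 'struct' matches the definition's key
  };

  struct count
  {
    unsigned read (const probability &p) const { return p.m_val; }
  };

  int main ()
  {
    probability p;
    count c;
    return c.read (p) == 3 ? 0 : 1;   // friendship grants access to m_val
  }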
if (flag_branch_probabilities
&& (profile_status_for_fn (cfun) == PROFILE_READ))
{
- struct loop *loop;
+ class loop *loop;
if (dump_file && (dump_flags & TDF_DETAILS))
report_predictor_hitrates ();
static vec<du_head_p> id_to_chain;
/* List of currently open chains. */
-static struct du_head *open_chains;
+static class du_head *open_chains;
/* Bitmap of open chains. The bits set always match the list found in
open_chains. */
static operand_rr_info *cur_operand;
/* Set while scanning RTL if a register dies. Used to tie chains. */
-static struct du_head *terminated_this_insn;
+static class du_head *terminated_this_insn;
/* Return the chain corresponding to id number ID. Take into account that
chains may have been merged. */
another chain whose id is ID. */
static void
-mark_conflict (struct du_head *chains, unsigned id)
+mark_conflict (class du_head *chains, unsigned id)
{
while (chains)
{
use THIS_DU which is part of the chain HEAD. */
static void
-record_operand_use (struct du_head *head, struct du_chain *this_du)
+record_operand_use (class du_head *head, struct du_chain *this_du)
{
if (cur_operand == NULL || cur_operand->failed)
return;
create_new_chain (unsigned this_regno, unsigned this_nregs, rtx *loc,
rtx_insn *insn, enum reg_class cl)
{
- struct du_head *head = XOBNEW (&rename_obstack, struct du_head);
+ class du_head *head = XOBNEW (&rename_obstack, class du_head);
struct du_chain *this_du;
int nregs;
set the corresponding bits in *PSET. */
static void
-merge_overlapping_regs (HARD_REG_SET *pset, struct du_head *head)
+merge_overlapping_regs (HARD_REG_SET *pset, class du_head *head)
{
bitmap_iterator bi;
unsigned i;
static bool
check_new_reg_p (int reg ATTRIBUTE_UNUSED, int new_reg,
- struct du_head *this_head, HARD_REG_SET this_unavailable)
+ class du_head *this_head, HARD_REG_SET this_unavailable)
{
machine_mode mode = GET_MODE (*this_head->first->loc);
int nregs = hard_regno_nregs (new_reg, mode);
/* Initialize a rename_info structure P for basic block BB, which starts a new
scan. */
static void
-init_rename_info (struct bb_rename_info *p, basic_block bb)
+init_rename_info (class bb_rename_info *p, basic_block bb)
{
int i;
df_ref def;
/* Record in RI that the block corresponding to it has an incoming
live value, described by CHAIN. */
static void
-set_incoming_from_chain (struct bb_rename_info *ri, du_head_p chain)
+set_incoming_from_chain (class bb_rename_info *ri, du_head_p chain)
{
int i;
int incoming_nregs = ri->incoming[chain->regno].nregs;
void
regrename_analyze (bitmap bb_mask)
{
- struct bb_rename_info *rename_info;
+ class bb_rename_info *rename_info;
int i;
basic_block bb;
int n_bbs;
n_bbs = pre_and_rev_post_order_compute (NULL, inverse_postorder, false);
/* Gather some information about the blocks in this function. */
- rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks_for_fn (cfun));
+ rename_info = XCNEWVEC (class bb_rename_info, n_basic_blocks_for_fn (cfun));
i = 0;
FOR_EACH_BB_FN (bb, cfun)
{
- struct bb_rename_info *ri = rename_info + i;
+ class bb_rename_info *ri = rename_info + i;
ri->bb = bb;
if (bb_mask != NULL && !bitmap_bit_p (bb_mask, bb->index))
bb->aux = NULL;
for (i = 0; i < n_bbs; i++)
{
basic_block bb1 = BASIC_BLOCK_FOR_FN (cfun, inverse_postorder[i]);
- struct bb_rename_info *this_info;
+ class bb_rename_info *this_info;
bool success;
edge e;
edge_iterator ei;
int old_length = id_to_chain.length ();
- this_info = (struct bb_rename_info *) bb1->aux;
+ this_info = (class bb_rename_info *) bb1->aux;
if (this_info == NULL)
continue;
will be used to pre-open chains when processing the successors. */
FOR_EACH_EDGE (e, ei, bb1->succs)
{
- struct bb_rename_info *dest_ri;
- struct du_head *chain;
+ class bb_rename_info *dest_ri;
+ class du_head *chain;
if (dump_file)
fprintf (dump_file, "successor block %d\n", e->dest->index);
if (e->flags & (EDGE_EH | EDGE_ABNORMAL))
continue;
- dest_ri = (struct bb_rename_info *)e->dest->aux;
+ dest_ri = (class bb_rename_info *)e->dest->aux;
if (dest_ri == NULL)
continue;
for (chain = open_chains; chain; chain = chain->next_chain)
edges). */
FOR_EACH_BB_FN (bb, cfun)
{
- struct bb_rename_info *bb_ri = (struct bb_rename_info *) bb->aux;
+ class bb_rename_info *bb_ri = (class bb_rename_info *) bb->aux;
unsigned j;
bitmap_iterator bi;
{
edge e;
edge_iterator ei;
- struct du_head *chain = regrename_chain_from_id (j);
+ class du_head *chain = regrename_chain_from_id (j);
int n_preds_used = 0, n_preds_joined = 0;
FOR_EACH_EDGE (e, ei, bb->preds)
{
- struct bb_rename_info *src_ri;
+ class bb_rename_info *src_ri;
unsigned k;
bitmap_iterator bi2;
HARD_REG_SET live;
if (e->flags & (EDGE_EH | EDGE_ABNORMAL))
continue;
- src_ri = (struct bb_rename_info *)e->src->aux;
+ src_ri = (class bb_rename_info *)e->src->aux;
if (src_ri == NULL)
continue;
EXECUTE_IF_SET_IN_BITMAP (&src_ri->open_chains_set,
0, k, bi2)
{
- struct du_head *outgoing_chain = regrename_chain_from_id (k);
+ class du_head *outgoing_chain = regrename_chain_from_id (k);
if (outgoing_chain->regno == chain->regno
&& outgoing_chain->nregs == chain->nregs)
}
FOR_EACH_BB_FN (bb, cfun)
{
- struct bb_rename_info *bb_ri = (struct bb_rename_info *) bb->aux;
+ class bb_rename_info *bb_ri = (class bb_rename_info *) bb->aux;
unsigned j;
bitmap_iterator bi;
{
edge e;
edge_iterator ei;
- struct du_head *chain = regrename_chain_from_id (j);
+ class du_head *chain = regrename_chain_from_id (j);
int n_succs_used = 0, n_succs_joined = 0;
FOR_EACH_EDGE (e, ei, bb->succs)
{
bool printed = false;
- struct bb_rename_info *dest_ri;
+ class bb_rename_info *dest_ri;
unsigned k;
bitmap_iterator bi2;
HARD_REG_SET live;
n_succs_used++;
- dest_ri = (struct bb_rename_info *)e->dest->aux;
+ dest_ri = (class bb_rename_info *)e->dest->aux;
if (dest_ri == NULL)
continue;
EXECUTE_IF_SET_IN_BITMAP (&dest_ri->incoming_open_chains_set,
0, k, bi2)
{
- struct du_head *incoming_chain = regrename_chain_from_id (k);
+ class du_head *incoming_chain = regrename_chain_from_id (k);
if (incoming_chain->regno == chain->regno
&& incoming_chain->nregs == chain->nregs)
numbering in its subpatterns. */
bool
-regrename_do_replace (struct du_head *head, int reg)
+regrename_do_replace (class du_head *head, int reg)
{
struct du_chain *chain;
unsigned int base_regno = head->regno;
for (chain = head->first; chain; chain = chain->next_use)
{
unsigned int regno = ORIGINAL_REGNO (*chain->loc);
- struct reg_attrs *attr = REG_ATTRS (*chain->loc);
+ class reg_attrs *attr = REG_ATTRS (*chain->loc);
int reg_ptr = REG_POINTER (*chain->loc);
if (DEBUG_INSN_P (chain->insn) && REGNO (*chain->loc) != base_regno)
note_sets_clobbers (rtx x, const_rtx set, void *data)
{
enum rtx_code code = *(enum rtx_code *)data;
- struct du_head *chain;
+ class du_head *chain;
if (GET_CODE (x) == SUBREG)
x = SUBREG_REG (x);
scan_rtx_reg (rtx_insn *insn, rtx *loc, enum reg_class cl, enum scan_actions action,
enum op_type type)
{
- struct du_head **p;
+ class du_head **p;
rtx x = *loc;
unsigned this_regno = REGNO (x);
int this_nregs = REG_NREGS (x);
for (p = &open_chains; *p;)
{
- struct du_head *head = *p;
- struct du_head *next = head->next_chain;
+ class du_head *head = *p;
+ class du_head *next = head->next_chain;
int exact_match = (head->regno == this_regno
&& head->nregs == this_nregs);
int superset = (this_regno <= head->regno
rtx op = *loc;
enum reg_class cl = alternative_class (op_alt, opn);
- struct du_head *prev_open;
+ class du_head *prev_open;
if (recog_data.operand_type[opn] != OP_OUT
|| op_alt[opn].earlyclobber != earlyclobber)
requires a caller-saved reg. */
if (CALL_P (insn))
{
- struct du_head *p;
+ class du_head *p;
for (p = open_chains; p; p = p->next_chain)
p->need_caller_save_reg = 1;
}
{
public:
/* The next chain. */
- struct du_head *next_chain;
+ class du_head *next_chain;
/* The first and last elements of this chain. */
struct du_chain *first, *last;
/* The chain that this chain is tied to. */
- struct du_head *tied_chain;
+ class du_head *tied_chain;
/* Describes the register being tracked. */
unsigned regno;
int nregs;
unsigned int target_data_2;
};
-typedef struct du_head *du_head_p;
+typedef class du_head *du_head_p;
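
The typedef changes along with everything else: it names du_head through an elaborated-type-specifier, and du_head is now a class (note the public: label in its definition above). A sketch (hypothetical names):

  // A typedef's elaborated-type-specifier follows the definition's key.
  class node { public: node *next = nullptr; };

  typedef class node *node_p;   // 'class' matches; 'struct' would warn

  int length (node_p head)
  {
    int n = 0;
    for (node_p p = head; p; p = p->next)
      ++n;
    return n;
  }

  int main ()
  {
    node a, b;
    a.next = &b;
    return length (&a) == 2 ? 0 : 1;
  }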
/* This struct describes a single occurrence of a register. */
struct du_chain
/* Holds either the chain for the operand itself, or for the registers in
a memory operand. */
struct du_chain *chains[MAX_REGS_PER_ADDRESS];
- struct du_head *heads[MAX_REGS_PER_ADDRESS];
+ class du_head *heads[MAX_REGS_PER_ADDRESS];
};
/* A struct to hold a vector of operand_rr_info structures describing the
{
public:
/* Links to the neighbor instructions. */
- struct insn_chain *next, *prev;
+ class insn_chain *next, *prev;
/* Link through a chains set up by calculate_needs_all_insns, containing
all insns that need reloading. */
- struct insn_chain *next_need_reload;
+ class insn_chain *next_need_reload;
/* The rtx of the insn. */
rtx_insn *insn;
/* A chain of insn_chain structures to describe all non-note insns in
a function. */
-extern struct insn_chain *reload_insn_chain;
+extern class insn_chain *reload_insn_chain;
/* Allocate a new insn_chain structure. */
-extern struct insn_chain *new_insn_chain (void);
+extern class insn_chain *new_insn_chain (void);
#endif
#if defined SET_HARD_REG_BIT
/* List of insn_chain instructions, one for every insn that reload needs to
examine. */
-struct insn_chain *reload_insn_chain;
+class insn_chain *reload_insn_chain;
/* TRUE if we potentially left dead insns in the insn stream and want to
run DCE immediately after reload, FALSE otherwise. */
static bool need_dce;
/* List of all insns needing reloads. */
-static struct insn_chain *insns_need_reload;
+static class insn_chain *insns_need_reload;
\f
/* This structure is used to record information about register eliminations.
Each array entry describes one possible way of eliminating a register
\f
static void replace_pseudos_in (rtx *, machine_mode, rtx);
static void maybe_fix_stack_asms (void);
-static void copy_reloads (struct insn_chain *);
+static void copy_reloads (class insn_chain *);
static void calculate_needs_all_insns (int);
-static int find_reg (struct insn_chain *, int);
-static void find_reload_regs (struct insn_chain *);
+static int find_reg (class insn_chain *, int);
+static void find_reload_regs (class insn_chain *);
static void select_reload_regs (void);
static void delete_caller_save_insns (void);
static int finish_spills (int);
static void scan_paradoxical_subregs (rtx);
static void count_pseudo (int);
-static void order_regs_for_reload (struct insn_chain *);
+static void order_regs_for_reload (class insn_chain *);
static void reload_as_needed (int);
static void forget_old_reloads_1 (rtx, const_rtx, void *);
static void forget_marked_reloads (regset);
rtx, rtx, int, int);
static int free_for_value_p (int, machine_mode, int, enum reload_type,
rtx, rtx, int, int);
-static int allocate_reload_reg (struct insn_chain *, int, int);
+static int allocate_reload_reg (class insn_chain *, int, int);
static int conflicts_with_override (rtx);
static void failed_reload (rtx_insn *, int);
static int set_reload_reg (int, int);
-static void choose_reload_regs_init (struct insn_chain *, rtx *);
-static void choose_reload_regs (struct insn_chain *);
-static void emit_input_reload_insns (struct insn_chain *, struct reload *,
+static void choose_reload_regs_init (class insn_chain *, rtx *);
+static void choose_reload_regs (class insn_chain *);
+static void emit_input_reload_insns (class insn_chain *, struct reload *,
rtx, int);
-static void emit_output_reload_insns (struct insn_chain *, struct reload *,
+static void emit_output_reload_insns (class insn_chain *, struct reload *,
int);
-static void do_input_reload (struct insn_chain *, struct reload *, int);
-static void do_output_reload (struct insn_chain *, struct reload *, int);
-static void emit_reload_insns (struct insn_chain *);
+static void do_input_reload (class insn_chain *, struct reload *, int);
+static void do_output_reload (class insn_chain *, struct reload *, int);
+static void emit_reload_insns (class insn_chain *);
static void delete_output_reload (rtx_insn *, int, int, rtx);
static void delete_address_reloads (rtx_insn *, rtx_insn *);
static void delete_address_reloads_1 (rtx_insn *, rtx, rtx_insn *);
}
/* List of insn chains that are currently unused. */
-static struct insn_chain *unused_insn_chains = 0;
+static class insn_chain *unused_insn_chains = 0;
/* Allocate an empty insn_chain structure. */
-struct insn_chain *
+class insn_chain *
new_insn_chain (void)
{
- struct insn_chain *c;
+ class insn_chain *c;
if (unused_insn_chains == 0)
{
- c = XOBNEW (&reload_obstack, struct insn_chain);
+ c = XOBNEW (&reload_obstack, class insn_chain);
INIT_REG_SET (&c->live_throughout);
INIT_REG_SET (&c->dead_or_set);
}
#ifdef STACK_REGS
const char *constraints[MAX_RECOG_OPERANDS];
machine_mode operand_mode[MAX_RECOG_OPERANDS];
- struct insn_chain *chain;
+ class insn_chain *chain;
for (chain = reload_insn_chain; chain != 0; chain = chain->next)
{
/* Copy the global variables n_reloads and rld into the corresponding elts
of CHAIN. */
static void
-copy_reloads (struct insn_chain *chain)
+copy_reloads (class insn_chain *chain)
{
chain->n_reloads = n_reloads;
chain->rld = XOBNEWVEC (&reload_obstack, struct reload, n_reloads);
static void
calculate_needs_all_insns (int global)
{
- struct insn_chain **pprev_reload = &insns_need_reload;
- struct insn_chain *chain, *next = 0;
+ class insn_chain **pprev_reload = &insns_need_reload;
+ class insn_chain *chain, *next = 0;
something_needs_elimination = 0;
contents of BAD_SPILL_REGS for the insn described by CHAIN. */
static void
-order_regs_for_reload (struct insn_chain *chain)
+order_regs_for_reload (class insn_chain *chain)
{
unsigned i;
HARD_REG_SET used_by_pseudos;
/* Find reload register to use for reload number ORDER. */
static int
-find_reg (struct insn_chain *chain, int order)
+find_reg (class insn_chain *chain, int order)
{
int rnum = reload_order[order];
struct reload *rl = rld + rnum;
for a smaller class even though it belongs to that class. */
static void
-find_reload_regs (struct insn_chain *chain)
+find_reload_regs (class insn_chain *chain)
{
int i;
static void
select_reload_regs (void)
{
- struct insn_chain *chain;
+ class insn_chain *chain;
/* Try to satisfy the needs for each insn. */
for (chain = insns_need_reload; chain != 0;
static void
delete_caller_save_insns (void)
{
- struct insn_chain *c = reload_insn_chain;
+ class insn_chain *c = reload_insn_chain;
while (c != 0)
{
while (c != 0 && c->is_caller_save_insn)
{
- struct insn_chain *next = c->next;
+ class insn_chain *next = c->next;
rtx_insn *insn = c->insn;
if (c == reload_insn_chain)
static int
finish_spills (int global)
{
- struct insn_chain *chain;
+ class insn_chain *chain;
int something_changed = 0;
unsigned i;
reg_set_iterator rsi;
static void
reload_as_needed (int live_known)
{
- struct insn_chain *chain;
+ class insn_chain *chain;
#if AUTO_INC_DEC
int i;
#endif
we didn't change anything. */
static int
-allocate_reload_reg (struct insn_chain *chain ATTRIBUTE_UNUSED, int r,
+allocate_reload_reg (class insn_chain *chain ATTRIBUTE_UNUSED, int r,
int last_reload)
{
int i, pass, count;
is the array we use to restore the reg_rtx field for every reload. */
static void
-choose_reload_regs_init (struct insn_chain *chain, rtx *save_reload_reg_rtx)
+choose_reload_regs_init (class insn_chain *chain, rtx *save_reload_reg_rtx)
{
int i;
finding a reload reg in the proper class. */
static void
-choose_reload_regs (struct insn_chain *chain)
+choose_reload_regs (class insn_chain *chain)
{
rtx_insn *insn = chain->insn;
int i, j;
has the number J. OLD contains the value to be used as input. */
static void
-emit_input_reload_insns (struct insn_chain *chain, struct reload *rl,
+emit_input_reload_insns (class insn_chain *chain, struct reload *rl,
rtx old, int j)
{
rtx_insn *insn = chain->insn;
/* Generate insns for the output reload RL, which is for the insn described
by CHAIN and has the number J. */
static void
-emit_output_reload_insns (struct insn_chain *chain, struct reload *rl,
+emit_output_reload_insns (class insn_chain *chain, struct reload *rl,
int j)
{
rtx reloadreg;
/* Do input reloading for reload RL, which is for the insn described by CHAIN
and has the number J. */
static void
-do_input_reload (struct insn_chain *chain, struct reload *rl, int j)
+do_input_reload (class insn_chain *chain, struct reload *rl, int j)
{
rtx_insn *insn = chain->insn;
rtx old = (rl->in && MEM_P (rl->in)
??? At some point we need to support handling output reloads of
JUMP_INSNs or insns that set cc0. */
static void
-do_output_reload (struct insn_chain *chain, struct reload *rl, int j)
+do_output_reload (class insn_chain *chain, struct reload *rl, int j)
{
rtx note, old;
rtx_insn *insn = chain->insn;
/* Output insns to reload values in and out of the chosen reload regs. */
static void
-emit_reload_insns (struct insn_chain *chain)
+emit_reload_insns (class insn_chain *chain)
{
rtx_insn *insn = chain->insn;
tree rt_tree;
basic_block rt_bb;
mem_attrs *rt_mem;
- struct constant_descriptor_rtx *rt_constant;
+ class constant_descriptor_rtx *rt_constant;
struct dw_cfi_node *rt_cfi;
};
rtx x_static_reg_base_value[FIRST_PSEUDO_REGISTER];
/* The default memory attributes for each mode. */
- struct mem_attrs *x_mode_mem_attrs[(int) MAX_MACHINE_MODE];
+ class mem_attrs *x_mode_mem_attrs[(int) MAX_MACHINE_MODE];
/* Track if RTL has been initialized. */
bool target_specific_initialized;
#ifndef GENERATOR_FILE
/* Return the attributes of a MEM rtx. */
-static inline const struct mem_attrs *
+static inline const class mem_attrs *
get_mem_attrs (const_rtx x)
{
- struct mem_attrs *attrs;
+ class mem_attrs *attrs;
attrs = MEM_ATTRS (x);
if (!attrs)
/* Optimize away redundant UBSAN_NULL calls. */
static bool
-maybe_optimize_ubsan_null_ifn (struct sanopt_ctx *ctx, gimple *stmt)
+maybe_optimize_ubsan_null_ifn (class sanopt_ctx *ctx, gimple *stmt)
{
gcc_assert (gimple_call_num_args (stmt) == 3);
tree ptr = gimple_call_arg (stmt, 0);
when we can actually optimize. */
static bool
-maybe_optimize_ubsan_vptr_ifn (struct sanopt_ctx *ctx, gimple *stmt)
+maybe_optimize_ubsan_vptr_ifn (class sanopt_ctx *ctx, gimple *stmt)
{
gcc_assert (gimple_call_num_args (stmt) == 5);
sanopt_tree_triplet triplet;
/* Optimize away redundant ASAN_CHECK calls. */
static bool
-maybe_optimize_asan_check_ifn (struct sanopt_ctx *ctx, gimple *stmt)
+maybe_optimize_asan_check_ifn (class sanopt_ctx *ctx, gimple *stmt)
{
gcc_assert (gimple_call_num_args (stmt) == 4);
tree ptr = gimple_call_arg (stmt, 1);
anything anymore. CTX is a sanopt context. */
static void
-sanopt_optimize_walker (basic_block bb, struct sanopt_ctx *ctx)
+sanopt_optimize_walker (basic_block bb, class sanopt_ctx *ctx)
{
basic_block son;
gimple_stmt_iterator gsi;
static int
sanopt_optimize (function *fun, bool *contains_asan_mark)
{
- struct sanopt_ctx ctx;
+ class sanopt_ctx ctx;
ctx.asan_num_accesses = 0;
ctx.contains_asan_mark = false;
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
enum reg_note, bool);
-static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
+static void add_dependence_list_and_free (class deps_desc *, rtx_insn *,
rtx_insn_list **, int, enum reg_note,
bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);
-static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
-static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
-static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
-static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);
+static void flush_pending_lists (class deps_desc *, rtx_insn *, int, int);
+static void sched_analyze_1 (class deps_desc *, rtx, rtx_insn *);
+static void sched_analyze_2 (class deps_desc *, rtx, rtx_insn *);
+static void sched_analyze_insn (class deps_desc *, rtx, rtx_insn *);
static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
newly created dependencies. */
static void
-add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
+add_dependence_list_and_free (class deps_desc *deps, rtx_insn *insn,
rtx_insn_list **listp,
int uncond, enum reg_note dep_type, bool hard)
{
so that we can do memory aliasing on it. */
static void
-add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
+add_insn_mem_dependence (class deps_desc *deps, bool read_p,
rtx_insn *insn, rtx mem)
{
rtx_insn_list **insn_list;
dependencies for a read operation, similarly with FOR_WRITE. */
static void
-flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
+flush_pending_lists (class deps_desc *deps, rtx_insn *insn, int for_read,
int for_write)
{
if (for_write)
/* Set up insn register uses for INSN and dependency context DEPS. */
static void
-setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
+setup_insn_reg_uses (class deps_desc *deps, rtx_insn *insn)
{
unsigned i;
reg_set_iterator rsi;
/* Extend reg info for the deps context DEPS given that
we have just generated a register numbered REGNO. */
static void
-extend_deps_reg_info (struct deps_desc *deps, int regno)
+extend_deps_reg_info (class deps_desc *deps, int regno)
{
int max_regno = regno + 1;
CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
static void
-sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
+sched_analyze_reg (class deps_desc *deps, int regno, machine_mode mode,
enum rtx_code ref, rtx_insn *insn)
{
/* We could emit new pseudos in renaming. Extend the reg structures. */
destination of X, and reads of everything mentioned. */
static void
-sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
+sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
rtx dest = XEXP (x, 0);
enum rtx_code code = GET_CODE (x);
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
-sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
+sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
int i;
int j;
/* Analyze an INSN with pattern X to find all dependencies. */
static void
-sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
+sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn)
{
RTX_CODE code = GET_CODE (x);
rtx link;
/* Analyze INSN with DEPS as a context. */
void
-deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
+deps_analyze_insn (class deps_desc *deps, rtx_insn *insn)
{
if (sched_deps_info->start_insn)
sched_deps_info->start_insn (insn);
/* Initialize DEPS for the new block beginning with HEAD. */
void
-deps_start_bb (struct deps_desc *deps, rtx_insn *head)
+deps_start_bb (class deps_desc *deps, rtx_insn *head)
{
gcc_assert (!deps->readonly);
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
dependencies for each insn. */
void
-sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
+sched_analyze (class deps_desc *deps, rtx_insn *head, rtx_insn *tail)
{
rtx_insn *insn;
\f
/* Initialize variables for region data dependence analysis.
When LAZY_REG_LAST is true, do not allocate reg_last array
- of struct deps_desc immediately. */
+ of class deps_desc immediately. */
void
-init_deps (struct deps_desc *deps, bool lazy_reg_last)
+init_deps (class deps_desc *deps, bool lazy_reg_last)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
/* Init only reg_last field of DEPS, which was not allocated before as
   we initialized DEPS lazily. */
void
-init_deps_reg_last (struct deps_desc *deps)
+init_deps_reg_last (class deps_desc *deps)
{
gcc_assert (deps && deps->max_reg > 0);
gcc_assert (deps->reg_last == NULL);
/* Free insn lists found in DEPS. */
void
-free_deps (struct deps_desc *deps)
+free_deps (class deps_desc *deps)
{
unsigned i;
reg_set_iterator rsi;
/* Remove INSN from dependence contexts DEPS. */
void
-remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
+remove_from_deps (class deps_desc *deps, rtx_insn *insn)
{
int removed;
unsigned i;
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
basic_block first_bb, target_bb;
- struct deps_desc tmp_deps;
+ class deps_desc tmp_deps;
bool success;
/* Blah. We should fix the rest of the code not to get confused by
BOOL_BITFIELD readonly : 1;
};
-typedef struct deps_desc *deps_t;
+typedef class deps_desc *deps_t;
/* This structure holds some state of the current scheduling pass, and
contains some function pointers that abstract out some of the non-generic
const rtx_insn *);
extern bool sched_insn_is_legitimate_for_speculation_p (const rtx_insn *, ds_t);
extern void add_dependence (rtx_insn *, rtx_insn *, enum reg_note);
-extern void sched_analyze (struct deps_desc *, rtx_insn *, rtx_insn *);
-extern void init_deps (struct deps_desc *, bool);
-extern void init_deps_reg_last (struct deps_desc *);
-extern void free_deps (struct deps_desc *);
+extern void sched_analyze (class deps_desc *, rtx_insn *, rtx_insn *);
+extern void init_deps (class deps_desc *, bool);
+extern void init_deps_reg_last (class deps_desc *);
+extern void free_deps (class deps_desc *);
extern void init_deps_global (void);
extern void finish_deps_global (void);
-extern void deps_analyze_insn (struct deps_desc *, rtx_insn *);
-extern void remove_from_deps (struct deps_desc *, rtx_insn *);
+extern void deps_analyze_insn (class deps_desc *, rtx_insn *);
+extern void remove_from_deps (class deps_desc *, rtx_insn *);
extern void init_insn_reg_pressure_info (rtx_insn *);
extern void get_implicit_reg_pending_clobbers (HARD_REG_SET *, rtx_insn *);
extern void maybe_extend_reg_info_p (void);
-extern void deps_start_bb (struct deps_desc *, rtx_insn *);
+extern void deps_start_bb (class deps_desc *, rtx_insn *);
extern enum reg_note ds_to_dt (ds_t);
extern bool deps_pools_are_empty_p (void);
extern void free_rgn_deps (void);
extern int contributes_to_priority (rtx_insn *, rtx_insn *);
extern void extend_rgns (int *, int *, sbitmap, int *);
-extern void deps_join (struct deps_desc *, struct deps_desc *);
+extern void deps_join (class deps_desc *, class deps_desc *);
extern void rgn_setup_common_sched_info (void);
extern void rgn_setup_sched_infos (void);
static void schedule_region (int);
static void concat_insn_mem_list (rtx_insn_list *, rtx_expr_list *,
rtx_insn_list **, rtx_expr_list **);
-static void propagate_deps (int, struct deps_desc *);
+static void propagate_deps (int, class deps_desc *);
static void free_pending_lists (void);
/* Functions for construction of the control flow graph. */
the variables of its predecessors. When the analysis for a bb completes,
we save the contents to the corresponding bb_deps[bb] variable. */
-static struct deps_desc *bb_deps;
+static class deps_desc *bb_deps;
static void
concat_insn_mem_list (rtx_insn_list *copy_insns,
/* Join PRED_DEPS to the SUCC_DEPS. */
void
-deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
+deps_join (class deps_desc *succ_deps, class deps_desc *pred_deps)
{
unsigned reg;
reg_set_iterator rsi;
/* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. */
static void
-propagate_deps (int bb, struct deps_desc *pred_deps)
+propagate_deps (int bb, class deps_desc *pred_deps)
{
basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb));
edge_iterator ei;
compute_block_dependences (int bb)
{
rtx_insn *head, *tail;
- struct deps_desc tmp_deps;
+ class deps_desc tmp_deps;
tmp_deps = bb_deps[bb];
init_deps_global ();
/* Initializations for region data dependence analysis. */
- bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
+ bb_deps = XNEWVEC (class deps_desc, current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb, false);
static struct common_sched_info_def sel_common_sched_info;
/* The loop nest being pipelined. */
-struct loop *current_loop_nest;
+class loop *current_loop_nest;
/* LOOP_NESTS is a vector containing the corresponding loop nest for
each region. */
}
\f
/* Functions to work with dependence contexts.
- Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
+ Dc (aka deps context, aka deps_t, aka class deps_desc *) is short for dependence
context. It accumulates information about processed insns to decide if
   the current insn is dependent on the processed ones. */
static deps_t
alloc_deps_context (void)
{
- return XNEW (struct deps_desc);
+ return XNEW (class deps_desc);
}
/* Allocate and initialize dep context. */
static void
deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
{
- struct deps_desc _dc, *dc = &_dc;
+ class deps_desc _dc, *dc = &_dc;
deps_init_id_data.where = DEPS_IN_NOWHERE;
deps_init_id_data.id = id;
{
int i;
ds_t ds;
- struct deps_desc *dc;
+ class deps_desc *dc;
if (INSN_SIMPLEJUMP_P (pred))
/* Unconditional jump is just a transfer of control flow.
if (current_loop_nest)
{
- struct loop *loop;
+ class loop *loop;
for (loop = current_loop_nest; loop; loop = loop_outer (loop))
if (considered_for_pipelining_p (loop) && loop->latch == from)
/* Create a region for LOOP and return its number. If we don't want
to pipeline LOOP, return -1. */
static int
-make_region_from_loop (struct loop *loop)
+make_region_from_loop (class loop *loop)
{
unsigned int i;
int new_rgn_number = -1;
- struct loop *inner;
+ class loop *inner;
/* Basic block index, to be assigned to BLOCK_TO_BB. */
int bb_ord_index = 0;
pipelined before outer loops. Returns true when a region for LOOP
is created. */
static bool
-make_regions_from_loop_nest (struct loop *loop)
+make_regions_from_loop_nest (class loop *loop)
{
- struct loop *cur_loop;
+ class loop *cur_loop;
int rgn_number;
/* Traverse all inner nodes of the loop. */
recompute_rev_top_order ();
}
-/* Returns a struct loop for region RGN. */
+/* Returns a class loop for region RGN. */
loop_p
get_loop_nest_for_rgn (unsigned int rgn)
{
/* True when LOOP was included into pipelining regions. */
bool
-considered_for_pipelining_p (struct loop *loop)
+considered_for_pipelining_p (class loop *loop)
{
if (loop_depth (loop) == 0)
return false;
/* Free data structures used in pipelining of loops. */
void sel_finish_pipelining (void)
{
- struct loop *loop;
+ class loop *loop;
/* Release aux fields so we don't free them later by mistake. */
FOR_EACH_LOOP (loop, 0)
{
if (current_loop_nest)
{
- struct loop *outer;
+ class loop *outer;
if (preheader_removed)
return false;
htab_t transformed_insns;
/* A context encapsulating this insn. */
- struct deps_desc deps_context;
+ class deps_desc deps_context;
/* This field is initialized at the beginning of scheduling and is used
to handle sched group instructions. If it is non-null, then it points
BOOL_BITFIELD after_stall_p : 1;
};
-typedef struct _sel_insn_data sel_insn_data_def;
+typedef class _sel_insn_data sel_insn_data_def;
typedef sel_insn_data_def *sel_insn_data_t;
extern vec<sel_insn_data_def> s_i_d;
extern bitmap_head *forced_ebb_heads;
/* The loop nest being pipelined. */
-extern struct loop *current_loop_nest;
+extern class loop *current_loop_nest;
/* Saves pipelined blocks. Bitmap is indexed by bb->index. */
extern sbitmap bbs_pipelined;
static inline bool
inner_loop_header_p (basic_block bb)
{
- struct loop *inner_loop;
+ class loop *inner_loop;
if (!current_loop_nest)
return false;
/* Return exit edges of LOOP, filtering out edges with the same dest bb. */
static inline vec<edge>
-get_loop_exit_edges_unique_dests (const struct loop *loop)
+get_loop_exit_edges_unique_dests (const class loop *loop)
{
vec<edge> edges = vNULL;
struct loop_exit *exit;
/* And now check whether we should skip over an inner loop. */
if (inner_loop_header_p (bb))
{
- struct loop *this_loop;
- struct loop *pred_loop = NULL;
+ class loop *this_loop;
+ class loop *pred_loop = NULL;
int i;
unsigned this_depth;
edge e;
extern void sel_finish_pipelining (void);
extern void sel_sched_region (int);
extern loop_p get_loop_nest_for_rgn (unsigned int);
-extern bool considered_for_pipelining_p (struct loop *);
+extern bool considered_for_pipelining_p (class loop *);
extern void make_region_from_loop_preheader (vec<basic_block> *&);
extern void sel_add_loop_preheaders (bb_vec_t *);
extern bool sel_is_loop_preheader_p (basic_block);
The following class describes a particular case within our test
matrix. */
-struct line_table_case;
+class line_table_case;
/* A class for overriding the global "line_table" within a selftest,
restoring its value afterwards. At most one instance of this
sese_info_p
new_sese_info (edge entry, edge exit)
{
- sese_info_p region = XNEW (struct sese_info_t);
+ sese_info_p region = XNEW (class sese_info_t);
region->region.entry = entry;
region->region.exit = exit;
/* Returns the outermost loop in SCOP that contains BB. */
-struct loop *
+class loop *
outermost_loop_in_sese_1 (sese_l &region, basic_block bb)
{
- struct loop *nest;
+ class loop *nest;
nest = bb->loop_father;
while (loop_outer (nest)
extern sese_info_p new_sese_info (edge, edge);
extern void free_sese_info (sese_info_p);
extern void sese_insert_phis_for_liveouts (sese_info_p, basic_block, edge, edge);
-extern struct loop *outermost_loop_in_sese (sese_l &, basic_block);
+extern class loop *outermost_loop_in_sese (sese_l &, basic_block);
extern tree scalar_evolution_in_region (const sese_l &, loop_p, tree);
extern bool scev_analyzable_p (tree, sese_l &);
extern bool invariant_in_sese_p_rec (tree, const sese_l &, bool *);
/* Returns true when LOOP is in REGION. */
static inline bool
-loop_in_sese_p (struct loop *loop, const sese_l &region)
+loop_in_sese_p (class loop *loop, const sese_l &region)
{
return (bb_in_sese_p (loop->header, region)
&& bb_in_sese_p (loop->latch, region));
/* Return the innermost loop that contains the basic block GBB. */
-static inline struct loop *
+static inline class loop *
gbb_loop (gimple_poly_bb_p gbb)
{
return GBB_BB (gbb)->loop_father;
/* Read sreal value from IB. */
sreal
-sreal::stream_in (struct lto_input_block *ib)
+sreal::stream_in (class lto_input_block *ib)
{
sreal val;
val.m_sig = streamer_read_hwi (ib);
#define SREAL_ABS(v) (v < 0 ? -v: v)
struct output_block;
-struct lto_input_block;
+class lto_input_block;
/* Structure for holding a simple real number. */
class sreal
int64_t to_int () const;
double to_double () const;
void stream_out (struct output_block *);
- static sreal stream_in (struct lto_input_block *);
+ static sreal stream_in (class lto_input_block *);
sreal operator+ (const sreal &other) const;
sreal operator- (const sreal &other) const;
sreal operator* (const sreal &other) const;
/* Forward declarations to avoid including unnecessary headers. */
struct output_block;
-struct lto_input_block;
-struct data_in;
+class lto_input_block;
+class data_in;
/* Streamer hooks. These functions do additional processing as
needed by the module. There are two types of callbacks, those that
to the buffer to read from and a data_in instance with tables
and descriptors needed by the unpickling routines. It returns the
tree instantiated from the stream. */
- tree (*read_tree) (struct lto_input_block *, struct data_in *);
+ tree (*read_tree) (class lto_input_block *, class data_in *);
/* [REQ] Called by every streaming routine that needs to read a location. */
- void (*input_location) (location_t *, struct bitpack_d *, struct data_in *);
+ void (*input_location) (location_t *, struct bitpack_d *, class data_in *);
/* [REQ] Called by every streaming routine that needs to write a location. */
void (*output_location) (struct output_block *, struct bitpack_d *, location_t);
#include "lower-subreg.h"
#if SWITCHABLE_TARGET
-struct target_globals default_target_globals = {
+class target_globals default_target_globals = {
&default_target_flag_state,
&default_target_regs,
&default_target_rtl,
&default_target_lower_subreg
};
-struct target_globals *
+class target_globals *
save_target_globals (void)
{
- struct target_globals *g = ggc_cleared_alloc <target_globals> ();
- g->flag_state = XCNEW (struct target_flag_state);
+ class target_globals *g = ggc_cleared_alloc <target_globals> ();
+ g->flag_state = XCNEW (class target_flag_state);
g->regs = XCNEW (struct target_regs);
g->rtl = ggc_cleared_alloc<target_rtl> ();
g->recog = XCNEW (struct target_recog);
g->libfuncs = ggc_cleared_alloc<target_libfuncs> ();
g->cfgloop = XCNEW (struct target_cfgloop);
g->ira = XCNEW (struct target_ira);
- g->ira_int = XCNEW (struct target_ira_int);
+ g->ira_int = XCNEW (class target_ira_int);
g->builtins = XCNEW (struct target_builtins);
g->gcse = XCNEW (struct target_gcse);
g->bb_reorder = XCNEW (struct target_bb_reorder);
correctly when a previous function has changed
*this_target_optabs. */
-struct target_globals *
+class target_globals *
save_target_globals_default_opts ()
{
- struct target_globals *globals;
+ class target_globals *globals;
if (optimization_current_node != optimization_default_node)
{
#define TARGET_GLOBALS_H 1
#if SWITCHABLE_TARGET
-extern struct target_flag_state *this_target_flag_state;
+extern class target_flag_state *this_target_flag_state;
extern struct target_regs *this_target_regs;
extern struct target_rtl *this_target_rtl;
extern struct target_recog *this_target_recog;
extern struct target_libfuncs *this_target_libfuncs;
extern struct target_cfgloop *this_target_cfgloop;
extern struct target_ira *this_target_ira;
-extern struct target_ira_int *this_target_ira_int;
+extern class target_ira_int *this_target_ira_int;
extern struct target_builtins *this_target_builtins;
extern struct target_gcse *this_target_gcse;
extern struct target_bb_reorder *this_target_bb_reorder;
public:
~target_globals ();
- struct target_flag_state *GTY((skip)) flag_state;
+ class target_flag_state *GTY((skip)) flag_state;
struct target_regs *GTY((skip)) regs;
struct target_rtl *rtl;
struct target_recog *GTY((skip)) recog;
struct target_libfuncs *libfuncs;
struct target_cfgloop *GTY((skip)) cfgloop;
struct target_ira *GTY((skip)) ira;
- struct target_ira_int *GTY((skip)) ira_int;
+ class target_ira_int *GTY((skip)) ira_int;
struct target_builtins *GTY((skip)) builtins;
struct target_gcse *GTY((skip)) gcse;
struct target_bb_reorder *GTY((skip)) bb_reorder;
};
#if SWITCHABLE_TARGET
-extern struct target_globals default_target_globals;
+extern class target_globals default_target_globals;
-extern struct target_globals *save_target_globals (void);
-extern struct target_globals *save_target_globals_default_opts (void);
+extern class target_globals *save_target_globals (void);
+extern class target_globals *save_target_globals_default_opts (void);
static inline void
-restore_target_globals (struct target_globals *g)
+restore_target_globals (class target_globals *g)
{
this_target_flag_state = g->flag_state;
this_target_regs = g->regs;
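The target-globals.h hunk mirrors the target-globals.c one above it: an extern object or function declaration repeats the class-key, so once the definition settles on class, every declaration referring to the type follows. A single-file sketch, with invented names standing in for the header/source pair:

  // Hypothetical sketch: the "header" declaration and the definition
  // must carry the same class-key.
  class target_state { public: int flags = 0; };

  extern class target_state default_state;   // as a header would declare it
  class target_state default_state;          // as the source file defines it

  static inline void
  restore (class target_state *g)
  {
    g->flags = 0;                             // toy analogue of restore_target_globals
  }

  int
  main ()
  {
    restore (&default_state);
    return default_state.flags;
  }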
"non-NULL, it identifies the loop being vectorized; otherwise a single block "
"is being vectorized.",
void *,
- (struct loop *loop_info),
+ (class loop *loop_info),
default_init_cost)
/* Target function to record N statements of the given kind using the
"revised.",
unsigned,
(void *data, int count, enum vect_cost_for_stmt kind,
- struct _stmt_vec_info *stmt_info, int misalign,
+ class _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where),
default_add_stmt_cost)
the loop, which is going to be checked for unrolling. This target hook\n\
is required only when the target has special constraints like maximum\n\
number of memory accesses.",
- unsigned, (unsigned nunroll, struct loop *loop),
+ unsigned, (unsigned nunroll, class loop *loop),
NULL)
/* True if X is a legitimate MODE-mode immediate operand. */
version of this hook assumes the system C library errno location\
is either a declaration of type int or accessed by dereferencing\
a pointer to int.",
- bool, (struct ao_ref *ref),
+ bool, (ao_ref *ref),
default_ref_may_alias_errno)
/* Support for named address spaces. */
This target hook is required only when the target supports low-overhead\n\
loops, and will help ivopts to make some decisions.\n\
The default version of this hook returns false.",
- bool, (struct loop *loop),
+ bool, (class loop *loop),
default_predict_doloop_p)
DEFHOOK
struct ddg;
/* This is defined in cfgloop.h . */
-struct loop;
+class loop;
/* This is defined in ifcvt.h. */
struct noce_if_info;
/* This is defined in tree-ssa-alias.h. */
-struct ao_ref;
+class ao_ref;
/* This is defined in tree-vectorizer.h. */
-struct _stmt_vec_info;
+class _stmt_vec_info;
/* These are defined in tree-vect-stmts.c. */
-extern tree stmt_vectype (struct _stmt_vec_info *);
-extern bool stmt_in_inner_loop_p (struct _stmt_vec_info *);
+extern tree stmt_vectype (class _stmt_vec_info *);
+extern bool stmt_in_inner_loop_p (class _stmt_vec_info *);
/* Assembler instructions for creating various kinds of integer object. */
if the target can take advantage of it. */
bool
-default_predict_doloop_p (struct loop *loop ATTRIBUTE_UNUSED)
+default_predict_doloop_p (class loop *loop ATTRIBUTE_UNUSED)
{
return false;
}
array of three unsigned ints, set it to zero, and return its address. */
void *
-default_init_cost (struct loop *loop_info ATTRIBUTE_UNUSED)
+default_init_cost (class loop *loop_info ATTRIBUTE_UNUSED)
{
unsigned *cost = XNEWVEC (unsigned, 3);
cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
unsigned
default_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
- struct _stmt_vec_info *stmt_info, int misalign,
+ class _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where)
{
unsigned *cost = (unsigned *) data;
extern bool default_has_ifunc_p (void);
-extern bool default_predict_doloop_p (struct loop *);
+extern bool default_predict_doloop_p (class loop *);
extern const char * default_invalid_within_doloop (const rtx_insn *);
extern tree default_builtin_vectorized_function (unsigned int, tree, tree);
extern void default_autovectorize_vector_sizes (vector_sizes *, bool);
extern opt_machine_mode default_get_mask_mode (poly_uint64, poly_uint64);
extern bool default_empty_mask_is_expensive (unsigned);
-extern void *default_init_cost (struct loop *);
+extern void *default_init_cost (class loop *);
extern unsigned default_add_stmt_cost (void *, int, enum vect_cost_for_stmt,
- struct _stmt_vec_info *, int,
+ class _stmt_vec_info *, int,
enum vect_cost_model_location);
extern void default_finish_cost (void *, unsigned *, unsigned *, unsigned *);
extern void default_destroy_cost_data (void *);
extern bool default_target_option_pragma_parse (tree, tree);
extern bool default_target_can_inline_p (tree, tree);
extern bool default_valid_pointer_mode (scalar_int_mode);
-extern bool default_ref_may_alias_errno (struct ao_ref *);
+extern bool default_ref_may_alias_errno (class ao_ref *);
extern scalar_int_mode default_addr_space_pointer_mode (addr_space_t);
extern scalar_int_mode default_addr_space_address_mode (addr_space_t);
extern bool default_addr_space_valid_pointer_mode (scalar_int_mode,
the support provided depends on the backend. */
rtx stack_limit_rtx;
-struct target_flag_state default_target_flag_state;
+class target_flag_state default_target_flag_state;
#if SWITCHABLE_TARGET
-struct target_flag_state *this_target_flag_state = &default_target_flag_state;
+class target_flag_state *this_target_flag_state = &default_target_flag_state;
#else
#define this_target_flag_state (&default_target_flag_state)
#endif
element exists. If IDX is not NULL, it is set to the index of VAL in
COMB. */
-static struct aff_comb_elt *
+static class aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
unsigned i;
tree e;
gimple *def;
widest_int scale;
- struct name_expansion *exp;
+ class name_expansion *exp;
aff_combination_zero (&to_add, comb->type);
for (i = 0; i < comb->n; i++)
default:
continue;
}
- exp = XNEW (struct name_expansion);
+ exp = XNEW (class name_expansion);
exp->in_progress = 1;
if (!*cache)
*cache = new hash_map<tree, name_expansion *>;
for (i = 0; i < div->n; i++)
{
- struct aff_comb_elt *elt
+ class aff_comb_elt *elt
= aff_combination_find_elt (val, div->elts[i].val, NULL);
if (!elt)
return false;
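Note that the key also travels into allocation macros: XNEW and XCNEW expand to sizeof-based allocators whose argument is a type, which is why call sites such as XNEW (struct name_expansion) change along with the declarations, as in the hunks above. A hypothetical sketch with a simplified stand-in for libiberty's XNEW and an invented type name:

  #include <cstdlib>

  #define XNEW(T) ((T *) malloc (sizeof (T)))  // simplified stand-in

  class name_expansion_like
  {
  public:
    int in_progress;                           // trivial type, so malloc is fine
  };

  int
  main ()
  {
    class name_expansion_like *exp = XNEW (class name_expansion_like);
    exp->in_progress = 1;                      // usage is unchanged by the rename
    int ok = exp->in_progress == 1;
    free (exp);
    return ok ? 0 : 1;
  }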
The coefficients are always sign extended from the precision of TYPE
(regardless of signedness of TYPE). */
- struct aff_comb_elt elts[MAX_AFF_ELTS];
+ class aff_comb_elt elts[MAX_AFF_ELTS];
/* Remainder of the expression. Usually NULL, used only if there are more
than MAX_AFF_ELTS elements. Type of REST will be either sizetype for
tree rest;
};
-struct name_expansion;
+class name_expansion;
void aff_combination_const (aff_tree *, tree, const poly_widest_int &);
void aff_combination_elt (aff_tree *, tree, tree);
come immediately before the condition in BB, if any. */
static void
-replace_loop_annotate_in_block (basic_block bb, struct loop *loop)
+replace_loop_annotate_in_block (basic_block bb, class loop *loop)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gimple *stmt = gsi_stmt (gsi);
static void
replace_loop_annotate (void)
{
- struct loop *loop;
+ class loop *loop;
basic_block bb;
gimple_stmt_iterator gsi;
gimple *stmt;
/* Also update the trees stored in loop structures. */
if (current_loops)
{
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, 0)
{
if (current_loops)
{
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
/* If a loop gets removed, clean up the information associated
with it. */
num_edges = 0;
FOR_EACH_BB_FN (bb, cfun)
num_edges += EDGE_COUNT (bb->succs);
- size = num_edges * sizeof (struct edge_def);
+ size = num_edges * sizeof (class edge_def);
total += size;
fprintf (file, fmt_str_2, "Edges", num_edges, SIZE_AMOUNT (size));
{
unsigned i;
bool free_region_copy = false, copying_header = false;
- struct loop *loop = entry->dest->loop_father;
+ class loop *loop = entry->dest->loop_father;
edge exit_copy;
vec<basic_block> doms = vNULL;
edge redirected;
{
unsigned i;
bool free_region_copy = false;
- struct loop *loop = exit->dest->loop_father;
- struct loop *orig_loop = entry->dest->loop_father;
+ class loop *loop = exit->dest->loop_father;
+ class loop *orig_loop = entry->dest->loop_father;
basic_block switch_bb, entry_bb, nentry_bb;
vec<basic_block> doms;
profile_count total_count = profile_count::uninitialized (),
gphi_iterator psi;
gphi *phi;
tree def;
- struct loop *target, *aloop, *cloop;
+ class loop *target, *aloop, *cloop;
gcc_assert (EDGE_COUNT (exit->src->succs) == 2);
exits[0] = exit;
/* Move BB from its current loop to the copy in the new function. */
if (current_loops)
{
- struct loop *new_loop = (struct loop *)bb->loop_father->aux;
+ class loop *new_loop = (class loop *)bb->loop_father->aux;
if (new_loop)
bb->loop_father = new_loop;
}
static void
fixup_loop_arrays_after_move (struct function *fn1, struct function *fn2,
- struct loop *loop)
+ class loop *loop)
{
/* Discard it from the old loop array. */
(*get_loops (fn1))[loop->num] = NULL;
edge_iterator ei;
htab_t new_label_map;
hash_map<void *, void *> *eh_map;
- struct loop *loop = entry_bb->loop_father;
- struct loop *loop0 = get_loop (saved_cfun, 0);
+ class loop *loop = entry_bb->loop_father;
+ class loop *loop0 = get_loop (saved_cfun, 0);
struct move_stmt_d d;
/* If ENTRY does not strictly dominate EXIT, this cannot be an SESE
{
if (bb->loop_father->header == bb)
{
- struct loop *this_loop = bb->loop_father;
- struct loop *outer = loop_outer (this_loop);
+ class loop *this_loop = bb->loop_father;
+ class loop *outer = loop_outer (this_loop);
if (outer == loop
/* If the SESE region contains some bbs ending with
a noreturn call, those are considered to belong
/* Fix up orig_loop_num. If the block referenced in it has been moved
to dest_cfun, update orig_loop_num field, otherwise clear it. */
- struct loop *dloop;
+ class loop *dloop;
signed char *moved_orig_loop_num = NULL;
FOR_EACH_LOOP_FN (dest_cfun, dloop, 0)
if (dloop->orig_loop_num)
loop0->aux = NULL;
/* Loop sizes are no longer correct, fix them up. */
loop->num_nodes -= num_nodes;
- for (struct loop *outer = loop_outer (loop);
+ for (class loop *outer = loop_outer (loop);
outer; outer = loop_outer (outer))
outer->num_nodes -= num_nodes;
loop0->num_nodes -= bbs.length () - num_nodes;
if (saved_cfun->has_simduid_loops || saved_cfun->has_force_vectorize_loops)
{
- struct loop *aloop;
+ class loop *aloop;
for (i = 0; vec_safe_iterate (loops->larray, i, &aloop); i++)
if (aloop != NULL)
{
}
}
-static void print_loop_and_siblings (FILE *, struct loop *, int, int);
+static void print_loop_and_siblings (FILE *, class loop *, int, int);
/* Pretty print LOOP on FILE, indented INDENT spaces. Following
VERBOSITY level this outputs the contents of the loop, or just its
structure. */
static void
-print_loop (FILE *file, struct loop *loop, int indent, int verbosity)
+print_loop (FILE *file, class loop *loop, int indent, int verbosity)
{
char *s_indent;
basic_block bb;
loop, or just its structure. */
static void
-print_loop_and_siblings (FILE *file, struct loop *loop, int indent,
+print_loop_and_siblings (FILE *file, class loop *loop, int indent,
int verbosity)
{
if (loop == NULL)
/* Dump a loop. */
DEBUG_FUNCTION void
-debug (struct loop &ref)
+debug (class loop &ref)
{
print_loop (stderr, &ref, 0, /*verbosity*/0);
}
DEBUG_FUNCTION void
-debug (struct loop *ptr)
+debug (class loop *ptr)
{
if (ptr)
debug (*ptr);
/* Dump a loop verbosely. */
DEBUG_FUNCTION void
-debug_verbose (struct loop &ref)
+debug_verbose (class loop &ref)
{
print_loop (stderr, &ref, 0, /*verbosity*/3);
}
DEBUG_FUNCTION void
-debug_verbose (struct loop *ptr)
+debug_verbose (class loop *ptr)
{
if (ptr)
debug (*ptr);
/* Print on stderr the code of LOOP, at some VERBOSITY level. */
DEBUG_FUNCTION void
-debug_loop (struct loop *loop, int verbosity)
+debug_loop (class loop *loop, int verbosity)
{
print_loop (stderr, loop, 0, verbosity);
}
extern void debug_function (tree, dump_flags_t);
extern void print_loops_bb (FILE *, basic_block, int, int);
extern void print_loops (FILE *, int);
-extern void debug (struct loop &ref);
-extern void debug (struct loop *ptr);
-extern void debug_verbose (struct loop &ref);
-extern void debug_verbose (struct loop *ptr);
+extern void debug (class loop &ref);
+extern void debug (class loop *ptr);
+extern void debug_verbose (class loop &ref);
+extern void debug_verbose (class loop *ptr);
extern void debug_loops (int);
-extern void debug_loop (struct loop *, int);
+extern void debug_loop (class loop *, int);
extern void debug_loop_num (unsigned, int);
extern void remove_edge_and_dominated_blocks (edge);
extern bool gimple_purge_dead_eh_edges (basic_block);
tree poly1)
{
tree left, right;
- struct loop *loop0 = get_chrec_loop (poly0);
- struct loop *loop1 = get_chrec_loop (poly1);
+ class loop *loop0 = get_chrec_loop (poly0);
+ class loop *loop1 = get_chrec_loop (poly1);
tree rtype = code == POINTER_PLUS_EXPR ? chrec_type (poly1) : type;
gcc_assert (poly0);
{
tree t0, t1, t2;
int var;
- struct loop *loop0 = get_chrec_loop (poly0);
- struct loop *loop1 = get_chrec_loop (poly1);
+ class loop *loop0 = get_chrec_loop (poly0);
+ class loop *loop1 = get_chrec_loop (poly1);
gcc_assert (poly0);
gcc_assert (poly1);
{
tree arg0, arg1, binomial_n_k;
tree type = TREE_TYPE (chrec);
- struct loop *var_loop = get_loop (cfun, var);
+ class loop *var_loop = get_loop (cfun, var);
while (TREE_CODE (chrec) == POLYNOMIAL_CHREC
&& flow_loop_nested_p (var_loop, get_chrec_loop (chrec)))
hide_evolution_in_other_loops_than_loop (tree chrec,
unsigned loop_num)
{
- struct loop *loop = get_loop (cfun, loop_num), *chloop;
+ class loop *loop = get_loop (cfun, loop_num), *chloop;
if (automatically_generated_chrec_p (chrec))
return chrec;
bool right)
{
tree component;
- struct loop *loop = get_loop (cfun, loop_num), *chloop;
+ class loop *loop = get_loop (cfun, loop_num), *chloop;
if (automatically_generated_chrec_p (chrec))
return chrec;
tree chrec,
tree new_evol)
{
- struct loop *loop = get_loop (cfun, loop_num);
+ class loop *loop = get_loop (cfun, loop_num);
if (POINTER_TYPE_P (chrec_type (chrec)))
gcc_assert (ptrofftype_p (chrec_type (new_evol)));
static bool
chrec_contains_symbols (const_tree chrec, hash_set<const_tree> &visited,
- struct loop *loop)
+ class loop *loop)
{
int i, n;
the chrec is considered as a SYMBOL. */
bool
-chrec_contains_symbols (const_tree chrec, struct loop* loop)
+chrec_contains_symbols (const_tree chrec, class loop* loop)
{
hash_set<const_tree> visited;
return chrec_contains_symbols (chrec, visited, loop);
the conversion succeeded, false otherwise. */
bool
-convert_affine_scev (struct loop *loop, tree type,
+convert_affine_scev (class loop *loop, tree type,
tree *base, tree *step, gimple *at_stmt,
bool use_overflow_semantics, tree from)
{
{
tree ct, res;
tree base, step;
- struct loop *loop;
+ class loop *loop;
if (automatically_generated_chrec_p (chrec))
return chrec;
if (!*fold_conversions && evolution_function_is_affine_p (chrec))
{
tree base, step;
- struct loop *loop;
+ class loop *loop;
loop = get_chrec_loop (chrec);
base = CHREC_LEFT (chrec);
extern tree reset_evolution_in_loop (unsigned, tree, tree);
extern tree chrec_merge (tree, tree);
extern void for_each_scev_op (tree *, bool (*) (tree *, void *), void *);
-extern bool convert_affine_scev (struct loop *, tree, tree *, tree *, gimple *,
+extern bool convert_affine_scev (class loop *, tree, tree *, tree *, gimple *,
bool, tree = NULL);
/* Observers. */
extern bool eq_evolutions_p (const_tree, const_tree);
extern bool is_multivariate_chrec (const_tree);
-extern bool chrec_contains_symbols (const_tree, struct loop * = NULL);
+extern bool chrec_contains_symbols (const_tree, class loop * = NULL);
extern bool chrec_contains_symbols_defined_in_loop (const_tree, unsigned);
extern bool chrec_contains_undetermined (const_tree);
extern bool tree_contains_chrecs (const_tree, int *);
/* Forward declaration, defined in target-globals.h. */
-struct GTY(()) target_globals;
+class GTY(()) target_globals;
/* Target options used by a function. */
struct tree_base base;
/* Target globals for the corresponding target option. */
- struct target_globals *globals;
+ class target_globals *globals;
/* The optimization options used by the user. */
struct cl_target_option *opts;
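The tree-core.h hunk shows that GTY(()) markers ride along: the annotation sits between the class-key and the type name, so annotated forward declarations are rewritten exactly like plain ones. A hypothetical sketch with GTY stubbed out (in GCC it drives gengtype's garbage-collector tables) and an invented type name:

  #define GTY(x)                               // stub; gengtype consumes this in GCC

  class GTY (()) target_globals_like;          // was: struct GTY (()) ...
  class GTY (()) target_globals_like { public: int regs = 0; };

  int
  main ()
  {
    target_globals_like g;
    return g.regs;
  }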
static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
unsigned int, unsigned int,
- struct loop *);
+ class loop *);
/* Returns true iff A divides B. */
static inline bool
else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
{
unsigned int i;
- struct loop *loopi;
+ class loop *loopi;
subscript *sub;
FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
opt_result
dr_analyze_innermost (innermost_loop_behavior *drb, tree ref,
- struct loop *loop, const gimple *stmt)
+ class loop *loop, const gimple *stmt)
{
poly_int64 pbitsize, pbitpos;
tree base, poffset;
check. */
opt_result
-runtime_alias_check_p (ddr_p ddr, struct loop *loop, bool speed_p)
+runtime_alias_check_p (ddr_p ddr, class loop *loop, bool speed_p)
{
if (dump_enabled_p ())
dump_printf (MSG_NOTE,
Note evolution step of index needs to be considered in comparison. */
static bool
-create_intersect_range_checks_index (struct loop *loop, tree *cond_expr,
+create_intersect_range_checks_index (class loop *loop, tree *cond_expr,
const dr_with_seg_len& dr_a,
const dr_with_seg_len& dr_b)
{
|| (DR_B_addr_0 + DR_B_segment_length_0) <= DR_A_addr_0)) */
static void
-create_intersect_range_checks (struct loop *loop, tree *cond_expr,
+create_intersect_range_checks (class loop *loop, tree *cond_expr,
const dr_with_seg_len& dr_a,
const dr_with_seg_len& dr_b)
{
that controls which version of the loop gets executed at runtime. */
void
-create_runtime_alias_checks (struct loop *loop,
+create_runtime_alias_checks (class loop *loop,
vec<dr_with_seg_len_pair_t> *alias_pairs,
tree * cond_expr)
{
/* Returns true if the address of OBJ is invariant in LOOP. */
static bool
-object_address_invariant_in_loop_p (const struct loop *loop, const_tree obj)
+object_address_invariant_in_loop_p (const class loop *loop, const_tree obj)
{
while (handled_component_p (obj))
{
bool
dr_may_alias_p (const struct data_reference *a, const struct data_reference *b,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
tree addr_a = DR_BASE_OBJECT (a);
tree addr_b = DR_BASE_OBJECT (b);
chrec_dont_know. */
static tree
-max_stmt_executions_tree (struct loop *loop)
+max_stmt_executions_tree (class loop *loop)
{
widest_int nit;
if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
{
HOST_WIDE_INT numiter;
- struct loop *loop = get_chrec_loop (chrec_b);
+ class loop *loop = get_chrec_loop (chrec_b);
*overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
tmp = fold_build2 (EXACT_DIV_EXPR, type,
if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
{
HOST_WIDE_INT numiter;
- struct loop *loop = get_chrec_loop (chrec_b);
+ class loop *loop = get_chrec_loop (chrec_b);
*overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
tmp = fold_build2 (EXACT_DIV_EXPR, type, difference,
conflict_function **overlaps_a,
conflict_function **overlaps_b,
tree *last_conflicts,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
tree type, difference;
tree chrec_b,
conflict_function **overlap_iterations_a,
conflict_function **overlap_iterations_b,
- tree *last_conflicts, struct loop *loop_nest)
+ tree *last_conflicts, class loop *loop_nest)
{
unsigned int lnn = loop_nest->num;
{
unsigned i;
lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
- struct loop *loop = DDR_LOOP_NEST (ddr)[0];
+ class loop *loop = DDR_LOOP_NEST (ddr)[0];
for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
{
unsigned i;
int index_carry = DDR_NB_LOOPS (ddr);
subscript *sub;
- struct loop *loop = DDR_LOOP_NEST (ddr)[0];
+ class loop *loop = DDR_LOOP_NEST (ddr)[0];
FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
{
static bool
build_classic_dist_vector (struct data_dependence_relation *ddr,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
bool init_b = false;
int index_carry = DDR_NB_LOOPS (ddr);
static bool
subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
unsigned int a_index, unsigned int b_index,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
unsigned int i;
tree last_conflicts;
static void
subscript_dependence_tester (struct data_dependence_relation *ddr,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
if (subscript_dependence_tester_1 (ddr, 0, 1, loop_nest))
dependence_stats.num_dependence_dependent++;
static bool
access_functions_are_affine_or_constant_p (const struct data_reference *a,
- const struct loop *loop_nest)
+ const class loop *loop_nest)
{
unsigned int i;
vec<tree> fns = DR_ACCESS_FNS (a);
void
compute_affine_dependence (struct data_dependence_relation *ddr,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
{
case IFN_GOMP_SIMD_LANE:
{
- struct loop *loop = gimple_bb (stmt)->loop_father;
+ class loop *loop = gimple_bb (stmt)->loop_father;
tree uid = gimple_call_arg (stmt, 0);
gcc_assert (TREE_CODE (uid) == SSA_NAME);
if (loop == NULL
loop of the loop nest in which the references should be analyzed. */
opt_result
-find_data_references_in_stmt (struct loop *nest, gimple *stmt,
+find_data_references_in_stmt (class loop *nest, gimple *stmt,
vec<data_reference_p> *datarefs)
{
unsigned i;
difficult case, returns NULL_TREE otherwise. */
tree
-find_data_references_in_bb (struct loop *loop, basic_block bb,
+find_data_references_in_bb (class loop *loop, basic_block bb,
vec<data_reference_p> *datarefs)
{
gimple_stmt_iterator bsi;
arithmetic as if they were array accesses, etc. */
tree
-find_data_references_in_loop (struct loop *loop,
+find_data_references_in_loop (class loop *loop,
vec<data_reference_p> *datarefs)
{
basic_block bb, *bbs;
/* Recursive helper function. */
static bool
-find_loop_nest_1 (struct loop *loop, vec<loop_p> *loop_nest)
+find_loop_nest_1 (class loop *loop, vec<loop_p> *loop_nest)
{
/* Inner loops of the nest should not contain siblings. Example:
when there are two consecutive loops,
appear in the classic distance vector. */
bool
-find_loop_nest (struct loop *loop, vec<loop_p> *loop_nest)
+find_loop_nest (class loop *loop, vec<loop_p> *loop_nest)
{
loop_nest->safe_push (loop);
if (loop->inner)
COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE. */
bool
-compute_data_dependences_for_loop (struct loop *loop,
+compute_data_dependences_for_loop (class loop *loop,
bool compute_self_and_read_read_dependences,
vec<loop_p> *loop_nest,
vec<data_reference_p> *datarefs,
\f
opt_result dr_analyze_innermost (innermost_loop_behavior *, tree,
- struct loop *, const gimple *);
-extern bool compute_data_dependences_for_loop (struct loop *, bool,
+ class loop *, const gimple *);
+extern bool compute_data_dependences_for_loop (class loop *, bool,
vec<loop_p> *,
vec<data_reference_p> *,
vec<ddr_p> *);
extern void free_dependence_relations (vec<ddr_p> );
extern void free_data_ref (data_reference_p);
extern void free_data_refs (vec<data_reference_p> );
-extern opt_result find_data_references_in_stmt (struct loop *, gimple *,
+extern opt_result find_data_references_in_stmt (class loop *, gimple *,
vec<data_reference_p> *);
extern bool graphite_find_data_references_in_stmt (edge, loop_p, gimple *,
vec<data_reference_p> *);
-tree find_data_references_in_loop (struct loop *, vec<data_reference_p> *);
+tree find_data_references_in_loop (class loop *, vec<data_reference_p> *);
bool loop_nest_has_data_refs (loop_p loop);
struct data_reference *create_data_ref (edge, loop_p, tree, gimple *, bool,
bool);
-extern bool find_loop_nest (struct loop *, vec<loop_p> *);
+extern bool find_loop_nest (class loop *, vec<loop_p> *);
extern struct data_dependence_relation *initialize_data_dependence_relation
(struct data_reference *, struct data_reference *, vec<loop_p>);
extern void compute_affine_dependence (struct data_dependence_relation *,
extern bool compute_all_dependences (vec<data_reference_p> ,
vec<ddr_p> *,
vec<loop_p>, bool);
-extern tree find_data_references_in_bb (struct loop *, basic_block,
+extern tree find_data_references_in_bb (class loop *, basic_block,
vec<data_reference_p> *);
extern unsigned int dr_alignment (innermost_loop_behavior *);
extern tree get_base_for_alignment (tree, unsigned int *);
}
extern bool dr_may_alias_p (const struct data_reference *,
- const struct data_reference *, struct loop *);
+ const struct data_reference *, class loop *);
extern bool dr_equal_offsets_p (struct data_reference *,
struct data_reference *);
-extern opt_result runtime_alias_check_p (ddr_p, struct loop *, bool);
+extern opt_result runtime_alias_check_p (ddr_p, class loop *, bool);
extern int data_ref_compare_tree (tree, tree);
extern void prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *,
poly_uint64);
-extern void create_runtime_alias_checks (struct loop *,
+extern void create_runtime_alias_checks (class loop *,
vec<dr_with_seg_len_pair_t> *, tree*);
extern tree dr_direction_indicator (struct data_reference *);
extern tree dr_zero_step_indicator (struct data_reference *);
static inline int
index_in_loop_nest (int var, vec<loop_p> loop_nest)
{
- struct loop *loopi;
+ class loop *loopi;
int var_index;
for (var_index = 0; loop_nest.iterate (var_index, &loopi); var_index++)
cd-equivalent if they are executed under the same condition. */
static inline void
-add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
+add_to_predicate_list (class loop *loop, basic_block bb, tree nc)
{
tree bc, *tp;
basic_block dom_bb;
the loop to be if-converted. */
static void
-add_to_dst_predicate_list (struct loop *loop, edge e,
+add_to_dst_predicate_list (class loop *loop, edge e,
tree prev_cond, tree cond)
{
if (!flow_bb_inside_loop_p (loop, e->dest))
/* Return true if one of the successor edges of BB exits LOOP. */
static bool
-bb_with_exit_edge_p (struct loop *loop, basic_block bb)
+bb_with_exit_edge_p (class loop *loop, basic_block bb)
{
edge e;
edge_iterator ei;
ANY_COMPLICATED_PHI if PHI is complicated. */
static bool
-if_convertible_phi_p (struct loop *loop, basic_block bb, gphi *phi)
+if_convertible_phi_p (class loop *loop, basic_block bb, gphi *phi)
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
widest_int niter, valid_niter, delta, wi_step;
tree ev, init, step;
tree low, high;
- struct loop *loop = (struct loop*) dta;
+ class loop *loop = (class loop*) dta;
/* Only support within-bound access for array references. */
if (TREE_CODE (ref) != ARRAY_REF)
static bool
ref_within_array_bound (gimple *stmt, tree ref)
{
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
gcc_assert (loop != NULL);
return for_each_index (&ref, idx_within_array_bound, loop);
inside LOOP. */
static bool
-if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
+if_convertible_bb_p (class loop *loop, basic_block bb, basic_block exit_bb)
{
edge e;
edge_iterator ei;
predecessors are already selected. */
static basic_block *
-get_loop_body_in_if_conv_order (const struct loop *loop)
+get_loop_body_in_if_conv_order (const class loop *loop)
{
basic_block *blocks, *blocks_in_bfs_order;
basic_block bb;
/* Build region by adding loop pre-header and post-header blocks. */
static vec<basic_block>
-build_region (struct loop *loop)
+build_region (class loop *loop)
{
vec<basic_block> region = vNULL;
basic_block exit_bb = NULL;
in if_convertible_loop_p. */
static bool
-if_convertible_loop_p_1 (struct loop *loop, vec<data_reference_p> *refs)
+if_convertible_loop_p_1 (class loop *loop, vec<data_reference_p> *refs)
{
unsigned int i;
basic_block exit_bb = NULL;
- if its basic blocks and phi nodes are if convertible. */
static bool
-if_convertible_loop_p (struct loop *loop)
+if_convertible_loop_p (class loop *loop)
{
edge e;
edge_iterator ei;
gimple *header_phi = NULL;
enum tree_code reduction_op;
basic_block bb = gimple_bb (phi);
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
edge latch_e = loop_latch_edge (loop);
imm_use_iterator imm_iter;
use_operand_p use_p;
LOOP->header block with conditional modify expressions. */
static void
-predicate_all_scalar_phis (struct loop *loop)
+predicate_all_scalar_phis (class loop *loop)
{
basic_block bb;
unsigned int orig_loop_num_nodes = loop->num_nodes;
blocks. Replace PHI nodes with conditional modify expressions. */
static void
-combine_blocks (struct loop *loop)
+combine_blocks (class loop *loop)
{
basic_block bb, exit_bb, merge_target_bb;
unsigned int orig_loop_num_nodes = loop->num_nodes;
out of LOOP_VECTORIZED must have 100% probability so the profile remains
consistent after the condition is folded in the vectorizer. */
-static struct loop *
-version_loop_for_if_conversion (struct loop *loop, vec<gimple *> *preds)
+static class loop *
+version_loop_for_if_conversion (class loop *loop, vec<gimple *> *preds)
{
basic_block cond_bb;
tree cond = make_ssa_name (boolean_type_node);
- struct loop *new_loop;
+ class loop *new_loop;
gimple *g;
gimple_stmt_iterator gsi;
unsigned int save_length;
inner loop's exit block. */
static bool
-versionable_outer_loop_p (struct loop *loop)
+versionable_outer_loop_p (class loop *loop)
{
if (!loop_outer (loop)
|| loop->dont_vectorize
Last restriction is valid only if AGGRESSIVE_IF_CONV is false. */
static bool
-ifcvt_split_critical_edges (struct loop *loop, bool aggressive_if_conv)
+ifcvt_split_critical_edges (class loop *loop, bool aggressive_if_conv)
{
basic_block *body;
basic_block bb;
changed. */
unsigned int
-tree_if_conversion (struct loop *loop, vec<gimple *> *preds)
+tree_if_conversion (class loop *loop, vec<gimple *> *preds)
{
unsigned int todo = 0;
bool aggressive_if_conv;
- struct loop *rloop;
+ class loop *rloop;
bitmap exit_bbs;
again:
aggressive_if_conv = loop->force_vectorize;
if (!aggressive_if_conv)
{
- struct loop *outer_loop = loop_outer (loop);
+ class loop *outer_loop = loop_outer (loop);
if (outer_loop && outer_loop->force_vectorize)
aggressive_if_conv = true;
}
|| any_complicated_phi
|| flag_tree_loop_if_convert != 1)
{
- struct loop *vloop
+ class loop *vloop
= (versionable_outer_loop_p (loop_outer (loop))
? loop_outer (loop) : loop);
- struct loop *nloop = version_loop_for_if_conversion (vloop, preds);
+ class loop *nloop = version_loop_for_if_conversion (vloop, preds);
if (nloop == NULL)
goto cleanup;
if (vloop != loop)
unsigned int
pass_if_conversion::execute (function *fun)
{
- struct loop *loop;
+ class loop *loop;
unsigned todo = 0;
if (number_of_loops (fun) <= 1)
#ifndef GCC_TREE_IF_CONV_H
#define GCC_TREE_IF_CONV_H
-unsigned int tree_if_conversion (struct loop *, vec<gimple *> * = NULL);
+unsigned int tree_if_conversion (class loop *, vec<gimple *> * = NULL);
#endif /* GCC_TREE_IF_CONV_H */
static void
copy_loops (copy_body_data *id,
- struct loop *dest_parent, struct loop *src_parent)
+ class loop *dest_parent, class loop *src_parent)
{
- struct loop *src_loop = src_parent->inner;
+ class loop *src_loop = src_parent->inner;
while (src_loop)
{
if (!id->blocks_to_copy
|| bitmap_bit_p (id->blocks_to_copy, src_loop->header->index))
{
- struct loop *dest_loop = alloc_loop ();
+ class loop *dest_loop = alloc_loop ();
/* Assign the new loop its header and latch and associate
those with the new loop. */
statements in loop copies. */
static void
-stmts_from_loop (struct loop *loop, vec<gimple *> *stmts)
+stmts_from_loop (class loop *loop, vec<gimple *> *stmts)
{
unsigned int i;
basic_block *bbs = get_loop_body_in_custom_order (loop, bb_top_order_cmp);
collected and recorded in global data DATAREFS_VEC. */
static struct graph *
-build_rdg (struct loop *loop, control_dependences *cd)
+build_rdg (class loop *loop, control_dependences *cd)
{
struct graph *rdg;
/* Return a copy of LOOP placed before LOOP. */
-static struct loop *
-copy_loop_before (struct loop *loop)
+static class loop *
+copy_loop_before (class loop *loop)
{
- struct loop *res;
+ class loop *res;
edge preheader = loop_preheader_edge (loop);
initialize_original_copy_tables ();
/* Creates an empty basic block after LOOP. */
static void
-create_bb_after_loop (struct loop *loop)
+create_bb_after_loop (class loop *loop)
{
edge exit = single_exit (loop);
basic blocks of a loop are taken in dom order. */
static void
-generate_loops_for_partition (struct loop *loop, partition *partition,
+generate_loops_for_partition (class loop *loop, partition *partition,
bool copy_p)
{
unsigned i;
/* Generate a call to memset for PARTITION in LOOP. */
static void
-generate_memset_builtin (struct loop *loop, partition *partition)
+generate_memset_builtin (class loop *loop, partition *partition)
{
gimple_stmt_iterator gsi;
tree mem, fn, nb_bytes;
/* Generate a call to memcpy for PARTITION in LOOP. */
static void
-generate_memcpy_builtin (struct loop *loop, partition *partition)
+generate_memcpy_builtin (class loop *loop, partition *partition)
{
gimple_stmt_iterator gsi;
gimple *fn_call;
/* Remove and destroy the loop LOOP. */
static void
-destroy_loop (struct loop *loop)
+destroy_loop (class loop *loop)
{
unsigned nbbs = loop->num_nodes;
edge exit = single_exit (loop);
/* Generates code for PARTITION. Return whether LOOP needs to be destroyed. */
static bool
-generate_code_for_partition (struct loop *loop,
+generate_code_for_partition (class loop *loop,
partition *partition, bool copy_p)
{
switch (partition->kind)
data references. */
static bool
-find_single_drs (struct loop *loop, struct graph *rdg, partition *partition,
+find_single_drs (class loop *loop, struct graph *rdg, partition *partition,
data_reference_p *dst_dr, data_reference_p *src_dr)
{
unsigned i;
{
location_t loc = gimple_location (DR_STMT (dr));
basic_block bb = gimple_bb (DR_STMT (dr));
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
tree ref = DR_REF (dr);
tree access_base = build_fold_addr_expr (ref);
tree access_size = TYPE_SIZE_UNIT (TREE_TYPE (ref));
DR. */
static inline bool
-latch_dominated_by_data_ref (struct loop *loop, data_reference *dr)
+latch_dominated_by_data_ref (class loop *loop, data_reference *dr)
{
return dominated_by_p (CDI_DOMINATORS, single_exit (loop)->src,
gimple_bb (DR_STMT (dr)));
data dependence relations ALIAS_DDRS. */
static void
-compute_alias_check_pairs (struct loop *loop, vec<ddr_p> *alias_ddrs,
+compute_alias_check_pairs (class loop *loop, vec<ddr_p> *alias_ddrs,
vec<dr_with_seg_len_pair_t> *comp_alias_pairs)
{
unsigned int i;
static void
version_loop_by_alias_check (vec<struct partition *> *partitions,
- struct loop *loop, vec<ddr_p> *alias_ddrs)
+ class loop *loop, vec<ddr_p> *alias_ddrs)
{
profile_probability prob;
basic_block cond_bb;
- struct loop *nloop;
+ class loop *nloop;
tree lhs, arg0, cond_expr = NULL_TREE;
gimple_seq cond_stmts = NULL;
gimple *call_stmt = NULL;
ALIAS_DDRS contains ddrs which need runtime alias check. */
static void
-finalize_partitions (struct loop *loop, vec<struct partition *> *partitions,
+finalize_partitions (class loop *loop, vec<struct partition *> *partitions,
vec<ddr_p> *alias_ddrs)
{
unsigned i;
Set *DESTROY_P to whether LOOP needs to be destroyed. */
static int
-distribute_loop (struct loop *loop, vec<gimple *> stmts,
+distribute_loop (class loop *loop, vec<gimple *> stmts,
control_dependences *cd, int *nb_calls, bool *destroy_p,
bool only_patterns_p)
{
WORK_LIST. Return false if there is nothing for distribution. */
static bool
-find_seed_stmts_for_distribution (struct loop *loop, vec<gimple *> *work_list)
+find_seed_stmts_for_distribution (class loop *loop, vec<gimple *> *work_list)
{
basic_block *bbs = get_loop_body_in_dom_order (loop);
/* Given innermost LOOP, return the outermost enclosing loop that forms a
perfect loop nest. */
-static struct loop *
-prepare_perfect_loop_nest (struct loop *loop)
+static class loop *
+prepare_perfect_loop_nest (class loop *loop)
{
- struct loop *outer = loop_outer (loop);
+ class loop *outer = loop_outer (loop);
tree niters = number_of_latch_executions (loop);
/* TODO: We only support the innermost 3-level loop nest distribution
unsigned int
pass_loop_distribution::execute (function *fun)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
basic_block bb;
control_dependences *cd = NULL;
in parallel). */
static bool
-loop_parallel_p (struct loop *loop, struct obstack * parloop_obstack)
+loop_parallel_p (class loop *loop, struct obstack * parloop_obstack)
{
vec<ddr_p> dependence_relations;
vec<data_reference_p> datarefs;
BB_IRREDUCIBLE_LOOP flag. */
static inline bool
-loop_has_blocks_with_irreducible_flag (struct loop *loop)
+loop_has_blocks_with_irreducible_flag (class loop *loop)
{
unsigned i;
basic_block *bbs = get_loop_body_in_dom_order (loop);
the loop described in DATA. */
int
-initialize_reductions (reduction_info **slot, struct loop *loop)
+initialize_reductions (reduction_info **slot, class loop *loop)
{
tree init;
tree type, arg;
reduction's data structure. */
int
-create_phi_for_local_result (reduction_info **slot, struct loop *loop)
+create_phi_for_local_result (reduction_info **slot, class loop *loop)
{
struct reduction_info *const reduc = *slot;
edge e;
LD_ST_DATA describes the shared data structure where
shared data is stored in and loaded from. */
static void
-create_call_for_reduction (struct loop *loop,
+create_call_for_reduction (class loop *loop,
reduction_info_table_type *reduction_list,
struct clsn_data *ld_st_data)
{
- reduction_list->traverse <struct loop *, create_phi_for_local_result> (loop);
+ reduction_list->traverse <class loop *, create_phi_for_local_result> (loop);
/* Find the fallthru edge from GIMPLE_OMP_CONTINUE. */
basic_block continue_bb = single_pred (loop->latch);
ld_st_data->load_bb = FALLTHRU_EDGE (continue_bb)->dest;
bound. */
static void
-transform_to_exit_first_loop_alt (struct loop *loop,
+transform_to_exit_first_loop_alt (class loop *loop,
reduction_info_table_type *reduction_list,
tree bound)
{
transformation is successful. */
static bool
-try_transform_to_exit_first_loop_alt (struct loop *loop,
+try_transform_to_exit_first_loop_alt (class loop *loop,
reduction_info_table_type *reduction_list,
tree nit)
{
LOOP. */
static void
-transform_to_exit_first_loop (struct loop *loop,
+transform_to_exit_first_loop (class loop *loop,
reduction_info_table_type *reduction_list,
tree nit)
{
that number is to be determined later. */
static void
-create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
+create_parallel_loop (class loop *loop, tree loop_fn, tree data,
tree new_data, unsigned n_threads, location_t loc,
bool oacc_kernels_p)
{
REDUCTION_LIST describes the reductions existent in the LOOP. */
static void
-gen_parallel_loop (struct loop *loop,
+gen_parallel_loop (class loop *loop,
reduction_info_table_type *reduction_list,
- unsigned n_threads, struct tree_niter_desc *niter,
+ unsigned n_threads, class tree_niter_desc *niter,
bool oacc_kernels_p)
{
tree many_iterations_cond, type, nit;
/* Generate initializations for reductions. */
if (!reduction_list->is_empty ())
- reduction_list->traverse <struct loop *, initialize_reductions> (loop);
+ reduction_list->traverse <class loop *, initialize_reductions> (loop);
/* Eliminate the references to local variables from the loop. */
gcc_assert (single_exit (loop));
/* Returns true when LOOP contains vector phi nodes. */
static bool
-loop_has_vector_phi_nodes (struct loop *loop ATTRIBUTE_UNUSED)
+loop_has_vector_phi_nodes (class loop *loop ATTRIBUTE_UNUSED)
{
unsigned i;
basic_block *bbs = get_loop_body_in_dom_order (loop);
/* Try to initialize NITER for code generation part. */
static bool
-try_get_loop_niter (loop_p loop, struct tree_niter_desc *niter)
+try_get_loop_niter (loop_p loop, class tree_niter_desc *niter)
{
edge exit = single_dom_exit (loop);
and return addr. Otherwise, return NULL_TREE. */
static tree
-find_reduc_addr (struct loop *loop, gphi *phi)
+find_reduc_addr (class loop *loop, gphi *phi)
{
edge e = loop_preheader_edge (loop);
tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e);
/* Return true if LOOP contains phis with ADDR_EXPR in args. */
static bool
-loop_has_phi_with_address_arg (struct loop *loop)
+loop_has_phi_with_address_arg (class loop *loop)
{
basic_block *bbs = get_loop_body (loop);
bool res = false;
outside LOOP by guarding them such that only a single gang executes them. */
static bool
-oacc_entry_exit_ok (struct loop *loop,
+oacc_entry_exit_ok (class loop *loop,
reduction_info_table_type *reduction_list)
{
basic_block *loop_bbs = get_loop_body_in_dom_order (loop);
{
unsigned n_threads;
bool changed = false;
- struct loop *loop;
- struct loop *skip_loop = NULL;
- struct tree_niter_desc niter_desc;
+ class loop *loop;
+ class loop *skip_loop = NULL;
+ class tree_niter_desc niter_desc;
struct obstack parloop_obstack;
HOST_WIDE_INT estimated;
}
};
-class varpool_node;
+struct varpool_node;
struct cgraph_node;
struct lto_symtab_encoder_d;
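The varpool_node hunk runs in the opposite direction: the definition uses the struct key, so here the class forward declaration was the mismatched one and is flipped back. A hypothetical sketch of this reverse case, with an invented name:

  struct node_like;                            // was: class node_like;
  struct node_like { int order; };

  static int
  order_of (struct node_like *n)
  {
    return n->order;
  }

  int
  main ()
  {
    node_like n = { 7 };
    return order_of (&n) - 7;                  // 0 on success
  }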
it is executed whenever the loop is entered. */
static basic_block
-last_always_executed_block (struct loop *loop)
+last_always_executed_block (class loop *loop)
{
unsigned i;
vec<edge> exits = get_loop_exit_edges (loop);
/* Splits dependence graph on DATAREFS described by DEPENDS to components. */
static struct component *
-split_data_refs_to_components (struct loop *loop,
+split_data_refs_to_components (class loop *loop,
vec<data_reference_p> datarefs,
vec<ddr_p> depends)
{
comps[ca] = comp;
}
- dataref = XCNEW (struct dref_d);
+ dataref = XCNEW (class dref_d);
dataref->ref = dr;
dataref->stmt = DR_STMT (dr);
dataref->offset = 0;
loop. */
static bool
-suitable_component_p (struct loop *loop, struct component *comp)
+suitable_component_p (class loop *loop, struct component *comp)
{
unsigned i;
dref a, first;
the beginning of this file. LOOP is the current loop. */
static struct component *
-filter_suitable_components (struct loop *loop, struct component *comps)
+filter_suitable_components (class loop *loop, struct component *comps)
{
struct component **comp, *act;
is the root of the current chain. */
static gphi *
-find_looparound_phi (struct loop *loop, dref ref, dref root)
+find_looparound_phi (class loop *loop, dref ref, dref root)
{
tree name, init, init_ref;
gphi *phi = NULL;
static void
insert_looparound_copy (chain_p chain, dref ref, gphi *phi)
{
- dref nw = XCNEW (struct dref_d), aref;
+ dref nw = XCNEW (class dref_d), aref;
unsigned i;
nw->stmt = phi;
(also, it may allow us to combine chains together). */
static void
-add_looparound_copies (struct loop *loop, chain_p chain)
+add_looparound_copies (class loop *loop, chain_p chain)
{
unsigned i;
dref ref, root = get_chain_root (chain);
loop. */
static void
-determine_roots_comp (struct loop *loop,
+determine_roots_comp (class loop *loop,
struct component *comp,
vec<chain_p> *chains)
{
separates the references to CHAINS. LOOP is the current loop. */
static void
-determine_roots (struct loop *loop,
+determine_roots (class loop *loop,
struct component *comps, vec<chain_p> *chains)
{
struct component *comp;
temporary variables are marked in TMP_VARS. */
static void
-initialize_root_vars (struct loop *loop, chain_p chain, bitmap tmp_vars)
+initialize_root_vars (class loop *loop, chain_p chain, bitmap tmp_vars)
{
unsigned i;
unsigned n = chain->length;
In this case, we can use these invariant values directly after LOOP. */
static bool
-is_inv_store_elimination_chain (struct loop *loop, chain_p chain)
+is_inv_store_elimination_chain (class loop *loop, chain_p chain)
{
if (chain->length == 0 || chain->type != CT_STORE_STORE)
return false;
of the newly created root variables are marked in TMP_VARS. */
static void
-initialize_root_vars_store_elim_2 (struct loop *loop,
+initialize_root_vars_store_elim_2 (class loop *loop,
chain_p chain, bitmap tmp_vars)
{
unsigned i, n = chain->length;
(CHAIN->length - 1) iterations. */
static void
-finalize_eliminated_stores (struct loop *loop, chain_p chain)
+finalize_eliminated_stores (class loop *loop, chain_p chain)
{
unsigned i, n = chain->length;
initializer. */
static void
-initialize_root_vars_lm (struct loop *loop, dref root, bool written,
+initialize_root_vars_lm (class loop *loop, dref root, bool written,
vec<tree> *vars, vec<tree> inits,
bitmap tmp_vars)
{
created temporary variables are marked in TMP_VARS. */
static void
-execute_load_motion (struct loop *loop, chain_p chain, bitmap tmp_vars)
+execute_load_motion (class loop *loop, chain_p chain, bitmap tmp_vars)
{
auto_vec<tree> vars;
dref a;
Uids of the newly created temporary variables are marked in TMP_VARS. */
static void
-execute_pred_commoning_chain (struct loop *loop, chain_p chain,
+execute_pred_commoning_chain (class loop *loop, chain_p chain,
bitmap tmp_vars)
{
unsigned i;
Uids of the newly created temporary variables are marked in TMP_VARS. */
static void
-execute_pred_commoning (struct loop *loop, vec<chain_p> chains,
+execute_pred_commoning (class loop *loop, vec<chain_p> chains,
bitmap tmp_vars)
{
chain_p chain;
};
static void
-execute_pred_commoning_cbck (struct loop *loop, void *data)
+execute_pred_commoning_cbck (class loop *loop, void *data)
{
struct epcc_data *const dta = (struct epcc_data *) data;
the header of the LOOP. */
static void
-base_names_in_chain_on (struct loop *loop, tree name, tree var)
+base_names_in_chain_on (class loop *loop, tree name, tree var)
{
gimple *stmt, *phi;
imm_use_iterator iter;
for those we want to perform this. */
static void
-eliminate_temp_copies (struct loop *loop, bitmap tmp_vars)
+eliminate_temp_copies (class loop *loop, bitmap tmp_vars)
{
edge e;
gphi *phi;
for (i = 0; (ch1->refs.iterate (i, &r1)
&& ch2->refs.iterate (i, &r2)); i++)
{
- nw = XCNEW (struct dref_d);
+ nw = XCNEW (class dref_d);
nw->stmt = stmt_combining_refs (r1, r2);
nw->distance = r1->distance;
/* Try to combine the CHAINS in LOOP. */
static void
-try_combine_chains (struct loop *loop, vec<chain_p> *chains)
+try_combine_chains (class loop *loop, vec<chain_p> *chains)
{
unsigned i, j;
chain_p ch1, ch2, cch;
otherwise. */
static bool
-prepare_initializers_chain_store_elim (struct loop *loop, chain_p chain)
+prepare_initializers_chain_store_elim (class loop *loop, chain_p chain)
{
unsigned i, n = chain->length;
impossible because one of these initializers may trap, true otherwise. */
static bool
-prepare_initializers_chain (struct loop *loop, chain_p chain)
+prepare_initializers_chain (class loop *loop, chain_p chain)
{
unsigned i, n = (chain->type == CT_INVARIANT) ? 1 : chain->length;
struct data_reference *dr = get_chain_root (chain)->ref;
be used because the initializers might trap. */
static void
-prepare_initializers (struct loop *loop, vec<chain_p> chains)
+prepare_initializers (class loop *loop, vec<chain_p> chains)
{
chain_p chain;
unsigned i;
if finalizer code for CHAIN can be generated, otherwise false. */
static bool
-prepare_finalizers_chain (struct loop *loop, chain_p chain)
+prepare_finalizers_chain (class loop *loop, chain_p chain)
{
unsigned i, n = chain->length;
struct data_reference *dr = get_chain_root (chain)->ref;
if finalizer code generation for CHAINS breaks loop closed ssa form. */
static bool
-prepare_finalizers (struct loop *loop, vec<chain_p> chains)
+prepare_finalizers (class loop *loop, vec<chain_p> chains)
{
chain_p chain;
unsigned i;
/* Insert all initializing gimple stmts into loop's entry edge. */
static void
-insert_init_seqs (struct loop *loop, vec<chain_p> chains)
+insert_init_seqs (class loop *loop, vec<chain_p> chains)
{
unsigned i;
edge entry = loop_preheader_edge (loop);
form was corrupted. */
static unsigned
-tree_predictive_commoning_loop (struct loop *loop)
+tree_predictive_commoning_loop (class loop *loop)
{
vec<data_reference_p> datarefs;
vec<ddr_p> dependences;
struct component *components;
vec<chain_p> chains = vNULL;
unsigned unroll_factor;
- struct tree_niter_desc desc;
+ class tree_niter_desc desc;
bool unroll = false, loop_closed_ssa = false;
edge exit;
unsigned
tree_predictive_commoning (void)
{
- struct loop *loop;
+ class loop *loop;
unsigned ret = 0, changed = 0;
initialize_original_copy_tables ();
#include "builtins.h"
#include "case-cfn-macros.h"
-static tree analyze_scalar_evolution_1 (struct loop *, tree);
-static tree analyze_scalar_evolution_for_address_of (struct loop *loop,
+static tree analyze_scalar_evolution_1 (class loop *, tree);
+static tree analyze_scalar_evolution_for_address_of (class loop *loop,
tree var);
/* The cached information about an SSA name with version NAME_VERSION,
*/
tree
-compute_overall_effect_of_inner_loop (struct loop *loop, tree evolution_fn)
+compute_overall_effect_of_inner_loop (class loop *loop, tree evolution_fn)
{
bool val = false;
else if (TREE_CODE (evolution_fn) == POLYNOMIAL_CHREC)
{
- struct loop *inner_loop = get_chrec_loop (evolution_fn);
+ class loop *inner_loop = get_chrec_loop (evolution_fn);
if (inner_loop == loop
|| flow_loop_nested_p (loop, inner_loop))
gimple *at_stmt)
{
tree type, left, right;
- struct loop *loop = get_loop (cfun, loop_nb), *chloop;
+ class loop *loop = get_loop (cfun, loop_nb), *chloop;
switch (TREE_CODE (chrec_before))
{
analyze, then give up. */
gcond *
-get_loop_exit_condition (const struct loop *loop)
+get_loop_exit_condition (const class loop *loop)
{
gcond *res = NULL;
edge exit_edge = single_exit (loop);
};
-static t_bool follow_ssa_edge (struct loop *loop, gimple *, gphi *,
+static t_bool follow_ssa_edge (class loop *loop, gimple *, gphi *,
tree *, int);
/* Follow the ssa edge into the binary expression RHS0 CODE RHS1.
Return true if the strongly connected component has been found. */
static t_bool
-follow_ssa_edge_binary (struct loop *loop, gimple *at_stmt,
+follow_ssa_edge_binary (class loop *loop, gimple *at_stmt,
tree type, tree rhs0, enum tree_code code, tree rhs1,
gphi *halting_phi, tree *evolution_of_loop,
int limit)
Return true if the strongly connected component has been found. */
static t_bool
-follow_ssa_edge_expr (struct loop *loop, gimple *at_stmt, tree expr,
+follow_ssa_edge_expr (class loop *loop, gimple *at_stmt, tree expr,
gphi *halting_phi, tree *evolution_of_loop,
int limit)
{
Return true if the strongly connected component has been found. */
static t_bool
-follow_ssa_edge_in_rhs (struct loop *loop, gimple *stmt,
+follow_ssa_edge_in_rhs (class loop *loop, gimple *stmt,
gphi *halting_phi, tree *evolution_of_loop,
int limit)
{
static inline t_bool
follow_ssa_edge_in_condition_phi_branch (int i,
- struct loop *loop,
+ class loop *loop,
gphi *condition_phi,
gphi *halting_phi,
tree *evolution_of_branch,
loop. */
static t_bool
-follow_ssa_edge_in_condition_phi (struct loop *loop,
+follow_ssa_edge_in_condition_phi (class loop *loop,
gphi *condition_phi,
gphi *halting_phi,
tree *evolution_of_loop, int limit)
considered as a single statement. */
static t_bool
-follow_ssa_edge_inner_loop_phi (struct loop *outer_loop,
+follow_ssa_edge_inner_loop_phi (class loop *outer_loop,
gphi *loop_phi_node,
gphi *halting_phi,
tree *evolution_of_loop, int limit)
{
- struct loop *loop = loop_containing_stmt (loop_phi_node);
+ class loop *loop = loop_containing_stmt (loop_phi_node);
tree ev = analyze_scalar_evolution (loop, PHI_RESULT (loop_phi_node));
/* Sometimes, the inner loop is too difficult to analyze, and the
path that is analyzed on the return walk. */
static t_bool
-follow_ssa_edge (struct loop *loop, gimple *def, gphi *halting_phi,
+follow_ssa_edge (class loop *loop, gimple *def, gphi *halting_phi,
tree *evolution_of_loop, int limit)
{
- struct loop *def_loop;
+ class loop *def_loop;
if (gimple_nop_p (def))
return t_false;
See PR41488. */
static tree
-simplify_peeled_chrec (struct loop *loop, tree arg, tree init_cond)
+simplify_peeled_chrec (class loop *loop, tree arg, tree init_cond)
{
aff_tree aff1, aff2;
tree ev, left, right, type, step_val;
{
int i, n = gimple_phi_num_args (loop_phi_node);
tree evolution_function = chrec_not_analyzed_yet;
- struct loop *loop = loop_containing_stmt (loop_phi_node);
+ class loop *loop = loop_containing_stmt (loop_phi_node);
basic_block bb;
static bool simplify_peeled_chrec_p = true;
{
int i, n;
tree init_cond = chrec_not_analyzed_yet;
- struct loop *loop = loop_containing_stmt (loop_phi_node);
+ class loop *loop = loop_containing_stmt (loop_phi_node);
if (dump_file && (dump_flags & TDF_SCEV))
{
/* Analyze the scalar evolution for LOOP_PHI_NODE. */
static tree
-interpret_loop_phi (struct loop *loop, gphi *loop_phi_node)
+interpret_loop_phi (class loop *loop, gphi *loop_phi_node)
{
tree res;
- struct loop *phi_loop = loop_containing_stmt (loop_phi_node);
+ class loop *phi_loop = loop_containing_stmt (loop_phi_node);
tree init_cond;
gcc_assert (phi_loop == loop);
analyzed. */
static tree
-interpret_condition_phi (struct loop *loop, gphi *condition_phi)
+interpret_condition_phi (class loop *loop, gphi *condition_phi)
{
int i, n = gimple_phi_num_args (condition_phi);
tree res = chrec_not_analyzed_yet;
analyze the effect of an inner loop: see interpret_loop_phi. */
static tree
-interpret_rhs_expr (struct loop *loop, gimple *at_stmt,
+interpret_rhs_expr (class loop *loop, gimple *at_stmt,
tree type, tree rhs1, enum tree_code code, tree rhs2)
{
tree res, chrec1, chrec2, ctype;
/* Interpret the expression EXPR. */
static tree
-interpret_expr (struct loop *loop, gimple *at_stmt, tree expr)
+interpret_expr (class loop *loop, gimple *at_stmt, tree expr)
{
enum tree_code code;
tree type = TREE_TYPE (expr), op0, op1;
/* Interpret the rhs of the assignment STMT. */
static tree
-interpret_gimple_assign (struct loop *loop, gimple *stmt)
+interpret_gimple_assign (class loop *loop, gimple *stmt)
{
tree type = TREE_TYPE (gimple_assign_lhs (stmt));
enum tree_code code = gimple_assign_rhs_code (stmt);
/* Helper recursive function. */
static tree
-analyze_scalar_evolution_1 (struct loop *loop, tree var)
+analyze_scalar_evolution_1 (class loop *loop, tree var)
{
gimple *def;
basic_block bb;
- struct loop *def_loop;
+ class loop *def_loop;
tree res;
if (TREE_CODE (var) != SSA_NAME)
if (loop != def_loop)
{
res = analyze_scalar_evolution_1 (def_loop, var);
- struct loop *loop_to_skip = superloop_at_depth (def_loop,
+ class loop *loop_to_skip = superloop_at_depth (def_loop,
loop_depth (loop) + 1);
res = compute_overall_effect_of_inner_loop (loop_to_skip, res);
if (chrec_contains_symbols_defined_in_loop (res, loop->num))
*/
tree
-analyze_scalar_evolution (struct loop *loop, tree var)
+analyze_scalar_evolution (class loop *loop, tree var)
{
tree res;
/* Analyzes and returns the scalar evolution of VAR address in LOOP. */
static tree
-analyze_scalar_evolution_for_address_of (struct loop *loop, tree var)
+analyze_scalar_evolution_for_address_of (class loop *loop, tree var)
{
return analyze_scalar_evolution (loop, build_fold_addr_expr (var));
}
*/
static tree
-analyze_scalar_evolution_in_loop (struct loop *wrto_loop, struct loop *use_loop,
+analyze_scalar_evolution_in_loop (class loop *wrto_loop, class loop *use_loop,
tree version, bool *folded_casts)
{
bool val = false;
static tree
loop_closed_phi_def (tree var)
{
- struct loop *loop;
+ class loop *loop;
edge exit;
gphi *phi;
gphi_iterator psi;
return NULL_TREE;
}
-static tree instantiate_scev_r (edge, struct loop *, struct loop *,
+static tree instantiate_scev_r (edge, class loop *, class loop *,
tree, bool *, int);
/* Analyze all the parameters of the chrec, between INSTANTIATE_BELOW
static tree
instantiate_scev_name (edge instantiate_below,
- struct loop *evolution_loop, struct loop *inner_loop,
+ class loop *evolution_loop, class loop *inner_loop,
tree chrec,
bool *fold_conversions,
int size_expr)
{
tree res;
- struct loop *def_loop;
+ class loop *def_loop;
basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (chrec));
/* A parameter, nothing to do. */
static tree
instantiate_scev_poly (edge instantiate_below,
- struct loop *evolution_loop, struct loop *,
+ class loop *evolution_loop, class loop *,
tree chrec, bool *fold_conversions, int size_expr)
{
tree op1;
static tree
instantiate_scev_binary (edge instantiate_below,
- struct loop *evolution_loop, struct loop *inner_loop,
+ class loop *evolution_loop, class loop *inner_loop,
tree chrec, enum tree_code code,
tree type, tree c0, tree c1,
bool *fold_conversions, int size_expr)
static tree
instantiate_scev_convert (edge instantiate_below,
- struct loop *evolution_loop, struct loop *inner_loop,
+ class loop *evolution_loop, class loop *inner_loop,
tree chrec, tree type, tree op,
bool *fold_conversions, int size_expr)
{
static tree
instantiate_scev_not (edge instantiate_below,
- struct loop *evolution_loop, struct loop *inner_loop,
+ class loop *evolution_loop, class loop *inner_loop,
tree chrec,
enum tree_code code, tree type, tree op,
bool *fold_conversions, int size_expr)
static tree
instantiate_scev_r (edge instantiate_below,
- struct loop *evolution_loop, struct loop *inner_loop,
+ class loop *evolution_loop, class loop *inner_loop,
tree chrec,
bool *fold_conversions, int size_expr)
{
a function parameter. */
tree
-instantiate_scev (edge instantiate_below, struct loop *evolution_loop,
+instantiate_scev (edge instantiate_below, class loop *evolution_loop,
tree chrec)
{
tree res;
of an expression. */
tree
-resolve_mixers (struct loop *loop, tree chrec, bool *folded_casts)
+resolve_mixers (class loop *loop, tree chrec, bool *folded_casts)
{
bool destr = false;
bool fold_conversions = false;
the loop body has been executed 6 times. */
tree
-number_of_latch_executions (struct loop *loop)
+number_of_latch_executions (class loop *loop)
{
edge exit;
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
tree may_be_zero;
tree res;
void
scev_initialize (void)
{
- struct loop *loop;
+ class loop *loop;
gcc_assert (! scev_initialized_p ());
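The scev machinery is bracketed by the initialize/finalize pair declared in tree-scalar-evolution.h (see the declarations further below). A minimal lifecycle sketch, not part of this patch, assuming loop structures are already computed; analyze_with_scev is a hypothetical helper:

  /* Sketch: run some analysis under an active scev cache.  */
  static void
  analyze_with_scev (void)
  {
    scev_initialize ();
    gcc_assert (scev_initialized_p ());
    /* ... analyze_scalar_evolution calls go here ...  */
    scev_finalize ();
  }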
void
scev_reset (void)
{
- struct loop *loop;
+ class loop *loop;
scev_reset_htab ();
hypothetical IVs to be inserted into code. */
bool
-iv_can_overflow_p (struct loop *loop, tree type, tree base, tree step)
+iv_can_overflow_p (class loop *loop, tree type, tree base, tree step)
{
widest_int nit;
wide_int base_min, base_max, step_min, step_max, type_min, type_max;
infinite. */
bool
-simple_iv_with_niters (struct loop *wrto_loop, struct loop *use_loop,
+simple_iv_with_niters (class loop *wrto_loop, class loop *use_loop,
tree op, affine_iv *iv, tree *iv_niters,
bool allow_nonconstant_step)
{
affine iv unconditionally. */
bool
-simple_iv (struct loop *wrto_loop, struct loop *use_loop, tree op,
+simple_iv (class loop *wrto_loop, class loop *use_loop, tree op,
affine_iv *iv, bool allow_nonconstant_step)
{
return simple_iv_with_niters (wrto_loop, use_loop, op, iv,
/* Do final value replacement for LOOP, return true if we did anything. */
bool
-final_value_replacement_loop (struct loop *loop)
+final_value_replacement_loop (class loop *loop)
{
/* If we do not know the exact number of iterations of the loop, we cannot
replace the final value. */
/* Set stmt insertion pointer. All stmts are inserted before this point. */
gimple_stmt_iterator gsi = gsi_after_labels (exit->dest);
- struct loop *ex_loop
+ class loop *ex_loop
= superloop_at_depth (loop,
loop_depth (exit->dest->loop_father) + 1);
#ifndef GCC_TREE_SCALAR_EVOLUTION_H
#define GCC_TREE_SCALAR_EVOLUTION_H
-extern tree number_of_latch_executions (struct loop *);
-extern gcond *get_loop_exit_condition (const struct loop *);
+extern tree number_of_latch_executions (class loop *);
+extern gcond *get_loop_exit_condition (const class loop *);
extern void scev_initialize (void);
extern bool scev_initialized_p (void);
extern void scev_reset (void);
extern void scev_reset_htab (void);
extern void scev_finalize (void);
-extern tree analyze_scalar_evolution (struct loop *, tree);
-extern tree instantiate_scev (edge, struct loop *, tree);
-extern tree resolve_mixers (struct loop *, tree, bool *);
+extern tree analyze_scalar_evolution (class loop *, tree);
+extern tree instantiate_scev (edge, class loop *, tree);
+extern tree resolve_mixers (class loop *, tree, bool *);
extern void gather_stats_on_scev_database (void);
-extern bool final_value_replacement_loop (struct loop *);
+extern bool final_value_replacement_loop (class loop *);
extern unsigned int scev_const_prop (void);
extern bool expression_expensive_p (tree);
-extern bool simple_iv_with_niters (struct loop *, struct loop *, tree,
+extern bool simple_iv_with_niters (class loop *, class loop *, tree,
struct affine_iv *, tree *, bool);
-extern bool simple_iv (struct loop *, struct loop *, tree, struct affine_iv *,
+extern bool simple_iv (class loop *, class loop *, tree, struct affine_iv *,
bool);
-extern bool iv_can_overflow_p (struct loop *, tree, tree, tree);
-extern tree compute_overall_effect_of_inner_loop (struct loop *, tree);
+extern bool iv_can_overflow_p (class loop *, tree, tree, tree);
+extern tree compute_overall_effect_of_inner_loop (class loop *, tree);
/* Returns the basic block preceding LOOP, or the CFG entry block when
the loop is the function's body. */
be analyzed and instantiated. */
static inline tree
-instantiate_parameters (struct loop *loop, tree chrec)
+instantiate_parameters (class loop *loop, tree chrec)
{
return instantiate_scev (loop_preheader_edge (loop), loop, chrec);
}
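As the definition above shows, instantiate_parameters is a thin wrapper that instantiates a chrec on the loop's preheader edge. A minimal sketch of the usual pairing with analyze_scalar_evolution, assuming scev is initialized; dump_scev_of is a hypothetical helper:

  /* Sketch: analyze NAME in LOOP and dump the instantiated chrec.  */
  static void
  dump_scev_of (class loop *loop, tree name)
  {
    tree ev = analyze_scalar_evolution (loop, name);
    ev = instantiate_parameters (loop, ev);
    if (dump_file)
      {
	print_generic_expr (dump_file, ev, TDF_SLIM);
	fprintf (dump_file, "\n");
      }
  }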
/* Returns the loop of the polynomial chrec CHREC. */
-static inline struct loop *
+static inline class loop *
get_chrec_loop (const_tree chrec)
{
return get_loop (cfun, CHREC_VARIABLE (chrec));
extern tree tree_mem_ref_addr (tree, tree);
extern bool valid_mem_ref_p (machine_mode, addr_space_t, struct mem_address *);
extern void move_fixed_address_to_symbol (struct mem_address *,
- struct aff_tree *);
+ class aff_tree *);
tree create_mem_ref (gimple_stmt_iterator *, tree,
- struct aff_tree *, tree, tree, tree, bool);
+ class aff_tree *, tree, tree, tree, bool);
extern void copy_ref_info (tree, tree);
tree maybe_fold_tmr (tree);
/* Prevent the empty possibly infinite loops from being removed. */
if (aggressive)
{
- struct loop *loop;
+ class loop *loop;
if (mark_irreducible_loops ())
FOR_EACH_BB_FN (bb, cfun)
{
void
free_dom_edge_info (edge e)
{
- class edge_info *edge_info = (struct edge_info *)e->aux;
+ class edge_info *edge_info = (class edge_info *)e->aux;
if (edge_info)
delete edge_info;
bool can_infer_simple_equiv
= !(HONOR_SIGNED_ZEROS (op0)
&& real_zerop (op0));
- struct edge_info *edge_info;
+ class edge_info *edge_info;
edge_info = new class edge_info (true_edge);
record_conditions (&edge_info->cond_equivalences, cond, inverted);
bool can_infer_simple_equiv
= !(HONOR_SIGNED_ZEROS (op1)
&& (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
- struct edge_info *edge_info;
+ class edge_info *edge_info;
edge_info = new class edge_info (true_edge);
record_conditions (&edge_info->cond_equivalences, cond, inverted);
function. */
var_map
-init_var_map (int size, struct loop *loop)
+init_var_map (int size, class loop *loop)
{
var_map map;
if (cfun->has_simduid_loops)
{
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, 0)
if (loop->simduid && !is_used_p (loop->simduid))
loop->simduid = NULL_TREE;
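FOR_EACH_LOOP, as used in this hunk, walks the loop tree of cfun with a `class loop *` iterator variable. A minimal sketch, assuming the LI_ONLY_INNERMOST flag from cfgloop.h; count_innermost_loops is a hypothetical helper:

  /* Sketch: count the innermost loops of the current function.  */
  static unsigned
  count_innermost_loops (void)
  {
    class loop *loop;
    unsigned n = 0;
    FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
      n++;
    return n;
  }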
/* Value used to represent no partition number. */
#define NO_PARTITION -1
-extern var_map init_var_map (int, struct loop* = NULL);
+extern var_map init_var_map (int, class loop* = NULL);
extern void delete_var_map (var_map);
extern int var_union (var_map, tree, tree);
extern void partition_view_normal (var_map);
amount. */
static bool
-should_duplicate_loop_header_p (basic_block header, struct loop *loop,
+should_duplicate_loop_header_p (basic_block header, class loop *loop,
int *limit)
{
gimple_stmt_iterator bsi;
/* Checks whether LOOP is a do-while style loop. */
static bool
-do_while_loop_p (struct loop *loop)
+do_while_loop_p (class loop *loop)
{
gimple *stmt = last_stmt (loop->latch);
unsigned int copy_headers (function *fun);
/* Return true to copy headers of LOOP or false to skip. */
- virtual bool process_loop_p (struct loop *loop) = 0;
+ virtual bool process_loop_p (class loop *loop) = 0;
};
const pass_data pass_data_ch =
protected:
/* ch_base method: */
- virtual bool process_loop_p (struct loop *loop);
+ virtual bool process_loop_p (class loop *loop);
}; // class pass_ch
const pass_data pass_data_ch_vect =
protected:
/* ch_base method: */
- virtual bool process_loop_p (struct loop *loop);
+ virtual bool process_loop_p (class loop *loop);
}; // class pass_ch_vect
/* For all loops, copy the condition at the end of the loop body in front
unsigned int
ch_base::copy_headers (function *fun)
{
- struct loop *loop;
+ class loop *loop;
basic_block header;
edge exit, entry;
basic_block *bbs, *copied_bbs;
/* Apply header copying according to a very simple test of do-while shape. */
bool
-pass_ch::process_loop_p (struct loop *loop)
+pass_ch::process_loop_p (class loop *loop)
{
return !do_while_loop_p (loop);
}
/* Apply header-copying to loops where we might enable vectorization. */
bool
-pass_ch_vect::process_loop_p (struct loop *loop)
+pass_ch_vect::process_loop_p (class loop *loop)
{
if (!flag_tree_loop_vectorize && !loop->force_vectorize)
return false;
struct lim_aux_data
{
- struct loop *max_loop; /* The outermost loop in that the statement
+ class loop *max_loop; /* The outermost loop in that the statement
is invariant. */
- struct loop *tgt_loop; /* The loop out of that we want to move the
+ class loop *tgt_loop; /* The loop out of that we want to move the
invariant. */
- struct loop *always_executed_in;
+ class loop *always_executed_in;
/* The outermost loop for which we are sure
the statement is executed if the loop
is entered. */
static inline bool equal (const im_mem_ref *, const ao_ref *);
};
-/* A hash function for struct im_mem_ref object OBJ. */
+/* A hash function for class im_mem_ref object OBJ. */
inline hashval_t
mem_ref_hasher::hash (const im_mem_ref *mem)
return mem->hash;
}
-/* An equality function for struct im_mem_ref object MEM1 with
+/* An equality function for class im_mem_ref object MEM1 with
memory reference OBJ2. */
inline bool
static bitmap_obstack lim_bitmap_obstack;
static obstack mem_ref_obstack;
-static bool ref_indep_loop_p (struct loop *, im_mem_ref *);
-static bool ref_always_accessed_p (struct loop *, im_mem_ref *, bool);
+static bool ref_indep_loop_p (class loop *, im_mem_ref *);
+static bool ref_always_accessed_p (class loop *, im_mem_ref *, bool);
/* Minimum cost of an expensive expression. */
#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
/* The outermost loop for which execution of the header guarantees that the
block will be executed. */
-#define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
+#define ALWAYS_EXECUTED_IN(BB) ((class loop *) (BB)->aux)
#define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
/* ID of the shared unanalyzable mem. */
other operands, i.e. the outermost loop enclosing LOOP in which the value
of DEF is invariant. */
-static struct loop *
-outermost_invariant_loop (tree def, struct loop *loop)
+static class loop *
+outermost_invariant_loop (tree def, class loop *loop)
{
gimple *def_stmt;
basic_block def_bb;
- struct loop *max_loop;
+ class loop *max_loop;
struct lim_aux_data *lim_data;
if (!def)
If DEF is not invariant in LOOP, return false. Otherwise return TRUE. */
static bool
-add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
+add_dependency (tree def, struct lim_aux_data *data, class loop *loop,
bool add_cost)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (def);
basic_block def_bb = gimple_bb (def_stmt);
- struct loop *max_loop;
+ class loop *max_loop;
struct lim_aux_data *def_data;
if (!def_bb)
REF is independent. If REF is not independent in LOOP, NULL is returned
instead. */
-static struct loop *
-outermost_indep_loop (struct loop *outer, struct loop *loop, im_mem_ref *ref)
+static class loop *
+outermost_indep_loop (class loop *outer, class loop *loop, im_mem_ref *ref)
{
- struct loop *aloop;
+ class loop *aloop;
if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
return NULL;
determine_max_movement (gimple *stmt, bool must_preserve_exec)
{
basic_block bb = gimple_bb (stmt);
- struct loop *loop = bb->loop_father;
- struct loop *level;
+ class loop *loop = bb->loop_father;
+ class loop *level;
struct lim_aux_data *lim_data = get_lim_data (stmt);
tree val;
ssa_op_iter iter;
operands) is hoisted at least out of the loop LEVEL. */
static void
-set_level (gimple *stmt, struct loop *orig_loop, struct loop *level)
+set_level (gimple *stmt, class loop *orig_loop, class loop *level)
{
- struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
+ class loop *stmt_loop = gimple_bb (stmt)->loop_father;
struct lim_aux_data *lim_data;
gimple *dep_stmt;
unsigned i;
gimple_stmt_iterator bsi;
gimple *stmt;
bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
- struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
+ class loop *outermost = ALWAYS_EXECUTED_IN (bb);
struct lim_aux_data *lim_data;
if (!loop_outer (bb->loop_father))
{
tree op0 = gimple_assign_rhs1 (stmt);
tree op1 = gimple_assign_rhs2 (stmt);
- struct loop *ol1 = outermost_invariant_loop (op1,
+ class loop *ol1 = outermost_invariant_loop (op1,
loop_containing_stmt (stmt));
/* If divisor is invariant, convert a/b to a*(1/b), allowing reciprocal
unsigned int
move_computations_worker (basic_block bb)
{
- struct loop *level;
+ class loop *level;
unsigned cost = 0;
struct lim_aux_data *lim_data;
unsigned int todo = 0;
static bool
may_move_till (tree ref, tree *index, void *data)
{
- struct loop *loop = (struct loop *) data, *max_loop;
+ class loop *loop = (class loop *) data, *max_loop;
/* If REF is an array reference, check also that the step and the lower
bound are invariant in LOOP. */
moved out of the LOOP. ORIG_LOOP is the loop in which EXPR is used. */
static void
-force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
+force_move_till_op (tree op, class loop *orig_loop, class loop *loop)
{
gimple *stmt;
struct fmt_data
{
- struct loop *loop;
- struct loop *orig_loop;
+ class loop *loop;
+ class loop *orig_loop;
};
static bool
/* A function to free the mem_ref object MEM. */
static void
-memref_free (struct im_mem_ref *mem)
+memref_free (class im_mem_ref *mem)
{
mem->accesses_in_loop.release ();
}
static im_mem_ref *
mem_ref_alloc (ao_ref *mem, unsigned hash, unsigned id)
{
- im_mem_ref *ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
+ im_mem_ref *ref = XOBNEW (&mem_ref_obstack, class im_mem_ref);
if (mem)
ref->mem = *mem;
else
necessary. Return whether a bit was changed. */
static bool
-set_ref_stored_in_loop (im_mem_ref *ref, struct loop *loop)
+set_ref_stored_in_loop (im_mem_ref *ref, class loop *loop)
{
if (!ref->stored)
ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
/* Marks reference REF as stored in LOOP. */
static void
-mark_ref_stored (im_mem_ref *ref, struct loop *loop)
+mark_ref_stored (im_mem_ref *ref, class loop *loop)
{
while (loop != current_loops->tree_root
&& set_ref_stored_in_loop (ref, loop))
well. */
static void
-gather_mem_refs_stmt (struct loop *loop, gimple *stmt)
+gather_mem_refs_stmt (class loop *loop, gimple *stmt)
{
tree *mem = NULL;
hashval_t hash;
{
basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
- struct loop *loop1 = bb1->loop_father;
- struct loop *loop2 = bb2->loop_father;
+ class loop *loop1 = bb1->loop_father;
+ class loop *loop2 = bb2->loop_father;
if (loop1->num == loop2->num)
return bb1->index - bb2->index;
return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
{
mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
- struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
- struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
+ class loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
+ class loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
if (loop1->num == loop2->num)
return 0;
return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
{
gimple_stmt_iterator bsi;
basic_block bb, *bbs;
- struct loop *loop, *outer;
+ class loop *loop, *outer;
unsigned i, n;
/* Collect all basic-blocks in loops and sort them after their
static int
find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
{
- struct loop *loop = (struct loop *)const_cast<void *>(loop_);
+ class loop *loop = (class loop *)const_cast<void *>(loop_);
mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
- struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
+ class loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
if (loop->num == loc_loop->num
|| flow_loop_nested_p (loop, loc_loop))
return 0;
template <typename FN>
static bool
-for_all_locs_in_loop (struct loop *loop, im_mem_ref *ref, FN fn)
+for_all_locs_in_loop (class loop *loop, im_mem_ref *ref, FN fn)
{
unsigned i;
mem_ref_loc *loc;
/* Rewrites all references to REF in LOOP by variable TMP_VAR. */
static void
-rewrite_mem_refs (struct loop *loop, im_mem_ref *ref, tree tmp_var)
+rewrite_mem_refs (class loop *loop, im_mem_ref *ref, tree tmp_var)
{
for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
}
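for_all_locs_in_loop applies the functor to each location of REF inside LOOP, stopping early once the functor returns true; rewrite_mem_ref_loc above always returns false so every location is rewritten. A minimal functor sketch in the same style, assuming those iteration semantics; count_locs is hypothetical:

  /* Sketch: count the locations of a memory reference in a loop.  */
  class count_locs
  {
  public:
    count_locs (unsigned *n_) : n (n_) {}
    bool operator () (mem_ref_loc *)
    {
      ++*n;
      return false;  /* False means: keep visiting locations.  */
    }
    unsigned *n;
  };

  /* Usage: unsigned n = 0;
     for_all_locs_in_loop (loop, ref, count_locs (&n));  */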
/* Returns the first reference location to REF in LOOP. */
static mem_ref_loc *
-first_mem_ref_loc (struct loop *loop, im_mem_ref *ref)
+first_mem_ref_loc (class loop *loop, im_mem_ref *ref)
{
mem_ref_loc *locp = NULL;
for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
set, set an appropriate flag indicating the store. */
static tree
-execute_sm_if_changed_flag_set (struct loop *loop, im_mem_ref *ref,
+execute_sm_if_changed_flag_set (class loop *loop, im_mem_ref *ref,
hash_set <basic_block> *bbs)
{
tree flag;
to the reference from the temporary variable are emitted to exits. */
static void
-execute_sm (struct loop *loop, vec<edge> exits, im_mem_ref *ref)
+execute_sm (class loop *loop, vec<edge> exits, im_mem_ref *ref)
{
tree tmp_var, store_flag = NULL_TREE;
unsigned i;
edges of the LOOP. */
static void
-hoist_memory_references (struct loop *loop, bitmap mem_refs,
+hoist_memory_references (class loop *loop, bitmap mem_refs,
vec<edge> exits)
{
im_mem_ref *ref;
class ref_always_accessed
{
public:
- ref_always_accessed (struct loop *loop_, bool stored_p_)
+ ref_always_accessed (class loop *loop_, bool stored_p_)
: loop (loop_), stored_p (stored_p_) {}
bool operator () (mem_ref_loc *loc);
- struct loop *loop;
+ class loop *loop;
bool stored_p;
};
bool
ref_always_accessed::operator () (mem_ref_loc *loc)
{
- struct loop *must_exec;
+ class loop *must_exec;
if (!get_lim_data (loc->stmt))
return false;
make sure REF is always stored to in LOOP. */
static bool
-ref_always_accessed_p (struct loop *loop, im_mem_ref *ref, bool stored_p)
+ref_always_accessed_p (class loop *loop, im_mem_ref *ref, bool stored_p)
{
return for_all_locs_in_loop (loop, ref,
ref_always_accessed (loop, stored_p));
and its super-loops. */
static void
-record_dep_loop (struct loop *loop, im_mem_ref *ref, bool stored_p)
+record_dep_loop (class loop *loop, im_mem_ref *ref, bool stored_p)
{
/* We can propagate dependent-in-loop bits up the loop
hierarchy to all outer loops. */
references in LOOP. */
static bool
-ref_indep_loop_p_1 (struct loop *loop, im_mem_ref *ref, bool stored_p)
+ref_indep_loop_p_1 (class loop *loop, im_mem_ref *ref, bool stored_p)
{
stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
return false;
- struct loop *inner = loop->inner;
+ class loop *inner = loop->inner;
while (inner)
{
if (!ref_indep_loop_p_1 (inner, ref, stored_p))
LOOP. */
static bool
-ref_indep_loop_p (struct loop *loop, im_mem_ref *ref)
+ref_indep_loop_p (class loop *loop, im_mem_ref *ref)
{
gcc_checking_assert (MEM_ANALYZABLE (ref));
/* Returns true if we can perform store motion of REF from LOOP. */
static bool
-can_sm_ref_p (struct loop *loop, im_mem_ref *ref)
+can_sm_ref_p (class loop *loop, im_mem_ref *ref)
{
tree base;
motion was performed in one of the outer loops. */
static void
-find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
+find_refs_for_sm (class loop *loop, bitmap sm_executed, bitmap refs_to_sm)
{
bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
unsigned i;
on its exits). */
static bool
-loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
+loop_suitable_for_sm (class loop *loop ATTRIBUTE_UNUSED,
vec<edge> exits)
{
unsigned i;
store motion was executed in one of the outer loops. */
static void
-store_motion_loop (struct loop *loop, bitmap sm_executed)
+store_motion_loop (class loop *loop, bitmap sm_executed)
{
vec<edge> exits = get_loop_exit_edges (loop);
- struct loop *subloop;
+ class loop *subloop;
bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
if (loop_suitable_for_sm (loop, exits))
static void
store_motion (void)
{
- struct loop *loop;
+ class loop *loop;
bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
blocks that contain a nonpure call. */
static void
-fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
+fill_always_executed_in_1 (class loop *loop, sbitmap contains_call)
{
basic_block bb = NULL, *bbs, last = NULL;
unsigned i;
edge e;
- struct loop *inn_loop = loop;
+ class loop *inn_loop = loop;
if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
{
fill_always_executed_in (void)
{
basic_block bb;
- struct loop *loop;
+ class loop *loop;
auto_sbitmap contains_call (last_basic_block_for_fn (cfun));
bitmap_clear (contains_call);
static void
tree_ssa_lim_initialize (void)
{
- struct loop *loop;
+ class loop *loop;
unsigned i;
bitmap_obstack_initialize (&lim_bitmap_obstack);
if they are not NULL. */
void
-create_canonical_iv (struct loop *loop, edge exit, tree niter,
+create_canonical_iv (class loop *loop, edge exit, tree niter,
tree *var_before = NULL, tree *var_after = NULL)
{
edge in;
/* Return true if OP in STMT will be constant after peeling LOOP. */
static bool
-constant_after_peeling (tree op, gimple *stmt, struct loop *loop)
+constant_after_peeling (tree op, gimple *stmt, class loop *loop)
{
if (is_gimple_min_invariant (op))
return true;
Stop estimating after UPPER_BOUND is met. Return true in this case. */
static bool
-tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
+tree_estimate_loop_size (class loop *loop, edge exit, edge edge_to_cancel,
struct loop_size *size, int upper_bound)
{
basic_block *body = get_loop_body (loop);
The other cases are hopefully rare and will be cleaned up later. */
static edge
-loop_edge_to_cancel (struct loop *loop)
+loop_edge_to_cancel (class loop *loop)
{
vec<edge> exits;
unsigned i;
known to not be executed. */
static bool
-remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
+remove_exits_and_undefined_stmts (class loop *loop, unsigned int npeeled)
{
- struct nb_iter_bound *elt;
+ class nb_iter_bound *elt;
bool changed = false;
for (elt = loop->bounds; elt; elt = elt->next)
discovered. */
static bool
-remove_redundant_iv_tests (struct loop *loop)
+remove_redundant_iv_tests (class loop *loop)
{
- struct nb_iter_bound *elt;
+ class nb_iter_bound *elt;
bool changed = false;
if (!loop->any_upper_bound)
{
basic_block bb = gimple_bb (elt->stmt);
edge exit_edge = EDGE_SUCC (bb, 0);
- struct tree_niter_desc niter;
+ class tree_niter_desc niter;
if (!loop_exit_edge_p (loop, exit_edge))
exit_edge = EDGE_SUCC (bb, 1);
{
while (loops_to_unloop.length ())
{
- struct loop *loop = loops_to_unloop.pop ();
+ class loop *loop = loops_to_unloop.pop ();
int n_unroll = loops_to_unloop_nunroll.pop ();
basic_block latch = loop->latch;
edge latch_edge = loop_latch_edge (loop);
a summary of the unroll to the dump file. */
static bool
-try_unroll_loop_completely (struct loop *loop,
+try_unroll_loop_completely (class loop *loop,
edge exit, tree niter, bool may_be_zero,
enum unroll_level ul,
HOST_WIDE_INT maxiter,
Parameters are the same as for try_unroll_loops_completely */
static bool
-try_peel_loop (struct loop *loop,
+try_peel_loop (class loop *loop,
edge exit, tree niter, bool may_be_zero,
HOST_WIDE_INT maxiter)
{
Returns true if cfg is changed. */
static bool
-canonicalize_loop_induction_variables (struct loop *loop,
+canonicalize_loop_induction_variables (class loop *loop,
bool create_iv, enum unroll_level ul,
bool try_eval, bool allow_peel)
{
HOST_WIDE_INT maxiter;
bool modified = false;
dump_user_location_t locus;
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
bool may_be_zero = false;
/* For unrolling allow conditional constant or zero iterations, thus
unsigned int
canonicalize_induction_variables (void)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
bool irred_invalidated = false;
bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
- bitmap father_bbs, struct loop *loop)
+ bitmap father_bbs, class loop *loop)
{
- struct loop *loop_father;
+ class loop *loop_father;
bool changed = false;
- struct loop *inner;
+ class loop *inner;
enum unroll_level ul;
unsigned num = number_of_loops (cfun);
exists. */
static inline HOST_WIDE_INT
-avg_loop_niter (struct loop *loop)
+avg_loop_niter (class loop *loop)
{
HOST_WIDE_INT niter = estimated_stmt_executions_int (loop);
if (niter == -1)
/* Number of IV candidates in the cost_map. */
unsigned n_map_members;
/* The costs wrto the iv candidates. */
- struct cost_pair *cost_map;
+ class cost_pair *cost_map;
/* The selected candidate for the group. */
struct iv_cand *selected;
/* Uses in the group. */
struct ivopts_data
{
/* The currently optimized loop. */
- struct loop *current_loop;
+ class loop *current_loop;
location_t loop_loc;
/* Numbers of iterations for all exits of the current loop. */
unsigned bad_groups;
/* Candidate assigned to a use, together with the related costs. */
- struct cost_pair **cand_for_group;
+ class cost_pair **cand_for_group;
/* Number of times each candidate is used. */
unsigned *n_cand_uses;
struct iv_group *group;
/* An old assignment (for rollback purposes). */
- struct cost_pair *old_cp;
+ class cost_pair *old_cp;
/* A new assignment. */
- struct cost_pair *new_cp;
+ class cost_pair *new_cp;
/* Next change in the list. */
struct iv_ca_delta *next;
/* The single loop exit if it dominates the latch, NULL otherwise. */
edge
-single_dom_exit (struct loop *loop)
+single_dom_exit (class loop *loop)
{
edge exit = single_exit (loop);
emitted in LOOP. */
static bool
-stmt_after_ip_normal_pos (struct loop *loop, gimple *stmt)
+stmt_after_ip_normal_pos (class loop *loop, gimple *stmt)
{
basic_block bb = ip_normal_pos (loop), sbb = gimple_bb (stmt);
CAND is incremented in LOOP. */
static bool
-stmt_after_increment (struct loop *loop, struct iv_cand *cand, gimple *stmt)
+stmt_after_increment (class loop *loop, struct iv_cand *cand, gimple *stmt)
{
switch (cand->pos)
{
/* Returns the structure describing number of iterations determined from
EXIT of DATA->current_loop, or NULL if something goes wrong. */
-static struct tree_niter_desc *
+static class tree_niter_desc *
niter_for_exit (struct ivopts_data *data, edge exit)
{
- struct tree_niter_desc *desc;
+ class tree_niter_desc *desc;
tree_niter_desc **slot;
if (!data->niters)
/* Try to determine number of iterations. We cannot safely work with ssa
names that appear in phi nodes on abnormal edges, so that we do not
create overlapping life ranges for them (PR 27283). */
- desc = XNEW (struct tree_niter_desc);
+ desc = XNEW (class tree_niter_desc);
if (!number_of_iterations_exit (data->current_loop,
exit, desc, true)
|| contains_abnormal_ssa_name_p (desc->niter))
single dominating exit of DATA->current_loop, or NULL if something
goes wrong. */
-static struct tree_niter_desc *
+static class tree_niter_desc *
niter_for_single_dom_exit (struct ivopts_data *data)
{
edge exit = single_dom_exit (data->current_loop);
affine_iv iv;
tree step, type, base, stop;
bool found = false;
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
gphi_iterator psi;
for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
gimple *def;
tree var;
struct iv *iv, *incr_iv;
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
basic_block incr_bb;
gphi_iterator psi;
find_givs_in_stmt_scev (struct ivopts_data *data, gimple *stmt, affine_iv *iv)
{
tree lhs, stop;
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
iv->base = NULL_TREE;
iv->step = NULL_TREE;
static void
find_givs (struct ivopts_data *data)
{
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
basic_block *body = get_loop_body_in_dom_order (loop);
unsigned i;
if (dump_file && (dump_flags & TDF_DETAILS))
{
- struct tree_niter_desc *niter = niter_for_single_dom_exit (data);
+ class tree_niter_desc *niter = niter_for_single_dom_exit (data);
if (niter)
{
outside of the returned loop. Returns NULL if EXPR is not
even obviously invariant in LOOP. */
-struct loop *
-outermost_invariant_loop_for_expr (struct loop *loop, tree expr)
+class loop *
+outermost_invariant_loop_for_expr (class loop *loop, tree expr)
{
basic_block def_bb;
unsigned i, len;
len = TREE_OPERAND_LENGTH (expr);
for (i = 0; i < len; i++)
{
- struct loop *ivloop;
+ class loop *ivloop;
if (!TREE_OPERAND (expr, i))
continue;
should not be the function body. */
bool
-expr_invariant_in_loop_p (struct loop *loop, tree expr)
+expr_invariant_in_loop_p (class loop *loop, tree expr)
{
basic_block def_bb;
unsigned i, len;
struct iv *iv;
bool use_overflow_semantics = false;
tree step, iv_base, iv_step, lbound, off;
- struct loop *loop = dta->ivopts_data->current_loop;
+ class loop *loop = dta->ivopts_data->current_loop;
/* If base is a component ref, require that the offset of the reference
be invariant. */
is already nonempty. */
static bool
-allow_ip_end_pos_p (struct loop *loop)
+allow_ip_end_pos_p (class loop *loop)
{
if (!ip_normal_pos (loop))
return true;
record_common_cand (struct ivopts_data *data, tree base,
tree step, struct iv_use *use)
{
- struct iv_common_cand ent;
- struct iv_common_cand **slot;
+ class iv_common_cand ent;
+ class iv_common_cand **slot;
ent.base = base;
ent.step = step;
common_cand_cmp (const void *p1, const void *p2)
{
unsigned n1, n2;
- const struct iv_common_cand *const *const ccand1
- = (const struct iv_common_cand *const *)p1;
- const struct iv_common_cand *const *const ccand2
- = (const struct iv_common_cand *const *)p2;
+ const class iv_common_cand *const *const ccand1
+ = (const class iv_common_cand *const *)p1;
+ const class iv_common_cand *const *const ccand2
+ = (const class iv_common_cand *const *)p2;
n1 = (*ccand1)->uses.length ();
n2 = (*ccand2)->uses.length ();
data->iv_common_cands.qsort (common_cand_cmp);
for (i = 0; i < data->iv_common_cands.length (); i++)
{
- struct iv_common_cand *ptr = data->iv_common_cands[i];
+ class iv_common_cand *ptr = data->iv_common_cands[i];
/* Only add IV candidate if it's derived from multiple uses. */
if (ptr->uses.length () <= 1)
}
group->n_map_members = size;
- group->cost_map = XCNEWVEC (struct cost_pair, size);
+ group->cost_map = XCNEWVEC (class cost_pair, size);
}
}
/* Gets cost of (GROUP, CAND) pair. */
-static struct cost_pair *
+static class cost_pair *
get_group_iv_cost (struct ivopts_data *data, struct iv_group *group,
struct iv_cand *cand)
{
unsigned i, s;
- struct cost_pair *ret;
+ class cost_pair *ret;
if (!cand)
return NULL;
static bool ATTRIBUTE_UNUSED
generic_predict_doloop_p (struct ivopts_data *data)
{
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
/* Call target hook for target dependent checks. */
if (!targetm.predict_doloop_p (loop))
suitable or not. Keep it as simple as possible, feel free to extend it
if you find any multiple exits cases matter. */
edge exit = single_dom_exit (loop);
- struct tree_niter_desc *niter_desc;
+ class tree_niter_desc *niter_desc;
if (!exit || !(niter_desc = niter_for_exit (data, exit)))
{
if (dump_file && (dump_flags & TDF_DETAILS))
/* Returns variable containing the value of candidate CAND at statement AT. */
static tree
-var_at_stmt (struct loop *loop, struct iv_cand *cand, gimple *stmt)
+var_at_stmt (class loop *loop, struct iv_cand *cand, gimple *stmt)
{
if (stmt_after_increment (loop, cand, stmt))
return cand->var_after;
non-null. Returns false if USE cannot be expressed using CAND. */
static bool
-get_computation_aff_1 (struct loop *loop, gimple *at, struct iv_use *use,
- struct iv_cand *cand, struct aff_tree *aff_inv,
- struct aff_tree *aff_var, widest_int *prat = NULL)
+get_computation_aff_1 (class loop *loop, gimple *at, struct iv_use *use,
+ struct iv_cand *cand, class aff_tree *aff_inv,
+ class aff_tree *aff_var, widest_int *prat = NULL)
{
tree ubase = use->iv->base, ustep = use->iv->step;
tree cbase = cand->iv->base, cstep = cand->iv->step;
form into AFF. Returns false if USE cannot be expressed using CAND. */
static bool
-get_computation_aff (struct loop *loop, gimple *at, struct iv_use *use,
- struct iv_cand *cand, struct aff_tree *aff)
+get_computation_aff (class loop *loop, gimple *at, struct iv_use *use,
+ struct iv_cand *cand, class aff_tree *aff)
{
aff_tree aff_var;
CAND at statement AT in LOOP. The computation is unshared. */
static tree
-get_computation_at (struct loop *loop, gimple *at,
+get_computation_at (class loop *loop, gimple *at,
struct iv_use *use, struct iv_cand *cand)
{
aff_tree aff;
stores it to VAL. */
static void
-cand_value_at (struct loop *loop, struct iv_cand *cand, gimple *at, tree niter,
+cand_value_at (class loop *loop, struct iv_cand *cand, gimple *at, tree niter,
aff_tree *val)
{
aff_tree step, delta, nit;
static enum tree_code
iv_elimination_compare (struct ivopts_data *data, struct iv_use *use)
{
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
basic_block ex_bb;
edge exit;
static bool
iv_elimination_compare_lt (struct ivopts_data *data,
struct iv_cand *cand, enum tree_code *comp_p,
- struct tree_niter_desc *niter)
+ class tree_niter_desc *niter)
{
tree cand_type, a, b, mbz, nit_type = TREE_TYPE (niter->niter), offset;
- struct aff_tree nit, tmpa, tmpb;
+ class aff_tree nit, tmpa, tmpb;
enum tree_code comp;
HOST_WIDE_INT step;
basic_block ex_bb;
edge exit;
tree period;
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
aff_tree bnd;
- struct tree_niter_desc *desc = NULL;
+ class tree_niter_desc *desc = NULL;
if (TREE_CODE (cand->iv->step) != INTEGER_CST)
return false;
gphi *phi;
gphi_iterator psi;
tree op;
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
bitmap_iterator bi;
if (dump_file && (dump_flags & TDF_DETAILS))
/* Returns true if A is a cheaper cost pair than B. */
static bool
-cheaper_cost_pair (struct cost_pair *a, struct cost_pair *b)
+cheaper_cost_pair (class cost_pair *a, class cost_pair *b)
{
if (!a)
return false;
for more expensive, equal and cheaper respectively. */
static int
-compare_cost_pair (struct cost_pair *a, struct cost_pair *b)
+compare_cost_pair (class cost_pair *a, class cost_pair *b)
{
if (cheaper_cost_pair (a, b))
return -1;
/* Returns the candidate by which USE is expressed in IVS. */
-static struct cost_pair *
-iv_ca_cand_for_group (struct iv_ca *ivs, struct iv_group *group)
+static class cost_pair *
+iv_ca_cand_for_group (class iv_ca *ivs, struct iv_group *group)
{
return ivs->cand_for_group[group->id];
}
/* Computes the cost field of IVS structure. */
static void
-iv_ca_recount_cost (struct ivopts_data *data, struct iv_ca *ivs)
+iv_ca_recount_cost (struct ivopts_data *data, class iv_ca *ivs)
{
comp_cost cost = ivs->cand_use_cost;
and IVS. */
static void
-iv_ca_set_remove_invs (struct iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
+iv_ca_set_remove_invs (class iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
{
bitmap_iterator bi;
unsigned iid;
/* Set USE not to be expressed by any candidate in IVS. */
static void
-iv_ca_set_no_cp (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_set_no_cp (struct ivopts_data *data, class iv_ca *ivs,
struct iv_group *group)
{
unsigned gid = group->id, cid;
- struct cost_pair *cp;
+ class cost_pair *cp;
cp = ivs->cand_for_group[gid];
if (!cp)
IVS. */
static void
-iv_ca_set_add_invs (struct iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
+iv_ca_set_add_invs (class iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
{
bitmap_iterator bi;
unsigned iid;
/* Set cost pair for GROUP in set IVS to CP. */
static void
-iv_ca_set_cp (struct ivopts_data *data, struct iv_ca *ivs,
- struct iv_group *group, struct cost_pair *cp)
+iv_ca_set_cp (struct ivopts_data *data, class iv_ca *ivs,
+ struct iv_group *group, class cost_pair *cp)
{
unsigned gid = group->id, cid;
set IVS don't give any result. */
static void
-iv_ca_add_group (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_add_group (struct ivopts_data *data, class iv_ca *ivs,
struct iv_group *group)
{
- struct cost_pair *best_cp = NULL, *cp;
+ class cost_pair *best_cp = NULL, *cp;
bitmap_iterator bi;
unsigned i;
struct iv_cand *cand;
/* Get cost for assignment IVS. */
static comp_cost
-iv_ca_cost (struct iv_ca *ivs)
+iv_ca_cost (class iv_ca *ivs)
{
/* This was a conditional expression but it triggered a bug in
Sun C 5.5. */
respectively. */
static int
-iv_ca_compare_deps (struct ivopts_data *data, struct iv_ca *ivs,
- struct iv_group *group, struct cost_pair *old_cp,
- struct cost_pair *new_cp)
+iv_ca_compare_deps (struct ivopts_data *data, class iv_ca *ivs,
+ struct iv_group *group, class cost_pair *old_cp,
+ class cost_pair *new_cp)
{
gcc_assert (old_cp && new_cp && old_cp != new_cp);
unsigned old_n_invs = ivs->n_invs;
it before NEXT. */
static struct iv_ca_delta *
-iv_ca_delta_add (struct iv_group *group, struct cost_pair *old_cp,
- struct cost_pair *new_cp, struct iv_ca_delta *next)
+iv_ca_delta_add (struct iv_group *group, class cost_pair *old_cp,
+ class cost_pair *new_cp, struct iv_ca_delta *next)
{
struct iv_ca_delta *change = XNEW (struct iv_ca_delta);
reverted instead. */
static void
-iv_ca_delta_commit (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_delta_commit (struct ivopts_data *data, class iv_ca *ivs,
struct iv_ca_delta *delta, bool forward)
{
- struct cost_pair *from, *to;
+ class cost_pair *from, *to;
struct iv_ca_delta *act;
if (!forward)
/* Returns true if CAND is used in IVS. */
static bool
-iv_ca_cand_used_p (struct iv_ca *ivs, struct iv_cand *cand)
+iv_ca_cand_used_p (class iv_ca *ivs, struct iv_cand *cand)
{
return ivs->n_cand_uses[cand->id] > 0;
}
/* Returns number of induction variable candidates in the set IVS. */
static unsigned
-iv_ca_n_cands (struct iv_ca *ivs)
+iv_ca_n_cands (class iv_ca *ivs)
{
return ivs->n_cands;
}
/* Allocates new iv candidates assignment. */
-static struct iv_ca *
+static class iv_ca *
iv_ca_new (struct ivopts_data *data)
{
- struct iv_ca *nw = XNEW (struct iv_ca);
+ class iv_ca *nw = XNEW (class iv_ca);
nw->upto = 0;
nw->bad_groups = 0;
- nw->cand_for_group = XCNEWVEC (struct cost_pair *,
+ nw->cand_for_group = XCNEWVEC (class cost_pair *,
data->vgroups.length ());
nw->n_cand_uses = XCNEWVEC (unsigned, data->vcands.length ());
nw->cands = BITMAP_ALLOC (NULL);
/* Free memory occupied by the set IVS. */
static void
-iv_ca_free (struct iv_ca **ivs)
+iv_ca_free (class iv_ca **ivs)
{
free ((*ivs)->cand_for_group);
free ((*ivs)->n_cand_uses);
/* Dumps IVS to FILE. */
static void
-iv_ca_dump (struct ivopts_data *data, FILE *file, struct iv_ca *ivs)
+iv_ca_dump (struct ivopts_data *data, FILE *file, class iv_ca *ivs)
{
unsigned i;
comp_cost cost = iv_ca_cost (ivs);
for (i = 0; i < ivs->upto; i++)
{
struct iv_group *group = data->vgroups[i];
- struct cost_pair *cp = iv_ca_cand_for_group (ivs, group);
+ class cost_pair *cp = iv_ca_cand_for_group (ivs, group);
if (cp)
fprintf (file, " group:%d --> iv_cand:%d, cost=("
"%" PRId64 ",%d)\n", group->id, cp->cand->id,
the function will try to find a solution with minimal iv candidates. */
static comp_cost
-iv_ca_extend (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_extend (struct ivopts_data *data, class iv_ca *ivs,
struct iv_cand *cand, struct iv_ca_delta **delta,
unsigned *n_ivs, bool min_ncand)
{
unsigned i;
comp_cost cost;
struct iv_group *group;
- struct cost_pair *old_cp, *new_cp;
+ class cost_pair *old_cp, *new_cp;
*delta = NULL;
for (i = 0; i < ivs->upto; i++)
the candidate with which we start narrowing. */
static comp_cost
-iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_narrow (struct ivopts_data *data, class iv_ca *ivs,
struct iv_cand *cand, struct iv_cand *start,
struct iv_ca_delta **delta)
{
unsigned i, ci;
struct iv_group *group;
- struct cost_pair *old_cp, *new_cp, *cp;
+ class cost_pair *old_cp, *new_cp, *cp;
bitmap_iterator bi;
struct iv_cand *cnd;
comp_cost cost, best_cost, acost;
differences in DELTA. */
static comp_cost
-iv_ca_prune (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_prune (struct ivopts_data *data, class iv_ca *ivs,
struct iv_cand *except_cand, struct iv_ca_delta **delta)
{
bitmap_iterator bi;
cheaper local cost for GROUP than BEST_CP. Return pointer to
the corresponding cost_pair, otherwise just return BEST_CP. */
-static struct cost_pair*
+static class cost_pair*
cheaper_cost_with_cand (struct ivopts_data *data, struct iv_group *group,
unsigned int cand_idx, struct iv_cand *old_cand,
- struct cost_pair *best_cp)
+ class cost_pair *best_cp)
{
struct iv_cand *cand;
- struct cost_pair *cp;
+ class cost_pair *cp;
gcc_assert (old_cand != NULL && best_cp != NULL);
if (cand_idx == old_cand->id)
candidate replacement in list DELTA. */
static comp_cost
-iv_ca_replace (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_replace (struct ivopts_data *data, class iv_ca *ivs,
struct iv_ca_delta **delta)
{
bitmap_iterator bi, bj;
struct iv_cand *cand;
comp_cost orig_cost, acost;
struct iv_ca_delta *act_delta, *tmp_delta;
- struct cost_pair *old_cp, *best_cp = NULL;
+ class cost_pair *old_cp, *best_cp = NULL;
*delta = NULL;
orig_cost = iv_ca_cost (ivs);
based on any memory object. */
static bool
-try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
+try_add_cand_for (struct ivopts_data *data, class iv_ca *ivs,
struct iv_group *group, bool originalp)
{
comp_cost best_cost, act_cost;
bitmap_iterator bi;
struct iv_cand *cand;
struct iv_ca_delta *best_delta = NULL, *act_delta;
- struct cost_pair *cp;
+ class cost_pair *cp;
iv_ca_add_group (data, ivs, group);
best_cost = iv_ca_cost (ivs);
/* Finds an initial assignment of candidates to uses. */
-static struct iv_ca *
+static class iv_ca *
get_initial_solution (struct ivopts_data *data, bool originalp)
{
unsigned i;
- struct iv_ca *ivs = iv_ca_new (data);
+ class iv_ca *ivs = iv_ca_new (data);
for (i = 0; i < data->vgroups.length (); i++)
if (!try_add_cand_for (data, ivs, data->vgroups[i], originalp))
static bool
try_improve_iv_set (struct ivopts_data *data,
- struct iv_ca *ivs, bool *try_replace_p)
+ class iv_ca *ivs, bool *try_replace_p)
{
unsigned i, n_ivs;
comp_cost acost, best_cost = iv_ca_cost (ivs);
greedy heuristic -- we try to replace at most one candidate in the selected
solution and remove the unused ivs while this improves the cost. */
-static struct iv_ca *
+static class iv_ca *
find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
{
- struct iv_ca *set;
+ class iv_ca *set;
bool try_replace_p = true;
/* Get the initial solution. */
return set;
}
-static struct iv_ca *
+static class iv_ca *
find_optimal_iv_set (struct ivopts_data *data)
{
unsigned i;
comp_cost cost, origcost;
- struct iv_ca *set, *origset;
+ class iv_ca *set, *origset;
/* Determine the cost based on a strategy that starts with original IVs,
and try again using a strategy that prefers candidates not based
/* Creates new induction variables described in SET. */
static void
-create_new_ivs (struct ivopts_data *data, struct iv_ca *set)
+create_new_ivs (struct ivopts_data *data, class iv_ca *set)
{
unsigned i;
struct iv_cand *cand;
gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
enum tree_code compare;
struct iv_group *group = data->vgroups[use->group_id];
- struct cost_pair *cp = get_group_iv_cost (data, group, cand);
+ class cost_pair *cp = get_group_iv_cost (data, group, cand);
bound = cp->value;
if (bound)
}
}
-/* Frees memory occupied by struct tree_niter_desc in *VALUE. Callback
+/* Frees memory occupied by class tree_niter_desc in *VALUE. Callback
for hash_map::traverse. */
bool
/* Optimizes the LOOP. Returns true if anything changed. */
static bool
-tree_ssa_iv_optimize_loop (struct ivopts_data *data, struct loop *loop,
+tree_ssa_iv_optimize_loop (struct ivopts_data *data, class loop *loop,
bitmap toremove)
{
bool changed = false;
- struct iv_ca *iv_ca;
+ class iv_ca *iv_ca;
edge exit = single_dom_exit (loop);
basic_block *body;
void
tree_ssa_iv_optimize (void)
{
- struct loop *loop;
+ class loop *loop;
struct ivopts_data data;
auto_bitmap toremove;
#ifndef GCC_TREE_SSA_LOOP_IVOPTS_H
#define GCC_TREE_SSA_LOOP_IVOPTS_H
-extern edge single_dom_exit (struct loop *);
+extern edge single_dom_exit (class loop *);
extern void dump_iv (FILE *, struct iv *);
extern void dump_use (FILE *, struct iv_use *);
extern void dump_uses (FILE *, struct ivopts_data *);
extern void dump_cand (FILE *, struct iv_cand *);
extern bool contains_abnormal_ssa_name_p (tree);
-extern struct loop *outermost_invariant_loop_for_expr (struct loop *, tree);
-extern bool expr_invariant_in_loop_p (struct loop *, tree);
+extern class loop *outermost_invariant_loop_for_expr (class loop *, tree);
+extern bool expr_invariant_in_loop_p (class loop *, tree);
extern tree strip_offset (tree, poly_uint64_pod *);
bool may_be_nonaddressable_p (tree expr);
void tree_ssa_iv_optimize (void);
-void create_canonical_iv (struct loop *, edge, tree,
+void create_canonical_iv (class loop *, edge, tree,
tree * = NULL, tree * = NULL);
#endif /* GCC_TREE_SSA_LOOP_IVOPTS_H */
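The two declarations above combine naturally: single_dom_exit yields the controlling exit, if any, and create_canonical_iv installs a canonical induction variable for it. A minimal sketch, assuming NITER has already been computed for LOOP; install_canonical_iv is a hypothetical wrapper:

  /* Sketch: give LOOP a canonical IV controlling its single exit.  */
  static bool
  install_canonical_iv (class loop *loop, tree niter)
  {
    edge exit = single_dom_exit (loop);
    if (!exit)
      return false;
    create_canonical_iv (loop, exit, niter);
    return true;
  }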
VAR_AFTER (unless they are NULL). */
void
-create_iv (tree base, tree step, tree var, struct loop *loop,
+create_iv (tree base, tree step, tree var, class loop *loop,
gimple_stmt_iterator *incr_pos, bool after,
tree *var_before, tree *var_after)
{
/* Return the innermost superloop LOOP of USE_LOOP that is a superloop of
both DEF_LOOP and USE_LOOP. */
-static inline struct loop *
-find_sibling_superloop (struct loop *use_loop, struct loop *def_loop)
+static inline class loop *
+find_sibling_superloop (class loop *use_loop, class loop *def_loop)
{
unsigned ud = loop_depth (use_loop);
unsigned dd = loop_depth (def_loop);
{
unsigned i;
bitmap_iterator bi;
- struct loop *def_loop = def_bb->loop_father;
+ class loop *def_loop = def_bb->loop_father;
unsigned def_loop_depth = loop_depth (def_loop);
bitmap def_loop_exits;
EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
{
basic_block use_bb = BASIC_BLOCK_FOR_FN (cfun, i);
- struct loop *use_loop = use_bb->loop_father;
+ class loop *use_loop = use_bb->loop_father;
gcc_checking_assert (def_loop != use_loop
&& ! flow_loop_nested_p (def_loop, use_loop));
if (! flow_loop_nested_p (use_loop, def_loop))
FOR_EACH_EDGE (e, ei, bb->preds)
{
basic_block pred = e->src;
- struct loop *pred_loop = pred->loop_father;
+ class loop *pred_loop = pred->loop_father;
unsigned pred_loop_depth = loop_depth (pred_loop);
bool pred_visited;
}
def_loop_exits = BITMAP_ALLOC (&loop_renamer_obstack);
- for (struct loop *loop = def_loop;
+ for (class loop *loop = def_loop;
loop != current_loops->tree_root;
loop = loop_outer (loop))
bitmap_ior_into (def_loop_exits, loop_exits[loop->num]);
basic_block def_bb = gimple_bb (def_stmt);
FOR_EACH_EDGE (e, ei, exit->preds)
{
- struct loop *aloop = find_common_loop (def_bb->loop_father,
+ class loop *aloop = find_common_loop (def_bb->loop_father,
e->src->loop_father);
if (!flow_bb_inside_loop_p (aloop, e->dest))
break;
static void
get_loops_exits (bitmap *loop_exits)
{
- struct loop *loop;
+ class loop *loop;
unsigned j;
edge e;
{
unsigned ver;
basic_block def_bb;
- struct loop *def_loop;
+ class loop *def_loop;
if (TREE_CODE (use) != SSA_NAME)
return;
USE_BLOCKS. Record the SSA names that will need exit PHIs in NEED_PHIS. */
static void
-find_uses_to_rename_in_loop (struct loop *loop, bitmap *use_blocks,
+find_uses_to_rename_in_loop (class loop *loop, bitmap *use_blocks,
bitmap need_phis, int use_flags)
{
bool do_virtuals = (use_flags & SSA_OP_VIRTUAL_USES) != 0;
void
rewrite_into_loop_closed_ssa_1 (bitmap changed_bbs, unsigned update_flag,
- int use_flags, struct loop *loop)
+ int use_flags, class loop *loop)
{
bitmap *use_blocks;
bitmap names_to_rename;
form. */
void
-rewrite_virtuals_into_loop_closed_ssa (struct loop *loop)
+rewrite_virtuals_into_loop_closed_ssa (class loop *loop)
{
rewrite_into_loop_closed_ssa_1 (NULL, 0, SSA_OP_VIRTUAL_USES, loop);
}
if LOOP is NULL, otherwise, only LOOP is checked. */
DEBUG_FUNCTION void
-verify_loop_closed_ssa (bool verify_ssa_p, struct loop *loop)
+verify_loop_closed_ssa (bool verify_ssa_p, class loop *loop)
{
if (number_of_loops (cfun) <= 1)
return;
variables incremented at the end of the LOOP. */
basic_block
-ip_end_pos (struct loop *loop)
+ip_end_pos (class loop *loop)
{
return loop->latch;
}
variables incremented just before exit condition of a LOOP. */
basic_block
-ip_normal_pos (struct loop *loop)
+ip_normal_pos (class loop *loop)
{
gimple *last;
basic_block bb;
the increment should be inserted after *BSI. */
void
-standard_iv_increment_position (struct loop *loop, gimple_stmt_iterator *bsi,
+standard_iv_increment_position (class loop *loop, gimple_stmt_iterator *bsi,
bool *insert_after)
{
basic_block bb = ip_normal_pos (loop), latch = ip_end_pos (loop);
after the loop has been duplicated. */
bool
-gimple_duplicate_loop_to_header_edge (struct loop *loop, edge e,
+gimple_duplicate_loop_to_header_edge (class loop *loop, edge e,
unsigned int ndupl, sbitmap wont_exit,
edge orig, vec<edge> *to_remove,
int flags)
of iterations of the loop is returned in NITER. */
bool
-can_unroll_loop_p (struct loop *loop, unsigned factor,
- struct tree_niter_desc *niter)
+can_unroll_loop_p (class loop *loop, unsigned factor,
+ class tree_niter_desc *niter)
{
edge exit;
how the exit from the unrolled loop should be controlled. */
static void
-determine_exit_conditions (struct loop *loop, struct tree_niter_desc *desc,
+determine_exit_conditions (class loop *loop, class tree_niter_desc *desc,
unsigned factor, tree *enter_cond,
tree *exit_base, tree *exit_step,
enum tree_code *exit_cmp, tree *exit_bound)
dominated by BB by NUM/DEN. */
static void
-scale_dominated_blocks_in_loop (struct loop *loop, basic_block bb,
+scale_dominated_blocks_in_loop (class loop *loop, basic_block bb,
profile_count num, profile_count den)
{
basic_block son;
/* Return estimated niter for LOOP after unrolling by FACTOR times. */
gcov_type
-niter_for_unrolled_loop (struct loop *loop, unsigned factor)
+niter_for_unrolled_loop (class loop *loop, unsigned factor)
{
gcc_assert (factor != 0);
bool profile_p = false;
#define PROB_UNROLLED_LOOP_ENTERED 90
void
-tree_transform_and_unroll_loop (struct loop *loop, unsigned factor,
- edge exit, struct tree_niter_desc *desc,
+tree_transform_and_unroll_loop (class loop *loop, unsigned factor,
+ edge exit, class tree_niter_desc *desc,
transform_callback transform,
void *data)
{
gphi *phi_old_loop, *phi_new_loop, *phi_rest;
gphi_iterator psi_old_loop, psi_new_loop;
tree init, next, new_init;
- struct loop *new_loop;
+ class loop *new_loop;
basic_block rest, exit_bb;
edge old_entry, new_entry, old_latch, precond_edge, new_exit;
edge new_nonexit, e;
of the arguments is the same as for tree_transform_and_unroll_loop. */
void
-tree_unroll_loop (struct loop *loop, unsigned factor,
- edge exit, struct tree_niter_desc *desc)
+tree_unroll_loop (class loop *loop, unsigned factor,
+ edge exit, class tree_niter_desc *desc)
{
tree_transform_and_unroll_loop (loop, factor, exit, desc,
NULL, NULL);
created. */
tree
-canonicalize_loop_ivs (struct loop *loop, tree *nit, bool bump_in_latch)
+canonicalize_loop_ivs (class loop *loop, tree *nit, bool bump_in_latch)
{
unsigned precision = TYPE_PRECISION (TREE_TYPE (*nit));
unsigned original_precision = precision;
#ifndef GCC_TREE_SSA_LOOP_MANIP_H
#define GCC_TREE_SSA_LOOP_MANIP_H
-typedef void (*transform_callback)(struct loop *, void *);
+typedef void (*transform_callback)(class loop *, void *);
-extern void create_iv (tree, tree, tree, struct loop *, gimple_stmt_iterator *,
+extern void create_iv (tree, tree, tree, class loop *, gimple_stmt_iterator *,
bool, tree *, tree *);
extern void rewrite_into_loop_closed_ssa_1 (bitmap, unsigned, int,
- struct loop *);
+ class loop *);
extern void rewrite_into_loop_closed_ssa (bitmap, unsigned);
-extern void rewrite_virtuals_into_loop_closed_ssa (struct loop *);
-extern void verify_loop_closed_ssa (bool, struct loop * = NULL);
+extern void rewrite_virtuals_into_loop_closed_ssa (class loop *);
+extern void verify_loop_closed_ssa (bool, class loop * = NULL);
static inline void
-checking_verify_loop_closed_ssa (bool verify_ssa_p, struct loop *loop = NULL)
+checking_verify_loop_closed_ssa (bool verify_ssa_p, class loop *loop = NULL)
{
if (flag_checking)
verify_loop_closed_ssa (verify_ssa_p, loop);
}
extern basic_block split_loop_exit_edge (edge, bool = false);
-extern basic_block ip_end_pos (struct loop *);
-extern basic_block ip_normal_pos (struct loop *);
-extern void standard_iv_increment_position (struct loop *,
+extern basic_block ip_end_pos (class loop *);
+extern basic_block ip_normal_pos (class loop *);
+extern void standard_iv_increment_position (class loop *,
gimple_stmt_iterator *, bool *);
-extern bool gimple_duplicate_loop_to_header_edge (struct loop *, edge,
+extern bool gimple_duplicate_loop_to_header_edge (class loop *, edge,
unsigned int, sbitmap,
edge, vec<edge> *,
int);
-extern bool can_unroll_loop_p (struct loop *loop, unsigned factor,
- struct tree_niter_desc *niter);
-extern gcov_type niter_for_unrolled_loop (struct loop *, unsigned);
-extern void tree_transform_and_unroll_loop (struct loop *, unsigned,
- edge, struct tree_niter_desc *,
+extern bool can_unroll_loop_p (class loop *loop, unsigned factor,
+ class tree_niter_desc *niter);
+extern gcov_type niter_for_unrolled_loop (class loop *, unsigned);
+extern void tree_transform_and_unroll_loop (class loop *, unsigned,
+ edge, class tree_niter_desc *,
transform_callback, void *);
-extern void tree_unroll_loop (struct loop *, unsigned,
- edge, struct tree_niter_desc *);
-extern tree canonicalize_loop_ivs (struct loop *, tree *, bool);
+extern void tree_unroll_loop (class loop *, unsigned,
+ edge, class tree_niter_desc *);
+extern tree canonicalize_loop_ivs (class loop *, tree *, bool);
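Taken together with the tree_niter_desc-based declarations above, a full unroll request reduces to a feasibility check plus the transform. A minimal sketch, assuming the loop is in the state these routines expect (loop-closed SSA, etc.) and that single_dom_exit from tree-ssa-loop-ivopts.h is available; unroll_by is a hypothetical wrapper:

  /* Sketch: unroll LOOP by FACTOR if the manip routines allow it.  */
  static bool
  unroll_by (class loop *loop, unsigned factor)
  {
    class tree_niter_desc desc;
    if (!can_unroll_loop_p (loop, factor, &desc))
      return false;
    /* Non-NULL whenever the check above succeeded.  */
    edge exit = single_dom_exit (loop);
    tree_unroll_loop (loop, factor, exit, &desc);
    return true;
  }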
static bool number_of_iterations_popcount (loop_p loop, edge exit,
enum tree_code code,
- struct tree_niter_desc *niter);
+ class tree_niter_desc *niter);
/* Splits expression EXPR to a variable part VAR and constant OFFSET. */
in TYPE to MIN and MAX. */
static void
-determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
+determine_value_range (class loop *loop, tree type, tree var, mpz_t off,
mpz_t min, mpz_t max)
{
int cnt = 0;
comparisons before the loop (usually created by loop header copying). */
static void
-bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
+bound_difference (class loop *loop, tree x, tree y, bounds *bnds)
{
tree type = TREE_TYPE (x);
tree varx, vary;
bounds on the difference FINAL - IV->base. */
static bool
-number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
- tree final, struct tree_niter_desc *niter,
+number_of_iterations_ne (class loop *loop, tree type, affine_iv *iv,
+ tree final, class tree_niter_desc *niter,
bool exit_must_be_taken, bounds *bnds)
{
tree niter_type = unsigned_type_for (type);
static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
- struct tree_niter_desc *niter,
+ class tree_niter_desc *niter,
tree *delta, tree step,
bool exit_must_be_taken, bounds *bnds)
{
static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
- struct tree_niter_desc *niter, tree step)
+ class tree_niter_desc *niter, tree step)
{
tree bound, d, assumption, diff;
tree niter_type = TREE_TYPE (step);
static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
- struct tree_niter_desc *niter, bounds *bnds)
+ class tree_niter_desc *niter, bounds *bnds)
{
tree assumption = boolean_true_node, bound, diff;
tree mbz, mbzl, mbzr, type1;
that the exit must be taken eventually. */
static bool
-number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
- affine_iv *iv1, struct tree_niter_desc *niter,
+number_of_iterations_lt (class loop *loop, tree type, affine_iv *iv0,
+ affine_iv *iv1, class tree_niter_desc *niter,
bool exit_must_be_taken, bounds *bnds)
{
tree niter_type = unsigned_type_for (type);
is the case). BNDS bounds the difference IV1->base - IV0->base. */
static bool
-number_of_iterations_le (struct loop *loop, tree type, affine_iv *iv0,
- affine_iv *iv1, struct tree_niter_desc *niter,
+number_of_iterations_le (class loop *loop, tree type, affine_iv *iv0,
+ affine_iv *iv1, class tree_niter_desc *niter,
bool exit_must_be_taken, bounds *bnds)
{
tree assumption;
if EVERY_ITERATION is true, we know the test is executed on every iteration.
The results (number of iterations and assumptions as described in
- comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
+ comments at class tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
Returns false if it fails to determine number of iterations, true if it
was determined (possibly with some assumptions). */
static bool
-number_of_iterations_cond (struct loop *loop,
+number_of_iterations_cond (class loop *loop,
tree type, affine_iv *iv0, enum tree_code code,
- affine_iv *iv1, struct tree_niter_desc *niter,
+ affine_iv *iv1, class tree_niter_desc *niter,
bool only_exit, bool every_iteration)
{
bool exit_must_be_taken = false, ret;
simplification was possible). */
tree
-simplify_using_initial_conditions (struct loop *loop, tree expr)
+simplify_using_initial_conditions (class loop *loop, tree expr)
{
edge e;
basic_block bb;
(or EXPR unchanged, if no simplification was possible). */
static tree
-simplify_using_outer_evolutions (struct loop *loop, tree expr)
+simplify_using_outer_evolutions (class loop *loop, tree expr)
{
enum tree_code code = TREE_CODE (expr);
bool changed;
/* Returns true if EXIT is the only possible exit from LOOP. */
bool
-loop_only_exit_p (const struct loop *loop, const_edge exit)
+loop_only_exit_p (const class loop *loop, const_edge exit)
{
basic_block *body;
gimple_stmt_iterator bsi;
/* Stores description of number of iterations of LOOP derived from
EXIT (an exit edge of the LOOP) in NITER. Returns true if some useful
information could be derived (and fields of NITER have meaning described
- in comments at struct tree_niter_desc declaration), false otherwise.
+ in comments at class tree_niter_desc declaration), false otherwise.
When EVERY_ITERATION is true, only tests that are known to be executed
every iteration are considered (i.e. only a test that alone bounds the loop).
If AT_STMT is not NULL, this function stores LOOP's condition statement in
it when returning true. */
bool
-number_of_iterations_exit_assumptions (struct loop *loop, edge exit,
- struct tree_niter_desc *niter,
+number_of_iterations_exit_assumptions (class loop *loop, edge exit,
+ class tree_niter_desc *niter,
gcond **at_stmt, bool every_iteration)
{
gimple *last;
static bool
number_of_iterations_popcount (loop_p loop, edge exit,
enum tree_code code,
- struct tree_niter_desc *niter)
+ class tree_niter_desc *niter)
{
bool adjust = true;
tree iter;
the niter information holds unconditionally. */
bool
-number_of_iterations_exit (struct loop *loop, edge exit,
- struct tree_niter_desc *niter,
+number_of_iterations_exit (class loop *loop, edge exit,
+ class tree_niter_desc *niter,
bool warn, bool every_iteration)
{
gcond *stmt;
chrec_dont_know is returned. */
tree
-find_loop_niter (struct loop *loop, edge *exit)
+find_loop_niter (class loop *loop, edge *exit)
{
unsigned i;
vec<edge> exits = get_loop_exit_edges (loop);
edge ex;
tree niter = NULL_TREE, aniter;
- struct tree_niter_desc desc;
+ class tree_niter_desc desc;
*exit = NULL;
FOR_EACH_VEC_ELT (exits, i, ex)
/* Return true if loop is known to have bounded number of iterations. */
bool
-finite_loop_p (struct loop *loop)
+finite_loop_p (class loop *loop)
{
widest_int nit;
int flags;
operands are constants. */
static gphi *
-chain_of_csts_start (struct loop *loop, tree x)
+chain_of_csts_start (class loop *loop, tree x)
{
gimple *stmt = SSA_NAME_DEF_STMT (x);
tree use;
If such phi node exists, it is returned, otherwise NULL is returned. */
static gphi *
-get_base_for (struct loop *loop, tree x)
+get_base_for (class loop *loop, tree x)
{
gphi *phi;
tree init, next;
of the iterations of LOOP if successful, chrec_dont_know otherwise. */
tree
-loop_niter_by_eval (struct loop *loop, edge exit)
+loop_niter_by_eval (class loop *loop, edge exit)
{
tree acnd;
tree op[2], val[2], next[2], aval[2];
determines the number of iterations, chrec_dont_know is returned. */
tree
-find_loop_niter_by_eval (struct loop *loop, edge *exit)
+find_loop_niter_by_eval (class loop *loop, edge *exit)
{
unsigned i;
vec<edge> exits = get_loop_exit_edges (loop);
/* Emit a -Waggressive-loop-optimizations warning if needed. */
static void
-do_warn_aggressive_loop_optimizations (struct loop *loop,
+do_warn_aggressive_loop_optimizations (class loop *loop,
widest_int i_bound, gimple *stmt)
{
/* Don't warn if the loop doesn't have known constant bound. */
BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
static void
-record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
+record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
gimple *at_stmt, bool is_exit, bool realistic, bool upper)
{
widest_int delta;
|| loop->nb_iterations == NULL_TREE
|| TREE_CODE (loop->nb_iterations) != INTEGER_CST))
{
- struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
+ class nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
elt->bound = i_bound;
elt->stmt = at_stmt;
and doesn't overflow. */
static void
-record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
+record_control_iv (class loop *loop, class tree_niter_desc *niter)
{
struct control_iv *iv;
return false;
gimple *def_stmt = SSA_NAME_DEF_STMT (var);
- struct loop *loop = loop_containing_stmt (def_stmt);
+ class loop *loop = loop_containing_stmt (def_stmt);
if (loop == NULL)
return false;
UPPER is true if we are sure the induction variable does not wrap. */
static void
-record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
+record_nonwrapping_iv (class loop *loop, tree base, tree step, gimple *stmt,
tree low, tree high, bool realistic, bool upper)
{
tree niter_bound, extreme, delta;
struct ilb_data
{
- struct loop *loop;
+ class loop *loop;
gimple *stmt;
};
tree ev, init, step;
tree low, high, type, next;
bool sign, upper = true, at_end = false;
- struct loop *loop = data->loop;
+ class loop *loop = data->loop;
if (TREE_CODE (base) != ARRAY_REF)
return true;
upper = false;
}
- struct loop *dloop = loop_containing_stmt (data->stmt);
+ class loop *dloop = loop_containing_stmt (data->stmt);
if (!dloop)
return true;
STMT is guaranteed to be executed in every iteration of LOOP. */
static void
-infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
+infer_loop_bounds_from_ref (class loop *loop, gimple *stmt, tree ref)
{
struct ilb_data data;
executed in every iteration of LOOP. */
static void
-infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
+infer_loop_bounds_from_array (class loop *loop, gimple *stmt)
{
if (is_gimple_assign (stmt))
{
that pointer arithmetic in STMT does not overflow. */
static void
-infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
+infer_loop_bounds_from_pointer_arith (class loop *loop, gimple *stmt)
{
tree def, base, step, scev, type, low, high;
tree var, ptr;
if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
return;
- struct loop *uloop = loop_containing_stmt (stmt);
+ class loop *uloop = loop_containing_stmt (stmt);
scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
if (chrec_contains_undetermined (scev))
return;
that signed arithmetic in STMT does not overflow. */
static void
-infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
+infer_loop_bounds_from_signedness (class loop *loop, gimple *stmt)
{
tree def, base, step, scev, type, low, high;
*/
static void
-infer_loop_bounds_from_undefined (struct loop *loop)
+infer_loop_bounds_from_undefined (class loop *loop)
{
unsigned i;
basic_block *bbs;
some bounded statement. */
static void
-discover_iteration_bound_by_body_walk (struct loop *loop)
+discover_iteration_bound_by_body_walk (class loop *loop)
{
- struct nb_iter_bound *elt;
+ class nb_iter_bound *elt;
auto_vec<widest_int> bounds;
vec<vec<basic_block> > queues = vNULL;
vec<basic_block> queue = vNULL;
count by 1. */
static void
-maybe_lower_iteration_bound (struct loop *loop)
+maybe_lower_iteration_bound (class loop *loop)
{
hash_set<gimple *> *not_executed_last_iteration = NULL;
- struct nb_iter_bound *elt;
+ class nb_iter_bound *elt;
bool found_exit = false;
auto_vec<basic_block> queue;
bitmap visited;
is true also use estimates derived from undefined behavior. */
void
-estimate_numbers_of_iterations (struct loop *loop)
+estimate_numbers_of_iterations (class loop *loop)
{
vec<edge> exits;
tree niter, type;
unsigned i;
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
edge ex;
widest_int bound;
edge likely_exit;
the function returns false, otherwise returns true. */
bool
-estimated_loop_iterations (struct loop *loop, widest_int *nit)
+estimated_loop_iterations (class loop *loop, widest_int *nit)
{
/* When SCEV information is available, try to update loop iterations
estimate. Otherwise just return whatever we recorded earlier. */
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-estimated_loop_iterations_int (struct loop *loop)
+estimated_loop_iterations_int (class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
false, otherwise returns true. */
bool
-max_loop_iterations (struct loop *loop, widest_int *nit)
+max_loop_iterations (class loop *loop, widest_int *nit)
{
/* When SCEV information is available, try to update loop iterations
estimate. Otherwise just return whatever we recorded earlier. */
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-max_loop_iterations_int (struct loop *loop)
+max_loop_iterations_int (class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
false, otherwise returns true. */
bool
-likely_max_loop_iterations (struct loop *loop, widest_int *nit)
+likely_max_loop_iterations (class loop *loop, widest_int *nit)
{
/* When SCEV information is available, try to update loop iterations
estimate. Otherwise just return whatever we recorded earlier. */
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-likely_max_loop_iterations_int (struct loop *loop)
+likely_max_loop_iterations_int (class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
the number of execution of the latch by one. */
HOST_WIDE_INT
-estimated_stmt_executions_int (struct loop *loop)
+estimated_stmt_executions_int (class loop *loop)
{
HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
HOST_WIDE_INT snit;
false, otherwise returns true. */
bool
-max_stmt_executions (struct loop *loop, widest_int *nit)
+max_stmt_executions (class loop *loop, widest_int *nit)
{
widest_int nit_minus_one;
false, otherwise returns true. */
bool
-likely_max_stmt_executions (struct loop *loop, widest_int *nit)
+likely_max_stmt_executions (class loop *loop, widest_int *nit)
{
widest_int nit_minus_one;
false, otherwise returns true. */
bool
-estimated_stmt_executions (struct loop *loop, widest_int *nit)
+estimated_stmt_executions (class loop *loop, widest_int *nit)
{
widest_int nit_minus_one;
void
estimate_numbers_of_iterations (function *fn)
{
- struct loop *loop;
+ class loop *loop;
/* We don't want to issue signed overflow warnings while getting
loop iteration estimates. */
static bool
n_of_executions_at_most (gimple *stmt,
- struct nb_iter_bound *niter_bound,
+ class nb_iter_bound *niter_bound,
tree niter)
{
widest_int bound = niter_bound->bound;
static bool
loop_exits_before_overflow (tree base, tree step,
- gimple *at_stmt, struct loop *loop)
+ gimple *at_stmt, class loop *loop)
{
widest_int niter;
struct control_iv *civ;
- struct nb_iter_bound *bound;
+ class nb_iter_bound *bound;
tree e, delta, step_abs, unsigned_base;
tree type = TREE_TYPE (step);
tree unsigned_type, valid_niter;
(4294967295, 4294967296, ...). */
static bool
-scev_var_range_cant_overflow (tree var, tree step, struct loop *loop)
+scev_var_range_cant_overflow (tree var, tree step, class loop *loop)
{
tree type;
wide_int minv, maxv, diff, step_wi;
bool
scev_probably_wraps_p (tree var, tree base, tree step,
- gimple *at_stmt, struct loop *loop,
+ gimple *at_stmt, class loop *loop,
bool use_overflow_semantics)
{
/* FIXME: We really need something like
/* Frees the information on upper bounds on numbers of iterations of LOOP. */
void
-free_numbers_of_iterations_estimates (struct loop *loop)
+free_numbers_of_iterations_estimates (class loop *loop)
{
struct control_iv *civ;
- struct nb_iter_bound *bound;
+ class nb_iter_bound *bound;
loop->nb_iterations = NULL;
loop->estimate_state = EST_NOT_COMPUTED;
for (bound = loop->bounds; bound;)
{
- struct nb_iter_bound *next = bound->next;
+ class nb_iter_bound *next = bound->next;
ggc_free (bound);
bound = next;
}
void
free_numbers_of_iterations_estimates (function *fn)
{
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP_FN (fn, loop, 0)
free_numbers_of_iterations_estimates (loop);
at LOOP. */
void
-substitute_in_loop_info (struct loop *loop, tree name, tree val)
+substitute_in_loop_info (class loop *loop, tree name, tree val)
{
loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
}
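Since mismatched keys are valid C++, the point of normalizing them is diagnostic hygiene: to the best of my understanding this cleanup anticipates GCC's -Wmismatched-tags warning, which flags declarations whose key disagrees with the type's definition or first declaration. A short sketch of the pattern it is meant to catch (loop_t is again a stand-in):

class loop_t;      /* first declared (and later defined) with 'class'  */
struct loop_t;     /* valid C++, but the key disagrees: the pattern    */
                   /* a mismatched-tags warning is meant to catch      */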
#define GCC_TREE_SSA_LOOP_NITER_H
extern tree expand_simple_operations (tree, tree = NULL);
-extern tree simplify_using_initial_conditions (struct loop *, tree);
-extern bool loop_only_exit_p (const struct loop *, const_edge);
-extern bool number_of_iterations_exit (struct loop *, edge,
- struct tree_niter_desc *niter, bool,
+extern tree simplify_using_initial_conditions (class loop *, tree);
+extern bool loop_only_exit_p (const class loop *, const_edge);
+extern bool number_of_iterations_exit (class loop *, edge,
+ class tree_niter_desc *niter, bool,
bool every_iteration = true);
-extern bool number_of_iterations_exit_assumptions (struct loop *, edge,
- struct tree_niter_desc *,
+extern bool number_of_iterations_exit_assumptions (class loop *, edge,
+ class tree_niter_desc *,
gcond **, bool = true);
-extern tree find_loop_niter (struct loop *, edge *);
-extern bool finite_loop_p (struct loop *);
-extern tree loop_niter_by_eval (struct loop *, edge);
-extern tree find_loop_niter_by_eval (struct loop *, edge *);
-extern bool estimated_loop_iterations (struct loop *, widest_int *);
-extern HOST_WIDE_INT estimated_loop_iterations_int (struct loop *);
-extern bool max_loop_iterations (struct loop *, widest_int *);
-extern HOST_WIDE_INT max_loop_iterations_int (struct loop *);
-extern bool likely_max_loop_iterations (struct loop *, widest_int *);
-extern HOST_WIDE_INT likely_max_loop_iterations_int (struct loop *);
-extern HOST_WIDE_INT max_stmt_executions_int (struct loop *);
-extern HOST_WIDE_INT likely_max_stmt_executions_int (struct loop *);
-extern HOST_WIDE_INT estimated_stmt_executions_int (struct loop *);
-extern bool max_stmt_executions (struct loop *, widest_int *);
-extern bool likely_max_stmt_executions (struct loop *, widest_int *);
-extern bool estimated_stmt_executions (struct loop *, widest_int *);
+extern tree find_loop_niter (class loop *, edge *);
+extern bool finite_loop_p (class loop *);
+extern tree loop_niter_by_eval (class loop *, edge);
+extern tree find_loop_niter_by_eval (class loop *, edge *);
+extern bool estimated_loop_iterations (class loop *, widest_int *);
+extern HOST_WIDE_INT estimated_loop_iterations_int (class loop *);
+extern bool max_loop_iterations (class loop *, widest_int *);
+extern HOST_WIDE_INT max_loop_iterations_int (class loop *);
+extern bool likely_max_loop_iterations (class loop *, widest_int *);
+extern HOST_WIDE_INT likely_max_loop_iterations_int (class loop *);
+extern HOST_WIDE_INT max_stmt_executions_int (class loop *);
+extern HOST_WIDE_INT likely_max_stmt_executions_int (class loop *);
+extern HOST_WIDE_INT estimated_stmt_executions_int (class loop *);
+extern bool max_stmt_executions (class loop *, widest_int *);
+extern bool likely_max_stmt_executions (class loop *, widest_int *);
+extern bool estimated_stmt_executions (class loop *, widest_int *);
extern void estimate_numbers_of_iterations (function *);
-extern void estimate_numbers_of_iterations (struct loop *);
+extern void estimate_numbers_of_iterations (class loop *);
extern bool stmt_dominates_stmt_p (gimple *, gimple *);
extern bool nowrap_type_p (tree);
extern bool scev_probably_wraps_p (tree, tree, tree, gimple *,
- struct loop *, bool);
-extern void free_numbers_of_iterations_estimates (struct loop *);
+ class loop *, bool);
+extern void free_numbers_of_iterations_estimates (class loop *);
extern void free_numbers_of_iterations_estimates (function *);
extern tree simplify_replace_tree (tree, tree, tree, tree (*)(tree) = NULL);
-extern void substitute_in_loop_info (struct loop *, tree, tree);
+extern void substitute_in_loop_info (class loop *, tree, tree);
#endif /* GCC_TREE_SSA_LOOP_NITER_H */
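Whichever key a declaration uses, the type's size, layout, and ABI are untouched; between `struct` and `class` the only semantic difference is the default access of members and bases on the definition itself. A standalone sketch of that rule (C++11 for static_assert):

struct pod_t { int x; };           /* members public by default   */
class priv_t { int x; };           /* members private by default  */

static_assert (sizeof (struct pod_t) == sizeof (pod_t),
	       "the class-key never changes the type or its layout");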
struct ar_data
{
- struct loop *loop; /* Loop of the reference. */
+ class loop *loop; /* Loop of the reference. */
gimple *stmt; /* Statement of the reference. */
tree *step; /* Step of the memory reference. */
HOST_WIDE_INT *delta; /* Offset of the memory reference. */
references from REF_P. */
static bool
-analyze_ref (struct loop *loop, tree *ref_p, tree *base,
+analyze_ref (class loop *loop, tree *ref_p, tree *base,
tree *step, HOST_WIDE_INT *delta,
gimple *stmt)
{
reference was recorded, false otherwise. */
static bool
-gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
+gather_memory_references_ref (class loop *loop, struct mem_ref_group **refs,
tree ref, bool write_p, gimple *stmt)
{
tree base, step;
true if there are no other memory references inside the loop. */
static struct mem_ref_group *
-gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
+gather_memory_references (class loop *loop, bool *no_other_refs, unsigned *ref_count)
{
basic_block *body = get_loop_body_in_dom_order (loop);
basic_block bb;
/* Issue a memory fence instruction after LOOP. */
static void
-emit_mfence_after_loop (struct loop *loop)
+emit_mfence_after_loop (class loop *loop)
{
vec<edge> exits = get_loop_exit_edges (loop);
edge exit;
/* Returns true if we can use storent in loop, false otherwise. */
static bool
-may_use_storent_in_loop_p (struct loop *loop)
+may_use_storent_in_loop_p (class loop *loop)
{
bool ret = true;
references in the loop. */
static void
-mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
+mark_nontemporal_stores (class loop *loop, struct mem_ref_group *groups)
{
struct mem_ref *ref;
bool any = false;
iterations. */
static bool
-should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
+should_unroll_loop_p (class loop *loop, class tree_niter_desc *desc,
unsigned factor)
{
if (!can_unroll_loop_p (loop, factor, desc))
the loop, or -1 if no estimate is available. */
static unsigned
-determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
- unsigned ninsns, struct tree_niter_desc *desc,
+determine_unroll_factor (class loop *loop, struct mem_ref_group *refs,
+ unsigned ninsns, class tree_niter_desc *desc,
HOST_WIDE_INT est_niter)
{
unsigned upper_bound;
static void
add_subscript_strides (tree access_fn, unsigned stride,
- HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
+ HOST_WIDE_INT *strides, unsigned n, class loop *loop)
{
- struct loop *aloop;
+ class loop *aloop;
tree step;
HOST_WIDE_INT astep;
unsigned min_depth = loop_depth (loop) - n;
static unsigned
self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
- struct loop *loop)
+ class loop *loop)
{
tree stride, access_fn;
HOST_WIDE_INT *strides, astride;
memory references in the loop. Return false if the analysis fails. */
static bool
-determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
+determine_loop_nest_reuse (class loop *loop, struct mem_ref_group *refs,
bool no_other_refs)
{
- struct loop *nest, *aloop;
+ class loop *nest, *aloop;
vec<data_reference_p> datarefs = vNULL;
vec<ddr_p> dependences = vNULL;
struct mem_ref_group *gr;
true if the LOOP was unrolled. */
static bool
-loop_prefetch_arrays (struct loop *loop)
+loop_prefetch_arrays (class loop *loop)
{
struct mem_ref_group *refs;
unsigned ahead, ninsns, time, unroll_factor;
HOST_WIDE_INT est_niter;
- struct tree_niter_desc desc;
+ class tree_niter_desc desc;
bool unrolled = false, no_other_refs;
unsigned prefetch_count;
unsigned mem_ref_count;
unsigned int
tree_ssa_prefetch_arrays (void)
{
- struct loop *loop;
+ class loop *loop;
bool unrolled = false;
int todo_flags = 0;
point in *BORDER and the comparison induction variable in IV. */
static tree
-split_at_bb_p (struct loop *loop, basic_block bb, tree *border, affine_iv *iv)
+split_at_bb_p (class loop *loop, basic_block bb, tree *border, affine_iv *iv)
{
gimple *last;
gcond *stmt;
tree op0 = gimple_cond_lhs (stmt);
tree op1 = gimple_cond_rhs (stmt);
- struct loop *useloop = loop_containing_stmt (stmt);
+ class loop *useloop = loop_containing_stmt (stmt);
if (!simple_iv (loop, useloop, op0, iv, false))
return NULL_TREE;
also be true/false in the next iteration. */
static void
-patch_loop_exit (struct loop *loop, gcond *guard, tree nextval, tree newbound,
+patch_loop_exit (class loop *loop, gcond *guard, tree nextval, tree newbound,
bool initial_true)
{
edge exit = single_exit (loop);
such phi node. Return that phi node. */
static gphi *
-find_or_create_guard_phi (struct loop *loop, tree guard_iv, affine_iv * /*iv*/)
+find_or_create_guard_phi (class loop *loop, tree guard_iv, affine_iv * /*iv*/)
{
gimple *def = SSA_NAME_DEF_STMT (guard_iv);
gphi *phi;
determined easily (i.e. that connect_loop_phis can determine them). */
static bool
-easy_exit_values (struct loop *loop)
+easy_exit_values (class loop *loop)
{
edge exit = single_exit (loop);
edge latch = loop_latch_edge (loop);
this. The loops need to fulfill easy_exit_values(). */
static void
-connect_loop_phis (struct loop *loop1, struct loop *loop2, edge new_e)
+connect_loop_phis (class loop *loop1, class loop *loop2, edge new_e)
{
basic_block rest = loop_preheader_edge (loop2)->src;
gcc_assert (new_e->dest == rest);
This doesn't update the SSA form, see connect_loop_phis for that. */
static edge
-connect_loops (struct loop *loop1, struct loop *loop2)
+connect_loops (class loop *loop1, class loop *loop2)
{
edge exit = single_exit (loop1);
basic_block skip_bb = split_edge (exit);
and add or subtract 1. This routine computes newend above. */
static tree
-compute_new_first_bound (gimple_seq *stmts, struct tree_niter_desc *niter,
+compute_new_first_bound (gimple_seq *stmts, class tree_niter_desc *niter,
tree border,
enum tree_code guard_code, tree guard_init)
{
single exit of LOOP. */
static bool
-split_loop (struct loop *loop1, struct tree_niter_desc *niter)
+split_loop (class loop *loop1, class tree_niter_desc *niter)
{
basic_block *bbs;
unsigned i;
initialize_original_copy_tables ();
basic_block cond_bb;
- struct loop *loop2 = loop_version (loop1, cond, &cond_bb,
+ class loop *loop2 = loop_version (loop1, cond, &cond_bb,
profile_probability::always (),
profile_probability::always (),
profile_probability::always (),
static unsigned int
tree_ssa_split_loops (void)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
gcc_assert (scev_initialized_p ());
/* Go through all loops starting from innermost. */
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
- struct tree_niter_desc niter;
+ class tree_niter_desc niter;
if (loop->aux)
{
/* If any of our inner loops was split, don't split us,
tree-ssa-loop-im.c ensures that all the suitable conditions are in this
shape. */
-static struct loop *tree_unswitch_loop (struct loop *, basic_block, tree);
-static bool tree_unswitch_single_loop (struct loop *, int);
-static tree tree_may_unswitch_on (basic_block, struct loop *);
-static bool tree_unswitch_outer_loop (struct loop *);
-static edge find_loop_guard (struct loop *);
-static bool empty_bb_without_guard_p (struct loop *, basic_block);
-static bool used_outside_loop_p (struct loop *, tree);
-static void hoist_guard (struct loop *, edge);
-static bool check_exit_phi (struct loop *);
-static tree get_vop_from_header (struct loop *);
+static class loop *tree_unswitch_loop (class loop *, basic_block, tree);
+static bool tree_unswitch_single_loop (class loop *, int);
+static tree tree_may_unswitch_on (basic_block, class loop *);
+static bool tree_unswitch_outer_loop (class loop *);
+static edge find_loop_guard (class loop *);
+static bool empty_bb_without_guard_p (class loop *, basic_block);
+static bool used_outside_loop_p (class loop *, tree);
+static void hoist_guard (class loop *, edge);
+static bool check_exit_phi (class loop *);
+static tree get_vop_from_header (class loop *);
/* Main entry point. Perform loop unswitching on all suitable loops. */
unsigned int
tree_ssa_unswitch_loops (void)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
/* Go through all loops starting from innermost. */
considering for unswitching and LOOP is the loop it appears in. */
static bool
-is_maybe_undefined (const tree name, gimple *stmt, struct loop *loop)
+is_maybe_undefined (const tree name, gimple *stmt, class loop *loop)
{
/* The loop header is the only block we can trivially determine that
will always be executed. If the comparison is in the loop
basic blocks (for what it means see comments below). */
static tree
-tree_may_unswitch_on (basic_block bb, struct loop *loop)
+tree_may_unswitch_on (basic_block bb, class loop *loop)
{
gimple *last, *def;
gcond *stmt;
unnecessarily). */
static tree
-simplify_using_entry_checks (struct loop *loop, tree cond)
+simplify_using_entry_checks (class loop *loop, tree cond)
{
edge e = loop_preheader_edge (loop);
gimple *stmt;
grow exponentially. */
static bool
-tree_unswitch_single_loop (struct loop *loop, int num)
+tree_unswitch_single_loop (class loop *loop, int num)
{
basic_block *bbs;
- struct loop *nloop;
+ class loop *nloop;
unsigned i, found;
tree cond = NULL_TREE;
gimple *stmt;
loop is entered -- the new loop is entered if COND is true. Returns NULL
if impossible, new loop otherwise. */
-static struct loop *
-tree_unswitch_loop (struct loop *loop,
+static class loop *
+tree_unswitch_loop (class loop *loop,
basic_block unswitch_on, tree cond)
{
profile_probability prob_true;
/* Unswitch outer loops by hoisting invariant guard on
inner loop without code duplication. */
static bool
-tree_unswitch_outer_loop (struct loop *loop)
+tree_unswitch_outer_loop (class loop *loop)
{
edge exit, guard;
HOST_WIDE_INT iterations;
otherwise returns NULL. */
static edge
-find_loop_guard (struct loop *loop)
+find_loop_guard (class loop *loop)
{
basic_block header = loop->header;
edge guard_edge, te, fe;
are invariant or not. */
static bool
-empty_bb_without_guard_p (struct loop *loop, basic_block bb)
+empty_bb_without_guard_p (class loop *loop, basic_block bb)
{
basic_block exit_bb = single_exit (loop)->src;
bool may_be_used_outside = (bb == exit_bb
/* Return true if NAME is used outside of LOOP. */
static bool
-used_outside_loop_p (struct loop *loop, tree name)
+used_outside_loop_p (class loop *loop, tree name)
{
imm_use_iterator it;
use_operand_p use;
/* Return argument for loop preheader edge in header virtual phi if any. */
static tree
-get_vop_from_header (struct loop *loop)
+get_vop_from_header (class loop *loop)
{
for (gphi_iterator gsi = gsi_start_phis (loop->header);
!gsi_end_p (gsi); gsi_next (&gsi))
/* Move the check of GUARD outside of LOOP. */
static void
-hoist_guard (struct loop *loop, edge guard)
+hoist_guard (class loop *loop, edge guard)
{
edge exit = single_exit (loop);
edge preh = loop_preheader_edge (loop);
for edge around loop. */
static bool
-check_exit_phi (struct loop *loop)
+check_exit_phi (class loop *loop)
{
edge exit = single_exit (loop);
basic_block pre_header = loop_preheader_edge (loop)->src;
if (!lookup_attribute ("oacc kernels", DECL_ATTRIBUTES (fn->decl)))
return false;
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, 0)
if (loop->in_oacc_kernels_region)
return true;
unsigned
pass_scev_cprop::execute (function *)
{
- struct loop *loop;
+ class loop *loop;
bool any = false;
/* Perform final value replacement in loops, in case the replacement
/* Computes an estimated number of insns in LOOP, weighted by WEIGHTS. */
unsigned
-tree_num_loop_insns (struct loop *loop, eni_weights *weights)
+tree_num_loop_insns (class loop *loop, eni_weights *weights)
{
basic_block *body = get_loop_body (loop);
gimple_stmt_iterator gsi;
extern bool for_each_index (tree *, bool (*) (tree, tree *, void *), void *);
extern char *get_lsm_tmp_name (tree ref, unsigned n, const char *suffix = NULL);
-extern unsigned tree_num_loop_insns (struct loop *, struct eni_weights *);
+extern unsigned tree_num_loop_insns (class loop *, struct eni_weights *);
/* Returns the loop of the statement STMT. */
-static inline struct loop *
+static inline class loop *
loop_containing_stmt (gimple *stmt)
{
basic_block bb = gimple_bb (stmt);
phi_rank (gimple *stmt)
{
basic_block bb = gimple_bb (stmt);
- struct loop *father = bb->loop_father;
+ class loop *father = bb->loop_father;
tree res;
unsigned i;
use_operand_p use;
operation with tree code CODE, and is inside LOOP. */
static bool
-is_reassociable_op (gimple *stmt, enum tree_code code, struct loop *loop)
+is_reassociable_op (gimple *stmt, enum tree_code code, class loop *loop)
{
basic_block bb = gimple_bb (stmt);
static bool
undistribute_ops_list (enum tree_code opcode,
- vec<operand_entry *> *ops, struct loop *loop)
+ vec<operand_entry *> *ops, class loop *loop)
{
unsigned int length = ops->length ();
operand_entry *oe1;
static bool
get_ops (tree var, enum tree_code code, vec<operand_entry *> *ops,
- struct loop *loop)
+ class loop *loop)
{
gimple *stmt = SSA_NAME_DEF_STMT (var);
tree rhs[2];
static tree
update_ops (tree var, enum tree_code code, vec<operand_entry *> ops,
- unsigned int *pidx, struct loop *loop)
+ unsigned int *pidx, class loop *loop)
{
gimple *stmt = SSA_NAME_DEF_STMT (var);
tree rhs[4];
gimple *oldbinrhs = binrhs;
enum tree_code rhscode = gimple_assign_rhs_code (stmt);
gimple *newbinrhs = NULL;
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
tree lhs = gimple_assign_lhs (stmt);
gcc_assert (is_reassociable_op (binlhs, rhscode, loop)
tree binlhs = gimple_assign_rhs1 (stmt);
tree binrhs = gimple_assign_rhs2 (stmt);
gimple *immusestmt;
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
if (TREE_CODE (binlhs) == SSA_NAME
&& is_reassociable_op (SSA_NAME_DEF_STMT (binlhs), PLUS_EXPR, loop))
bool binlhsisreassoc = false;
bool binrhsisreassoc = false;
enum tree_code rhscode = gimple_assign_rhs_code (stmt);
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
if (set_visited)
gimple_set_visited (stmt, true);
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
const struct hashable_expr *expr1 = p1->expr ();
- const struct expr_hash_elt *stamp1 = p1->stamp ();
+ const class expr_hash_elt *stamp1 = p1->stamp ();
const struct hashable_expr *expr2 = p2->expr ();
- const struct expr_hash_elt *stamp2 = p2->stamp ();
+ const class expr_hash_elt *stamp2 = p2->stamp ();
/* This case should apply only when removing entries from the table. */
if (stamp1 == stamp2)
/* A unique stamp, typically the address of the hash
element itself, used in removing entries from the table. */
- struct expr_hash_elt *m_stamp;
+ class expr_hash_elt *m_stamp;
/* We should never be making assignments between objects in this class.
Though it might allow us to exploit C++11 move semantics if we
number 1, pages 9-14. */
static void
-scc_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
+scc_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
{
unsigned int i;
bitmap_iterator bi;
and label it's nodes with DFS numbers. */
static void
-condense_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
+condense_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
{
unsigned int i;
bitmap_iterator bi;
3. Hashable. */
static void
-label_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
+label_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
{
unsigned int i, first_pred;
bitmap_iterator bi;
/* Print the pred graph in dot format. */
static void
-dump_pred_graph (struct scc_info *si, FILE *file)
+dump_pred_graph (class scc_info *si, FILE *file)
{
unsigned int i;
/* Perform offline variable substitution, discovering equivalence
classes, and eliminating non-pointer variables. */
-static struct scc_info *
+static class scc_info *
perform_var_substitution (constraint_graph_t graph)
{
unsigned int i;
substitution. */
static void
-free_var_substitution_info (struct scc_info *si)
+free_var_substitution_info (class scc_info *si)
{
delete si;
free (graph->pointer_label);
static void
rewrite_constraints (constraint_graph_t graph,
- struct scc_info *si)
+ class scc_info *si)
{
int i;
constraint_t c;
static void
solve_constraints (void)
{
- struct scc_info *si;
+ class scc_info *si;
/* Sort varinfos so that ones that cannot be pointed to are last.
This makes bitmaps more efficient. */
returns the state. */
enum bb_dom_status
-determine_bb_domination_status (struct loop *loop, basic_block bb)
+determine_bb_domination_status (class loop *loop, basic_block bb)
{
basic_block *bblocks;
unsigned nblocks, i;
to the inside of the loop. */
static bool
-thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
+thread_through_loop_header (class loop *loop, bool may_peel_loop_headers)
{
basic_block header = loop->header;
edge e, tgt_edge, latch = loop_latch_edge (loop);
unsigned n_region, unsigned current_path_no)
{
unsigned i;
- struct loop *loop = entry->dest->loop_father;
+ class loop *loop = entry->dest->loop_father;
edge exit_copy;
edge redirected;
profile_count curr_count;
{
bool retval = false;
unsigned int i;
- struct loop *loop;
+ class loop *loop;
auto_bitmap threaded_blocks;
hash_set<edge> visited_starting_edges;
DOMST_DOMINATING
};
-enum bb_dom_status determine_bb_domination_status (struct loop *, basic_block);
+enum bb_dom_status determine_bb_domination_status (class loop *, basic_block);
#endif
block IB. */
tree
-streamer_read_string_cst (struct data_in *data_in, struct lto_input_block *ib)
+streamer_read_string_cst (class data_in *data_in, class lto_input_block *ib)
{
unsigned int len;
const char * ptr;
block IB. */
static tree
-input_identifier (struct data_in *data_in, struct lto_input_block *ib)
+input_identifier (class data_in *data_in, class lto_input_block *ib)
{
unsigned int len;
const char *ptr;
tables and descriptors for the file being read. */
tree
-streamer_read_chain (struct lto_input_block *ib, struct data_in *data_in)
+streamer_read_chain (class lto_input_block *ib, class data_in *data_in)
{
tree first, prev, curr;
of expression EXPR from bitpack BP. */
static void
-unpack_ts_block_value_fields (struct data_in *data_in,
+unpack_ts_block_value_fields (class data_in *data_in,
struct bitpack_d *bp, tree expr)
{
/* BLOCK_NUMBER is recomputed. */
structure of expression EXPR from bitpack BP. */
static void
-unpack_ts_translation_unit_decl_value_fields (struct data_in *data_in,
+unpack_ts_translation_unit_decl_value_fields (class data_in *data_in,
struct bitpack_d *bp, tree expr)
{
TRANSLATION_UNIT_LANGUAGE (expr) = xstrdup (bp_unpack_string (data_in, bp));
structure of expression EXPR from bitpack BP. */
static void
-unpack_ts_omp_clause_value_fields (struct data_in *data_in,
+unpack_ts_omp_clause_value_fields (class data_in *data_in,
struct bitpack_d *bp, tree expr)
{
stream_input_location (&OMP_CLAUSE_LOCATION (expr), bp, data_in);
bitfield values that the writer may have written. */
void
-streamer_read_tree_bitfields (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+streamer_read_tree_bitfields (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
enum tree_code code;
struct bitpack_d bp;
*IX_P the index into the reader cache where the new tree is stored. */
tree
-streamer_alloc_tree (struct lto_input_block *ib, struct data_in *data_in,
+streamer_alloc_tree (class lto_input_block *ib, class data_in *data_in,
enum LTO_tags tag)
{
enum tree_code code;
static void
-lto_input_ts_common_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_common_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
if (TREE_CODE (expr) != IDENTIFIER_NODE)
TREE_TYPE (expr) = stream_read_tree (ib, data_in);
file being read. */
static void
-lto_input_ts_vector_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_vector_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
unsigned int count = vector_cst_encoded_nelts (expr);
for (unsigned int i = 0; i < count; ++i)
file being read. */
static void
-lto_input_ts_poly_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_poly_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
POLY_INT_CST_COEFF (expr, i) = stream_read_tree (ib, data_in);
file being read. */
static void
-lto_input_ts_complex_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_complex_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
TREE_REALPART (expr) = stream_read_tree (ib, data_in);
TREE_IMAGPART (expr) = stream_read_tree (ib, data_in);
file being read. */
static void
-lto_input_ts_decl_minimal_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_decl_minimal_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
DECL_NAME (expr) = stream_read_tree (ib, data_in);
DECL_CONTEXT (expr) = stream_read_tree (ib, data_in);
file being read. */
static void
-lto_input_ts_decl_common_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_decl_common_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
DECL_SIZE (expr) = stream_read_tree (ib, data_in);
DECL_SIZE_UNIT (expr) = stream_read_tree (ib, data_in);
file being read. */
static void
-lto_input_ts_decl_non_common_tree_pointers (struct lto_input_block *,
- struct data_in *, tree)
+lto_input_ts_decl_non_common_tree_pointers (class lto_input_block *,
+ class data_in *, tree)
{
}
file being read. */
static void
-lto_input_ts_decl_with_vis_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_decl_with_vis_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
tree id;
file being read. */
static void
-lto_input_ts_field_decl_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_field_decl_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
DECL_FIELD_OFFSET (expr) = stream_read_tree (ib, data_in);
DECL_BIT_FIELD_TYPE (expr) = stream_read_tree (ib, data_in);
file being read. */
static void
-lto_input_ts_function_decl_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_function_decl_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
/* DECL_STRUCT_FUNCTION is loaded on demand by cgraph_get_body. */
DECL_FUNCTION_PERSONALITY (expr) = stream_read_tree (ib, data_in);
being read. */
static void
-lto_input_ts_type_common_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_type_common_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
TYPE_SIZE (expr) = stream_read_tree (ib, data_in);
TYPE_SIZE_UNIT (expr) = stream_read_tree (ib, data_in);
file being read. */
static void
-lto_input_ts_type_non_common_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in,
+lto_input_ts_type_non_common_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in,
tree expr)
{
if (TREE_CODE (expr) == ENUMERAL_TYPE)
file being read. */
static void
-lto_input_ts_list_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_list_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
TREE_PURPOSE (expr) = stream_read_tree (ib, data_in);
TREE_VALUE (expr) = stream_read_tree (ib, data_in);
file being read. */
static void
-lto_input_ts_vec_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_vec_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
int i;
static void
-lto_input_ts_exp_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_exp_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
int i;
tree block;
file being read. */
static void
-lto_input_ts_block_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_block_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
BLOCK_VARS (expr) = streamer_read_chain (ib, data_in);
file being read. */
static void
-lto_input_ts_binfo_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_binfo_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
tree t;
file being read. */
static void
-lto_input_ts_constructor_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_constructor_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
unsigned i;
file being read. */
static void
-lto_input_ts_omp_clause_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_omp_clause_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
int i;
contains tables and descriptors for the file being read. */
void
-streamer_read_tree_body (struct lto_input_block *ib, struct data_in *data_in,
+streamer_read_tree_body (class lto_input_block *ib, class data_in *data_in,
tree expr)
{
enum tree_code code;
DATA_IN->FILE_DATA->GLOBALS_INDEX[IX]. */
tree
-streamer_get_pickled_tree (struct lto_input_block *ib, struct data_in *data_in)
+streamer_get_pickled_tree (class lto_input_block *ib, class data_in *data_in)
{
unsigned HOST_WIDE_INT ix;
tree result;
};
/* In tree-streamer-in.c. */
-tree streamer_read_string_cst (struct data_in *, struct lto_input_block *);
-tree streamer_read_chain (struct lto_input_block *, struct data_in *);
-tree streamer_alloc_tree (struct lto_input_block *, struct data_in *,
+tree streamer_read_string_cst (class data_in *, class lto_input_block *);
+tree streamer_read_chain (class lto_input_block *, class data_in *);
+tree streamer_alloc_tree (class lto_input_block *, class data_in *,
enum LTO_tags);
-void streamer_read_tree_body (struct lto_input_block *, struct data_in *, tree);
-tree streamer_get_pickled_tree (struct lto_input_block *, struct data_in *);
-void streamer_read_tree_bitfields (struct lto_input_block *,
- struct data_in *, tree);
+void streamer_read_tree_body (class lto_input_block *, class data_in *, tree);
+tree streamer_get_pickled_tree (class lto_input_block *, class data_in *);
+void streamer_read_tree_bitfields (class lto_input_block *,
+ class data_in *, tree);
/* In tree-streamer-out.c. */
void streamer_write_string_cst (struct output_block *,
bp_unpack_machine_mode (struct bitpack_d *bp)
{
return (machine_mode)
- ((struct lto_input_block *)
+ ((class lto_input_block *)
bp->stream)->mode_table[bp_unpack_enum (bp, machine_mode, 1 << 8)];
}
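In the bp_unpack_machine_mode hunk above only the cast's class-key changes; once the class type is in scope the elaborated key is optional in C++, so either spelling (or none at all) denotes the same cast. A sketch with a hypothetical type name, not GCC's:

class blockbuf;                    /* hypothetical forward declaration */

blockbuf *
as_buf (void *stream)
{
  return (class blockbuf *) stream;     /* key spelled out, as above */
  /* Equivalently: return (blockbuf *) stream;  the key is optional. */
}

The case_bit_test hunk just below takes the second option and drops the key from the casts and declarations entirely.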
int
case_bit_test::cmp (const void *p1, const void *p2)
{
- const struct case_bit_test *const d1 = (const struct case_bit_test *) p1;
- const struct case_bit_test *const d2 = (const struct case_bit_test *) p2;
+ const case_bit_test *const d1 = (const case_bit_test *) p1;
+ const case_bit_test *const d2 = (const case_bit_test *) p2;
if (d2->bits != d1->bits)
return d2->bits - d1->bits;
bit_test_cluster::emit (tree index_expr, tree index_type,
tree, basic_block default_bb)
{
- struct case_bit_test test[m_max_case_bit_tests] = { {} };
+ case_bit_test test[m_max_case_bit_tests] = { {} };
unsigned int i, j, k;
unsigned int count;
static opt_result
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
return opt_result::failure_at (vect_location,
loop_vec_info loop_vinfo,
int loop_depth, unsigned int *max_vf)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
lambda_vector dist_v;
unsigned int i;
FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
unsigned int *max_vf)
{
unsigned int i;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (dra);
vect_record_base_alignments (vec_info *vinfo)
{
loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
- struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
+ class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
data_reference *dr;
unsigned int i;
FOR_EACH_VEC_ELT (vinfo->shared->datarefs, i, dr)
stmt_vec_info stmt_info = dr_info->stmt;
vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
+ class loop *loop = NULL;
tree ref = DR_REF (dr_info->dr);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum dr_alignment_support supportable_dr_alignment;
dr_vec_info *first_store = NULL;
dr_vec_info *dr0_info = NULL;
computation will be invariant in the outermost loop. */
else if (same_align_drs_max == same_align_drs)
{
- struct loop *ivloop0, *ivloop;
+ class loop *ivloop0, *ivloop;
ivloop0 = outermost_invariant_loop_for_expr
(loop, DR_BASE_ADDRESS (dr0_info->dr));
ivloop = outermost_invariant_loop_for_expr
tree scalar_type = TREE_TYPE (DR_REF (dr));
stmt_vec_info stmt_info = dr_info->stmt;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
+ class loop *loop = NULL;
if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
return true;
{
HOST_WIDE_INT scale = 1;
poly_int64 pbitpos, pbitsize;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree offtype = NULL_TREE;
tree decl = NULL_TREE, base, off;
opt_result
vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf, bool *fatal)
{
- struct loop *loop = NULL;
+ class loop *loop = NULL;
unsigned int i;
struct data_reference *dr;
tree scalar_type;
tree
vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type,
- struct loop *at_loop, tree offset,
+ class loop *at_loop, tree offset,
tree *initial_address, gimple_stmt_iterator *gsi,
gimple **ptr_incr, bool only_init,
tree byte_offset, tree iv_step)
{
const char *base_name;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
+ class loop *loop = NULL;
bool nested_in_vect_loop = false;
- struct loop *containing_loop = NULL;
+ class loop *containing_loop = NULL;
tree aggr_ptr_type;
tree aggr_ptr;
tree new_temp;
tree *realignment_token,
enum dr_alignment_support alignment_support_scheme,
tree init_addr,
- struct loop **at_loop)
+ class loop **at_loop)
{
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
struct data_reference *dr = dr_info->dr;
- struct loop *loop = NULL;
+ class loop *loop = NULL;
edge pe = NULL;
tree scalar_dest = gimple_assign_lhs (stmt_info->stmt);
tree vec_dest;
gimple_seq stmts = NULL;
bool compute_in_loop = false;
bool nested_in_vect_loop = false;
- struct loop *containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
- struct loop *loop_for_initial_load = NULL;
+ class loop *containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
+ class loop *loop_for_initial_load = NULL;
if (loop_vinfo)
{
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
machine_mode mode = TYPE_MODE (vectype);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *vect_loop = NULL;
+ class loop *vect_loop = NULL;
bool nested_in_vect_loop = false;
if (aligned_access_p (dr_info) && !check_aligned_accesses)
ssa_op_iter iter;
edge e;
edge_iterator ei;
- struct loop *loop = bb->loop_father;
- struct loop *outer_loop = NULL;
+ class loop *loop = bb->loop_father;
+ class loop *outer_loop = NULL;
if (rename_from_outer_loop)
{
value that it should have on subsequent iterations. */
static void
-vect_set_loop_mask (struct loop *loop, tree mask, tree init_mask,
+vect_set_loop_mask (class loop *loop, tree mask, tree init_mask,
tree next_mask)
{
gphi *phi = create_phi_node (mask, loop->header);
/* Add SEQ to the end of LOOP's preheader block. */
static void
-add_preheader_seq (struct loop *loop, gimple_seq seq)
+add_preheader_seq (class loop *loop, gimple_seq seq)
{
if (seq)
{
/* Add SEQ to the beginning of LOOP's header block. */
static void
-add_header_seq (struct loop *loop, gimple_seq seq)
+add_header_seq (class loop *loop, gimple_seq seq)
{
if (seq)
{
would ever hit a value that produces a set of all-false masks for RGM. */
static tree
-vect_set_loop_masks_directly (struct loop *loop, loop_vec_info loop_vinfo,
+vect_set_loop_masks_directly (class loop *loop, loop_vec_info loop_vinfo,
gimple_seq *preheader_seq,
gimple_stmt_iterator loop_cond_gsi,
rgroup_masks *rgm, tree niters, tree niters_skip,
final gcond. */
static gcond *
-vect_set_loop_condition_masked (struct loop *loop, loop_vec_info loop_vinfo,
+vect_set_loop_condition_masked (class loop *loop, loop_vec_info loop_vinfo,
tree niters, tree final_iv,
bool niters_maybe_zero,
gimple_stmt_iterator loop_cond_gsi)
are no loop masks. */
static gcond *
-vect_set_loop_condition_unmasked (struct loop *loop, tree niters,
+vect_set_loop_condition_unmasked (class loop *loop, tree niters,
tree step, tree final_iv,
bool niters_maybe_zero,
gimple_stmt_iterator loop_cond_gsi)
Assumption: the exit-condition of LOOP is the last stmt in the loop. */
void
-vect_set_loop_condition (struct loop *loop, loop_vec_info loop_vinfo,
+vect_set_loop_condition (class loop *loop, loop_vec_info loop_vinfo,
tree niters, tree step, tree final_iv,
bool niters_maybe_zero)
{
basic blocks from SCALAR_LOOP instead of LOOP, but to either the
entry or exit of LOOP. */
-struct loop *
-slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop,
- struct loop *scalar_loop, edge e)
+class loop *
+slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop,
+ class loop *scalar_loop, edge e)
{
- struct loop *new_loop;
+ class loop *new_loop;
basic_block *new_bbs, *bbs, *pbbs;
bool at_exit;
bool was_imm_dom;
*/
bool
-slpeel_can_duplicate_loop_p (const struct loop *loop, const_edge e)
+slpeel_can_duplicate_loop_p (const class loop *loop, const_edge e)
{
edge exit_e = single_exit (loop);
edge entry_e = loop_preheader_edge (loop);
uses should be renamed. */
static void
-create_lcssa_for_virtual_phi (struct loop *loop)
+create_lcssa_for_virtual_phi (class loop *loop)
{
gphi_iterator gsi;
edge exit_e = single_exit (loop);
Return the loop location if it succeeds and NULL if not. */
dump_user_location_t
-find_loop_location (struct loop *loop)
+find_loop_location (class loop *loop)
{
gimple *stmt = NULL;
basic_block bb;
bool
vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block bb = loop->header;
gphi_iterator gsi;
tree niters, edge update_e)
{
gphi_iterator gsi, gsi1;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block update_bb = update_e->dest;
basic_block exit_bb = single_exit (loop)->dest;
{
/* We should be using a step_vector of VF if VF is variable. */
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo).to_constant ();
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree type = TREE_TYPE (niters_vector);
tree log_vf = build_int_cst (type, exact_log2 (vf));
basic_block exit_bb = single_exit (loop)->dest;
static void
slpeel_update_phi_nodes_for_loops (loop_vec_info loop_vinfo,
- struct loop *first, struct loop *second,
+ class loop *first, class loop *second,
bool create_lcssa_for_iv_phis)
{
gphi_iterator gsi_update, gsi_orig;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
edge first_latch_e = EDGE_SUCC (first->latch, 0);
edge second_preheader_e = loop_preheader_edge (second);
in the update_loop's PHI node with the result of new PHI result. */
static void
-slpeel_update_phi_nodes_for_guard1 (struct loop *skip_loop,
- struct loop *update_loop,
+slpeel_update_phi_nodes_for_guard1 (class loop *skip_loop,
+ class loop *update_loop,
edge guard_edge, edge merge_edge)
{
location_t merge_loc, guard_loc;
NULL. */
static tree
-find_guard_arg (struct loop *loop, struct loop *epilog ATTRIBUTE_UNUSED,
+find_guard_arg (class loop *loop, class loop *epilog ATTRIBUTE_UNUSED,
gphi *lcssa_phi)
{
gphi_iterator gsi;
in exit_bb will also be updated. */
static void
-slpeel_update_phi_nodes_for_guard2 (struct loop *loop, struct loop *epilog,
+slpeel_update_phi_nodes_for_guard2 (class loop *loop, class loop *epilog,
edge guard_edge, edge merge_edge)
{
gphi_iterator gsi;
the arg of its loop closed ssa PHI needs to be updated. */
static void
-slpeel_update_phi_nodes_for_lcssa (struct loop *epilog)
+slpeel_update_phi_nodes_for_lcssa (class loop *epilog)
{
gphi_iterator gsi;
basic_block exit_bb = single_exit (epilog)->dest;
versioning conditions if loop versioning is needed. */
-struct loop *
+class loop *
vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
tree *niters_vector, tree *step_vector,
tree *niters_vector_mult_vf_var, int th,
prob_prolog = prob_epilog = profile_probability::guessed_always ()
.apply_scale (estimated_vf - 1, estimated_vf);
- struct loop *prolog, *epilog = NULL, *loop = LOOP_VINFO_LOOP (loop_vinfo);
- struct loop *first_loop = loop;
+ class loop *prolog, *epilog = NULL, *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *first_loop = loop;
bool irred_flag = loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP;
create_lcssa_for_virtual_phi (loop);
update_ssa (TODO_update_ssa_only_virtuals);
}
dump_user_location_t loop_loc = find_loop_location (loop);
- struct loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
+ class loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
if (prolog_peeling)
{
e = loop_preheader_edge (loop);
The versioning precondition(s) are placed in *COND_EXPR and
*COND_EXPR_STMT_LIST. */
-struct loop *
+class loop *
vect_loop_versioning (loop_vec_info loop_vinfo,
unsigned int th, bool check_profitability,
poly_uint64 versioning_threshold)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *nloop;
- struct loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *nloop;
+ class loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
basic_block condition_bb;
gphi_iterator gsi;
gimple_stmt_iterator cond_exp_gsi;
/* Compute the outermost loop cond_expr and cond_expr_stmt_list are
invariant in. */
- struct loop *outermost = outermost_invariant_loop_for_expr (loop, cond_expr);
+ class loop *outermost = outermost_invariant_loop_for_expr (loop, cond_expr);
for (gimple_stmt_iterator gsi = gsi_start (cond_expr_stmt_list);
!gsi_end_p (gsi); gsi_next (&gsi))
{
/* Search for the outermost loop we can version. Avoid versioning of
non-perfect nests but allow if-conversion versioned loops inside. */
- struct loop *loop_to_version = loop;
+ class loop *loop_to_version = loop;
if (flow_loop_nested_p (outermost, loop))
{
if (dump_enabled_p ())
/* Kill off IFN_LOOP_VECTORIZED_CALL in the copy, nobody will
reap those otherwise; they also refer to the original
loops. */
- struct loop *l = loop;
+ class loop *l = loop;
while (gimple *call = vect_loop_vectorized_call (l))
{
call = SSA_NAME_DEF_STMT (get_current_def (gimple_call_lhs (call)));
static opt_result
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
unsigned nbbs = loop->num_nodes;
poly_uint64 vectorization_factor = 1;
enclosing LOOP). */
static void
-vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
+vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop)
{
basic_block bb = loop->header;
tree init, step;
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
static gcond *
-vect_get_loop_niters (struct loop *loop, tree *assumptions,
+vect_get_loop_niters (class loop *loop, tree *assumptions,
tree *number_of_iterations, tree *number_of_iterationsm1)
{
edge exit = single_exit (loop);
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
tree niter_assumptions, niter, may_be_zero;
gcond *cond = get_loop_exit_condition (loop);
static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
- const struct loop *const loop = (const struct loop *)data;
+ const class loop *const loop = (const class loop *)data;
if (flow_bb_inside_loop_p (loop, bb))
return true;
return false;
/* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
stmt_vec_info structs for all the stmts in LOOP_IN. */
-_loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
+_loop_vec_info::_loop_vec_info (class loop *loop_in, vec_info_shared *shared)
: vec_info (vec_info::loop, init_cost (loop_in), shared),
loop (loop_in),
bbs (XCNEWVEC (basic_block, loop->num_nodes)),
static bool
vect_verify_full_masking (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned int min_ni_width;
unsigned int max_nscalars_per_iter
= vect_get_max_nscalars_per_iter (loop_vinfo);
static void
vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes, factor;
int innerloop_iters, i;
niter could be analyzed under some assumptions. */
opt_result
-vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
+vect_analyze_loop_form_1 (class loop *loop, gcond **loop_cond,
tree *assumptions, tree *number_of_iterationsm1,
tree *number_of_iterations, gcond **inner_loop_cond)
{
}
else
{
- struct loop *innerloop = loop->inner;
+ class loop *innerloop = loop->inner;
edge entryedge;
/* Nested loop. We currently require that the loop is doubly-nested,
/* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
opt_loop_vec_info
-vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
+vect_analyze_loop_form (class loop *loop, vec_info_shared *shared)
{
tree assumptions, number_of_iterations, number_of_iterationsm1;
gcond *loop_cond, *inner_loop_cond = NULL;
static void
vect_update_vf_for_slp (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
poly_uint64 vectorization_factor;
static opt_result
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
int i;
static int
vect_analyze_loop_costing (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
/* Only fully-masked loops can have iteration counts less than the
loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL epilogue must
be vectorized. */
opt_loop_vec_info
-vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
+vect_analyze_loop (class loop *loop, loop_vec_info orig_loop_vinfo,
vec_info_shared *shared)
{
auto_vector_sizes vector_sizes;
stmt_vec_info stmt_vinfo = stmts[0];
tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
tree scalar_type = TREE_TYPE (vector_type);
- struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
+ class loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
gcc_assert (loop);
switch (code)
vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
gimple *first_stmt)
{
- struct loop *loop = (gimple_bb (phi))->loop_father;
- struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
+ class loop *loop = (gimple_bb (phi))->loop_father;
+ class loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
enum tree_code code;
gimple *loop_use_stmt = NULL;
stmt_vec_info use_stmt_info;
enum vect_reduction_type *v_reduc_type)
{
gphi *phi = as_a <gphi *> (phi_info->stmt);
- struct loop *loop = (gimple_bb (phi))->loop_father;
- struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
+ class loop *loop = (gimple_bb (phi))->loop_father;
+ class loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
gimple *phi_use_stmt = NULL;
enum tree_code orig_code, code;
tree vectype;
machine_mode mode;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
+ class loop *loop = NULL;
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
tree *adjustment_def)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree scalar_type = TREE_TYPE (init_val);
tree vectype = get_vectype_for_scalar_type (scalar_type);
enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
tree vector_type;
unsigned int group_size = stmts.length ();
unsigned int i;
- struct loop *loop;
+ class loop *loop;
vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
tree vectype;
machine_mode mode;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
basic_block exit_bb;
tree scalar_dest;
tree scalar_type;
int reduc_index, vec_loop_masks *masks)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
stmt_vec_info new_stmt_info = NULL;
internal_fn mask_reduc_fn = get_masked_reduction_fn (reduc_fn, vectype_in);
does not cause overflow. */
static bool
-is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop)
+is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, class loop *loop)
{
gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
tree vectype_in = NULL_TREE;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum tree_code code, orig_code;
internal_fn reduc_fn;
machine_mode vec_mode;
bool nested_cycle = false, found_nested_cycle_def = false;
bool double_reduc = false;
basic_block def_bb;
- struct loop * def_stmt_loop;
+ class loop * def_stmt_loop;
tree def_arg;
auto_vec<tree> vec_oprnds0;
auto_vec<tree> vec_oprnds1;
stmt_vector_for_cost *cost_vec)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned ncopies;
bool nested_in_vect_loop = false;
- struct loop *iv_loop;
+ class loop *iv_loop;
tree vec_def;
edge pe = loop_preheader_edge (loop);
basic_block new_bb;
stmt_vector_for_cost *)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
imm_use_iterator imm_iter;
tree lhs, lhs_type, bitsize, vec_bitsize;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
/* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
static void
-vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info)
+vect_loop_kill_debug_uses (class loop *loop, stmt_vec_info stmt_info)
{
ssa_op_iter op_iter;
imm_use_iterator imm_iter;
}
widest_int max;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
/* Check the upper bound of loop niters. */
if (get_max_loop_iterations (loop, &max))
{
by factor VF. */
static void
-scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
+scale_profile_for_vect_loop (class loop *loop, unsigned vf)
{
edge preheader = loop_preheader_edge (loop);
/* Reduce loop iterations by the vectorization factor. */
vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi, stmt_vec_info *seen_store)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
if (dump_enabled_p ())
stmts in the loop, and update the loop exit condition.
Returns scalar epilogue loop if any. */
-struct loop *
+class loop *
vect_transform_loop (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- struct loop *epilogue = NULL;
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *epilogue = NULL;
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
int i;
versioning_threshold);
check_profitability = false;
}
- struct loop *sloop
+ class loop *sloop
= vect_loop_versioning (loop_vinfo, th, check_profitability,
versioning_threshold);
sloop->force_vectorize = false;
*/
void
-optimize_mask_stores (struct loop *loop)
+optimize_mask_stores (class loop *loop)
{
basic_block *bbs = get_loop_body (loop);
unsigned nbbs = loop->num_nodes;
unsigned i;
basic_block bb;
- struct loop *bb_loop;
+ class loop *bb_loop;
gimple_stmt_iterator gsi;
gimple *stmt;
auto_vec<gimple *> worklist;
vect_iv_limit_for_full_masking (loop_vec_info loop_vinfo)
{
tree niters_skip = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned HOST_WIDE_INT max_vf = vect_max_vf (loop_vinfo);
/* Calculate the value that the induction variable must be able
/* We don't allow changing the order of the computation in the inner-loop
when doing outer-loop vectorization. */
- struct loop *loop = LOOP_VINFO_LOOP (loop_info);
+ class loop *loop = LOOP_VINFO_LOOP (loop_info);
if (loop && nested_in_vect_loop_p (loop, stmt_info))
return false;
if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
unsigned int nbbs = loop->num_nodes;
void
vect_pattern_recog (vec_info *vinfo)
{
- struct loop *loop;
+ class loop *loop;
basic_block *bbs;
unsigned int nbbs;
gimple_stmt_iterator si;
else
{
/* Create a new SLP instance. */
- new_instance = XNEW (struct _slp_instance);
+ new_instance = XNEW (class _slp_instance);
SLP_INSTANCE_TREE (new_instance) = node;
SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
/* Return the vectorized type for the given statement. */
tree
-stmt_vectype (struct _stmt_vec_info *stmt_info)
+stmt_vectype (class _stmt_vec_info *stmt_info)
{
return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
the loop being vectorized. */
bool
-stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
+stmt_in_inner_loop_p (class _stmt_vec_info *stmt_info)
{
gimple *stmt = STMT_VINFO_STMT (stmt_info);
basic_block bb = gimple_bb (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop* loop;
+ class loop* loop;
if (!loop_vinfo)
return false;
vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
enum vect_relevant *relevant, bool *live_p)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
ssa_op_iter op_iter;
imm_use_iterator imm_iter;
use_operand_p use_p;
opt_result
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
unsigned int nbbs = loop->num_nodes;
gimple_stmt_iterator si;
if (loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block new_bb;
edge pe;
unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
/* Try lowering COUNT to the number of scalar latch iterations. */
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
widest_int max_iters;
if (max_loop_iterations (loop, &max_iters)
&& max_iters < count)
{
vec_info *vinfo = stmt_info->vinfo;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
+ class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
tree mask)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
int ncopies = vect_get_num_copies (loop_vinfo, vectype);
containing loop. */
static void
-vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info,
+vect_get_gather_scatter_ops (class loop *loop, stmt_vec_info stmt_info,
gather_scatter_info *gs_info,
tree *dataref_ptr, tree *vec_offset)
{
tree *dataref_bump, tree *vec_offset)
{
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
gimple_seq stmts;
*ARGINFO. */
static void
-vect_simd_lane_linear (tree op, struct loop *loop,
+vect_simd_lane_linear (tree op, class loop *loop,
struct simd_call_arg_info *arginfo)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (op);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
vec_info *vinfo = stmt_info->vinfo;
- struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
+ class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
tree fndecl, new_temp;
int ncopies, j;
auto_vec<simd_call_arg_info> arginfo;
tree vec_oprnd = NULL_TREE;
tree elem_type;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
+ class loop *loop = NULL;
machine_mode vec_mode;
tree dummy;
enum dr_alignment_support alignment_support_scheme;
otherwise returns false. */
static bool
-hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop)
+hoist_defs_of_uses (stmt_vec_info stmt_info, class loop *loop)
{
ssa_op_iter i;
tree op;
tree data_ref = NULL;
stmt_vec_info prev_stmt_info;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
- struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
+ class loop *loop = NULL;
+ class loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
bool nested_in_vect_loop = false;
tree elem_type;
tree new_temp;
stmt_vec_info first_stmt_info;
stmt_vec_info first_stmt_info_for_drptr = NULL;
bool compute_in_loop = false;
- struct loop *at_loop;
+ class loop *at_loop;
int vec_num;
bool slp = (slp_node != NULL);
bool slp_perm = false;
|| STMT_VINFO_RELEVANT (stmt_info)
   == vect_used_in_outer_by_reduction))
{
- struct loop *innerloop = LOOP_VINFO_LOOP (
+ class loop *innerloop = LOOP_VINFO_LOOP (
STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
imm_use_iterator imm_iter;
use_operand_p use_p;
vec<tree> *interm_types)
{
loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *vect_loop = NULL;
+ class loop *vect_loop = NULL;
machine_mode vec_mode;
enum insn_code icode1, icode2;
optab optab1, optab2;
stmt_vec_info
vec_info::new_stmt_vec_info (gimple *stmt)
{
- stmt_vec_info res = XCNEW (struct _stmt_vec_info);
+ stmt_vec_info res = XCNEW (class _stmt_vec_info);
res->vinfo = this;
res->stmt = stmt;
clear loop constraint LOOP_C_FINITE. */
void
-vect_free_loop_info_assumptions (struct loop *loop)
+vect_free_loop_info_assumptions (class loop *loop)
{
scev_reset_htab ();
/* We need to explicitly reset upper bound information since they are
guarding it. */
gimple *
-vect_loop_vectorized_call (struct loop *loop, gcond **cond)
+vect_loop_vectorized_call (class loop *loop, gcond **cond)
{
basic_block bb = loop_preheader_edge (loop)->src;
gimple *g;
internal call. */
static gimple *
-vect_loop_dist_alias_call (struct loop *loop)
+vect_loop_dist_alias_call (class loop *loop)
{
basic_block bb;
basic_block entry;
- struct loop *outer, *orig;
+ class loop *outer, *orig;
gimple_stmt_iterator gsi;
gimple *g;
tree arg = gimple_call_arg (loop_vectorized_call, 1);
basic_block *bbs;
unsigned int i;
- struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
+ class loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
unsigned int i;
unsigned int num_vectorized_loops = 0;
unsigned int vect_loops_num;
- struct loop *loop;
+ class loop *loop;
hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
bool any_ifcvt_loops = false;
&& vect_loop_vectorized_call (loop->inner))
{
tree arg = gimple_call_arg (loop_vectorized_call, 0);
- struct loop *vector_loop
+ class loop *vector_loop
= get_loop (cfun, tree_to_shwi (arg));
if (vector_loop && vector_loop != loop)
{
#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H
-typedef struct _stmt_vec_info *stmt_vec_info;
+typedef class _stmt_vec_info *stmt_vec_info;
#include "tree-data-ref.h"
#include "tree-hash-traits.h"
stmt_vec_info lookup_stmt (gimple *);
stmt_vec_info lookup_def (tree);
stmt_vec_info lookup_single_use (tree);
- struct dr_vec_info *lookup_dr (data_reference *);
+ class dr_vec_info *lookup_dr (data_reference *);
void move_dr (stmt_vec_info, stmt_vec_info);
void remove_stmt (stmt_vec_info);
void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);
void free_stmt_vec_info (stmt_vec_info);
};
-struct _loop_vec_info;
-struct _bb_vec_info;
+class _loop_vec_info;
+class _bb_vec_info;
template<>
template<>
/*-----------------------------------------------------------------*/
typedef class _loop_vec_info : public vec_info {
public:
- _loop_vec_info (struct loop *, vec_info_shared *);
+ _loop_vec_info (class loop *, vec_info_shared *);
~_loop_vec_info ();
/* The loop to which this info struct refers. */
- struct loop *loop;
+ class loop *loop;
/* The loop basic blocks. */
basic_block *bbs;
tree iv_type;
/* Unknown DRs according to which loop was peeled. */
- struct dr_vec_info *unaligned_dr;
+ class dr_vec_info *unaligned_dr;
/* peeling_for_alignment indicates whether peeling for alignment will take
place, and what the peeling factor should be:
/* If if-conversion versioned this loop before conversion, this is the
loop version without if-conversion. */
- struct loop *scalar_loop;
+ class loop *scalar_loop;
/* For loops being epilogues of already vectorized loops
this points to the original vectorized loop. Otherwise NULL. */
typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;
static inline loop_vec_info
-loop_vec_info_for_loop (struct loop *loop)
+loop_vec_info_for_loop (class loop *loop)
{
return (loop_vec_info) loop->aux;
}
&& TYPE_UNSIGNED (TYPE)))
static inline bool
-nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info)
+nested_in_vect_loop_p (class loop *loop, stmt_vec_info stmt_info)
{
return (loop->inner
&& (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
/* Alias targetm.vectorize.init_cost. */
static inline void *
-init_cost (struct loop *loop_info)
+init_cost (class loop *loop_info)
{
return targetm.vectorize.init_cost (loop_info);
}
/* Simple loop peeling and versioning utilities for vectorizer's purposes -
in tree-vect-loop-manip.c. */
-extern void vect_set_loop_condition (struct loop *, loop_vec_info,
+extern void vect_set_loop_condition (class loop *, loop_vec_info,
tree, tree, tree, bool);
-extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
-struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
- struct loop *, edge);
-struct loop *vect_loop_versioning (loop_vec_info, unsigned int, bool,
+extern bool slpeel_can_duplicate_loop_p (const class loop *, const_edge);
+class loop *slpeel_tree_duplicate_loop_to_edge_cfg (class loop *,
+ class loop *, edge);
+class loop *vect_loop_versioning (loop_vec_info, unsigned int, bool,
poly_uint64);
-extern struct loop *vect_do_peeling (loop_vec_info, tree, tree,
+extern class loop *vect_do_peeling (loop_vec_info, tree, tree,
tree *, tree *, tree *, int, bool, bool);
extern void vect_prepare_for_masked_peels (loop_vec_info);
-extern dump_user_location_t find_loop_location (struct loop *);
+extern dump_user_location_t find_loop_location (class loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);
/* In tree-vect-stmts.c. */
extern bool vect_supportable_shift (enum tree_code, tree);
extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
-extern void optimize_mask_stores (struct loop*);
+extern void optimize_mask_stores (class loop*);
extern gcall *vect_gen_while (tree, tree, tree);
extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
vec<data_reference_p> *);
extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *, bool *);
extern void vect_record_base_alignments (vec_info *);
-extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *, tree,
+extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, class loop *, tree,
tree *, gimple_stmt_iterator *,
gimple **, bool,
tree = NULL_TREE, tree = NULL_TREE);
gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *,
tree *, enum dr_alignment_support, tree,
- struct loop **);
+ class loop **);
extern void vect_transform_grouped_load (stmt_vec_info, vec<tree> , int,
gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>);
extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
enum tree_code);
/* Drive for loop analysis stage. */
-extern opt_loop_vec_info vect_analyze_loop (struct loop *,
+extern opt_loop_vec_info vect_analyze_loop (class loop *,
loop_vec_info,
vec_info_shared *);
extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
unsigned int, tree, unsigned int);
/* Drive for loop transformation stage. */
-extern struct loop *vect_transform_loop (loop_vec_info);
-extern opt_loop_vec_info vect_analyze_loop_form (struct loop *,
+extern class loop *vect_transform_loop (loop_vec_info);
+extern opt_loop_vec_info vect_analyze_loop_form (class loop *,
vec_info_shared *);
extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *,
slp_tree, int, stmt_vec_info *,
/* In tree-vectorizer.c. */
unsigned vectorize_loops (void);
-void vect_free_loop_info_assumptions (struct loop *);
-gimple *vect_loop_vectorized_call (struct loop *, gcond **cond = NULL);
+void vect_free_loop_info_assumptions (class loop *);
+gimple *vect_loop_vectorized_call (class loop *, gcond **cond = NULL);
#endif /* GCC_TREE_VECTORIZER_H */
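Background illustration (not part of the patch, hypothetical names): the
class-key mismatches normalized above arise when a type is forward-declared
with one key and defined with the other. That is valid C++, but some
compilers warn about it (e.g. Clang's -Wmismatched-tags, which PR c++/61339
asks GCC to implement), and MSVC is known to encode the class-key into
mangled names, so the patch makes every mention match the definition:

  /* Hypothetical sketch of the mismatch being fixed.  */
  struct stmt_info_sketch;       /* forward declaration says 'struct'...  */
  class stmt_info_sketch         /* ...definition says 'class': mismatch.  */
  {
  public:
    int id;
  };
  typedef class stmt_info_sketch *stmt_info_sketch_p;  /* key now matches.  */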
language data removed. The lists are held inside FLD. */
static void
-add_tree_to_fld_list (tree t, struct free_lang_data_d *fld)
+add_tree_to_fld_list (tree t, class free_lang_data_d *fld)
{
if (DECL_P (t))
fld->decls.safe_push (t);
/* Push tree node T into FLD->WORKLIST. */
static inline void
-fld_worklist_push (tree t, struct free_lang_data_d *fld)
+fld_worklist_push (tree t, class free_lang_data_d *fld)
{
if (t && !is_lang_specific (t) && !fld->pset.contains (t))
fld->worklist.safe_push ((t));
Set TREE_TYPE to INNER_TYPE if non-NULL. */
static tree
-fld_type_variant (tree first, tree t, struct free_lang_data_d *fld,
+fld_type_variant (tree first, tree t, class free_lang_data_d *fld,
tree inner_type = NULL)
{
if (first == TYPE_MAIN_VARIANT (t))
static tree
fld_process_array_type (tree t, tree t2, hash_map<tree, tree> *map,
- struct free_lang_data_d *fld)
+ class free_lang_data_d *fld)
{
if (TREE_TYPE (t) == t2)
return t;
Return T if no simplification is possible. */
static tree
-fld_incomplete_type_of (tree t, struct free_lang_data_d *fld)
+fld_incomplete_type_of (tree t, class free_lang_data_d *fld)
{
if (!t)
return NULL;
types. */
static tree
-fld_simplified_type (tree t, struct free_lang_data_d *fld)
+fld_simplified_type (tree t, class free_lang_data_d *fld)
{
if (!t)
return t;
/* Reset all language specific information still present in TYPE. */
static void
-free_lang_data_in_type (tree type, struct free_lang_data_d *fld)
+free_lang_data_in_type (tree type, class free_lang_data_d *fld)
{
gcc_assert (TYPE_P (type));
DECL. */
static void
-free_lang_data_in_decl (tree decl, struct free_lang_data_d *fld)
+free_lang_data_in_decl (tree decl, class free_lang_data_d *fld)
{
gcc_assert (DECL_P (decl));
find_decls_types_r (tree *tp, int *ws, void *data)
{
tree t = *tp;
- struct free_lang_data_d *fld = (struct free_lang_data_d *) data;
+ class free_lang_data_d *fld = (class free_lang_data_d *) data;
if (TREE_CODE (t) == TREE_LIST)
return NULL_TREE;
/* Find decls and types in T. */
static void
-find_decls_types (tree t, struct free_lang_data_d *fld)
+find_decls_types (tree t, class free_lang_data_d *fld)
{
while (1)
{
FLD->DECLS and FLD->TYPES. */
static void
-find_decls_types_in_eh_region (eh_region r, struct free_lang_data_d *fld)
+find_decls_types_in_eh_region (eh_region r, class free_lang_data_d *fld)
{
switch (r->type)
{
NAMESPACE_DECLs, etc). */
static void
-find_decls_types_in_node (struct cgraph_node *n, struct free_lang_data_d *fld)
+find_decls_types_in_node (struct cgraph_node *n, class free_lang_data_d *fld)
{
basic_block bb;
struct function *fn;
NAMESPACE_DECLs, etc). */
static void
-find_decls_types_in_var (varpool_node *v, struct free_lang_data_d *fld)
+find_decls_types_in_var (varpool_node *v, class free_lang_data_d *fld)
{
find_decls_types (v->decl, fld);
}
been set up. */
static void
-free_lang_data_in_cgraph (struct free_lang_data_d *fld)
+free_lang_data_in_cgraph (class free_lang_data_d *fld)
{
struct cgraph_node *n;
varpool_node *v;
free_lang_data (void)
{
unsigned i;
- struct free_lang_data_d fld;
+ class free_lang_data_d fld;
/* If we are the LTO frontend we have freed lang-specific data already. */
if (in_lto_p
/* Dump information about HIST to DUMP_FILE. */
void
-stream_in_histogram_value (struct lto_input_block *ib, gimple *stmt)
+stream_in_histogram_value (class lto_input_block *ib, gimple *stmt)
{
enum hist_type type;
unsigned int ncounters = 0;
extern void gimple_gen_average_profiler (histogram_value, unsigned, unsigned);
extern void gimple_gen_ior_profiler (histogram_value, unsigned, unsigned);
extern void stream_out_histogram_value (struct output_block *, histogram_value);
-extern void stream_in_histogram_value (struct lto_input_block *, gimple *);
+extern void stream_in_histogram_value (class lto_input_block *, gimple *);
extern struct cgraph_node* find_func_by_profile_id (int func_id);
static rtx
adjust_mems (rtx loc, const_rtx old_rtx, void *data)
{
- struct adjust_mem_data *amd = (struct adjust_mem_data *) data;
+ class adjust_mem_data *amd = (class adjust_mem_data *) data;
rtx mem, addr = loc, tem;
machine_mode mem_mode_save;
bool store_save;
if (!frame_pointer_needed)
{
- struct adjust_mem_data amd;
+ class adjust_mem_data amd;
amd.mem_mode = VOIDmode;
amd.stack_adjust = -VTI (bb)->out.stack_adjust;
amd.store = true;
vt_expand_var_loc_chain (variable *var, bitmap regs, void *data,
bool *pendrecp)
{
- struct expand_loc_callback_data *elcd
- = (struct expand_loc_callback_data *) data;
+ class expand_loc_callback_data *elcd
+ = (class expand_loc_callback_data *) data;
location_chain *loc, *next;
rtx result = NULL;
int first_child, result_first_child, last_child;
int max_depth ATTRIBUTE_UNUSED,
void *data)
{
- struct expand_loc_callback_data *elcd
- = (struct expand_loc_callback_data *) data;
+ class expand_loc_callback_data *elcd
+ = (class expand_loc_callback_data *) data;
decl_or_value dv;
variable *var;
rtx result, subreg;
static rtx
vt_expand_loc (rtx loc, variable_table_type *vars)
{
- struct expand_loc_callback_data data;
+ class expand_loc_callback_data data;
rtx result;
if (!MAY_HAVE_DEBUG_BIND_INSNS)
static rtx
vt_expand_1pvar (variable *var, variable_table_type *vars)
{
- struct expand_loc_callback_data data;
+ class expand_loc_callback_data data;
rtx loc;
gcc_checking_assert (var->onepart && var->n_var_parts == 1);
const char *first_global_object_name;
const char *weak_global_object_name;
-struct addr_const;
-struct constant_descriptor_rtx;
+class addr_const;
+class constant_descriptor_rtx;
struct rtx_constant_pool;
#define n_deferred_constants (crtl->varasm.deferred_constants)
#ifdef ASM_OUTPUT_EXTERNAL
static bool incorporeal_function_p (tree);
#endif
-static void decode_addr_const (tree, struct addr_const *);
+static void decode_addr_const (tree, class addr_const *);
static hashval_t const_hash_1 (const tree);
static int compare_constant (const tree, const tree);
static void output_constant_def_contents (rtx);
};
static void
-decode_addr_const (tree exp, struct addr_const *value)
+decode_addr_const (tree exp, class addr_const *value)
{
tree target = TREE_OPERAND (exp, 0);
poly_int64 offset = 0;
/* Fallthru. */
case FDESC_EXPR:
{
- struct addr_const value;
+ class addr_const value;
decode_addr_const (exp, &value);
switch (GET_CODE (value.base))
case ADDR_EXPR:
case FDESC_EXPR:
{
- struct addr_const value1, value2;
+ class addr_const value1, value2;
enum rtx_code code;
int ret;
\f
class GTY((chain_next ("%h.next"), for_user)) constant_descriptor_rtx {
public:
- struct constant_descriptor_rtx *next;
+ class constant_descriptor_rtx *next;
rtx mem;
rtx sym;
rtx constant;
struct GTY(()) rtx_constant_pool {
/* Pointers to first and last constant in pool, as ordered by offset. */
- struct constant_descriptor_rtx *first;
- struct constant_descriptor_rtx *last;
+ class constant_descriptor_rtx *first;
+ class constant_descriptor_rtx *last;
/* Hash facility for making memory-constants from constant rtl-expressions.
It is used on RISC machines where immediate integer arguments and
rtx
force_const_mem (machine_mode in_mode, rtx x)
{
- struct constant_descriptor_rtx *desc, tmp;
+ class constant_descriptor_rtx *desc, tmp;
struct rtx_constant_pool *pool;
char label[256];
rtx def, symbol;
rtx
get_pool_constant_mark (rtx addr, bool *pmarked)
{
- struct constant_descriptor_rtx *desc;
+ class constant_descriptor_rtx *desc;
desc = SYMBOL_REF_CONSTANT (addr);
*pmarked = (desc->mark != 0);
giving it ALIGN bits of alignment. */
static void
-output_constant_pool_1 (struct constant_descriptor_rtx *desc,
+output_constant_pool_1 (class constant_descriptor_rtx *desc,
unsigned int align)
{
rtx x, tmp;
static void
recompute_pool_offsets (struct rtx_constant_pool *pool)
{
- struct constant_descriptor_rtx *desc;
+ class constant_descriptor_rtx *desc;
pool->offset = 0;
for (desc = pool->first; desc ; desc = desc->next)
{
if (CONSTANT_POOL_ADDRESS_P (x))
{
- struct constant_descriptor_rtx *desc = SYMBOL_REF_CONSTANT (x);
+ class constant_descriptor_rtx *desc = SYMBOL_REF_CONSTANT (x);
if (desc->mark == 0)
{
desc->mark = 1;
static void
output_constant_pool_contents (struct rtx_constant_pool *pool)
{
- struct constant_descriptor_rtx *desc;
+ class constant_descriptor_rtx *desc;
for (desc = pool->first; desc ; desc = desc->next)
if (desc->mark)
place_block_symbol (rtx symbol)
{
unsigned HOST_WIDE_INT size, mask, offset;
- struct constant_descriptor_rtx *desc;
+ class constant_descriptor_rtx *desc;
unsigned int alignment;
struct object_block *block;
tree decl;
static void
output_object_block (struct object_block *block)
{
- struct constant_descriptor_rtx *desc;
+ class constant_descriptor_rtx *desc;
unsigned int i;
HOST_WIDE_INT offset;
tree decl;
for VAR. If so, update VR with the new limits. */
void
-vr_values::adjust_range_with_scev (value_range *vr, struct loop *loop,
+vr_values::adjust_range_with_scev (value_range *vr, class loop *loop,
gimple *stmt, tree var)
{
tree init, step, chrec, tmin, tmax, min, max, type, tem;
value_range *lhs_vr = get_value_range (lhs);
bool first = true;
int edges, old_edges;
- struct loop *l;
+ class loop *l;
if (dump_file && (dump_flags & TDF_DETAILS))
{
void set_defs_to_varying (gimple *);
bool update_value_range (const_tree, value_range *);
tree op_with_constant_singleton_value_range (tree);
- void adjust_range_with_scev (value_range *, struct loop *, gimple *, tree);
+ void adjust_range_with_scev (value_range *, class loop *, gimple *, tree);
tree vrp_evaluate_conditional (tree_code, tree, tree, gimple *);
void dump_all_value_ranges (FILE *);
return false;
}
-class web_entry : public web_entry_base
+struct web_entry : public web_entry_base
{
private:
rtx reg_pvt;
/* wi::storage_ref can be a reference to a primitive type,
so this is the conservatively-correct setting. */
template <bool SE, bool HDP = true>
-struct wide_int_ref_storage;
+class wide_int_ref_storage;
typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;
{
if (c != '#' && (flags & DO_BOL))
{
- struct line_maps *line_table;
+ class line_maps *line_table;
if (!pfile->state.skipping && next_line != base)
cb->print_lines (lines, base, next_line - base);
static void
do_line (cpp_reader *pfile)
{
- struct line_maps *line_table = pfile->line_table;
+ class line_maps *line_table = pfile->line_table;
const line_map_ordinary *map = LINEMAPS_LAST_ORDINARY_MAP (line_table);
/* skip_rest_of_line() may cause the line table to be realloc()ed, so note down
static void
do_linemarker (cpp_reader *pfile)
{
- struct line_maps *line_table = pfile->line_table;
+ class line_maps *line_table = pfile->line_table;
const line_map_ordinary *map = LINEMAPS_LAST_ORDINARY_MAP (line_table);
const cpp_token *token;
const char *new_file = ORDINARY_MAP_FILE_NAME (map);
}
/* The dependencies structure. (Creates one if it hasn't already been.) */
-struct mkdeps *
+class mkdeps *
cpp_get_deps (cpp_reader *pfile)
{
if (!pfile->deps)
cpp_make_system_header (cpp_reader *pfile, int syshdr, int externc)
{
int flags = 0;
- const struct line_maps *line_table = pfile->line_table;
+ const class line_maps *line_table = pfile->line_table;
const line_map_ordinary *map = LINEMAPS_LAST_ORDINARY_MAP (line_table);
/* 1 = system header, 2 = system header to be treated as C. */
if (syshdr)
that cpplib will share; this technique is used by the C front
ends. */
extern cpp_reader *cpp_create_reader (enum c_lang, struct ht *,
- struct line_maps *);
+ class line_maps *);
/* Reset the cpp_reader's line_map. This is only used after reading a
PCH file. */
-extern void cpp_set_line_map (cpp_reader *, struct line_maps *);
+extern void cpp_set_line_map (cpp_reader *, class line_maps *);
/* Call this to change the selected language standard (e.g. because of
command line options). */
extern cpp_options *cpp_get_options (cpp_reader *);
extern cpp_callbacks *cpp_get_callbacks (cpp_reader *);
extern void cpp_set_callbacks (cpp_reader *, cpp_callbacks *);
-extern struct mkdeps *cpp_get_deps (cpp_reader *);
+extern class mkdeps *cpp_get_deps (cpp_reader *);
/* This function reads the file, but does not start preprocessing. It
returns the name of the original file; this is the same as the
/* Return TRUE if MAP encodes locations coming from a macro
replacement-list at macro expansion point. */
bool
-linemap_macro_expansion_map_p (const struct line_map *);
+linemap_macro_expansion_map_p (const line_map *);
/* Assert that MAP encodes locations of tokens that are not part of
the replacement-list of a macro expansion, downcasting from
line_map * to line_map_ordinary *. */
inline line_map_ordinary *
-linemap_check_ordinary (struct line_map *map)
+linemap_check_ordinary (line_map *map)
{
linemap_assert (MAP_ORDINARY_P (map));
return (line_map_ordinary *)map;
const line_map * to const line_map_ordinary *. */
inline const line_map_ordinary *
-linemap_check_ordinary (const struct line_map *map)
+linemap_check_ordinary (const line_map *map)
{
linemap_assert (MAP_ORDINARY_P (map));
return (const line_map_ordinary *)map;
return (line_map_macro *)LINEMAPS_LAST_ALLOCATED_MAP (set, true);
}
-extern location_t get_combined_adhoc_loc (struct line_maps *,
+extern location_t get_combined_adhoc_loc (class line_maps *,
location_t,
source_range,
void *);
-extern void *get_data_from_adhoc_loc (const struct line_maps *, location_t);
-extern location_t get_location_from_adhoc_loc (const struct line_maps *,
+extern void *get_data_from_adhoc_loc (const line_maps *, location_t);
+extern location_t get_location_from_adhoc_loc (const line_maps *,
location_t);
extern source_range get_range_from_loc (line_maps *set, location_t loc);
/* Combine LOC and BLOCK, giving a combined adhoc location. */
inline location_t
-COMBINE_LOCATION_DATA (struct line_maps *set,
+COMBINE_LOCATION_DATA (class line_maps *set,
location_t loc,
source_range src_range,
void *block)
return get_combined_adhoc_loc (set, loc, src_range, block);
}
-extern void rebuild_location_adhoc_htab (struct line_maps *);
+extern void rebuild_location_adhoc_htab (class line_maps *);
/* Initialize a line map set. SET is the line map set to initialize
and BUILTIN_LOCATION is the special location value to be used as
spelling location for built-in tokens. This BUILTIN_LOCATION has
to be strictly less than RESERVED_LOCATION_COUNT. */
-extern void linemap_init (struct line_maps *set,
+extern void linemap_init (class line_maps *set,
location_t builtin_location);
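As a hedged illustration (not part of the patch) of how the declarations in
this header combine; LC_ENTER and BUILTINS_LOCATION come from the
surrounding headers:

  /* Sketch: build a tiny line table and encode one location.  */
  line_maps lm;
  linemap_init (&lm, BUILTINS_LOCATION);
  linemap_add (&lm, LC_ENTER, /*sysp=*/0, "test.c", /*to_line=*/1);
  linemap_line_start (&lm, /*to_line=*/42, /*max_column_hint=*/80);
  location_t loc = linemap_position_for_column (&lm, /*to_column=*/10);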
/* Check for and warn about line_maps entered but not exited. */
-extern void linemap_check_files_exited (struct line_maps *);
+extern void linemap_check_files_exited (class line_maps *);
/* Return a location_t for the start (i.e. column==0) of
(physical) line TO_LINE in the current source file (as in the
the highest_location). */
extern location_t linemap_line_start
-(struct line_maps *set, linenum_type to_line, unsigned int max_column_hint);
+(class line_maps *set, linenum_type to_line, unsigned int max_column_hint);
/* Add a mapping of logical source line to physical source file and
line number. This function creates an "ordinary map", which is a
A call to this function can relocate the previous set of
maps, so any stored line_map pointers should not be used. */
-extern const struct line_map *linemap_add
- (struct line_maps *, enum lc_reason, unsigned int sysp,
+extern const line_map *linemap_add
+ (class line_maps *, enum lc_reason, unsigned int sysp,
const char *to_file, linenum_type to_line);
/* Given a logical source location, returns the map which the
monotonically increasing, and so the list is sorted and we can use a
binary search. If no line map has been allocated yet, this
function returns NULL. */
-extern const struct line_map *linemap_lookup
- (struct line_maps *, location_t);
+extern const line_map *linemap_lookup
+ (class line_maps *, location_t);
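A simplified sketch (hypothetical helper, not the real implementation) of
the invariant described above: map start locations grow monotonically, so
the owning map is the last one whose start is at or before the location:

  /* Binary search for the last index with starts[i] <= loc; -1 if none.  */
  static int
  last_map_at_or_before (const location_t *starts, int n, location_t loc)
  {
    int lo = 0, hi = n;
    while (lo < hi)
      {
        int mid = lo + (hi - lo) / 2;
        if (starts[mid] <= loc)
          lo = mid + 1;
        else
          hi = mid;
      }
    return lo - 1;
  }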
/* Returns TRUE if the line table set tracks token locations across
macro expansion, FALSE otherwise. */
-bool linemap_tracks_macro_expansion_locs_p (struct line_maps *);
+bool linemap_tracks_macro_expansion_locs_p (class line_maps *);
/* Return the name of the macro associated to MACRO_MAP. */
const char* linemap_map_get_macro_name (const line_map_macro *);
Note that this function returns 1 if LOCATION belongs to a token
that is part of a macro replacement-list defined in a system
header, but expanded in a non-system file. */
-int linemap_location_in_system_header_p (struct line_maps *,
+int linemap_location_in_system_header_p (class line_maps *,
location_t);
/* Return TRUE if LOCATION is a source code location of a token that is part of
a macro expansion, FALSE otherwise. */
-bool linemap_location_from_macro_expansion_p (const struct line_maps *,
+bool linemap_location_from_macro_expansion_p (const line_maps *,
location_t);
/* TRUE if LOCATION is a source code location of a token that is part of the
definition of a macro, FALSE otherwise. */
-bool linemap_location_from_macro_definition_p (struct line_maps *,
+bool linemap_location_from_macro_definition_p (class line_maps *,
location_t);
/* With the precondition that LOCATION is the locus of a token that is
linemap_line_start, i.e., the last source line from which a location was
encoded. */
extern location_t
-linemap_position_for_column (struct line_maps *, unsigned int);
+linemap_position_for_column (class line_maps *, unsigned int);
/* Encode and return a source location from a given line and
column. */
shifting it by OFFSET columns. This function does not support
virtual locations. */
location_t
-linemap_position_for_loc_and_offset (struct line_maps *set,
+linemap_position_for_loc_and_offset (class line_maps *set,
location_t loc,
unsigned int offset);
comes before the token of POST, 0 if PRE denotes the location of
the same token as the token for POST, and a negative value
otherwise. */
-int linemap_compare_locations (struct line_maps *set,
+int linemap_compare_locations (class line_maps *set,
location_t pre,
location_t post);
topologically before the token denoted by location LOC_B, or if they
are equal. */
inline bool
-linemap_location_before_p (struct line_maps *set,
+linemap_location_before_p (class line_maps *set,
location_t loc_a,
location_t loc_b)
{
resolves to a location reserved for the client code, like
UNKNOWN_LOCATION or BUILTINS_LOCATION in GCC. */
-location_t linemap_resolve_location (struct line_maps *,
+location_t linemap_resolve_location (class line_maps *,
location_t loc,
enum location_resolution_kind lrk,
const line_map_ordinary **loc_map);
the point where M' was expanded. LOC_MAP is an output parameter.
When non-NULL, *LOC_MAP is set to the map of the returned
location. */
-location_t linemap_unwind_toward_expansion (struct line_maps *,
+location_t linemap_unwind_toward_expansion (class line_maps *,
location_t loc,
- const struct line_map **loc_map);
+ const line_map **loc_map);
/* If LOC is the virtual location of a token coming from the expansion
of a macro M and if its spelling location is reserved (e.g., a
*MAP is set to the map of the returned location if the latter is
different from LOC. */
-location_t linemap_unwind_to_first_non_reserved_loc (struct line_maps *,
+location_t linemap_unwind_to_first_non_reserved_loc (class line_maps *,
location_t loc,
- const struct line_map **map);
+ const line_map **map);
/* Expand source code location LOC and return a user-readable source
code location. LOC must be a spelling (non-virtual) location. If
it's a location < RESERVED_LOCATION_COUNT a zeroed expanded source
location is returned. */
-expanded_location linemap_expand_location (struct line_maps *,
- const struct line_map *,
+expanded_location linemap_expand_location (class line_maps *,
+ const line_map *,
location_t loc);
/* Statistics about maps allocation and usage as returned by
there is a line map in SET. FILE_NAME is the file name to
consider. If the function returns TRUE, *LOC is set to the highest
location emitted for that file. */
-bool linemap_get_file_highest_location (struct line_maps * set,
+bool linemap_get_file_highest_location (class line_maps * set,
const char *file_name,
location_t *loc);
/* Compute and return statistics about the memory consumption of some
parts of the line table SET. */
-void linemap_get_statistics (struct line_maps *, struct linemap_stats *);
+void linemap_get_statistics (line_maps *, struct linemap_stats *);
/* Dump debugging information about source location LOC into the file
stream STREAM. SET is the line map set LOC comes from. */
-void linemap_dump_location (struct line_maps *, location_t, FILE *);
+void linemap_dump_location (line_maps *, location_t, FILE *);
/* Dump line map at index IX in line table SET to STREAM. If STREAM
is NULL, use stderr. IS_MACRO is true if the caller wants to
dump a macro map, false otherwise. */
-void linemap_dump (FILE *, struct line_maps *, unsigned, bool);
+void linemap_dump (FILE *, line_maps *, unsigned, bool);
/* Dump line table SET to STREAM. If STREAM is NULL, stderr is used.
NUM_ORDINARY specifies how many ordinary maps to dump. NUM_MACRO
specifies how many macro maps to dump. */
-void line_table_dump (FILE *, struct line_maps *, unsigned int, unsigned int);
+void line_table_dump (FILE *, line_maps *, unsigned int, unsigned int);
/* An enum for distinguishing the various parts within a location_t. */
/* This is the data structure used by all the functions in mkdeps.c.
It's quite straightforward, but should be treated as opaque. */
-struct mkdeps;
+class mkdeps;
/* Create a deps buffer. */
-extern struct mkdeps *deps_init (void);
+extern class mkdeps *deps_init (void);
/* Destroy a deps buffer. */
-extern void deps_free (struct mkdeps *);
+extern void deps_free (class mkdeps *);
/* Add a set of "vpath" directories. The second argument is a colon-
separated list of pathnames, like you would set Make's VPATH
variable to. If a dependency or target name begins with any of
these pathnames (and the next path element is not "..") that
pathname is stripped off. */
-extern void deps_add_vpath (struct mkdeps *, const char *);
+extern void deps_add_vpath (class mkdeps *, const char *);
/* Add a target (appears on left side of the colon) to the deps list. Takes
a boolean indicating whether to quote the target for MAKE. */
-extern void deps_add_target (struct mkdeps *, const char *, int);
+extern void deps_add_target (class mkdeps *, const char *, int);
/* Sets the default target if none has been given already. An empty
string as the default target is interpreted as stdin. */
-extern void deps_add_default_target (struct mkdeps *, const char *);
+extern void deps_add_default_target (class mkdeps *, const char *);
/* Add a dependency (appears on the right side of the colon) to the
deps list. Dependencies will be printed in the order that they
were entered with this function. By convention, the first
dependency entered should be the primary source file. */
-extern void deps_add_dep (struct mkdeps *, const char *);
+extern void deps_add_dep (class mkdeps *, const char *);
/* Write out a deps buffer to a specified file. The third argument
is the number of columns to word-wrap at (0 means don't wrap). */
-extern void deps_write (const struct mkdeps *, FILE *, bool, unsigned int);
+extern void deps_write (const class mkdeps *, FILE *, bool, unsigned int);
/* Write out a deps buffer to a file, in a form that can be read back
with deps_restore. Returns nonzero on error, in which case the
error number will be in errno. */
-extern int deps_save (struct mkdeps *, FILE *);
+extern int deps_save (class mkdeps *, FILE *);
/* Read back dependency information written with deps_save into
the deps buffer. The third argument may be NULL, in which case
the dependency information is just skipped, or it may be a filename,
in which case that filename is skipped. */
-extern int deps_restore (struct mkdeps *, FILE *, const char *);
+extern int deps_restore (class mkdeps *, FILE *, const char *);
#endif /* ! LIBCPP_MKDEPS_H */
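A minimal usage sketch (illustration only; it relies solely on the
prototypes above, and the bool argument of deps_write is forwarded without
interpretation here since this header does not document it):

  #include <cstdio>

  static void
  emit_deps_sketch (void)
  {
    class mkdeps *d = deps_init ();             /* create a deps buffer */
    deps_add_vpath (d, "/src:/build");          /* strip these prefixes */
    deps_add_target (d, "foo.o", /*quote=*/1);  /* quoted for MAKE */
    deps_add_dep (d, "foo.c");                  /* primary source first */
    deps_add_dep (d, "foo.h");
    deps_write (d, stdout, false, /*colmax=*/72); /* wrap at 72 columns */
    deps_free (d);
  }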
/* Initialize a cpp_reader structure. */
cpp_reader *
cpp_create_reader (enum c_lang lang, cpp_hash_table *table,
- struct line_maps *line_table)
+ class line_maps *line_table)
{
cpp_reader *pfile;
/* Set the line_table entry in PFILE. This is called after reading a
PCH file, as the old line_table will be incorrect. */
void
-cpp_set_line_map (cpp_reader *pfile, struct line_maps *line_table)
+cpp_set_line_map (cpp_reader *pfile, class line_maps *line_table)
{
pfile->line_table = line_table;
}
#define CPP_BUF_COL(BUF) CPP_BUF_COLUMN(BUF, (BUF)->cur)
#define CPP_INCREMENT_LINE(PFILE, COLS_HINT) do { \
- const struct line_maps *line_table = PFILE->line_table; \
+ const class line_maps *line_table = PFILE->line_table; \
const struct line_map_ordinary *map = \
LINEMAPS_LAST_ORDINARY_MAP (line_table); \
linenum_type line = SOURCE_LINE (map, line_table->highest_line); \
struct lexer_state state;
/* Source line tracking. */
- struct line_maps *line_table;
+ class line_maps *line_table;
/* The line of the '#' of the current directive. */
location_t directive_line;
cpp_token eof;
/* Opaque handle to the dependencies of mkdeps.c. */
- struct mkdeps *deps;
+ class mkdeps *deps;
/* Obstack holding all macro hash nodes. This never shrinks.
See identifiers.c */
of the macro, rather than the location of the first character
of the macro. NUM_TOKENS is the number of tokens that are part of
the replacement-list of MACRO. */
-const line_map_macro *linemap_enter_macro (struct line_maps *,
+const line_map_macro *linemap_enter_macro (class line_maps *,
struct cpp_hashnode*,
location_t,
unsigned int);
LOCATION is the location of a token that is part of the
expansion-list of a macro expansion, return the line number of the
macro expansion point. */
-int linemap_get_expansion_line (struct line_maps *,
+int linemap_get_expansion_line (class line_maps *,
location_t);
/* Return the path of the file corresponding to source code location
macro expansion point.
SET is the line map set LOCATION comes from. */
-const char* linemap_get_expansion_filename (struct line_maps *,
+const char* linemap_get_expansion_filename (class line_maps *,
location_t);
#ifdef __cplusplus
#include "internal.h"
#include "hashtab.h"
-static void trace_include (const struct line_maps *, const line_map_ordinary *);
-static const line_map_ordinary * linemap_ordinary_map_lookup (struct line_maps *,
+static void trace_include (const line_maps *, const line_map_ordinary *);
+static const line_map_ordinary * linemap_ordinary_map_lookup (line_maps *,
location_t);
-static const line_map_macro* linemap_macro_map_lookup (struct line_maps *,
+static const line_map_macro* linemap_macro_map_lookup (line_maps *,
location_t);
static location_t linemap_macro_map_loc_to_def_point
(const line_map_macro *, location_t);
static location_t linemap_macro_map_loc_to_exp_point
(const line_map_macro *, location_t);
static location_t linemap_macro_loc_to_spelling_point
-(struct line_maps *, location_t, const line_map_ordinary **);
+(line_maps *, location_t, const line_map_ordinary **);
static location_t linemap_macro_loc_to_def_point (line_maps *,
location_t,
const line_map_ordinary **);
/* Rebuild the hash table from the location adhoc data. */
void
-rebuild_location_adhoc_htab (struct line_maps *set)
+rebuild_location_adhoc_htab (line_maps *set)
{
unsigned i;
set->location_adhoc_data_map.htab =
within a location_t, without needing to use an ad-hoc location. */
static bool
-can_be_stored_compactly_p (struct line_maps *set,
+can_be_stored_compactly_p (line_maps *set,
location_t locus,
source_range src_range,
void *data)
/* Combine LOCUS and DATA to a combined adhoc loc. */
location_t
-get_combined_adhoc_loc (struct line_maps *set,
+get_combined_adhoc_loc (line_maps *set,
location_t locus,
source_range src_range,
void *data)
/* Return the data for the adhoc loc. */
void *
-get_data_from_adhoc_loc (const struct line_maps *set, location_t loc)
+get_data_from_adhoc_loc (const class line_maps *set, location_t loc)
{
linemap_assert (IS_ADHOC_LOC (loc));
return set->location_adhoc_data_map.data[loc & MAX_LOCATION_T].data;
/* Return the location for the adhoc loc. */
location_t
-get_location_from_adhoc_loc (const struct line_maps *set, location_t loc)
+get_location_from_adhoc_loc (const class line_maps *set, location_t loc)
{
linemap_assert (IS_ADHOC_LOC (loc));
return set->location_adhoc_data_map.data[loc & MAX_LOCATION_T].locus;
/* Return the source_range for adhoc location LOC. */
static source_range
-get_range_from_adhoc_loc (const struct line_maps *set, location_t loc)
+get_range_from_adhoc_loc (const class line_maps *set, location_t loc)
{
linemap_assert (IS_ADHOC_LOC (loc));
return set->location_adhoc_data_map.data[loc & MAX_LOCATION_T].src_range;
lookaside table, or embedded inside LOC itself. */
source_range
-get_range_from_loc (struct line_maps *set,
+get_range_from_loc (line_maps *set,
location_t loc)
{
if (IS_ADHOC_LOC (loc))
/* Initialize a line map set. */
void
-linemap_init (struct line_maps *set,
+linemap_init (line_maps *set,
location_t builtin_location)
{
#if __GNUC__ == 4 && __GNUC_MINOR__ == 2 && !defined (__clang__)
/* PR33916, needed to fix PR82939. */
- memset (set, 0, sizeof (struct line_maps));
+ memset (set, 0, sizeof (line_maps));
#else
new (set) line_maps();
#endif
/* Check for and warn about line_maps entered but not exited. */
void
-linemap_check_files_exited (struct line_maps *set)
+linemap_check_files_exited (line_maps *set)
{
/* Depending upon whether we are handling preprocessed input or
not, this can be a user error or an ICE. */
macro maps are allocated in different memory locations. */
static struct line_map *
-new_linemap (struct line_maps *set, location_t start_location)
+new_linemap (line_maps *set, location_t start_location)
{
bool macro_p = start_location >= LINE_MAP_MAX_LOCATION;
unsigned num_maps_allocated = LINEMAPS_ALLOCATED (set, macro_p);
maps, so any stored line_map pointers should not be used. */
const struct line_map *
-linemap_add (struct line_maps *set, enum lc_reason reason,
+linemap_add (line_maps *set, enum lc_reason reason,
unsigned int sysp, const char *to_file, linenum_type to_line)
{
/* Generate a start_location above the current highest_location.
macro expansion, FALSE otherwise. */
bool
-linemap_tracks_macro_expansion_locs_p (struct line_maps *set)
+linemap_tracks_macro_expansion_locs_p (line_maps *set)
{
return LINEMAPS_MACRO_MAPS (set) != NULL;
}
macro tokens anymore. */
const line_map_macro *
-linemap_enter_macro (struct line_maps *set, struct cpp_hashnode *macro_node,
+linemap_enter_macro (class line_maps *set, struct cpp_hashnode *macro_node,
location_t expansion, unsigned int num_tokens)
{
location_t start_location
the highest_location). */
location_t
-linemap_line_start (struct line_maps *set, linenum_type to_line,
+linemap_line_start (line_maps *set, linenum_type to_line,
unsigned int max_column_hint)
{
line_map_ordinary *map = LINEMAPS_LAST_ORDINARY_MAP (set);
encoded from. */
location_t
-linemap_position_for_column (struct line_maps *set, unsigned int to_column)
+linemap_position_for_column (line_maps *set, unsigned int to_column)
{
location_t r = set->highest_line;
virtual locations. */
location_t
-linemap_position_for_loc_and_offset (struct line_maps *set,
+linemap_position_for_loc_and_offset (line_maps *set,
location_t loc,
unsigned int column_offset)
{
ordinary or a macro map), returns that map. */
const struct line_map*
-linemap_lookup (struct line_maps *set, location_t line)
+linemap_lookup (line_maps *set, location_t line)
{
if (IS_ADHOC_LOC (line))
line = get_location_from_adhoc_loc (set, line);
binary search. */
static const line_map_ordinary *
-linemap_ordinary_map_lookup (struct line_maps *set, location_t line)
+linemap_ordinary_map_lookup (line_maps *set, location_t line)
{
unsigned int md, mn, mx;
const line_map_ordinary *cached, *result;
binary search. */
static const line_map_macro *
-linemap_macro_map_lookup (struct line_maps *set, location_t line)
+linemap_macro_map_lookup (line_maps *set, location_t line)
{
unsigned int md, mn, mx;
const struct line_map_macro *cached, *result;
macro expansion point. */
int
-linemap_get_expansion_line (struct line_maps *set,
+linemap_get_expansion_line (line_maps *set,
location_t location)
{
const line_map_ordinary *map = NULL;
SET is the line map set LOCATION comes from. */
const char*
-linemap_get_expansion_filename (struct line_maps *set,
+linemap_get_expansion_filename (line_maps *set,
location_t location)
{
const struct line_map_ordinary *map = NULL;
header, but expanded in a non-system file. */
int
-linemap_location_in_system_header_p (struct line_maps *set,
+linemap_location_in_system_header_p (line_maps *set,
location_t location)
{
const struct line_map *map = NULL;
a macro expansion, FALSE otherwise. */
bool
-linemap_location_from_macro_expansion_p (const struct line_maps *set,
+linemap_location_from_macro_expansion_p (const class line_maps *set,
location_t location)
{
if (IS_ADHOC_LOC (location))
virtual location of the token inside the resulting macro. */
static const struct line_map*
-first_map_in_common_1 (struct line_maps *set,
+first_map_in_common_1 (line_maps *set,
location_t *loc0,
location_t *loc1)
{
return of a non-NULL result. */
static const struct line_map*
-first_map_in_common (struct line_maps *set,
+first_map_in_common (line_maps *set,
location_t loc0,
location_t loc1,
location_t *res_loc0,
otherwise. */
int
-linemap_compare_locations (struct line_maps *set,
+linemap_compare_locations (line_maps *set,
location_t pre,
location_t post)
{
/* Print an include trace, for e.g. the -H option of the preprocessor. */
static void
-trace_include (const struct line_maps *set, const line_map_ordinary *map)
+trace_include (const class line_maps *set, const line_map_ordinary *map)
{
unsigned int i = set->depth;
This is a subroutine for linemap_resolve_location. */
static location_t
-linemap_macro_loc_to_spelling_point (struct line_maps *set,
+linemap_macro_loc_to_spelling_point (line_maps *set,
location_t location,
const line_map_ordinary **original_map)
{
This is a subroutine of linemap_resolve_location. */
static location_t
-linemap_macro_loc_to_def_point (struct line_maps *set,
+linemap_macro_loc_to_def_point (line_maps *set,
location_t location,
const line_map_ordinary **original_map)
{
This is a subroutine of linemap_resolve_location. */
static location_t
-linemap_macro_loc_to_exp_point (struct line_maps *set,
+linemap_macro_loc_to_exp_point (line_maps *set,
location_t location,
const line_map_ordinary **original_map)
{
UNKNOWN_LOCATION or BUILTINS_LOCATION in GCC. */
location_t
-linemap_resolve_location (struct line_maps *set,
+linemap_resolve_location (line_maps *set,
location_t loc,
enum location_resolution_kind lrk,
const line_map_ordinary **map)
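linemap_resolve_location is the usual client-facing way to turn a virtual
location into a real one; LRK selects which real location is wanted. A
sketch, assuming loc holds a (possibly macro-expanded) location and using
the LRK_SPELLING_LOCATION resolution kind:

  /* Sketch: resolve LOC to the point where the token was spelled;
     MAP receives the ordinary map of the result.  */
  const line_map_ordinary *map = NULL;
  location_t spelled
    = linemap_resolve_location (line_table, loc,
                                LRK_SPELLING_LOCATION, &map);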
definition of a macro, FALSE otherwise. */
bool
-linemap_location_from_macro_definition_p (struct line_maps *set,
+linemap_location_from_macro_definition_p (line_maps *set,
location_t loc)
{
if (IS_ADHOC_LOC (loc))
to the map of the returned location. */
location_t
-linemap_unwind_toward_expansion (struct line_maps *set,
+linemap_unwind_toward_expansion (line_maps *set,
location_t loc,
const struct line_map **map)
{
*MAP is set to the map of the returned location if the latter is
different from LOC. */
location_t
-linemap_unwind_to_first_non_reserved_loc (struct line_maps *set,
+linemap_unwind_to_first_non_reserved_loc (line_maps *set,
location_t loc,
const struct line_map **map)
{
location is returned. */
expanded_location
-linemap_expand_location (struct line_maps *set,
+linemap_expand_location (line_maps *set,
const struct line_map *map,
location_t loc)
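Once a location has been resolved, linemap_expand_location decodes it into
its file/line/column parts. A sketch using the expanded_location fields:

  /* Sketch: decode LOC through MAP and print a classic
     file:line:column triple.  */
  expanded_location xloc = linemap_expand_location (line_table, map, loc);
  fprintf (stderr, "%s:%d:%d\n", xloc.file, xloc.line, xloc.column);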
dump a macro map, false otherwise. */
void
-linemap_dump (FILE *stream, struct line_maps *set, unsigned ix, bool is_macro)
+linemap_dump (FILE *stream, class line_maps *set, unsigned ix, bool is_macro)
{
const char *const lc_reasons_v[LC_HWM]
= { "LC_ENTER", "LC_LEAVE", "LC_RENAME", "LC_RENAME_VERBATIM",
stream STREAM. SET is the line map set LOC comes from. */
void
-linemap_dump_location (struct line_maps *set,
+linemap_dump_location (line_maps *set,
location_t loc,
FILE *stream)
{
location emitted for that file. */
bool
-linemap_get_file_highest_location (struct line_maps *set,
+linemap_get_file_highest_location (line_maps *set,
const char *file_name,
location_t *loc)
{
parts of the line table SET. */
void
-linemap_get_statistics (struct line_maps *set,
+linemap_get_statistics (line_maps *set,
struct linemap_stats *s)
{
long ordinary_maps_allocated_size, ordinary_maps_used_size,
specifies how many macro maps to dump. */
void
-line_table_dump (FILE *stream, struct line_maps *set, unsigned int num_ordinary,
+line_table_dump (FILE *stream, class line_maps *set, unsigned int num_ordinary,
unsigned int num_macro)
{
unsigned int i;
/* If T begins with any of the partial pathnames listed in d->vpath,
then advance T to point beyond that pathname. */
static const char *
-apply_vpath (struct mkdeps *d, const char *t)
+apply_vpath (class mkdeps *d, const char *t)
{
if (unsigned len = d->vpath.size ())
for (unsigned i = len; i--;)
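As an illustration of that rule (the names are made up, not from this
patch): after deps_add_vpath (d, "../src") has registered "../src",
apply_vpath (d, "../src/parse.c") would hand back "parse.c", so the
dependency is written relative to the VPATH entry.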
/* Public routines. */
-struct mkdeps *
+class mkdeps *
deps_init (void)
{
return new mkdeps ();
}
void
-deps_free (struct mkdeps *d)
+deps_free (class mkdeps *d)
{
delete d;
}
/* Adds a target T. We make a copy, so it need not be a permanent
string. QUOTE is true if the string should be quoted. */
void
-deps_add_target (struct mkdeps *d, const char *t, int quote)
+deps_add_target (class mkdeps *d, const char *t, int quote)
{
t = xstrdup (apply_vpath (d, t));
string as the default target is interpreted as stdin. The string
is quoted for MAKE. */
void
-deps_add_default_target (struct mkdeps *d, const char *tgt)
+deps_add_default_target (class mkdeps *d, const char *tgt)
{
/* Only if we have no targets. */
if (d->targets.size ())
}
void
-deps_add_dep (struct mkdeps *d, const char *t)
+deps_add_dep (class mkdeps *d, const char *t)
{
gcc_assert (*t);
}
void
-deps_add_vpath (struct mkdeps *d, const char *vpath)
+deps_add_vpath (class mkdeps *d, const char *vpath)
{
const char *elem, *p;
.PHONY targets for all the dependencies too. */
static void
-make_write (const struct mkdeps *d, FILE *fp, bool phony, unsigned int colmax)
+make_write (const class mkdeps *d, FILE *fp, bool phony, unsigned int colmax)
{
unsigned column = 0;
if (colmax && colmax < 34)
only Make at the moment). */
void
-deps_write (const struct mkdeps *d, FILE *fp, bool phony, unsigned int colmax)
+deps_write (const class mkdeps *d, FILE *fp, bool phony, unsigned int colmax)
{
make_write (d, fp, phony, colmax);
}
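Taken together, the mkdeps entry points above form a small builder API:
create the object, add targets and dependencies, write the Make rules, and
free it. A hedged usage sketch with illustrative file names:

  /* Sketch of a typical mkdeps lifetime, as a driver might use it.  */
  class mkdeps *d = deps_init ();
  deps_add_target (d, "foo.o", 1 /* quote for Make */);
  deps_add_dep (d, "foo.c");
  deps_add_dep (d, "foo.h");
  deps_write (d, stdout, true /* emit .PHONY targets */, 72);
  deps_free (d);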
error number will be in errno. */
int
-deps_save (struct mkdeps *deps, FILE *f)
+deps_save (class mkdeps *deps, FILE *f)
{
unsigned int i;
size_t size;
in which case that filename is skipped. */
int
-deps_restore (struct mkdeps *deps, FILE *fd, const char *self)
+deps_restore (class mkdeps *deps, FILE *fd, const char *self)
{
size_t size;
char *buf = NULL;