Apply mechanical replacement (generated patch).
authorMartin Liska <mliska@suse.cz>
Tue, 12 Nov 2019 10:08:40 +0000 (11:08 +0100)
committerMartin Liska <marxin@gcc.gnu.org>
Tue, 12 Nov 2019 10:08:40 +0000 (10:08 +0000)
2019-11-12  Martin Liska  <mliska@suse.cz>

* asan.c (asan_sanitize_stack_p): Replace old parameter syntax
with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
macro.
(asan_sanitize_allocas_p): Likewise.
(asan_emit_stack_protection): Likewise.
(asan_protect_global): Likewise.
(instrument_derefs): Likewise.
(instrument_builtin_call): Likewise.
(asan_expand_mark_ifn): Likewise.
* auto-profile.c (auto_profile): Likewise.
* bb-reorder.c (copy_bb_p): Likewise.
(duplicate_computed_gotos): Likewise.
* builtins.c (inline_expand_builtin_string_cmp): Likewise.
* cfgcleanup.c (try_crossjump_to_edge): Likewise.
(try_crossjump_bb): Likewise.
* cfgexpand.c (defer_stack_allocation): Likewise.
(stack_protect_classify_type): Likewise.
(pass_expand::execute): Likewise.
* cfgloopanal.c (expected_loop_iterations_unbounded): Likewise.
(estimate_reg_pressure_cost): Likewise.
* cgraph.c (cgraph_edge::maybe_hot_p): Likewise.
* combine.c (combine_instructions): Likewise.
(record_value_for_reg): Likewise.
* common/config/aarch64/aarch64-common.c (aarch64_option_validate_param): Likewise.
(aarch64_option_default_params): Likewise.
* common/config/ia64/ia64-common.c (ia64_option_default_params): Likewise.
* common/config/powerpcspe/powerpcspe-common.c (rs6000_option_default_params): Likewise.
* common/config/rs6000/rs6000-common.c (rs6000_option_default_params): Likewise.
* common/config/sh/sh-common.c (sh_option_default_params): Likewise.
* config/aarch64/aarch64.c (aarch64_output_probe_stack_range): Likewise.
(aarch64_allocate_and_probe_stack_space): Likewise.
(aarch64_expand_epilogue): Likewise.
(aarch64_override_options_internal): Likewise.
* config/alpha/alpha.c (alpha_option_override): Likewise.
* config/arm/arm.c (arm_option_override): Likewise.
(arm_valid_target_attribute_p): Likewise.
* config/i386/i386-options.c (ix86_option_override_internal): Likewise.
* config/i386/i386.c (get_probe_interval): Likewise.
(ix86_adjust_stack_and_probe_stack_clash): Likewise.
(ix86_max_noce_ifcvt_seq_cost): Likewise.
* config/ia64/ia64.c (ia64_adjust_cost): Likewise.
* config/rs6000/rs6000-logue.c (get_stack_clash_protection_probe_interval): Likewise.
(get_stack_clash_protection_guard_size): Likewise.
* config/rs6000/rs6000.c (rs6000_option_override_internal): Likewise.
* config/s390/s390.c (allocate_stack_space): Likewise.
(s390_emit_prologue): Likewise.
(s390_option_override_internal): Likewise.
* config/sparc/sparc.c (sparc_option_override): Likewise.
* config/visium/visium.c (visium_option_override): Likewise.
* coverage.c (get_coverage_counts): Likewise.
(coverage_compute_profile_id): Likewise.
(coverage_begin_function): Likewise.
(coverage_end_function): Likewise.
* cse.c (cse_find_path): Likewise.
(cse_extended_basic_block): Likewise.
(cse_main): Likewise.
* cselib.c (cselib_invalidate_mem): Likewise.
* dse.c (dse_step1): Likewise.
* emit-rtl.c (set_new_first_and_last_insn): Likewise.
(get_max_insn_count): Likewise.
(make_debug_insn_raw): Likewise.
(init_emit): Likewise.
* explow.c (compute_stack_clash_protection_loop_data): Likewise.
* final.c (compute_alignments): Likewise.
* fold-const.c (fold_range_test): Likewise.
(fold_truth_andor): Likewise.
(tree_single_nonnegative_warnv_p): Likewise.
(integer_valued_real_single_p): Likewise.
* gcse.c (want_to_gcse_p): Likewise.
(prune_insertions_deletions): Likewise.
(hoist_code): Likewise.
(gcse_or_cprop_is_too_expensive): Likewise.
* ggc-common.c: Likewise.
* ggc-page.c (ggc_collect): Likewise.
* gimple-loop-interchange.cc (MAX_NUM_STMT): Likewise.
(MAX_DATAREFS): Likewise.
(OUTER_STRIDE_RATIO): Likewise.
* gimple-loop-jam.c (tree_loop_unroll_and_jam): Likewise.
* gimple-loop-versioning.cc (loop_versioning::max_insns_for_loop): Likewise.
* gimple-ssa-split-paths.c (is_feasible_trace): Likewise.
* gimple-ssa-store-merging.c (imm_store_chain_info::try_coalesce_bswap): Likewise.
(imm_store_chain_info::coalesce_immediate_stores): Likewise.
(imm_store_chain_info::output_merged_store): Likewise.
(pass_store_merging::process_store): Likewise.
* gimple-ssa-strength-reduction.c (find_basis_for_base_expr): Likewise.
* graphite-isl-ast-to-gimple.c (class translate_isl_ast_to_gimple): Likewise.
(scop_to_isl_ast): Likewise.
* graphite-optimize-isl.c (get_schedule_for_node_st): Likewise.
(optimize_isl): Likewise.
* graphite-scop-detection.c (build_scops): Likewise.
* haifa-sched.c (set_modulo_params): Likewise.
(rank_for_schedule): Likewise.
(model_add_to_worklist): Likewise.
(model_promote_insn): Likewise.
(model_choose_insn): Likewise.
(queue_to_ready): Likewise.
(autopref_multipass_dfa_lookahead_guard): Likewise.
(schedule_block): Likewise.
(sched_init): Likewise.
* hsa-gen.c (init_prologue): Likewise.
* ifcvt.c (bb_ok_for_noce_convert_multiple_sets): Likewise.
(cond_move_process_if_block): Likewise.
* ipa-cp.c (ipcp_lattice::add_value): Likewise.
(merge_agg_lats_step): Likewise.
(devirtualization_time_bonus): Likewise.
(hint_time_bonus): Likewise.
(incorporate_penalties): Likewise.
(good_cloning_opportunity_p): Likewise.
(ipcp_propagate_stage): Likewise.
* ipa-fnsummary.c (decompose_param_expr): Likewise.
(set_switch_stmt_execution_predicate): Likewise.
(analyze_function_body): Likewise.
(compute_fn_summary): Likewise.
* ipa-inline-analysis.c (estimate_growth): Likewise.
* ipa-inline.c (caller_growth_limits): Likewise.
(inline_insns_single): Likewise.
(inline_insns_auto): Likewise.
(can_inline_edge_by_limits_p): Likewise.
(want_early_inline_function_p): Likewise.
(big_speedup_p): Likewise.
(want_inline_small_function_p): Likewise.
(want_inline_self_recursive_call_p): Likewise.
(edge_badness): Likewise.
(recursive_inlining): Likewise.
(compute_max_insns): Likewise.
(early_inliner): Likewise.
* ipa-polymorphic-call.c (csftc_abort_walking_p): Likewise.
* ipa-profile.c (ipa_profile): Likewise.
* ipa-prop.c (determine_known_aggregate_parts): Likewise.
(ipa_analyze_node): Likewise.
(ipcp_transform_function): Likewise.
* ipa-split.c (consider_split): Likewise.
* ipa-sra.c (allocate_access): Likewise.
(process_scan_results): Likewise.
(ipa_sra_summarize_function): Likewise.
(pull_accesses_from_callee): Likewise.
* ira-build.c (loop_compare_func): Likewise.
(mark_loops_for_removal): Likewise.
* ira-conflicts.c (build_conflict_bit_table): Likewise.
* loop-doloop.c (doloop_optimize): Likewise.
* loop-invariant.c (gain_for_invariant): Likewise.
(move_loop_invariants): Likewise.
* loop-unroll.c (decide_unroll_constant_iterations): Likewise.
(decide_unroll_runtime_iterations): Likewise.
(decide_unroll_stupid): Likewise.
(expand_var_during_unrolling): Likewise.
* lra-assigns.c (spill_for): Likewise.
* lra-constraints.c (EBB_PROBABILITY_CUTOFF): Likewise.
* modulo-sched.c (sms_schedule): Likewise.
(DFA_HISTORY): Likewise.
* opts.c (default_options_optimization): Likewise.
(finish_options): Likewise.
(common_handle_option): Likewise.
* postreload-gcse.c (eliminate_partially_redundant_load): Likewise.
(if): Likewise.
* predict.c (get_hot_bb_threshold): Likewise.
(maybe_hot_count_p): Likewise.
(probably_never_executed): Likewise.
(predictable_edge_p): Likewise.
(predict_loops): Likewise.
(expr_expected_value_1): Likewise.
(tree_predict_by_opcode): Likewise.
(handle_missing_profiles): Likewise.
* reload.c (find_equiv_reg): Likewise.
* reorg.c (redundant_insn): Likewise.
* resource.c (mark_target_live_regs): Likewise.
(incr_ticks_for_insn): Likewise.
* sanopt.c (pass_sanopt::execute): Likewise.
* sched-deps.c (sched_analyze_1): Likewise.
(sched_analyze_2): Likewise.
(sched_analyze_insn): Likewise.
(deps_analyze_insn): Likewise.
* sched-ebb.c (schedule_ebbs): Likewise.
* sched-rgn.c (find_single_block_region): Likewise.
(too_large): Likewise.
(haifa_find_rgns): Likewise.
(extend_rgns): Likewise.
(new_ready): Likewise.
(schedule_region): Likewise.
(sched_rgn_init): Likewise.
* sel-sched-ir.c (make_region_from_loop): Likewise.
* sel-sched-ir.h (MAX_WS): Likewise.
* sel-sched.c (process_pipelined_exprs): Likewise.
(sel_setup_region_sched_flags): Likewise.
* shrink-wrap.c (try_shrink_wrapping): Likewise.
* targhooks.c (default_max_noce_ifcvt_seq_cost): Likewise.
* toplev.c (print_version): Likewise.
(process_options): Likewise.
* tracer.c (tail_duplicate): Likewise.
* trans-mem.c (tm_log_add): Likewise.
* tree-chrec.c (chrec_fold_plus_1): Likewise.
* tree-data-ref.c (split_constant_offset): Likewise.
(compute_all_dependences): Likewise.
* tree-if-conv.c (MAX_PHI_ARG_NUM): Likewise.
* tree-inline.c (remap_gimple_stmt): Likewise.
* tree-loop-distribution.c (MAX_DATAREFS_NUM): Likewise.
* tree-parloops.c (MIN_PER_THREAD): Likewise.
(create_parallel_loop): Likewise.
* tree-predcom.c (determine_unroll_factor): Likewise.
* tree-scalar-evolution.c (instantiate_scev_r): Likewise.
* tree-sra.c (analyze_all_variable_accesses): Likewise.
* tree-ssa-ccp.c (fold_builtin_alloca_with_align): Likewise.
* tree-ssa-dse.c (setup_live_bytes_from_ref): Likewise.
(dse_optimize_redundant_stores): Likewise.
(dse_classify_store): Likewise.
* tree-ssa-ifcombine.c (ifcombine_ifandif): Likewise.
* tree-ssa-loop-ch.c (ch_base::copy_headers): Likewise.
* tree-ssa-loop-im.c (LIM_EXPENSIVE): Likewise.
* tree-ssa-loop-ivcanon.c (try_unroll_loop_completely): Likewise.
(try_peel_loop): Likewise.
(tree_unroll_loops_completely): Likewise.
* tree-ssa-loop-ivopts.c (avg_loop_niter): Likewise.
(CONSIDER_ALL_CANDIDATES_BOUND): Likewise.
(MAX_CONSIDERED_GROUPS): Likewise.
(ALWAYS_PRUNE_CAND_SET_BOUND): Likewise.
* tree-ssa-loop-manip.c (can_unroll_loop_p): Likewise.
* tree-ssa-loop-niter.c (MAX_ITERATIONS_TO_TRACK): Likewise.
* tree-ssa-loop-prefetch.c (PREFETCH_BLOCK): Likewise.
(L1_CACHE_SIZE_BYTES): Likewise.
(L2_CACHE_SIZE_BYTES): Likewise.
(should_issue_prefetch_p): Likewise.
(schedule_prefetches): Likewise.
(determine_unroll_factor): Likewise.
(volume_of_references): Likewise.
(add_subscript_strides): Likewise.
(self_reuse_distance): Likewise.
(mem_ref_count_reasonable_p): Likewise.
(insn_to_prefetch_ratio_too_small_p): Likewise.
(loop_prefetch_arrays): Likewise.
(tree_ssa_prefetch_arrays): Likewise.
* tree-ssa-loop-unswitch.c (tree_unswitch_single_loop): Likewise.
* tree-ssa-math-opts.c (gimple_expand_builtin_pow): Likewise.
(convert_mult_to_fma): Likewise.
(math_opts_dom_walker::after_dom_children): Likewise.
* tree-ssa-phiopt.c (cond_if_else_store_replacement): Likewise.
(hoist_adjacent_loads): Likewise.
(gate_hoist_loads): Likewise.
* tree-ssa-pre.c (translate_vuse_through_block): Likewise.
(compute_partial_antic_aux): Likewise.
* tree-ssa-reassoc.c (get_reassociation_width): Likewise.
* tree-ssa-sccvn.c (vn_reference_lookup_pieces): Likewise.
(vn_reference_lookup): Likewise.
(do_rpo_vn): Likewise.
* tree-ssa-scopedtables.c (avail_exprs_stack::lookup_avail_expr): Likewise.
* tree-ssa-sink.c (select_best_block): Likewise.
* tree-ssa-strlen.c (new_stridx): Likewise.
(new_addr_stridx): Likewise.
(get_range_strlen_dynamic): Likewise.
(class ssa_name_limit_t): Likewise.
* tree-ssa-structalias.c (push_fields_onto_fieldstack): Likewise.
(create_variable_info_for_1): Likewise.
(init_alias_vars): Likewise.
* tree-ssa-tail-merge.c (find_clusters_1): Likewise.
(tail_merge_optimize): Likewise.
* tree-ssa-threadbackward.c (thread_jumps::profitable_jump_thread_path): Likewise.
(thread_jumps::fsm_find_control_statement_thread_paths): Likewise.
(thread_jumps::find_jump_threads_backwards): Likewise.
* tree-ssa-threadedge.c (record_temporary_equivalences_from_stmts_at_dest): Likewise.
* tree-ssa-uninit.c (compute_control_dep_chain): Likewise.
* tree-switch-conversion.c (switch_conversion::check_range): Likewise.
(jump_table_cluster::can_be_handled): Likewise.
* tree-switch-conversion.h (jump_table_cluster::case_values_threshold): Likewise.
(SWITCH_CONVERSION_BRANCH_RATIO): Likewise.
(param_switch_conversion_branch_ratio): Likewise.
* tree-vect-data-refs.c (vect_mark_for_runtime_alias_test): Likewise.
(vect_enhance_data_refs_alignment): Likewise.
(vect_prune_runtime_alias_test_list): Likewise.
* tree-vect-loop.c (vect_analyze_loop_costing): Likewise.
(vect_get_datarefs_in_loop): Likewise.
(vect_analyze_loop): Likewise.
* tree-vect-slp.c (vect_slp_bb): Likewise.
* tree-vectorizer.h: Likewise.
* tree-vrp.c (find_switch_asserts): Likewise.
(vrp_prop::check_mem_ref): Likewise.
* tree.c (wide_int_to_tree_1): Likewise.
(cache_integer_cst): Likewise.
* var-tracking.c (EXPR_USE_DEPTH): Likewise.
(reverse_op): Likewise.
(vt_find_locations): Likewise.
2019-11-12  Martin Liska  <mliska@suse.cz>

* gimple-parser.c (c_parser_parse_gimple_body): Replace old parameter syntax
with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
macro.
2019-11-12  Martin Liska  <mliska@suse.cz>

* name-lookup.c (namespace_hints::namespace_hints): Replace old parameter syntax
with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
macro.
* typeck.c (comptypes): Likewise.
2019-11-12  Martin Liska  <mliska@suse.cz>

* lto-partition.c (lto_balanced_map): Replace old parameter syntax
with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
macro.
* lto.c (do_whole_program_analysis): Likewise.

From-SVN: r278085

135 files changed:
gcc/ChangeLog
gcc/asan.c
gcc/auto-profile.c
gcc/bb-reorder.c
gcc/builtins.c
gcc/c/ChangeLog
gcc/c/gimple-parser.c
gcc/cfgcleanup.c
gcc/cfgexpand.c
gcc/cfgloopanal.c
gcc/cgraph.c
gcc/combine.c
gcc/common/config/aarch64/aarch64-common.c
gcc/common/config/ia64/ia64-common.c
gcc/common/config/powerpcspe/powerpcspe-common.c
gcc/common/config/rs6000/rs6000-common.c
gcc/common/config/sh/sh-common.c
gcc/config/aarch64/aarch64.c
gcc/config/alpha/alpha.c
gcc/config/arm/arm.c
gcc/config/i386/i386-options.c
gcc/config/i386/i386.c
gcc/config/ia64/ia64.c
gcc/config/rs6000/rs6000-logue.c
gcc/config/rs6000/rs6000.c
gcc/config/s390/s390.c
gcc/config/sparc/sparc.c
gcc/config/visium/visium.c
gcc/coverage.c
gcc/cp/ChangeLog
gcc/cp/name-lookup.c
gcc/cp/typeck.c
gcc/cse.c
gcc/cselib.c
gcc/dse.c
gcc/emit-rtl.c
gcc/explow.c
gcc/final.c
gcc/fold-const.c
gcc/gcse.c
gcc/ggc-common.c
gcc/ggc-page.c
gcc/gimple-loop-interchange.cc
gcc/gimple-loop-jam.c
gcc/gimple-loop-versioning.cc
gcc/gimple-ssa-split-paths.c
gcc/gimple-ssa-store-merging.c
gcc/gimple-ssa-strength-reduction.c
gcc/graphite-isl-ast-to-gimple.c
gcc/graphite-optimize-isl.c
gcc/graphite-scop-detection.c
gcc/haifa-sched.c
gcc/hsa-gen.c
gcc/ifcvt.c
gcc/ipa-cp.c
gcc/ipa-fnsummary.c
gcc/ipa-inline-analysis.c
gcc/ipa-inline.c
gcc/ipa-polymorphic-call.c
gcc/ipa-profile.c
gcc/ipa-prop.c
gcc/ipa-split.c
gcc/ipa-sra.c
gcc/ira-build.c
gcc/ira-conflicts.c
gcc/loop-doloop.c
gcc/loop-invariant.c
gcc/loop-unroll.c
gcc/lra-assigns.c
gcc/lra-constraints.c
gcc/lto/ChangeLog
gcc/lto/lto-partition.c
gcc/lto/lto.c
gcc/modulo-sched.c
gcc/opts.c
gcc/postreload-gcse.c
gcc/predict.c
gcc/reload.c
gcc/reorg.c
gcc/resource.c
gcc/sanopt.c
gcc/sched-deps.c
gcc/sched-ebb.c
gcc/sched-rgn.c
gcc/sel-sched-ir.c
gcc/sel-sched-ir.h
gcc/sel-sched.c
gcc/shrink-wrap.c
gcc/targhooks.c
gcc/toplev.c
gcc/tracer.c
gcc/trans-mem.c
gcc/tree-chrec.c
gcc/tree-data-ref.c
gcc/tree-if-conv.c
gcc/tree-inline.c
gcc/tree-loop-distribution.c
gcc/tree-parloops.c
gcc/tree-predcom.c
gcc/tree-scalar-evolution.c
gcc/tree-sra.c
gcc/tree-ssa-ccp.c
gcc/tree-ssa-dse.c
gcc/tree-ssa-ifcombine.c
gcc/tree-ssa-loop-ch.c
gcc/tree-ssa-loop-im.c
gcc/tree-ssa-loop-ivcanon.c
gcc/tree-ssa-loop-ivopts.c
gcc/tree-ssa-loop-manip.c
gcc/tree-ssa-loop-niter.c
gcc/tree-ssa-loop-prefetch.c
gcc/tree-ssa-loop-split.c
gcc/tree-ssa-loop-unswitch.c
gcc/tree-ssa-math-opts.c
gcc/tree-ssa-phiopt.c
gcc/tree-ssa-pre.c
gcc/tree-ssa-reassoc.c
gcc/tree-ssa-sccvn.c
gcc/tree-ssa-scopedtables.c
gcc/tree-ssa-sink.c
gcc/tree-ssa-strlen.c
gcc/tree-ssa-structalias.c
gcc/tree-ssa-tail-merge.c
gcc/tree-ssa-threadbackward.c
gcc/tree-ssa-threadedge.c
gcc/tree-ssa-uninit.c
gcc/tree-switch-conversion.c
gcc/tree-switch-conversion.h
gcc/tree-vect-data-refs.c
gcc/tree-vect-loop.c
gcc/tree-vect-slp.c
gcc/tree-vectorizer.h
gcc/tree-vrp.c
gcc/tree.c
gcc/var-tracking.c

index d4de69a271f85d5ff3e05f2f2caba8a7ae67233a..80986936e1bccb42b97ba543a8692ac0cca3cec1 100644 (file)
@@ -1,3 +1,285 @@
+2019-11-12  Martin Liska  <mliska@suse.cz>
+
+       * asan.c (asan_sanitize_stack_p): Replace old parameter syntax
+       with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
+       macro.
+       (asan_sanitize_allocas_p): Likewise.
+       (asan_emit_stack_protection): Likewise.
+       (asan_protect_global): Likewise.
+       (instrument_derefs): Likewise.
+       (instrument_builtin_call): Likewise.
+       (asan_expand_mark_ifn): Likewise.
+       * auto-profile.c (auto_profile): Likewise.
+       * bb-reorder.c (copy_bb_p): Likewise.
+       (duplicate_computed_gotos): Likewise.
+       * builtins.c (inline_expand_builtin_string_cmp): Likewise.
+       * cfgcleanup.c (try_crossjump_to_edge): Likewise.
+       (try_crossjump_bb): Likewise.
+       * cfgexpand.c (defer_stack_allocation): Likewise.
+       (stack_protect_classify_type): Likewise.
+       (pass_expand::execute): Likewise.
+       * cfgloopanal.c (expected_loop_iterations_unbounded): Likewise.
+       (estimate_reg_pressure_cost): Likewise.
+       * cgraph.c (cgraph_edge::maybe_hot_p): Likewise.
+       * combine.c (combine_instructions): Likewise.
+       (record_value_for_reg): Likewise.
+       * common/config/aarch64/aarch64-common.c (aarch64_option_validate_param): Likewise.
+       (aarch64_option_default_params): Likewise.
+       * common/config/ia64/ia64-common.c (ia64_option_default_params): Likewise.
+       * common/config/powerpcspe/powerpcspe-common.c (rs6000_option_default_params): Likewise.
+       * common/config/rs6000/rs6000-common.c (rs6000_option_default_params): Likewise.
+       * common/config/sh/sh-common.c (sh_option_default_params): Likewise.
+       * config/aarch64/aarch64.c (aarch64_output_probe_stack_range): Likewise.
+       (aarch64_allocate_and_probe_stack_space): Likewise.
+       (aarch64_expand_epilogue): Likewise.
+       (aarch64_override_options_internal): Likewise.
+       * config/alpha/alpha.c (alpha_option_override): Likewise.
+       * config/arm/arm.c (arm_option_override): Likewise.
+       (arm_valid_target_attribute_p): Likewise.
+       * config/i386/i386-options.c (ix86_option_override_internal): Likewise.
+       * config/i386/i386.c (get_probe_interval): Likewise.
+       (ix86_adjust_stack_and_probe_stack_clash): Likewise.
+       (ix86_max_noce_ifcvt_seq_cost): Likewise.
+       * config/ia64/ia64.c (ia64_adjust_cost): Likewise.
+       * config/rs6000/rs6000-logue.c (get_stack_clash_protection_probe_interval): Likewise.
+       (get_stack_clash_protection_guard_size): Likewise.
+       * config/rs6000/rs6000.c (rs6000_option_override_internal): Likewise.
+       * config/s390/s390.c (allocate_stack_space): Likewise.
+       (s390_emit_prologue): Likewise.
+       (s390_option_override_internal): Likewise.
+       * config/sparc/sparc.c (sparc_option_override): Likewise.
+       * config/visium/visium.c (visium_option_override): Likewise.
+       * coverage.c (get_coverage_counts): Likewise.
+       (coverage_compute_profile_id): Likewise.
+       (coverage_begin_function): Likewise.
+       (coverage_end_function): Likewise.
+       * cse.c (cse_find_path): Likewise.
+       (cse_extended_basic_block): Likewise.
+       (cse_main): Likewise.
+       * cselib.c (cselib_invalidate_mem): Likewise.
+       * dse.c (dse_step1): Likewise.
+       * emit-rtl.c (set_new_first_and_last_insn): Likewise.
+       (get_max_insn_count): Likewise.
+       (make_debug_insn_raw): Likewise.
+       (init_emit): Likewise.
+       * explow.c (compute_stack_clash_protection_loop_data): Likewise.
+       * final.c (compute_alignments): Likewise.
+       * fold-const.c (fold_range_test): Likewise.
+       (fold_truth_andor): Likewise.
+       (tree_single_nonnegative_warnv_p): Likewise.
+       (integer_valued_real_single_p): Likewise.
+       * gcse.c (want_to_gcse_p): Likewise.
+       (prune_insertions_deletions): Likewise.
+       (hoist_code): Likewise.
+       (gcse_or_cprop_is_too_expensive): Likewise.
+       * ggc-common.c: Likewise.
+       * ggc-page.c (ggc_collect): Likewise.
+       * gimple-loop-interchange.cc (MAX_NUM_STMT): Likewise.
+       (MAX_DATAREFS): Likewise.
+       (OUTER_STRIDE_RATIO): Likewise.
+       * gimple-loop-jam.c (tree_loop_unroll_and_jam): Likewise.
+       * gimple-loop-versioning.cc (loop_versioning::max_insns_for_loop): Likewise.
+       * gimple-ssa-split-paths.c (is_feasible_trace): Likewise.
+       * gimple-ssa-store-merging.c (imm_store_chain_info::try_coalesce_bswap): Likewise.
+       (imm_store_chain_info::coalesce_immediate_stores): Likewise.
+       (imm_store_chain_info::output_merged_store): Likewise.
+       (pass_store_merging::process_store): Likewise.
+       * gimple-ssa-strength-reduction.c (find_basis_for_base_expr): Likewise.
+       * graphite-isl-ast-to-gimple.c (class translate_isl_ast_to_gimple): Likewise.
+       (scop_to_isl_ast): Likewise.
+       * graphite-optimize-isl.c (get_schedule_for_node_st): Likewise.
+       (optimize_isl): Likewise.
+       * graphite-scop-detection.c (build_scops): Likewise.
+       * haifa-sched.c (set_modulo_params): Likewise.
+       (rank_for_schedule): Likewise.
+       (model_add_to_worklist): Likewise.
+       (model_promote_insn): Likewise.
+       (model_choose_insn): Likewise.
+       (queue_to_ready): Likewise.
+       (autopref_multipass_dfa_lookahead_guard): Likewise.
+       (schedule_block): Likewise.
+       (sched_init): Likewise.
+       * hsa-gen.c (init_prologue): Likewise.
+       * ifcvt.c (bb_ok_for_noce_convert_multiple_sets): Likewise.
+       (cond_move_process_if_block): Likewise.
+       * ipa-cp.c (ipcp_lattice::add_value): Likewise.
+       (merge_agg_lats_step): Likewise.
+       (devirtualization_time_bonus): Likewise.
+       (hint_time_bonus): Likewise.
+       (incorporate_penalties): Likewise.
+       (good_cloning_opportunity_p): Likewise.
+       (ipcp_propagate_stage): Likewise.
+       * ipa-fnsummary.c (decompose_param_expr): Likewise.
+       (set_switch_stmt_execution_predicate): Likewise.
+       (analyze_function_body): Likewise.
+       (compute_fn_summary): Likewise.
+       * ipa-inline-analysis.c (estimate_growth): Likewise.
+       * ipa-inline.c (caller_growth_limits): Likewise.
+       (inline_insns_single): Likewise.
+       (inline_insns_auto): Likewise.
+       (can_inline_edge_by_limits_p): Likewise.
+       (want_early_inline_function_p): Likewise.
+       (big_speedup_p): Likewise.
+       (want_inline_small_function_p): Likewise.
+       (want_inline_self_recursive_call_p): Likewise.
+       (edge_badness): Likewise.
+       (recursive_inlining): Likewise.
+       (compute_max_insns): Likewise.
+       (early_inliner): Likewise.
+       * ipa-polymorphic-call.c (csftc_abort_walking_p): Likewise.
+       * ipa-profile.c (ipa_profile): Likewise.
+       * ipa-prop.c (determine_known_aggregate_parts): Likewise.
+       (ipa_analyze_node): Likewise.
+       (ipcp_transform_function): Likewise.
+       * ipa-split.c (consider_split): Likewise.
+       * ipa-sra.c (allocate_access): Likewise.
+       (process_scan_results): Likewise.
+       (ipa_sra_summarize_function): Likewise.
+       (pull_accesses_from_callee): Likewise.
+       * ira-build.c (loop_compare_func): Likewise.
+       (mark_loops_for_removal): Likewise.
+       * ira-conflicts.c (build_conflict_bit_table): Likewise.
+       * loop-doloop.c (doloop_optimize): Likewise.
+       * loop-invariant.c (gain_for_invariant): Likewise.
+       (move_loop_invariants): Likewise.
+       * loop-unroll.c (decide_unroll_constant_iterations): Likewise.
+       (decide_unroll_runtime_iterations): Likewise.
+       (decide_unroll_stupid): Likewise.
+       (expand_var_during_unrolling): Likewise.
+       * lra-assigns.c (spill_for): Likewise.
+       * lra-constraints.c (EBB_PROBABILITY_CUTOFF): Likewise.
+       * modulo-sched.c (sms_schedule): Likewise.
+       (DFA_HISTORY): Likewise.
+       * opts.c (default_options_optimization): Likewise.
+       (finish_options): Likewise.
+       (common_handle_option): Likewise.
+       * postreload-gcse.c (eliminate_partially_redundant_load): Likewise.
+       (if): Likewise.
+       * predict.c (get_hot_bb_threshold): Likewise.
+       (maybe_hot_count_p): Likewise.
+       (probably_never_executed): Likewise.
+       (predictable_edge_p): Likewise.
+       (predict_loops): Likewise.
+       (expr_expected_value_1): Likewise.
+       (tree_predict_by_opcode): Likewise.
+       (handle_missing_profiles): Likewise.
+       * reload.c (find_equiv_reg): Likewise.
+       * reorg.c (redundant_insn): Likewise.
+       * resource.c (mark_target_live_regs): Likewise.
+       (incr_ticks_for_insn): Likewise.
+       * sanopt.c (pass_sanopt::execute): Likewise.
+       * sched-deps.c (sched_analyze_1): Likewise.
+       (sched_analyze_2): Likewise.
+       (sched_analyze_insn): Likewise.
+       (deps_analyze_insn): Likewise.
+       * sched-ebb.c (schedule_ebbs): Likewise.
+       * sched-rgn.c (find_single_block_region): Likewise.
+       (too_large): Likewise.
+       (haifa_find_rgns): Likewise.
+       (extend_rgns): Likewise.
+       (new_ready): Likewise.
+       (schedule_region): Likewise.
+       (sched_rgn_init): Likewise.
+       * sel-sched-ir.c (make_region_from_loop): Likewise.
+       * sel-sched-ir.h (MAX_WS): Likewise.
+       * sel-sched.c (process_pipelined_exprs): Likewise.
+       (sel_setup_region_sched_flags): Likewise.
+       * shrink-wrap.c (try_shrink_wrapping): Likewise.
+       * targhooks.c (default_max_noce_ifcvt_seq_cost): Likewise.
+       * toplev.c (print_version): Likewise.
+       (process_options): Likewise.
+       * tracer.c (tail_duplicate): Likewise.
+       * trans-mem.c (tm_log_add): Likewise.
+       * tree-chrec.c (chrec_fold_plus_1): Likewise.
+       * tree-data-ref.c (split_constant_offset): Likewise.
+       (compute_all_dependences): Likewise.
+       * tree-if-conv.c (MAX_PHI_ARG_NUM): Likewise.
+       * tree-inline.c (remap_gimple_stmt): Likewise.
+       * tree-loop-distribution.c (MAX_DATAREFS_NUM): Likewise.
+       * tree-parloops.c (MIN_PER_THREAD): Likewise.
+       (create_parallel_loop): Likewise.
+       * tree-predcom.c (determine_unroll_factor): Likewise.
+       * tree-scalar-evolution.c (instantiate_scev_r): Likewise.
+       * tree-sra.c (analyze_all_variable_accesses): Likewise.
+       * tree-ssa-ccp.c (fold_builtin_alloca_with_align): Likewise.
+       * tree-ssa-dse.c (setup_live_bytes_from_ref): Likewise.
+       (dse_optimize_redundant_stores): Likewise.
+       (dse_classify_store): Likewise.
+       * tree-ssa-ifcombine.c (ifcombine_ifandif): Likewise.
+       * tree-ssa-loop-ch.c (ch_base::copy_headers): Likewise.
+       * tree-ssa-loop-im.c (LIM_EXPENSIVE): Likewise.
+       * tree-ssa-loop-ivcanon.c (try_unroll_loop_completely): Likewise.
+       (try_peel_loop): Likewise.
+       (tree_unroll_loops_completely): Likewise.
+       * tree-ssa-loop-ivopts.c (avg_loop_niter): Likewise.
+       (CONSIDER_ALL_CANDIDATES_BOUND): Likewise.
+       (MAX_CONSIDERED_GROUPS): Likewise.
+       (ALWAYS_PRUNE_CAND_SET_BOUND): Likewise.
+       * tree-ssa-loop-manip.c (can_unroll_loop_p): Likewise.
+       * tree-ssa-loop-niter.c (MAX_ITERATIONS_TO_TRACK): Likewise.
+       * tree-ssa-loop-prefetch.c (PREFETCH_BLOCK): Likewise.
+       (L1_CACHE_SIZE_BYTES): Likewise.
+       (L2_CACHE_SIZE_BYTES): Likewise.
+       (should_issue_prefetch_p): Likewise.
+       (schedule_prefetches): Likewise.
+       (determine_unroll_factor): Likewise.
+       (volume_of_references): Likewise.
+       (add_subscript_strides): Likewise.
+       (self_reuse_distance): Likewise.
+       (mem_ref_count_reasonable_p): Likewise.
+       (insn_to_prefetch_ratio_too_small_p): Likewise.
+       (loop_prefetch_arrays): Likewise.
+       (tree_ssa_prefetch_arrays): Likewise.
+       * tree-ssa-loop-unswitch.c (tree_unswitch_single_loop): Likewise.
+       * tree-ssa-math-opts.c (gimple_expand_builtin_pow): Likewise.
+       (convert_mult_to_fma): Likewise.
+       (math_opts_dom_walker::after_dom_children): Likewise.
+       * tree-ssa-phiopt.c (cond_if_else_store_replacement): Likewise.
+       (hoist_adjacent_loads): Likewise.
+       (gate_hoist_loads): Likewise.
+       * tree-ssa-pre.c (translate_vuse_through_block): Likewise.
+       (compute_partial_antic_aux): Likewise.
+       * tree-ssa-reassoc.c (get_reassociation_width): Likewise.
+       * tree-ssa-sccvn.c (vn_reference_lookup_pieces): Likewise.
+       (vn_reference_lookup): Likewise.
+       (do_rpo_vn): Likewise.
+       * tree-ssa-scopedtables.c (avail_exprs_stack::lookup_avail_expr): Likewise.
+       * tree-ssa-sink.c (select_best_block): Likewise.
+       * tree-ssa-strlen.c (new_stridx): Likewise.
+       (new_addr_stridx): Likewise.
+       (get_range_strlen_dynamic): Likewise.
+       (class ssa_name_limit_t): Likewise.
+       * tree-ssa-structalias.c (push_fields_onto_fieldstack): Likewise.
+       (create_variable_info_for_1): Likewise.
+       (init_alias_vars): Likewise.
+       * tree-ssa-tail-merge.c (find_clusters_1): Likewise.
+       (tail_merge_optimize): Likewise.
+       * tree-ssa-threadbackward.c (thread_jumps::profitable_jump_thread_path): Likewise.
+       (thread_jumps::fsm_find_control_statement_thread_paths): Likewise.
+       (thread_jumps::find_jump_threads_backwards): Likewise.
+       * tree-ssa-threadedge.c (record_temporary_equivalences_from_stmts_at_dest): Likewise.
+       * tree-ssa-uninit.c (compute_control_dep_chain): Likewise.
+       * tree-switch-conversion.c (switch_conversion::check_range): Likewise.
+       (jump_table_cluster::can_be_handled): Likewise.
+       * tree-switch-conversion.h (jump_table_cluster::case_values_threshold): Likewise.
+       (SWITCH_CONVERSION_BRANCH_RATIO): Likewise.
+       (param_switch_conversion_branch_ratio): Likewise.
+       * tree-vect-data-refs.c (vect_mark_for_runtime_alias_test): Likewise.
+       (vect_enhance_data_refs_alignment): Likewise.
+       (vect_prune_runtime_alias_test_list): Likewise.
+       * tree-vect-loop.c (vect_analyze_loop_costing): Likewise.
+       (vect_get_datarefs_in_loop): Likewise.
+       (vect_analyze_loop): Likewise.
+       * tree-vect-slp.c (vect_slp_bb): Likewise.
+       * tree-vectorizer.h: Likewise.
+       * tree-vrp.c (find_switch_asserts): Likewise.
+       (vrp_prop::check_mem_ref): Likewise.
+       * tree.c (wide_int_to_tree_1): Likewise.
+       (cache_integer_cst): Likewise.
+       * var-tracking.c (EXPR_USE_DEPTH): Likewise.
+       (reverse_op): Likewise.
+       (vt_find_locations): Likewise.
+
 2019-11-12  Martin Liska  <mliska@suse.cz>
 
        * Makefile.in: Include params.opt.
index a731bd490b4e78e916ae20fc9a0249c1fc04daa5..5ae669429415bdd73cbce2918b88c8152f0e053d 100644 (file)
@@ -309,13 +309,13 @@ asan_mark_p (gimple *stmt, enum asan_mark_flags flag)
 bool
 asan_sanitize_stack_p (void)
 {
-  return (sanitize_flags_p (SANITIZE_ADDRESS) && ASAN_STACK);
+  return (sanitize_flags_p (SANITIZE_ADDRESS) && param_asan_stack);
 }
 
 bool
 asan_sanitize_allocas_p (void)
 {
-  return (asan_sanitize_stack_p () && ASAN_PROTECT_ALLOCAS);
+  return (asan_sanitize_stack_p () && param_asan_protect_allocas);
 }
 
 /* Checks whether section SEC should be sanitized.  */
@@ -1429,7 +1429,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 
   /* Emit the prologue sequence.  */
   if (asan_frame_size > 32 && asan_frame_size <= 65536 && pbase
-      && ASAN_USE_AFTER_RETURN)
+      && param_asan_use_after_return)
     {
       use_after_return_class = floor_log2 (asan_frame_size - 1) - 5;
       /* __asan_stack_malloc_N guarantees alignment
@@ -1750,7 +1750,7 @@ is_odr_indicator (tree decl)
 bool
 asan_protect_global (tree decl, bool ignore_decl_rtl_set_p)
 {
-  if (!ASAN_GLOBALS)
+  if (!param_asan_globals)
     return false;
 
   rtx rtl, symbol;
@@ -2190,9 +2190,9 @@ static void
 instrument_derefs (gimple_stmt_iterator *iter, tree t,
                   location_t location, bool is_store)
 {
-  if (is_store && !ASAN_INSTRUMENT_WRITES)
+  if (is_store && !param_asan_instrument_writes)
     return;
-  if (!is_store && !ASAN_INSTRUMENT_READS)
+  if (!is_store && !param_asan_instrument_reads)
     return;
 
   tree type, base;
@@ -2253,7 +2253,7 @@ instrument_derefs (gimple_stmt_iterator *iter, tree t,
     {
       if (DECL_THREAD_LOCAL_P (inner))
        return;
-      if (!ASAN_GLOBALS && is_global_var (inner))
+      if (!param_asan_globals && is_global_var (inner))
         return;
       if (!TREE_STATIC (inner))
        {
@@ -2346,7 +2346,7 @@ instrument_mem_region_access (tree base, tree len,
 static bool
 instrument_builtin_call (gimple_stmt_iterator *iter)
 {
-  if (!ASAN_MEMINTRIN)
+  if (!param_asan_memintrin)
     return false;
 
   bool iter_advanced_p = false;
@@ -3219,7 +3219,8 @@ asan_expand_mark_ifn (gimple_stmt_iterator *iter)
   tree base_addr = gimple_assign_lhs (g);
 
   /* Generate direct emission if size_in_bytes is small.  */
-  if (size_in_bytes <= ASAN_PARAM_USE_AFTER_SCOPE_DIRECT_EMISSION_THRESHOLD)
+  if (size_in_bytes
+      <= (unsigned)param_use_after_scope_direct_emission_threshold)
     {
       const unsigned HOST_WIDE_INT shadow_size
        = shadow_mem_size (size_in_bytes);
index 4d8fb525cb172e3f0950b636003e84833dd2f5ec..92e980f68a204cd598e65aa80badbbdda48c5f95 100644 (file)
@@ -1629,7 +1629,7 @@ auto_profile (void)
        function before annotation, so the profile inside bar@loc_foo2
        will be useful.  */
     autofdo::stmt_set promoted_stmts;
-    for (int i = 0; i < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS); i++)
+    for (int i = 0; i < param_early_inliner_max_iterations; i++)
       {
         if (!flag_value_profile_transformations
             || !autofdo::afdo_vpt_for_early_inline (&promoted_stmts))
index 0ac39140c6ce3db8499f99cd8f483218888de61b..6a85c2a7fc0e1ffca0d22a50c2630d3269ccb02d 100644 (file)
@@ -1371,7 +1371,7 @@ copy_bb_p (const_basic_block bb, int code_may_grow)
     return false;
 
   if (code_may_grow && optimize_bb_for_speed_p (bb))
-    max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
+    max_size *= param_max_grow_copy_bb_insns;
 
   FOR_BB_INSNS (bb, insn)
     {
@@ -2751,7 +2751,7 @@ duplicate_computed_gotos (function *fun)
 
   /* Never copy a block larger than this.  */
   int max_size
-    = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
+    = uncond_jump_length * param_max_goto_duplication_insns;
 
   bool changed = false;
 
index 245fad02d9c6638c0aae47d2ef31150fb7b342d8..68baeb9bbe9dfc4f64079c532592d78b07ace3b0 100644 (file)
@@ -7214,7 +7214,7 @@ inline_expand_builtin_string_cmp (tree exp, rtx target)
   /* If the length of the comparision is larger than the threshold,
      do nothing.  */
   if (length > (unsigned HOST_WIDE_INT)
-              PARAM_VALUE (BUILTIN_STRING_CMP_INLINE_LENGTH))
+              param_builtin_string_cmp_inline_length)
     return NULL_RTX;
 
   machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
index a7d86a20c746b1591845d68b11722189cf3b4b37..abd104824ed4b97649976f134e4db7b32c4baf5a 100644 (file)
@@ -1,3 +1,9 @@
+2019-11-12  Martin Liska  <mliska@suse.cz>
+
+       * gimple-parser.c (c_parser_parse_gimple_body): Replace old parameter syntax
+       with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
+       macro.
+
 2019-11-12  Maciej W. Rozycki  <macro@codesourcery.com>
            Frederik Harwath  <frederik@codesourcery.com>
 
index ceec758ffbea9f755f2ba2974ef07ef2b9d03a6c..e40cfa2ec01af4e3fe199d7bec6d33bc21a8de2b 100644 (file)
@@ -354,7 +354,7 @@ c_parser_parse_gimple_body (c_parser *cparser, char *gimple_pass,
   if (cfun->curr_properties & PROP_cfg)
     {
       ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = entry_bb_count;
-      gcov_type t = PARAM_VALUE (PARAM_GIMPLE_FE_COMPUTED_HOT_BB_THRESHOLD);
+      gcov_type t = param_gimple_fe_computed_hot_bb_threshold;
       set_hot_bb_threshold (t);
       update_max_bb_count ();
       cgraph_node::get_create (cfun->decl);
index 835f7d79ea41409dca188b2362bb5cce4b6e99c3..7b1dd245487eedc01171891f4648a4e31fb3f2b5 100644 (file)
@@ -2022,7 +2022,7 @@ try_crossjump_to_edge (int mode, edge e1, edge e2,
      of matching instructions or the 'from' block was totally matched
      (such that its predecessors will hopefully be redirected and the
      block removed).  */
-  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
+  if ((nmatch < param_min_crossjump_insns)
       && (newpos1 != BB_HEAD (src1)))
     return false;
 
@@ -2215,7 +2215,7 @@ try_crossjump_bb (int mode, basic_block bb)
      a block that falls through into BB, as that adds no branches to the
      program.  We'll try that combination first.  */
   fallthru = NULL;
-  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
+  max = param_max_crossjump_edges;
 
   if (EDGE_COUNT (bb->preds) > max)
     return false;
index c34a53b526b50d49cd73ab5a5c383efc6da5a23e..5fed0738211f8017e22a1d46d903c9ff11fe76f5 100644 (file)
@@ -1548,7 +1548,7 @@ defer_stack_allocation (tree var, bool toplevel)
   bool smallish
     = (poly_int_tree_p (size_unit, &size)
        && (estimated_poly_value (size)
-          < PARAM_VALUE (PARAM_MIN_SIZE_FOR_STACK_SHARING)));
+          < param_min_size_for_stack_sharing));
 
   /* If stack protection is enabled, *all* stack variables must be deferred,
      so that we can re-order the strings to the top of the frame.
@@ -1788,7 +1788,7 @@ stack_protect_classify_type (tree type)
          || t == signed_char_type_node
          || t == unsigned_char_type_node)
        {
-         unsigned HOST_WIDE_INT max = PARAM_VALUE (PARAM_SSP_BUFFER_SIZE);
+         unsigned HOST_WIDE_INT max = param_ssp_buffer_size;
          unsigned HOST_WIDE_INT len;
 
          if (!TYPE_SIZE_UNIT (type)
@@ -6435,7 +6435,7 @@ pass_expand::execute (function *fun)
        warning (OPT_Wstack_protector,
                 "stack protector not protecting function: "
                 "all local arrays are less than %d bytes long",
-                (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
+                (int) param_ssp_buffer_size);
     }
 
   /* Set up parameters and prepare for return, for the function.  */
@@ -6545,7 +6545,7 @@ pass_expand::execute (function *fun)
 
   /* If the function has too many markers, drop them while expanding.  */
   if (cfun->debug_marker_count
-      >= PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
+      >= param_max_debug_marker_count)
     cfun->debug_nonbind_markers = false;
 
   lab_rtx_for_bb = new hash_map<basic_block, rtx_code_label *>;
index 95ec929c7bde1ba4ce563a9717f3426dcc55db86..84516efcfb6d32be15932a004caae3e84be8deb8 100644 (file)
@@ -256,7 +256,7 @@ expected_loop_iterations_unbounded (const class loop *loop,
     {
       if (by_profile_only)
        return -1;
-      expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+      expected = param_avg_loop_niter;
     }
   else if (loop->latch && (loop->latch->count.initialized_p ()
                           || loop->header->count.initialized_p ()))
@@ -274,7 +274,7 @@ expected_loop_iterations_unbounded (const class loop *loop,
        {
           if (by_profile_only)
            return -1;
-         expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+         expected = param_avg_loop_niter;
        }
       else if (!count_in.nonzero_p ())
        {
@@ -295,7 +295,7 @@ expected_loop_iterations_unbounded (const class loop *loop,
     {
       if (by_profile_only)
        return -1;
-      expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+      expected = param_avg_loop_niter;
     }
 
   if (!by_profile_only)
@@ -427,7 +427,7 @@ estimate_reg_pressure_cost (unsigned n_new, unsigned n_old, bool speed,
 
   if (optimize && (flag_ira_region == IRA_REGION_ALL
                   || flag_ira_region == IRA_REGION_MIXED)
-      && number_of_loops (cfun) <= (unsigned) IRA_MAX_LOOPS_NUM)
+      && number_of_loops (cfun) <= (unsigned) param_ira_max_loops_num)
     /* IRA regional allocation deals with high register pressure
        better.  So decrease the cost (to do more accurate the cost
        calculation for IRA, we need to know how many registers lives
index aa54e955c76c4958b73f29628610ab65b87bfcaf..5497235b7d43b95d976f262ba554d932e6f16c2d 100644 (file)
@@ -2707,7 +2707,7 @@ cgraph_edge::maybe_hot_p (void)
       if (count.apply_scale (2, 1) < where->count.apply_scale (3, 1))
        return false;
     }
-  else if (count.apply_scale (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION), 1)
+  else if (count.apply_scale (param_hot_bb_frequency_fraction, 1)
           < where->count)
     return false;
   return true;
index 857ea30dafd917445220de42a845ff5c65275e8b..ae3bc468910d1b2edc7794471fa3c49ca0be2791 100644 (file)
@@ -1251,7 +1251,7 @@ combine_instructions (rtx_insn *f, unsigned int nregs)
   init_reg_last ();
   setup_incoming_promotions (first);
   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
-  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
+  int max_combine = param_max_combine_insns;
 
   FOR_EACH_BB_FN (this_basic_block, cfun)
     {
@@ -13282,7 +13282,7 @@ record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
            {
              /* If there are two or more occurrences of REG in VALUE,
                 prevent the value from growing too much.  */
-             if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
+             if (count_rtxs (tem) > param_max_last_value_rtl)
                tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
            }
 
index 07c032539513eec8c9e7f800c35454d81d627301..adb3ff71af8e3094eb66bf4e02023a3dd41d98d3 100644 (file)
@@ -73,7 +73,7 @@ static bool
 aarch64_option_validate_param (const int value, const int param)
 {
   /* Check that both parameters are the same.  */
-  if (param == (int) PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE)
+  if (param == param_stack_clash_protection_guard_size)
     {
       if (value != 12 && value != 16)
        {
@@ -93,18 +93,15 @@ static void
 aarch64_option_default_params (void)
 {
   /* We assume the guard page is 64k.  */
-  int index = (int) PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE;
-  set_default_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
-                          DEFAULT_STK_CLASH_GUARD_SIZE == 0
-                            ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE);
+  int index = (int) param_stack_clash_protection_guard_size;
+  param_stack_clash_protection_guard_size
+    = (DEFAULT_STK_CLASH_GUARD_SIZE == 0 ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE);
 
-  int guard_size
-    = default_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+  int guard_size = param_stack_clash_protection_guard_size;
 
   /* Set the interval parameter to be the same as the guard size.  This way the
      mid-end code does the right thing for us.  */
-  set_default_param_value (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL,
-                          guard_size);
+  param_stack_clash_protection_probe_interval = guard_size;
 
   /* Validate the options.  */
   aarch64_option_validate_param (guard_size, index);
index 02e297ad69bbc3591a15b7e4373986c71fef51db..0a187160fd246193946d8469c29d59ce798915e4 100644 (file)
@@ -88,13 +88,13 @@ static void
 ia64_option_default_params (void)
 {
   /* Let the scheduler form additional regions.  */
-  set_default_param_value (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS, 2);
+  param_max_sched_extend_regions_iters = 2;
 
   /* Set the default values for cache-related parameters.  */
-  set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6);
-  set_default_param_value (PARAM_L1_CACHE_LINE_SIZE, 32);
+  param_simultaneous_prefetches = 6;
+  param_l1_cache_line_size = 32;
 
-  set_default_param_value (PARAM_SCHED_MEM_TRUE_DEP_COST, 4);
+  param_sched_mem_true_dep_cost = 4;
 }
 
 #undef TARGET_OPTION_OPTIMIZATION_TABLE
index c949a601f578c2fb7dabbe8a80250c922eaf5c3d..7043a4bda31d078aacbff9abc36790e848a915ac 100644 (file)
@@ -57,7 +57,7 @@ static void
 rs6000_option_default_params (void)
 {
   /* Double growth factor to counter reduced min jump length.  */
-  set_default_param_value (PARAM_MAX_GROW_COPY_BB_INSNS, 16);
+  param_max_grow_copy_bb_insns = 16;
 }
 
 /* If not otherwise specified by a target, make 'long double' equivalent to
index 9dc7ae87f434fdfad5190bb423801420e3fa6b7f..1d39912443b61092c2e93fafaae7a0feaef2ca4d 100644 (file)
@@ -76,7 +76,7 @@ static void
 rs6000_option_default_params (void)
 {
   /* Double growth factor to counter reduced min jump length.  */
-  set_default_param_value (PARAM_MAX_GROW_COPY_BB_INSNS, 16);
+  param_max_grow_copy_bb_insns = 16;
 }
 
 /* If not otherwise specified by a target, make 'long double' equivalent to
index 4a92146f0af003c93645fdda6f50ff853bf30158..e6ecc3a632a0ccc3d322e32b7a90d61e8448562f 100644 (file)
@@ -149,7 +149,7 @@ sh_handle_option (struct gcc_options *opts,
 static void
 sh_option_default_params (void)
 {
-  set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 2);
+  param_simultaneous_prefetches = 2;
 }
 
 #undef TARGET_OPTION_OPTIMIZATION_TABLE
index 1dfff331a5a2a1bc5eeba5a58e41f4bf2ad22468..c4783861c5d7db5de85854b6160568d2d549a2d4 100644 (file)
@@ -5589,7 +5589,7 @@ aarch64_output_probe_stack_range (rtx reg1, rtx reg2)
   ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
 
   HOST_WIDE_INT stack_clash_probe_interval
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+    = 1 << param_stack_clash_protection_guard_size;
 
   /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
   xops[0] = reg1;
@@ -6842,7 +6842,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
                                        bool final_adjustment_p)
 {
   HOST_WIDE_INT guard_size
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+    = 1 << param_stack_clash_protection_guard_size;
   HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
   HOST_WIDE_INT min_probe_threshold
     = (final_adjustment_p
@@ -7364,7 +7364,7 @@ aarch64_expand_epilogue (bool for_sibcall)
      for each allocation.  For stack clash we are in a usable state if
      the adjustment is less than GUARD_SIZE - GUARD_USED_BY_CALLER.  */
   HOST_WIDE_INT guard_size
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+    = 1 << param_stack_clash_protection_guard_size;
   HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
 
   /* We can re-use the registers when:
@@ -13306,73 +13306,62 @@ aarch64_override_options_internal (struct gcc_options *opts)
 
   /* We don't mind passing in global_options_set here as we don't use
      the *options_set structs anyway.  */
-  maybe_set_param_value (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH,
-                        queue_depth,
-                        opts->x_param_values,
-                        global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (opts, &global_options_set,
+                      param_sched_autopref_queue_depth, queue_depth);
 
   /* Set up parameters to be used in prefetching algorithm.  Do not
      override the defaults unless we are tuning for a core we have
      researched values for.  */
   if (aarch64_tune_params.prefetch->num_slots > 0)
-    maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
-                          aarch64_tune_params.prefetch->num_slots,
-                          opts->x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+                        param_simultaneous_prefetches,
+                        aarch64_tune_params.prefetch->num_slots);
   if (aarch64_tune_params.prefetch->l1_cache_size >= 0)
-    maybe_set_param_value (PARAM_L1_CACHE_SIZE,
-                          aarch64_tune_params.prefetch->l1_cache_size,
-                          opts->x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+                        param_l1_cache_size,
+                        aarch64_tune_params.prefetch->l1_cache_size);
   if (aarch64_tune_params.prefetch->l1_cache_line_size >= 0)
-    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
-                          aarch64_tune_params.prefetch->l1_cache_line_size,
-                          opts->x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+                        param_l1_cache_line_size,
+                        aarch64_tune_params.prefetch->l1_cache_line_size);
   if (aarch64_tune_params.prefetch->l2_cache_size >= 0)
-    maybe_set_param_value (PARAM_L2_CACHE_SIZE,
-                          aarch64_tune_params.prefetch->l2_cache_size,
-                          opts->x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+                        param_l2_cache_size,
+                        aarch64_tune_params.prefetch->l2_cache_size);
   if (!aarch64_tune_params.prefetch->prefetch_dynamic_strides)
-    maybe_set_param_value (PARAM_PREFETCH_DYNAMIC_STRIDES,
-                          0,
-                          opts->x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+                        param_prefetch_dynamic_strides, 0);
   if (aarch64_tune_params.prefetch->minimum_stride >= 0)
-    maybe_set_param_value (PARAM_PREFETCH_MINIMUM_STRIDE,
-                          aarch64_tune_params.prefetch->minimum_stride,
-                          opts->x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+                        param_prefetch_minimum_stride,
+                        aarch64_tune_params.prefetch->minimum_stride);
 
   /* Use the alternative scheduling-pressure algorithm by default.  */
-  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, SCHED_PRESSURE_MODEL,
-                        opts->x_param_values,
-                        global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (opts, &global_options_set,
+                      param_sched_pressure_algorithm,
+                      SCHED_PRESSURE_MODEL);
 
   /* If the user hasn't changed it via configure then set the default to 64 KB
      for the backend.  */
-  maybe_set_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
-                        DEFAULT_STK_CLASH_GUARD_SIZE == 0
-                          ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE,
-                        opts->x_param_values,
-                        global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (opts, &global_options_set,
+                      param_stack_clash_protection_guard_size,
+                      (DEFAULT_STK_CLASH_GUARD_SIZE == 0
+                       ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE));
 
   /* Validate the guard size.  */
-  int guard_size = PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+  int guard_size = param_stack_clash_protection_guard_size;
 
   /* Enforce that interval is the same size as size so the mid-end does the
      right thing.  */
-  maybe_set_param_value (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL,
-                        guard_size,
-                        opts->x_param_values,
-                        global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (opts, &global_options_set,
+                      param_stack_clash_protection_probe_interval,
+                      guard_size);
 
   /* The maybe_set calls won't update the value if the user has explicitly set
      one.  Which means we need to validate that probing interval and guard size
      are equal.  */
   int probe_interval
-    = PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+    = param_stack_clash_protection_probe_interval;
   if (guard_size != probe_interval)
     error ("stack clash guard size %<%d%> must be equal to probing interval "
           "%<%d%>", guard_size, probe_interval);
index a7d5454b574b89379b81d23aba49ead1a205f21b..8f389ead32d16d7fad215fe17005a8e718c6844f 100644 (file)
@@ -68,6 +68,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "builtins.h"
 #include "rtl-iter.h"
 #include "flags.h"
+#include "opts.h"
 
 /* This file should be included last.  */
 #include "target-def.h"
@@ -484,17 +485,14 @@ alpha_option_override (void)
     }
 
   if (line_size)
-    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
-                          global_options.x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                        param_l1_cache_line_size, line_size);
   if (l1_size)
-    maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
-                          global_options.x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                        param_l1_cache_size, l1_size);
   if (l2_size)
-    maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
-                          global_options.x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                        param_l2_cache_size, l2_size);
 
   /* Do some sanity checks on the above options.  */
 
index 7c9cdbd7a463310ae4c9bccf9930d74ec5726488..ca7fd9020b89fdba4e8af9cf6e16fa7f6c2b02fd 100644 (file)
@@ -3524,9 +3524,8 @@ arm_option_override (void)
        but measurable, size reduction for PIC code.  Therefore, we decrease
        the bar for unrestricted expression hoisting to the cost of PIC address
        calculation, which is 2 instructions.  */
-    maybe_set_param_value (PARAM_GCSE_UNRESTRICTED_COST, 2,
-                          global_options.x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                        param_gcse_unrestricted_cost, 2);
 
   /* ARM EABI defaults to strict volatile bitfields.  */
   if (TARGET_AAPCS_BASED && flag_strict_volatile_bitfields < 0
@@ -3546,47 +3545,43 @@ arm_option_override (void)
      override the defaults unless we are tuning for a core we have
      researched values for.  */
   if (current_tune->prefetch.num_slots > 0)
-    maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
-                          current_tune->prefetch.num_slots,
-                          global_options.x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                        param_simultaneous_prefetches,
+                        current_tune->prefetch.num_slots);
   if (current_tune->prefetch.l1_cache_line_size >= 0)
-    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
-                          current_tune->prefetch.l1_cache_line_size,
-                          global_options.x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                        param_l1_cache_line_size,
+                        current_tune->prefetch.l1_cache_line_size);
   if (current_tune->prefetch.l1_cache_size >= 0)
-    maybe_set_param_value (PARAM_L1_CACHE_SIZE,
-                          current_tune->prefetch.l1_cache_size,
-                          global_options.x_param_values,
-                          global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                        param_l1_cache_size,
+                        current_tune->prefetch.l1_cache_size);
 
   /* Look through ready list and all of queue for instructions
      relevant for L2 auto-prefetcher.  */
-  int param_sched_autopref_queue_depth;
+  int sched_autopref_queue_depth;
 
   switch (current_tune->sched_autopref)
     {
     case tune_params::SCHED_AUTOPREF_OFF:
-      param_sched_autopref_queue_depth = -1;
+      sched_autopref_queue_depth = -1;
       break;
 
     case tune_params::SCHED_AUTOPREF_RANK:
-      param_sched_autopref_queue_depth = 0;
+      sched_autopref_queue_depth = 0;
       break;
 
     case tune_params::SCHED_AUTOPREF_FULL:
-      param_sched_autopref_queue_depth = max_insn_queue_index + 1;
+      sched_autopref_queue_depth = max_insn_queue_index + 1;
       break;
 
     default:
       gcc_unreachable ();
     }
 
-  maybe_set_param_value (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH,
-                        param_sched_autopref_queue_depth,
-                        global_options.x_param_values,
-                        global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                      param_sched_autopref_queue_depth,
+                      sched_autopref_queue_depth);
 
   /* Currently, for slow flash data, we just disable literal pools.  We also
      disable it for pure-code.  */
@@ -31748,8 +31743,6 @@ arm_valid_target_attribute_p (tree fndecl, tree ARG_UNUSED (name),
 
   DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
 
-  finalize_options_struct (&func_options);
-
   return ret;
 }
 
index dfc8ae23ba0b2692cd04f9f185d9e00888106059..72cd6dcc98ce7148ff8350f6fa153ebb35d15693 100644 (file)
@@ -2618,22 +2618,14 @@ ix86_option_override_internal (bool main_args_p,
   if (!TARGET_SCHEDULE)
     opts->x_flag_schedule_insns_after_reload = opts->x_flag_schedule_insns = 0;
 
-  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
-                        ix86_tune_cost->simultaneous_prefetches,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
-  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
-                        ix86_tune_cost->prefetch_block,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
-  maybe_set_param_value (PARAM_L1_CACHE_SIZE,
-                        ix86_tune_cost->l1_cache_size,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
-  maybe_set_param_value (PARAM_L2_CACHE_SIZE,
-                        ix86_tune_cost->l2_cache_size,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_simultaneous_prefetches,
+                      ix86_tune_cost->simultaneous_prefetches);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size,
+                      ix86_tune_cost->prefetch_block);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size,
+                      ix86_tune_cost->l1_cache_size);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size,
+                      ix86_tune_cost->l2_cache_size);
 
   /* Enable sw prefetching at -O3 for CPUS that prefetching is helpful.  */
   if (opts->x_flag_prefetch_loop_arrays < 0
@@ -2868,13 +2860,9 @@ ix86_option_override_internal (bool main_args_p,
       = (cf_protection_level) (opts->x_flag_cf_protection | CF_SET);
 
   if (ix86_tune_features [X86_TUNE_AVOID_256FMA_CHAINS])
-    maybe_set_param_value (PARAM_AVOID_FMA_MAX_BITS, 256,
-                          opts->x_param_values,
-                          opts_set->x_param_values);
+    SET_OPTION_IF_UNSET (opts, opts_set, param_avoid_fma_max_bits, 256);
   else if (ix86_tune_features [X86_TUNE_AVOID_128FMA_CHAINS])
-    maybe_set_param_value (PARAM_AVOID_FMA_MAX_BITS, 128,
-                          opts->x_param_values,
-                          opts_set->x_param_values);
+    SET_OPTION_IF_UNSET (opts, opts_set, param_avoid_fma_max_bits, 128);
 
   /* PR86952: jump table usage with retpolines is slow.
      The PR provides some numbers about the slowness.  */
index 03a7082d2fc3e66b36760154b1c376e8bfa2cfad..f775697f982671529a0becdca3942ce60a98ef21 100644 (file)
@@ -5773,7 +5773,7 @@ get_probe_interval (void)
 {
   if (flag_stack_clash_protection)
     return (HOST_WIDE_INT_1U
-           << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+           << param_stack_clash_protection_probe_interval);
   else
     return (HOST_WIDE_INT_1U << STACK_CHECK_PROBE_INTERVAL_EXP);
 }
@@ -6942,7 +6942,7 @@ ix86_adjust_stack_and_probe_stack_clash (HOST_WIDE_INT size,
   /* If we allocate less than the size of the guard statically,
      then no probing is necessary, but we do need to allocate
      the stack.  */
-  if (size < (1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE)))
+  if (size < (1 << param_stack_clash_protection_guard_size))
     {
       pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                 GEN_INT (-size), -1,
@@ -21468,18 +21468,18 @@ static unsigned int
 ix86_max_noce_ifcvt_seq_cost (edge e)
 {
   bool predictable_p = predictable_edge_p (e);
-
-  enum compiler_param param
-    = (predictable_p
-       ? PARAM_MAX_RTL_IF_CONVERSION_PREDICTABLE_COST
-       : PARAM_MAX_RTL_IF_CONVERSION_UNPREDICTABLE_COST);
-
-  /* If we have a parameter set, use that, otherwise take a guess using
-     BRANCH_COST.  */
-  if (global_options_set.x_param_values[param])
-    return PARAM_VALUE (param);
+  if (predictable_p)
+    {
+      if (global_options_set.x_param_max_rtl_if_conversion_predictable_cost)
+       return param_max_rtl_if_conversion_predictable_cost;
+    }
   else
-    return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (2);
+    {
+      if (global_options_set.x_param_max_rtl_if_conversion_unpredictable_cost)
+       return param_max_rtl_if_conversion_unpredictable_cost;
+    }
+
+  return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (2);
 }
 
 /* Return true if SEQ is a good candidate as a replacement for the
index 7697e907aea6ed8ed29610760b63848710c38312..44f7f2eea06d4a74ece28cba96921a135dc7ef3d 100644 (file)
@@ -7307,7 +7307,7 @@ ia64_adjust_cost (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn,
 
   if (dw == MIN_DEP_WEAK)
     /* Store and load are likely to alias, use higher cost to avoid stall.  */
-    return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
+    return param_sched_mem_true_dep_cost;
   else if (dw > MIN_DEP_WEAK)
     {
       /* Store and load are less likely to alias.  */
index 04aae8052dbf46d0cbf1dd61e457c973aafae17a..f0fd2065c02218647f7a79a3e418428ec0ba31fd 100644 (file)
@@ -1515,14 +1515,14 @@ static HOST_WIDE_INT
 get_stack_clash_protection_probe_interval (void)
 {
   return (HOST_WIDE_INT_1U
-         << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+         << param_stack_clash_protection_probe_interval);
 }
 
 static HOST_WIDE_INT
 get_stack_clash_protection_guard_size (void)
 {
   return (HOST_WIDE_INT_1U
-         << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
+         << param_stack_clash_protection_guard_size);
 }
 
 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
index 32101b77ea3777f95bc9e1fbf42b0131931432d2..4c830fcfba3954592f0161cc7c02f15975988ff7 100644 (file)
@@ -80,6 +80,7 @@
 #include "tree-vrp.h"
 #include "tree-ssanames.h"
 #include "rs6000-internal.h"
+#include "opts.h"
 
 /* This file should be included last.  */
 #include "target-def.h"
@@ -4514,34 +4515,29 @@ rs6000_option_override_internal (bool global_init_p)
 
   if (global_init_p)
     {
-      maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
-                            rs6000_cost->simultaneous_prefetches,
-                            global_options.x_param_values,
-                            global_options_set.x_param_values);
-      maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
-                            global_options.x_param_values,
-                            global_options_set.x_param_values);
-      maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
-                            rs6000_cost->cache_line_size,
-                            global_options.x_param_values,
-                            global_options_set.x_param_values);
-      maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
-                            global_options.x_param_values,
-                            global_options_set.x_param_values);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                          param_simultaneous_prefetches,
+                          rs6000_cost->simultaneous_prefetches);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                          param_l1_cache_size,
+                          rs6000_cost->l1_cache_size);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                          param_l1_cache_line_size,
+                          rs6000_cost->cache_line_size);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                          param_l2_cache_size,
+                          rs6000_cost->l2_cache_size);
 
       /* Increase loop peeling limits based on performance analysis. */
-      maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
-                            global_options.x_param_values,
-                            global_options_set.x_param_values);
-      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
-                            global_options.x_param_values,
-                            global_options_set.x_param_values);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                          param_max_peeled_insns, 400);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                          param_max_completely_peeled_insns, 400);
 
       /* Use the 'model' -fsched-pressure algorithm by default.  */
-      maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
-                            SCHED_PRESSURE_MODEL,
-                            global_options.x_param_values,
-                            global_options_set.x_param_values);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                          param_sched_pressure_algorithm,
+                          SCHED_PRESSURE_MODEL);
 
       /* Explicit -funroll-loops turns -munroll-only-small-loops off.  */
       if (((global_options_set.x_flag_unroll_loops && flag_unroll_loops)
index ff0b43c2c29b0fcad9b4806379999cf46d59bc45..b3a75222ac2ef5978540c270d9b6199cedb87235 100644 (file)
@@ -10968,9 +10968,9 @@ allocate_stack_space (rtx size, HOST_WIDE_INT last_probe_offset,
 {
   bool temp_reg_clobbered_p = false;
   HOST_WIDE_INT probe_interval
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+    = 1 << param_stack_clash_protection_probe_interval;
   HOST_WIDE_INT guard_size
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+    = 1 << param_stack_clash_protection_guard_size;
 
   if (flag_stack_clash_protection)
     {
@@ -11086,7 +11086,7 @@ s390_emit_prologue (void)
      only exception is when TARGET_BACKCHAIN is active, in which case
      we know *sp (offset 0) was written.  */
   HOST_WIDE_INT probe_interval
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+    = 1 << param_stack_clash_protection_probe_interval;
   HOST_WIDE_INT last_probe_offset
     = (TARGET_BACKCHAIN
        ? (TARGET_PACKED_STACK ? STACK_POINTER_OFFSET - UNITS_PER_LONG : 0)
@@ -15264,10 +15264,8 @@ s390_option_override_internal (struct gcc_options *opts,
      displacements.  Trim that value down to 4k if that happens.  This
      might result in too many probes being generated only on the
      oldest supported machine level z900.  */
-  if (!DISP_IN_RANGE ((1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL))))
-    set_param_value ("stack-clash-protection-probe-interval", 12,
-                    opts->x_param_values,
-                    opts_set->x_param_values);
+  if (!DISP_IN_RANGE ((1 << param_stack_clash_protection_probe_interval)))
+    param_stack_clash_protection_probe_interval = 12;
 
 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
   if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
@@ -15276,62 +15274,37 @@ s390_option_override_internal (struct gcc_options *opts,
 
   if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
     {
-      maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
-                            opts->x_param_values,
-                            opts_set->x_param_values);
-      maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
-                            opts->x_param_values,
-                            opts_set->x_param_values);
-      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
-                            opts->x_param_values,
-                            opts_set->x_param_values);
-      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
-                            opts->x_param_values,
-                            opts_set->x_param_values);
-    }
-
-  maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_max_unrolled_insns,
+                          100);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_max_unroll_times, 32);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_max_completely_peeled_insns,
+                          2000);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_max_completely_peel_times,
+                          64);
+    }
+
+  SET_OPTION_IF_UNSET (opts, opts_set, param_max_pending_list_length,
+                      256);
   /* values for loop prefetching */
-  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
-  maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size, 256);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size, 128);
   /* s390 has more than 2 levels and the size is much larger.  Since
      we are always running virtualized assume that we only get a small
      part of the caches above l1.  */
-  maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
-  maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
-  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size, 1500);
+  SET_OPTION_IF_UNSET (opts, opts_set,
+                      param_prefetch_min_insn_to_mem_ratio, 2);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_simultaneous_prefetches, 6);
 
   /* Use the alternative scheduling-pressure algorithm by default.  */
-  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
-
-  maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
-                        opts->x_param_values,
-                        opts_set->x_param_values);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_sched_pressure_algorithm, 2);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_min_vect_loop_bound, 2);
 
   /* Use aggressive inlining parameters.  */
   if (opts->x_s390_tune >= PROCESSOR_2964_Z13)
     {
-      maybe_set_param_value (PARAM_INLINE_MIN_SPEEDUP, 2,
-                            opts->x_param_values,
-                            opts_set->x_param_values);
-
-      maybe_set_param_value (PARAM_MAX_INLINE_INSNS_AUTO, 80,
-                            opts->x_param_values,
-                            opts_set->x_param_values);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_inline_min_speedup, 2);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_auto, 80);
     }
 
   /* Set the default alignment.  */
index 75b3d4ef499dedacbebcdcc3a3048a756cf25dfb..50385518b1bd6a73309be397b1a7ce498f282620 100644 (file)
@@ -61,6 +61,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "context.h"
 #include "builtins.h"
 #include "tree-vector-builder.h"
+#include "opts.h"
 
 /* This file should be included last.  */
 #include "target-def.h"
@@ -2010,7 +2011,7 @@ sparc_option_override (void)
       gcc_unreachable ();
     };
 
-  /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
+  /* param_simultaneous_prefetches is the number of prefetches that
      can run at the same time.  More important, it is the threshold
      defining when additional prefetches will be dropped by the
      hardware.
@@ -2033,21 +2034,20 @@ sparc_option_override (void)
      single-threaded program.  Experimental results show that setting
      this parameter to 32 works well when the number of threads is not
      high.  */
-  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
-                        ((sparc_cpu == PROCESSOR_ULTRASPARC
-                          || sparc_cpu == PROCESSOR_NIAGARA
-                          || sparc_cpu == PROCESSOR_NIAGARA2
-                          || sparc_cpu == PROCESSOR_NIAGARA3
-                          || sparc_cpu == PROCESSOR_NIAGARA4)
-                         ? 2
-                         : (sparc_cpu == PROCESSOR_ULTRASPARC3
-                            ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
-                                    || sparc_cpu == PROCESSOR_M8)
-                                   ? 32 : 3))),
-                        global_options.x_param_values,
-                        global_options_set.x_param_values);
-
-  /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
+  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                      param_simultaneous_prefetches,
+                      ((sparc_cpu == PROCESSOR_ULTRASPARC
+                        || sparc_cpu == PROCESSOR_NIAGARA
+                        || sparc_cpu == PROCESSOR_NIAGARA2
+                        || sparc_cpu == PROCESSOR_NIAGARA3
+                        || sparc_cpu == PROCESSOR_NIAGARA4)
+                       ? 2
+                       : (sparc_cpu == PROCESSOR_ULTRASPARC3
+                          ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
+                                  || sparc_cpu == PROCESSOR_M8)
+                                 ? 32 : 3))));
+
+  /* param_l1_cache_line_size is the size of the L1 cache line, in
      bytes.
 
      The Oracle SPARC Architecture (previously the UltraSPARC
@@ -2064,38 +2064,33 @@ sparc_option_override (void)
      L2 and L3, but only 32B are brought into the L1D$. (Assuming it
      is a read_n prefetch, which is the only type which allocates to
      the L1.)  */
-  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
-                        (sparc_cpu == PROCESSOR_M8
-                         ? 64 : 32),
-                        global_options.x_param_values,
-                        global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                      param_l1_cache_line_size,
+                      (sparc_cpu == PROCESSOR_M8 ? 64 : 32));
 
-  /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
+  /* param_l1_cache_size is the size of the L1D$ (most SPARC chips use
      Hardvard level-1 caches) in kilobytes.  Both UltraSPARC and
      Niagara processors feature a L1D$ of 16KB.  */
-  maybe_set_param_value (PARAM_L1_CACHE_SIZE,
-                        ((sparc_cpu == PROCESSOR_ULTRASPARC
-                          || sparc_cpu == PROCESSOR_ULTRASPARC3
-                          || sparc_cpu == PROCESSOR_NIAGARA
-                          || sparc_cpu == PROCESSOR_NIAGARA2
-                          || sparc_cpu == PROCESSOR_NIAGARA3
-                          || sparc_cpu == PROCESSOR_NIAGARA4
-                          || sparc_cpu == PROCESSOR_NIAGARA7
-                          || sparc_cpu == PROCESSOR_M8)
-                         ? 16 : 64),
-                        global_options.x_param_values,
-                        global_options_set.x_param_values);
-
-
-  /* PARAM_L2_CACHE_SIZE is the size fo the L2 in kilobytes.  Note
+  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                      param_l1_cache_size,
+                      ((sparc_cpu == PROCESSOR_ULTRASPARC
+                        || sparc_cpu == PROCESSOR_ULTRASPARC3
+                        || sparc_cpu == PROCESSOR_NIAGARA
+                        || sparc_cpu == PROCESSOR_NIAGARA2
+                        || sparc_cpu == PROCESSOR_NIAGARA3
+                        || sparc_cpu == PROCESSOR_NIAGARA4
+                        || sparc_cpu == PROCESSOR_NIAGARA7
+                        || sparc_cpu == PROCESSOR_M8)
+                       ? 16 : 64));
+
+  /* param_l2_cache_size is the size fo the L2 in kilobytes.  Note
      that 512 is the default in params.def.  */
-  maybe_set_param_value (PARAM_L2_CACHE_SIZE,
-                        ((sparc_cpu == PROCESSOR_NIAGARA4
-                          || sparc_cpu == PROCESSOR_M8)
-                         ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
-                                  ? 256 : 512)),
-                        global_options.x_param_values,
-                        global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                      param_l2_cache_size,
+                      ((sparc_cpu == PROCESSOR_NIAGARA4
+                        || sparc_cpu == PROCESSOR_M8)
+                       ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
+                                ? 256 : 512)));
   
 
   /* Disable save slot sharing for call-clobbered registers by default.
index 8477008320c6681249d2e657c1792850f2264d10..b1ace70b5f77bef8ffb81c2b48cbfac30029add7 100644 (file)
@@ -57,6 +57,7 @@
 #include "tree-pass.h"
 #include "context.h"
 #include "builtins.h"
+#include "opts.h"
 
 /* This file should be included last.  */
 #include "target-def.h"
@@ -457,9 +458,8 @@ visium_option_override (void)
       /* Allow the size of compilation units to double because of inlining.
         In practice the global size of the object code is hardly affected
         because the additional instructions will take up the padding.  */
-      maybe_set_param_value (PARAM_INLINE_UNIT_GROWTH, 100,
-                            global_options.x_param_values,
-                            global_options_set.x_param_values);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+                          param_inline_unit_growth, 100);
     }
 
   /* Likewise for loops.  */
index bcba61c9a9affbed6aca79e63bcfaafcdd7418ba..ebe27a323b21104355f8c9d680b17ac91ed045f8 100644 (file)
@@ -324,7 +324,7 @@ get_coverage_counts (unsigned counter, unsigned cfg_checksum,
        }
       return NULL;
     }
-  if (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID))
+  if (param_profile_func_internal_id)
     elt.ident = current_function_funcdef_no + 1;
   else
     {
@@ -560,7 +560,7 @@ coverage_compute_profile_id (struct cgraph_node *n)
     {
       expanded_location xloc
        = expand_location (DECL_SOURCE_LOCATION (n->decl));
-      bool use_name_only = (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID) == 0);
+      bool use_name_only = (param_profile_func_internal_id == 0);
 
       chksum = (use_name_only ? 0 : xloc.line);
       if (xloc.file)
@@ -628,7 +628,7 @@ coverage_begin_function (unsigned lineno_checksum, unsigned cfg_checksum)
 
   /* Announce function */
   offset = gcov_write_tag (GCOV_TAG_FUNCTION);
-  if (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID))
+  if (param_profile_func_internal_id)
     gcov_write_unsigned (current_function_funcdef_no + 1);
   else
     {
@@ -682,7 +682,7 @@ coverage_end_function (unsigned lineno_checksum, unsigned cfg_checksum)
 
       item = ggc_alloc<coverage_data> ();
 
-      if (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID))
+      if (param_profile_func_internal_id)
        item->ident = current_function_funcdef_no + 1;
       else
        {
index 972ef791fa7b9380373dfd69eb09b14cd6591463..754d66c432699b004b60df2bb83a315c7b53a1da 100644 (file)
@@ -1,3 +1,10 @@
+2019-11-12  Martin Liska  <mliska@suse.cz>
+
+       * name-lookup.c (namespace_hints::namespace_hints): Replace old parameter syntax
+       with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
+       macro.
+       * typeck.c (comptypes): Likewise.
+
 2019-11-12  Maciej W. Rozycki  <macro@codesourcery.com>
            Frederik Harwath  <frederik@codesourcery.com>
 
index cd0d9551aa34fcbb12415e5eb374f9f5eb5a9459..be846a121419ac4cad71d80d362a53096599bf9e 100644 (file)
@@ -5358,7 +5358,7 @@ namespace_hints::namespace_hints (location_t loc, tree name)
 
   m_candidates = vNULL;
   m_limited = false;
-  m_limit = PARAM_VALUE (CXX_MAX_NAMESPACES_FOR_DIAGNOSTIC_HELP);
+  m_limit = param_cxx_max_namespaces_for_diagnostic_help;
 
   /* Breadth-first search of namespaces.  Up to limit namespaces
      searched (limit zero == unlimited).  */
index ff603f3d8d94cc146122f7c79b8279d9bbd2fd3e..d3e70311229683c04e2d7fda134730bcfd2358fb 100644 (file)
@@ -1498,7 +1498,7 @@ comptypes (tree t1, tree t2, int strict)
           perform a deep check. */
        return structural_comptypes (t1, t2, strict);
 
-      if (flag_checking && USE_CANONICAL_TYPES)
+      if (flag_checking && param_use_canonical_types)
        {
          bool result = structural_comptypes (t1, t2, strict);
          
@@ -1519,7 +1519,7 @@ comptypes (tree t1, tree t2, int strict)
          
          return result;
        }
-      if (!flag_checking && USE_CANONICAL_TYPES)
+      if (!flag_checking && param_use_canonical_types)
        return TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2);
       else
        return structural_comptypes (t1, t2, strict);
index 097fb94e7733c2e7c0ef905baec0ec0a0f9be61a..b1c0276b0f717c776d182d8ee1f16fa61d6a44f3 100644 (file)
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -6414,7 +6414,7 @@ cse_find_path (basic_block first_bb, struct cse_basic_block_data *data,
   if (follow_jumps)
     {
       bb = data->path[path_size - 1].bb;
-      while (bb && path_size < PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH))
+      while (bb && path_size < param_max_cse_path_length)
        {
          if (single_succ_p (bb))
            e = single_succ_edge (bb);
@@ -6592,7 +6592,7 @@ cse_extended_basic_block (struct cse_basic_block_data *ebb_data)
             FIXME: This is a real kludge and needs to be done some other
                    way.  */
          if (NONDEBUG_INSN_P (insn)
-             && num_insns++ > PARAM_VALUE (PARAM_MAX_CSE_INSNS))
+             && num_insns++ > param_max_cse_insns)
            {
              flush_hash_table ();
              num_insns = 0;
@@ -6736,7 +6736,7 @@ cse_main (rtx_insn *f ATTRIBUTE_UNUSED, int nregs)
   init_cse_reg_info (nregs);
 
   ebb_data.path = XNEWVEC (struct branch_path,
-                          PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
+                          param_max_cse_path_length);
 
   cse_cfg_altered = false;
   cse_jumps_altered = false;
index 500793ba40eb59001c0b3f3848b86ab61c27b93c..1745256944a7943f1ed772d77a23a0148750d3a1 100644 (file)
@@ -2297,7 +2297,7 @@ cselib_invalidate_mem (rtx mem_rtx)
              p = &(*p)->next;
              continue;
            }
-         if (num_mems < PARAM_VALUE (PARAM_MAX_CSELIB_MEMORY_LOCATIONS)
+         if (num_mems < param_max_cselib_memory_locations
              && ! canon_anti_dependence (x, false, mem_rtx,
                                          GET_MODE (mem_rtx), mem_addr))
            {
index 5d8c6f990ec69e97e8b2cc723891a96b7e08c1c2..76abd873c78dbadbfa0535d125a8d54b4fbeec1a 100644 (file)
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -2657,7 +2657,7 @@ dse_step1 (void)
 
   /* For -O1 reduce the maximum number of active local stores for RTL DSE
      since this can consume huge amounts of memory (PR89115).  */
-  int max_active_local_stores = PARAM_VALUE (PARAM_MAX_DSE_ACTIVE_LOCAL_STORES);
+  int max_active_local_stores = param_max_dse_active_local_stores;
   if (optimize < 2)
     max_active_local_stores /= 10;
 
index feff49aa44f976e312abc20f0a306bc85c67b688..5f3e549a7fed1b38b3d864a59b0dec65896b54f2 100644 (file)
@@ -2762,15 +2762,15 @@ set_new_first_and_last_insn (rtx_insn *first, rtx_insn *last)
   set_last_insn (last);
   cur_insn_uid = 0;
 
-  if (MIN_NONDEBUG_INSN_UID || MAY_HAVE_DEBUG_INSNS)
+  if (param_min_nondebug_insn_uid || MAY_HAVE_DEBUG_INSNS)
     {
       int debug_count = 0;
 
-      cur_insn_uid = MIN_NONDEBUG_INSN_UID - 1;
+      cur_insn_uid = param_min_nondebug_insn_uid - 1;
       cur_debug_insn_uid = 0;
 
       for (insn = first; insn; insn = NEXT_INSN (insn))
-       if (INSN_UID (insn) < MIN_NONDEBUG_INSN_UID)
+       if (INSN_UID (insn) < param_min_nondebug_insn_uid)
          cur_debug_insn_uid = MAX (cur_debug_insn_uid, INSN_UID (insn));
        else
          {
@@ -2780,7 +2780,7 @@ set_new_first_and_last_insn (rtx_insn *first, rtx_insn *last)
          }
 
       if (debug_count)
-       cur_debug_insn_uid = MIN_NONDEBUG_INSN_UID + debug_count;
+       cur_debug_insn_uid = param_min_nondebug_insn_uid + debug_count;
       else
        cur_debug_insn_uid++;
     }
@@ -3445,10 +3445,10 @@ get_max_insn_count (void)
      differences due to debug insns, and not be affected by
      -fmin-insn-uid, to avoid excessive table size and to simplify
      debugging of -fcompare-debug failures.  */
-  if (cur_debug_insn_uid > MIN_NONDEBUG_INSN_UID)
+  if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
     n -= cur_debug_insn_uid;
   else
-    n -= MIN_NONDEBUG_INSN_UID;
+    n -= param_min_nondebug_insn_uid;
 
   return n;
 }
@@ -4085,7 +4085,7 @@ make_debug_insn_raw (rtx pattern)
 
   insn = as_a <rtx_debug_insn *> (rtx_alloc (DEBUG_INSN));
   INSN_UID (insn) = cur_debug_insn_uid++;
-  if (cur_debug_insn_uid > MIN_NONDEBUG_INSN_UID)
+  if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
     INSN_UID (insn) = cur_insn_uid++;
 
   PATTERN (insn) = pattern;
@@ -5860,8 +5860,8 @@ init_emit (void)
 {
   set_first_insn (NULL);
   set_last_insn (NULL);
-  if (MIN_NONDEBUG_INSN_UID)
-    cur_insn_uid = MIN_NONDEBUG_INSN_UID;
+  if (param_min_nondebug_insn_uid)
+    cur_insn_uid = param_min_nondebug_insn_uid;
   else
     cur_insn_uid = 1;
   cur_debug_insn_uid = 1;
index 83c786366c1aaaaefd251e9805d4c698da485a3f..93e31cc3ba18cbbe98354f2bda0f054652936701 100644 (file)
@@ -1837,7 +1837,7 @@ compute_stack_clash_protection_loop_data (rtx *rounded_size, rtx *last_addr,
 {
   /* Round SIZE down to STACK_CLASH_PROTECTION_PROBE_INTERVAL */
   *probe_interval
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+    = 1 << param_stack_clash_protection_probe_interval;
   *rounded_size = simplify_gen_binary (AND, Pmode, size,
                                        GEN_INT (-*probe_interval));
 
index 7cf9ef1effda2870b67e26c5b9e2e2a3a7e5289b..3b9a88dc8a78992bf8210cd472f4e52e0928b87e 100644 (file)
@@ -657,7 +657,7 @@ compute_alignments (void)
     }
   loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
   profile_count count_threshold = cfun->cfg->count_max.apply_scale
-                (1, PARAM_VALUE (PARAM_ALIGN_THRESHOLD));
+                (1, param_align_threshold);
 
   if (dump_file)
     {
@@ -743,7 +743,7 @@ compute_alignments (void)
          && branch_count + fallthru_count > count_threshold
          && (branch_count
              > fallthru_count.apply_scale
-                   (PARAM_VALUE (PARAM_ALIGN_LOOP_ITERATIONS), 1)))
+                   (param_align_loop_iterations, 1)))
        {
          align_flags alignment = LOOP_ALIGN (label);
          if (dump_file)
index 52cb2383db242e298194b7104eba5b51e6c39b77..542cc01865624ab31cfe0c61c39b62163f995e12 100644 (file)
@@ -5929,9 +5929,9 @@ fold_range_test (location_t loc, enum tree_code code, tree type,
      short-circuited branch and the underlying object on both sides
      is the same, make a non-short-circuit operation.  */
   bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
-  if (PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT) != -1)
+  if (param_logical_op_non_short_circuit != -1)
     logical_op_non_short_circuit
-      = PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT);
+      = param_logical_op_non_short_circuit;
   if (logical_op_non_short_circuit
       && !flag_sanitize_coverage
       && lhs != 0 && rhs != 0
@@ -8600,9 +8600,9 @@ fold_truth_andor (location_t loc, enum tree_code code, tree type,
     return tem;
 
   bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
-  if (PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT) != -1)
+  if (param_logical_op_non_short_circuit != -1)
     logical_op_non_short_circuit
-      = PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT);
+      = param_logical_op_non_short_circuit;
   if (logical_op_non_short_circuit
       && !flag_sanitize_coverage
       && (code == TRUTH_AND_EXPR
@@ -13365,7 +13365,7 @@ tree_single_nonnegative_warnv_p (tree t, bool *strict_overflow_p, int depth)
         would not, passes that need this information could be revised
         to provide it through dataflow propagation.  */
       return (!name_registered_for_update_p (t)
-             && depth < PARAM_VALUE (PARAM_MAX_SSA_NAME_QUERY_DEPTH)
+             && depth < param_max_ssa_name_query_depth
              && gimple_stmt_nonnegative_warnv_p (SSA_NAME_DEF_STMT (t),
                                                  strict_overflow_p, depth));
 
@@ -14013,7 +14013,7 @@ integer_valued_real_single_p (tree t, int depth)
         would not, passes that need this information could be revised
         to provide it through dataflow propagation.  */
       return (!name_registered_for_update_p (t)
-             && depth < PARAM_VALUE (PARAM_MAX_SSA_NAME_QUERY_DEPTH)
+             && depth < param_max_ssa_name_query_depth
              && gimple_stmt_integer_valued_real_p (SSA_NAME_DEF_STMT (t),
                                                    depth));
 
index aeb59c645e1705e00ebfdda1a4ed8f60efa34ec5..0ae39e44b01f2aa7c91d57458b75bb5b14799057 100644 (file)
@@ -799,10 +799,10 @@ want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
                      && optimize_function_for_size_p (cfun));
          cost = set_src_cost (x, mode, 0);
 
-         if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
+         if (cost < COSTS_N_INSNS (param_gcse_unrestricted_cost))
            {
              max_distance
-               = ((HOST_WIDE_INT)GCSE_COST_DISTANCE_RATIO * cost) / 10;
+               = ((HOST_WIDE_INT)param_gcse_cost_distance_ratio * cost) / 10;
              if (max_distance == 0)
                return 0;
 
@@ -1844,7 +1844,7 @@ prune_insertions_deletions (int n_elems)
      PRUNE_EXPRS.  */
   for (j = 0; j < (unsigned) n_elems; j++)
     if (deletions[j]
-       && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
+       && (insertions[j] / deletions[j]) > param_max_gcse_insertion_ratio)
       bitmap_set_bit (prune_exprs, j);
 
   /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS.  */
@@ -3133,7 +3133,8 @@ hoist_code (void)
      expressions, nothing gets hoisted from the entry block.  */
   FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
     {
-      domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
+      domby = get_dominated_to_depth (CDI_DOMINATORS, bb,
+                                     param_max_hoist_depth);
 
       if (domby.length () == 0)
        continue;
@@ -3982,9 +3983,9 @@ update_ld_motion_stores (struct gcse_expr * expr)
 bool
 gcse_or_cprop_is_too_expensive (const char *pass)
 {
-  unsigned int memory_request = (n_basic_blocks_for_fn (cfun)
-                                * SBITMAP_SET_SIZE (max_reg_num ())
-                                * sizeof (SBITMAP_ELT_TYPE));
+  int memory_request = (n_basic_blocks_for_fn (cfun)
+                       * SBITMAP_SET_SIZE (max_reg_num ())
+                       * sizeof (SBITMAP_ELT_TYPE));
   
   /* Trying to perform global optimizations on flow graphs which have
      a high connectivity will take a long time and is unlikely to be
@@ -4007,7 +4008,7 @@ gcse_or_cprop_is_too_expensive (const char *pass)
 
   /* If allocating memory for the dataflow bitmaps would take up too much
      storage it's better just to disable the optimization.  */
-  if (memory_request > MAX_GCSE_MEMORY)
+  if (memory_request > param_max_gcse_memory)
     {
       warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d registers; "
index f94c39f110d3d5658828bd336b60db1ada3b06d7..cf7a4f6840534f76f2fc78324fc4931986893291 100644 (file)
@@ -814,8 +814,8 @@ void
 init_ggc_heuristics (void)
 {
 #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
-  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
-  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
+  param_ggc_min_expand = ggc_min_expand_heuristic ();
+  param_ggc_min_heapsize = ggc_min_heapsize_heuristic ();
 #endif
 }
 
index b443d87ea57bf1b35fc11c7b296d1975ef993b96..b0d26256072c577743e7b7fc36bf31822a33233f 100644 (file)
@@ -2185,9 +2185,9 @@ ggc_collect (void)
      total allocations haven't expanded much since the last
      collection.  */
   float allocated_last_gc =
-    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
+    MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * 1024);
 
-  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
+  float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
   if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
     return;
 
index b56155b1fefec3585899cc3c95602880b4730f1d..2a0cf6e41d8121ac44e9ec46165cbf967cc572a3 100644 (file)
@@ -78,14 +78,14 @@ along with GCC; see the file COPYING3.  If not see
    simple reduction of inner loop and double reduction of the loop nest.  */
 
 /* Maximum number of stmts in each loop that should be interchanged.  */
-#define MAX_NUM_STMT    (PARAM_VALUE (PARAM_LOOP_INTERCHANGE_MAX_NUM_STMTS))
+#define MAX_NUM_STMT    (param_loop_interchange_max_num_stmts)
 /* Maximum number of data references in loop nest.  */
-#define MAX_DATAREFS    (PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+#define MAX_DATAREFS    (param_loop_max_datarefs_for_datadeps)
 
 /* Comparison ratio of access stride between inner/outer loops to be
    interchanged.  This is the minimum stride ratio for loop interchange
    to be profitable.  */
-#define OUTER_STRIDE_RATIO  (PARAM_VALUE (PARAM_LOOP_INTERCHANGE_STRIDE_RATIO))
+#define OUTER_STRIDE_RATIO  (param_loop_interchange_stride_ratio)
 /* The same as above, but we require higher ratio for interchanging the
    innermost two loops.  */
 #define INNER_STRIDE_RATIO  ((OUTER_STRIDE_RATIO) + 1)
index 899653b0863475bd9f809bd5b7ec485c6b936ea0..c1fc9ba99166f9d60b859b4c6819c421cc671cd1 100644 (file)
@@ -572,15 +572,15 @@ tree_loop_unroll_and_jam (void)
       /* We regard a user-specified minimum percentage of zero as a request
         to ignore all profitability concerns and apply the transformation
         always.  */
-      if (!PARAM_VALUE (PARAM_UNROLL_JAM_MIN_PERCENT))
+      if (!param_unroll_jam_min_percent)
        profit_unroll = MAX(2, profit_unroll);
       else if (removed * 100 / datarefs.length ()
-         < (unsigned)PARAM_VALUE (PARAM_UNROLL_JAM_MIN_PERCENT))
+         < (unsigned)param_unroll_jam_min_percent)
        profit_unroll = 1;
       if (unroll_factor > profit_unroll)
        unroll_factor = profit_unroll;
-      if (unroll_factor > (unsigned)PARAM_VALUE (PARAM_UNROLL_JAM_MAX_UNROLL))
-       unroll_factor = PARAM_VALUE (PARAM_UNROLL_JAM_MAX_UNROLL);
+      if (unroll_factor > (unsigned)param_unroll_jam_max_unroll)
+       unroll_factor = param_unroll_jam_max_unroll;
       unroll = (unroll_factor > 1
                && can_unroll_loop_p (outer, unroll_factor, &desc));
 
index 1664d875e80004475ba91612b22b365050168fd6..18456e879582c217223c09dea9df8a27fd7950c1 100644 (file)
@@ -605,8 +605,8 @@ unsigned int
 loop_versioning::max_insns_for_loop (class loop *loop)
 {
   return (loop->inner
-         ? PARAM_VALUE (PARAM_LOOP_VERSIONING_MAX_OUTER_INSNS)
-         : PARAM_VALUE (PARAM_LOOP_VERSIONING_MAX_INNER_INSNS));
+         ? param_loop_versioning_max_outer_insns
+         : param_loop_versioning_max_inner_insns);
 }
 
 /* Return true if for cost reasons we should avoid versioning any loop
index 5bf45eeac28c9e3b124337802287d8933b67655e..49a0834d6473cc511a14d22431d8d62459cbd97f 100644 (file)
@@ -366,7 +366,7 @@ is_feasible_trace (basic_block bb)
 
   /* Upper Hard limit on the number statements to copy.  */
   if (num_stmts_in_join
-      >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
+      >= param_max_jump_thread_duplication_stmts)
     return false;
 
   return true;
index c6dbac268c8d97b8a73bb01ef039df935031ed8e..fbab13c59a03036c047dcf82371e500d742f17ca 100644 (file)
@@ -2502,7 +2502,7 @@ imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
     return false;
 
   bool allow_unaligned
-    = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
+    = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
   /* Punt if the combined store would not be aligned and we need alignment.  */
   if (!allow_unaligned)
     {
@@ -2762,7 +2762,7 @@ imm_store_chain_info::coalesce_immediate_stores ()
 
       if (info->order >= merged_store->first_nonmergeable_order
          || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
-             > (unsigned) PARAM_VALUE (PARAM_STORE_MERGING_MAX_SIZE)))
+             > (unsigned) param_store_merging_max_size))
        ;
 
       /* |---store 1---|
@@ -3710,7 +3710,7 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
 
   auto_vec<class split_store *, 32> split_stores;
   bool allow_unaligned_store
-    = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
+    = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
   bool allow_unaligned_load = allow_unaligned_store;
   bool bzero_first = false;
   store_immediate_info *store;
@@ -4890,7 +4890,7 @@ pass_store_merging::process_store (gimple *stmt)
       /* If we reach the limit of stores to merge in a chain terminate and
         process the chain now.  */
       if ((*chain_info)->m_store_info.length ()
-         == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
+         == (unsigned int) param_max_stores_to_merge)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
index de7f36015efbb5e4c6d6eb503174619a993d5fba..ab5011a7a1e33bab5a09a6b6974b883bfddc12bc 100644 (file)
@@ -546,7 +546,7 @@ find_basis_for_base_expr (slsr_cand_t c, tree base_expr)
 
   // Limit potential of N^2 behavior for long candidate chains.
   int iters = 0;
-  int max_iters = PARAM_VALUE (PARAM_MAX_SLSR_CANDIDATE_SCAN);
+  int max_iters = param_max_slsr_candidate_scan;
 
   mapping_key.base_expr = base_expr;
   chain = base_cand_map->find (&mapping_key);
index 40d1e8de6ae94b4a7f2e24d6d63d027f2d04286e..11a4da0a623b75ed12ee5a26d96c973542a4feaa 100644 (file)
@@ -203,7 +203,7 @@ class translate_isl_ast_to_gimple
   {
     codegen_error = true;
     gcc_assert (! flag_checking
-               || PARAM_VALUE (PARAM_GRAPHITE_ALLOW_CODEGEN_ERRORS));
+               || param_graphite_allow_codegen_errors);
   }
 
   bool is_constant (tree op) const
@@ -1383,7 +1383,7 @@ scop_to_isl_ast (scop_p scop)
 {
   int old_err = isl_options_get_on_error (scop->isl_context);
   int old_max_operations = isl_ctx_get_max_operations (scop->isl_context);
-  int max_operations = PARAM_VALUE (PARAM_MAX_ISL_OPERATIONS);
+  int max_operations = param_max_isl_operations;
   if (max_operations)
     isl_ctx_set_max_operations (scop->isl_context, max_operations);
   isl_options_set_on_error (scop->isl_context, ISL_ON_ERROR_CONTINUE);
index c4b8f3bc876feac91a42f8287e8497852c4ba745..1dc9c3cb7be179456bf4145e34f3f1159b289695 100644 (file)
@@ -64,7 +64,7 @@ get_schedule_for_node_st (__isl_take isl_schedule_node *node, void *user)
   if (type != isl_schedule_node_leaf)
     return node;
 
-  long tile_size = PARAM_VALUE (PARAM_LOOP_BLOCK_TILE_SIZE);
+  long tile_size = param_loop_block_tile_size;
   if (dims <= 1
       || tile_size == 0
       || !isl_schedule_node_band_get_permutable (node))
@@ -115,7 +115,7 @@ optimize_isl (scop_p scop)
 {
   int old_err = isl_options_get_on_error (scop->isl_context);
   int old_max_operations = isl_ctx_get_max_operations (scop->isl_context);
-  int max_operations = PARAM_VALUE (PARAM_MAX_ISL_OPERATIONS);
+  int max_operations = param_max_isl_operations;
   if (max_operations)
     isl_ctx_set_max_operations (scop->isl_context, max_operations);
   isl_options_set_on_error (scop->isl_context, ISL_ON_ERROR_CONTINUE);
index 489d0b93b42f02a02be7051896fb390914b8c6ac..1505a13b8602dbf3e8aa50d0ec4a99b86839ae7e 100644 (file)
@@ -1639,7 +1639,7 @@ build_scops (vec<scop_p> *scops)
          continue;
        }
 
-      unsigned max_arrays = PARAM_VALUE (PARAM_GRAPHITE_MAX_ARRAYS_PER_SCOP);
+      unsigned max_arrays = param_graphite_max_arrays_per_scop;
       if (max_arrays > 0
          && scop->drs.length () >= max_arrays)
        {
@@ -1652,7 +1652,7 @@ build_scops (vec<scop_p> *scops)
        }
 
       find_scop_parameters (scop);
-      graphite_dim_t max_dim = PARAM_VALUE (PARAM_GRAPHITE_MAX_NB_SCOP_PARAMS);
+      graphite_dim_t max_dim = param_graphite_max_nb_scop_params;
       if (max_dim > 0
          && scop_nb_params (scop) > max_dim)
        {
index 41cf1f362e8c34d009b0a310ff5b9a9ffb613631..58bf704ea15974a11459649ffb5fdc2f8b0b5cbb 100644 (file)
@@ -584,7 +584,7 @@ set_modulo_params (int ii, int max_stages, int insns, int max_uid)
   modulo_max_stages = max_stages;
   modulo_n_insns = insns;
   modulo_iter0_max_uid = max_uid;
-  modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
+  modulo_backtracks_left = param_max_modulo_backtrack_attempts;
 }
 
 /* A structure to record a pair of insns where the first one is a real
@@ -2712,7 +2712,7 @@ rank_for_schedule (const void *x, const void *y)
   if (flag_sched_critical_path_heuristic && priority_val)
     return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
 
-  if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
+  if (param_sched_autopref_queue_depth >= 0)
     {
       int autopref = autopref_rank_for_schedule (tmp, tmp2);
       if (autopref != 0)
@@ -3413,7 +3413,7 @@ model_remove_from_worklist (struct model_insn_info *insn)
 }
 
 /* Add INSN to the model worklist.  Start looking for a suitable position
-   between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
+   between neighbors PREV and NEXT, testing at most param_max_sched_ready_insns
    insns either side.  A null PREV indicates the beginning of the list and
    a null NEXT indicates the end.  */
 
@@ -3424,7 +3424,7 @@ model_add_to_worklist (struct model_insn_info *insn,
 {
   int count;
 
-  count = MAX_SCHED_READY_INSNS;
+  count = param_max_sched_ready_insns;
   if (count > 0 && prev && model_order_p (insn, prev))
     do
       {
@@ -3452,7 +3452,7 @@ model_promote_insn (struct model_insn_info *insn)
   int count;
 
   prev = insn->prev;
-  count = MAX_SCHED_READY_INSNS;
+  count = param_max_sched_ready_insns;
   while (count > 0 && prev && model_order_p (insn, prev))
     {
       count--;
@@ -3738,7 +3738,7 @@ model_choose_insn (void)
     {
       fprintf (sched_dump, ";;\t+--- worklist:\n");
       insn = model_worklist;
-      count = MAX_SCHED_READY_INSNS;
+      count = param_max_sched_ready_insns;
       while (count > 0 && insn)
        {
          fprintf (sched_dump, ";;\t+---   %d [%d, %d, %d, %d]\n",
@@ -3770,7 +3770,7 @@ model_choose_insn (void)
 
      Failing that, just pick the highest-priority instruction in the
      worklist.  */
-  count = MAX_SCHED_READY_INSNS;
+  count = param_max_sched_ready_insns;
   insn = model_worklist;
   fallback = 0;
   for (;;)
@@ -5147,12 +5147,12 @@ queue_to_ready (struct ready_list *ready)
       /* If the ready list is full, delay the insn for 1 cycle.
         See the comment in schedule_block for the rationale.  */
       if (!reload_completed
-         && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
+         && (ready->n_ready - ready->n_debug > param_max_sched_ready_insns
              || (sched_pressure == SCHED_PRESSURE_MODEL
-                 /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
-                    instructions too.  */
+                 /* Limit pressure recalculations to
+                    param_max_sched_ready_insns instructions too.  */
                  && model_index (insn) > (model_curr_point
-                                          + MAX_SCHED_READY_INSNS)))
+                                          + param_max_sched_ready_insns)))
          && !(sched_pressure == SCHED_PRESSURE_MODEL
               && model_curr_point < model_num_insns
               /* Always allow the next model instruction to issue.  */
@@ -5743,7 +5743,7 @@ autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
   /* Exit early if the param forbids this or if we're not entering here through
      normal haifa scheduling.  This can happen if selective scheduling is
      explicitly enabled.  */
-  if (!insn_queue || PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) <= 0)
+  if (!insn_queue || param_sched_autopref_queue_depth <= 0)
     return 0;
 
   if (sched_verbose >= 2 && ready_index == 0)
@@ -5796,14 +5796,14 @@ autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
            }
        }
 
-      if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) == 1)
+      if (param_sched_autopref_queue_depth == 1)
        continue;
 
       /* Everything from the current queue slot should have been moved to
         the ready list.  */
       gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
 
-      int n_stalls = PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) - 1;
+      int n_stalls = param_sched_autopref_queue_depth - 1;
       if (n_stalls > max_insn_queue_index)
        n_stalls = max_insn_queue_index;
 
@@ -6552,14 +6552,15 @@ schedule_block (basic_block *target_bb, state_t init_state)
      time in the worst case.  Before reload we are more likely to have
      big lists so truncate them to a reasonable size.  */
   if (!reload_completed
-      && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
+      && ready.n_ready - ready.n_debug > param_max_sched_ready_insns)
     {
       ready_sort_debug (&ready);
       ready_sort_real (&ready);
 
-      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
+      /* Find first free-standing insn past param_max_sched_ready_insns.
          If there are debug insns, we know they're first.  */
-      for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
+      for (i = param_max_sched_ready_insns + ready.n_debug; i < ready.n_ready;
+          i++)
        if (!SCHED_GROUP_P (ready_element (&ready, i)))
          break;
 
@@ -7258,7 +7259,7 @@ sched_init (void)
           && !reload_completed
           && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
     sched_pressure = ((enum sched_pressure_algorithm)
-                     PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
+                     param_sched_pressure_algorithm);
   else
     sched_pressure = SCHED_PRESSURE_NONE;
 
@@ -7273,11 +7274,10 @@ sched_init (void)
 
       if (spec_info->mask != 0)
         {
-          spec_info->data_weakness_cutoff =
-            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
-          spec_info->control_weakness_cutoff =
-            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
-             * REG_BR_PROB_BASE) / 100;
+         spec_info->data_weakness_cutoff
+           = (param_sched_spec_prob_cutoff * MAX_DEP_WEAK) / 100;
+         spec_info->control_weakness_cutoff
+           = (param_sched_spec_prob_cutoff * REG_BR_PROB_BASE) / 100;
         }
       else
        /* So we won't read anything accidentally.  */
index 436f4c5f9f513de0c2da74f272552f812d10bbe5..c4a03f938cf438ea2e5329c6ae17cdc2058a4c44 100644 (file)
@@ -5940,7 +5940,7 @@ init_prologue (void)
   unsigned index = hsa_get_number_decl_kernel_mappings ();
 
   /* Emit store to debug argument.  */
-  if (PARAM_VALUE (PARAM_HSA_GEN_DEBUG_STORES) > 0)
+  if (param_hsa_gen_debug_stores > 0)
     set_debug_value (prologue, new hsa_op_immed (1000 + index, BRIG_TYPE_U64));
 }
 
index 8bc6f53cb38d8ffa41eddf0fb2564e69352d661f..5df8a43a5f7e31b1e6d10a5e9216dd1280efb95c 100644 (file)
@@ -3311,7 +3311,7 @@ bb_ok_for_noce_convert_multiple_sets (basic_block test_bb)
 {
   rtx_insn *insn;
   unsigned count = 0;
-  unsigned param = PARAM_VALUE (PARAM_MAX_RTL_IF_CONVERSION_INSNS);
+  unsigned param = param_max_rtl_if_conversion_insns;
 
   FOR_BB_INSNS (test_bb, insn)
     {
@@ -3838,7 +3838,7 @@ cond_move_process_if_block (struct noce_if_info *if_info)
   vec<rtx> else_regs = vNULL;
   unsigned int i;
   int success_p = FALSE;
-  int limit = PARAM_VALUE (PARAM_MAX_RTL_IF_CONVERSION_INSNS);
+  int limit = param_max_rtl_if_conversion_insns;
 
   /* Build a mapping for each block to the value used for each
      register.  */
index b1d899976e80ff73e13f73dc88492c093b5559d8..345f1792ad6c7dcbb155ac5f95aadf80162d8720 100644 (file)
@@ -1612,7 +1612,7 @@ ipcp_lattice<valtype>::add_value (valtype newval, cgraph_edge *cs,
        return false;
       }
 
-  if (values_count == PARAM_VALUE (PARAM_IPA_CP_VALUE_LIST_SIZE))
+  if (values_count == param_ipa_cp_value_list_size)
     {
       /* We can only free sources, not the values themselves, because sources
         of other values in this SCC might point to them.   */
@@ -2089,7 +2089,7 @@ merge_agg_lats_step (class ipcp_param_lattices *dest_plats,
          set_agg_lats_to_bottom (dest_plats);
          return false;
        }
-      if (dest_plats->aggs_count == PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS))
+      if (dest_plats->aggs_count == param_ipa_max_agg_items)
        return false;
       dest_plats->aggs_count++;
       new_al = ipcp_agg_lattice_pool.allocate ();
@@ -2644,11 +2644,11 @@ devirtualization_time_bonus (struct cgraph_node *node,
       int size = ipa_size_summaries->get (callee)->size;
       /* FIXME: The values below need re-considering and perhaps also
         integrating into the cost metrics, at lest in some very basic way.  */
-      if (size <= MAX_INLINE_INSNS_AUTO / 4)
+      if (size <= param_max_inline_insns_auto / 4)
        res += 31 / ((int)speculative + 1);
-      else if (size <= MAX_INLINE_INSNS_AUTO / 2)
+      else if (size <= param_max_inline_insns_auto / 2)
        res += 15 / ((int)speculative + 1);
-      else if (size <= MAX_INLINE_INSNS_AUTO
+      else if (size <= param_max_inline_insns_auto
               || DECL_DECLARED_INLINE_P (callee->decl))
        res += 7 / ((int)speculative + 1);
     }
@@ -2663,7 +2663,7 @@ hint_time_bonus (ipa_hints hints)
 {
   int result = 0;
   if (hints & (INLINE_HINT_loop_iterations | INLINE_HINT_loop_stride))
-    result += PARAM_VALUE (PARAM_IPA_CP_LOOP_HINT_BONUS);
+    result += param_ipa_cp_loop_hint_bonus;
   return result;
 }
 
@@ -2675,11 +2675,11 @@ incorporate_penalties (ipa_node_params *info, int64_t evaluation)
 {
   if (info->node_within_scc)
     evaluation = (evaluation
-                 * (100 - PARAM_VALUE (PARAM_IPA_CP_RECURSION_PENALTY))) / 100;
+                 * (100 - param_ipa_cp_recursion_penalty)) / 100;
 
   if (info->node_calling_single_call)
     evaluation = (evaluation
-                 * (100 - PARAM_VALUE (PARAM_IPA_CP_SINGLE_CALL_PENALTY)))
+                 * (100 - param_ipa_cp_single_call_penalty))
       / 100;
 
   return evaluation;
@@ -2719,10 +2719,10 @@ good_cloning_opportunity_p (struct cgraph_node *node, int time_benefit,
                 ", threshold: %i\n",
                 info->node_within_scc ? ", scc" : "",
                 info->node_calling_single_call ? ", single_call" : "",
-                evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
+                evaluation, param_ipa_cp_eval_threshold);
        }
 
-      return evaluation >= PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD);
+      return evaluation >= param_ipa_cp_eval_threshold;
     }
   else
     {
@@ -2737,9 +2737,9 @@ good_cloning_opportunity_p (struct cgraph_node *node, int time_benefit,
                 time_benefit, size_cost, freq_sum,
                 info->node_within_scc ? ", scc" : "",
                 info->node_calling_single_call ? ", single_call" : "",
-                evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
+                evaluation, param_ipa_cp_eval_threshold);
 
-      return evaluation >= PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD);
+      return evaluation >= param_ipa_cp_eval_threshold;
     }
 }
 
@@ -3364,9 +3364,9 @@ ipcp_propagate_stage (class ipa_topo_info *topo)
   }
 
   max_new_size = overall_size;
-  if (max_new_size < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
-    max_new_size = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
-  max_new_size += max_new_size * PARAM_VALUE (PARAM_IPCP_UNIT_GROWTH) / 100 + 1;
+  if (max_new_size < param_large_unit_insns)
+    max_new_size = param_large_unit_insns;
+  max_new_size += max_new_size * param_ipcp_unit_growth / 100 + 1;
 
   if (dump_file)
     fprintf (dump_file, "\noverall_size: %li, max_new_size: %li\n",
index 686d960f75be71b216030cc2b130f3776b750504..fe619d1dbb6dd9fae6a24012ef9e407bf9a50fa6 100644 (file)
@@ -1204,7 +1204,7 @@ decompose_param_expr (struct ipa_func_body_info *fbi,
                      struct agg_position_info *aggpos,
                      expr_eval_ops *param_ops_p = NULL)
 {
-  int op_limit = PARAM_VALUE (PARAM_IPA_MAX_PARAM_EXPR_OPS);
+  int op_limit = param_ipa_max_param_expr_ops;
   int op_count = 0;
 
   if (param_ops_p)
@@ -1435,7 +1435,7 @@ set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
 
   auto_vec<std::pair<tree, tree> > ranges;
   tree type = TREE_TYPE (op);
-  int bound_limit = PARAM_VALUE (PARAM_IPA_MAX_SWITCH_PREDICATE_BOUNDS);
+  int bound_limit = param_ipa_max_switch_predicate_bounds;
   int bound_count = 0;
   wide_int vr_wmin, vr_wmax;
   value_range_kind vr_type = get_range_info (op, &vr_wmin, &vr_wmax);
@@ -2280,9 +2280,9 @@ fp_expression_p (gimple *stmt)
 static void
 analyze_function_body (struct cgraph_node *node, bool early)
 {
-  sreal time = PARAM_VALUE (PARAM_UNINLINED_FUNCTION_TIME);
+  sreal time = param_uninlined_function_time;
   /* Estimate static overhead for function prologue/epilogue and alignment. */
-  int size = PARAM_VALUE (PARAM_UNINLINED_FUNCTION_INSNS);
+  int size = param_uninlined_function_insns;
   /* Benefits are scaled by probability of elimination that is in range
      <0,2>.  */
   basic_block bb;
@@ -2331,7 +2331,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
          fbi.bb_infos = vNULL;
          fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
          fbi.param_count = count_formal_params (node->decl);
-         fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+         fbi.aa_walk_budget = param_ipa_max_aa_steps;
 
          nonconstant_names.safe_grow_cleared
            (SSANAMES (my_function)->length ());
@@ -2348,9 +2348,9 @@ analyze_function_body (struct cgraph_node *node, bool early)
   info->account_size_time (0, 0, bb_predicate, bb_predicate);
 
   bb_predicate = predicate::not_inlined ();
-  info->account_size_time (PARAM_VALUE (PARAM_UNINLINED_FUNCTION_INSNS)
+  info->account_size_time (param_uninlined_function_insns
                           * ipa_fn_summary::size_scale,
-                          PARAM_VALUE (PARAM_UNINLINED_FUNCTION_TIME),
+                          param_uninlined_function_time,
                           bb_predicate,
                           bb_predicate);
 
@@ -2748,10 +2748,8 @@ compute_fn_summary (struct cgraph_node *node, bool early)
       es->call_stmt_size = eni_size_weights.call_cost;
       es->call_stmt_time = eni_time_weights.call_cost;
       info->account_size_time (ipa_fn_summary::size_scale
-                              * PARAM_VALUE
-                                (PARAM_UNINLINED_FUNCTION_THUNK_INSNS),
-                              PARAM_VALUE
-                                (PARAM_UNINLINED_FUNCTION_THUNK_TIME), t, t);
+                              * param_uninlined_function_thunk_insns,
+                              param_uninlined_function_thunk_time, t, t);
       t = predicate::not_inlined ();
       info->account_size_time (2 * ipa_fn_summary::size_scale, 0, t, t);
       ipa_update_overall_fn_summary (node);
index 9839cae43a3fbf808cc7bbcfff7cd30f4f605175..d5a9fb0682ef282141a9348aa641959af1a763c3 100644 (file)
@@ -442,8 +442,8 @@ offline_size (struct cgraph_node *node, ipa_size_summary *info)
       else if (DECL_COMDAT (node->decl)
               && node->can_remove_if_no_direct_calls_p ())
        return (info->size
-               * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
-               + 50) / 100;
+               * (100 - param_comdat_sharing_probability)
+               + 50) / 100;
     }
   return 0;
 }
index 53252e18cf007c6374f25361fa5c77eb9e61b6df..b159f440e91d8d902dda9c73fa05775fdc59c5ca 100644 (file)
@@ -179,13 +179,13 @@ caller_growth_limits (struct cgraph_edge *e)
   if (limit < what_size_info->self_size)
     limit = what_size_info->self_size;
 
-  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
+  limit += limit * param_large_function_growth / 100;
 
   /* Check the size after inlining against the function limits.  But allow
      the function to shrink if it went over the limits by forced inlining.  */
   newsize = estimate_size_after_inlining (to, e);
   if (newsize >= ipa_size_summaries->get (what)->size
-      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
+      && newsize > param_large_function_insns
       && newsize > limit)
     {
       e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
@@ -201,7 +201,7 @@ caller_growth_limits (struct cgraph_edge *e)
      on every invocation of the caller (i.e. its call statement dominates
      exit block).  We do not track this information, yet.  */
   stack_size_limit += ((gcov_type)stack_size_limit
-                      * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
+                      * param_stack_frame_growth / 100);
 
   inlined_stack = (ipa_get_stack_frame_offset (to)
                   + outer_info->estimated_self_stack_size
@@ -214,7 +214,7 @@ caller_growth_limits (struct cgraph_edge *e)
         This bit overoptimistically assume that we are good at stack
         packing.  */
       && inlined_stack > ipa_fn_summaries->get (to)->estimated_stack_size
-      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
+      && inlined_stack > param_large_stack_frame)
     {
       e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
       return false;
@@ -399,16 +399,16 @@ inline_insns_single (cgraph_node *n, bool hint)
   if (opt_for_fn (n->decl, optimize) >= 3)
     {
       if (hint)
-       return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE)
-              * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT) / 100;
-      return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE);
+       return param_max_inline_insns_single
+              * param_inline_heuristics_hint_percent / 100;
+      return param_max_inline_insns_single;
     }
   else
     {
       if (hint)
-       return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE_O2)
-              * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT_O2) / 100;
-      return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE_O2);
+       return param_max_inline_insns_single_o2
+              * param_inline_heuristics_hint_percent_o2 / 100;
+      return param_max_inline_insns_single_o2;
     }
 }
 
@@ -421,16 +421,16 @@ inline_insns_auto (cgraph_node *n, bool hint)
   if (opt_for_fn (n->decl, optimize) >= 3)
     {
       if (hint)
-       return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO)
-              * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT) / 100;
-      return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO);
+       return param_max_inline_insns_auto
+              * param_inline_heuristics_hint_percent / 100;
+      return param_max_inline_insns_auto;
     }
   else
     {
       if (hint)
-       return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO_O2)
-              * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT_O2) / 100;
-      return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO_O2);
+       return param_max_inline_insns_auto_o2
+              * param_inline_heuristics_hint_percent_o2 / 100;
+      return param_max_inline_insns_auto_o2;
     }
 }
 
@@ -567,14 +567,14 @@ can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
          inlinable = false;
        }
       /* If callee is optimized for size and caller is not, allow inlining if
-        code shrinks or we are in MAX_INLINE_INSNS_SINGLE limit and callee
-        is inline (and thus likely an unified comdat).  This will allow caller
-        to run faster.  */
+        code shrinks or we are in param_max_inline_insns_single limit and
+        callee is inline (and thus likely an unified comdat).
+        This will allow caller to run faster.  */
       else if (opt_for_fn (callee->decl, optimize_size)
               > opt_for_fn (caller->decl, optimize_size))
        {
          int growth = estimate_edge_growth (e);
-         if (growth > PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE)
+         if (growth > param_max_inline_insns_size
              && (!DECL_DECLARED_INLINE_P (callee->decl)
                  && growth >= MAX (inline_insns_single (caller, false),
                                    inline_insns_auto (caller, false))))
@@ -686,11 +686,11 @@ want_early_inline_function_p (struct cgraph_edge *e)
       int growth = estimate_edge_growth (e);
       int n;
       int early_inlining_insns = opt_for_fn (e->caller->decl, optimize) >= 3
-                                ? PARAM_VALUE (PARAM_EARLY_INLINING_INSNS)
-                                : PARAM_VALUE (PARAM_EARLY_INLINING_INSNS_O2);
+                                ? param_early_inlining_insns
+                                : param_early_inlining_insns_o2;
 
 
-      if (growth <= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE))
+      if (growth <= param_max_inline_insns_size)
        ;
       else if (!e->maybe_hot_p ())
        {
@@ -794,8 +794,8 @@ big_speedup_p (struct cgraph_edge *e)
                         ? e->caller->inlined_to
                         : e->caller);
   int limit = opt_for_fn (caller->decl, optimize) >= 3
-             ? PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)
-             : PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP_O2);
+             ? param_inline_min_speedup
+             : param_inline_min_speedup_o2;
 
   if ((time - inlined_time) * 100 > time * limit)
     return true;
@@ -862,9 +862,9 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
                                   | INLINE_HINT_loop_iterations
                                   | INLINE_HINT_loop_stride));
 
-      if (growth <= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE))
+      if (growth <= param_max_inline_insns_size)
        ;
-      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
+      /* Apply param_max_inline_insns_single limit.  Do not do so when
         hints suggests that inlining given function is very profitable.
         Avoid computation of big_speedup_p when not necessary to change
         outcome of decision.  */
@@ -882,7 +882,7 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
        }
       else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !opt_for_fn (e->caller->decl, flag_inline_functions)
-              && growth >= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SMALL))
+              && growth >= param_max_inline_insns_small)
        {
          /* growth_positive_p is expensive, always test it last.  */
           if (growth >= inline_insns_single (e->caller, false)
@@ -892,8 +892,8 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
              want_inline = false;
            }
        }
-      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
-        Bypass the limit when speedup seems big.  */
+      /* Apply param_max_inline_insns_auto limit for functions not declared
+        inline.  Bypass the limit when speedup seems big.  */
       else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && growth >= inline_insns_auto (e->caller, apply_hints)
               && (apply_hints
@@ -945,10 +945,10 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
   char const *reason = NULL;
   bool want_inline = true;
   sreal caller_freq = 1;
-  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
+  int max_depth = param_max_inline_recursive_depth_auto;
 
   if (DECL_DECLARED_INLINE_P (edge->caller->decl))
-    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
+    max_depth = param_max_inline_recursive_depth;
 
   if (!edge->maybe_hot_p ())
     {
@@ -1010,7 +1010,7 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
     {
       if (edge->sreal_frequency () * 100
           <= caller_freq
-            * PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY))
+            * param_min_inline_recursive_probability)
        {
          reason = "frequency of recursive call is too small";
          want_inline = false;
@@ -1207,9 +1207,7 @@ edge_badness (struct cgraph_edge *edge, bool dump)
              /* ... or when early optimizers decided to split and edge
                 frequency still indicates splitting is a win ... */
              || (callee->split_part && !caller->split_part
-                 && freq * 100
-                    < PARAM_VALUE
-                         (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY)
+                 && freq * 100 < param_partial_inlining_entry_probability
                  /* ... and do not overwrite user specified hints.   */
                  && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
                      || DECL_DECLARED_INLINE_P (caller->decl)))))
@@ -1539,7 +1537,7 @@ static bool
 recursive_inlining (struct cgraph_edge *edge,
                    vec<cgraph_edge *> *new_edges)
 {
-  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
+  int limit = param_max_inline_insns_recursive_auto;
   edge_heap_t heap (sreal::min ());
   struct cgraph_node *node;
   struct cgraph_edge *e;
@@ -1552,7 +1550,7 @@ recursive_inlining (struct cgraph_edge *edge,
     node = node->inlined_to;
 
   if (DECL_DECLARED_INLINE_P (node->decl))
-    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
+    limit = param_max_inline_insns_recursive;
 
   /* Make sure that function is small enough to be considered for inlining.  */
   if (estimate_size_after_inlining (node, edge)  >= limit)
@@ -1677,11 +1675,11 @@ static int
 compute_max_insns (int insns)
 {
   int max_insns = insns;
-  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
-    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
+  if (max_insns < param_large_unit_insns)
+    max_insns = param_large_unit_insns;
 
   return ((int64_t) max_insns
-         * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
+         * (100 + param_inline_unit_growth) / 100);
 }
 
 
@@ -2897,7 +2895,7 @@ early_inliner (function *fun)
        }
       /* We iterate incremental inlining to get trivial cases of indirect
         inlining.  */
-      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
+      while (iterations < param_early_inliner_max_iterations
             && early_inline_small_functions (node))
        {
          timevar_push (TV_INTEGRATION);
@@ -2916,7 +2914,7 @@ early_inliner (function *fun)
              es->call_stmt_time
                = estimate_num_insns (edge->call_stmt, &eni_time_weights);
            }
-         if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
+         if (iterations < param_early_inliner_max_iterations - 1)
            ipa_update_overall_fn_summary (node);
          timevar_pop (TV_INTEGRATION);
          iterations++;
index 705af03d20cebbe24234be717a76ffa285fb7066..7c2b4c795fab93de12ff6c99a958714fd8ddb312 100644 (file)
@@ -1400,7 +1400,7 @@ record_known_type (struct type_change_info *tci, tree type, HOST_WIDE_INT offset
 static inline bool
 csftc_abort_walking_p (unsigned speculative)
 {
-  unsigned max = PARAM_VALUE (PARAM_MAX_SPECULATIVE_DEVIRT_MAYDEFS);
+  unsigned max = param_max_speculative_devirt_maydefs;
   return speculative > max ? true : false;
 }
 
index 79cabc5573e903265541d4e2ec343167be5cbac1..cd34ae628547ba9e44e24f4881308e4a769b546a 100644 (file)
@@ -506,7 +506,7 @@ ipa_profile (void)
 
       gcc_assert (overall_size);
 
-      cutoff = (overall_time * PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE) + 500) / 1000;
+      cutoff = (overall_time * param_hot_bb_count_ws_permille + 500) / 1000;
       threshold = 0;
       for (i = 0; cumulated < cutoff; i++)
        {
index a6c135f242bc56455901a30652ca585894a7dc2a..3e78321bf16e7c455948affb37aa6b543d20507b 100644 (file)
@@ -1601,7 +1601,7 @@ determine_known_aggregate_parts (gcall *call, tree arg,
   struct ipa_known_agg_contents_list *list = NULL, *all_list = NULL;
   bitmap visited = NULL;
   int item_count = 0, const_count = 0;
-  int ipa_max_agg_items = PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS);
+  int ipa_max_agg_items = param_ipa_max_agg_items;
   HOST_WIDE_INT arg_offset, arg_size;
   tree arg_base;
   bool check_ref, by_ref;
@@ -2632,7 +2632,7 @@ ipa_analyze_node (struct cgraph_node *node)
   fbi.bb_infos = vNULL;
   fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
   fbi.param_count = ipa_get_param_count (info);
-  fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+  fbi.aa_walk_budget = param_ipa_max_aa_steps;
 
   for (struct cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
     {
@@ -5314,7 +5314,7 @@ ipcp_transform_function (struct cgraph_node *node)
   fbi.bb_infos = vNULL;
   fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
   fbi.param_count = param_count;
-  fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+  fbi.aa_walk_budget = param_ipa_max_aa_steps;
 
   vec_safe_grow_cleared (descriptors, param_count);
   ipa_populate_param_decls (node, *descriptors);
index 0444bda704d3fede7d28098544e1623b112b6c57..c73b257ca7f9051d10b539fad5314e48712ad0f7 100644 (file)
@@ -453,7 +453,7 @@ consider_split (class split_point *current, bitmap non_ssa_vars,
      is unknown.  */
   if (!(current->count
        < (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.apply_scale
-          (PARAM_VALUE (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY), 100))))
+          (param_partial_inlining_entry_probability, 100))))
     {
       /* When profile is guessed, we cannot expect it to give us
         realistic estimate on likelyness of function taking the
@@ -563,8 +563,8 @@ consider_split (class split_point *current, bitmap non_ssa_vars,
      that.  Next stage1 we should try to be more meaningful here.  */
   if (current->header_size + call_overhead
       >= (unsigned int)(DECL_DECLARED_INLINE_P (current_function_decl)
-                       ? MAX_INLINE_INSNS_SINGLE
-                       : MAX_INLINE_INSNS_AUTO) + 10)
+                       ? param_max_inline_insns_single
+                       : param_max_inline_insns_auto) + 10)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
@@ -577,7 +577,7 @@ consider_split (class split_point *current, bitmap non_ssa_vars,
      Limit this duplication.  This is consistent with limit in tree-sra.c  
      FIXME: with LTO we ought to be able to do better!  */
   if (DECL_ONE_ONLY (current_function_decl)
-      && current->split_size >= (unsigned int) MAX_INLINE_INSNS_AUTO + 10)
+      && current->split_size >= (unsigned int) param_max_inline_insns_auto + 10)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
@@ -589,7 +589,7 @@ consider_split (class split_point *current, bitmap non_ssa_vars,
      FIXME: with LTO we ought to be able to do better!  */
   if (DECL_ONE_ONLY (current_function_decl)
       && current->split_size
-        <= (unsigned int) PARAM_VALUE (PARAM_EARLY_INLINING_INSNS) / 2)
+        <= (unsigned int) param_early_inlining_insns / 2)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
index a9674af9c941b9c23aef942fc941e5bd44572e46..303e4a7028ae8c25bb911c7da5f1794b4de6426a 100644 (file)
@@ -1266,7 +1266,7 @@ allocate_access (gensum_param_desc *desc,
                 HOST_WIDE_INT offset, HOST_WIDE_INT size)
 {
   if (desc->access_count
-      == (unsigned) PARAM_VALUE (PARAM_IPA_SRA_MAX_REPLACEMENTS))
+      == (unsigned) param_ipa_sra_max_replacements)
     {
       disqualify_split_candidate (desc, "Too many replacement candidates");
       return NULL;
@@ -2280,8 +2280,7 @@ process_scan_results (cgraph_node *node, struct function *fun,
       if (!desc->by_ref || optimize_function_for_size_p (fun))
        param_size_limit = cur_param_size;
       else
-       param_size_limit = (PARAM_VALUE (PARAM_IPA_SRA_PTR_GROWTH_FACTOR)
-                          * cur_param_size);
+       param_size_limit = param_ipa_sra_ptr_growth_factor * cur_param_size;
       if (nonarg_acc_size > param_size_limit
          || (!desc->by_ref && nonarg_acc_size == param_size_limit))
        {
@@ -2501,7 +2500,7 @@ ipa_sra_summarize_function (cgraph_node *node)
          bb_dereferences = XCNEWVEC (HOST_WIDE_INT,
                                      by_ref_count
                                      * last_basic_block_for_fn (fun));
-         aa_walking_limit = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+         aa_walking_limit = param_ipa_max_aa_steps;
          scan_function (node, fun);
 
          if (dump_file)
@@ -3337,7 +3336,7 @@ pull_accesses_from_callee (isra_param_desc *param_desc,
       return NULL;
 
     if ((prop_count + pclen
-        > (unsigned) PARAM_VALUE (PARAM_IPA_SRA_MAX_REPLACEMENTS))
+        > (unsigned) param_ipa_sra_max_replacements)
        || size_would_violate_limit_p (param_desc,
                                       param_desc->size_reached + prop_size))
       return "propagating accesses would violate the count or size limit";
index 47ce189af472885a7d7ec2f49bf35a7872d788f2..e53bb813f37520af155ca0e9e29aab18b9b229d3 100644 (file)
@@ -2217,7 +2217,7 @@ loop_compare_func (const void *v1p, const void *v2p)
    hardly helps (for irregular register file architecture it could
    help by choosing a better hard register in the loop but we prefer
    faster allocation even in this case).  We also remove cheap loops
-   if there are more than IRA_MAX_LOOPS_NUM of them.  Loop with EH
+   if there are more than param_ira_max_loops_num of them.  Loop with EH
    exit or enter edges are removed too because the allocation might
    require put pseudo moves on the EH edges (we could still do this
    for pseudos with caller saved hard registers in some cases but it
@@ -2253,7 +2253,7 @@ mark_loops_for_removal (void)
             );
       }
   qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
-  for (i = 0; i < n - IRA_MAX_LOOPS_NUM; i++)
+  for (i = 0; i < n - param_ira_max_loops_num; i++)
     {
       sorted_loops[i]->to_remove_p = true;
       if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
index a0aefaa0549a9843e7bee36afa0b843375911c40..8e7c0c41f2fbf5b19e99919ccd14d8dd997ade0a 100644 (file)
@@ -113,13 +113,13 @@ build_conflict_bit_table (void)
             / IRA_INT_BITS);
        allocated_words_num += conflict_bit_vec_words_num;
        if ((uint64_t) allocated_words_num * sizeof (IRA_INT_TYPE)
-           > (uint64_t) IRA_MAX_CONFLICT_TABLE_SIZE * 1024 * 1024)
+           > (uint64_t) param_ira_max_conflict_table_size * 1024 * 1024)
          {
            if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
              fprintf
                (ira_dump_file,
                 "+++Conflict table will be too big(>%dMB) -- don't use it\n",
-                IRA_MAX_CONFLICT_TABLE_SIZE);
+                param_ira_max_conflict_table_size);
            return false;
          }
       }
index ccd020a2dbab47569161b6e8d22c0ea15e1a14de..6a956a03b03870bdc798ba93ab1f4fe832c14b3f 100644 (file)
@@ -651,7 +651,7 @@ doloop_optimize (class loop *loop)
     }
 
   max_cost
-    = COSTS_N_INSNS (PARAM_VALUE (PARAM_MAX_ITERATIONS_COMPUTATION_COST));
+    = COSTS_N_INSNS (param_max_iterations_computation_cost);
   if (set_src_cost (desc->niter_expr, mode, optimize_loop_for_speed_p (loop))
       > max_cost)
     {
index ef46c8aea22dac2ff522292f89f8583205172723..d40ad37cced300d89b7b2a72598cf68de3caa79f 100644 (file)
@@ -1491,7 +1491,7 @@ gain_for_invariant (struct invariant *inv, unsigned *regs_needed,
          if ((int) new_regs[pressure_class]
              + (int) regs_needed[pressure_class]
              + LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
-             + IRA_LOOP_RESERVED_REGS
+             + param_ira_loop_reserved_regs
              > ira_class_hard_regs_num[pressure_class])
            break;
        }
@@ -2279,7 +2279,7 @@ move_loop_invariants (void)
       /* move_single_loop_invariants for very large loops is time consuming
         and might need a lot of memory.  For -O1 only do loop invariant
         motion for very small loops.  */
-      unsigned max_bbs = LOOP_INVARIANT_MAX_BBS_IN_LOOP;
+      unsigned max_bbs = param_loop_invariant_max_bbs_in_loop;
       if (optimize < 2)
        max_bbs /= 10;
       if (loop->num_nodes <= max_bbs)
index 63fccd23fae38f8918a7d94411aaa43c72830dd3..551405ad0d1778c76c4e121b39fcd7ff3917c9cd 100644 (file)
@@ -364,13 +364,13 @@ decide_unroll_constant_iterations (class loop *loop, int flags)
 
   /* nunroll = total number of copies of the original loop body in
      unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
-  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
+  nunroll = param_max_unrolled_insns / loop->ninsns;
   nunroll_by_av
-    = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+    = param_max_average_unrolled_insns / loop->av_ninsns;
   if (nunroll > nunroll_by_av)
     nunroll = nunroll_by_av;
-  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
-    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+  if (nunroll > (unsigned) param_max_unroll_times)
+    nunroll = param_max_unroll_times;
 
   if (targetm.loop_unroll_adjust)
     nunroll = targetm.loop_unroll_adjust (nunroll, loop);
@@ -684,12 +684,12 @@ decide_unroll_runtime_iterations (class loop *loop, int flags)
 
   /* nunroll = total number of copies of the original loop body in
      unrolled loop (i.e. if it is 2, we have to duplicate loop body once.  */
-  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
-  nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+  nunroll = param_max_unrolled_insns / loop->ninsns;
+  nunroll_by_av = param_max_average_unrolled_insns / loop->av_ninsns;
   if (nunroll > nunroll_by_av)
     nunroll = nunroll_by_av;
-  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
-    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+  if (nunroll > (unsigned) param_max_unroll_times)
+    nunroll = param_max_unroll_times;
 
   if (targetm.loop_unroll_adjust)
     nunroll = targetm.loop_unroll_adjust (nunroll, loop);
@@ -1167,13 +1167,13 @@ decide_unroll_stupid (class loop *loop, int flags)
 
   /* nunroll = total number of copies of the original loop body in
      unrolled loop (i.e. if it is 2, we have to duplicate loop body once.  */
-  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
+  nunroll = param_max_unrolled_insns / loop->ninsns;
   nunroll_by_av
-    = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+    = param_max_average_unrolled_insns / loop->av_ninsns;
   if (nunroll > nunroll_by_av)
     nunroll = nunroll_by_av;
-  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
-    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+  if (nunroll > (unsigned) param_max_unroll_times)
+    nunroll = param_max_unroll_times;
 
   if (targetm.loop_unroll_adjust)
     nunroll = targetm.loop_unroll_adjust (nunroll, loop);
@@ -1824,7 +1824,7 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx_insn *insn)
 
   /* Generate a new register only if the expansion limit has not been
      reached.  Else reuse an already existing expansion.  */
-  if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
+  if (param_max_variable_expansions > ve->expansion_count)
     {
       really_new_expansion = true;
       new_reg = gen_reg_rtx (GET_MODE (ve->reg));
index e14a246c0d232000a75195e680c3ce13f9b6d628..56ab96310899bb59ce72b005a521aaebaabf066f 100644 (file)
@@ -1008,7 +1008,7 @@ spill_for (int regno, bitmap spilled_pseudo_bitmap, bool first_p)
        }
       n = 0;
       if (sparseset_cardinality (live_range_reload_inheritance_pseudos)
-         <= (unsigned)LRA_MAX_CONSIDERED_RELOAD_PSEUDOS)
+         <= (unsigned)param_lra_max_considered_reload_pseudos)
        EXECUTE_IF_SET_IN_SPARSESET (live_range_reload_inheritance_pseudos,
                                     reload_regno)
          if ((int) reload_regno != regno
index 0db6d3151cdcf213e8a101a5909604a8a314db14..ced7be79017702638adff846298fe7b250189010 100644 (file)
@@ -6682,7 +6682,7 @@ inherit_in_ebb (rtx_insn *head, rtx_insn *tail)
    a BB is not greater than the following value, we don't add the BB
    to EBB.  */
 #define EBB_PROBABILITY_CUTOFF \
-  ((REG_BR_PROB_BASE * LRA_INHERITANCE_EBB_PROBABILITY_CUTOFF) / 100)
+  ((REG_BR_PROB_BASE * param_lra_inheritance_ebb_probability_cutoff) / 100)
 
 /* Current number of inheritance/split iteration.  */
 int lra_inheritance_iter;
index 38fd66a2925ec2ba9f757c2685b5d9a362613784..01b86f4e9574bd26157fb1bbba0abb2613833b8d 100644 (file)
@@ -1,3 +1,10 @@
+2019-11-12  Martin Liska  <mliska@suse.cz>
+
+       * lto-partition.c (lto_balanced_map): Replace old parameter syntax
+       with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
+       macro.
+       * lto.c (do_whole_program_analysis): Likewise.
+
 2019-11-11  Martin Liska  <mliska@suse.cz>
 
        * Make-lang.in: Relax dependency of lto-dump.o to
index 32090359814945e8529f548a9689cfc36281e28c..5354350378b3ee0b946a033214ae87e964aa519b 100644 (file)
@@ -560,13 +560,13 @@ lto_balanced_map (int n_lto_partitions, int max_partition_size)
   varpool_order.qsort (varpool_node_cmp);
 
   /* Compute partition size and create the first partition.  */
-  if (PARAM_VALUE (MIN_PARTITION_SIZE) > max_partition_size)
+  if (param_min_partition_size > max_partition_size)
     fatal_error (input_location, "min partition size cannot be greater "
                 "than max partition size");
 
   partition_size = total_size / n_lto_partitions;
-  if (partition_size < PARAM_VALUE (MIN_PARTITION_SIZE))
-    partition_size = PARAM_VALUE (MIN_PARTITION_SIZE);
+  if (partition_size < param_min_partition_size)
+    partition_size = param_min_partition_size;
   npartitions = 1;
   partition = new_partition ("");
   if (dump_file)
@@ -816,8 +816,8 @@ lto_balanced_map (int n_lto_partitions, int max_partition_size)
            fprintf (dump_file,
                     "Total size: %" PRId64 " partition_size: %" PRId64 "\n",
                     total_size, partition_size);
-         if (partition_size < PARAM_VALUE (MIN_PARTITION_SIZE))
-           partition_size = PARAM_VALUE (MIN_PARTITION_SIZE);
+         if (partition_size < param_min_partition_size)
+           partition_size = param_min_partition_size;
          npartitions ++;
        }
     }
index 9ef70da91a9cdf8288323ad06a04214513685c38..1d2d52754c7c65f75be9971267e47279c00f4b2b 100644 (file)
@@ -436,14 +436,14 @@ do_whole_program_analysis (void)
 
   /* TODO: jobserver communication is not supported, yet.  */
   if (!strcmp (flag_wpa, "jobserver"))
-    lto_parallelism = PARAM_VALUE (PARAM_MAX_LTO_STREAMING_PARALLELISM);
+    lto_parallelism = param_max_lto_streaming_parallelism;
   else
     {
       lto_parallelism = atoi (flag_wpa);
       if (lto_parallelism <= 0)
        lto_parallelism = 0;
-      if (lto_parallelism >= PARAM_VALUE (PARAM_MAX_LTO_STREAMING_PARALLELISM))
-       lto_parallelism = PARAM_VALUE (PARAM_MAX_LTO_STREAMING_PARALLELISM);
+      if (lto_parallelism >= param_max_lto_streaming_parallelism)
+       lto_parallelism = param_max_lto_streaming_parallelism;
     }
 
   timevar_start (TV_PHASE_OPT_GEN);
@@ -496,8 +496,8 @@ do_whole_program_analysis (void)
   else if (flag_lto_partition == LTO_PARTITION_ONE)
     lto_balanced_map (1, INT_MAX);
   else if (flag_lto_partition == LTO_PARTITION_BALANCED)
-    lto_balanced_map (PARAM_VALUE (PARAM_LTO_PARTITIONS),
-                     PARAM_VALUE (MAX_PARTITION_SIZE));
+    lto_balanced_map (param_lto_partitions,
+                     param_max_partition_size);
   else
     gcc_unreachable ();
 
index c355594bb6bd3da23640a87e59517354e41b05cd..3127fb1481ea8ab48ad7b16e7724c94f048935dc 100644 (file)
@@ -1433,7 +1433,7 @@ sms_schedule (void)
       if ( latch_edge->count () > profile_count::zero ()
           && (latch_edge->count()
              < single_exit (loop)->count ().apply_scale
-                                (SMS_LOOP_AVERAGE_COUNT_THRESHOLD, 1)))
+                                (param_sms_loop_average_count_threshold, 1)))
        {
          if (dump_file)
            {
@@ -1640,7 +1640,7 @@ sms_schedule (void)
          /* The default value of PARAM_SMS_MIN_SC is 2 as stage count of
             1 means that there is no interleaving between iterations thus
             we let the scheduling passes do the job in this case.  */
-         if (stage_count < PARAM_VALUE (PARAM_SMS_MIN_SC)
+         if (stage_count < param_sms_min_sc
              || (count_init && (loop_count <= stage_count))
              || (max_trip_count >= 0 && max_trip_count <= stage_count)
              || (trip_count >= 0 && trip_count <= stage_count))
@@ -1832,7 +1832,7 @@ sms_schedule (void)
 /* A limit on the number of cycles that resource conflicts can span.  ??? Should
    be provided by DFA, and be dependent on the type of insn scheduled.  Currently
    set to 0 to save compile time.  */
-#define DFA_HISTORY SMS_DFA_HISTORY
+#define DFA_HISTORY param_sms_dfa_history
 
 /* A threshold for the number of repeated unsuccessful attempts to insert
    an empty row, before we flush the partial schedule and start over.  */
index 394cbfd1c5642fbdb5fadb504bc222b74a96e2d0..a47517df80e31fffba3d736ce14b93b5e6688254 100644 (file)
@@ -667,25 +667,18 @@ default_options_optimization (struct gcc_options *opts,
     opts->x_flag_ipa_pta = true;
 
   /* Track fields in field-sensitive alias analysis.  */
-  maybe_set_param_value
-    (PARAM_MAX_FIELDS_FOR_FIELD_SENSITIVE,
-     opt2 ? 100 : default_param_value (PARAM_MAX_FIELDS_FOR_FIELD_SENSITIVE),
-     opts->x_param_values, opts_set->x_param_values);
+  if (opt2)
+    SET_OPTION_IF_UNSET (opts, opts_set, param_max_fields_for_field_sensitive,
+                        100);
 
   if (opts->x_optimize_size)
     /* We want to crossjump as much as possible.  */
-    maybe_set_param_value (PARAM_MIN_CROSSJUMP_INSNS, 1,
-                          opts->x_param_values, opts_set->x_param_values);
-  else
-    maybe_set_param_value (PARAM_MIN_CROSSJUMP_INSNS,
-                          default_param_value (PARAM_MIN_CROSSJUMP_INSNS),
-                          opts->x_param_values, opts_set->x_param_values);
+    SET_OPTION_IF_UNSET (opts, opts_set, param_min_crossjump_insns, 1);
 
   /* Restrict the amount of work combine does at -Og while retaining
      most of its useful transforms.  */
   if (opts->x_optimize_debug)
-    maybe_set_param_value (PARAM_MAX_COMBINE_INSNS, 2,
-                          opts->x_param_values, opts_set->x_param_values);
+    SET_OPTION_IF_UNSET (opts, opts_set, param_max_combine_insns, 2);
 
   /* Allow default optimizations to be specified on a per-machine basis.  */
   maybe_default_options (opts, opts_set,
@@ -1036,10 +1029,8 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
 
   if (opts->x_flag_conserve_stack)
     {
-      maybe_set_param_value (PARAM_LARGE_STACK_FRAME, 100,
-                            opts->x_param_values, opts_set->x_param_values);
-      maybe_set_param_value (PARAM_STACK_FRAME_GROWTH, 40,
-                            opts->x_param_values, opts_set->x_param_values);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_large_stack_frame, 100);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_stack_frame_growth, 40);
     }
 
   if (opts->x_flag_lto)
@@ -2272,19 +2263,13 @@ common_handle_option (struct gcc_options *opts,
         all features.  */
       if (opts->x_flag_sanitize & SANITIZE_KERNEL_ADDRESS)
        {
-         maybe_set_param_value (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
-                                0, opts->x_param_values,
-                                opts_set->x_param_values);
-         maybe_set_param_value (PARAM_ASAN_GLOBALS, 0, opts->x_param_values,
-                                opts_set->x_param_values);
-         maybe_set_param_value (PARAM_ASAN_STACK, 0, opts->x_param_values,
-                                opts_set->x_param_values);
-         maybe_set_param_value (PARAM_ASAN_PROTECT_ALLOCAS, 0,
-                                opts->x_param_values,
-                                opts_set->x_param_values);
-         maybe_set_param_value (PARAM_ASAN_USE_AFTER_RETURN, 0,
-                                opts->x_param_values,
-                                opts_set->x_param_values);
+         SET_OPTION_IF_UNSET (opts, opts_set,
+                              param_asan_instrumentation_with_call_threshold,
+                              0);
+         SET_OPTION_IF_UNSET (opts, opts_set, param_asan_globals, 0);
+         SET_OPTION_IF_UNSET (opts, opts_set, param_asan_stack, 0);
+         SET_OPTION_IF_UNSET (opts, opts_set, param_asan_protect_allocas, 0);
+         SET_OPTION_IF_UNSET (opts, opts_set, param_asan_use_after_return, 0);
        }
       break;
 
@@ -2586,9 +2571,8 @@ common_handle_option (struct gcc_options *opts,
       enable_fdo_optimizations (opts, opts_set, value);
       if (!opts_set->x_flag_profile_correction)
        opts->x_flag_profile_correction = value;
-      maybe_set_param_value (
-       PARAM_EARLY_INLINER_MAX_ITERATIONS, 10,
-       opts->x_param_values, opts_set->x_param_values);
+      SET_OPTION_IF_UNSET (opts, opts_set,
+                          param_early_inliner_max_iterations, 10);
       break;
 
     case OPT_fprofile_generate_:
index 0c12b3808a66b5fc26da4f571521bc2ca29fbe59..161e6d926b52b8369cea63bcfdcd5f2609523e87 100644 (file)
@@ -1146,17 +1146,18 @@ eliminate_partially_redundant_load (basic_block bb, rtx_insn *insn,
 
   /* Check if it's worth applying the partial redundancy elimination.  */
   if (ok_count.to_gcov_type ()
-      < GCSE_AFTER_RELOAD_PARTIAL_FRACTION * not_ok_count.to_gcov_type ())
+      < param_gcse_after_reload_partial_fraction * not_ok_count.to_gcov_type ())
     goto cleanup;
 
   gcov_type threshold;
 #if (GCC_VERSION >= 5000)
-  if (__builtin_mul_overflow (GCSE_AFTER_RELOAD_CRITICAL_FRACTION,
+  if (__builtin_mul_overflow (param_gcse_after_reload_critical_fraction,
                              critical_count.to_gcov_type (), &threshold))
     threshold = profile_count::max_count;
 #else
   threshold
-    = GCSE_AFTER_RELOAD_CRITICAL_FRACTION * critical_count.to_gcov_type ();
+    = (param_gcse_after_reload_critical_fraction
+       * critical_count.to_gcov_type ());
 #endif
 
   if (ok_count.to_gcov_type () < threshold)
index 915f0806b110f99030e0c817ce55c1c060f7a090..5ee56a33fd78cc5d286439b033d56380d28deea1 100644 (file)
@@ -132,7 +132,7 @@ get_hot_bb_threshold ()
 {
   if (min_count == -1)
     {
-      const int hot_frac = PARAM_VALUE (HOT_BB_COUNT_FRACTION);
+      const int hot_frac = param_hot_bb_count_fraction;
       const gcov_type min_hot_count
        = hot_frac
          ? profile_info->sum_max / hot_frac
@@ -177,7 +177,7 @@ maybe_hot_count_p (struct function *fun, profile_count count)
       if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
          && count < (ENTRY_BLOCK_PTR_FOR_FN (fun)->count.apply_scale (2, 3)))
        return false;
-      if (count.apply_scale (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION), 1)
+      if (count.apply_scale (param_hot_bb_frequency_fraction, 1)
          < ENTRY_BLOCK_PTR_FOR_FN (fun)->count)
        return false;
       return true;
@@ -223,7 +223,7 @@ probably_never_executed (struct function *fun, profile_count count)
      desirable.  */
   if (count.precise_p () && profile_status_for_fn (fun) == PROFILE_READ)
     {
-      const int unlikely_frac = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
+      const int unlikely_frac = param_unlikely_bb_count_fraction;
       if (count.apply_scale (unlikely_frac, 1) >= profile_info->runs)
        return false;
       return true;
@@ -412,9 +412,9 @@ predictable_edge_p (edge e)
   if (!e->probability.initialized_p ())
     return false;
   if ((e->probability.to_reg_br_prob_base ()
-       <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
+       <= param_predictable_branch_outcome * REG_BR_PROB_BASE / 100)
       || (REG_BR_PROB_BASE - e->probability.to_reg_br_prob_base ()
-          <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100))
+         <= param_predictable_branch_outcome * REG_BR_PROB_BASE / 100))
     return true;
   return false;
 }
@@ -1963,7 +1963,7 @@ predict_loops (void)
        {
          tree niter = NULL;
          HOST_WIDE_INT nitercst;
-         int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
+         int max = param_max_predicted_iterations;
          int probability;
          enum br_predictor predictor;
          widest_int nit;
@@ -2443,7 +2443,7 @@ expr_expected_value_1 (tree type, tree op0, enum tree_code code,
                  *predictor = (enum br_predictor) tree_to_uhwi (val2);
                  if (*predictor == PRED_BUILTIN_EXPECT)
                    *probability
-                     = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
+                     = HITRATE (param_builtin_expect_probability);
                  return gimple_call_arg (def, 1);
                }
              return NULL;
@@ -2469,7 +2469,7 @@ expr_expected_value_1 (tree type, tree op0, enum tree_code code,
                    return val;
                  *predictor = PRED_BUILTIN_EXPECT;
                  *probability
-                   = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
+                   = HITRATE (param_builtin_expect_probability);
                  return gimple_call_arg (def, 1);
                }
              case BUILT_IN_EXPECT_WITH_PROBABILITY:
@@ -2660,7 +2660,7 @@ tree_predict_by_opcode (basic_block bb)
          edge e = find_taken_edge_switch_expr (sw, val);
          if (predictor == PRED_BUILTIN_EXPECT)
            {
-             int percent = PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY);
+             int percent = param_builtin_expect_probability;
              gcc_assert (percent >= 0 && percent <= 100);
              predict_edge (e, PRED_BUILTIN_EXPECT,
                            HITRATE (percent));
@@ -3531,7 +3531,7 @@ drop_profile (struct cgraph_node *node, profile_count call_count)
 void
 handle_missing_profiles (void)
 {
-  const int unlikely_frac = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
+  const int unlikely_frac = param_unlikely_bb_count_fraction;
   struct cgraph_node *node;
   auto_vec<struct cgraph_node *, 64> worklist;
 
index 8582b4840ae03ad3889696772523def3d4c469da..b8178238ff9ab94b70395f5603f25f43c68f9130 100644 (file)
@@ -6717,7 +6717,7 @@ find_equiv_reg (rtx goal, rtx_insn *insn, enum reg_class rclass, int other,
        continue;
       num++;
       if (p == 0 || LABEL_P (p)
-         || num > PARAM_VALUE (PARAM_MAX_RELOAD_SEARCH_INSNS))
+         || num > param_max_reload_search_insns)
        return 0;
 
       /* Don't reuse register contents from before a setjmp-type
index cba183e9c72817e67a9222be5089fb956f38f41b..460741213f2b591dca8f89aa54fd1416f5fe8b98 100644 (file)
@@ -1489,7 +1489,7 @@ redundant_insn (rtx insn, rtx_insn *target, const vec<rtx_insn *> &delay_list)
 
   /* Scan backwards looking for a match.  */
   for (trial = PREV_INSN (target),
-        insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
+        insns_to_search = param_max_delay_slot_insn_search;
        trial && insns_to_search > 0;
        trial = PREV_INSN (trial))
     {
@@ -1593,7 +1593,7 @@ redundant_insn (rtx insn, rtx_insn *target, const vec<rtx_insn *> &delay_list)
      INSN sets or sets something insn uses or sets.  */
 
   for (trial = PREV_INSN (target),
-        insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
+        insns_to_search = param_max_delay_slot_insn_search;
        trial && !LABEL_P (trial) && insns_to_search > 0;
        trial = PREV_INSN (trial))
     {
index bf2d6beaf39e6ceaa3d5793e352606d373d61751..5d16100ef4875fedb21efc888e0ec6361af9ef08 100644 (file)
@@ -928,7 +928,7 @@ mark_target_live_regs (rtx_insn *insns, rtx target_maybe_return, struct resource
     }
 
   if (b == -1)
-    b = find_basic_block (target, MAX_DELAY_SLOT_LIVE_SEARCH);
+    b = find_basic_block (target, param_max_delay_slot_live_search);
 
   if (target_hash_table != NULL)
     {
@@ -1289,7 +1289,7 @@ clear_hashed_info_for_insn (rtx_insn *insn)
 void
 incr_ticks_for_insn (rtx_insn *insn)
 {
-  int b = find_basic_block (insn, MAX_DELAY_SLOT_LIVE_SEARCH);
+  int b = find_basic_block (insn, param_max_delay_slot_live_search);
 
   if (b != -1)
     bb_ticks[b]++;
index 00ade87283223798038e58c37e9138fe483de5a1..7a4daeaf4588b1ae3d58e1de3720b5a45469788b 100644 (file)
@@ -1289,8 +1289,8 @@ pass_sanopt::execute (function *fun)
   if (asan_sanitize_stack_p ())
     sanitize_rewrite_addressable_params (fun);
 
-  bool use_calls = ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD < INT_MAX
-    && asan_num_accesses >= ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD;
+  bool use_calls = param_asan_instrumentation_with_call_threshold < INT_MAX
+    && asan_num_accesses >= param_asan_instrumentation_with_call_threshold;
 
   hash_map<tree, tree> shadow_vars_mapping;
   bool need_commit_edge_insert = false;
index 308db4e3ca0458a15a33707cfc91099254bed13d..7cd2e65ccac01c86f51b0b83d9b20b1c4920f329 100644 (file)
@@ -2480,7 +2480,7 @@ sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn)
       /* Pending lists can't get larger with a readonly context.  */
       if (!deps->readonly
           && ((deps->pending_read_list_length + deps->pending_write_list_length)
-              >= MAX_PENDING_LIST_LENGTH))
+             >= param_max_pending_list_length))
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
@@ -2697,7 +2697,7 @@ sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
          {
            if ((deps->pending_read_list_length
                 + deps->pending_write_list_length)
-               >= MAX_PENDING_LIST_LENGTH
+               >= param_max_pending_list_length
                && !DEBUG_INSN_P (insn))
              flush_pending_lists (deps, insn, true, true);
            add_insn_mem_dependence (deps, true, insn, x);
@@ -3222,8 +3222,8 @@ sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn)
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
-             if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
-                 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
+             if (reg_last->uses_length >= param_max_pending_list_length
+                 || reg_last->clobbers_length >= param_max_pending_list_length)
                {
                  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
                                                REG_DEP_OUTPUT, false);
@@ -3679,8 +3679,8 @@ deps_analyze_insn (class deps_desc *deps, rtx_insn *insn)
                && sel_insn_is_speculation_check (insn)))
         {
           /* Keep the list a reasonable size.  */
-          if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
-            flush_pending_lists (deps, insn, true, true);
+         if (deps->pending_flush_length++ >= param_max_pending_list_length)
+           flush_pending_lists (deps, insn, true, true);
           else
            deps->pending_jump_insns
               = alloc_INSN_LIST (insn, deps->pending_jump_insns);
index a594b49ec66e554eea093b6dabcc08125d0d867a..c60afa340e7122b1a66b9810069c6e27b1b374e0 100644 (file)
@@ -620,9 +620,9 @@ schedule_ebbs (void)
     return;
 
   if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
-    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+    probability_cutoff = param_tracer_min_branch_probability_feedback;
   else
-    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+    probability_cutoff = param_tracer_min_branch_probability;
   probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
 
   schedule_ebbs_init ();
index 3e0825075a34967f7fa495e2a486336f5c6fce36..6321692ed0004e71e0a12741e418c2c1d9cc6f48 100644 (file)
@@ -485,9 +485,9 @@ find_single_block_region (bool ebbs_p)
   if (ebbs_p) {
     int probability_cutoff;
     if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
-      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+      probability_cutoff = param_tracer_min_branch_probability_feedback;
     else
-      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+      probability_cutoff = param_tracer_min_branch_probability;
     probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
 
     FOR_EACH_BB_FN (ebb_start, cfun)
@@ -569,8 +569,8 @@ too_large (int block, int *num_bbs, int *num_insns)
   (*num_insns) += (common_sched_info->estimate_number_of_insns
                    (BASIC_BLOCK_FOR_FN (cfun, block)));
 
-  return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
-         || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
+  return ((*num_bbs > param_max_sched_region_blocks)
+         || (*num_insns > param_max_sched_region_insns));
 }
 
 /* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
@@ -800,7 +800,7 @@ haifa_find_rgns (void)
 
       queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
 
-      extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
+      extend_regions_p = param_max_sched_extend_regions_iters > 0;
       if (extend_regions_p)
         {
           degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
@@ -1161,7 +1161,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
   int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
   int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
 
-  max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
+  max_iter = param_max_sched_extend_regions_iters;
 
   max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
 
@@ -2224,7 +2224,7 @@ new_ready (rtx_insn *next, ds_t ts)
          || (IS_SPECULATIVE_INSN (next)
              && ((recog_memoized (next) >= 0
                   && min_insn_conflict_delay (curr_state, next, next)
-                   > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
+                  > param_max_sched_insn_conflict_delay)
                   || IS_SPECULATION_CHECK_P (next)
                  || !check_live (next, INSN_BB (next))
                  || (not_ex_free = !is_exception_free (next, INSN_BB (next),
@@ -3188,8 +3188,9 @@ schedule_region (int rgn)
          f = find_fallthru_edge (last_bb->succs);
          if (f
              && (!f->probability.initialized_p ()
-                 || f->probability.to_reg_br_prob_base () * 100 / REG_BR_PROB_BASE >=
-                    PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF)))
+                 || (f->probability.to_reg_br_prob_base () * 100
+                     / REG_BR_PROB_BASE
+                     >= param_sched_state_edge_prob_cutoff)))
            {
              memcpy (bb_state[f->dest->index], curr_state,
                      dfa_state_size);
@@ -3229,7 +3230,7 @@ schedule_region (int rgn)
 void
 sched_rgn_init (bool single_blocks_p)
 {
-  min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
+  min_spec_prob = ((param_min_spec_prob * REG_BR_PROB_BASE)
                    / 100);
 
   nr_inter = 0;
index 8a1d41473b9caf287f4a9eda03171e6c7b7bdb1f..d6513b136a0c132d835451dc87dd7471fe42656b 100644 (file)
@@ -6012,7 +6012,7 @@ make_region_from_loop (class loop *loop)
   basic_block preheader_block;
 
   if (loop->num_nodes
-      > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
+      > (unsigned) param_max_pipeline_region_blocks)
     return -1;
 
   /* Don't pipeline loops whose latch belongs to some of its inner loops.  */
@@ -6021,7 +6021,7 @@ make_region_from_loop (class loop *loop)
       return -1;
 
   loop->ninsns = num_loop_insns (loop);
-  if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
+  if ((int) loop->ninsns > param_max_pipeline_region_insns)
     return -1;
 
   loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
index ddc76a73ede0dd719c2e013c3b0ad0155133aa8c..531b0129cd9a4f104ef52d512b17e7ef799ec118 100644 (file)
@@ -969,7 +969,7 @@ extern bool preheader_removed;
 /* Software lookahead window size.
    According to the results in Nakatani and Ebcioglu [1993], window size of 16
    is enough to extract most ILP in integer code.  */
-#define MAX_WS (PARAM_VALUE (PARAM_SELSCHED_MAX_LOOKAHEAD))
+#define MAX_WS (param_selsched_max_lookahead)
 
 extern regset sel_all_regs;
 \f
index 652784e79ed00fe4de7e872ec150cf18f24cd1eb..2ecc06d0f173a11642cddcdd866e56c348afc28e 100644 (file)
@@ -3454,7 +3454,7 @@ process_pipelined_exprs (av_set_t *av_ptr)
   FOR_EACH_EXPR_1 (expr, si, av_ptr)
     {
       if (EXPR_SCHED_TIMES (expr)
-         >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
+         >= param_selsched_max_sched_times)
        av_set_iter_remove (&si);
     }
 }
@@ -6806,7 +6806,7 @@ sel_setup_region_sched_flags (void)
                   && (flag_sel_sched_pipelining != 0)
                  && current_loop_nest != NULL
                  && loop_has_exit_edges (current_loop_nest));
-  max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
+  max_insns_to_rename = param_selsched_insns_to_rename;
   max_ws = MAX_WS;
 }
 
index 2dc92c34b17317be88af5937e6205d8c3b8bd279..e612f85c15ad77f1182680778e70d1d06828e44c 100644 (file)
@@ -775,7 +775,7 @@ try_shrink_wrapping (edge *entry_edge, rtx_insn *prologue_seq)
   vec.quick_push (pro);
 
   unsigned max_grow_size = get_uncond_jump_length ();
-  max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
+  max_grow_size *= param_max_grow_copy_bb_insns;
 
   while (!vec.is_empty () && pro != entry)
     {
index fee4cc271cd86a2206e18da2c2db24c5e04b0f4f..2f26e468b8abf285b61eb0868cfdf592feb530fe 100644 (file)
@@ -2274,17 +2274,18 @@ default_max_noce_ifcvt_seq_cost (edge e)
 {
   bool predictable_p = predictable_edge_p (e);
 
-  enum compiler_param param
-    = (predictable_p
-       ? PARAM_MAX_RTL_IF_CONVERSION_PREDICTABLE_COST
-       : PARAM_MAX_RTL_IF_CONVERSION_UNPREDICTABLE_COST);
-
-  /* If we have a parameter set, use that, otherwise take a guess using
-     BRANCH_COST.  */
-  if (global_options_set.x_param_values[param])
-    return PARAM_VALUE (param);
+  if (predictable_p)
+    {
+      if (global_options_set.x_param_max_rtl_if_conversion_predictable_cost)
+       return param_max_rtl_if_conversion_predictable_cost;
+    }
   else
-    return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
+    {
+      if (global_options_set.x_param_max_rtl_if_conversion_unpredictable_cost)
+       return param_max_rtl_if_conversion_unpredictable_cost;
+    }
+
+  return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
 }
 
 /* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION.  */
index 18fea1c3dd1cbecd195ee2a5656a322b4e7feb5f..a836646f8a116765ba9dfa250e859298f7328a91 100644 (file)
@@ -679,7 +679,7 @@ print_version (FILE *file, const char *indent, bool show_global_state)
       fprintf (file,
               file == stderr ? _(fmt4) : fmt4,
               indent, *indent != 0 ? " " : "",
-              PARAM_VALUE (GGC_MIN_EXPAND), PARAM_VALUE (GGC_MIN_HEAPSIZE));
+              param_ggc_min_expand, param_ggc_min_heapsize);
 
       print_plugins_versions (file, indent);
     }
@@ -1863,7 +1863,7 @@ process_options (void)
 
   if (flag_checking >= 2)
     hash_table_sanitize_eq_limit
-      = PARAM_VALUE (PARAM_HASH_TABLE_VERIFICATION_LIMIT);
+      = param_hash_table_verification_limit;
 
   /* Please don't change global_options after this point, those changes won't
      be reflected in optimization_{default,current}_node.  */
index 52f07c56f03b901c1c86fb9698c859cd9b32b82b..02203923369423946618f342433fcaa6b722a155 100644 (file)
@@ -276,13 +276,13 @@ tail_duplicate (void)
   initialize_original_copy_tables ();
 
   if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
-    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+    probability_cutoff = param_tracer_min_branch_probability_feedback;
   else
-    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+    probability_cutoff = param_tracer_min_branch_probability;
   probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
 
   branch_ratio_cutoff =
-    (REG_BR_PROB_BASE / 100 * PARAM_VALUE (TRACER_MIN_BRANCH_RATIO));
+    (REG_BR_PROB_BASE / 100 * param_tracer_min_branch_ratio);
 
   FOR_EACH_BB_FN (bb, cfun)
     {
@@ -296,11 +296,11 @@ tail_duplicate (void)
     }
 
   if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
-    cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE_FEEDBACK);
+    cover_insns = param_tracer_dynamic_coverage_feedback;
   else
-    cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE);
+    cover_insns = param_tracer_dynamic_coverage;
   cover_insns = (weighted_insns * cover_insns + 50) / 100;
-  max_dup_insns = (ninsns * PARAM_VALUE (TRACER_MAX_CODE_GROWTH) + 50) / 100;
+  max_dup_insns = (ninsns * param_tracer_max_code_growth + 50) / 100;
 
   while (traced_insns < cover_insns && nduplicated < max_dup_insns
          && !heap.empty ())
index 2e7752865406193ddd51a610bf67b2414171b8c0..4a0f6e7dee97135a5a973f480fde318f8c501a28 100644 (file)
@@ -1108,7 +1108,7 @@ tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
          && TYPE_SIZE_UNIT (type) != NULL
          && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
          && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
-             < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
+             < param_tm_max_aggregate_size)
          /* We must be able to copy this type normally.  I.e., no
             special constructors and the like.  */
          && !TREE_ADDRESSABLE (type))
index 5ed62260993caaa3648420323ee1edc15fab9d58..8d5fce30289b0bff2d3c620ecb72fe281b1a922d 100644 (file)
@@ -333,9 +333,9 @@ chrec_fold_plus_1 (enum tree_code code, tree type,
            int size = 0;
            if ((tree_contains_chrecs (op0, &size)
                 || tree_contains_chrecs (op1, &size))
-               && size < PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
+               && size < param_scev_max_expr_size)
              return build2 (code, type, op0, op1);
-           else if (size < PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
+           else if (size < param_scev_max_expr_size)
              {
                if (code == POINTER_PLUS_EXPR)
                  return fold_build_pointer_plus (fold_convert (type, op0),
index 7f75b7e3afeebd33fda493d942ff7f7160102d39..e9fa4ae69c3591dc229ae83a07fd2d57efb26719 100644 (file)
@@ -836,7 +836,7 @@ split_constant_offset (tree exp, tree *var, tree *off,
 void
 split_constant_offset (tree exp, tree *var, tree *off)
 {
-  unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
+  unsigned limit = param_ssa_name_def_chain_limit;
   static hash_map<tree, std::pair<tree, tree> > *cache;
   if (!cache)
     cache = new hash_map<tree, std::pair<tree, tree> > (37);
@@ -4917,7 +4917,7 @@ compute_all_dependences (vec<data_reference_p> datarefs,
   unsigned int i, j;
 
   if ((int) datarefs.length ()
-      > PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+      > param_loop_max_datarefs_for_datadeps)
     {
       struct data_dependence_relation *ddr;
 
index df9046a301478f6769431a03f4d5567689308585..09560198539477274b5b8ea92e092712be1f7622 100644 (file)
@@ -125,7 +125,7 @@ along with GCC; see the file COPYING3.  If not see
 /* Only handle PHIs with no more arguments unless we are asked to by
    simd pragma.  */
 #define MAX_PHI_ARG_NUM \
-  ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))
+  ((unsigned) param_max_tree_if_conversion_phi_args)
 
 /* True if we've converted a statement that was only executed when some
    condition C was true, and if for correctness we need to predicate the
index 63274f746796d26db105293d52ef8dc53280e3b0..b05f05d446a61da35ea1cf0d3bce7f24aac46aac 100644 (file)
@@ -1812,7 +1812,7 @@ remap_gimple_stmt (gimple *stmt, copy_body_data *id)
          /* If the inlined function has too many debug markers,
             don't copy them.  */
          if (id->src_cfun->debug_marker_count
-             > PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
+             > param_max_debug_marker_count)
            return stmts;
 
          gdebug *copy = as_a <gdebug *> (gimple_copy (stmt));
index 81784866ad11956dc04a15f79aba344b5c0d9b4e..9930daaafa995e0dd1367e2d2bedbda0c6dac158 100644 (file)
@@ -119,7 +119,7 @@ along with GCC; see the file COPYING3.  If not see
 
 
 #define MAX_DATAREFS_NUM \
-       ((unsigned) PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+       ((unsigned) param_loop_max_datarefs_for_datadeps)
 
 /* Threshold controlling number of distributed partitions.  Given it may
    be unnecessary if a memory stream cost model is invented in the future,
index ae880e151db6667a207fcd6a738c36dbd19a4249..1a35c7dbdc3a59ea9942f944f1d6052ef6ee877e 100644 (file)
@@ -890,7 +890,7 @@ parloops_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info
 
 /* Minimal number of iterations of a loop that should be executed in each
    thread.  */
-#define MIN_PER_THREAD PARAM_VALUE (PARAM_PARLOOPS_MIN_PER_THREAD)
+#define MIN_PER_THREAD param_parloops_min_per_thread
 
 /* Element of the hashtable, representing a
    reduction in the current loop.  */
@@ -2875,25 +2875,23 @@ create_parallel_loop (class loop *loop, tree loop_fn, tree data,
   else
     {
       t = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);
-      int chunk_size = PARAM_VALUE (PARAM_PARLOOPS_CHUNK_SIZE);
-      enum PARAM_PARLOOPS_SCHEDULE_KIND schedule_type \
-       = (enum PARAM_PARLOOPS_SCHEDULE_KIND) PARAM_VALUE (PARAM_PARLOOPS_SCHEDULE);
-      switch (schedule_type)
+      int chunk_size = param_parloops_chunk_size;
+      switch (param_parloops_schedule)
        {
-       case PARAM_PARLOOPS_SCHEDULE_KIND_static:
+       case PARLOOPS_SCHEDULE_STATIC:
          OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC;
          break;
-       case PARAM_PARLOOPS_SCHEDULE_KIND_dynamic:
+       case PARLOOPS_SCHEDULE_DYNAMIC:
          OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
          break;
-       case PARAM_PARLOOPS_SCHEDULE_KIND_guided:
+       case PARLOOPS_SCHEDULE_GUIDED:
          OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_GUIDED;
          break;
-       case PARAM_PARLOOPS_SCHEDULE_KIND_auto:
+       case PARLOOPS_SCHEDULE_AUTO:
          OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_AUTO;
          chunk_size = 0;
          break;
-       case PARAM_PARLOOPS_SCHEDULE_KIND_runtime:
+       case PARLOOPS_SCHEDULE_RUNTIME:
          OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_RUNTIME;
          chunk_size = 0;
          break;
index 299c45e287bf8f16e6dfdd422f1a149c8fbe6d70..3fe2a6dcb0ae22f53750a937c03d2a78c645b7af 100644 (file)
@@ -2194,7 +2194,7 @@ determine_unroll_factor (vec<chain_p> chains)
 {
   chain_p chain;
   unsigned factor = 1, af, nfactor, i;
-  unsigned max = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+  unsigned max = param_max_unroll_times;
 
   FOR_EACH_VEC_ELT (chains, i, chain)
     {
index 50b2700834ec6514e0cdf78a2858f4ade1f1a024..ed5d5663a3a85bcacbce6ebf66a98064297a3c4e 100644 (file)
@@ -1149,7 +1149,7 @@ tail_recurse:
        return t_false;
 
       /* Give up if the path is longer than the MAX that we allow.  */
-      if (limit > PARAM_VALUE (PARAM_SCEV_MAX_EXPR_COMPLEXITY))
+      if (limit > param_scev_max_expr_complexity)
        {
          *evolution_of_loop = chrec_dont_know;
          return t_dont_know;
@@ -2623,7 +2623,7 @@ instantiate_scev_r (edge instantiate_below,
                    bool *fold_conversions, int size_expr)
 {
   /* Give up if the expression is larger than the MAX that we allow.  */
-  if (size_expr++ > PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
+  if (size_expr++ > param_scev_max_expr_size)
     return chrec_dont_know;
 
   if (chrec == NULL_TREE
index 8bcfef42e35c9fef79975a537ab8860dc0ea83cd..d739aed9e446d0d099f470d37af04dec5cda054b 100644 (file)
@@ -2787,16 +2787,21 @@ analyze_all_variable_accesses (void)
   unsigned i;
   bool optimize_speed_p = !optimize_function_for_size_p (cfun);
 
-  enum compiler_param param = optimize_speed_p
-                       ? PARAM_SRA_MAX_SCALARIZATION_SIZE_SPEED
-                       : PARAM_SRA_MAX_SCALARIZATION_SIZE_SIZE;
-
   /* If the user didn't set PARAM_SRA_MAX_SCALARIZATION_SIZE_<...>,
      fall back to a target default.  */
   unsigned HOST_WIDE_INT max_scalarization_size
-    = global_options_set.x_param_values[param]
-      ? PARAM_VALUE (param)
-      : get_move_ratio (optimize_speed_p) * UNITS_PER_WORD;
+    = get_move_ratio (optimize_speed_p) * UNITS_PER_WORD;
+
+  if (optimize_speed_p)
+    {
+      if (global_options_set.x_param_sra_max_scalarization_size_speed)
+       max_scalarization_size = param_sra_max_scalarization_size_speed;
+    }
+  else
+    {
+      if (global_options_set.x_param_sra_max_scalarization_size_size)
+       max_scalarization_size = param_sra_max_scalarization_size_size;
+    }
 
   max_scalarization_size *= BITS_PER_UNIT;
 
index 567aef8bc2694dc684b744a96ebedec27f494b33..335787e0517c750e6cf82820a67c3d194f3f0adf 100644 (file)
@@ -2195,7 +2195,7 @@ fold_builtin_alloca_with_align (gimple *stmt)
   size = tree_to_uhwi (arg);
 
   /* Heuristic: don't fold large allocas.  */
-  threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
+  threshold = (unsigned HOST_WIDE_INT)param_large_stack_frame;
   /* In case the alloca is located at function entry, it has the same lifetime
      as a declared array, so we allow a larger size.  */
   block = gimple_block (stmt);
index 21a15eef6906e58632a2a52d84cd9ac48b838210..1b060d9e408f6f75af9906f3af49c2042bf98f55 100644 (file)
@@ -238,7 +238,7 @@ setup_live_bytes_from_ref (ao_ref *ref, sbitmap live_bytes)
   if (valid_ao_ref_for_dse (ref)
       && ref->size.is_constant (&const_size)
       && (const_size / BITS_PER_UNIT
-         <= PARAM_VALUE (PARAM_DSE_MAX_OBJECT_SIZE)))
+         <= param_dse_max_object_size))
     {
       bitmap_clear (live_bytes);
       bitmap_set_range (live_bytes, 0, const_size / BITS_PER_UNIT);
@@ -611,7 +611,7 @@ dse_optimize_redundant_stores (gimple *stmt)
   FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar)
     {
       /* Limit stmt walking.  */
-      if (++cnt > PARAM_VALUE (PARAM_DSE_MAX_ALIAS_QUERIES_PER_STORE))
+      if (++cnt > param_dse_max_alias_queries_per_store)
        BREAK_FROM_IMM_USE_STMT (ui);
 
       /* If USE_STMT stores 0 into one or more of the same locations
@@ -704,7 +704,7 @@ dse_classify_store (ao_ref *ref, gimple *stmt,
       FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar)
        {
          /* Limit stmt walking.  */
-         if (++cnt > PARAM_VALUE (PARAM_DSE_MAX_ALIAS_QUERIES_PER_STORE))
+         if (++cnt > param_dse_max_alias_queries_per_store)
            {
              fail = true;
              BREAK_FROM_IMM_USE_STMT (ui);
@@ -853,7 +853,7 @@ class dse_dom_walker : public dom_walker
 public:
   dse_dom_walker (cdi_direction direction)
     : dom_walker (direction),
-    m_live_bytes (PARAM_VALUE (PARAM_DSE_MAX_OBJECT_SIZE)),
+    m_live_bytes (param_dse_max_object_size),
     m_byte_tracking_enabled (false) {}
 
   virtual edge before_dom_children (basic_block);
index 21c1b0e8918219252740a10b83d99dc4da05fb08..fa3bc0a437773f9c6aa3247c1b7b60884fb1cadf 100644 (file)
@@ -565,9 +565,9 @@ ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
          tree t1, t2;
          gimple_stmt_iterator gsi;
          bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
-         if (PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT) != -1)
+         if (param_logical_op_non_short_circuit != -1)
            logical_op_non_short_circuit
-             = PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT);
+             = param_logical_op_non_short_circuit;
          if (!logical_op_non_short_circuit || flag_sanitize_coverage)
            return false;
          /* Only do this optimization if the inner bb contains only the conditional. */
index 5a30a296d5e09170200010677e657d266f365a56..fd6d74d589136f264e5108a1081f8bc498d3f603 100644 (file)
@@ -368,7 +368,7 @@ ch_base::copy_headers (function *fun)
 
   FOR_EACH_LOOP (loop, 0)
     {
-      int initial_limit = PARAM_VALUE (PARAM_MAX_LOOP_HEADER_INSNS);
+      int initial_limit = param_max_loop_header_insns;
       int remaining_limit = initial_limit;
       if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
index 78664188c4594ece492e6826f8ec5df08da499f3..cd1aa563e21b1df45c1a2e04b496f2e0e36dabe4 100644 (file)
@@ -230,7 +230,7 @@ static bool ref_indep_loop_p (class loop *, im_mem_ref *);
 static bool ref_always_accessed_p (class loop *, im_mem_ref *, bool);
 
 /* Minimum cost of an expensive expression.  */
-#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
+#define LIM_EXPENSIVE ((unsigned) param_lim_expensive)
 
 /* The outermost loop for which execution of the header guarantees that the
    block will be executed.  */
index c505f85f91a997a50b840c8d185eec0894fb1f66..7b3524312259d8657be54ba479d706084ed93d86 100644 (file)
@@ -739,7 +739,7 @@ try_unroll_loop_completely (class loop *loop,
     return false;
 
   if (!loop->unroll
-      && n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
+      && n_unroll > (unsigned) param_max_completely_peel_times)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Not unrolling loop %d "
@@ -780,7 +780,7 @@ try_unroll_loop_completely (class loop *loop,
          bool large
            = tree_estimate_loop_size
                (loop, remove_exit ? exit : NULL, edge_to_cancel, &size,
-                PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
+                param_max_completely_peeled_insns);
          if (large)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
@@ -864,7 +864,7 @@ try_unroll_loop_completely (class loop *loop,
             blow the branch predictor tables.  Limit number of
             branches on the hot path through the peeled sequence.  */
          else if (size.num_branches_on_hot_path * (int)n_unroll
-                  > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
+                  > param_max_peel_branches)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Not unrolling loop %d: "
@@ -874,7 +874,7 @@ try_unroll_loop_completely (class loop *loop,
              return false;
            }
          else if (unr_insns
-                  > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
+                  > (unsigned) param_max_completely_peeled_insns)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Not unrolling loop %d: "
@@ -998,7 +998,7 @@ try_peel_loop (class loop *loop,
   int peeled_size;
 
   if (!flag_peel_loops
-      || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0
+      || param_max_peel_times <= 0
       || !peeled_loops)
     return false;
 
@@ -1057,7 +1057,7 @@ try_peel_loop (class loop *loop,
   /* We want to peel estimated number of iterations + 1 (so we never
      enter the loop on quick path).  Check against PARAM_MAX_PEEL_TIMES
      and be sure to avoid overflows.  */
-  if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
+  if (npeel > param_max_peel_times - 1)
     {
       if (dump_file)
        fprintf (dump_file, "Not peeling: rolls too much "
@@ -1068,9 +1068,9 @@ try_peel_loop (class loop *loop,
 
   /* Check peeled loops size.  */
   tree_estimate_loop_size (loop, exit, NULL, &size,
-                          PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
+                          param_max_peeled_insns);
   if ((peeled_size = estimated_peeled_sequence_size (&size, (int) npeel))
-      > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
+      > param_max_peeled_insns)
     {
       if (dump_file)
        fprintf (dump_file, "Not peeling: peeled sequence size is too large "
@@ -1502,7 +1502,7 @@ tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
         BITMAP_FREE (loop_closed_ssa_invalidated);
     }
   while (changed
-        && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));
+        && ++iteration <= param_max_unroll_iterations);
 
   BITMAP_FREE (father_bbs);
 
index ceaa327e40810ba875472103512eb23be0c65473..78efd62a9a5afc647fdae5a7d627a81c5ab19463 100644 (file)
@@ -152,8 +152,8 @@ avg_loop_niter (class loop *loop)
     {
       niter = likely_max_stmt_executions_int (loop);
 
-      if (niter == -1 || niter > PARAM_VALUE (PARAM_AVG_LOOP_NITER))
-       return PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+      if (niter == -1 || niter > param_avg_loop_niter)
+       return param_avg_loop_niter;
     }
 
   return niter;
@@ -716,19 +716,19 @@ struct iv_ca_delta
 /* Bound on number of candidates below that all candidates are considered.  */
 
 #define CONSIDER_ALL_CANDIDATES_BOUND \
-  ((unsigned) PARAM_VALUE (PARAM_IV_CONSIDER_ALL_CANDIDATES_BOUND))
+  ((unsigned) param_iv_consider_all_candidates_bound)
 
 /* If there are more iv occurrences, we just give up (it is quite unlikely that
    optimizing such a loop would help, and it would take ages).  */
 
 #define MAX_CONSIDERED_GROUPS \
-  ((unsigned) PARAM_VALUE (PARAM_IV_MAX_CONSIDERED_USES))
+  ((unsigned) param_iv_max_considered_uses)
 
 /* If there are at most this number of ivs in the set, try removing unnecessary
    ivs from the set always.  */
 
 #define ALWAYS_PRUNE_CAND_SET_BOUND \
-  ((unsigned) PARAM_VALUE (PARAM_IV_ALWAYS_PRUNE_CAND_SET_BOUND))
+  ((unsigned) param_iv_always_prune_cand_set_bound)
 
 /* The list of trees for that the decl_rtl field must be reset is stored
    here.  */
index 6a1bbaae573e0cfdb3d929f423d849001628ede1..06f9016004739d8df75f2b3c47dea7a7cae99fd0 100644 (file)
@@ -984,7 +984,7 @@ can_unroll_loop_p (class loop *loop, unsigned factor,
 
   /* The final loop should be small enough.  */
   if (tree_num_loop_insns (loop, &eni_size_weights) * factor
-      > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS))
+      > (unsigned) param_max_unrolled_insns)
     return false;
 
   return true;
index db666f019808850ed3a4aeef1a454a7ae2c65ef2..fe24a70451d350bd69fb4821b4cfbd7c78035bde 100644 (file)
@@ -2863,7 +2863,7 @@ finite_loop_p (class loop *loop)
 /* Bound on the number of iterations we try to evaluate.  */
 
 #define MAX_ITERATIONS_TO_TRACK \
-  ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
+  ((unsigned) param_max_iterations_to_track)
 
 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
    result by a chain of operations such that all but exactly one of their
index 04ff5244b69e03e10d25d05f80cebf3831599c8d..cb22657c309364c9c73029362b1bd9757dea299b 100644 (file)
@@ -167,7 +167,7 @@ along with GCC; see the file COPYING3.  If not see
    of cache hierarchy).  */
 
 #ifndef PREFETCH_BLOCK
-#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
+#define PREFETCH_BLOCK param_l1_cache_line_size
 #endif
 
 /* Do we have a forward hardware sequential prefetching?  */
@@ -191,8 +191,8 @@ along with GCC; see the file COPYING3.  If not see
 #define ACCEPTABLE_MISS_RATE 50
 #endif
 
-#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
-#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
+#define L1_CACHE_SIZE_BYTES ((unsigned) (param_l1_cache_size * 1024))
+#define L2_CACHE_SIZE_BYTES ((unsigned) (param_l2_cache_size * 1024))
 
 /* We consider a memory access nontemporal if it is not reused sooner than
    after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
@@ -993,7 +993,8 @@ static bool
 should_issue_prefetch_p (struct mem_ref *ref)
 {
   /* Do we want to issue prefetches for non-constant strides?  */
-  if (!cst_and_fits_in_hwi (ref->group->step) && PREFETCH_DYNAMIC_STRIDES == 0)
+  if (!cst_and_fits_in_hwi (ref->group->step)
+      && param_prefetch_dynamic_strides == 0)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
@@ -1008,14 +1009,14 @@ should_issue_prefetch_p (struct mem_ref *ref)
      range.  */
   if (cst_and_fits_in_hwi (ref->group->step)
       && abs_hwi (int_cst_value (ref->group->step))
-         < (HOST_WIDE_INT) PREFETCH_MINIMUM_STRIDE)
+         < (HOST_WIDE_INT) param_prefetch_minimum_stride)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Step for reference %u:%u (" HOST_WIDE_INT_PRINT_DEC
                 ") is less than the mininum required stride of %d\n",
                 ref->group->uid, ref->uid, int_cst_value (ref->group->step),
-                PREFETCH_MINIMUM_STRIDE);
+                param_prefetch_minimum_stride);
       return false;
     }
 
@@ -1055,8 +1056,9 @@ schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
   struct mem_ref *ref;
   bool any = false;
 
-  /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
-  remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
+  /* At most param_simultaneous_prefetches should be running
+     at the same time.  */
+  remaining_prefetch_slots = param_simultaneous_prefetches;
 
   /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
      AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
@@ -1406,7 +1408,7 @@ determine_unroll_factor (class loop *loop, struct mem_ref_group *refs,
      us from unrolling the loops too many times in cases where we only expect
      gains from better scheduling and decreasing loop overhead, which is not
      the case here.  */
-  upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
+  upper_bound = param_max_unrolled_insns / ninsns;
 
   /* If we unrolled the loop more times than it iterates, the unrolled version
      of the loop would be never entered.  */
@@ -1459,7 +1461,7 @@ volume_of_references (struct mem_ref_group *refs)
           accessed in each iteration.  TODO -- in the latter case, we should
           take the size of the reference into account, rounding it up on cache
           line size multiple.  */
-       volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
+       volume += param_l1_cache_line_size / ref->prefetch_mod;
       }
   return volume;
 }
@@ -1512,7 +1514,7 @@ add_subscript_strides (tree access_fn, unsigned stride,
       if (tree_fits_shwi_p (step))
        astep = tree_to_shwi (step);
       else
-       astep = L1_CACHE_LINE_SIZE;
+       astep = param_l1_cache_line_size;
 
       strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
 
@@ -1562,7 +1564,7 @@ self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
          if (tree_fits_uhwi_p (stride))
            astride = tree_to_uhwi (stride);
          else
-           astride = L1_CACHE_LINE_SIZE;
+           astride = param_l1_cache_line_size;
 
          ref = TREE_OPERAND (ref, 0);
        }
@@ -1578,7 +1580,7 @@ self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
 
       s = strides[i] < 0 ?  -strides[i] : strides[i];
 
-      if (s < (unsigned) L1_CACHE_LINE_SIZE
+      if (s < (unsigned) param_l1_cache_line_size
          && (loop_sizes[i]
              > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
        {
@@ -1825,7 +1827,7 @@ mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
      should account for cache misses.  */
   insn_to_mem_ratio = ninsns / mem_ref_count;
 
-  if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
+  if (insn_to_mem_ratio < param_prefetch_min_insn_to_mem_ratio)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
         fprintf (dump_file,
@@ -1862,7 +1864,7 @@ insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
      and the exit branches will get eliminated), so it might be better to use
      tree_estimate_loop_size + estimated_unrolled_size.  */
   insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
-  if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
+  if (insn_to_prefetch_ratio < param_min_insn_to_prefetch_ratio)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
         fprintf (dump_file,
@@ -1902,7 +1904,7 @@ loop_prefetch_arrays (class loop *loop)
   if (time == 0)
     return false;
 
-  ahead = (PREFETCH_LATENCY + time - 1) / time;
+  ahead = (param_prefetch_latency + time - 1) / time;
   est_niter = estimated_stmt_executions_int (loop);
   if (est_niter == -1)
     est_niter = likely_max_stmt_executions_int (loop);
@@ -1998,17 +2000,19 @@ tree_ssa_prefetch_arrays (void)
     {
       fprintf (dump_file, "Prefetching parameters:\n");
       fprintf (dump_file, "    simultaneous prefetches: %d\n",
-              SIMULTANEOUS_PREFETCHES);
-      fprintf (dump_file, "    prefetch latency: %d\n", PREFETCH_LATENCY);
+              param_simultaneous_prefetches);
+      fprintf (dump_file, "    prefetch latency: %d\n", param_prefetch_latency);
       fprintf (dump_file, "    prefetch block size: %d\n", PREFETCH_BLOCK);
       fprintf (dump_file, "    L1 cache size: %d lines, %d kB\n",
-              L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
-      fprintf (dump_file, "    L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
-      fprintf (dump_file, "    L2 cache size: %d kB\n", L2_CACHE_SIZE);
+              L1_CACHE_SIZE_BYTES / param_l1_cache_line_size,
+              param_l1_cache_size);
+      fprintf (dump_file, "    L1 cache line size: %d\n",
+              param_l1_cache_line_size);
+      fprintf (dump_file, "    L2 cache size: %d kB\n", param_l2_cache_size);
       fprintf (dump_file, "    min insn-to-prefetch ratio: %d \n",
-              MIN_INSN_TO_PREFETCH_RATIO);
+              param_min_insn_to_prefetch_ratio);
       fprintf (dump_file, "    min insn-to-mem ratio: %d \n",
-              PREFETCH_MIN_INSN_TO_MEM_RATIO);
+              param_prefetch_min_insn_to_mem_ratio);
       fprintf (dump_file, "\n");
     }
 
index 6302d044e092daabe4502a66928920ea0dde5713..57dbc8769dc3953f0d1cd16f8c4dd98ac8f07ea8 100644 (file)
@@ -1404,15 +1404,14 @@ get_cond_branch_to_split_loop (struct loop *loop, gcond *cond)
   profile_probability prob = invar_branch->probability;
   if (prob.reliable_p ())
     {
-      int thres = PARAM_VALUE (PARAM_MIN_LOOP_COND_SPLIT_PROB);
+      int thres = param_min_loop_cond_split_prob;
 
       if (prob < profile_probability::always ().apply_scale (thres, 100))
        return NULL;
     }
 
   /* Add a threshold for increased code size to disable loop split.  */
-  if (compute_added_num_insns (loop, invar_branch)
-      > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
+  if (compute_added_num_insns (loop, invar_branch) > param_max_peeled_insns)
     return NULL;
 
   return invar_branch;
index e60019db9466e11c4f25d6dfc6fd9dd543498a8d..4e3aa7c41b7be6235d73a364c665bcfee2662577 100644 (file)
@@ -288,7 +288,7 @@ tree_unswitch_single_loop (class loop *loop, int num)
 
       /* The loop should not be too large, to limit code growth. */
       if (tree_num_loop_insns (loop, &eni_size_weights)
-         > (unsigned) PARAM_VALUE (PARAM_MAX_UNSWITCH_INSNS))
+         > (unsigned) param_max_unswitch_insns)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, ";; Not unswitching, loop too big\n");
@@ -323,7 +323,7 @@ tree_unswitch_single_loop (class loop *loop, int num)
       if (i == loop->num_nodes)
        {
          if (dump_file
-             && num > PARAM_VALUE (PARAM_MAX_UNSWITCH_LEVEL)
+             && num > param_max_unswitch_level
              && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, ";; Not unswitching anymore, hit max level\n");
 
@@ -352,7 +352,7 @@ tree_unswitch_single_loop (class loop *loop, int num)
          changed = true;
        }
       /* Do not unswitch too much.  */
-      else if (num > PARAM_VALUE (PARAM_MAX_UNSWITCH_LEVEL))
+      else if (num > param_max_unswitch_level)
        {
          i++;
          continue;
index 013ef93e7ad00c40b357db094c4824b8e4c15bb3..8b5f3149cbc85c5f844eb9366bd38f0a9e5f39d2 100644 (file)
@@ -1975,7 +1975,7 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
       && !HONOR_SIGNED_ZEROS (mode))
     {
       unsigned int max_depth = speed_p
-                               ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
+                               ? param_max_pow_sqrt_depth
                                : 2;
 
       tree expand_with_sqrts
@@ -3089,7 +3089,7 @@ convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2,
   bool check_defer
     = (state->m_deferring_p
        && (tree_to_shwi (TYPE_SIZE (type))
-          <= PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS)));
+          <= param_avoid_fma_max_bits));
   bool defer = check_defer;
   bool seen_negate_p = false;
   /* Make sure that the multiplication statement becomes dead after
@@ -3744,7 +3744,7 @@ math_opts_dom_walker::after_dom_children (basic_block bb)
 {
   gimple_stmt_iterator gsi;
 
-  fma_deferring_state fma_state (PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS) > 0);
+  fma_deferring_state fma_state (param_avoid_fma_max_bits > 0);
 
   for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
     {
index 38bb8b241558bcbc59ad84e43c6eb84c18d65e35..43990b796448e99e60591ac3ccdba5a13137d1fa 100644 (file)
@@ -2469,7 +2469,7 @@ cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
 
   /* If either vectorization or if-conversion is disabled then do
      not sink any stores.  */
-  if (MAX_STORES_TO_SINK == 0
+  if (param_max_stores_to_sink == 0
       || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
       || !flag_tree_loop_if_convert)
     return false;
@@ -2528,7 +2528,7 @@ cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
 
   /* No pairs of stores found.  */
   if (!then_stores.length ()
-      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
+      || then_stores.length () > (unsigned) param_max_stores_to_sink)
     {
       free_data_refs (then_datarefs);
       free_data_refs (else_datarefs);
@@ -2658,7 +2658,7 @@ static void
 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
                      basic_block bb2, basic_block bb3)
 {
-  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
+  int param_align = param_l1_cache_line_size;
   unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
   gphi_iterator gsi;
 
@@ -2808,7 +2808,7 @@ static bool
 gate_hoist_loads (void)
 {
   return (flag_hoist_adjacent_loads == 1
-         && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
+         && param_l1_cache_line_size
          && HAVE_conditional_move);
 }
 
index 363dec6f4dd5f6b1943bc6af950e1d487255a10d..58a147036af900dc5f32fcf556dfe66274499fd8 100644 (file)
@@ -1156,7 +1156,7 @@ translate_vuse_through_block (vec<vn_reference_op_s> operands,
   if (gimple_bb (phi) != phiblock)
     return vuse;
 
-  unsigned int cnt = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+  unsigned int cnt = param_sccvn_max_alias_queries_per_access;
   use_oracle = ao_ref_init_from_vn_reference (&ref, set, type, operands);
 
   /* Use the alias-oracle to find either the PHI node in this block,
@@ -2235,7 +2235,7 @@ compute_partial_antic_aux (basic_block block,
   bitmap_set_t PA_OUT;
   edge e;
   edge_iterator ei;
-  unsigned long max_pa = PARAM_VALUE (PARAM_MAX_PARTIAL_ANTIC_LENGTH);
+  unsigned long max_pa = param_max_partial_antic_length;
 
   old_PA_IN = PA_OUT = NULL;
 
index 510dfd1e188f6d08483e05a67f5ac57924d8ea36..9e5b5290b55b83a5a739f21eb0e0ad09d52a91af 100644 (file)
@@ -4945,7 +4945,7 @@ static int
 get_reassociation_width (int ops_num, enum tree_code opc,
                         machine_mode mode)
 {
-  int param_width = PARAM_VALUE (PARAM_TREE_REASSOC_WIDTH);
+  int param_width = param_tree_reassoc_width;
   int width;
   int width_min;
   int cycles_best;
index f58dbe1504716e9c3d04d8d7f7819faca1d1839f..429f09d40177b9867241a25d45007e7cdfd4c7ce 100644 (file)
@@ -3074,7 +3074,7 @@ vn_reference_lookup_pieces (tree vuse, alias_set_type set, tree type,
       && vr1.vuse)
     {
       ao_ref r;
-      unsigned limit = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+      unsigned limit = param_sccvn_max_alias_queries_per_access;
       vn_walk_cb_data data (&vr1, NULL_TREE, NULL, kind, true);
       if (ao_ref_init_from_vn_reference (&r, set, type, vr1.operands))
        *vnresult =
@@ -3125,7 +3125,7 @@ vn_reference_lookup (tree op, tree vuse, vn_lookup_kind kind,
     {
       vn_reference_t wvnresult;
       ao_ref r;
-      unsigned limit = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+      unsigned limit = param_sccvn_max_alias_queries_per_access;
       /* Make sure to use a valueized reference if we valueized anything.
          Otherwise preserve the full reference for advanced TBAA.  */
       if (!valuezied_anything
@@ -6985,7 +6985,7 @@ do_rpo_vn (function *fn, edge entry, bitmap exit_bbs,
   if (iterate)
     {
       loop_p loop;
-      unsigned max_depth = PARAM_VALUE (PARAM_RPO_VN_MAX_LOOP_DEPTH);
+      unsigned max_depth = param_rpo_vn_max_loop_depth;
       FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
        if (loop_depth (loop) > max_depth)
          for (unsigned i = 2;
index 574bc30eee18600566e98bb6a607ef7bc7bcccef..9fb2f500f4696507da933f229542d62f16ed169f 100644 (file)
@@ -292,7 +292,7 @@ avail_exprs_stack::lookup_avail_expr (gimple *stmt, bool insert, bool tbaa_p)
         up the virtual use-def chain using walk_non_aliased_vuses.
         But don't do this when removing expressions from the hash.  */
       ao_ref ref;
-      unsigned limit = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+      unsigned limit = param_sccvn_max_alias_queries_per_access;
       if (!(vuse1 && vuse2
            && gimple_assign_single_p (stmt)
            && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
index 3bfad0f90ed3bcc860e45381682eca23df2b693d..cbad34b26de00ed27fac628ac7342618623286eb 100644 (file)
@@ -215,7 +215,7 @@ select_best_block (basic_block early_bb,
   /* Get the sinking threshold.  If the statement to be moved has memory
      operands, then increase the threshold by 7% as those are even more
      profitable to avoid, clamping at 100%.  */
-  threshold = PARAM_VALUE (PARAM_SINK_FREQUENCY_THRESHOLD);
+  threshold = param_sink_frequency_threshold;
   if (gimple_vuse (stmt) || gimple_vdef (stmt))
     {
       threshold += 7;
index 15c0c4576b71b747401dc432e9f3b9f72ed688ea..54f9bcf05f2a38bb43ff1e0f2c5af7af1add23f5 100644 (file)
@@ -528,7 +528,7 @@ static int
 new_stridx (tree exp)
 {
   int idx;
-  if (max_stridx >= PARAM_VALUE (PARAM_MAX_TRACKED_STRLENS))
+  if (max_stridx >= param_max_tracked_strlens)
     return 0;
   if (TREE_CODE (exp) == SSA_NAME)
     {
@@ -557,7 +557,7 @@ static int
 new_addr_stridx (tree exp)
 {
   int *pidx;
-  if (max_stridx >= PARAM_VALUE (PARAM_MAX_TRACKED_STRLENS))
+  if (max_stridx >= param_max_tracked_strlens)
     return 0;
   pidx = addr_stridxptr (exp);
   if (pidx != NULL)
@@ -1082,7 +1082,7 @@ get_range_strlen_dynamic (tree src, c_strlen_data *pdata,
   bitmap visited = NULL;
   tree maxbound = pdata->maxbound;
 
-  unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
+  unsigned limit = param_ssa_name_def_chain_limit;
   if (!get_range_strlen_dynamic (src, pdata, &visited, rvals, &limit))
     {
       /* On failure extend the length range to an impossible maximum
@@ -3972,7 +3972,7 @@ class ssa_name_limit_t
 
   ssa_name_limit_t ()
     : visited (NULL),
-    ssa_def_max (PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT)) { }
+    ssa_def_max (param_ssa_name_def_chain_limit) { }
 
   int next_ssa_name (tree);
 
index 6e7d4dbc5b332b6d1c779cfeb1b687c5d5dac0b6..74edcd4458f5f486413fd8b2758a864d423c2721 100644 (file)
@@ -5691,9 +5691,9 @@ push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack,
     return false;
 
   /* If the vector of fields is growing too big, bail out early.
-     Callers check for vec::length <= MAX_FIELDS_FOR_FIELD_SENSITIVE, make
+     Callers check for vec::length <= param_max_fields_for_field_sensitive, make
      sure this fails.  */
-  if (fieldstack->length () > MAX_FIELDS_FOR_FIELD_SENSITIVE)
+  if (fieldstack->length () > (unsigned)param_max_fields_for_field_sensitive)
     return false;
 
   for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
@@ -6114,7 +6114,7 @@ create_variable_info_for_1 (tree decl, const char *name, bool add_id,
   /* If we didn't end up collecting sub-variables create a full
      variable for the decl.  */
   if (fieldstack.length () == 0
-      || fieldstack.length () > MAX_FIELDS_FOR_FIELD_SENSITIVE)
+      || fieldstack.length () > (unsigned)param_max_fields_for_field_sensitive)
     {
       vi = new_var_info (decl, name, add_id);
       vi->offset = 0;
@@ -7179,7 +7179,7 @@ init_base_vars (void)
 static void
 init_alias_vars (void)
 {
-  use_field_sensitive = (MAX_FIELDS_FOR_FIELD_SENSITIVE > 1);
+  use_field_sensitive = (param_max_fields_for_field_sensitive > 1);
 
   bitmap_obstack_initialize (&pta_obstack);
   bitmap_obstack_initialize (&oldpta_obstack);
index cbd5a277b3968d66cb8a782c143c4f920b144cae..ddf7449d945ab4b729bc0ab939fa4a7e10d0d609 100644 (file)
@@ -1469,7 +1469,7 @@ find_clusters_1 (same_succ *same_succ)
   unsigned int i, j;
   bitmap_iterator bi, bj;
   int nr_comparisons;
-  int max_comparisons = PARAM_VALUE (PARAM_MAX_TAIL_MERGE_COMPARISONS);
+  int max_comparisons = param_max_tail_merge_comparisons;
 
   EXECUTE_IF_SET_IN_BITMAP (same_succ->bbs, 0, i, bi)
     {
@@ -1731,7 +1731,7 @@ tail_merge_optimize (unsigned int todo)
   int nr_bbs_removed;
   bool loop_entered = false;
   int iteration_nr = 0;
-  int max_iterations = PARAM_VALUE (PARAM_MAX_TAIL_MERGE_ITERATIONS);
+  int max_iterations = param_max_tail_merge_iterations;
 
   if (!flag_tree_tail_merge
       || max_iterations == 0)
index 1ff870ad00bc929a3e79a07ac63b4ee1ee476922..6d534647c60493da889c000309f63a85efdd019c 100644 (file)
@@ -157,7 +157,7 @@ thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
       return NULL;
 
   if (m_path.length () + 1
-      > (unsigned) PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
+      > (unsigned) param_max_fsm_thread_length)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
@@ -367,7 +367,7 @@ thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
      as in PR 78407 this leads to noticeable improvements.  */
   if (m_speed_p && (optimize_edge_for_speed_p (taken_edge) || contains_hot_bb))
     {
-      if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
+      if (n_insns >= param_max_fsm_thread_path_insns)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
@@ -397,9 +397,9 @@ thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
      optimizer would have done anyway, so an irreducible loop is not
      so bad.  */
   if (!threaded_multiway_branch && *creates_irreducible_loop
-      && (n_insns * (unsigned) PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
+      && (n_insns * (unsigned) param_fsm_scale_path_stmts
          > (m_path.length () *
-            (unsigned) PARAM_VALUE (PARAM_FSM_SCALE_PATH_BLOCKS))))
+            (unsigned) param_fsm_scale_path_blocks)))
 
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
@@ -419,8 +419,8 @@ thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
      So for that case, drastically reduce the number of statements
      we are allowed to copy.  */
   if (!(threaded_through_latch && threaded_multiway_branch)
-      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
-         >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS)))
+      && (n_insns * param_fsm_scale_path_stmts
+         >= param_max_jump_thread_duplication_stmts))
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
@@ -683,7 +683,7 @@ thread_jumps::fsm_find_control_statement_thread_paths (tree name)
 
   if (gimple_code (def_stmt) == GIMPLE_PHI
       && (gimple_phi_num_args (def_stmt)
-         >= (unsigned) PARAM_VALUE (PARAM_FSM_MAXIMUM_PHI_ARGUMENTS)))
+         >= (unsigned) param_fsm_maximum_phi_arguments))
     return;
 
   if (is_gimple_assign (def_stmt)
@@ -771,7 +771,7 @@ thread_jumps::find_jump_threads_backwards (basic_block bb, bool speed_p)
   m_visited_bbs.empty ();
   m_seen_loop_phi = false;
   m_speed_p = speed_p;
-  m_max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
+  m_max_threaded_paths = param_max_fsm_thread_paths;
 
   fsm_find_control_statement_thread_paths (name);
 }
index a5d87662159cad946587f80477ba62dead47cd0f..c43d7c5c39e980c6f438e06fcd7a90734687f79a 100644 (file)
@@ -234,7 +234,7 @@ record_temporary_equivalences_from_stmts_at_dest (edge e,
   gimple_stmt_iterator gsi;
   int max_stmt_count;
 
-  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
+  max_stmt_count = param_max_jump_thread_duplication_stmts;
 
   /* Walk through each statement in the block recording equivalences
      we discover.  Note any equivalences we discover are context
@@ -275,7 +275,7 @@ record_temporary_equivalences_from_stmts_at_dest (edge e,
             killed due to threading, grow the max count
             accordingly.  */
          if (max_stmt_count
-             == PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
+             == param_max_jump_thread_duplication_stmts)
            {
              max_stmt_count += estimate_threading_killed_stmts (e->dest);
              if (dump_file)
index fe8f8f0bc28f022c408f624ddb80d1b4cc2d2112..ae441067789abf1291333c0d8487fb7810debd85 100644 (file)
@@ -545,7 +545,7 @@ compute_control_dep_chain (basic_block bb, basic_block dep_bb,
   bool found_cd_chain = false;
   size_t cur_chain_len = 0;
 
-  if (*num_calls > PARAM_VALUE (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS))
+  if (*num_calls > param_uninit_control_dep_attempts)
     return false;
   ++*num_calls;
 
index b7149039ae4310ae70dd5237fe977c0cec4ec7c0..166e40c3931201504a06157767b220b506179df2 100644 (file)
@@ -194,7 +194,7 @@ switch_conversion::check_range ()
     }
 
   if (tree_to_uhwi (m_range_size)
-      > ((unsigned) m_count * SWITCH_CONVERSION_BRANCH_RATIO))
+      > ((unsigned) m_count * param_switch_conversion_branch_ratio))
     {
       m_reason = "the maximum range-branch ratio exceeded";
       return false;
@@ -1268,8 +1268,8 @@ jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
 
   unsigned HOST_WIDE_INT max_ratio
     = (optimize_insn_for_size_p ()
-       ? PARAM_VALUE (PARAM_JUMP_TABLE_MAX_GROWTH_RATIO_FOR_SIZE)
-       : PARAM_VALUE (PARAM_JUMP_TABLE_MAX_GROWTH_RATIO_FOR_SPEED));
+       ? param_jump_table_max_growth_ratio_for_size
+       : param_jump_table_max_growth_ratio_for_speed);
   unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
                                            clusters[end]->get_high ());
   /* Check overflow.  */
index 653007f0e17b5beb7e80a4d530af0b0c45b5191a..c58bccea7f1f60aae5e07a78b0a7177bbe0c581c 100644 (file)
@@ -476,7 +476,7 @@ case_tree_node::case_tree_node ():
 unsigned int
 jump_table_cluster::case_values_threshold (void)
 {
-  unsigned int threshold = PARAM_VALUE (PARAM_CASE_VALUES_THRESHOLD);
+  unsigned int threshold = param_case_values_threshold;
 
   if (threshold == 0)
     threshold = targetm.case_values_threshold ();
@@ -683,8 +683,8 @@ is changed into:
        b_b = PHI <b_6, b_7>
 
 There are further constraints.  Specifically, the range of values across all
-case labels must not be bigger than SWITCH_CONVERSION_BRANCH_RATIO (default
-eight) times the number of the actual switch branches.
+case labels must not be bigger than param_switch_conversion_branch_ratio
+(default eight) times the number of the actual switch branches.
 
 This transformation was contributed by Martin Jambor, see this e-mail:
    http://gcc.gnu.org/ml/gcc-patches/2008-07/msg00011.html  */
index 88f14e73d65acd3026d72a2698212f4f0a5a6409..5abbdc747f48958a6a385e9b5bf3550c3ad5de4f 100644 (file)
@@ -185,7 +185,7 @@ vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
 {
   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
 
-  if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
+  if ((unsigned) param_vect_max_version_for_alias_checks == 0)
     return opt_result::failure_at (vect_location,
                                   "will not create alias checks, as"
                                   " --param vect-max-version-for-alias-checks"
@@ -2086,7 +2086,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
       if (do_peeling)
         {
           unsigned max_allowed_peel
-            = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
+           = param_vect_max_peeling_for_alignment;
          if (flag_vect_cost_model == VECT_COST_MODEL_CHEAP)
            max_allowed_peel = 0;
           if (max_allowed_peel != (unsigned)-1)
@@ -2227,7 +2227,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
 
               if (known_alignment_for_access_p (dr_info)
                   || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
-                     >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
+                 >= (unsigned) param_vect_max_version_for_alignment_checks)
                 {
                   do_versioning = false;
                   break;
@@ -3656,10 +3656,9 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
     dump_printf_loc (MSG_NOTE, vect_location,
                     "improved number of alias checks from %d to %d\n",
                     may_alias_ddrs.length (), count);
-  unsigned limit = PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS);
+  unsigned limit = param_vect_max_version_for_alias_checks;
   if (flag_simd_cost_model == VECT_COST_MODEL_CHEAP)
-    limit = default_param_value
-             (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) * 6 / 10;
+    limit = param_vect_max_version_for_alias_checks * 6 / 10;
   if (count > limit)
     return opt_result::failure_at
       (vect_location,
index b600d3157457c3180d0456c4f66cbc57012e3c71..db58a2f7c8182992e7e975cf4f59cf1f87664ce3 100644 (file)
@@ -1665,7 +1665,7 @@ vect_analyze_loop_costing (loop_vec_info loop_vinfo)
       return -1;
     }
 
-  int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
+  int min_scalar_loop_bound = (param_min_vect_loop_bound
                               * assumed_vf);
 
   /* Use the cost model only if it is more conservative than user specified
@@ -1775,7 +1775,7 @@ vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
        /* If dependence analysis will give up due to the limit on the
           number of datarefs stop here and fail fatally.  */
        if (datarefs->length ()
-           > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+           > (unsigned)param_loop_max_datarefs_for_datadeps)
          return opt_result::failure_at (stmt, "exceeded param "
                                         "loop-max-datarefs-for-datadeps\n");
       }
@@ -2461,7 +2461,7 @@ vect_analyze_loop (class loop *loop, vec_info_shared *shared)
             TODO: Enable epilogue vectorization for loops with SIMDUID set.  */
          vect_epilogues = (!simdlen
                            && loop->inner == NULL
-                           && PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK)
+                           && param_vect_epilogues_nomask
                            && LOOP_VINFO_PEELING_FOR_NITER (first_loop_vinfo)
                            && !loop->simduid
                            /* For now only allow one epilogue loop.  */
index f4b445ac1ef9cff8280964dcc8937b3b74fe2a7c..9d3d991e516326ee35ee52ab64c063143567d9f2 100644 (file)
@@ -3289,7 +3289,7 @@ vect_slp_bb (basic_block bb)
 
       gimple_stmt_iterator region_end = gsi;
 
-      if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
+      if (insns > param_slp_max_insns_in_bb)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
index e9575a184ad02787cbdc6ea9059ef1dc35fbca94..e556e0e98886de5de81eb44e013e31ff09136021 100644 (file)
@@ -489,7 +489,7 @@ public:
 
   /* Threshold of number of iterations below which vectorization will not be
      performed. It is calculated from MIN_PROFITABLE_ITERS and
-     PARAM_MIN_VECT_LOOP_BOUND.  */
+     param_min_vect_loop_bound.  */
   unsigned int th;
 
   /* When applying loop versioning, the vector form should only be used
index da11dfb5b27a67fa95b5b2e0d82e66883ba8cf77..399c33324529e9121949f854506dc54861bbd77f 100644 (file)
@@ -3391,7 +3391,7 @@ find_switch_asserts (basic_block bb, gswitch *last)
 
   /* Now register along the default label assertions that correspond to the
      anti-range of each label.  */
-  int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
+  int insertion_limit = param_max_vrp_switch_assertions;
   if (insertion_limit == 0)
     return;
 
@@ -4305,7 +4305,7 @@ vrp_prop::check_mem_ref (location_t location, tree ref,
      The loop computes the range of the final offset for expressions such
      as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
      some range.  */
-  const unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
+  const unsigned limit = param_ssa_name_def_chain_limit;
   for (unsigned n = 0; TREE_CODE (arg) == SSA_NAME && n < limit; ++n)
     {
       gimple *def = SSA_NAME_DEF_STMT (arg);
index d6a3970ddb61586645b64291ea08b73930797c17..c3cd527e14a753c6c2c1a2a88a9d00cc117910aa 100644 (file)
@@ -1553,15 +1553,15 @@ wide_int_to_tree_1 (tree type, const wide_int_ref &pcst)
          if (TYPE_SIGN (type) == UNSIGNED)
            {
              /* Cache [0, N).  */
-             limit = INTEGER_SHARE_LIMIT;
-             if (IN_RANGE (hwi, 0, INTEGER_SHARE_LIMIT - 1))
+             limit = param_integer_share_limit;
+             if (IN_RANGE (hwi, 0, param_integer_share_limit - 1))
                ix = hwi;
            }
          else
            {
              /* Cache [-1, N).  */
-             limit = INTEGER_SHARE_LIMIT + 1;
-             if (IN_RANGE (hwi, -1, INTEGER_SHARE_LIMIT - 1))
+             limit = param_integer_share_limit + 1;
+             if (IN_RANGE (hwi, -1, param_integer_share_limit - 1))
                ix = hwi + 1;
            }
          break;
@@ -1737,23 +1737,24 @@ cache_integer_cst (tree t)
       if (TYPE_UNSIGNED (type))
        {
          /* Cache 0..N */
-         limit = INTEGER_SHARE_LIMIT;
+         limit = param_integer_share_limit;
 
          /* This is a little hokie, but if the prec is smaller than
-            what is necessary to hold INTEGER_SHARE_LIMIT, then the
+            what is necessary to hold param_integer_share_limit, then the
             obvious test will not get the correct answer.  */
          if (prec < HOST_BITS_PER_WIDE_INT)
            {
-             if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
+             if (tree_to_uhwi (t)
+                 < (unsigned HOST_WIDE_INT) param_integer_share_limit)
                ix = tree_to_uhwi (t);
            }
-         else if (wi::ltu_p (wi::to_wide (t), INTEGER_SHARE_LIMIT))
+         else if (wi::ltu_p (wi::to_wide (t), param_integer_share_limit))
            ix = tree_to_uhwi (t);
        }
       else
        {
          /* Cache -1..N */
-         limit = INTEGER_SHARE_LIMIT + 1;
+         limit = param_integer_share_limit + 1;
 
          if (integer_minus_onep (t))
            ix = 0;
@@ -1761,10 +1762,10 @@ cache_integer_cst (tree t)
            {
              if (prec < HOST_BITS_PER_WIDE_INT)
                {
-                 if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT)
+                 if (tree_to_shwi (t) < param_integer_share_limit)
                    ix = tree_to_shwi (t) + 1;
                }
-             else if (wi::ltu_p (wi::to_wide (t), INTEGER_SHARE_LIMIT))
+             else if (wi::ltu_p (wi::to_wide (t), param_integer_share_limit))
                ix = tree_to_shwi (t) + 1;
            }
        }
index 5de36ae2f47287a439a1e7ab6fc0826b882253da..c80a06c92b73d02d0875d38da650637eecac27a4 100644 (file)
@@ -5844,7 +5844,7 @@ add_uses_1 (rtx *x, void *cui)
    compile time for ridiculously complex expressions, although they're
    seldom useful, and they may often have to be discarded as not
    representable anyway.  */
-#define EXPR_USE_DEPTH (PARAM_VALUE (PARAM_MAX_VARTRACK_EXPR_DEPTH))
+#define EXPR_USE_DEPTH (param_max_vartrack_expr_depth)
 
 /* Attempt to reverse the EXPR operation in the debug info and record
    it in the cselib table.  Say for reg1 = reg2 + 6 even when reg2 is
@@ -5904,7 +5904,7 @@ reverse_op (rtx val, const_rtx expr, rtx_insn *insn)
        && (GET_CODE (l->loc) != CONST || !references_value_p (l->loc, 0)))
       return;
     /* Avoid creating too large locs lists.  */
-    else if (count == PARAM_VALUE (PARAM_MAX_VARTRACK_REVERSE_OP_SIZE))
+    else if (count == param_max_vartrack_reverse_op_size)
       return;
 
   switch (GET_CODE (src))
@@ -7054,7 +7054,7 @@ vt_find_locations (void)
   int *rc_order;
   int i;
   int htabsz = 0;
-  int htabmax = PARAM_VALUE (PARAM_MAX_VARTRACK_SIZE);
+  int htabmax = param_max_vartrack_size;
   bool success = true;
 
   timevar_push (TV_VAR_TRACKING_DATAFLOW);