+2019-11-12 Martin Liska <mliska@suse.cz>
+
+	* asan.c (asan_sanitize_stack_p): Replace the old parameter syntax
+	with the new one; include opts.h where needed.  Use the
+	SET_OPTION_IF_UNSET macro (see the sketch below this ChangeLog).
+ (asan_sanitize_allocas_p): Likewise.
+ (asan_emit_stack_protection): Likewise.
+ (asan_protect_global): Likewise.
+ (instrument_derefs): Likewise.
+ (instrument_builtin_call): Likewise.
+ (asan_expand_mark_ifn): Likewise.
+ * auto-profile.c (auto_profile): Likewise.
+ * bb-reorder.c (copy_bb_p): Likewise.
+ (duplicate_computed_gotos): Likewise.
+ * builtins.c (inline_expand_builtin_string_cmp): Likewise.
+ * cfgcleanup.c (try_crossjump_to_edge): Likewise.
+ (try_crossjump_bb): Likewise.
+ * cfgexpand.c (defer_stack_allocation): Likewise.
+ (stack_protect_classify_type): Likewise.
+ (pass_expand::execute): Likewise.
+ * cfgloopanal.c (expected_loop_iterations_unbounded): Likewise.
+ (estimate_reg_pressure_cost): Likewise.
+ * cgraph.c (cgraph_edge::maybe_hot_p): Likewise.
+ * combine.c (combine_instructions): Likewise.
+ (record_value_for_reg): Likewise.
+ * common/config/aarch64/aarch64-common.c (aarch64_option_validate_param): Likewise.
+ (aarch64_option_default_params): Likewise.
+ * common/config/ia64/ia64-common.c (ia64_option_default_params): Likewise.
+ * common/config/powerpcspe/powerpcspe-common.c (rs6000_option_default_params): Likewise.
+ * common/config/rs6000/rs6000-common.c (rs6000_option_default_params): Likewise.
+ * common/config/sh/sh-common.c (sh_option_default_params): Likewise.
+ * config/aarch64/aarch64.c (aarch64_output_probe_stack_range): Likewise.
+ (aarch64_allocate_and_probe_stack_space): Likewise.
+ (aarch64_expand_epilogue): Likewise.
+ (aarch64_override_options_internal): Likewise.
+ * config/alpha/alpha.c (alpha_option_override): Likewise.
+ * config/arm/arm.c (arm_option_override): Likewise.
+ (arm_valid_target_attribute_p): Likewise.
+ * config/i386/i386-options.c (ix86_option_override_internal): Likewise.
+ * config/i386/i386.c (get_probe_interval): Likewise.
+ (ix86_adjust_stack_and_probe_stack_clash): Likewise.
+ (ix86_max_noce_ifcvt_seq_cost): Likewise.
+ * config/ia64/ia64.c (ia64_adjust_cost): Likewise.
+ * config/rs6000/rs6000-logue.c (get_stack_clash_protection_probe_interval): Likewise.
+ (get_stack_clash_protection_guard_size): Likewise.
+ * config/rs6000/rs6000.c (rs6000_option_override_internal): Likewise.
+ * config/s390/s390.c (allocate_stack_space): Likewise.
+ (s390_emit_prologue): Likewise.
+ (s390_option_override_internal): Likewise.
+ * config/sparc/sparc.c (sparc_option_override): Likewise.
+ * config/visium/visium.c (visium_option_override): Likewise.
+ * coverage.c (get_coverage_counts): Likewise.
+ (coverage_compute_profile_id): Likewise.
+ (coverage_begin_function): Likewise.
+ (coverage_end_function): Likewise.
+ * cse.c (cse_find_path): Likewise.
+ (cse_extended_basic_block): Likewise.
+ (cse_main): Likewise.
+ * cselib.c (cselib_invalidate_mem): Likewise.
+ * dse.c (dse_step1): Likewise.
+ * emit-rtl.c (set_new_first_and_last_insn): Likewise.
+ (get_max_insn_count): Likewise.
+ (make_debug_insn_raw): Likewise.
+ (init_emit): Likewise.
+ * explow.c (compute_stack_clash_protection_loop_data): Likewise.
+ * final.c (compute_alignments): Likewise.
+ * fold-const.c (fold_range_test): Likewise.
+ (fold_truth_andor): Likewise.
+ (tree_single_nonnegative_warnv_p): Likewise.
+ (integer_valued_real_single_p): Likewise.
+ * gcse.c (want_to_gcse_p): Likewise.
+ (prune_insertions_deletions): Likewise.
+ (hoist_code): Likewise.
+ (gcse_or_cprop_is_too_expensive): Likewise.
+	* ggc-common.c (init_ggc_heuristics): Likewise.
+ * ggc-page.c (ggc_collect): Likewise.
+ * gimple-loop-interchange.cc (MAX_NUM_STMT): Likewise.
+ (MAX_DATAREFS): Likewise.
+ (OUTER_STRIDE_RATIO): Likewise.
+ * gimple-loop-jam.c (tree_loop_unroll_and_jam): Likewise.
+ * gimple-loop-versioning.cc (loop_versioning::max_insns_for_loop): Likewise.
+ * gimple-ssa-split-paths.c (is_feasible_trace): Likewise.
+ * gimple-ssa-store-merging.c (imm_store_chain_info::try_coalesce_bswap): Likewise.
+ (imm_store_chain_info::coalesce_immediate_stores): Likewise.
+ (imm_store_chain_info::output_merged_store): Likewise.
+ (pass_store_merging::process_store): Likewise.
+ * gimple-ssa-strength-reduction.c (find_basis_for_base_expr): Likewise.
+ * graphite-isl-ast-to-gimple.c (class translate_isl_ast_to_gimple): Likewise.
+ (scop_to_isl_ast): Likewise.
+ * graphite-optimize-isl.c (get_schedule_for_node_st): Likewise.
+ (optimize_isl): Likewise.
+ * graphite-scop-detection.c (build_scops): Likewise.
+ * haifa-sched.c (set_modulo_params): Likewise.
+ (rank_for_schedule): Likewise.
+ (model_add_to_worklist): Likewise.
+ (model_promote_insn): Likewise.
+ (model_choose_insn): Likewise.
+ (queue_to_ready): Likewise.
+ (autopref_multipass_dfa_lookahead_guard): Likewise.
+ (schedule_block): Likewise.
+ (sched_init): Likewise.
+ * hsa-gen.c (init_prologue): Likewise.
+ * ifcvt.c (bb_ok_for_noce_convert_multiple_sets): Likewise.
+ (cond_move_process_if_block): Likewise.
+ * ipa-cp.c (ipcp_lattice::add_value): Likewise.
+ (merge_agg_lats_step): Likewise.
+ (devirtualization_time_bonus): Likewise.
+ (hint_time_bonus): Likewise.
+ (incorporate_penalties): Likewise.
+ (good_cloning_opportunity_p): Likewise.
+ (ipcp_propagate_stage): Likewise.
+ * ipa-fnsummary.c (decompose_param_expr): Likewise.
+ (set_switch_stmt_execution_predicate): Likewise.
+ (analyze_function_body): Likewise.
+ (compute_fn_summary): Likewise.
+ * ipa-inline-analysis.c (estimate_growth): Likewise.
+ * ipa-inline.c (caller_growth_limits): Likewise.
+ (inline_insns_single): Likewise.
+ (inline_insns_auto): Likewise.
+ (can_inline_edge_by_limits_p): Likewise.
+ (want_early_inline_function_p): Likewise.
+ (big_speedup_p): Likewise.
+ (want_inline_small_function_p): Likewise.
+ (want_inline_self_recursive_call_p): Likewise.
+ (edge_badness): Likewise.
+ (recursive_inlining): Likewise.
+ (compute_max_insns): Likewise.
+ (early_inliner): Likewise.
+ * ipa-polymorphic-call.c (csftc_abort_walking_p): Likewise.
+ * ipa-profile.c (ipa_profile): Likewise.
+ * ipa-prop.c (determine_known_aggregate_parts): Likewise.
+ (ipa_analyze_node): Likewise.
+ (ipcp_transform_function): Likewise.
+ * ipa-split.c (consider_split): Likewise.
+ * ipa-sra.c (allocate_access): Likewise.
+ (process_scan_results): Likewise.
+ (ipa_sra_summarize_function): Likewise.
+ (pull_accesses_from_callee): Likewise.
+ * ira-build.c (loop_compare_func): Likewise.
+ (mark_loops_for_removal): Likewise.
+ * ira-conflicts.c (build_conflict_bit_table): Likewise.
+ * loop-doloop.c (doloop_optimize): Likewise.
+ * loop-invariant.c (gain_for_invariant): Likewise.
+ (move_loop_invariants): Likewise.
+ * loop-unroll.c (decide_unroll_constant_iterations): Likewise.
+ (decide_unroll_runtime_iterations): Likewise.
+ (decide_unroll_stupid): Likewise.
+ (expand_var_during_unrolling): Likewise.
+ * lra-assigns.c (spill_for): Likewise.
+ * lra-constraints.c (EBB_PROBABILITY_CUTOFF): Likewise.
+ * modulo-sched.c (sms_schedule): Likewise.
+ (DFA_HISTORY): Likewise.
+ * opts.c (default_options_optimization): Likewise.
+ (finish_options): Likewise.
+ (common_handle_option): Likewise.
+ * postreload-gcse.c (eliminate_partially_redundant_load): Likewise.
+ (if): Likewise.
+ * predict.c (get_hot_bb_threshold): Likewise.
+ (maybe_hot_count_p): Likewise.
+ (probably_never_executed): Likewise.
+ (predictable_edge_p): Likewise.
+ (predict_loops): Likewise.
+ (expr_expected_value_1): Likewise.
+ (tree_predict_by_opcode): Likewise.
+ (handle_missing_profiles): Likewise.
+ * reload.c (find_equiv_reg): Likewise.
+ * reorg.c (redundant_insn): Likewise.
+ * resource.c (mark_target_live_regs): Likewise.
+ (incr_ticks_for_insn): Likewise.
+ * sanopt.c (pass_sanopt::execute): Likewise.
+ * sched-deps.c (sched_analyze_1): Likewise.
+ (sched_analyze_2): Likewise.
+ (sched_analyze_insn): Likewise.
+ (deps_analyze_insn): Likewise.
+ * sched-ebb.c (schedule_ebbs): Likewise.
+ * sched-rgn.c (find_single_block_region): Likewise.
+ (too_large): Likewise.
+ (haifa_find_rgns): Likewise.
+ (extend_rgns): Likewise.
+ (new_ready): Likewise.
+ (schedule_region): Likewise.
+ (sched_rgn_init): Likewise.
+ * sel-sched-ir.c (make_region_from_loop): Likewise.
+ * sel-sched-ir.h (MAX_WS): Likewise.
+ * sel-sched.c (process_pipelined_exprs): Likewise.
+ (sel_setup_region_sched_flags): Likewise.
+ * shrink-wrap.c (try_shrink_wrapping): Likewise.
+ * targhooks.c (default_max_noce_ifcvt_seq_cost): Likewise.
+ * toplev.c (print_version): Likewise.
+ (process_options): Likewise.
+ * tracer.c (tail_duplicate): Likewise.
+ * trans-mem.c (tm_log_add): Likewise.
+ * tree-chrec.c (chrec_fold_plus_1): Likewise.
+ * tree-data-ref.c (split_constant_offset): Likewise.
+ (compute_all_dependences): Likewise.
+ * tree-if-conv.c (MAX_PHI_ARG_NUM): Likewise.
+ * tree-inline.c (remap_gimple_stmt): Likewise.
+ * tree-loop-distribution.c (MAX_DATAREFS_NUM): Likewise.
+ * tree-parloops.c (MIN_PER_THREAD): Likewise.
+ (create_parallel_loop): Likewise.
+ * tree-predcom.c (determine_unroll_factor): Likewise.
+ * tree-scalar-evolution.c (instantiate_scev_r): Likewise.
+ * tree-sra.c (analyze_all_variable_accesses): Likewise.
+ * tree-ssa-ccp.c (fold_builtin_alloca_with_align): Likewise.
+ * tree-ssa-dse.c (setup_live_bytes_from_ref): Likewise.
+ (dse_optimize_redundant_stores): Likewise.
+ (dse_classify_store): Likewise.
+ * tree-ssa-ifcombine.c (ifcombine_ifandif): Likewise.
+ * tree-ssa-loop-ch.c (ch_base::copy_headers): Likewise.
+ * tree-ssa-loop-im.c (LIM_EXPENSIVE): Likewise.
+ * tree-ssa-loop-ivcanon.c (try_unroll_loop_completely): Likewise.
+ (try_peel_loop): Likewise.
+ (tree_unroll_loops_completely): Likewise.
+ * tree-ssa-loop-ivopts.c (avg_loop_niter): Likewise.
+ (CONSIDER_ALL_CANDIDATES_BOUND): Likewise.
+ (MAX_CONSIDERED_GROUPS): Likewise.
+ (ALWAYS_PRUNE_CAND_SET_BOUND): Likewise.
+ * tree-ssa-loop-manip.c (can_unroll_loop_p): Likewise.
+ * tree-ssa-loop-niter.c (MAX_ITERATIONS_TO_TRACK): Likewise.
+ * tree-ssa-loop-prefetch.c (PREFETCH_BLOCK): Likewise.
+ (L1_CACHE_SIZE_BYTES): Likewise.
+ (L2_CACHE_SIZE_BYTES): Likewise.
+ (should_issue_prefetch_p): Likewise.
+ (schedule_prefetches): Likewise.
+ (determine_unroll_factor): Likewise.
+ (volume_of_references): Likewise.
+ (add_subscript_strides): Likewise.
+ (self_reuse_distance): Likewise.
+ (mem_ref_count_reasonable_p): Likewise.
+ (insn_to_prefetch_ratio_too_small_p): Likewise.
+ (loop_prefetch_arrays): Likewise.
+ (tree_ssa_prefetch_arrays): Likewise.
+ * tree-ssa-loop-unswitch.c (tree_unswitch_single_loop): Likewise.
+ * tree-ssa-math-opts.c (gimple_expand_builtin_pow): Likewise.
+ (convert_mult_to_fma): Likewise.
+ (math_opts_dom_walker::after_dom_children): Likewise.
+ * tree-ssa-phiopt.c (cond_if_else_store_replacement): Likewise.
+ (hoist_adjacent_loads): Likewise.
+ (gate_hoist_loads): Likewise.
+ * tree-ssa-pre.c (translate_vuse_through_block): Likewise.
+ (compute_partial_antic_aux): Likewise.
+ * tree-ssa-reassoc.c (get_reassociation_width): Likewise.
+ * tree-ssa-sccvn.c (vn_reference_lookup_pieces): Likewise.
+ (vn_reference_lookup): Likewise.
+ (do_rpo_vn): Likewise.
+ * tree-ssa-scopedtables.c (avail_exprs_stack::lookup_avail_expr): Likewise.
+ * tree-ssa-sink.c (select_best_block): Likewise.
+ * tree-ssa-strlen.c (new_stridx): Likewise.
+ (new_addr_stridx): Likewise.
+ (get_range_strlen_dynamic): Likewise.
+ (class ssa_name_limit_t): Likewise.
+ * tree-ssa-structalias.c (push_fields_onto_fieldstack): Likewise.
+ (create_variable_info_for_1): Likewise.
+ (init_alias_vars): Likewise.
+ * tree-ssa-tail-merge.c (find_clusters_1): Likewise.
+ (tail_merge_optimize): Likewise.
+ * tree-ssa-threadbackward.c (thread_jumps::profitable_jump_thread_path): Likewise.
+ (thread_jumps::fsm_find_control_statement_thread_paths): Likewise.
+ (thread_jumps::find_jump_threads_backwards): Likewise.
+ * tree-ssa-threadedge.c (record_temporary_equivalences_from_stmts_at_dest): Likewise.
+ * tree-ssa-uninit.c (compute_control_dep_chain): Likewise.
+ * tree-switch-conversion.c (switch_conversion::check_range): Likewise.
+ (jump_table_cluster::can_be_handled): Likewise.
+ * tree-switch-conversion.h (jump_table_cluster::case_values_threshold): Likewise.
+ (SWITCH_CONVERSION_BRANCH_RATIO): Likewise.
+ (param_switch_conversion_branch_ratio): Likewise.
+ * tree-vect-data-refs.c (vect_mark_for_runtime_alias_test): Likewise.
+ (vect_enhance_data_refs_alignment): Likewise.
+ (vect_prune_runtime_alias_test_list): Likewise.
+ * tree-vect-loop.c (vect_analyze_loop_costing): Likewise.
+ (vect_get_datarefs_in_loop): Likewise.
+ (vect_analyze_loop): Likewise.
+ * tree-vect-slp.c (vect_slp_bb): Likewise.
+ * tree-vectorizer.h: Likewise.
+ * tree-vrp.c (find_switch_asserts): Likewise.
+ (vrp_prop::check_mem_ref): Likewise.
+ * tree.c (wide_int_to_tree_1): Likewise.
+ (cache_integer_cst): Likewise.
+ * var-tracking.c (EXPR_USE_DEPTH): Likewise.
+ (reverse_op): Likewise.
+ (vt_find_locations): Likewise.
+
2019-11-12 Martin Liska <mliska@suse.cz>
* Makefile.in: Include params.opt.
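Before the per-file hunks, a minimal self-contained sketch of the conversion described in the ChangeLog above.  The structures and the macro body below are simplified mock-ups (the real option structures are generated into options.h and SET_OPTION_IF_UNSET comes from opts.h); only the usage pattern is taken from the hunks that follow.

  /* Mock-up of the params-to-options conversion: a read that used to go
     through PARAM_VALUE (PARAM_ASAN_STACK) becomes the plain variable
     param_asan_stack, and a default override that used to call
     maybe_set_param_value () uses SET_OPTION_IF_UNSET, which only writes
     when the user has not set the option explicitly.  */

  #include <stdio.h>

  struct gcc_options     { int x_param_asan_stack; };
  struct gcc_options_set { int x_param_asan_stack; };  /* non-zero = user set */

  static struct gcc_options     global_options     = { 1 };
  static struct gcc_options_set global_options_set = { 0 };

  #define param_asan_stack global_options.x_param_asan_stack

  /* Simplified stand-in for the real macro from opts.h.  */
  #define SET_OPTION_IF_UNSET(OPTS, OPTS_SET, OPTION, VALUE) \
    do {                                                      \
      if (!(OPTS_SET)->x_ ## OPTION)                          \
        (OPTS)->x_ ## OPTION = (VALUE);                       \
    } while (0)

  int
  main (void)
  {
    /* Old: if (PARAM_VALUE (PARAM_ASAN_STACK)) ...
       New: read the option variable directly.  */
    if (param_asan_stack)
      printf ("asan stack instrumentation enabled\n");

    /* Old: maybe_set_param_value (PARAM_ASAN_STACK, 0, ...);
       New: override only if the user did not pass the --param.  */
    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
                         param_asan_stack, 0);
    printf ("param_asan_stack is now %d\n", param_asan_stack);
    return 0;
  }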
bool
asan_sanitize_stack_p (void)
{
- return (sanitize_flags_p (SANITIZE_ADDRESS) && ASAN_STACK);
+ return (sanitize_flags_p (SANITIZE_ADDRESS) && param_asan_stack);
}
bool
asan_sanitize_allocas_p (void)
{
- return (asan_sanitize_stack_p () && ASAN_PROTECT_ALLOCAS);
+ return (asan_sanitize_stack_p () && param_asan_protect_allocas);
}
/* Checks whether section SEC should be sanitized. */
/* Emit the prologue sequence. */
if (asan_frame_size > 32 && asan_frame_size <= 65536 && pbase
- && ASAN_USE_AFTER_RETURN)
+ && param_asan_use_after_return)
{
use_after_return_class = floor_log2 (asan_frame_size - 1) - 5;
/* __asan_stack_malloc_N guarantees alignment
bool
asan_protect_global (tree decl, bool ignore_decl_rtl_set_p)
{
- if (!ASAN_GLOBALS)
+ if (!param_asan_globals)
return false;
rtx rtl, symbol;
instrument_derefs (gimple_stmt_iterator *iter, tree t,
location_t location, bool is_store)
{
- if (is_store && !ASAN_INSTRUMENT_WRITES)
+ if (is_store && !param_asan_instrument_writes)
return;
- if (!is_store && !ASAN_INSTRUMENT_READS)
+ if (!is_store && !param_asan_instrument_reads)
return;
tree type, base;
{
if (DECL_THREAD_LOCAL_P (inner))
return;
- if (!ASAN_GLOBALS && is_global_var (inner))
+ if (!param_asan_globals && is_global_var (inner))
return;
if (!TREE_STATIC (inner))
{
static bool
instrument_builtin_call (gimple_stmt_iterator *iter)
{
- if (!ASAN_MEMINTRIN)
+ if (!param_asan_memintrin)
return false;
bool iter_advanced_p = false;
tree base_addr = gimple_assign_lhs (g);
/* Generate direct emission if size_in_bytes is small. */
- if (size_in_bytes <= ASAN_PARAM_USE_AFTER_SCOPE_DIRECT_EMISSION_THRESHOLD)
+ if (size_in_bytes
+ <= (unsigned)param_use_after_scope_direct_emission_threshold)
{
const unsigned HOST_WIDE_INT shadow_size
= shadow_mem_size (size_in_bytes);
function before annotation, so the profile inside bar@loc_foo2
will be useful. */
autofdo::stmt_set promoted_stmts;
- for (int i = 0; i < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS); i++)
+ for (int i = 0; i < param_early_inliner_max_iterations; i++)
{
if (!flag_value_profile_transformations
|| !autofdo::afdo_vpt_for_early_inline (&promoted_stmts))
return false;
if (code_may_grow && optimize_bb_for_speed_p (bb))
- max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
+ max_size *= param_max_grow_copy_bb_insns;
FOR_BB_INSNS (bb, insn)
{
/* Never copy a block larger than this. */
int max_size
- = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
+ = uncond_jump_length * param_max_goto_duplication_insns;
bool changed = false;
 /* If the length of the comparison is larger than the threshold,
do nothing. */
if (length > (unsigned HOST_WIDE_INT)
- PARAM_VALUE (BUILTIN_STRING_CMP_INLINE_LENGTH))
+ param_builtin_string_cmp_inline_length)
return NULL_RTX;
machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+2019-11-12 Martin Liska <mliska@suse.cz>
+
+	* gimple-parser.c (c_parser_parse_gimple_body): Replace the old
+	parameter syntax with the new one; include opts.h where needed.
+	Use the SET_OPTION_IF_UNSET macro.
+
2019-11-12 Maciej W. Rozycki <macro@codesourcery.com>
Frederik Harwath <frederik@codesourcery.com>
if (cfun->curr_properties & PROP_cfg)
{
ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = entry_bb_count;
- gcov_type t = PARAM_VALUE (PARAM_GIMPLE_FE_COMPUTED_HOT_BB_THRESHOLD);
+ gcov_type t = param_gimple_fe_computed_hot_bb_threshold;
set_hot_bb_threshold (t);
update_max_bb_count ();
cgraph_node::get_create (cfun->decl);
of matching instructions or the 'from' block was totally matched
(such that its predecessors will hopefully be redirected and the
block removed). */
- if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
+ if ((nmatch < param_min_crossjump_insns)
&& (newpos1 != BB_HEAD (src1)))
return false;
a block that falls through into BB, as that adds no branches to the
program. We'll try that combination first. */
fallthru = NULL;
- max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
+ max = param_max_crossjump_edges;
if (EDGE_COUNT (bb->preds) > max)
return false;
bool smallish
= (poly_int_tree_p (size_unit, &size)
&& (estimated_poly_value (size)
- < PARAM_VALUE (PARAM_MIN_SIZE_FOR_STACK_SHARING)));
+ < param_min_size_for_stack_sharing));
/* If stack protection is enabled, *all* stack variables must be deferred,
so that we can re-order the strings to the top of the frame.
|| t == signed_char_type_node
|| t == unsigned_char_type_node)
{
- unsigned HOST_WIDE_INT max = PARAM_VALUE (PARAM_SSP_BUFFER_SIZE);
+ unsigned HOST_WIDE_INT max = param_ssp_buffer_size;
unsigned HOST_WIDE_INT len;
if (!TYPE_SIZE_UNIT (type)
warning (OPT_Wstack_protector,
"stack protector not protecting function: "
"all local arrays are less than %d bytes long",
- (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
+ (int) param_ssp_buffer_size);
}
/* Set up parameters and prepare for return, for the function. */
/* If the function has too many markers, drop them while expanding. */
if (cfun->debug_marker_count
- >= PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
+ >= param_max_debug_marker_count)
cfun->debug_nonbind_markers = false;
lab_rtx_for_bb = new hash_map<basic_block, rtx_code_label *>;
{
if (by_profile_only)
return -1;
- expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+ expected = param_avg_loop_niter;
}
else if (loop->latch && (loop->latch->count.initialized_p ()
|| loop->header->count.initialized_p ()))
{
if (by_profile_only)
return -1;
- expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+ expected = param_avg_loop_niter;
}
else if (!count_in.nonzero_p ())
{
{
if (by_profile_only)
return -1;
- expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+ expected = param_avg_loop_niter;
}
if (!by_profile_only)
if (optimize && (flag_ira_region == IRA_REGION_ALL
|| flag_ira_region == IRA_REGION_MIXED)
- && number_of_loops (cfun) <= (unsigned) IRA_MAX_LOOPS_NUM)
+ && number_of_loops (cfun) <= (unsigned) param_ira_max_loops_num)
/* IRA regional allocation deals with high register pressure
better. So decrease the cost (to do more accurate the cost
calculation for IRA, we need to know how many registers lives
if (count.apply_scale (2, 1) < where->count.apply_scale (3, 1))
return false;
}
- else if (count.apply_scale (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION), 1)
+  else if (count.apply_scale (param_hot_bb_frequency_fraction, 1)
< where->count)
return false;
return true;
init_reg_last ();
setup_incoming_promotions (first);
last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
- int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
+ int max_combine = param_max_combine_insns;
FOR_EACH_BB_FN (this_basic_block, cfun)
{
{
/* If there are two or more occurrences of REG in VALUE,
prevent the value from growing too much. */
- if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
+ if (count_rtxs (tem) > param_max_last_value_rtl)
tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
}
aarch64_option_validate_param (const int value, const int param)
{
/* Check that both parameters are the same. */
- if (param == (int) PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE)
+ if (param == param_stack_clash_protection_guard_size)
{
if (value != 12 && value != 16)
{
aarch64_option_default_params (void)
{
/* We assume the guard page is 64k. */
- int index = (int) PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE;
- set_default_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
- DEFAULT_STK_CLASH_GUARD_SIZE == 0
- ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE);
+ int index = (int) param_stack_clash_protection_guard_size;
+ param_stack_clash_protection_guard_size
+ = (DEFAULT_STK_CLASH_GUARD_SIZE == 0 ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE);
- int guard_size
- = default_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ int guard_size = param_stack_clash_protection_guard_size;
/* Set the interval parameter to be the same as the guard size. This way the
mid-end code does the right thing for us. */
- set_default_param_value (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL,
- guard_size);
+ param_stack_clash_protection_probe_interval = guard_size;
/* Validate the options. */
aarch64_option_validate_param (guard_size, index);
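A note on units for the stack-clash hunks above and below: param_stack_clash_protection_guard_size and param_stack_clash_protection_probe_interval hold log2 exponents rather than byte counts, which is why the code shifts them.  With the default of 16 assumed here, 1 << 16 = 65536 bytes, i.e. the 64 KiB guard page mentioned in the comment; the alternative value 12 gives 1 << 12 = 4096 bytes (4 KiB).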
ia64_option_default_params (void)
{
/* Let the scheduler form additional regions. */
- set_default_param_value (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS, 2);
+ param_max_sched_extend_regions_iters = 2;
/* Set the default values for cache-related parameters. */
- set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6);
- set_default_param_value (PARAM_L1_CACHE_LINE_SIZE, 32);
+ param_simultaneous_prefetches = 6;
+ param_l1_cache_line_size = 32;
- set_default_param_value (PARAM_SCHED_MEM_TRUE_DEP_COST, 4);
+ param_sched_mem_true_dep_cost = 4;
}
#undef TARGET_OPTION_OPTIMIZATION_TABLE
rs6000_option_default_params (void)
{
/* Double growth factor to counter reduced min jump length. */
- set_default_param_value (PARAM_MAX_GROW_COPY_BB_INSNS, 16);
+ param_max_grow_copy_bb_insns = 16;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
rs6000_option_default_params (void)
{
/* Double growth factor to counter reduced min jump length. */
- set_default_param_value (PARAM_MAX_GROW_COPY_BB_INSNS, 16);
+ param_max_grow_copy_bb_insns = 16;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
static void
sh_option_default_params (void)
{
- set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 2);
+ param_simultaneous_prefetches = 2;
}
#undef TARGET_OPTION_OPTIMIZATION_TABLE
ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
HOST_WIDE_INT stack_clash_probe_interval
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ = 1 << param_stack_clash_protection_guard_size;
/* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
xops[0] = reg1;
bool final_adjustment_p)
{
HOST_WIDE_INT guard_size
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ = 1 << param_stack_clash_protection_guard_size;
HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
HOST_WIDE_INT min_probe_threshold
= (final_adjustment_p
for each allocation. For stack clash we are in a usable state if
the adjustment is less than GUARD_SIZE - GUARD_USED_BY_CALLER. */
HOST_WIDE_INT guard_size
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ = 1 << param_stack_clash_protection_guard_size;
HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
/* We can re-use the registers when:
/* We don't mind passing in global_options_set here as we don't use
the *options_set structs anyway. */
- maybe_set_param_value (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH,
- queue_depth,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_sched_autopref_queue_depth, queue_depth);
/* Set up parameters to be used in prefetching algorithm. Do not
override the defaults unless we are tuning for a core we have
researched values for. */
if (aarch64_tune_params.prefetch->num_slots > 0)
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
- aarch64_tune_params.prefetch->num_slots,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_simultaneous_prefetches,
+ aarch64_tune_params.prefetch->num_slots);
if (aarch64_tune_params.prefetch->l1_cache_size >= 0)
- maybe_set_param_value (PARAM_L1_CACHE_SIZE,
- aarch64_tune_params.prefetch->l1_cache_size,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_l1_cache_size,
+ aarch64_tune_params.prefetch->l1_cache_size);
if (aarch64_tune_params.prefetch->l1_cache_line_size >= 0)
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
- aarch64_tune_params.prefetch->l1_cache_line_size,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_l1_cache_line_size,
+ aarch64_tune_params.prefetch->l1_cache_line_size);
if (aarch64_tune_params.prefetch->l2_cache_size >= 0)
- maybe_set_param_value (PARAM_L2_CACHE_SIZE,
- aarch64_tune_params.prefetch->l2_cache_size,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_l2_cache_size,
+ aarch64_tune_params.prefetch->l2_cache_size);
if (!aarch64_tune_params.prefetch->prefetch_dynamic_strides)
- maybe_set_param_value (PARAM_PREFETCH_DYNAMIC_STRIDES,
- 0,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_prefetch_dynamic_strides, 0);
if (aarch64_tune_params.prefetch->minimum_stride >= 0)
- maybe_set_param_value (PARAM_PREFETCH_MINIMUM_STRIDE,
- aarch64_tune_params.prefetch->minimum_stride,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_prefetch_minimum_stride,
+ aarch64_tune_params.prefetch->minimum_stride);
/* Use the alternative scheduling-pressure algorithm by default. */
- maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, SCHED_PRESSURE_MODEL,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_sched_pressure_algorithm,
+ SCHED_PRESSURE_MODEL);
/* If the user hasn't changed it via configure then set the default to 64 KB
for the backend. */
- maybe_set_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
- DEFAULT_STK_CLASH_GUARD_SIZE == 0
- ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_stack_clash_protection_guard_size,
+ (DEFAULT_STK_CLASH_GUARD_SIZE == 0
+ ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE));
/* Validate the guard size. */
- int guard_size = PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ int guard_size = param_stack_clash_protection_guard_size;
/* Enforce that interval is the same size as size so the mid-end does the
right thing. */
- maybe_set_param_value (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL,
- guard_size,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_stack_clash_protection_probe_interval,
+ guard_size);
/* The maybe_set calls won't update the value if the user has explicitly set
one. Which means we need to validate that probing interval and guard size
are equal. */
int probe_interval
- = PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ = param_stack_clash_protection_probe_interval;
if (guard_size != probe_interval)
error ("stack clash guard size %<%d%> must be equal to probing interval "
"%<%d%>", guard_size, probe_interval);
#include "builtins.h"
#include "rtl-iter.h"
#include "flags.h"
+#include "opts.h"
/* This file should be included last. */
#include "target-def.h"
}
if (line_size)
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_line_size, line_size);
if (l1_size)
- maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_size, l1_size);
if (l2_size)
- maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l2_cache_size, l2_size);
/* Do some sanity checks on the above options. */
but measurable, size reduction for PIC code. Therefore, we decrease
the bar for unrestricted expression hoisting to the cost of PIC address
calculation, which is 2 instructions. */
- maybe_set_param_value (PARAM_GCSE_UNRESTRICTED_COST, 2,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_gcse_unrestricted_cost, 2);
/* ARM EABI defaults to strict volatile bitfields. */
if (TARGET_AAPCS_BASED && flag_strict_volatile_bitfields < 0
override the defaults unless we are tuning for a core we have
researched values for. */
if (current_tune->prefetch.num_slots > 0)
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
- current_tune->prefetch.num_slots,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_simultaneous_prefetches,
+ current_tune->prefetch.num_slots);
if (current_tune->prefetch.l1_cache_line_size >= 0)
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
- current_tune->prefetch.l1_cache_line_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_line_size,
+ current_tune->prefetch.l1_cache_line_size);
if (current_tune->prefetch.l1_cache_size >= 0)
- maybe_set_param_value (PARAM_L1_CACHE_SIZE,
- current_tune->prefetch.l1_cache_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_size,
+ current_tune->prefetch.l1_cache_size);
/* Look through ready list and all of queue for instructions
relevant for L2 auto-prefetcher. */
- int param_sched_autopref_queue_depth;
+ int sched_autopref_queue_depth;
switch (current_tune->sched_autopref)
{
case tune_params::SCHED_AUTOPREF_OFF:
- param_sched_autopref_queue_depth = -1;
+ sched_autopref_queue_depth = -1;
break;
case tune_params::SCHED_AUTOPREF_RANK:
- param_sched_autopref_queue_depth = 0;
+ sched_autopref_queue_depth = 0;
break;
case tune_params::SCHED_AUTOPREF_FULL:
- param_sched_autopref_queue_depth = max_insn_queue_index + 1;
+ sched_autopref_queue_depth = max_insn_queue_index + 1;
break;
default:
gcc_unreachable ();
}
- maybe_set_param_value (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH,
- param_sched_autopref_queue_depth,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_sched_autopref_queue_depth,
+ sched_autopref_queue_depth);
/* Currently, for slow flash data, we just disable literal pools. We also
disable it for pure-code. */
DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
- finalize_options_struct (&func_options);
-
return ret;
}
if (!TARGET_SCHEDULE)
opts->x_flag_schedule_insns_after_reload = opts->x_flag_schedule_insns = 0;
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
- ix86_tune_cost->simultaneous_prefetches,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
- ix86_tune_cost->prefetch_block,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_L1_CACHE_SIZE,
- ix86_tune_cost->l1_cache_size,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_L2_CACHE_SIZE,
- ix86_tune_cost->l2_cache_size,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_simultaneous_prefetches,
+ ix86_tune_cost->simultaneous_prefetches);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size,
+ ix86_tune_cost->prefetch_block);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size,
+ ix86_tune_cost->l1_cache_size);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size,
+ ix86_tune_cost->l2_cache_size);
/* Enable sw prefetching at -O3 for CPUS that prefetching is helpful. */
if (opts->x_flag_prefetch_loop_arrays < 0
= (cf_protection_level) (opts->x_flag_cf_protection | CF_SET);
if (ix86_tune_features [X86_TUNE_AVOID_256FMA_CHAINS])
- maybe_set_param_value (PARAM_AVOID_FMA_MAX_BITS, 256,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_avoid_fma_max_bits, 256);
else if (ix86_tune_features [X86_TUNE_AVOID_128FMA_CHAINS])
- maybe_set_param_value (PARAM_AVOID_FMA_MAX_BITS, 128,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_avoid_fma_max_bits, 128);
/* PR86952: jump table usage with retpolines is slow.
The PR provides some numbers about the slowness. */
{
if (flag_stack_clash_protection)
return (HOST_WIDE_INT_1U
- << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+ << param_stack_clash_protection_probe_interval);
else
return (HOST_WIDE_INT_1U << STACK_CHECK_PROBE_INTERVAL_EXP);
}
/* If we allocate less than the size of the guard statically,
then no probing is necessary, but we do need to allocate
the stack. */
- if (size < (1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE)))
+ if (size < (1 << param_stack_clash_protection_guard_size))
{
pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
GEN_INT (-size), -1,
ix86_max_noce_ifcvt_seq_cost (edge e)
{
bool predictable_p = predictable_edge_p (e);
-
- enum compiler_param param
- = (predictable_p
- ? PARAM_MAX_RTL_IF_CONVERSION_PREDICTABLE_COST
- : PARAM_MAX_RTL_IF_CONVERSION_UNPREDICTABLE_COST);
-
- /* If we have a parameter set, use that, otherwise take a guess using
- BRANCH_COST. */
- if (global_options_set.x_param_values[param])
- return PARAM_VALUE (param);
+ if (predictable_p)
+ {
+ if (global_options_set.x_param_max_rtl_if_conversion_predictable_cost)
+ return param_max_rtl_if_conversion_predictable_cost;
+ }
else
- return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (2);
+ {
+ if (global_options_set.x_param_max_rtl_if_conversion_unpredictable_cost)
+ return param_max_rtl_if_conversion_unpredictable_cost;
+ }
+
+ return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (2);
}
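The ix86_max_noce_ifcvt_seq_cost hunk above uses the other half of the new interface: alongside the value, each parameter has a global_options_set.x_param_* field recording whether the user supplied it.  Below is a stand-alone sketch of that "prefer the user's value, otherwise fall back to a heuristic" pattern; the structures are mock-ups, and the parameter name and fallback number are invented for the example.

  /* Mock of the "did the user set this --param?" check used above.  */

  #include <stdio.h>

  struct gcc_options     { int x_param_example_cost; };
  struct gcc_options_set { int x_param_example_cost; };

  static struct gcc_options     global_options     = { 40 };
  static struct gcc_options_set global_options_set = { 1 };  /* user set it */

  #define param_example_cost global_options.x_param_example_cost

  static int
  max_seq_cost (void)
  {
    /* Honour an explicit user setting; otherwise fall back to a guess,
       the way the hunk above falls back to BRANCH_COST * COSTS_N_INSNS (2).  */
    if (global_options_set.x_param_example_cost)
      return param_example_cost;
    return 8;  /* stand-in heuristic value */
  }

  int
  main (void)
  {
    printf ("max if-conversion sequence cost: %d\n", max_seq_cost ());
    return 0;
  }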
/* Return true if SEQ is a good candidate as a replacement for the
if (dw == MIN_DEP_WEAK)
/* Store and load are likely to alias, use higher cost to avoid stall. */
- return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
+ return param_sched_mem_true_dep_cost;
else if (dw > MIN_DEP_WEAK)
{
/* Store and load are less likely to alias. */
get_stack_clash_protection_probe_interval (void)
{
return (HOST_WIDE_INT_1U
- << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+ << param_stack_clash_protection_probe_interval);
}
static HOST_WIDE_INT
get_stack_clash_protection_guard_size (void)
{
return (HOST_WIDE_INT_1U
- << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
+ << param_stack_clash_protection_guard_size);
}
/* Allocate ORIG_SIZE bytes on the stack and probe the newly
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "rs6000-internal.h"
+#include "opts.h"
/* This file should be included last. */
#include "target-def.h"
if (global_init_p)
{
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
- rs6000_cost->simultaneous_prefetches,
- global_options.x_param_values,
- global_options_set.x_param_values);
- maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
- rs6000_cost->cache_line_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
- maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_simultaneous_prefetches,
+ rs6000_cost->simultaneous_prefetches);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_size,
+ rs6000_cost->l1_cache_size);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_line_size,
+ rs6000_cost->cache_line_size);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l2_cache_size,
+ rs6000_cost->l2_cache_size);
/* Increase loop peeling limits based on performance analysis. */
- maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
- global_options.x_param_values,
- global_options_set.x_param_values);
- maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_max_peeled_insns, 400);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_max_completely_peeled_insns, 400);
/* Use the 'model' -fsched-pressure algorithm by default. */
- maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
- SCHED_PRESSURE_MODEL,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_sched_pressure_algorithm,
+ SCHED_PRESSURE_MODEL);
/* Explicit -funroll-loops turns -munroll-only-small-loops off. */
if (((global_options_set.x_flag_unroll_loops && flag_unroll_loops)
{
bool temp_reg_clobbered_p = false;
HOST_WIDE_INT probe_interval
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ = 1 << param_stack_clash_protection_probe_interval;
HOST_WIDE_INT guard_size
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ = 1 << param_stack_clash_protection_guard_size;
if (flag_stack_clash_protection)
{
only exception is when TARGET_BACKCHAIN is active, in which case
we know *sp (offset 0) was written. */
HOST_WIDE_INT probe_interval
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ = 1 << param_stack_clash_protection_probe_interval;
HOST_WIDE_INT last_probe_offset
= (TARGET_BACKCHAIN
? (TARGET_PACKED_STACK ? STACK_POINTER_OFFSET - UNITS_PER_LONG : 0)
displacements. Trim that value down to 4k if that happens. This
might result in too many probes being generated only on the
oldest supported machine level z900. */
- if (!DISP_IN_RANGE ((1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL))))
- set_param_value ("stack-clash-protection-probe-interval", 12,
- opts->x_param_values,
- opts_set->x_param_values);
+ if (!DISP_IN_RANGE ((1 << param_stack_clash_protection_probe_interval)))
+ param_stack_clash_protection_probe_interval = 12;
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
{
- maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
- opts->x_param_values,
- opts_set->x_param_values);
- }
-
- maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_unrolled_insns,
+ 100);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_unroll_times, 32);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_completely_peeled_insns,
+ 2000);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_completely_peel_times,
+ 64);
+ }
+
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_pending_list_length,
+ 256);
/* values for loop prefetching */
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size, 256);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size, 128);
/* s390 has more than 2 levels and the size is much larger. Since
we are always running virtualized assume that we only get a small
part of the caches above l1. */
- maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size, 1500);
+ SET_OPTION_IF_UNSET (opts, opts_set,
+ param_prefetch_min_insn_to_mem_ratio, 2);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_simultaneous_prefetches, 6);
/* Use the alternative scheduling-pressure algorithm by default. */
- maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
- opts->x_param_values,
- opts_set->x_param_values);
-
- maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_sched_pressure_algorithm, 2);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_min_vect_loop_bound, 2);
/* Use aggressive inlining parameters. */
if (opts->x_s390_tune >= PROCESSOR_2964_Z13)
{
- maybe_set_param_value (PARAM_INLINE_MIN_SPEEDUP, 2,
- opts->x_param_values,
- opts_set->x_param_values);
-
- maybe_set_param_value (PARAM_MAX_INLINE_INSNS_AUTO, 80,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_inline_min_speedup, 2);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_auto, 80);
}
/* Set the default alignment. */
#include "context.h"
#include "builtins.h"
#include "tree-vector-builder.h"
+#include "opts.h"
/* This file should be included last. */
#include "target-def.h"
gcc_unreachable ();
};
- /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
+ /* param_simultaneous_prefetches is the number of prefetches that
can run at the same time. More important, it is the threshold
defining when additional prefetches will be dropped by the
hardware.
single-threaded program. Experimental results show that setting
this parameter to 32 works well when the number of threads is not
high. */
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
- ((sparc_cpu == PROCESSOR_ULTRASPARC
- || sparc_cpu == PROCESSOR_NIAGARA
- || sparc_cpu == PROCESSOR_NIAGARA2
- || sparc_cpu == PROCESSOR_NIAGARA3
- || sparc_cpu == PROCESSOR_NIAGARA4)
- ? 2
- : (sparc_cpu == PROCESSOR_ULTRASPARC3
- ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
- || sparc_cpu == PROCESSOR_M8)
- ? 32 : 3))),
- global_options.x_param_values,
- global_options_set.x_param_values);
-
- /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_simultaneous_prefetches,
+ ((sparc_cpu == PROCESSOR_ULTRASPARC
+ || sparc_cpu == PROCESSOR_NIAGARA
+ || sparc_cpu == PROCESSOR_NIAGARA2
+ || sparc_cpu == PROCESSOR_NIAGARA3
+ || sparc_cpu == PROCESSOR_NIAGARA4)
+ ? 2
+ : (sparc_cpu == PROCESSOR_ULTRASPARC3
+ ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
+ || sparc_cpu == PROCESSOR_M8)
+ ? 32 : 3))));
+
+ /* param_l1_cache_line_size is the size of the L1 cache line, in
bytes.
The Oracle SPARC Architecture (previously the UltraSPARC
L2 and L3, but only 32B are brought into the L1D$. (Assuming it
is a read_n prefetch, which is the only type which allocates to
the L1.) */
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
- (sparc_cpu == PROCESSOR_M8
- ? 64 : 32),
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_line_size,
+ (sparc_cpu == PROCESSOR_M8 ? 64 : 32));
- /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
+ /* param_l1_cache_size is the size of the L1D$ (most SPARC chips use
 Harvard level-1 caches) in kilobytes. Both UltraSPARC and
Niagara processors feature a L1D$ of 16KB. */
- maybe_set_param_value (PARAM_L1_CACHE_SIZE,
- ((sparc_cpu == PROCESSOR_ULTRASPARC
- || sparc_cpu == PROCESSOR_ULTRASPARC3
- || sparc_cpu == PROCESSOR_NIAGARA
- || sparc_cpu == PROCESSOR_NIAGARA2
- || sparc_cpu == PROCESSOR_NIAGARA3
- || sparc_cpu == PROCESSOR_NIAGARA4
- || sparc_cpu == PROCESSOR_NIAGARA7
- || sparc_cpu == PROCESSOR_M8)
- ? 16 : 64),
- global_options.x_param_values,
- global_options_set.x_param_values);
-
-
- /* PARAM_L2_CACHE_SIZE is the size fo the L2 in kilobytes. Note
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_size,
+ ((sparc_cpu == PROCESSOR_ULTRASPARC
+ || sparc_cpu == PROCESSOR_ULTRASPARC3
+ || sparc_cpu == PROCESSOR_NIAGARA
+ || sparc_cpu == PROCESSOR_NIAGARA2
+ || sparc_cpu == PROCESSOR_NIAGARA3
+ || sparc_cpu == PROCESSOR_NIAGARA4
+ || sparc_cpu == PROCESSOR_NIAGARA7
+ || sparc_cpu == PROCESSOR_M8)
+ ? 16 : 64));
+
+  /* param_l2_cache_size is the size of the L2 in kilobytes.  Note
that 512 is the default in params.def. */
- maybe_set_param_value (PARAM_L2_CACHE_SIZE,
- ((sparc_cpu == PROCESSOR_NIAGARA4
- || sparc_cpu == PROCESSOR_M8)
- ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
- ? 256 : 512)),
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l2_cache_size,
+ ((sparc_cpu == PROCESSOR_NIAGARA4
+ || sparc_cpu == PROCESSOR_M8)
+ ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
+ ? 256 : 512)));
/* Disable save slot sharing for call-clobbered registers by default.
#include "tree-pass.h"
#include "context.h"
#include "builtins.h"
+#include "opts.h"
/* This file should be included last. */
#include "target-def.h"
/* Allow the size of compilation units to double because of inlining.
In practice the global size of the object code is hardly affected
because the additional instructions will take up the padding. */
- maybe_set_param_value (PARAM_INLINE_UNIT_GROWTH, 100,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_inline_unit_growth, 100);
}
/* Likewise for loops. */
}
return NULL;
}
- if (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID))
+ if (param_profile_func_internal_id)
elt.ident = current_function_funcdef_no + 1;
else
{
{
expanded_location xloc
= expand_location (DECL_SOURCE_LOCATION (n->decl));
- bool use_name_only = (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID) == 0);
+ bool use_name_only = (param_profile_func_internal_id == 0);
chksum = (use_name_only ? 0 : xloc.line);
if (xloc.file)
/* Announce function */
offset = gcov_write_tag (GCOV_TAG_FUNCTION);
- if (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID))
+ if (param_profile_func_internal_id)
gcov_write_unsigned (current_function_funcdef_no + 1);
else
{
item = ggc_alloc<coverage_data> ();
- if (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID))
+ if (param_profile_func_internal_id)
item->ident = current_function_funcdef_no + 1;
else
{
+2019-11-12 Martin Liska <mliska@suse.cz>
+
+	* name-lookup.c (namespace_hints::namespace_hints): Replace the old
+	parameter syntax with the new one; include opts.h where needed.
+	Use the SET_OPTION_IF_UNSET macro.
+ * typeck.c (comptypes): Likewise.
+
2019-11-12 Maciej W. Rozycki <macro@codesourcery.com>
Frederik Harwath <frederik@codesourcery.com>
m_candidates = vNULL;
m_limited = false;
- m_limit = PARAM_VALUE (CXX_MAX_NAMESPACES_FOR_DIAGNOSTIC_HELP);
+ m_limit = param_cxx_max_namespaces_for_diagnostic_help;
/* Breadth-first search of namespaces. Up to limit namespaces
searched (limit zero == unlimited). */
perform a deep check. */
return structural_comptypes (t1, t2, strict);
- if (flag_checking && USE_CANONICAL_TYPES)
+ if (flag_checking && param_use_canonical_types)
{
bool result = structural_comptypes (t1, t2, strict);
return result;
}
- if (!flag_checking && USE_CANONICAL_TYPES)
+ if (!flag_checking && param_use_canonical_types)
return TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2);
else
return structural_comptypes (t1, t2, strict);
if (follow_jumps)
{
bb = data->path[path_size - 1].bb;
- while (bb && path_size < PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH))
+ while (bb && path_size < param_max_cse_path_length)
{
if (single_succ_p (bb))
e = single_succ_edge (bb);
FIXME: This is a real kludge and needs to be done some other
way. */
if (NONDEBUG_INSN_P (insn)
- && num_insns++ > PARAM_VALUE (PARAM_MAX_CSE_INSNS))
+ && num_insns++ > param_max_cse_insns)
{
flush_hash_table ();
num_insns = 0;
init_cse_reg_info (nregs);
ebb_data.path = XNEWVEC (struct branch_path,
- PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
+ param_max_cse_path_length);
cse_cfg_altered = false;
cse_jumps_altered = false;
p = &(*p)->next;
continue;
}
- if (num_mems < PARAM_VALUE (PARAM_MAX_CSELIB_MEMORY_LOCATIONS)
+ if (num_mems < param_max_cselib_memory_locations
&& ! canon_anti_dependence (x, false, mem_rtx,
GET_MODE (mem_rtx), mem_addr))
{
/* For -O1 reduce the maximum number of active local stores for RTL DSE
since this can consume huge amounts of memory (PR89115). */
- int max_active_local_stores = PARAM_VALUE (PARAM_MAX_DSE_ACTIVE_LOCAL_STORES);
+ int max_active_local_stores = param_max_dse_active_local_stores;
if (optimize < 2)
max_active_local_stores /= 10;
set_last_insn (last);
cur_insn_uid = 0;
- if (MIN_NONDEBUG_INSN_UID || MAY_HAVE_DEBUG_INSNS)
+ if (param_min_nondebug_insn_uid || MAY_HAVE_DEBUG_INSNS)
{
int debug_count = 0;
- cur_insn_uid = MIN_NONDEBUG_INSN_UID - 1;
+ cur_insn_uid = param_min_nondebug_insn_uid - 1;
cur_debug_insn_uid = 0;
for (insn = first; insn; insn = NEXT_INSN (insn))
- if (INSN_UID (insn) < MIN_NONDEBUG_INSN_UID)
+ if (INSN_UID (insn) < param_min_nondebug_insn_uid)
cur_debug_insn_uid = MAX (cur_debug_insn_uid, INSN_UID (insn));
else
{
}
if (debug_count)
- cur_debug_insn_uid = MIN_NONDEBUG_INSN_UID + debug_count;
+ cur_debug_insn_uid = param_min_nondebug_insn_uid + debug_count;
else
cur_debug_insn_uid++;
}
differences due to debug insns, and not be affected by
-fmin-insn-uid, to avoid excessive table size and to simplify
debugging of -fcompare-debug failures. */
- if (cur_debug_insn_uid > MIN_NONDEBUG_INSN_UID)
+ if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
n -= cur_debug_insn_uid;
else
- n -= MIN_NONDEBUG_INSN_UID;
+ n -= param_min_nondebug_insn_uid;
return n;
}
insn = as_a <rtx_debug_insn *> (rtx_alloc (DEBUG_INSN));
INSN_UID (insn) = cur_debug_insn_uid++;
- if (cur_debug_insn_uid > MIN_NONDEBUG_INSN_UID)
+ if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
INSN_UID (insn) = cur_insn_uid++;
PATTERN (insn) = pattern;
{
set_first_insn (NULL);
set_last_insn (NULL);
- if (MIN_NONDEBUG_INSN_UID)
- cur_insn_uid = MIN_NONDEBUG_INSN_UID;
+ if (param_min_nondebug_insn_uid)
+ cur_insn_uid = param_min_nondebug_insn_uid;
else
cur_insn_uid = 1;
cur_debug_insn_uid = 1;
{
/* Round SIZE down to STACK_CLASH_PROTECTION_PROBE_INTERVAL */
*probe_interval
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ = 1 << param_stack_clash_protection_probe_interval;
*rounded_size = simplify_gen_binary (AND, Pmode, size,
GEN_INT (-*probe_interval));
}
loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
profile_count count_threshold = cfun->cfg->count_max.apply_scale
- (1, PARAM_VALUE (PARAM_ALIGN_THRESHOLD));
+ (1, param_align_threshold);
if (dump_file)
{
&& branch_count + fallthru_count > count_threshold
&& (branch_count
> fallthru_count.apply_scale
- (PARAM_VALUE (PARAM_ALIGN_LOOP_ITERATIONS), 1)))
+ (param_align_loop_iterations, 1)))
{
align_flags alignment = LOOP_ALIGN (label);
if (dump_file)
short-circuited branch and the underlying object on both sides
is the same, make a non-short-circuit operation. */
bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
- if (PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT) != -1)
+ if (param_logical_op_non_short_circuit != -1)
logical_op_non_short_circuit
- = PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT);
+ = param_logical_op_non_short_circuit;
if (logical_op_non_short_circuit
&& !flag_sanitize_coverage
&& lhs != 0 && rhs != 0
return tem;
bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
- if (PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT) != -1)
+ if (param_logical_op_non_short_circuit != -1)
logical_op_non_short_circuit
- = PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT);
+ = param_logical_op_non_short_circuit;
if (logical_op_non_short_circuit
&& !flag_sanitize_coverage
&& (code == TRUTH_AND_EXPR
would not, passes that need this information could be revised
to provide it through dataflow propagation. */
return (!name_registered_for_update_p (t)
- && depth < PARAM_VALUE (PARAM_MAX_SSA_NAME_QUERY_DEPTH)
+ && depth < param_max_ssa_name_query_depth
&& gimple_stmt_nonnegative_warnv_p (SSA_NAME_DEF_STMT (t),
strict_overflow_p, depth));
would not, passes that need this information could be revised
to provide it through dataflow propagation. */
return (!name_registered_for_update_p (t)
- && depth < PARAM_VALUE (PARAM_MAX_SSA_NAME_QUERY_DEPTH)
+ && depth < param_max_ssa_name_query_depth
&& gimple_stmt_integer_valued_real_p (SSA_NAME_DEF_STMT (t),
depth));
&& optimize_function_for_size_p (cfun));
cost = set_src_cost (x, mode, 0);
- if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
+ if (cost < COSTS_N_INSNS (param_gcse_unrestricted_cost))
{
max_distance
- = ((HOST_WIDE_INT)GCSE_COST_DISTANCE_RATIO * cost) / 10;
+ = ((HOST_WIDE_INT)param_gcse_cost_distance_ratio * cost) / 10;
if (max_distance == 0)
return 0;
PRUNE_EXPRS. */
for (j = 0; j < (unsigned) n_elems; j++)
if (deletions[j]
- && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
+ && (insertions[j] / deletions[j]) > param_max_gcse_insertion_ratio)
bitmap_set_bit (prune_exprs, j);
/* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
expressions, nothing gets hoisted from the entry block. */
FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
{
- domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
+ domby = get_dominated_to_depth (CDI_DOMINATORS, bb,
+ param_max_hoist_depth);
if (domby.length () == 0)
continue;
bool
gcse_or_cprop_is_too_expensive (const char *pass)
{
- unsigned int memory_request = (n_basic_blocks_for_fn (cfun)
- * SBITMAP_SET_SIZE (max_reg_num ())
- * sizeof (SBITMAP_ELT_TYPE));
+ int memory_request = (n_basic_blocks_for_fn (cfun)
+ * SBITMAP_SET_SIZE (max_reg_num ())
+ * sizeof (SBITMAP_ELT_TYPE));
/* Trying to perform global optimizations on flow graphs which have
a high connectivity will take a long time and is unlikely to be
/* If allocating memory for the dataflow bitmaps would take up too much
storage it's better just to disable the optimization. */
- if (memory_request > MAX_GCSE_MEMORY)
+ if (memory_request > param_max_gcse_memory)
{
warning (OPT_Wdisabled_optimization,
"%s: %d basic blocks and %d registers; "
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
- set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
- set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
+ param_ggc_min_expand = ggc_min_expand_heuristic ();
+ param_ggc_min_heapsize = ggc_min_heapsize_heuristic ();
#endif
}
total allocations haven't expanded much since the last
collection. */
float allocated_last_gc =
- MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
+ MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * 1024);
- float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
+ float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
return;
simple reduction of inner loop and double reduction of the loop nest. */
/* Maximum number of stmts in each loop that should be interchanged. */
-#define MAX_NUM_STMT (PARAM_VALUE (PARAM_LOOP_INTERCHANGE_MAX_NUM_STMTS))
+#define MAX_NUM_STMT (param_loop_interchange_max_num_stmts)
/* Maximum number of data references in loop nest. */
-#define MAX_DATAREFS (PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+#define MAX_DATAREFS (param_loop_max_datarefs_for_datadeps)
/* Comparison ratio of access stride between inner/outer loops to be
interchanged. This is the minimum stride ratio for loop interchange
to be profitable. */
-#define OUTER_STRIDE_RATIO (PARAM_VALUE (PARAM_LOOP_INTERCHANGE_STRIDE_RATIO))
+#define OUTER_STRIDE_RATIO (param_loop_interchange_stride_ratio)
/* The same as above, but we require higher ratio for interchanging the
innermost two loops. */
#define INNER_STRIDE_RATIO ((OUTER_STRIDE_RATIO) + 1)
/* We regard a user-specified minimum percentage of zero as a request
to ignore all profitability concerns and apply the transformation
always. */
- if (!PARAM_VALUE (PARAM_UNROLL_JAM_MIN_PERCENT))
+ if (!param_unroll_jam_min_percent)
profit_unroll = MAX(2, profit_unroll);
else if (removed * 100 / datarefs.length ()
- < (unsigned)PARAM_VALUE (PARAM_UNROLL_JAM_MIN_PERCENT))
+ < (unsigned)param_unroll_jam_min_percent)
profit_unroll = 1;
if (unroll_factor > profit_unroll)
unroll_factor = profit_unroll;
- if (unroll_factor > (unsigned)PARAM_VALUE (PARAM_UNROLL_JAM_MAX_UNROLL))
- unroll_factor = PARAM_VALUE (PARAM_UNROLL_JAM_MAX_UNROLL);
+ if (unroll_factor > (unsigned)param_unroll_jam_max_unroll)
+ unroll_factor = param_unroll_jam_max_unroll;
unroll = (unroll_factor > 1
&& can_unroll_loop_p (outer, unroll_factor, &desc));
loop_versioning::max_insns_for_loop (class loop *loop)
{
return (loop->inner
- ? PARAM_VALUE (PARAM_LOOP_VERSIONING_MAX_OUTER_INSNS)
- : PARAM_VALUE (PARAM_LOOP_VERSIONING_MAX_INNER_INSNS));
+ ? param_loop_versioning_max_outer_insns
+ : param_loop_versioning_max_inner_insns);
}
/* Return true if for cost reasons we should avoid versioning any loop
/* Upper Hard limit on the number statements to copy. */
if (num_stmts_in_join
- >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
+ >= param_max_jump_thread_duplication_stmts)
return false;
return true;
return false;
bool allow_unaligned
- = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
+ = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
/* Punt if the combined store would not be aligned and we need alignment. */
if (!allow_unaligned)
{
if (info->order >= merged_store->first_nonmergeable_order
|| (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
- > (unsigned) PARAM_VALUE (PARAM_STORE_MERGING_MAX_SIZE)))
+ > (unsigned) param_store_merging_max_size))
;
/* |---store 1---|
auto_vec<class split_store *, 32> split_stores;
bool allow_unaligned_store
- = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
+ = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
bool allow_unaligned_load = allow_unaligned_store;
bool bzero_first = false;
store_immediate_info *store;
/* If we reach the limit of stores to merge in a chain terminate and
process the chain now. */
if ((*chain_info)->m_store_info.length ()
- == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
+ == (unsigned int) param_max_stores_to_merge)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
// Limit potential of N^2 behavior for long candidate chains.
int iters = 0;
- int max_iters = PARAM_VALUE (PARAM_MAX_SLSR_CANDIDATE_SCAN);
+ int max_iters = param_max_slsr_candidate_scan;
mapping_key.base_expr = base_expr;
chain = base_cand_map->find (&mapping_key);
{
codegen_error = true;
gcc_assert (! flag_checking
- || PARAM_VALUE (PARAM_GRAPHITE_ALLOW_CODEGEN_ERRORS));
+ || param_graphite_allow_codegen_errors);
}
bool is_constant (tree op) const
{
int old_err = isl_options_get_on_error (scop->isl_context);
int old_max_operations = isl_ctx_get_max_operations (scop->isl_context);
- int max_operations = PARAM_VALUE (PARAM_MAX_ISL_OPERATIONS);
+ int max_operations = param_max_isl_operations;
if (max_operations)
isl_ctx_set_max_operations (scop->isl_context, max_operations);
isl_options_set_on_error (scop->isl_context, ISL_ON_ERROR_CONTINUE);
if (type != isl_schedule_node_leaf)
return node;
- long tile_size = PARAM_VALUE (PARAM_LOOP_BLOCK_TILE_SIZE);
+ long tile_size = param_loop_block_tile_size;
if (dims <= 1
|| tile_size == 0
|| !isl_schedule_node_band_get_permutable (node))
{
int old_err = isl_options_get_on_error (scop->isl_context);
int old_max_operations = isl_ctx_get_max_operations (scop->isl_context);
- int max_operations = PARAM_VALUE (PARAM_MAX_ISL_OPERATIONS);
+ int max_operations = param_max_isl_operations;
if (max_operations)
isl_ctx_set_max_operations (scop->isl_context, max_operations);
isl_options_set_on_error (scop->isl_context, ISL_ON_ERROR_CONTINUE);
continue;
}
- unsigned max_arrays = PARAM_VALUE (PARAM_GRAPHITE_MAX_ARRAYS_PER_SCOP);
+ unsigned max_arrays = param_graphite_max_arrays_per_scop;
if (max_arrays > 0
&& scop->drs.length () >= max_arrays)
{
}
find_scop_parameters (scop);
- graphite_dim_t max_dim = PARAM_VALUE (PARAM_GRAPHITE_MAX_NB_SCOP_PARAMS);
+ graphite_dim_t max_dim = param_graphite_max_nb_scop_params;
if (max_dim > 0
&& scop_nb_params (scop) > max_dim)
{
modulo_max_stages = max_stages;
modulo_n_insns = insns;
modulo_iter0_max_uid = max_uid;
- modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
+ modulo_backtracks_left = param_max_modulo_backtrack_attempts;
}
/* A structure to record a pair of insns where the first one is a real
if (flag_sched_critical_path_heuristic && priority_val)
return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
- if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
+ if (param_sched_autopref_queue_depth >= 0)
{
int autopref = autopref_rank_for_schedule (tmp, tmp2);
if (autopref != 0)
}
/* Add INSN to the model worklist. Start looking for a suitable position
- between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
+ between neighbors PREV and NEXT, testing at most param_max_sched_ready_insns
insns either side. A null PREV indicates the beginning of the list and
a null NEXT indicates the end. */
{
int count;
- count = MAX_SCHED_READY_INSNS;
+ count = param_max_sched_ready_insns;
if (count > 0 && prev && model_order_p (insn, prev))
do
{
int count;
prev = insn->prev;
- count = MAX_SCHED_READY_INSNS;
+ count = param_max_sched_ready_insns;
while (count > 0 && prev && model_order_p (insn, prev))
{
count--;
{
fprintf (sched_dump, ";;\t+--- worklist:\n");
insn = model_worklist;
- count = MAX_SCHED_READY_INSNS;
+ count = param_max_sched_ready_insns;
while (count > 0 && insn)
{
fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
Failing that, just pick the highest-priority instruction in the
worklist. */
- count = MAX_SCHED_READY_INSNS;
+ count = param_max_sched_ready_insns;
insn = model_worklist;
fallback = 0;
for (;;)
/* If the ready list is full, delay the insn for 1 cycle.
See the comment in schedule_block for the rationale. */
if (!reload_completed
- && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
+ && (ready->n_ready - ready->n_debug > param_max_sched_ready_insns
|| (sched_pressure == SCHED_PRESSURE_MODEL
- /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
- instructions too. */
+ /* Limit pressure recalculations to
+ param_max_sched_ready_insns instructions too. */
&& model_index (insn) > (model_curr_point
- + MAX_SCHED_READY_INSNS)))
+ + param_max_sched_ready_insns)))
&& !(sched_pressure == SCHED_PRESSURE_MODEL
&& model_curr_point < model_num_insns
/* Always allow the next model instruction to issue. */
/* Exit early if the param forbids this or if we're not entering here through
normal haifa scheduling. This can happen if selective scheduling is
explicitly enabled. */
- if (!insn_queue || PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) <= 0)
+ if (!insn_queue || param_sched_autopref_queue_depth <= 0)
return 0;
if (sched_verbose >= 2 && ready_index == 0)
}
}
- if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) == 1)
+ if (param_sched_autopref_queue_depth == 1)
continue;
/* Everything from the current queue slot should have been moved to
the ready list. */
gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
- int n_stalls = PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) - 1;
+ int n_stalls = param_sched_autopref_queue_depth - 1;
if (n_stalls > max_insn_queue_index)
n_stalls = max_insn_queue_index;
time in the worst case. Before reload we are more likely to have
big lists so truncate them to a reasonable size. */
if (!reload_completed
- && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
+ && ready.n_ready - ready.n_debug > param_max_sched_ready_insns)
{
ready_sort_debug (&ready);
ready_sort_real (&ready);
- /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
+ /* Find first free-standing insn past param_max_sched_ready_insns.
If there are debug insns, we know they're first. */
- for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
+ for (i = param_max_sched_ready_insns + ready.n_debug; i < ready.n_ready;
+ i++)
if (!SCHED_GROUP_P (ready_element (&ready, i)))
break;
&& !reload_completed
&& common_sched_info->sched_pass_id == SCHED_RGN_PASS)
sched_pressure = ((enum sched_pressure_algorithm)
- PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
+ param_sched_pressure_algorithm);
else
sched_pressure = SCHED_PRESSURE_NONE;
if (spec_info->mask != 0)
{
- spec_info->data_weakness_cutoff =
- (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
- spec_info->control_weakness_cutoff =
- (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
- * REG_BR_PROB_BASE) / 100;
+ spec_info->data_weakness_cutoff
+ = (param_sched_spec_prob_cutoff * MAX_DEP_WEAK) / 100;
+ spec_info->control_weakness_cutoff
+ = (param_sched_spec_prob_cutoff * REG_BR_PROB_BASE) / 100;
}
else
/* So we won't read anything accidentally. */
unsigned index = hsa_get_number_decl_kernel_mappings ();
/* Emit store to debug argument. */
- if (PARAM_VALUE (PARAM_HSA_GEN_DEBUG_STORES) > 0)
+ if (param_hsa_gen_debug_stores > 0)
set_debug_value (prologue, new hsa_op_immed (1000 + index, BRIG_TYPE_U64));
}
{
rtx_insn *insn;
unsigned count = 0;
- unsigned param = PARAM_VALUE (PARAM_MAX_RTL_IF_CONVERSION_INSNS);
+ unsigned param = param_max_rtl_if_conversion_insns;
FOR_BB_INSNS (test_bb, insn)
{
vec<rtx> else_regs = vNULL;
unsigned int i;
int success_p = FALSE;
- int limit = PARAM_VALUE (PARAM_MAX_RTL_IF_CONVERSION_INSNS);
+ int limit = param_max_rtl_if_conversion_insns;
/* Build a mapping for each block to the value used for each
register. */
return false;
}
- if (values_count == PARAM_VALUE (PARAM_IPA_CP_VALUE_LIST_SIZE))
+ if (values_count == param_ipa_cp_value_list_size)
{
/* We can only free sources, not the values themselves, because sources
of other values in this SCC might point to them. */
set_agg_lats_to_bottom (dest_plats);
return false;
}
- if (dest_plats->aggs_count == PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS))
+ if (dest_plats->aggs_count == param_ipa_max_agg_items)
return false;
dest_plats->aggs_count++;
new_al = ipcp_agg_lattice_pool.allocate ();
int size = ipa_size_summaries->get (callee)->size;
/* FIXME: The values below need re-considering and perhaps also
integrating into the cost metrics, at lest in some very basic way. */
- if (size <= MAX_INLINE_INSNS_AUTO / 4)
+ if (size <= param_max_inline_insns_auto / 4)
res += 31 / ((int)speculative + 1);
- else if (size <= MAX_INLINE_INSNS_AUTO / 2)
+ else if (size <= param_max_inline_insns_auto / 2)
res += 15 / ((int)speculative + 1);
- else if (size <= MAX_INLINE_INSNS_AUTO
+ else if (size <= param_max_inline_insns_auto
|| DECL_DECLARED_INLINE_P (callee->decl))
res += 7 / ((int)speculative + 1);
}
{
int result = 0;
if (hints & (INLINE_HINT_loop_iterations | INLINE_HINT_loop_stride))
- result += PARAM_VALUE (PARAM_IPA_CP_LOOP_HINT_BONUS);
+ result += param_ipa_cp_loop_hint_bonus;
return result;
}
{
if (info->node_within_scc)
evaluation = (evaluation
- * (100 - PARAM_VALUE (PARAM_IPA_CP_RECURSION_PENALTY))) / 100;
+ * (100 - param_ipa_cp_recursion_penalty)) / 100;
if (info->node_calling_single_call)
evaluation = (evaluation
- * (100 - PARAM_VALUE (PARAM_IPA_CP_SINGLE_CALL_PENALTY)))
+ * (100 - param_ipa_cp_single_call_penalty))
/ 100;
return evaluation;
", threshold: %i\n",
info->node_within_scc ? ", scc" : "",
info->node_calling_single_call ? ", single_call" : "",
- evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
+ evaluation, param_ipa_cp_eval_threshold);
}
- return evaluation >= PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD);
+ return evaluation >= param_ipa_cp_eval_threshold;
}
else
{
time_benefit, size_cost, freq_sum,
info->node_within_scc ? ", scc" : "",
info->node_calling_single_call ? ", single_call" : "",
- evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
+ evaluation, param_ipa_cp_eval_threshold);
- return evaluation >= PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD);
+ return evaluation >= param_ipa_cp_eval_threshold;
}
}
}
max_new_size = overall_size;
- if (max_new_size < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
- max_new_size = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
- max_new_size += max_new_size * PARAM_VALUE (PARAM_IPCP_UNIT_GROWTH) / 100 + 1;
+ if (max_new_size < param_large_unit_insns)
+ max_new_size = param_large_unit_insns;
+ max_new_size += max_new_size * param_ipcp_unit_growth / 100 + 1;
if (dump_file)
fprintf (dump_file, "\noverall_size: %li, max_new_size: %li\n",
struct agg_position_info *aggpos,
expr_eval_ops *param_ops_p = NULL)
{
- int op_limit = PARAM_VALUE (PARAM_IPA_MAX_PARAM_EXPR_OPS);
+ int op_limit = param_ipa_max_param_expr_ops;
int op_count = 0;
if (param_ops_p)
auto_vec<std::pair<tree, tree> > ranges;
tree type = TREE_TYPE (op);
- int bound_limit = PARAM_VALUE (PARAM_IPA_MAX_SWITCH_PREDICATE_BOUNDS);
+ int bound_limit = param_ipa_max_switch_predicate_bounds;
int bound_count = 0;
wide_int vr_wmin, vr_wmax;
value_range_kind vr_type = get_range_info (op, &vr_wmin, &vr_wmax);
static void
analyze_function_body (struct cgraph_node *node, bool early)
{
- sreal time = PARAM_VALUE (PARAM_UNINLINED_FUNCTION_TIME);
+ sreal time = param_uninlined_function_time;
/* Estimate static overhead for function prologue/epilogue and alignment. */
- int size = PARAM_VALUE (PARAM_UNINLINED_FUNCTION_INSNS);
+ int size = param_uninlined_function_insns;
/* Benefits are scaled by probability of elimination that is in range
<0,2>. */
basic_block bb;
fbi.bb_infos = vNULL;
fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
fbi.param_count = count_formal_params (node->decl);
- fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+ fbi.aa_walk_budget = param_ipa_max_aa_steps;
nonconstant_names.safe_grow_cleared
(SSANAMES (my_function)->length ());
info->account_size_time (0, 0, bb_predicate, bb_predicate);
bb_predicate = predicate::not_inlined ();
- info->account_size_time (PARAM_VALUE (PARAM_UNINLINED_FUNCTION_INSNS)
+ info->account_size_time (param_uninlined_function_insns
* ipa_fn_summary::size_scale,
- PARAM_VALUE (PARAM_UNINLINED_FUNCTION_TIME),
+ param_uninlined_function_time,
bb_predicate,
bb_predicate);
es->call_stmt_size = eni_size_weights.call_cost;
es->call_stmt_time = eni_time_weights.call_cost;
info->account_size_time (ipa_fn_summary::size_scale
- * PARAM_VALUE
- (PARAM_UNINLINED_FUNCTION_THUNK_INSNS),
- PARAM_VALUE
- (PARAM_UNINLINED_FUNCTION_THUNK_TIME), t, t);
+ * param_uninlined_function_thunk_insns,
+ param_uninlined_function_thunk_time, t, t);
t = predicate::not_inlined ();
info->account_size_time (2 * ipa_fn_summary::size_scale, 0, t, t);
ipa_update_overall_fn_summary (node);
else if (DECL_COMDAT (node->decl)
&& node->can_remove_if_no_direct_calls_p ())
return (info->size
- * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
- + 50) / 100;
+ * (100 - param_comdat_sharing_probability)
+ + 50) / 100;
}
return 0;
}
if (limit < what_size_info->self_size)
limit = what_size_info->self_size;
- limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
+ limit += limit * param_large_function_growth / 100;
/* Check the size after inlining against the function limits. But allow
the function to shrink if it went over the limits by forced inlining. */
newsize = estimate_size_after_inlining (to, e);
if (newsize >= ipa_size_summaries->get (what)->size
- && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
+ && newsize > param_large_function_insns
&& newsize > limit)
{
e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
on every invocation of the caller (i.e. its call statement dominates
exit block). We do not track this information, yet. */
stack_size_limit += ((gcov_type)stack_size_limit
- * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
+ * param_stack_frame_growth / 100);
inlined_stack = (ipa_get_stack_frame_offset (to)
+ outer_info->estimated_self_stack_size
This bit overoptimistically assume that we are good at stack
packing. */
&& inlined_stack > ipa_fn_summaries->get (to)->estimated_stack_size
- && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
+ && inlined_stack > param_large_stack_frame)
{
e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
return false;
if (opt_for_fn (n->decl, optimize) >= 3)
{
if (hint)
- return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE)
- * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT) / 100;
- return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE);
+ return param_max_inline_insns_single
+ * param_inline_heuristics_hint_percent / 100;
+ return param_max_inline_insns_single;
}
else
{
if (hint)
- return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE_O2)
- * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT_O2) / 100;
- return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE_O2);
+ return param_max_inline_insns_single_o2
+ * param_inline_heuristics_hint_percent_o2 / 100;
+ return param_max_inline_insns_single_o2;
}
}
if (opt_for_fn (n->decl, optimize) >= 3)
{
if (hint)
- return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO)
- * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT) / 100;
- return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO);
+ return param_max_inline_insns_auto
+ * param_inline_heuristics_hint_percent / 100;
+ return param_max_inline_insns_auto;
}
else
{
if (hint)
- return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO_O2)
- * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT_O2) / 100;
- return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO_O2);
+ return param_max_inline_insns_auto_o2
+ * param_inline_heuristics_hint_percent_o2 / 100;
+ return param_max_inline_insns_auto_o2;
}
}
inlinable = false;
}
/* If callee is optimized for size and caller is not, allow inlining if
- code shrinks or we are in MAX_INLINE_INSNS_SINGLE limit and callee
- is inline (and thus likely an unified comdat). This will allow caller
- to run faster. */
+ code shrinks or we are in param_max_inline_insns_single limit and
+ callee is inline (and thus likely a unified comdat).
+ This will allow the caller to run faster. */
else if (opt_for_fn (callee->decl, optimize_size)
> opt_for_fn (caller->decl, optimize_size))
{
int growth = estimate_edge_growth (e);
- if (growth > PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE)
+ if (growth > param_max_inline_insns_size
&& (!DECL_DECLARED_INLINE_P (callee->decl)
&& growth >= MAX (inline_insns_single (caller, false),
inline_insns_auto (caller, false))))
int growth = estimate_edge_growth (e);
int n;
int early_inlining_insns = opt_for_fn (e->caller->decl, optimize) >= 3
- ? PARAM_VALUE (PARAM_EARLY_INLINING_INSNS)
- : PARAM_VALUE (PARAM_EARLY_INLINING_INSNS_O2);
+ ? param_early_inlining_insns
+ : param_early_inlining_insns_o2;
- if (growth <= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE))
+ if (growth <= param_max_inline_insns_size)
;
else if (!e->maybe_hot_p ())
{
? e->caller->inlined_to
: e->caller);
int limit = opt_for_fn (caller->decl, optimize) >= 3
- ? PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)
- : PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP_O2);
+ ? param_inline_min_speedup
+ : param_inline_min_speedup_o2;
if ((time - inlined_time) * 100 > time * limit)
return true;
| INLINE_HINT_loop_iterations
| INLINE_HINT_loop_stride));
- if (growth <= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE))
+ if (growth <= param_max_inline_insns_size)
;
- /* Apply MAX_INLINE_INSNS_SINGLE limit. Do not do so when
+ /* Apply param_max_inline_insns_single limit. Do not do so when
hints suggests that inlining given function is very profitable.
Avoid computation of big_speedup_p when not necessary to change
outcome of decision. */
}
else if (!DECL_DECLARED_INLINE_P (callee->decl)
&& !opt_for_fn (e->caller->decl, flag_inline_functions)
- && growth >= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SMALL))
+ && growth >= param_max_inline_insns_small)
{
/* growth_positive_p is expensive, always test it last. */
if (growth >= inline_insns_single (e->caller, false)
want_inline = false;
}
}
- /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
- Bypass the limit when speedup seems big. */
+ /* Apply param_max_inline_insns_auto limit for functions not declared
+ inline. Bypass the limit when speedup seems big. */
else if (!DECL_DECLARED_INLINE_P (callee->decl)
&& growth >= inline_insns_auto (e->caller, apply_hints)
&& (apply_hints
char const *reason = NULL;
bool want_inline = true;
sreal caller_freq = 1;
- int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
+ int max_depth = param_max_inline_recursive_depth_auto;
if (DECL_DECLARED_INLINE_P (edge->caller->decl))
- max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
+ max_depth = param_max_inline_recursive_depth;
if (!edge->maybe_hot_p ())
{
{
if (edge->sreal_frequency () * 100
<= caller_freq
- * PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY))
+ * param_min_inline_recursive_probability)
{
reason = "frequency of recursive call is too small";
want_inline = false;
/* ... or when early optimizers decided to split and edge
frequency still indicates splitting is a win ... */
|| (callee->split_part && !caller->split_part
- && freq * 100
- < PARAM_VALUE
- (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY)
+ && freq * 100 < param_partial_inlining_entry_probability
/* ... and do not overwrite user specified hints. */
&& (!DECL_DECLARED_INLINE_P (edge->callee->decl)
|| DECL_DECLARED_INLINE_P (caller->decl)))))
recursive_inlining (struct cgraph_edge *edge,
vec<cgraph_edge *> *new_edges)
{
- int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
+ int limit = param_max_inline_insns_recursive_auto;
edge_heap_t heap (sreal::min ());
struct cgraph_node *node;
struct cgraph_edge *e;
node = node->inlined_to;
if (DECL_DECLARED_INLINE_P (node->decl))
- limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
+ limit = param_max_inline_insns_recursive;
/* Make sure that function is small enough to be considered for inlining. */
if (estimate_size_after_inlining (node, edge) >= limit)
compute_max_insns (int insns)
{
int max_insns = insns;
- if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
- max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
+ if (max_insns < param_large_unit_insns)
+ max_insns = param_large_unit_insns;
return ((int64_t) max_insns
- * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
+ * (100 + param_inline_unit_growth) / 100);
}
}
/* We iterate incremental inlining to get trivial cases of indirect
inlining. */
- while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
+ while (iterations < param_early_inliner_max_iterations
&& early_inline_small_functions (node))
{
timevar_push (TV_INTEGRATION);
es->call_stmt_time
= estimate_num_insns (edge->call_stmt, &eni_time_weights);
}
- if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
+ if (iterations < param_early_inliner_max_iterations - 1)
ipa_update_overall_fn_summary (node);
timevar_pop (TV_INTEGRATION);
iterations++;
static inline bool
csftc_abort_walking_p (unsigned speculative)
{
- unsigned max = PARAM_VALUE (PARAM_MAX_SPECULATIVE_DEVIRT_MAYDEFS);
+ unsigned max = param_max_speculative_devirt_maydefs;
return speculative > max ? true : false;
}
gcc_assert (overall_size);
- cutoff = (overall_time * PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE) + 500) / 1000;
+ cutoff = (overall_time * param_hot_bb_count_ws_permille + 500) / 1000;
threshold = 0;
for (i = 0; cumulated < cutoff; i++)
{
struct ipa_known_agg_contents_list *list = NULL, *all_list = NULL;
bitmap visited = NULL;
int item_count = 0, const_count = 0;
- int ipa_max_agg_items = PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS);
+ int ipa_max_agg_items = param_ipa_max_agg_items;
HOST_WIDE_INT arg_offset, arg_size;
tree arg_base;
bool check_ref, by_ref;
fbi.bb_infos = vNULL;
fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
fbi.param_count = ipa_get_param_count (info);
- fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+ fbi.aa_walk_budget = param_ipa_max_aa_steps;
for (struct cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
{
fbi.bb_infos = vNULL;
fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
fbi.param_count = param_count;
- fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+ fbi.aa_walk_budget = param_ipa_max_aa_steps;
vec_safe_grow_cleared (descriptors, param_count);
ipa_populate_param_decls (node, *descriptors);
is unknown. */
if (!(current->count
< (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.apply_scale
- (PARAM_VALUE (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY), 100))))
+ (param_partial_inlining_entry_probability, 100))))
{
/* When profile is guessed, we cannot expect it to give us
realistic estimate on likelyness of function taking the
that. Next stage1 we should try to be more meaningful here. */
if (current->header_size + call_overhead
>= (unsigned int)(DECL_DECLARED_INLINE_P (current_function_decl)
- ? MAX_INLINE_INSNS_SINGLE
- : MAX_INLINE_INSNS_AUTO) + 10)
+ ? param_max_inline_insns_single
+ : param_max_inline_insns_auto) + 10)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
Limit this duplication. This is consistent with limit in tree-sra.c
FIXME: with LTO we ought to be able to do better! */
if (DECL_ONE_ONLY (current_function_decl)
- && current->split_size >= (unsigned int) MAX_INLINE_INSNS_AUTO + 10)
+ && current->split_size >= (unsigned int) param_max_inline_insns_auto + 10)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
FIXME: with LTO we ought to be able to do better! */
if (DECL_ONE_ONLY (current_function_decl)
&& current->split_size
- <= (unsigned int) PARAM_VALUE (PARAM_EARLY_INLINING_INSNS) / 2)
+ <= (unsigned int) param_early_inlining_insns / 2)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
HOST_WIDE_INT offset, HOST_WIDE_INT size)
{
if (desc->access_count
- == (unsigned) PARAM_VALUE (PARAM_IPA_SRA_MAX_REPLACEMENTS))
+ == (unsigned) param_ipa_sra_max_replacements)
{
disqualify_split_candidate (desc, "Too many replacement candidates");
return NULL;
if (!desc->by_ref || optimize_function_for_size_p (fun))
param_size_limit = cur_param_size;
else
- param_size_limit = (PARAM_VALUE (PARAM_IPA_SRA_PTR_GROWTH_FACTOR)
- * cur_param_size);
+ param_size_limit = param_ipa_sra_ptr_growth_factor * cur_param_size;
if (nonarg_acc_size > param_size_limit
|| (!desc->by_ref && nonarg_acc_size == param_size_limit))
{
bb_dereferences = XCNEWVEC (HOST_WIDE_INT,
by_ref_count
* last_basic_block_for_fn (fun));
- aa_walking_limit = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+ aa_walking_limit = param_ipa_max_aa_steps;
scan_function (node, fun);
if (dump_file)
return NULL;
if ((prop_count + pclen
- > (unsigned) PARAM_VALUE (PARAM_IPA_SRA_MAX_REPLACEMENTS))
+ > (unsigned) param_ipa_sra_max_replacements)
|| size_would_violate_limit_p (param_desc,
param_desc->size_reached + prop_size))
return "propagating accesses would violate the count or size limit";
hardly helps (for irregular register file architecture it could
help by choosing a better hard register in the loop but we prefer
faster allocation even in this case). We also remove cheap loops
- if there are more than IRA_MAX_LOOPS_NUM of them. Loop with EH
+ if there are more than param_ira_max_loops_num of them. Loops with EH
exit or enter edges are removed too because the allocation might
require put pseudo moves on the EH edges (we could still do this
for pseudos with caller saved hard registers in some cases but it
);
}
qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
- for (i = 0; i < n - IRA_MAX_LOOPS_NUM; i++)
+ for (i = 0; i < n - param_ira_max_loops_num; i++)
{
sorted_loops[i]->to_remove_p = true;
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
/ IRA_INT_BITS);
allocated_words_num += conflict_bit_vec_words_num;
if ((uint64_t) allocated_words_num * sizeof (IRA_INT_TYPE)
- > (uint64_t) IRA_MAX_CONFLICT_TABLE_SIZE * 1024 * 1024)
+ > (uint64_t) param_ira_max_conflict_table_size * 1024 * 1024)
{
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf
(ira_dump_file,
"+++Conflict table will be too big(>%dMB) -- don't use it\n",
- IRA_MAX_CONFLICT_TABLE_SIZE);
+ param_ira_max_conflict_table_size);
return false;
}
}
}
max_cost
- = COSTS_N_INSNS (PARAM_VALUE (PARAM_MAX_ITERATIONS_COMPUTATION_COST));
+ = COSTS_N_INSNS (param_max_iterations_computation_cost);
if (set_src_cost (desc->niter_expr, mode, optimize_loop_for_speed_p (loop))
> max_cost)
{
if ((int) new_regs[pressure_class]
+ (int) regs_needed[pressure_class]
+ LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
- + IRA_LOOP_RESERVED_REGS
+ + param_ira_loop_reserved_regs
> ira_class_hard_regs_num[pressure_class])
break;
}
/* move_single_loop_invariants for very large loops is time consuming
and might need a lot of memory. For -O1 only do loop invariant
motion for very small loops. */
- unsigned max_bbs = LOOP_INVARIANT_MAX_BBS_IN_LOOP;
+ unsigned max_bbs = param_loop_invariant_max_bbs_in_loop;
if (optimize < 2)
max_bbs /= 10;
if (loop->num_nodes <= max_bbs)
/* nunroll = total number of copies of the original loop body in
unrolled loop (i.e. if it is 2, we have to duplicate loop body once). */
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
+ nunroll = param_max_unrolled_insns / loop->ninsns;
nunroll_by_av
- = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+ = param_max_average_unrolled_insns / loop->av_ninsns;
if (nunroll > nunroll_by_av)
nunroll = nunroll_by_av;
- if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+ if (nunroll > (unsigned) param_max_unroll_times)
+ nunroll = param_max_unroll_times;
if (targetm.loop_unroll_adjust)
nunroll = targetm.loop_unroll_adjust (nunroll, loop);
/* nunroll = total number of copies of the original loop body in
unrolled loop (i.e. if it is 2, we have to duplicate loop body once. */
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
- nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+ nunroll = param_max_unrolled_insns / loop->ninsns;
+ nunroll_by_av = param_max_average_unrolled_insns / loop->av_ninsns;
if (nunroll > nunroll_by_av)
nunroll = nunroll_by_av;
- if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+ if (nunroll > (unsigned) param_max_unroll_times)
+ nunroll = param_max_unroll_times;
if (targetm.loop_unroll_adjust)
nunroll = targetm.loop_unroll_adjust (nunroll, loop);
/* nunroll = total number of copies of the original loop body in
unrolled loop (i.e. if it is 2, we have to duplicate loop body once. */
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
+ nunroll = param_max_unrolled_insns / loop->ninsns;
nunroll_by_av
- = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+ = param_max_average_unrolled_insns / loop->av_ninsns;
if (nunroll > nunroll_by_av)
nunroll = nunroll_by_av;
- if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
- nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+ if (nunroll > (unsigned) param_max_unroll_times)
+ nunroll = param_max_unroll_times;
if (targetm.loop_unroll_adjust)
nunroll = targetm.loop_unroll_adjust (nunroll, loop);
/* Generate a new register only if the expansion limit has not been
reached. Else reuse an already existing expansion. */
- if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
+ if (param_max_variable_expansions > ve->expansion_count)
{
really_new_expansion = true;
new_reg = gen_reg_rtx (GET_MODE (ve->reg));
}
n = 0;
if (sparseset_cardinality (live_range_reload_inheritance_pseudos)
- <= (unsigned)LRA_MAX_CONSIDERED_RELOAD_PSEUDOS)
+ <= (unsigned)param_lra_max_considered_reload_pseudos)
EXECUTE_IF_SET_IN_SPARSESET (live_range_reload_inheritance_pseudos,
reload_regno)
if ((int) reload_regno != regno
a BB is not greater than the following value, we don't add the BB
to EBB. */
#define EBB_PROBABILITY_CUTOFF \
- ((REG_BR_PROB_BASE * LRA_INHERITANCE_EBB_PROBABILITY_CUTOFF) / 100)
+ ((REG_BR_PROB_BASE * param_lra_inheritance_ebb_probability_cutoff) / 100)
/* Current number of inheritance/split iteration. */
int lra_inheritance_iter;
+2019-11-12 Martin Liska <mliska@suse.cz>
+
+ * lto-partition.c (lto_balanced_map): Replace old parameter syntax
+ with the new one, include opts.h if needed. Use SET_OPTION_IF_UNSET
+ macro.
+ * lto.c (do_whole_program_analysis): Likewise.
+
2019-11-11 Martin Liska <mliska@suse.cz>
* Make-lang.in: Relax dependency of lto-dump.o to
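For readers skimming the hunks that follow, the conversion described in the
ChangeLog entry above boils down to this: a --param that used to be read
through PARAM_VALUE (PARAM_FOO) and overridden with maybe_set_param_value is
now an ordinary option variable read as param_foo and conditionally
overridden with SET_OPTION_IF_UNSET.  The stand-alone sketch below is not
GCC's real opts.h; the struct, the macro body, and the *_sketch names are
mocked up purely to show the shape of the new-style code used in the diff:

  /* Mock-up of the generated option structures.  In GCC proper these fields
     live in struct gcc_options (global_options / global_options_set) and are
     generated from params.opt; only one param is modelled here.  */
  #include <stdio.h>

  struct gcc_options_sketch
  {
    int x_param_max_unroll_times;
  };

  /* Simplified stand-in for the SET_OPTION_IF_UNSET macro used in the + lines:
     apply VALUE only when the user did not set the option explicitly.  */
  #define SET_OPTION_IF_UNSET(OPTS, OPTS_SET, OPTION, VALUE) \
    do { if (!(OPTS_SET)->x_ ## OPTION) (OPTS)->x_ ## OPTION = (VALUE); } \
    while (0)

  int
  main (void)
  {
    struct gcc_options_sketch opts = { 8 };      /* current value of the param */
    struct gcc_options_sketch opts_set = { 0 };  /* 0 = not set on command line */

    /* Old syntax removed by the patch:
         nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
       New syntax: the param is read like a plain variable (in GCC,
       param_max_unroll_times refers to a global_options field).  */
    int nunroll = opts.x_param_max_unroll_times;

    /* Old syntax:
         maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 4,
                                opts->x_param_values, opts_set->x_param_values);
       New syntax:  */
    SET_OPTION_IF_UNSET (&opts, &opts_set, param_max_unroll_times, 4);

    printf ("max-unroll-times: read %d, after default override %d\n",
            nunroll, opts.x_param_max_unroll_times);
    return 0;
  }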
varpool_order.qsort (varpool_node_cmp);
/* Compute partition size and create the first partition. */
- if (PARAM_VALUE (MIN_PARTITION_SIZE) > max_partition_size)
+ if (param_min_partition_size > max_partition_size)
fatal_error (input_location, "min partition size cannot be greater "
"than max partition size");
partition_size = total_size / n_lto_partitions;
- if (partition_size < PARAM_VALUE (MIN_PARTITION_SIZE))
- partition_size = PARAM_VALUE (MIN_PARTITION_SIZE);
+ if (partition_size < param_min_partition_size)
+ partition_size = param_min_partition_size;
npartitions = 1;
partition = new_partition ("");
if (dump_file)
fprintf (dump_file,
"Total size: %" PRId64 " partition_size: %" PRId64 "\n",
total_size, partition_size);
- if (partition_size < PARAM_VALUE (MIN_PARTITION_SIZE))
- partition_size = PARAM_VALUE (MIN_PARTITION_SIZE);
+ if (partition_size < param_min_partition_size)
+ partition_size = param_min_partition_size;
npartitions ++;
}
}
/* TODO: jobserver communication is not supported, yet. */
if (!strcmp (flag_wpa, "jobserver"))
- lto_parallelism = PARAM_VALUE (PARAM_MAX_LTO_STREAMING_PARALLELISM);
+ lto_parallelism = param_max_lto_streaming_parallelism;
else
{
lto_parallelism = atoi (flag_wpa);
if (lto_parallelism <= 0)
lto_parallelism = 0;
- if (lto_parallelism >= PARAM_VALUE (PARAM_MAX_LTO_STREAMING_PARALLELISM))
- lto_parallelism = PARAM_VALUE (PARAM_MAX_LTO_STREAMING_PARALLELISM);
+ if (lto_parallelism >= param_max_lto_streaming_parallelism)
+ lto_parallelism = param_max_lto_streaming_parallelism;
}
timevar_start (TV_PHASE_OPT_GEN);
else if (flag_lto_partition == LTO_PARTITION_ONE)
lto_balanced_map (1, INT_MAX);
else if (flag_lto_partition == LTO_PARTITION_BALANCED)
- lto_balanced_map (PARAM_VALUE (PARAM_LTO_PARTITIONS),
- PARAM_VALUE (MAX_PARTITION_SIZE));
+ lto_balanced_map (param_lto_partitions,
+ param_max_partition_size);
else
gcc_unreachable ();
if ( latch_edge->count () > profile_count::zero ()
&& (latch_edge->count()
< single_exit (loop)->count ().apply_scale
- (SMS_LOOP_AVERAGE_COUNT_THRESHOLD, 1)))
+ (param_sms_loop_average_count_threshold, 1)))
{
if (dump_file)
{
/* The default value of PARAM_SMS_MIN_SC is 2 as stage count of
1 means that there is no interleaving between iterations thus
we let the scheduling passes do the job in this case. */
- if (stage_count < PARAM_VALUE (PARAM_SMS_MIN_SC)
+ if (stage_count < param_sms_min_sc
|| (count_init && (loop_count <= stage_count))
|| (max_trip_count >= 0 && max_trip_count <= stage_count)
|| (trip_count >= 0 && trip_count <= stage_count))
/* A limit on the number of cycles that resource conflicts can span. ??? Should
be provided by DFA, and be dependent on the type of insn scheduled. Currently
set to 0 to save compile time. */
-#define DFA_HISTORY SMS_DFA_HISTORY
+#define DFA_HISTORY param_sms_dfa_history
/* A threshold for the number of repeated unsuccessful attempts to insert
an empty row, before we flush the partial schedule and start over. */
opts->x_flag_ipa_pta = true;
/* Track fields in field-sensitive alias analysis. */
- maybe_set_param_value
- (PARAM_MAX_FIELDS_FOR_FIELD_SENSITIVE,
- opt2 ? 100 : default_param_value (PARAM_MAX_FIELDS_FOR_FIELD_SENSITIVE),
- opts->x_param_values, opts_set->x_param_values);
+ if (opt2)
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_fields_for_field_sensitive,
+ 100);
if (opts->x_optimize_size)
/* We want to crossjump as much as possible. */
- maybe_set_param_value (PARAM_MIN_CROSSJUMP_INSNS, 1,
- opts->x_param_values, opts_set->x_param_values);
- else
- maybe_set_param_value (PARAM_MIN_CROSSJUMP_INSNS,
- default_param_value (PARAM_MIN_CROSSJUMP_INSNS),
- opts->x_param_values, opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_min_crossjump_insns, 1);
/* Restrict the amount of work combine does at -Og while retaining
most of its useful transforms. */
if (opts->x_optimize_debug)
- maybe_set_param_value (PARAM_MAX_COMBINE_INSNS, 2,
- opts->x_param_values, opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_combine_insns, 2);
/* Allow default optimizations to be specified on a per-machine basis. */
maybe_default_options (opts, opts_set,
if (opts->x_flag_conserve_stack)
{
- maybe_set_param_value (PARAM_LARGE_STACK_FRAME, 100,
- opts->x_param_values, opts_set->x_param_values);
- maybe_set_param_value (PARAM_STACK_FRAME_GROWTH, 40,
- opts->x_param_values, opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_large_stack_frame, 100);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_stack_frame_growth, 40);
}
if (opts->x_flag_lto)
all features. */
if (opts->x_flag_sanitize & SANITIZE_KERNEL_ADDRESS)
{
- maybe_set_param_value (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
- 0, opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_ASAN_GLOBALS, 0, opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_ASAN_STACK, 0, opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_ASAN_PROTECT_ALLOCAS, 0,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_ASAN_USE_AFTER_RETURN, 0,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set,
+ param_asan_instrumentation_with_call_threshold,
+ 0);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_asan_globals, 0);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_asan_stack, 0);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_asan_protect_allocas, 0);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_asan_use_after_return, 0);
}
break;
enable_fdo_optimizations (opts, opts_set, value);
if (!opts_set->x_flag_profile_correction)
opts->x_flag_profile_correction = value;
- maybe_set_param_value (
- PARAM_EARLY_INLINER_MAX_ITERATIONS, 10,
- opts->x_param_values, opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set,
+ param_early_inliner_max_iterations, 10);
break;
case OPT_fprofile_generate_:
/* Check if it's worth applying the partial redundancy elimination. */
if (ok_count.to_gcov_type ()
- < GCSE_AFTER_RELOAD_PARTIAL_FRACTION * not_ok_count.to_gcov_type ())
+ < param_gcse_after_reload_partial_fraction * not_ok_count.to_gcov_type ())
goto cleanup;
gcov_type threshold;
#if (GCC_VERSION >= 5000)
- if (__builtin_mul_overflow (GCSE_AFTER_RELOAD_CRITICAL_FRACTION,
+ if (__builtin_mul_overflow (param_gcse_after_reload_critical_fraction,
critical_count.to_gcov_type (), &threshold))
threshold = profile_count::max_count;
#else
threshold
- = GCSE_AFTER_RELOAD_CRITICAL_FRACTION * critical_count.to_gcov_type ();
+ = (param_gcse_after_reload_critical_fraction
+ * critical_count.to_gcov_type ());
#endif
if (ok_count.to_gcov_type () < threshold)
{
if (min_count == -1)
{
- const int hot_frac = PARAM_VALUE (HOT_BB_COUNT_FRACTION);
+ const int hot_frac = param_hot_bb_count_fraction;
const gcov_type min_hot_count
= hot_frac
? profile_info->sum_max / hot_frac
if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
&& count < (ENTRY_BLOCK_PTR_FOR_FN (fun)->count.apply_scale (2, 3)))
return false;
- if (count.apply_scale (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION), 1)
+ if (count.apply_scale (param_hot_bb_frequency_fraction, 1)
< ENTRY_BLOCK_PTR_FOR_FN (fun)->count)
return false;
return true;
desirable. */
if (count.precise_p () && profile_status_for_fn (fun) == PROFILE_READ)
{
- const int unlikely_frac = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
+ const int unlikely_frac = param_unlikely_bb_count_fraction;
if (count.apply_scale (unlikely_frac, 1) >= profile_info->runs)
return false;
return true;
if (!e->probability.initialized_p ())
return false;
if ((e->probability.to_reg_br_prob_base ()
- <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
+ <= param_predictable_branch_outcome * REG_BR_PROB_BASE / 100)
|| (REG_BR_PROB_BASE - e->probability.to_reg_br_prob_base ()
- <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100))
+ <= param_predictable_branch_outcome * REG_BR_PROB_BASE / 100))
return true;
return false;
}
{
tree niter = NULL;
HOST_WIDE_INT nitercst;
- int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
+ int max = param_max_predicted_iterations;
int probability;
enum br_predictor predictor;
widest_int nit;
*predictor = (enum br_predictor) tree_to_uhwi (val2);
if (*predictor == PRED_BUILTIN_EXPECT)
*probability
- = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
+ = HITRATE (param_builtin_expect_probability);
return gimple_call_arg (def, 1);
}
return NULL;
return val;
*predictor = PRED_BUILTIN_EXPECT;
*probability
- = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
+ = HITRATE (param_builtin_expect_probability);
return gimple_call_arg (def, 1);
}
case BUILT_IN_EXPECT_WITH_PROBABILITY:
edge e = find_taken_edge_switch_expr (sw, val);
if (predictor == PRED_BUILTIN_EXPECT)
{
- int percent = PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY);
+ int percent = param_builtin_expect_probability;
gcc_assert (percent >= 0 && percent <= 100);
predict_edge (e, PRED_BUILTIN_EXPECT,
HITRATE (percent));
void
handle_missing_profiles (void)
{
- const int unlikely_frac = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
+ const int unlikely_frac = param_unlikely_bb_count_fraction;
struct cgraph_node *node;
auto_vec<struct cgraph_node *, 64> worklist;
continue;
num++;
if (p == 0 || LABEL_P (p)
- || num > PARAM_VALUE (PARAM_MAX_RELOAD_SEARCH_INSNS))
+ || num > param_max_reload_search_insns)
return 0;
/* Don't reuse register contents from before a setjmp-type
/* Scan backwards looking for a match. */
for (trial = PREV_INSN (target),
- insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
+ insns_to_search = param_max_delay_slot_insn_search;
trial && insns_to_search > 0;
trial = PREV_INSN (trial))
{
INSN sets or sets something insn uses or sets. */
for (trial = PREV_INSN (target),
- insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
+ insns_to_search = param_max_delay_slot_insn_search;
trial && !LABEL_P (trial) && insns_to_search > 0;
trial = PREV_INSN (trial))
{
}
if (b == -1)
- b = find_basic_block (target, MAX_DELAY_SLOT_LIVE_SEARCH);
+ b = find_basic_block (target, param_max_delay_slot_live_search);
if (target_hash_table != NULL)
{
void
incr_ticks_for_insn (rtx_insn *insn)
{
- int b = find_basic_block (insn, MAX_DELAY_SLOT_LIVE_SEARCH);
+ int b = find_basic_block (insn, param_max_delay_slot_live_search);
if (b != -1)
bb_ticks[b]++;
if (asan_sanitize_stack_p ())
sanitize_rewrite_addressable_params (fun);
- bool use_calls = ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD < INT_MAX
- && asan_num_accesses >= ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD;
+ bool use_calls = param_asan_instrumentation_with_call_threshold < INT_MAX
+ && asan_num_accesses >= param_asan_instrumentation_with_call_threshold;
hash_map<tree, tree> shadow_vars_mapping;
bool need_commit_edge_insert = false;
/* Pending lists can't get larger with a readonly context. */
if (!deps->readonly
&& ((deps->pending_read_list_length + deps->pending_write_list_length)
- >= MAX_PENDING_LIST_LENGTH))
+ >= param_max_pending_list_length))
{
/* Flush all pending reads and writes to prevent the pending lists
from getting any larger. Insn scheduling runs too slowly when
{
if ((deps->pending_read_list_length
+ deps->pending_write_list_length)
- >= MAX_PENDING_LIST_LENGTH
+ >= param_max_pending_list_length
&& !DEBUG_INSN_P (insn))
flush_pending_lists (deps, insn, true, true);
add_insn_mem_dependence (deps, true, insn, x);
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
- || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
+ if (reg_last->uses_length >= param_max_pending_list_length
+ || reg_last->clobbers_length >= param_max_pending_list_length)
{
add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
REG_DEP_OUTPUT, false);
&& sel_insn_is_speculation_check (insn)))
{
/* Keep the list a reasonable size. */
- if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
- flush_pending_lists (deps, insn, true, true);
+ if (deps->pending_flush_length++ >= param_max_pending_list_length)
+ flush_pending_lists (deps, insn, true, true);
else
deps->pending_jump_insns
= alloc_INSN_LIST (insn, deps->pending_jump_insns);
return;
if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+ probability_cutoff = param_tracer_min_branch_probability_feedback;
else
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+ probability_cutoff = param_tracer_min_branch_probability;
probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
schedule_ebbs_init ();
if (ebbs_p) {
int probability_cutoff;
if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+ probability_cutoff = param_tracer_min_branch_probability_feedback;
else
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+ probability_cutoff = param_tracer_min_branch_probability;
probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
FOR_EACH_BB_FN (ebb_start, cfun)
(*num_insns) += (common_sched_info->estimate_number_of_insns
(BASIC_BLOCK_FOR_FN (cfun, block)));
- return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
- || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
+ return ((*num_bbs > param_max_sched_region_blocks)
+ || (*num_insns > param_max_sched_region_insns));
}
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
- extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
+ extend_regions_p = param_max_sched_extend_regions_iters > 0;
if (extend_regions_p)
{
degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
- max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
+ max_iter = param_max_sched_extend_regions_iters;
max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
|| (IS_SPECULATIVE_INSN (next)
&& ((recog_memoized (next) >= 0
&& min_insn_conflict_delay (curr_state, next, next)
- > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
+ > param_max_sched_insn_conflict_delay)
|| IS_SPECULATION_CHECK_P (next)
|| !check_live (next, INSN_BB (next))
|| (not_ex_free = !is_exception_free (next, INSN_BB (next),
f = find_fallthru_edge (last_bb->succs);
if (f
&& (!f->probability.initialized_p ()
- || f->probability.to_reg_br_prob_base () * 100 / REG_BR_PROB_BASE >=
- PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF)))
+ || (f->probability.to_reg_br_prob_base () * 100
+ / REG_BR_PROB_BASE
+ >= param_sched_state_edge_prob_cutoff)))
{
memcpy (bb_state[f->dest->index], curr_state,
dfa_state_size);
void
sched_rgn_init (bool single_blocks_p)
{
- min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
+ min_spec_prob = ((param_min_spec_prob * REG_BR_PROB_BASE)
/ 100);
nr_inter = 0;
basic_block preheader_block;
if (loop->num_nodes
- > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
+ > (unsigned) param_max_pipeline_region_blocks)
return -1;
/* Don't pipeline loops whose latch belongs to some of its inner loops. */
return -1;
loop->ninsns = num_loop_insns (loop);
- if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
+ if ((int) loop->ninsns > param_max_pipeline_region_insns)
return -1;
loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
/* Software lookahead window size.
According to the results in Nakatani and Ebcioglu [1993], window size of 16
is enough to extract most ILP in integer code. */
-#define MAX_WS (PARAM_VALUE (PARAM_SELSCHED_MAX_LOOKAHEAD))
+#define MAX_WS (param_selsched_max_lookahead)
extern regset sel_all_regs;
\f
FOR_EACH_EXPR_1 (expr, si, av_ptr)
{
if (EXPR_SCHED_TIMES (expr)
- >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
+ >= param_selsched_max_sched_times)
av_set_iter_remove (&si);
}
}
&& (flag_sel_sched_pipelining != 0)
&& current_loop_nest != NULL
&& loop_has_exit_edges (current_loop_nest));
- max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
+ max_insns_to_rename = param_selsched_insns_to_rename;
max_ws = MAX_WS;
}
vec.quick_push (pro);
unsigned max_grow_size = get_uncond_jump_length ();
- max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
+ max_grow_size *= param_max_grow_copy_bb_insns;
while (!vec.is_empty () && pro != entry)
{
{
bool predictable_p = predictable_edge_p (e);
- enum compiler_param param
- = (predictable_p
- ? PARAM_MAX_RTL_IF_CONVERSION_PREDICTABLE_COST
- : PARAM_MAX_RTL_IF_CONVERSION_UNPREDICTABLE_COST);
-
- /* If we have a parameter set, use that, otherwise take a guess using
- BRANCH_COST. */
- if (global_options_set.x_param_values[param])
- return PARAM_VALUE (param);
+ if (predictable_p)
+ {
+ if (global_options_set.x_param_max_rtl_if_conversion_predictable_cost)
+ return param_max_rtl_if_conversion_predictable_cost;
+ }
else
- return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
+ {
+ if (global_options_set.x_param_max_rtl_if_conversion_unpredictable_cost)
+ return param_max_rtl_if_conversion_unpredictable_cost;
+ }
+
+ return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
}
/* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION. */
fprintf (file,
file == stderr ? _(fmt4) : fmt4,
indent, *indent != 0 ? " " : "",
- PARAM_VALUE (GGC_MIN_EXPAND), PARAM_VALUE (GGC_MIN_HEAPSIZE));
+ param_ggc_min_expand, param_ggc_min_heapsize);
print_plugins_versions (file, indent);
}
if (flag_checking >= 2)
hash_table_sanitize_eq_limit
- = PARAM_VALUE (PARAM_HASH_TABLE_VERIFICATION_LIMIT);
+ = param_hash_table_verification_limit;
/* Please don't change global_options after this point, those changes won't
be reflected in optimization_{default,current}_node. */
initialize_original_copy_tables ();
if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+ probability_cutoff = param_tracer_min_branch_probability_feedback;
else
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+ probability_cutoff = param_tracer_min_branch_probability;
probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
branch_ratio_cutoff =
- (REG_BR_PROB_BASE / 100 * PARAM_VALUE (TRACER_MIN_BRANCH_RATIO));
+ (REG_BR_PROB_BASE / 100 * param_tracer_min_branch_ratio);
FOR_EACH_BB_FN (bb, cfun)
{
}
if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
- cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE_FEEDBACK);
+ cover_insns = param_tracer_dynamic_coverage_feedback;
else
- cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE);
+ cover_insns = param_tracer_dynamic_coverage;
cover_insns = (weighted_insns * cover_insns + 50) / 100;
- max_dup_insns = (ninsns * PARAM_VALUE (TRACER_MAX_CODE_GROWTH) + 50) / 100;
+ max_dup_insns = (ninsns * param_tracer_max_code_growth + 50) / 100;
while (traced_insns < cover_insns && nduplicated < max_dup_insns
&& !heap.empty ())
&& TYPE_SIZE_UNIT (type) != NULL
&& tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
&& ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
- < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
+ < param_tm_max_aggregate_size)
/* We must be able to copy this type normally. I.e., no
special constructors and the like. */
&& !TREE_ADDRESSABLE (type))
int size = 0;
if ((tree_contains_chrecs (op0, &size)
|| tree_contains_chrecs (op1, &size))
- && size < PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
+ && size < param_scev_max_expr_size)
return build2 (code, type, op0, op1);
- else if (size < PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
+ else if (size < param_scev_max_expr_size)
{
if (code == POINTER_PLUS_EXPR)
return fold_build_pointer_plus (fold_convert (type, op0),
void
split_constant_offset (tree exp, tree *var, tree *off)
{
- unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
+ unsigned limit = param_ssa_name_def_chain_limit;
static hash_map<tree, std::pair<tree, tree> > *cache;
if (!cache)
cache = new hash_map<tree, std::pair<tree, tree> > (37);
unsigned int i, j;
if ((int) datarefs.length ()
- > PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+ > param_loop_max_datarefs_for_datadeps)
{
struct data_dependence_relation *ddr;
/* Only handle PHIs with no more arguments unless we are asked to by
simd pragma. */
#define MAX_PHI_ARG_NUM \
- ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))
+ ((unsigned) param_max_tree_if_conversion_phi_args)
/* True if we've converted a statement that was only executed when some
condition C was true, and if for correctness we need to predicate the
/* If the inlined function has too many debug markers,
don't copy them. */
if (id->src_cfun->debug_marker_count
- > PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
+ > param_max_debug_marker_count)
return stmts;
gdebug *copy = as_a <gdebug *> (gimple_copy (stmt));
#define MAX_DATAREFS_NUM \
- ((unsigned) PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+ ((unsigned) param_loop_max_datarefs_for_datadeps)
/* Threshold controlling number of distributed partitions. Given it may
be unnecessary if a memory stream cost model is invented in the future,
/* Minimal number of iterations of a loop that should be executed in each
thread. */
-#define MIN_PER_THREAD PARAM_VALUE (PARAM_PARLOOPS_MIN_PER_THREAD)
+#define MIN_PER_THREAD param_parloops_min_per_thread
/* Element of the hashtable, representing a
reduction in the current loop. */
else
{
t = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);
- int chunk_size = PARAM_VALUE (PARAM_PARLOOPS_CHUNK_SIZE);
- enum PARAM_PARLOOPS_SCHEDULE_KIND schedule_type \
- = (enum PARAM_PARLOOPS_SCHEDULE_KIND) PARAM_VALUE (PARAM_PARLOOPS_SCHEDULE);
- switch (schedule_type)
+ int chunk_size = param_parloops_chunk_size;
+ switch (param_parloops_schedule)
{
- case PARAM_PARLOOPS_SCHEDULE_KIND_static:
+ case PARLOOPS_SCHEDULE_STATIC:
OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC;
break;
- case PARAM_PARLOOPS_SCHEDULE_KIND_dynamic:
+ case PARLOOPS_SCHEDULE_DYNAMIC:
OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
break;
- case PARAM_PARLOOPS_SCHEDULE_KIND_guided:
+ case PARLOOPS_SCHEDULE_GUIDED:
OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_GUIDED;
break;
- case PARAM_PARLOOPS_SCHEDULE_KIND_auto:
+ case PARLOOPS_SCHEDULE_AUTO:
OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_AUTO;
chunk_size = 0;
break;
- case PARAM_PARLOOPS_SCHEDULE_KIND_runtime:
+ case PARLOOPS_SCHEDULE_RUNTIME:
OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_RUNTIME;
chunk_size = 0;
break;
{
chain_p chain;
unsigned factor = 1, af, nfactor, i;
- unsigned max = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+ unsigned max = param_max_unroll_times;
FOR_EACH_VEC_ELT (chains, i, chain)
{
return t_false;
/* Give up if the path is longer than the MAX that we allow. */
- if (limit > PARAM_VALUE (PARAM_SCEV_MAX_EXPR_COMPLEXITY))
+ if (limit > param_scev_max_expr_complexity)
{
*evolution_of_loop = chrec_dont_know;
return t_dont_know;
bool *fold_conversions, int size_expr)
{
/* Give up if the expression is larger than the MAX that we allow. */
- if (size_expr++ > PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
+ if (size_expr++ > param_scev_max_expr_size)
return chrec_dont_know;
if (chrec == NULL_TREE
unsigned i;
bool optimize_speed_p = !optimize_function_for_size_p (cfun);
- enum compiler_param param = optimize_speed_p
- ? PARAM_SRA_MAX_SCALARIZATION_SIZE_SPEED
- : PARAM_SRA_MAX_SCALARIZATION_SIZE_SIZE;
-
/* If the user didn't set PARAM_SRA_MAX_SCALARIZATION_SIZE_<...>,
fall back to a target default. */
unsigned HOST_WIDE_INT max_scalarization_size
- = global_options_set.x_param_values[param]
- ? PARAM_VALUE (param)
- : get_move_ratio (optimize_speed_p) * UNITS_PER_WORD;
+ = get_move_ratio (optimize_speed_p) * UNITS_PER_WORD;
+
+ if (optimize_speed_p)
+ {
+ if (global_options_set.x_param_sra_max_scalarization_size_speed)
+ max_scalarization_size = param_sra_max_scalarization_size_speed;
+ }
+ else
+ {
+ if (global_options_set.x_param_sra_max_scalarization_size_size)
+ max_scalarization_size = param_sra_max_scalarization_size_size;
+ }
max_scalarization_size *= BITS_PER_UNIT;
size = tree_to_uhwi (arg);
/* Heuristic: don't fold large allocas. */
- threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
+ threshold = (unsigned HOST_WIDE_INT)param_large_stack_frame;
/* In case the alloca is located at function entry, it has the same lifetime
as a declared array, so we allow a larger size. */
block = gimple_block (stmt);
if (valid_ao_ref_for_dse (ref)
&& ref->size.is_constant (&const_size)
&& (const_size / BITS_PER_UNIT
- <= PARAM_VALUE (PARAM_DSE_MAX_OBJECT_SIZE)))
+ <= param_dse_max_object_size))
{
bitmap_clear (live_bytes);
bitmap_set_range (live_bytes, 0, const_size / BITS_PER_UNIT);
FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar)
{
/* Limit stmt walking. */
- if (++cnt > PARAM_VALUE (PARAM_DSE_MAX_ALIAS_QUERIES_PER_STORE))
+ if (++cnt > param_dse_max_alias_queries_per_store)
BREAK_FROM_IMM_USE_STMT (ui);
/* If USE_STMT stores 0 into one or more of the same locations
FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar)
{
/* Limit stmt walking. */
- if (++cnt > PARAM_VALUE (PARAM_DSE_MAX_ALIAS_QUERIES_PER_STORE))
+ if (++cnt > param_dse_max_alias_queries_per_store)
{
fail = true;
BREAK_FROM_IMM_USE_STMT (ui);
public:
dse_dom_walker (cdi_direction direction)
: dom_walker (direction),
- m_live_bytes (PARAM_VALUE (PARAM_DSE_MAX_OBJECT_SIZE)),
+ m_live_bytes (param_dse_max_object_size),
m_byte_tracking_enabled (false) {}
virtual edge before_dom_children (basic_block);
tree t1, t2;
gimple_stmt_iterator gsi;
bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
- if (PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT) != -1)
+ if (param_logical_op_non_short_circuit != -1)
logical_op_non_short_circuit
- = PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT);
+ = param_logical_op_non_short_circuit;
if (!logical_op_non_short_circuit || flag_sanitize_coverage)
return false;
/* Only do this optimization if the inner bb contains only the conditional. */
FOR_EACH_LOOP (loop, 0)
{
- int initial_limit = PARAM_VALUE (PARAM_MAX_LOOP_HEADER_INSNS);
+ int initial_limit = param_max_loop_header_insns;
int remaining_limit = initial_limit;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
static bool ref_always_accessed_p (class loop *, im_mem_ref *, bool);
/* Minimum cost of an expensive expression. */
-#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
+#define LIM_EXPENSIVE ((unsigned) param_lim_expensive)
/* The outermost loop for which execution of the header guarantees that the
block will be executed. */
return false;
if (!loop->unroll
- && n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
+ && n_unroll > (unsigned) param_max_completely_peel_times)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d "
bool large
= tree_estimate_loop_size
(loop, remove_exit ? exit : NULL, edge_to_cancel, &size,
- PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
+ param_max_completely_peeled_insns);
if (large)
{
if (dump_file && (dump_flags & TDF_DETAILS))
blow the branch predictor tables. Limit number of
branches on the hot path through the peeled sequence. */
else if (size.num_branches_on_hot_path * (int)n_unroll
- > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
+ > param_max_peel_branches)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d: "
return false;
}
else if (unr_insns
- > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
+ > (unsigned) param_max_completely_peeled_insns)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d: "
int peeled_size;
if (!flag_peel_loops
- || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0
+ || param_max_peel_times <= 0
|| !peeled_loops)
return false;
/* We want to peel estimated number of iterations + 1 (so we never
enter the loop on quick path). Check against param_max_peel_times
and be sure to avoid overflows. */
- if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
+ if (npeel > param_max_peel_times - 1)
{
if (dump_file)
fprintf (dump_file, "Not peeling: rolls too much "
/* Check peeled loops size. */
tree_estimate_loop_size (loop, exit, NULL, &size,
- PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
+ param_max_peeled_insns);
if ((peeled_size = estimated_peeled_sequence_size (&size, (int) npeel))
- > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
+ > param_max_peeled_insns)
{
if (dump_file)
fprintf (dump_file, "Not peeling: peeled sequence size is too large "
BITMAP_FREE (loop_closed_ssa_invalidated);
}
while (changed
- && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));
+ && ++iteration <= param_max_unroll_iterations);
BITMAP_FREE (father_bbs);
{
niter = likely_max_stmt_executions_int (loop);
- if (niter == -1 || niter > PARAM_VALUE (PARAM_AVG_LOOP_NITER))
- return PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+ if (niter == -1 || niter > param_avg_loop_niter)
+ return param_avg_loop_niter;
}
return niter;
/* Bound on number of candidates below that all candidates are considered. */
#define CONSIDER_ALL_CANDIDATES_BOUND \
- ((unsigned) PARAM_VALUE (PARAM_IV_CONSIDER_ALL_CANDIDATES_BOUND))
+ ((unsigned) param_iv_consider_all_candidates_bound)
/* If there are more iv occurrences, we just give up (it is quite unlikely that
optimizing such a loop would help, and it would take ages). */
#define MAX_CONSIDERED_GROUPS \
- ((unsigned) PARAM_VALUE (PARAM_IV_MAX_CONSIDERED_USES))
+ ((unsigned) param_iv_max_considered_uses)
/* If there are at most this number of ivs in the set, try removing unnecessary
ivs from the set always. */
#define ALWAYS_PRUNE_CAND_SET_BOUND \
- ((unsigned) PARAM_VALUE (PARAM_IV_ALWAYS_PRUNE_CAND_SET_BOUND))
+ ((unsigned) param_iv_always_prune_cand_set_bound)
/* The list of trees for that the decl_rtl field must be reset is stored
here. */
/* The final loop should be small enough. */
if (tree_num_loop_insns (loop, &eni_size_weights) * factor
- > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS))
+ > (unsigned) param_max_unrolled_insns)
return false;
return true;
/* Bound on the number of iterations we try to evaluate. */
#define MAX_ITERATIONS_TO_TRACK \
- ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
+ ((unsigned) param_max_iterations_to_track)
/* Returns the loop phi node of LOOP such that ssa name X is derived from its
result by a chain of operations such that all but exactly one of their
of cache hierarchy). */
#ifndef PREFETCH_BLOCK
-#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
+#define PREFETCH_BLOCK param_l1_cache_line_size
#endif
/* Do we have a forward hardware sequential prefetching? */
#define ACCEPTABLE_MISS_RATE 50
#endif
-#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
-#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
+#define L1_CACHE_SIZE_BYTES ((unsigned) (param_l1_cache_size * 1024))
+#define L2_CACHE_SIZE_BYTES ((unsigned) (param_l2_cache_size * 1024))
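Because the cache geometry is now carried by plain option variables, a backend can still seed these values from its tuning tables without clobbering an explicit --param, using SET_OPTION_IF_UNSET from opts.h. A hedged sketch of that idiom (the function name and the 64/32/512 numbers are placeholders, not real tuning data):
  #include "opts.h"
  static void
  sketch_seed_cache_params (struct gcc_options *opts,
                            struct gcc_options *opts_set)
  {
    /* Each line only takes effect when the user did not pass that --param.  */
    SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size, 64);
    SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size, 32);
    SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size, 512);
  }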
/* We consider a memory access nontemporal if it is not reused sooner than
after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
should_issue_prefetch_p (struct mem_ref *ref)
{
/* Do we want to issue prefetches for non-constant strides? */
- if (!cst_and_fits_in_hwi (ref->group->step) && PREFETCH_DYNAMIC_STRIDES == 0)
+ if (!cst_and_fits_in_hwi (ref->group->step)
+ && param_prefetch_dynamic_strides == 0)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
range. */
if (cst_and_fits_in_hwi (ref->group->step)
&& abs_hwi (int_cst_value (ref->group->step))
- < (HOST_WIDE_INT) PREFETCH_MINIMUM_STRIDE)
+ < (HOST_WIDE_INT) param_prefetch_minimum_stride)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
"Step for reference %u:%u (" HOST_WIDE_INT_PRINT_DEC
") is less than the mininum required stride of %d\n",
ref->group->uid, ref->uid, int_cst_value (ref->group->step),
- PREFETCH_MINIMUM_STRIDE);
+ param_prefetch_minimum_stride);
return false;
}
struct mem_ref *ref;
bool any = false;
- /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
- remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
+ /* At most param_simultaneous_prefetches should be running
+ at the same time. */
+ remaining_prefetch_slots = param_simultaneous_prefetches;
/* The prefetch will run for AHEAD iterations of the original loop, i.e.,
AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
us from unrolling the loops too many times in cases where we only expect
gains from better scheduling and decreasing loop overhead, which is not
the case here. */
- upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
+ upper_bound = param_max_unrolled_insns / ninsns;
/* If we unrolled the loop more times than it iterates, the unrolled version
of the loop would be never entered. */
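A worked number for the size cap above, assuming the long-standing default of --param max-unrolled-insns=200 (the default value is an assumption here, it is not stated by the patch):
  /* A 25-insn loop body gets an unroll-factor ceiling of 200 / 25 = 8 from
     the size cap, before the iteration-count check mentioned above.  */
  unsigned ninsns = 25;
  unsigned upper_bound = param_max_unrolled_insns / ninsns;  /* 8 by default.  */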
accessed in each iteration. TODO -- in the latter case, we should
take the size of the reference into account, rounding it up on cache
line size multiple. */
- volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
+ volume += param_l1_cache_line_size / ref->prefetch_mod;
}
return volume;
}
if (tree_fits_shwi_p (step))
astep = tree_to_shwi (step);
else
- astep = L1_CACHE_LINE_SIZE;
+ astep = param_l1_cache_line_size;
strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
if (tree_fits_uhwi_p (stride))
astride = tree_to_uhwi (stride);
else
- astride = L1_CACHE_LINE_SIZE;
+ astride = param_l1_cache_line_size;
ref = TREE_OPERAND (ref, 0);
}
s = strides[i] < 0 ? -strides[i] : strides[i];
- if (s < (unsigned) L1_CACHE_LINE_SIZE
+ if (s < (unsigned) param_l1_cache_line_size
&& (loop_sizes[i]
> (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
{
should account for cache misses. */
insn_to_mem_ratio = ninsns / mem_ref_count;
- if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
+ if (insn_to_mem_ratio < param_prefetch_min_insn_to_mem_ratio)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
and the exit branches will get eliminated), so it might be better to use
tree_estimate_loop_size + estimated_unrolled_size. */
insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
- if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
+ if (insn_to_prefetch_ratio < param_min_insn_to_prefetch_ratio)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
if (time == 0)
return false;
- ahead = (PREFETCH_LATENCY + time - 1) / time;
+ ahead = (param_prefetch_latency + time - 1) / time;
est_niter = estimated_stmt_executions_int (loop);
if (est_niter == -1)
est_niter = likely_max_stmt_executions_int (loop);
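AHEAD is simply a ceiling division of the prefetch latency by the per-iteration time. A worked example, assuming the traditional default of --param prefetch-latency=200 (an assumed default, not taken from the patch):
  /* With param_prefetch_latency == 200 and a loop body costing 30 units,
     ahead = (200 + 30 - 1) / 30 = 7, i.e. data is prefetched roughly seven
     iterations before its use.  */
  unsigned time = 30;
  unsigned ahead = (param_prefetch_latency + time - 1) / time;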
{
fprintf (dump_file, "Prefetching parameters:\n");
fprintf (dump_file, " simultaneous prefetches: %d\n",
- SIMULTANEOUS_PREFETCHES);
- fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
+ param_simultaneous_prefetches);
+ fprintf (dump_file, " prefetch latency: %d\n", param_prefetch_latency);
fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
- L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
- fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
- fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
+ L1_CACHE_SIZE_BYTES / param_l1_cache_line_size,
+ param_l1_cache_size);
+ fprintf (dump_file, " L1 cache line size: %d\n",
+ param_l1_cache_line_size);
+ fprintf (dump_file, " L2 cache size: %d kB\n", param_l2_cache_size);
fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
- MIN_INSN_TO_PREFETCH_RATIO);
+ param_min_insn_to_prefetch_ratio);
fprintf (dump_file, " min insn-to-mem ratio: %d \n",
- PREFETCH_MIN_INSN_TO_MEM_RATIO);
+ param_prefetch_min_insn_to_mem_ratio);
fprintf (dump_file, "\n");
}
profile_probability prob = invar_branch->probability;
if (prob.reliable_p ())
{
- int thres = PARAM_VALUE (PARAM_MIN_LOOP_COND_SPLIT_PROB);
+ int thres = param_min_loop_cond_split_prob;
if (prob < profile_probability::always ().apply_scale (thres, 100))
return NULL;
}
/* Add a threshold for increased code size to disable loop split. */
- if (compute_added_num_insns (loop, invar_branch)
- > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
+ if (compute_added_num_insns (loop, invar_branch) > param_max_peeled_insns)
return NULL;
return invar_branch;
/* The loop should not be too large, to limit code growth. */
if (tree_num_loop_insns (loop, &eni_size_weights)
- > (unsigned) PARAM_VALUE (PARAM_MAX_UNSWITCH_INSNS))
+ > (unsigned) param_max_unswitch_insns)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, ";; Not unswitching, loop too big\n");
if (i == loop->num_nodes)
{
if (dump_file
- && num > PARAM_VALUE (PARAM_MAX_UNSWITCH_LEVEL)
+ && num > param_max_unswitch_level
&& (dump_flags & TDF_DETAILS))
fprintf (dump_file, ";; Not unswitching anymore, hit max level\n");
changed = true;
}
/* Do not unswitch too much. */
- else if (num > PARAM_VALUE (PARAM_MAX_UNSWITCH_LEVEL))
+ else if (num > param_max_unswitch_level)
{
i++;
continue;
&& !HONOR_SIGNED_ZEROS (mode))
{
unsigned int max_depth = speed_p
- ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
+ ? param_max_pow_sqrt_depth
: 2;
tree expand_with_sqrts
bool check_defer
= (state->m_deferring_p
&& (tree_to_shwi (TYPE_SIZE (type))
- <= PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS)));
+ <= param_avoid_fma_max_bits));
bool defer = check_defer;
bool seen_negate_p = false;
/* Make sure that the multiplication statement becomes dead after
{
gimple_stmt_iterator gsi;
- fma_deferring_state fma_state (PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS) > 0);
+ fma_deferring_state fma_state (param_avoid_fma_max_bits > 0);
for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
{
/* If either vectorization or if-conversion is disabled then do
not sink any stores. */
- if (MAX_STORES_TO_SINK == 0
+ if (param_max_stores_to_sink == 0
|| (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
|| !flag_tree_loop_if_convert)
return false;
/* No pairs of stores found. */
if (!then_stores.length ()
- || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
+ || then_stores.length () > (unsigned) param_max_stores_to_sink)
{
free_data_refs (then_datarefs);
free_data_refs (else_datarefs);
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
basic_block bb2, basic_block bb3)
{
- int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
+ int param_align = param_l1_cache_line_size;
unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
gphi_iterator gsi;
gate_hoist_loads (void)
{
return (flag_hoist_adjacent_loads == 1
- && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
+ && param_l1_cache_line_size
&& HAVE_conditional_move);
}
if (gimple_bb (phi) != phiblock)
return vuse;
- unsigned int cnt = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+ unsigned int cnt = param_sccvn_max_alias_queries_per_access;
use_oracle = ao_ref_init_from_vn_reference (&ref, set, type, operands);
/* Use the alias-oracle to find either the PHI node in this block,
bitmap_set_t PA_OUT;
edge e;
edge_iterator ei;
- unsigned long max_pa = PARAM_VALUE (PARAM_MAX_PARTIAL_ANTIC_LENGTH);
+ unsigned long max_pa = param_max_partial_antic_length;
old_PA_IN = PA_OUT = NULL;
get_reassociation_width (int ops_num, enum tree_code opc,
machine_mode mode)
{
- int param_width = PARAM_VALUE (PARAM_TREE_REASSOC_WIDTH);
+ int param_width = param_tree_reassoc_width;
int width;
int width_min;
int cycles_best;
&& vr1.vuse)
{
ao_ref r;
- unsigned limit = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+ unsigned limit = param_sccvn_max_alias_queries_per_access;
vn_walk_cb_data data (&vr1, NULL_TREE, NULL, kind, true);
if (ao_ref_init_from_vn_reference (&r, set, type, vr1.operands))
*vnresult =
{
vn_reference_t wvnresult;
ao_ref r;
- unsigned limit = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+ unsigned limit = param_sccvn_max_alias_queries_per_access;
/* Make sure to use a valueized reference if we valueized anything.
Otherwise preserve the full reference for advanced TBAA. */
if (!valuezied_anything
if (iterate)
{
loop_p loop;
- unsigned max_depth = PARAM_VALUE (PARAM_RPO_VN_MAX_LOOP_DEPTH);
+ unsigned max_depth = param_rpo_vn_max_loop_depth;
FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
if (loop_depth (loop) > max_depth)
for (unsigned i = 2;
up the virtual use-def chain using walk_non_aliased_vuses.
But don't do this when removing expressions from the hash. */
ao_ref ref;
- unsigned limit = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+ unsigned limit = param_sccvn_max_alias_queries_per_access;
if (!(vuse1 && vuse2
&& gimple_assign_single_p (stmt)
&& TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
/* Get the sinking threshold. If the statement to be moved has memory
operands, then increase the threshold by 7% as those are even more
profitable to avoid, clamping at 100%. */
- threshold = PARAM_VALUE (PARAM_SINK_FREQUENCY_THRESHOLD);
+ threshold = param_sink_frequency_threshold;
if (gimple_vuse (stmt) || gimple_vdef (stmt))
{
threshold += 7;
new_stridx (tree exp)
{
int idx;
- if (max_stridx >= PARAM_VALUE (PARAM_MAX_TRACKED_STRLENS))
+ if (max_stridx >= param_max_tracked_strlens)
return 0;
if (TREE_CODE (exp) == SSA_NAME)
{
new_addr_stridx (tree exp)
{
int *pidx;
- if (max_stridx >= PARAM_VALUE (PARAM_MAX_TRACKED_STRLENS))
+ if (max_stridx >= param_max_tracked_strlens)
return 0;
pidx = addr_stridxptr (exp);
if (pidx != NULL)
bitmap visited = NULL;
tree maxbound = pdata->maxbound;
- unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
+ unsigned limit = param_ssa_name_def_chain_limit;
if (!get_range_strlen_dynamic (src, pdata, &visited, rvals, &limit))
{
/* On failure extend the length range to an impossible maximum
ssa_name_limit_t ()
: visited (NULL),
- ssa_def_max (PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT)) { }
+ ssa_def_max (param_ssa_name_def_chain_limit) { }
int next_ssa_name (tree);
return false;
/* If the vector of fields is growing too big, bail out early.
- Callers check for vec::length <= MAX_FIELDS_FOR_FIELD_SENSITIVE, make
+ Callers check for vec::length <= param_max_fields_for_field_sensitive, make
sure this fails. */
- if (fieldstack->length () > MAX_FIELDS_FOR_FIELD_SENSITIVE)
+ if (fieldstack->length () > (unsigned)param_max_fields_for_field_sensitive)
return false;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
/* If we didn't end up collecting sub-variables create a full
variable for the decl. */
if (fieldstack.length () == 0
- || fieldstack.length () > MAX_FIELDS_FOR_FIELD_SENSITIVE)
+ || fieldstack.length () > (unsigned)param_max_fields_for_field_sensitive)
{
vi = new_var_info (decl, name, add_id);
vi->offset = 0;
static void
init_alias_vars (void)
{
- use_field_sensitive = (MAX_FIELDS_FOR_FIELD_SENSITIVE > 1);
+ use_field_sensitive = (param_max_fields_for_field_sensitive > 1);
bitmap_obstack_initialize (&pta_obstack);
bitmap_obstack_initialize (&oldpta_obstack);
unsigned int i, j;
bitmap_iterator bi, bj;
int nr_comparisons;
- int max_comparisons = PARAM_VALUE (PARAM_MAX_TAIL_MERGE_COMPARISONS);
+ int max_comparisons = param_max_tail_merge_comparisons;
EXECUTE_IF_SET_IN_BITMAP (same_succ->bbs, 0, i, bi)
{
int nr_bbs_removed;
bool loop_entered = false;
int iteration_nr = 0;
- int max_iterations = PARAM_VALUE (PARAM_MAX_TAIL_MERGE_ITERATIONS);
+ int max_iterations = param_max_tail_merge_iterations;
if (!flag_tree_tail_merge
|| max_iterations == 0)
return NULL;
if (m_path.length () + 1
- > (unsigned) PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
+ > (unsigned) param_max_fsm_thread_length)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "FSM jump-thread path not considered: "
as in PR 78407 this leads to noticeable improvements. */
if (m_speed_p && (optimize_edge_for_speed_p (taken_edge) || contains_hot_bb))
{
- if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
+ if (n_insns >= param_max_fsm_thread_path_insns)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "FSM jump-thread path not considered: "
optimizer would have done anyway, so an irreducible loop is not
so bad. */
if (!threaded_multiway_branch && *creates_irreducible_loop
- && (n_insns * (unsigned) PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
+ && (n_insns * (unsigned) param_fsm_scale_path_stmts
> (m_path.length () *
- (unsigned) PARAM_VALUE (PARAM_FSM_SCALE_PATH_BLOCKS))))
+ (unsigned) param_fsm_scale_path_blocks)))
{
if (dump_file && (dump_flags & TDF_DETAILS))
So for that case, drastically reduce the number of statements
we are allowed to copy. */
if (!(threaded_through_latch && threaded_multiway_branch)
- && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
- >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS)))
+ && (n_insns * param_fsm_scale_path_stmts
+ >= param_max_jump_thread_duplication_stmts))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
if (gimple_code (def_stmt) == GIMPLE_PHI
&& (gimple_phi_num_args (def_stmt)
- >= (unsigned) PARAM_VALUE (PARAM_FSM_MAXIMUM_PHI_ARGUMENTS)))
+ >= (unsigned) param_fsm_maximum_phi_arguments))
return;
if (is_gimple_assign (def_stmt)
m_visited_bbs.empty ();
m_seen_loop_phi = false;
m_speed_p = speed_p;
- m_max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
+ m_max_threaded_paths = param_max_fsm_thread_paths;
fsm_find_control_statement_thread_paths (name);
}
gimple_stmt_iterator gsi;
int max_stmt_count;
- max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
+ max_stmt_count = param_max_jump_thread_duplication_stmts;
/* Walk through each statement in the block recording equivalences
we discover. Note any equivalences we discover are context
killed due to threading, grow the max count
accordingly. */
if (max_stmt_count
- == PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
+ == param_max_jump_thread_duplication_stmts)
{
max_stmt_count += estimate_threading_killed_stmts (e->dest);
if (dump_file)
bool found_cd_chain = false;
size_t cur_chain_len = 0;
- if (*num_calls > PARAM_VALUE (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS))
+ if (*num_calls > param_uninit_control_dep_attempts)
return false;
++*num_calls;
}
if (tree_to_uhwi (m_range_size)
- > ((unsigned) m_count * SWITCH_CONVERSION_BRANCH_RATIO))
+ > ((unsigned) m_count * param_switch_conversion_branch_ratio))
{
m_reason = "the maximum range-branch ratio exceeded";
return false;
unsigned HOST_WIDE_INT max_ratio
= (optimize_insn_for_size_p ()
- ? PARAM_VALUE (PARAM_JUMP_TABLE_MAX_GROWTH_RATIO_FOR_SIZE)
- : PARAM_VALUE (PARAM_JUMP_TABLE_MAX_GROWTH_RATIO_FOR_SPEED));
+ ? param_jump_table_max_growth_ratio_for_size
+ : param_jump_table_max_growth_ratio_for_speed);
unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
clusters[end]->get_high ());
/* Check overflow. */
unsigned int
jump_table_cluster::case_values_threshold (void)
{
- unsigned int threshold = PARAM_VALUE (PARAM_CASE_VALUES_THRESHOLD);
+ unsigned int threshold = param_case_values_threshold;
if (threshold == 0)
threshold = targetm.case_values_threshold ();
b_b = PHI <b_6, b_7>
There are further constraints. Specifically, the range of values across all
-case labels must not be bigger than SWITCH_CONVERSION_BRANCH_RATIO (default
-eight) times the number of the actual switch branches.
+case labels must not be bigger than param_switch_conversion_branch_ratio
+(default eight) times the number of the actual switch branches.
This transformation was contributed by Martin Jambor, see this e-mail:
http://gcc.gnu.org/ml/gcc-patches/2008-07/msg00011.html */
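A worked instance of the ratio constraint just described, mirroring the m_range_size check in the earlier hunk (the concrete numbers are only illustrative):
  /* With the documented default ratio of 8, a switch with 4 case labels may
     span a value range of at most 4 * 8 = 32; labels spread across 0..100
     therefore exceed the bound and the switch is left unconverted.  */
  unsigned HOST_WIDE_INT range_size = 101, count = 4;
  bool ok = range_size <= count * (unsigned) param_switch_conversion_branch_ratio;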
{
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
+ if ((unsigned) param_vect_max_version_for_alias_checks == 0)
return opt_result::failure_at (vect_location,
"will not create alias checks, as"
" --param vect-max-version-for-alias-checks"
if (do_peeling)
{
unsigned max_allowed_peel
- = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
+ = param_vect_max_peeling_for_alignment;
if (flag_vect_cost_model == VECT_COST_MODEL_CHEAP)
max_allowed_peel = 0;
if (max_allowed_peel != (unsigned)-1)
if (known_alignment_for_access_p (dr_info)
|| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
- >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
+ >= (unsigned) param_vect_max_version_for_alignment_checks)
{
do_versioning = false;
break;
dump_printf_loc (MSG_NOTE, vect_location,
"improved number of alias checks from %d to %d\n",
may_alias_ddrs.length (), count);
- unsigned limit = PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS);
+ unsigned limit = param_vect_max_version_for_alias_checks;
if (flag_simd_cost_model == VECT_COST_MODEL_CHEAP)
- limit = default_param_value
- (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) * 6 / 10;
+ limit = param_vect_max_version_for_alias_checks * 6 / 10;
if (count > limit)
return opt_result::failure_at
(vect_location,
return -1;
}
- int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
+ int min_scalar_loop_bound = (param_min_vect_loop_bound
* assumed_vf);
/* Use the cost model only if it is more conservative than user specified
/* If dependence analysis will give up due to the limit on the
number of datarefs stop here and fail fatally. */
if (datarefs->length ()
- > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+ > (unsigned)param_loop_max_datarefs_for_datadeps)
return opt_result::failure_at (stmt, "exceeded param "
"loop-max-datarefs-for-datadeps\n");
}
TODO: Enable epilogue vectorization for loops with SIMDUID set. */
vect_epilogues = (!simdlen
&& loop->inner == NULL
- && PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK)
+ && param_vect_epilogues_nomask
&& LOOP_VINFO_PEELING_FOR_NITER (first_loop_vinfo)
&& !loop->simduid
/* For now only allow one epilogue loop. */
gimple_stmt_iterator region_end = gsi;
- if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
+ if (insns > param_slp_max_insns_in_bb)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
/* Threshold of number of iterations below which vectorization will not be
performed. It is calculated from MIN_PROFITABLE_ITERS and
- PARAM_MIN_VECT_LOOP_BOUND. */
+ param_min_vect_loop_bound. */
unsigned int th;
/* When applying loop versioning, the vector form should only be used
/* Now register along the default label assertions that correspond to the
anti-range of each label. */
- int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
+ int insertion_limit = param_max_vrp_switch_assertions;
if (insertion_limit == 0)
return;
The loop computes the range of the final offset for expressions such
as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
some range. */
- const unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
+ const unsigned limit = param_ssa_name_def_chain_limit;
for (unsigned n = 0; TREE_CODE (arg) == SSA_NAME && n < limit; ++n)
{
gimple *def = SSA_NAME_DEF_STMT (arg);
if (TYPE_SIGN (type) == UNSIGNED)
{
/* Cache [0, N). */
- limit = INTEGER_SHARE_LIMIT;
- if (IN_RANGE (hwi, 0, INTEGER_SHARE_LIMIT - 1))
+ limit = param_integer_share_limit;
+ if (IN_RANGE (hwi, 0, param_integer_share_limit - 1))
ix = hwi;
}
else
{
/* Cache [-1, N). */
- limit = INTEGER_SHARE_LIMIT + 1;
- if (IN_RANGE (hwi, -1, INTEGER_SHARE_LIMIT - 1))
+ limit = param_integer_share_limit + 1;
+ if (IN_RANGE (hwi, -1, param_integer_share_limit - 1))
ix = hwi + 1;
}
break;
if (TYPE_UNSIGNED (type))
{
/* Cache 0..N */
- limit = INTEGER_SHARE_LIMIT;
+ limit = param_integer_share_limit;
/* This is a little hokey, but if the prec is smaller than
- what is necessary to hold INTEGER_SHARE_LIMIT, then the
+ what is necessary to hold param_integer_share_limit, then the
obvious test will not get the correct answer. */
if (prec < HOST_BITS_PER_WIDE_INT)
{
- if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
+ if (tree_to_uhwi (t)
+ < (unsigned HOST_WIDE_INT) param_integer_share_limit)
ix = tree_to_uhwi (t);
}
- else if (wi::ltu_p (wi::to_wide (t), INTEGER_SHARE_LIMIT))
+ else if (wi::ltu_p (wi::to_wide (t), param_integer_share_limit))
ix = tree_to_uhwi (t);
}
else
{
/* Cache -1..N */
- limit = INTEGER_SHARE_LIMIT + 1;
+ limit = param_integer_share_limit + 1;
if (integer_minus_onep (t))
ix = 0;
{
if (prec < HOST_BITS_PER_WIDE_INT)
{
- if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT)
+ if (tree_to_shwi (t) < param_integer_share_limit)
ix = tree_to_shwi (t) + 1;
}
- else if (wi::ltu_p (wi::to_wide (t), INTEGER_SHARE_LIMIT))
+ else if (wi::ltu_p (wi::to_wide (t), param_integer_share_limit))
ix = tree_to_shwi (t) + 1;
}
}
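The two branches above maintain a per-type cache of shared INTEGER_CST nodes indexed by value. A sketch of the index mapping, assuming the traditional default of --param integer-share-limit=251 (the default value is an assumption, not part of the patch):
  /* Unsigned types cache 0 .. limit-1 at ix == value; signed types cache
     -1 .. limit-1 shifted by one, so that -1 lands in slot 0.  */
  int limit = param_integer_share_limit;  /* Assumed 251 by default.  */
  int ix_unsigned_5 = 5;       /* unsigned value 5 -> slot 5.  */
  int ix_signed_minus1 = 0;    /* signed value -1  -> slot 0.  */
  int ix_signed_5 = 5 + 1;     /* signed value 5   -> slot 6.  */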
compile time for ridiculously complex expressions, although they're
seldom useful, and they may often have to be discarded as not
representable anyway. */
-#define EXPR_USE_DEPTH (PARAM_VALUE (PARAM_MAX_VARTRACK_EXPR_DEPTH))
+#define EXPR_USE_DEPTH (param_max_vartrack_expr_depth)
/* Attempt to reverse the EXPR operation in the debug info and record
it in the cselib table. Say for reg1 = reg2 + 6 even when reg2 is
&& (GET_CODE (l->loc) != CONST || !references_value_p (l->loc, 0)))
return;
/* Avoid creating too large locs lists. */
- else if (count == PARAM_VALUE (PARAM_MAX_VARTRACK_REVERSE_OP_SIZE))
+ else if (count == param_max_vartrack_reverse_op_size)
return;
switch (GET_CODE (src))
int *rc_order;
int i;
int htabsz = 0;
- int htabmax = PARAM_VALUE (PARAM_MAX_VARTRACK_SIZE);
+ int htabmax = param_max_vartrack_size;
bool success = true;
timevar_push (TV_VAR_TRACKING_DATAFLOW);