From cf098191e47535b89373dccb9a2d3cc4a4ebaef7 Mon Sep 17 00:00:00 2001 From: Richard Sandiford Date: Wed, 3 Jan 2018 21:42:52 +0000 Subject: [PATCH] poly_int: GET_MODE_SIZE This patch changes GET_MODE_SIZE from unsigned short to poly_uint16. The non-mechanical parts were handled by previous patches. 2018-01-03 Richard Sandiford Alan Hayward David Sherwood gcc/ * machmode.h (mode_size): Change from unsigned short to poly_uint16_pod. (mode_to_bytes): Return a poly_uint16 rather than an unsigned short. (GET_MODE_SIZE): Return a constant if ONLY_FIXED_SIZE_MODES, or if measurement_type is not polynomial. (fixed_size_mode::includes_p): Check for constant-sized modes. * genmodes.c (emit_mode_size_inline): Make mode_size_inline return a poly_uint16 rather than an unsigned short. (emit_mode_size): Change the type of mode_size from unsigned short to poly_uint16_pod. Use ZERO_COEFFS for the initializer. (emit_mode_adjustments): Cope with polynomial vector sizes. * lto-streamer-in.c (lto_input_mode_table): Use bp_unpack_poly_value for GET_MODE_SIZE. * lto-streamer-out.c (lto_write_mode_table): Use bp_pack_poly_value for GET_MODE_SIZE. * auto-inc-dec.c (try_merge): Treat GET_MODE_SIZE as polynomial. * builtins.c (expand_ifn_atomic_compare_exchange_into_call): Likewise. * caller-save.c (setup_save_areas): Likewise. (replace_reg_with_saved_mem): Likewise. * calls.c (emit_library_call_value_1): Likewise. * combine-stack-adj.c (combine_stack_adjustments_for_block): Likewise. * combine.c (simplify_set, make_extraction, simplify_shift_const_1) (gen_lowpart_for_combine): Likewise. * convert.c (convert_to_integer_1): Likewise. * cse.c (equiv_constant, cse_insn): Likewise. * cselib.c (autoinc_split, cselib_hash_rtx): Likewise. (cselib_subst_to_values): Likewise. * dce.c (word_dce_process_block): Likewise. * df-problems.c (df_word_lr_mark_ref): Likewise. * dwarf2cfi.c (init_one_dwarf_reg_size): Likewise. 
* dwarf2out.c (multiple_reg_loc_descriptor, mem_loc_descriptor) (concat_loc_descriptor, concatn_loc_descriptor, loc_descriptor) (rtl_for_decl_location): Likewise. * emit-rtl.c (gen_highpart, widen_memory_access): Likewise. * expmed.c (extract_bit_field_1, extract_integral_bit_field): Likewise. * expr.c (emit_group_load_1, clear_storage_hints): Likewise. (emit_move_complex, emit_move_multi_word, emit_push_insn): Likewise. (expand_expr_real_1): Likewise. * function.c (assign_parm_setup_block_p, assign_parm_setup_block) (pad_below): Likewise. * gimple-fold.c (optimize_atomic_compare_exchange_p): Likewise. * gimple-ssa-store-merging.c (rhs_valid_for_store_merging_p): Likewise. * ira.c (get_subreg_tracking_sizes): Likewise. * ira-build.c (ira_create_allocno_objects): Likewise. * ira-color.c (coalesced_pseudo_reg_slot_compare): Likewise. (ira_sort_regnos_for_alter_reg): Likewise. * ira-costs.c (record_operand_costs): Likewise. * lower-subreg.c (interesting_mode_p, simplify_gen_subreg_concatn) (resolve_simple_move): Likewise. * lra-constraints.c (get_reload_reg, operands_match_p): Likewise. (process_addr_reg, simplify_operand_subreg, curr_insn_transform) (lra_constraints): Likewise. (CONST_POOL_OK_P): Reject variable-sized modes. * lra-spills.c (slot, assign_mem_slot, pseudo_reg_slot_compare) (add_pseudo_to_slot, lra_spill): Likewise. * omp-low.c (omp_clause_aligned_alignment): Likewise. * optabs-query.c (get_best_extraction_insn): Likewise. * optabs-tree.c (expand_vec_cond_expr_p): Likewise. * optabs.c (expand_vec_perm_var, expand_vec_cond_expr): Likewise. (expand_mult_highpart, valid_multiword_target_p): Likewise. * recog.c (offsettable_address_addr_space_p): Likewise. * regcprop.c (maybe_mode_change): Likewise. * reginfo.c (choose_hard_reg_mode, record_subregs_of_mode): Likewise. * regrename.c (build_def_use): Likewise. * regstat.c (dump_reg_info): Likewise. 
* reload.c (complex_word_subreg_p, push_reload, find_dummy_reload) (find_reloads, find_reloads_subreg_address): Likewise. * reload1.c (eliminate_regs_1): Likewise. * rtlanal.c (for_each_inc_dec_find_inc_dec, rtx_cost): Likewise. * simplify-rtx.c (avoid_constant_pool_reference): Likewise. (simplify_binary_operation_1, simplify_subreg): Likewise. * targhooks.c (default_function_arg_padding): Likewise. (default_hard_regno_nregs, default_class_max_nregs): Likewise. * tree-cfg.c (verify_gimple_assign_binary): Likewise. (verify_gimple_assign_ternary): Likewise. * tree-inline.c (estimate_move_cost): Likewise. * tree-ssa-forwprop.c (simplify_vector_constructor): Likewise. * tree-ssa-loop-ivopts.c (add_autoinc_candidates): Likewise. (get_address_cost_ainc): Likewise. * tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Likewise. (vect_supportable_dr_alignment): Likewise. * tree-vect-loop.c (vect_determine_vectorization_factor): Likewise. (vectorizable_reduction): Likewise. * tree-vect-stmts.c (vectorizable_assignment, vectorizable_shift) (vectorizable_operation, vectorizable_load): Likewise. * tree.c (build_same_sized_truth_vector_type): Likewise. * valtrack.c (cleanup_auto_inc_dec): Likewise. * var-tracking.c (emit_note_insn_var_location): Likewise. * config/arc/arc.h (ASM_OUTPUT_CASE_END): Use as_a . (ADDR_VEC_ALIGN): Likewise. 
Co-Authored-By: Alan Hayward Co-Authored-By: David Sherwood From-SVN: r256201 --- gcc/ChangeLog | 95 ++++++++++++++++++++++++++++++++++ gcc/auto-inc-dec.c | 2 +- gcc/builtins.c | 2 +- gcc/caller-save.c | 12 ++--- gcc/calls.c | 13 ++--- gcc/combine-stack-adj.c | 8 +-- gcc/combine.c | 26 ++++------ gcc/config/arc/arc.h | 6 ++- gcc/convert.c | 16 +++--- gcc/cse.c | 11 ++-- gcc/cselib.c | 19 ++++--- gcc/dce.c | 4 +- gcc/df-problems.c | 2 +- gcc/dwarf2cfi.c | 7 +-- gcc/dwarf2out.c | 59 ++++++++++++++------- gcc/emit-rtl.c | 8 +-- gcc/expmed.c | 14 ++--- gcc/expr.c | 37 +++++++------ gcc/function.c | 10 ++-- gcc/genmodes.c | 43 ++++++++++----- gcc/gimple-fold.c | 2 +- gcc/gimple-ssa-store-merging.c | 5 +- gcc/ira-build.c | 2 +- gcc/ira-color.c | 10 ++-- gcc/ira-costs.c | 8 +-- gcc/ira.c | 6 +-- gcc/lower-subreg.c | 14 ++--- gcc/lra-constraints.c | 17 +++--- gcc/lra-spills.c | 22 ++++---- gcc/lto-streamer-in.c | 4 +- gcc/lto-streamer-out.c | 2 +- gcc/machmode.h | 32 ++++++++++-- gcc/omp-low.c | 3 +- gcc/optabs-query.c | 2 +- gcc/optabs-tree.c | 2 +- gcc/optabs.c | 15 +++--- gcc/recog.c | 6 +-- gcc/regcprop.c | 7 ++- gcc/reginfo.c | 16 +++--- gcc/regrename.c | 6 ++- gcc/regstat.c | 8 ++- gcc/reload.c | 70 +++++++++++++------------ gcc/reload1.c | 11 ++-- gcc/rtlanal.c | 8 +-- gcc/simplify-rtx.c | 44 ++++++++-------- gcc/targhooks.c | 13 +++-- gcc/tree-cfg.c | 16 +++--- gcc/tree-inline.c | 9 ++-- gcc/tree-ssa-forwprop.c | 8 +-- gcc/tree-ssa-loop-ivopts.c | 10 ++-- gcc/tree-vect-data-refs.c | 17 ++++-- gcc/tree-vect-loop.c | 6 +-- gcc/tree-vect-stmts.c | 11 ++-- gcc/tree.c | 4 +- gcc/valtrack.c | 16 +++--- gcc/var-tracking.c | 20 ++++--- 56 files changed, 535 insertions(+), 311 deletions(-) diff --git a/gcc/ChangeLog b/gcc/ChangeLog index b4e536db44c..e4759c30c11 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,98 @@ +2018-01-03 Richard Sandiford + Alan Hayward + David Sherwood + + * machmode.h (mode_size): Change from unsigned short to + poly_uint16_pod. 
+ (mode_to_bytes): Return a poly_uint16 rather than an unsigned short. + (GET_MODE_SIZE): Return a constant if ONLY_FIXED_SIZE_MODES, + or if measurement_type is not polynomial. + (fixed_size_mode::includes_p): Check for constant-sized modes. + * genmodes.c (emit_mode_size_inline): Make mode_size_inline + return a poly_uint16 rather than an unsigned short. + (emit_mode_size): Change the type of mode_size from unsigned short + to poly_uint16_pod. Use ZERO_COEFFS for the initializer. + (emit_mode_adjustments): Cope with polynomial vector sizes. + * lto-streamer-in.c (lto_input_mode_table): Use bp_unpack_poly_value + for GET_MODE_SIZE. + * lto-streamer-out.c (lto_write_mode_table): Use bp_pack_poly_value + for GET_MODE_SIZE. + * auto-inc-dec.c (try_merge): Treat GET_MODE_SIZE as polynomial. + * builtins.c (expand_ifn_atomic_compare_exchange_into_call): Likewise. + * caller-save.c (setup_save_areas): Likewise. + (replace_reg_with_saved_mem): Likewise. + * calls.c (emit_library_call_value_1): Likewise. + * combine-stack-adj.c (combine_stack_adjustments_for_block): Likewise. + * combine.c (simplify_set, make_extraction, simplify_shift_const_1) + (gen_lowpart_for_combine): Likewise. + * convert.c (convert_to_integer_1): Likewise. + * cse.c (equiv_constant, cse_insn): Likewise. + * cselib.c (autoinc_split, cselib_hash_rtx): Likewise. + (cselib_subst_to_values): Likewise. + * dce.c (word_dce_process_block): Likewise. + * df-problems.c (df_word_lr_mark_ref): Likewise. + * dwarf2cfi.c (init_one_dwarf_reg_size): Likewise. + * dwarf2out.c (multiple_reg_loc_descriptor, mem_loc_descriptor) + (concat_loc_descriptor, concatn_loc_descriptor, loc_descriptor) + (rtl_for_decl_location): Likewise. + * emit-rtl.c (gen_highpart, widen_memory_access): Likewise. + * expmed.c (extract_bit_field_1, extract_integral_bit_field): Likewise. + * expr.c (emit_group_load_1, clear_storage_hints): Likewise. + (emit_move_complex, emit_move_multi_word, emit_push_insn): Likewise. 
+ (expand_expr_real_1): Likewise. + * function.c (assign_parm_setup_block_p, assign_parm_setup_block) + (pad_below): Likewise. + * gimple-fold.c (optimize_atomic_compare_exchange_p): Likewise. + * gimple-ssa-store-merging.c (rhs_valid_for_store_merging_p): Likewise. + * ira.c (get_subreg_tracking_sizes): Likewise. + * ira-build.c (ira_create_allocno_objects): Likewise. + * ira-color.c (coalesced_pseudo_reg_slot_compare): Likewise. + (ira_sort_regnos_for_alter_reg): Likewise. + * ira-costs.c (record_operand_costs): Likewise. + * lower-subreg.c (interesting_mode_p, simplify_gen_subreg_concatn) + (resolve_simple_move): Likewise. + * lra-constraints.c (get_reload_reg, operands_match_p): Likewise. + (process_addr_reg, simplify_operand_subreg, curr_insn_transform) + (lra_constraints): Likewise. + (CONST_POOL_OK_P): Reject variable-sized modes. + * lra-spills.c (slot, assign_mem_slot, pseudo_reg_slot_compare) + (add_pseudo_to_slot, lra_spill): Likewise. + * omp-low.c (omp_clause_aligned_alignment): Likewise. + * optabs-query.c (get_best_extraction_insn): Likewise. + * optabs-tree.c (expand_vec_cond_expr_p): Likewise. + * optabs.c (expand_vec_perm_var, expand_vec_cond_expr): Likewise. + (expand_mult_highpart, valid_multiword_target_p): Likewise. + * recog.c (offsettable_address_addr_space_p): Likewise. + * regcprop.c (maybe_mode_change): Likewise. + * reginfo.c (choose_hard_reg_mode, record_subregs_of_mode): Likewise. + * regrename.c (build_def_use): Likewise. + * regstat.c (dump_reg_info): Likewise. + * reload.c (complex_word_subreg_p, push_reload, find_dummy_reload) + (find_reloads, find_reloads_subreg_address): Likewise. + * reload1.c (eliminate_regs_1): Likewise. + * rtlanal.c (for_each_inc_dec_find_inc_dec, rtx_cost): Likewise. + * simplify-rtx.c (avoid_constant_pool_reference): Likewise. + (simplify_binary_operation_1, simplify_subreg): Likewise. + * targhooks.c (default_function_arg_padding): Likewise. + (default_hard_regno_nregs, default_class_max_nregs): Likewise. 
+ * tree-cfg.c (verify_gimple_assign_binary): Likewise. + (verify_gimple_assign_ternary): Likewise. + * tree-inline.c (estimate_move_cost): Likewise. + * tree-ssa-forwprop.c (simplify_vector_constructor): Likewise. + * tree-ssa-loop-ivopts.c (add_autoinc_candidates): Likewise. + (get_address_cost_ainc): Likewise. + * tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Likewise. + (vect_supportable_dr_alignment): Likewise. + * tree-vect-loop.c (vect_determine_vectorization_factor): Likewise. + (vectorizable_reduction): Likewise. + * tree-vect-stmts.c (vectorizable_assignment, vectorizable_shift) + (vectorizable_operation, vectorizable_load): Likewise. + * tree.c (build_same_sized_truth_vector_type): Likewise. + * valtrack.c (cleanup_auto_inc_dec): Likewise. + * var-tracking.c (emit_note_insn_var_location): Likewise. + * config/arc/arc.h (ASM_OUTPUT_CASE_END): Use as_a . + (ADDR_VEC_ALIGN): Likewise. + 2018-01-03 Richard Sandiford Alan Hayward David Sherwood diff --git a/gcc/auto-inc-dec.c b/gcc/auto-inc-dec.c index 72faed50f5a..d02fa9d081c 100644 --- a/gcc/auto-inc-dec.c +++ b/gcc/auto-inc-dec.c @@ -601,7 +601,7 @@ try_merge (void) inc_insn.reg_res : mem_insn.reg0; /* The width of the mem being accessed. */ - int size = GET_MODE_SIZE (GET_MODE (mem)); + poly_int64 size = GET_MODE_SIZE (GET_MODE (mem)); rtx_insn *last_insn = NULL; machine_mode reg_mode = GET_MODE (inc_reg); diff --git a/gcc/builtins.c b/gcc/builtins.c index 27ca135eeaf..1d6e69d30ce 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -5989,7 +5989,7 @@ expand_ifn_atomic_compare_exchange_into_call (gcall *call, machine_mode mode) for (z = 4; z < 6; z++) vec->quick_push (gimple_call_arg (call, z)); /* At present we only have BUILT_IN_ATOMIC_COMPARE_EXCHANGE_{1,2,4,8,16}. 
*/ - unsigned int bytes_log2 = exact_log2 (GET_MODE_SIZE (mode)); + unsigned int bytes_log2 = exact_log2 (GET_MODE_SIZE (mode).to_constant ()); gcc_assert (bytes_log2 < 5); built_in_function fncode = (built_in_function) ((int) BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1 diff --git a/gcc/caller-save.c b/gcc/caller-save.c index 8309bf2e571..524de495b51 100644 --- a/gcc/caller-save.c +++ b/gcc/caller-save.c @@ -607,9 +607,9 @@ setup_save_areas (void) break; } if (k < 0 - && (GET_MODE_SIZE (regno_save_mode[regno][1]) - <= GET_MODE_SIZE (regno_save_mode - [saved_reg2->hard_regno][1]))) + && known_le (GET_MODE_SIZE (regno_save_mode[regno][1]), + GET_MODE_SIZE (regno_save_mode + [saved_reg2->hard_regno][1]))) { saved_reg->slot = adjust_address_nv @@ -631,8 +631,8 @@ setup_save_areas (void) slot = prev_save_slots[j]; if (slot == NULL_RTX) continue; - if (GET_MODE_SIZE (regno_save_mode[regno][1]) - <= GET_MODE_SIZE (GET_MODE (slot)) + if (known_le (GET_MODE_SIZE (regno_save_mode[regno][1]), + GET_MODE_SIZE (GET_MODE (slot))) && best_slot_num < 0) best_slot_num = j; if (GET_MODE (slot) == regno_save_mode[regno][1]) @@ -1147,7 +1147,7 @@ replace_reg_with_saved_mem (rtx *loc, machine_mode smode = save_mode[regno]; gcc_assert (smode != VOIDmode); if (hard_regno_nregs (regno, smode) > 1) - smode = mode_for_size (GET_MODE_SIZE (mode) / nregs, + smode = mode_for_size (exact_div (GET_MODE_SIZE (mode), nregs), GET_MODE_CLASS (mode), 0).require (); XVECEXP (mem, 0, i) = gen_rtx_REG (smode, regno + i); } diff --git a/gcc/calls.c b/gcc/calls.c index 83736e18b69..bdd49914fb2 100644 --- a/gcc/calls.c +++ b/gcc/calls.c @@ -4686,7 +4686,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value, rtx mem_value = 0; rtx valreg; int pcc_struct_value = 0; - int struct_value_size = 0; + poly_int64 struct_value_size = 0; int flags; int reg_parm_stack_space = 0; poly_int64 needed; @@ -4925,7 +4925,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value, end it should be padded. 
*/ argvec[count].locate.where_pad = BLOCK_REG_PADDING (mode, NULL_TREE, - GET_MODE_SIZE (mode) <= UNITS_PER_WORD); + known_le (GET_MODE_SIZE (mode), UNITS_PER_WORD)); #endif targetm.calls.function_arg_advance (args_so_far, mode, (tree) 0, true); @@ -5176,9 +5176,6 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value, rtx val = argvec[argnum].value; rtx reg = argvec[argnum].reg; int partial = argvec[argnum].partial; -#ifdef BLOCK_REG_PADDING - int size = 0; -#endif /* Handle calls that pass values in multiple non-contiguous locations. The PA64 has examples of this for library calls. */ @@ -5188,19 +5185,19 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value, { emit_move_insn (reg, val); #ifdef BLOCK_REG_PADDING - size = GET_MODE_SIZE (argvec[argnum].mode); + poly_int64 size = GET_MODE_SIZE (argvec[argnum].mode); /* Copied from load_register_parameters. */ /* Handle case where we have a value that needs shifting up to the msb. eg. a QImode value and we're padding upward on a BYTES_BIG_ENDIAN machine. */ - if (size < UNITS_PER_WORD + if (known_lt (size, UNITS_PER_WORD) && (argvec[argnum].locate.where_pad == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD))) { rtx x; - int shift = (UNITS_PER_WORD - size) * BITS_PER_UNIT; + poly_int64 shift = (UNITS_PER_WORD - size) * BITS_PER_UNIT; /* Assigning REG here rather than a temp makes CALL_FUSAGE report the whole reg as used. Strictly speaking, the diff --git a/gcc/combine-stack-adj.c b/gcc/combine-stack-adj.c index ad25f04a1b4..4573dc2a87d 100644 --- a/gcc/combine-stack-adj.c +++ b/gcc/combine-stack-adj.c @@ -622,11 +622,11 @@ combine_stack_adjustments_for_block (basic_block bb) if (MEM_P (dest) && ((STACK_GROWS_DOWNWARD ? 
(GET_CODE (XEXP (dest, 0)) == PRE_DEC - && last_sp_adjust - == (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (dest))) + && known_eq (last_sp_adjust, + GET_MODE_SIZE (GET_MODE (dest)))) : (GET_CODE (XEXP (dest, 0)) == PRE_INC - && last_sp_adjust - == -(HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (dest)))) + && known_eq (-last_sp_adjust, + GET_MODE_SIZE (GET_MODE (dest))))) || ((STACK_GROWS_DOWNWARD ? last_sp_adjust >= 0 : last_sp_adjust <= 0) && GET_CODE (XEXP (dest, 0)) == PRE_MODIFY diff --git a/gcc/combine.c b/gcc/combine.c index 19cdb2d7f65..3a42de53455 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -6931,10 +6931,10 @@ simplify_set (rtx x) if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src) && !OBJECT_P (SUBREG_REG (src)) - && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1)) - / UNITS_PER_WORD) - == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))) - + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)) + && (known_equal_after_align_up + (GET_MODE_SIZE (GET_MODE (src)), + GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))), + UNITS_PER_WORD)) && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src)) && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER && !REG_CAN_CHANGE_MODE_P (REGNO (dest), @@ -7773,7 +7773,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos, && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner)) && ! MEM_VOLATILE_P (inner)) { - int offset = 0; + poly_int64 offset = 0; /* The computations below will be correct if the machine is big endian in both bits and bytes or little endian in bits and bytes. @@ -10469,8 +10469,6 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode, machine_mode mode = result_mode; machine_mode shift_mode; scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode; - unsigned int mode_words - = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD; /* We form (outer_op (code varop count) (outer_const)). 
*/ enum rtx_code outer_op = UNKNOWN; HOST_WIDE_INT outer_const = 0; @@ -10651,9 +10649,8 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode, if (subreg_lowpart_p (varop) && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode) && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode) - && (unsigned int) ((GET_MODE_SIZE (inner_mode) - + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) - == mode_words + && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD) + == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD)) && GET_MODE_CLASS (int_varop_mode) == MODE_INT) { varop = SUBREG_REG (varop); @@ -11625,8 +11622,6 @@ static rtx gen_lowpart_for_combine (machine_mode omode, rtx x) { machine_mode imode = GET_MODE (x); - unsigned int osize = GET_MODE_SIZE (omode); - unsigned int isize = GET_MODE_SIZE (imode); rtx result; if (omode == imode) @@ -11634,8 +11629,9 @@ gen_lowpart_for_combine (machine_mode omode, rtx x) /* We can only support MODE being wider than a word if X is a constant integer or has a mode the same size. */ - if (GET_MODE_SIZE (omode) > UNITS_PER_WORD - && ! (CONST_SCALAR_INT_P (x) || isize == osize)) + if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD) + && ! (CONST_SCALAR_INT_P (x) + || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode)))) goto fail; /* X might be a paradoxical (subreg (mem)). 
In that case, gen_lowpart @@ -11652,8 +11648,6 @@ gen_lowpart_for_combine (machine_mode omode, rtx x) if (imode == omode) return x; - - isize = GET_MODE_SIZE (imode); } result = gen_lowpart_common (omode, x); diff --git a/gcc/config/arc/arc.h b/gcc/config/arc/arc.h index 140080f03f9..a04a3e9f116 100644 --- a/gcc/config/arc/arc.h +++ b/gcc/config/arc/arc.h @@ -1291,7 +1291,8 @@ do { \ do \ { \ if (GET_CODE (PATTERN (JUMPTABLE)) == ADDR_DIFF_VEC \ - && ((GET_MODE_SIZE (GET_MODE (PATTERN (JUMPTABLE))) \ + && ((GET_MODE_SIZE (as_a \ + (GET_MODE (PATTERN (JUMPTABLE)))) \ * XVECLEN (PATTERN (JUMPTABLE), 1) + 1) \ & 2)) \ arc_toggle_unalign (); \ @@ -1405,7 +1406,8 @@ do { \ : SImode) #define ADDR_VEC_ALIGN(VEC_INSN) \ - (exact_log2 (GET_MODE_SIZE (GET_MODE (PATTERN (VEC_INSN))))) + (exact_log2 (GET_MODE_SIZE (as_a \ + (GET_MODE (PATTERN (VEC_INSN)))))) #undef ASM_OUTPUT_BEFORE_CASE_LABEL #define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \ ASM_OUTPUT_ALIGN ((FILE), ADDR_VEC_ALIGN (TABLE)) diff --git a/gcc/convert.c b/gcc/convert.c index 4b92cc58a59..e168a266ff4 100644 --- a/gcc/convert.c +++ b/gcc/convert.c @@ -922,13 +922,15 @@ convert_to_integer_1 (tree type, tree expr, bool dofold) } CASE_CONVERT: - /* Don't introduce a "can't convert between vector values of - different size" error. */ - if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE - && (GET_MODE_SIZE (TYPE_MODE - (TREE_TYPE (TREE_OPERAND (expr, 0)))) - != GET_MODE_SIZE (TYPE_MODE (type)))) - break; + { + tree argtype = TREE_TYPE (TREE_OPERAND (expr, 0)); + /* Don't introduce a "can't convert between vector values + of different size" error. */ + if (TREE_CODE (argtype) == VECTOR_TYPE + && maybe_ne (GET_MODE_SIZE (TYPE_MODE (argtype)), + GET_MODE_SIZE (TYPE_MODE (type)))) + break; + } /* If truncating after truncating, might as well do all at once. If truncating after extending, we may get rid of wasted work. 
*/ return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type)); diff --git a/gcc/cse.c b/gcc/cse.c index 2101eeb8244..825b0bd8989 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -3807,8 +3807,8 @@ equiv_constant (rtx x) /* If we didn't and if doing so makes sense, see if we previously assigned a constant value to the enclosing word mode SUBREG. */ - if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode) - && GET_MODE_SIZE (word_mode) < GET_MODE_SIZE (imode)) + if (known_lt (GET_MODE_SIZE (mode), UNITS_PER_WORD) + && known_lt (UNITS_PER_WORD, GET_MODE_SIZE (imode))) { poly_int64 byte = (SUBREG_BYTE (x) - subreg_lowpart_offset (mode, word_mode)); @@ -5986,9 +5986,10 @@ cse_insn (rtx_insn *insn) already entered SRC and DEST of the SET in the table. */ if (GET_CODE (dest) == SUBREG - && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1) - / UNITS_PER_WORD) - == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD) + && (known_equal_after_align_down + (GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1, + GET_MODE_SIZE (GET_MODE (dest)) - 1, + UNITS_PER_WORD)) && !partial_subreg_p (dest) && sets[i].src_elt != 0) { diff --git a/gcc/cselib.c b/gcc/cselib.c index ca4a53a8733..586b8cc3a66 100644 --- a/gcc/cselib.c +++ b/gcc/cselib.c @@ -805,14 +805,14 @@ autoinc_split (rtx x, rtx *off, machine_mode memmode) if (memmode == VOIDmode) return x; - *off = GEN_INT (-GET_MODE_SIZE (memmode)); + *off = gen_int_mode (-GET_MODE_SIZE (memmode), GET_MODE (x)); return XEXP (x, 0); case PRE_INC: if (memmode == VOIDmode) return x; - *off = GEN_INT (GET_MODE_SIZE (memmode)); + *off = gen_int_mode (GET_MODE_SIZE (memmode), GET_MODE (x)); return XEXP (x, 0); case PRE_MODIFY: @@ -1068,6 +1068,7 @@ static unsigned int cselib_hash_rtx (rtx x, int create, machine_mode memmode) { cselib_val *e; + poly_int64 offset; int i, j; enum rtx_code code; const char *fmt; @@ -1203,14 +1204,15 @@ cselib_hash_rtx (rtx x, int create, machine_mode memmode) case PRE_INC: /* We can't compute these without 
knowing the MEM mode. */ gcc_assert (memmode != VOIDmode); - i = GET_MODE_SIZE (memmode); + offset = GET_MODE_SIZE (memmode); if (code == PRE_DEC) - i = -i; + offset = -offset; /* Adjust the hash so that (mem:MEMMODE (pre_* (reg))) hashes like (mem:MEMMODE (plus (reg) (const_int I))). */ hash += (unsigned) PLUS - (unsigned)code + cselib_hash_rtx (XEXP (x, 0), create, memmode) - + cselib_hash_rtx (GEN_INT (i), create, memmode); + + cselib_hash_rtx (gen_int_mode (offset, GET_MODE (x)), + create, memmode); return hash ? hash : 1 + (unsigned) PLUS; case PRE_MODIFY: @@ -1871,6 +1873,7 @@ cselib_subst_to_values (rtx x, machine_mode memmode) struct elt_list *l; rtx copy = x; int i; + poly_int64 offset; switch (code) { @@ -1907,11 +1910,11 @@ cselib_subst_to_values (rtx x, machine_mode memmode) case PRE_DEC: case PRE_INC: gcc_assert (memmode != VOIDmode); - i = GET_MODE_SIZE (memmode); + offset = GET_MODE_SIZE (memmode); if (code == PRE_DEC) - i = -i; + offset = -offset; return cselib_subst_to_values (plus_constant (GET_MODE (x), - XEXP (x, 0), i), + XEXP (x, 0), offset), memmode); case PRE_MODIFY: diff --git a/gcc/dce.c b/gcc/dce.c index f4c7a9811a3..590b6874e53 100644 --- a/gcc/dce.c +++ b/gcc/dce.c @@ -884,8 +884,8 @@ word_dce_process_block (basic_block bb, bool redo_out, df_ref use; FOR_EACH_INSN_USE (use, insn) if (DF_REF_REGNO (use) >= FIRST_PSEUDO_REGISTER - && (GET_MODE_SIZE (GET_MODE (DF_REF_REAL_REG (use))) - == 2 * UNITS_PER_WORD) + && known_eq (GET_MODE_SIZE (GET_MODE (DF_REF_REAL_REG (use))), + 2 * UNITS_PER_WORD) && !bitmap_bit_p (local_live, 2 * DF_REF_REGNO (use)) && !bitmap_bit_p (local_live, 2 * DF_REF_REGNO (use) + 1)) dead_debug_add (&debug, use, DF_REF_REGNO (use)); diff --git a/gcc/df-problems.c b/gcc/df-problems.c index 6d14a7de85e..3d73bc5df10 100644 --- a/gcc/df-problems.c +++ b/gcc/df-problems.c @@ -2815,7 +2815,7 @@ df_word_lr_mark_ref (df_ref ref, bool is_set, regset live) regno = REGNO (reg); reg_mode = GET_MODE (reg); if (regno < 
FIRST_PSEUDO_REGISTER - || GET_MODE_SIZE (reg_mode) != 2 * UNITS_PER_WORD) + || maybe_ne (GET_MODE_SIZE (reg_mode), 2 * UNITS_PER_WORD)) return true; if (GET_CODE (orig_reg) == SUBREG diff --git a/gcc/dwarf2cfi.c b/gcc/dwarf2cfi.c index cc0d45bf17f..3ae5b8e894f 100644 --- a/gcc/dwarf2cfi.c +++ b/gcc/dwarf2cfi.c @@ -270,8 +270,8 @@ void init_one_dwarf_reg_size (int regno, machine_mode regmode, const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1); const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum); - const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode); - const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode); + poly_int64 slotoffset = dcol * GET_MODE_SIZE (slotmode); + poly_int64 regsize = GET_MODE_SIZE (regmode); init_state->processed_regno[regno] = true; @@ -285,7 +285,8 @@ void init_one_dwarf_reg_size (int regno, machine_mode regmode, init_state->wrote_return_column = true; } - if (slotoffset < 0) + /* ??? When is this true? Should it be a test based on DCOL instead? */ + if (maybe_lt (slotoffset, 0)) return; emit_move_insn (adjust_address (table, slotmode, slotoffset), diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c index 56cc454302c..ae91c2d750b 100644 --- a/gcc/dwarf2out.c +++ b/gcc/dwarf2out.c @@ -13184,7 +13184,10 @@ multiple_reg_loc_descriptor (rtx rtl, rtx regs, gcc_assert ((unsigned) DBX_REGISTER_NUMBER (reg) == dbx_reg_number (rtl)); nregs = REG_NREGS (rtl); - size = GET_MODE_SIZE (GET_MODE (rtl)) / nregs; + /* At present we only track constant-sized pieces. */ + if (!GET_MODE_SIZE (GET_MODE (rtl)).is_constant (&size)) + return NULL; + size /= nregs; loc_result = NULL; while (nregs--) @@ -13204,7 +13207,9 @@ multiple_reg_loc_descriptor (rtx rtl, rtx regs, gcc_assert (GET_CODE (regs) == PARALLEL); - size = GET_MODE_SIZE (GET_MODE (XVECEXP (regs, 0, 0))); + /* At present we only track constant-sized pieces. 
*/ + if (!GET_MODE_SIZE (GET_MODE (XVECEXP (regs, 0, 0))).is_constant (&size)) + return NULL; loc_result = NULL; for (i = 0; i < XVECLEN (regs, 0); ++i) @@ -14797,7 +14802,7 @@ mem_loc_descriptor (rtx rtl, machine_mode mode, if (is_a (mode, &int_mode) && is_a (GET_MODE (inner), &inner_mode) ? GET_MODE_SIZE (int_mode) <= GET_MODE_SIZE (inner_mode) - : GET_MODE_SIZE (mode) == GET_MODE_SIZE (GET_MODE (inner))) + : known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (GET_MODE (inner)))) { dw_die_ref type_die; dw_loc_descr_ref cvt; @@ -14813,8 +14818,7 @@ mem_loc_descriptor (rtx rtl, machine_mode mode, mem_loc_result = NULL; break; } - if (GET_MODE_SIZE (mode) - != GET_MODE_SIZE (GET_MODE (inner))) + if (maybe_ne (GET_MODE_SIZE (mode), GET_MODE_SIZE (GET_MODE (inner)))) cvt = new_loc_descr (dwarf_OP (DW_OP_convert), 0, 0); else cvt = new_loc_descr (dwarf_OP (DW_OP_reinterpret), 0, 0); @@ -14975,15 +14979,17 @@ mem_loc_descriptor (rtx rtl, machine_mode mode, { dw_die_ref type_die; dw_loc_descr_ref deref; + HOST_WIDE_INT size; if (dwarf_strict && dwarf_version < 5) return NULL; + if (!GET_MODE_SIZE (mode).is_constant (&size)) + return NULL; type_die = base_type_for_mode (mode, SCALAR_INT_MODE_P (mode)); if (type_die == NULL) return NULL; - deref = new_loc_descr (dwarf_OP (DW_OP_deref_type), - GET_MODE_SIZE (mode), 0); + deref = new_loc_descr (dwarf_OP (DW_OP_deref_type), size, 0); deref->dw_loc_oprnd2.val_class = dw_val_class_die_ref; deref->dw_loc_oprnd2.v.val_die_ref.die = type_die; deref->dw_loc_oprnd2.v.val_die_ref.external = 0; @@ -15760,6 +15766,12 @@ mem_loc_descriptor (rtx rtl, machine_mode mode, static dw_loc_descr_ref concat_loc_descriptor (rtx x0, rtx x1, enum var_init_status initialized) { + /* At present we only track constant-sized pieces. 
*/ + unsigned int size0, size1; + if (!GET_MODE_SIZE (GET_MODE (x0)).is_constant (&size0) + || !GET_MODE_SIZE (GET_MODE (x1)).is_constant (&size1)) + return 0; + dw_loc_descr_ref cc_loc_result = NULL; dw_loc_descr_ref x0_ref = loc_descriptor (x0, VOIDmode, VAR_INIT_STATUS_INITIALIZED); @@ -15770,10 +15782,10 @@ concat_loc_descriptor (rtx x0, rtx x1, enum var_init_status initialized) return 0; cc_loc_result = x0_ref; - add_loc_descr_op_piece (&cc_loc_result, GET_MODE_SIZE (GET_MODE (x0))); + add_loc_descr_op_piece (&cc_loc_result, size0); add_loc_descr (&cc_loc_result, x1_ref); - add_loc_descr_op_piece (&cc_loc_result, GET_MODE_SIZE (GET_MODE (x1))); + add_loc_descr_op_piece (&cc_loc_result, size1); if (initialized == VAR_INIT_STATUS_UNINITIALIZED) add_loc_descr (&cc_loc_result, new_loc_descr (DW_OP_GNU_uninit, 0, 0)); @@ -15790,18 +15802,23 @@ concatn_loc_descriptor (rtx concatn, enum var_init_status initialized) unsigned int i; dw_loc_descr_ref cc_loc_result = NULL; unsigned int n = XVECLEN (concatn, 0); + unsigned int size; for (i = 0; i < n; ++i) { dw_loc_descr_ref ref; rtx x = XVECEXP (concatn, 0, i); + /* At present we only track constant-sized pieces. */ + if (!GET_MODE_SIZE (GET_MODE (x)).is_constant (&size)) + return NULL; + ref = loc_descriptor (x, VOIDmode, VAR_INIT_STATUS_INITIALIZED); if (ref == NULL) return NULL; add_loc_descr (&cc_loc_result, ref); - add_loc_descr_op_piece (&cc_loc_result, GET_MODE_SIZE (GET_MODE (x))); + add_loc_descr_op_piece (&cc_loc_result, size); } if (cc_loc_result && initialized == VAR_INIT_STATUS_UNINITIALIZED) @@ -15920,7 +15937,7 @@ loc_descriptor (rtx rtl, machine_mode mode, rtvec par_elems = XVEC (rtl, 0); int num_elem = GET_NUM_ELEM (par_elems); machine_mode mode; - int i; + int i, size; /* Create the first one, so we have something to add to. 
*/ loc_result = loc_descriptor (XEXP (RTVEC_ELT (par_elems, 0), 0), @@ -15928,7 +15945,10 @@ loc_descriptor (rtx rtl, machine_mode mode, if (loc_result == NULL) return NULL; mode = GET_MODE (XEXP (RTVEC_ELT (par_elems, 0), 0)); - add_loc_descr_op_piece (&loc_result, GET_MODE_SIZE (mode)); + /* At present we only track constant-sized pieces. */ + if (!GET_MODE_SIZE (mode).is_constant (&size)) + return NULL; + add_loc_descr_op_piece (&loc_result, size); for (i = 1; i < num_elem; i++) { dw_loc_descr_ref temp; @@ -15939,7 +15959,10 @@ loc_descriptor (rtx rtl, machine_mode mode, return NULL; add_loc_descr (&loc_result, temp); mode = GET_MODE (XEXP (RTVEC_ELT (par_elems, i), 0)); - add_loc_descr_op_piece (&loc_result, GET_MODE_SIZE (mode)); + /* At present we only track constant-sized pieces. */ + if (!GET_MODE_SIZE (mode).is_constant (&size)) + return NULL; + add_loc_descr_op_piece (&loc_result, size); } } break; @@ -19178,7 +19201,7 @@ rtl_for_decl_location (tree decl) rtl = DECL_INCOMING_RTL (decl); else if ((rtl == NULL_RTX || is_pseudo_reg (rtl)) && SCALAR_INT_MODE_P (dmode) - && GET_MODE_SIZE (dmode) <= GET_MODE_SIZE (pmode) + && known_le (GET_MODE_SIZE (dmode), GET_MODE_SIZE (pmode)) && DECL_INCOMING_RTL (decl)) { rtx inc = DECL_INCOMING_RTL (decl); @@ -19219,12 +19242,12 @@ rtl_for_decl_location (tree decl) /* Big endian correction check. 
*/ && BYTES_BIG_ENDIAN && TYPE_MODE (TREE_TYPE (decl)) != GET_MODE (rtl) - && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl))) - < UNITS_PER_WORD)) + && known_lt (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl))), + UNITS_PER_WORD)) { machine_mode addr_mode = get_address_mode (rtl); - int offset = (UNITS_PER_WORD - - GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl)))); + poly_int64 offset = (UNITS_PER_WORD + - GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl)))); rtl = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (decl)), plus_constant (addr_mode, XEXP (rtl, 0), offset)); diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c index 626567a7179..eedd1616722 100644 --- a/gcc/emit-rtl.c +++ b/gcc/emit-rtl.c @@ -1605,13 +1605,13 @@ gen_lowpart_common (machine_mode mode, rtx x) rtx gen_highpart (machine_mode mode, rtx x) { - unsigned int msize = GET_MODE_SIZE (mode); + poly_uint64 msize = GET_MODE_SIZE (mode); rtx result; /* This case loses if X is a subreg. To catch bugs early, complain if an invalid MODE is used even in other cases. */ - gcc_assert (msize <= UNITS_PER_WORD - || msize == (unsigned int) GET_MODE_UNIT_SIZE (GET_MODE (x))); + gcc_assert (known_le (msize, (unsigned int) UNITS_PER_WORD) + || known_eq (msize, GET_MODE_UNIT_SIZE (GET_MODE (x)))); result = simplify_gen_subreg (mode, x, GET_MODE (x), subreg_highpart_offset (mode, GET_MODE (x))); @@ -2573,7 +2573,7 @@ rtx widen_memory_access (rtx memref, machine_mode mode, poly_int64 offset) { rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1, 0, 0); - unsigned int size = GET_MODE_SIZE (mode); + poly_uint64 size = GET_MODE_SIZE (mode); /* If there are no changes, just return the original memory reference. 
*/ if (new_rtx == memref) diff --git a/gcc/expmed.c b/gcc/expmed.c index ed21b277bcc..4433460033b 100644 --- a/gcc/expmed.c +++ b/gcc/expmed.c @@ -1632,7 +1632,7 @@ extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum, && !MEM_P (op0) && VECTOR_MODE_P (tmode) && known_eq (bitsize, GET_MODE_SIZE (tmode)) - && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (tmode)) + && maybe_gt (GET_MODE_SIZE (GET_MODE (op0)), GET_MODE_SIZE (tmode))) { machine_mode new_mode = GET_MODE (op0); if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)) @@ -1643,7 +1643,8 @@ extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum, GET_MODE_UNIT_BITSIZE (tmode), &nunits) || !mode_for_vector (inner_mode, nunits).exists (&new_mode) || !VECTOR_MODE_P (new_mode) - || GET_MODE_SIZE (new_mode) != GET_MODE_SIZE (GET_MODE (op0)) + || maybe_ne (GET_MODE_SIZE (new_mode), + GET_MODE_SIZE (GET_MODE (op0))) || GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode) || !targetm.vector_mode_supported_p (new_mode)) new_mode = VOIDmode; @@ -1699,8 +1700,8 @@ extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum, new_mode = MIN_MODE_VECTOR_INT; FOR_EACH_MODE_FROM (new_mode, new_mode) - if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0)) - && GET_MODE_UNIT_SIZE (new_mode) == GET_MODE_SIZE (tmode) + if (known_eq (GET_MODE_SIZE (new_mode), GET_MODE_SIZE (GET_MODE (op0))) + && known_eq (GET_MODE_UNIT_SIZE (new_mode), GET_MODE_SIZE (tmode)) && targetm.vector_mode_supported_p (new_mode)) break; if (new_mode != VOIDmode) @@ -1758,7 +1759,7 @@ extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum, } else { - HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0)); + poly_int64 size = GET_MODE_SIZE (GET_MODE (op0)); rtx mem = assign_stack_temp (GET_MODE (op0), size); emit_move_insn (mem, op0); op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size); @@ -1858,7 +1859,8 @@ extract_integral_bit_field (rtx op0, opt_scalar_int_mode 
op0_mode, /* The mode must be fixed-size, since extract_bit_field_1 handles extractions from variable-sized objects before calling this function. */ - unsigned int target_size = GET_MODE_SIZE (GET_MODE (target)); + unsigned int target_size + = GET_MODE_SIZE (GET_MODE (target)).to_constant (); last = get_last_insn (); for (i = 0; i < nwords; i++) { diff --git a/gcc/expr.c b/gcc/expr.c index 444a3a78a6b..281a714a797 100644 --- a/gcc/expr.c +++ b/gcc/expr.c @@ -2238,7 +2238,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, else if (VECTOR_MODE_P (GET_MODE (dst)) && REG_P (src)) { - int slen = GET_MODE_SIZE (GET_MODE (src)); + poly_uint64 slen = GET_MODE_SIZE (GET_MODE (src)); rtx mem; mem = assign_stack_temp (GET_MODE (src), slen); @@ -2970,7 +2970,7 @@ clear_storage_hints (rtx object, rtx size, enum block_op_methods method, just move a zero. Otherwise, do this a piece at a time. */ if (mode != BLKmode && CONST_INT_P (size) - && INTVAL (size) == (HOST_WIDE_INT) GET_MODE_SIZE (mode)) + && known_eq (INTVAL (size), GET_MODE_SIZE (mode))) { rtx zero = CONST0_RTX (mode); if (zero != NULL) @@ -3508,7 +3508,7 @@ emit_move_complex (machine_mode mode, rtx x, rtx y) existing block move logic. */ if (MEM_P (x) && MEM_P (y)) { - emit_block_move (x, y, GEN_INT (GET_MODE_SIZE (mode)), + emit_block_move (x, y, gen_int_mode (GET_MODE_SIZE (mode), Pmode), BLOCK_OP_NO_LIBCALL); return get_last_insn (); } @@ -3573,9 +3573,12 @@ emit_move_multi_word (machine_mode mode, rtx x, rtx y) rtx_insn *seq; rtx inner; bool need_clobber; - int i; + int i, mode_size; - gcc_assert (GET_MODE_SIZE (mode) >= UNITS_PER_WORD); + /* This function can only handle cases where the number of words is + known at compile time. */ + mode_size = GET_MODE_SIZE (mode).to_constant (); + gcc_assert (mode_size >= UNITS_PER_WORD); /* If X is a push on the stack, do the push now and replace X with a reference to the stack pointer. 
*/ @@ -3594,9 +3597,7 @@ emit_move_multi_word (machine_mode mode, rtx x, rtx y) start_sequence (); need_clobber = false; - for (i = 0; - i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD; - i++) + for (i = 0; i < CEIL (mode_size, UNITS_PER_WORD); i++) { rtx xpart = operand_subword (x, i, 1, mode); rtx ypart; @@ -4337,7 +4338,7 @@ emit_push_insn (rtx x, machine_mode mode, tree type, rtx size, /* A value is to be stored in an insufficiently aligned stack slot; copy via a suitably aligned slot if necessary. */ - size = GEN_INT (GET_MODE_SIZE (mode)); + size = gen_int_mode (GET_MODE_SIZE (mode), Pmode); if (!MEM_P (xinner)) { temp = assign_temp (type, 1, 1); @@ -4493,9 +4494,10 @@ emit_push_insn (rtx x, machine_mode mode, tree type, rtx size, } else if (partial > 0) { - /* Scalar partly in registers. */ - - int size = GET_MODE_SIZE (mode) / UNITS_PER_WORD; + /* Scalar partly in registers. This case is only supported + for fixed-width modes. */ + int size = GET_MODE_SIZE (mode).to_constant (); + size /= UNITS_PER_WORD; int i; int not_stack; /* # bytes of start of argument @@ -11158,10 +11160,13 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode, gcc_assert (!TREE_ADDRESSABLE (exp)); if (GET_MODE (op0) == BLKmode) - emit_block_move (new_with_op0_mode, op0, - GEN_INT (GET_MODE_SIZE (mode)), - (modifier == EXPAND_STACK_PARM - ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL)); + { + rtx size_rtx = gen_int_mode (mode_size, Pmode); + emit_block_move (new_with_op0_mode, op0, size_rtx, + (modifier == EXPAND_STACK_PARM + ? BLOCK_OP_CALL_PARM + : BLOCK_OP_NORMAL)); + } else emit_move_insn (new_with_op0_mode, op0); diff --git a/gcc/function.c b/gcc/function.c index d1256f0fa5d..d1d2edb2f1f 100644 --- a/gcc/function.c +++ b/gcc/function.c @@ -2882,7 +2882,7 @@ assign_parm_setup_block_p (struct assign_parm_data_one *data) /* Only assign_parm_setup_block knows how to deal with register arguments that are padded at the least significant end. 
*/ if (REG_P (data->entry_parm) - && GET_MODE_SIZE (data->promoted_mode) < UNITS_PER_WORD + && known_lt (GET_MODE_SIZE (data->promoted_mode), UNITS_PER_WORD) && (BLOCK_REG_PADDING (data->passed_mode, data->passed_type, 1) == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD))) return true; @@ -2945,7 +2945,7 @@ assign_parm_setup_block (struct assign_parm_data_all *all, SET_DECL_ALIGN (parm, MAX (DECL_ALIGN (parm), BITS_PER_WORD)); stack_parm = assign_stack_local (BLKmode, size_stored, DECL_ALIGN (parm)); - if (GET_MODE_SIZE (GET_MODE (entry_parm)) == size) + if (known_eq (GET_MODE_SIZE (GET_MODE (entry_parm)), size)) PUT_MODE (stack_parm, GET_MODE (entry_parm)); set_mem_attributes (stack_parm, parm, 1); } @@ -4346,8 +4346,10 @@ static void pad_below (struct args_size *offset_ptr, machine_mode passed_mode, tree sizetree) { unsigned int align = PARM_BOUNDARY / BITS_PER_UNIT; - if (passed_mode != BLKmode) - offset_ptr->constant += -GET_MODE_SIZE (passed_mode) & (align - 1); + int misalign; + if (passed_mode != BLKmode + && known_misalignment (GET_MODE_SIZE (passed_mode), align, &misalign)) + offset_ptr->constant += -misalign & (align - 1); else { if (TREE_CODE (sizetree) != INTEGER_CST diff --git a/gcc/genmodes.c b/gcc/genmodes.c index 62302704f64..b134c1b7515 100644 --- a/gcc/genmodes.c +++ b/gcc/genmodes.c @@ -987,10 +987,10 @@ inline __attribute__((__always_inline__))\n\ #else\n\ extern __inline__ __attribute__((__always_inline__, __gnu_inline__))\n\ #endif\n\ -unsigned short\n\ +poly_uint16\n\ mode_size_inline (machine_mode mode)\n\ {\n\ - extern %sunsigned short mode_size[NUM_MACHINE_MODES];\n\ + extern %spoly_uint16_pod mode_size[NUM_MACHINE_MODES];\n\ gcc_assert (mode >= 0 && mode < NUM_MACHINE_MODES);\n\ switch (mode)\n\ {\n", adj_bytesize ? 
"" : "const "); @@ -1376,11 +1376,11 @@ emit_mode_size (void) int c; struct mode_data *m; - print_maybe_const_decl ("%sunsigned short", "mode_size", + print_maybe_const_decl ("%spoly_uint16_pod", "mode_size", "NUM_MACHINE_MODES", bytesize); for_all_modes (c, m) - tagged_printf ("%u", m->bytesize, m->name); + tagged_printf ("{ %u" ZERO_COEFFS " }", m->bytesize, m->name); print_closer (); } @@ -1647,17 +1647,33 @@ emit_mode_adjustments (void) \nvoid\ \ninit_adjust_machine_modes (void)\ \n{\ -\n size_t s ATTRIBUTE_UNUSED;"); +\n poly_uint16 ps ATTRIBUTE_UNUSED;\n\ + size_t s ATTRIBUTE_UNUSED;"); /* Size adjustments must be propagated to all containing modes. A size adjustment forces us to recalculate the alignment too. */ for (a = adj_bytesize; a; a = a->next) { - printf ("\n /* %s:%d */\n s = %s;\n", - a->file, a->line, a->adjustment); - printf (" mode_size[E_%smode] = s;\n", a->mode->name); - printf (" mode_unit_size[E_%smode] = s;\n", a->mode->name); - printf (" mode_base_align[E_%smode] = s & (~s + 1);\n", + printf ("\n /* %s:%d */\n", a->file, a->line); + switch (a->mode->cl) + { + case MODE_VECTOR_INT: + case MODE_VECTOR_FLOAT: + case MODE_VECTOR_FRACT: + case MODE_VECTOR_UFRACT: + case MODE_VECTOR_ACCUM: + case MODE_VECTOR_UACCUM: + printf (" ps = %s;\n", a->adjustment); + printf (" s = mode_unit_size[E_%smode];\n", a->mode->name); + break; + + default: + printf (" ps = s = %s;\n", a->adjustment); + printf (" mode_unit_size[E_%smode] = s;\n", a->mode->name); + break; + } + printf (" mode_size[E_%smode] = ps;\n", a->mode->name); + printf (" mode_base_align[E_%smode] = known_alignment (ps);\n", a->mode->name); for (m = a->mode->contained; m; m = m->next_cont) @@ -1678,11 +1694,12 @@ emit_mode_adjustments (void) case MODE_VECTOR_UFRACT: case MODE_VECTOR_ACCUM: case MODE_VECTOR_UACCUM: - printf (" mode_size[E_%smode] = %d*s;\n", + printf (" mode_size[E_%smode] = %d * ps;\n", m->name, m->ncomponents); printf (" mode_unit_size[E_%smode] = s;\n", m->name); - printf (" 
mode_base_align[E_%smode] = (%d*s) & (~(%d*s)+1);\n", - m->name, m->ncomponents, m->ncomponents); + printf (" mode_base_align[E_%smode]" + " = known_alignment (%d * ps);\n", + m->name, m->ncomponents); break; default: diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c index e99a78adb73..7e4cb74d7cc 100644 --- a/gcc/gimple-fold.c +++ b/gcc/gimple-fold.c @@ -3803,7 +3803,7 @@ optimize_atomic_compare_exchange_p (gimple *stmt) && optab_handler (sync_compare_and_swap_optab, mode) == CODE_FOR_nothing) return false; - if (int_size_in_bytes (etype) != GET_MODE_SIZE (mode)) + if (maybe_ne (int_size_in_bytes (etype), GET_MODE_SIZE (mode))) return false; return true; diff --git a/gcc/gimple-ssa-store-merging.c b/gcc/gimple-ssa-store-merging.c index 85de4e01ee6..9321177d761 100644 --- a/gcc/gimple-ssa-store-merging.c +++ b/gcc/gimple-ssa-store-merging.c @@ -3798,8 +3798,9 @@ lhs_valid_for_store_merging_p (tree lhs) static bool rhs_valid_for_store_merging_p (tree rhs) { - return native_encode_expr (rhs, NULL, - GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs)))) != 0; + unsigned HOST_WIDE_INT size; + return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size) + && native_encode_expr (rhs, NULL, size) != 0); } /* If MEM is a memory reference usable for store merging (either as diff --git a/gcc/ira-build.c b/gcc/ira-build.c index d90063eaa24..9ece3ae1d94 100644 --- a/gcc/ira-build.c +++ b/gcc/ira-build.c @@ -566,7 +566,7 @@ ira_create_allocno_objects (ira_allocno_t a) int n = ira_reg_class_max_nregs[aclass][mode]; int i; - if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2) + if (n != 2 || maybe_ne (GET_MODE_SIZE (mode), n * UNITS_PER_WORD)) n = 1; ALLOCNO_NUM_OBJECTS (a) = n; diff --git a/gcc/ira-color.c b/gcc/ira-color.c index 81d7bb464ed..43f5d57cf3e 100644 --- a/gcc/ira-color.c +++ b/gcc/ira-color.c @@ -3939,7 +3939,8 @@ coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p) regno_max_ref_mode[regno1]); mode2 = wider_subreg_mode (PSEUDO_REGNO_MODE 
(regno2), regno_max_ref_mode[regno2]); - if ((diff = GET_MODE_SIZE (mode2) - GET_MODE_SIZE (mode1)) != 0) + if ((diff = compare_sizes_for_sort (GET_MODE_SIZE (mode2), + GET_MODE_SIZE (mode1))) != 0) return diff; return regno1 - regno2; } @@ -4228,9 +4229,10 @@ ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n, machine_mode mode = wider_subreg_mode (PSEUDO_REGNO_MODE (ALLOCNO_REGNO (a)), reg_max_ref_mode[ALLOCNO_REGNO (a)]); - fprintf (ira_dump_file, " a%dr%d(%d,%d)", - ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a), - GET_MODE_SIZE (mode)); + fprintf (ira_dump_file, " a%dr%d(%d,", + ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a)); + print_dec (GET_MODE_SIZE (mode), ira_dump_file, SIGNED); + fprintf (ira_dump_file, ")\n"); } if (a == allocno) diff --git a/gcc/ira-costs.c b/gcc/ira-costs.c index 9008ab9acb4..2b4ae38f410 100644 --- a/gcc/ira-costs.c +++ b/gcc/ira-costs.c @@ -1368,12 +1368,12 @@ record_operand_costs (rtx_insn *insn, enum reg_class *pref) rtx src = SET_SRC (set); if (GET_CODE (dest) == SUBREG - && (GET_MODE_SIZE (GET_MODE (dest)) - == GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))) + && known_eq (GET_MODE_SIZE (GET_MODE (dest)), + GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))) dest = SUBREG_REG (dest); if (GET_CODE (src) == SUBREG - && (GET_MODE_SIZE (GET_MODE (src)) - == GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))) + && known_eq (GET_MODE_SIZE (GET_MODE (src)), + GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))) src = SUBREG_REG (src); if (REG_P (src) && REG_P (dest) && find_regno_note (insn, REG_DEAD, REGNO (src)) diff --git a/gcc/ira.c b/gcc/ira.c index b9c1f4aa489..b70e2c60b74 100644 --- a/gcc/ira.c +++ b/gcc/ira.c @@ -4046,9 +4046,9 @@ get_subreg_tracking_sizes (rtx x, HOST_WIDE_INT *outer_size, HOST_WIDE_INT *inner_size, HOST_WIDE_INT *start) { rtx reg = regno_reg_rtx[REGNO (SUBREG_REG (x))]; - *outer_size = GET_MODE_SIZE (GET_MODE (x)); - *inner_size = GET_MODE_SIZE (GET_MODE (reg)); - return SUBREG_BYTE (x).is_constant (start); + 
return (GET_MODE_SIZE (GET_MODE (x)).is_constant (outer_size) + && GET_MODE_SIZE (GET_MODE (reg)).is_constant (inner_size) + && SUBREG_BYTE (x).is_constant (start)); } /* Init LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM] for diff --git a/gcc/lower-subreg.c b/gcc/lower-subreg.c index 6666b7432bc..32e70dcbd12 100644 --- a/gcc/lower-subreg.c +++ b/gcc/lower-subreg.c @@ -110,7 +110,8 @@ static inline bool interesting_mode_p (machine_mode mode, unsigned int *bytes, unsigned int *words) { - *bytes = GET_MODE_SIZE (mode); + if (!GET_MODE_SIZE (mode).is_constant (bytes)) + return false; *words = CEIL (*bytes, UNITS_PER_WORD); return true; } @@ -667,8 +668,8 @@ simplify_gen_subreg_concatn (machine_mode outermode, rtx op, { rtx op2; - if ((GET_MODE_SIZE (GET_MODE (op)) - == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op)))) + if (known_eq (GET_MODE_SIZE (GET_MODE (op)), + GET_MODE_SIZE (GET_MODE (SUBREG_REG (op)))) && known_eq (SUBREG_BYTE (op), 0)) return simplify_gen_subreg_concatn (outermode, SUBREG_REG (op), GET_MODE (SUBREG_REG (op)), byte); @@ -869,8 +870,7 @@ resolve_simple_move (rtx set, rtx_insn *insn) if (GET_CODE (src) == SUBREG && resolve_reg_p (SUBREG_REG (src)) && (maybe_ne (SUBREG_BYTE (src), 0) - || (GET_MODE_SIZE (orig_mode) - != GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))) + || maybe_ne (orig_size, GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))) { real_dest = dest; dest = gen_reg_rtx (orig_mode); @@ -884,8 +884,8 @@ resolve_simple_move (rtx set, rtx_insn *insn) if (GET_CODE (dest) == SUBREG && resolve_reg_p (SUBREG_REG (dest)) && (maybe_ne (SUBREG_BYTE (dest), 0) - || (GET_MODE_SIZE (orig_mode) - != GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))) + || maybe_ne (orig_size, + GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))) { rtx reg, smove; rtx_insn *minsn; diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c index 8eeff0166e2..4d307af7011 100644 --- a/gcc/lra-constraints.c +++ b/gcc/lra-constraints.c @@ -591,7 +591,8 @@ get_reload_reg (enum op_type 
type, machine_mode mode, rtx original, { if (in_subreg_p) continue; - if (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (mode)) + if (maybe_lt (GET_MODE_SIZE (GET_MODE (reg)), + GET_MODE_SIZE (mode))) continue; reg = lowpart_subreg (mode, reg, GET_MODE (reg)); if (reg == NULL_RTX || GET_CODE (reg) != SUBREG) @@ -827,6 +828,7 @@ operands_match_p (rtx x, rtx y, int y_hard_regno) ((MODE) != VOIDmode \ && CONSTANT_P (X) \ && GET_CODE (X) != HIGH \ + && GET_MODE_SIZE (MODE).is_constant () \ && !targetm.cannot_force_const_mem (MODE, X)) /* True if C is a non-empty register class that has too few registers @@ -1394,7 +1396,7 @@ process_addr_reg (rtx *loc, bool check_only_p, rtx_insn **before, rtx_insn **aft -fno-split-wide-types specified. */ if (!REG_P (reg) || in_class_p (reg, cl, &new_class) - || GET_MODE_SIZE (mode) <= GET_MODE_SIZE (ptr_mode)) + || known_le (GET_MODE_SIZE (mode), GET_MODE_SIZE (ptr_mode))) loc = &SUBREG_REG (*loc); } @@ -1557,8 +1559,8 @@ simplify_operand_subreg (int nop, machine_mode reg_mode) a word. */ if (!(maybe_ne (GET_MODE_PRECISION (mode), GET_MODE_PRECISION (innermode)) - && GET_MODE_SIZE (mode) <= UNITS_PER_WORD - && GET_MODE_SIZE (innermode) <= UNITS_PER_WORD + && known_le (GET_MODE_SIZE (mode), UNITS_PER_WORD) + && known_le (GET_MODE_SIZE (innermode), UNITS_PER_WORD) && WORD_REGISTER_OPERATIONS) && (!(MEM_ALIGN (subst) < GET_MODE_ALIGNMENT (mode) && targetm.slow_unaligned_access (mode, MEM_ALIGN (subst))) @@ -4245,7 +4247,8 @@ curr_insn_transform (bool check_only_p) (ira_class_hard_regs[goal_alt[i]][0], GET_MODE (reg), byte, mode) >= 0))) || (partial_subreg_p (mode, GET_MODE (reg)) - && GET_MODE_SIZE (GET_MODE (reg)) <= UNITS_PER_WORD + && known_le (GET_MODE_SIZE (GET_MODE (reg)), + UNITS_PER_WORD) && WORD_REGISTER_OPERATIONS))) { /* An OP_INOUT is required when reloading a subreg of a @@ -4745,8 +4748,8 @@ lra_constraints (bool first_p) /* Prevent access beyond equivalent memory for paradoxical subregs. 
*/ || (MEM_P (x) - && (GET_MODE_SIZE (lra_reg_info[i].biggest_mode) - > GET_MODE_SIZE (GET_MODE (x)))) + && maybe_gt (GET_MODE_SIZE (lra_reg_info[i].biggest_mode), + GET_MODE_SIZE (GET_MODE (x)))) || (pic_offset_table_rtx && ((CONST_POOL_OK_P (PSEUDO_REGNO_MODE (i), x) && (targetm.preferred_reload_class diff --git a/gcc/lra-spills.c b/gcc/lra-spills.c index 46efc3ace1b..64162c76e1a 100644 --- a/gcc/lra-spills.c +++ b/gcc/lra-spills.c @@ -107,7 +107,7 @@ struct slot /* Maximum alignment required by all users of the slot. */ unsigned int align; /* Maximum size required by all users of the slot. */ - HOST_WIDE_INT size; + poly_int64 size; /* Memory representing the all stack slot. It can be different from memory representing a pseudo belonging to give stack slot because pseudo can be placed in a part of the corresponding stack slot. @@ -132,10 +132,10 @@ assign_mem_slot (int i) { rtx x = NULL_RTX; machine_mode mode = GET_MODE (regno_reg_rtx[i]); - HOST_WIDE_INT inherent_size = PSEUDO_REGNO_BYTES (i); + poly_int64 inherent_size = PSEUDO_REGNO_BYTES (i); machine_mode wider_mode = wider_subreg_mode (mode, lra_reg_info[i].biggest_mode); - HOST_WIDE_INT total_size = GET_MODE_SIZE (wider_mode); + poly_int64 total_size = GET_MODE_SIZE (wider_mode); poly_int64 adjust = 0; lra_assert (regno_reg_rtx[i] != NULL_RTX && REG_P (regno_reg_rtx[i]) @@ -191,16 +191,15 @@ pseudo_reg_slot_compare (const void *v1p, const void *v2p) const int regno1 = *(const int *) v1p; const int regno2 = *(const int *) v2p; int diff, slot_num1, slot_num2; - int total_size1, total_size2; slot_num1 = pseudo_slots[regno1].slot_num; slot_num2 = pseudo_slots[regno2].slot_num; if ((diff = slot_num1 - slot_num2) != 0) return (frame_pointer_needed || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? 
diff : -diff); - total_size1 = GET_MODE_SIZE (lra_reg_info[regno1].biggest_mode); - total_size2 = GET_MODE_SIZE (lra_reg_info[regno2].biggest_mode); - if ((diff = total_size2 - total_size1) != 0) + poly_int64 total_size1 = GET_MODE_SIZE (lra_reg_info[regno1].biggest_mode); + poly_int64 total_size2 = GET_MODE_SIZE (lra_reg_info[regno2].biggest_mode); + if ((diff = compare_sizes_for_sort (total_size2, total_size1)) != 0) return diff; return regno1 - regno2; } @@ -315,7 +314,8 @@ add_pseudo_to_slot (int regno, int slot_num) lra_reg_info[regno].biggest_mode); unsigned int align = spill_slot_alignment (mode); slots[slot_num].align = MAX (slots[slot_num].align, align); - slots[slot_num].size = MAX (slots[slot_num].size, GET_MODE_SIZE (mode)); + slots[slot_num].size = upper_bound (slots[slot_num].size, + GET_MODE_SIZE (mode)); if (slots[slot_num].regno < 0) { @@ -580,8 +580,10 @@ lra_spill (void) { for (i = 0; i < slots_num; i++) { - fprintf (lra_dump_file, " Slot %d regnos (width = %d):", i, - GET_MODE_SIZE (GET_MODE (slots[i].mem))); + fprintf (lra_dump_file, " Slot %d regnos (width = ", i); + print_dec (GET_MODE_SIZE (GET_MODE (slots[i].mem)), + lra_dump_file, SIGNED); + fprintf (lra_dump_file, "):"); for (curr_regno = slots[i].regno;; curr_regno = pseudo_slots[curr_regno].next - pseudo_slots) { diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c index afca7f421a3..46be029f255 100644 --- a/gcc/lto-streamer-in.c +++ b/gcc/lto-streamer-in.c @@ -1617,7 +1617,7 @@ lto_input_mode_table (struct lto_file_decl_data *file_data) { enum mode_class mclass = bp_unpack_enum (&bp, mode_class, MAX_MODE_CLASS); - unsigned int size = bp_unpack_value (&bp, 8); + poly_uint16 size = bp_unpack_poly_value (&bp, 16); poly_uint16 prec = bp_unpack_poly_value (&bp, 16); machine_mode inner = (machine_mode) bp_unpack_value (&bp, 8); poly_uint16 nunits = bp_unpack_poly_value (&bp, 16); @@ -1651,7 +1651,7 @@ lto_input_mode_table (struct lto_file_decl_data *file_data) pass ? 
mr = (machine_mode) (mr + 1) : mr = GET_MODE_WIDER_MODE (mr).else_void ()) if (GET_MODE_CLASS (mr) != mclass - || GET_MODE_SIZE (mr) != size + || maybe_ne (GET_MODE_SIZE (mr), size) || maybe_ne (GET_MODE_PRECISION (mr), prec) || (inner == m ? GET_MODE_INNER (mr) != mr diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c index 86ac8112fd2..8ffcecd494c 100644 --- a/gcc/lto-streamer-out.c +++ b/gcc/lto-streamer-out.c @@ -2810,7 +2810,7 @@ lto_write_mode_table (void) continue; bp_pack_value (&bp, m, 8); bp_pack_enum (&bp, mode_class, MAX_MODE_CLASS, GET_MODE_CLASS (m)); - bp_pack_value (&bp, GET_MODE_SIZE (m), 8); + bp_pack_poly_value (&bp, GET_MODE_SIZE (m), 16); bp_pack_poly_value (&bp, GET_MODE_PRECISION (m), 16); bp_pack_value (&bp, GET_MODE_INNER (m), 8); bp_pack_poly_value (&bp, GET_MODE_NUNITS (m), 16); diff --git a/gcc/machmode.h b/gcc/machmode.h index 60c918e39c7..8e918d6dd31 100644 --- a/gcc/machmode.h +++ b/gcc/machmode.h @@ -22,7 +22,7 @@ along with GCC; see the file COPYING3. If not see typedef opt_mode opt_machine_mode; -extern CONST_MODE_SIZE unsigned short mode_size[NUM_MACHINE_MODES]; +extern CONST_MODE_SIZE poly_uint16_pod mode_size[NUM_MACHINE_MODES]; extern const poly_uint16_pod mode_precision[NUM_MACHINE_MODES]; extern const unsigned char mode_inner[NUM_MACHINE_MODES]; extern const poly_uint16_pod mode_nunits[NUM_MACHINE_MODES]; @@ -514,7 +514,7 @@ complex_mode::includes_p (machine_mode m) /* Return the base GET_MODE_SIZE value for MODE. */ -ALWAYS_INLINE unsigned short +ALWAYS_INLINE poly_uint16 mode_to_bytes (machine_mode mode) { #if GCC_VERSION >= 4001 @@ -596,7 +596,29 @@ mode_to_nunits (machine_mode mode) /* Get the size in bytes of an object of mode MODE. 
*/ -#define GET_MODE_SIZE(MODE) (mode_to_bytes (MODE)) +#if ONLY_FIXED_SIZE_MODES +#define GET_MODE_SIZE(MODE) ((unsigned short) mode_to_bytes (MODE).coeffs[0]) +#else +ALWAYS_INLINE poly_uint16 +GET_MODE_SIZE (machine_mode mode) +{ + return mode_to_bytes (mode); +} + +template +ALWAYS_INLINE typename if_poly::type +GET_MODE_SIZE (const T &mode) +{ + return mode_to_bytes (mode); +} + +template +ALWAYS_INLINE typename if_nonpoly::type +GET_MODE_SIZE (const T &mode) +{ + return mode_to_bytes (mode).coeffs[0]; +} +#endif /* Get the size in bits of an object of mode MODE. */ @@ -761,9 +783,9 @@ protected: /* Return true if MODE has a fixed size. */ inline bool -fixed_size_mode::includes_p (machine_mode) +fixed_size_mode::includes_p (machine_mode mode) { - return true; + return mode_to_bytes (mode).is_constant (); } /* Wrapper for mode arguments to target macros, so that if a target diff --git a/gcc/omp-low.c b/gcc/omp-low.c index 8285d12b204..3fcda29d326 100644 --- a/gcc/omp-low.c +++ b/gcc/omp-low.c @@ -3404,7 +3404,8 @@ omp_clause_aligned_alignment (tree clause) tree type = lang_hooks.types.type_for_mode (mode, 1); if (type == NULL_TREE || TYPE_MODE (type) != mode) continue; - unsigned int nelts = GET_MODE_SIZE (vmode) / GET_MODE_SIZE (mode); + poly_uint64 nelts = exact_div (GET_MODE_SIZE (vmode), + GET_MODE_SIZE (mode)); type = build_vector_type (type, nelts); if (TYPE_MODE (type) != vmode) continue; diff --git a/gcc/optabs-query.c b/gcc/optabs-query.c index f823b98f8b5..d70380605b2 100644 --- a/gcc/optabs-query.c +++ b/gcc/optabs-query.c @@ -213,7 +213,7 @@ get_best_extraction_insn (extraction_insn *insn, FOR_EACH_MODE_FROM (mode_iter, mode) { mode = mode_iter.require (); - if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (field_mode) + if (maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (field_mode)) || TRULY_NOOP_TRUNCATION_MODES_P (insn->field_mode, field_mode)) break; diff --git a/gcc/optabs-tree.c b/gcc/optabs-tree.c index 69448cf82eb..60119cb442e 100644 --- 
a/gcc/optabs-tree.c +++ b/gcc/optabs-tree.c @@ -327,7 +327,7 @@ expand_vec_cond_expr_p (tree value_type, tree cmp_op_type, enum tree_code code) TYPE_MODE (cmp_op_type)) != CODE_FOR_nothing) return true; - if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode) + if (maybe_ne (GET_MODE_SIZE (value_mode), GET_MODE_SIZE (cmp_op_mode)) || maybe_ne (GET_MODE_NUNITS (value_mode), GET_MODE_NUNITS (cmp_op_mode))) return false; diff --git a/gcc/optabs.c b/gcc/optabs.c index 6fe0d76c716..daac5457223 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -5614,10 +5614,9 @@ rtx expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target) { enum insn_code icode; - unsigned int i, w, u; + unsigned int i, u; rtx tmp, sel_qi; - w = GET_MODE_SIZE (mode); u = GET_MODE_UNIT_SIZE (mode); if (!target || GET_MODE (target) != mode) @@ -5655,7 +5654,7 @@ expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target) /* Broadcast the low byte each element into each of its bytes. The encoding has U interleaved stepped patterns, one for each byte of an element. */ - vec_perm_builder const_sel (w, u, 3); + vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3); unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? 
u - 1 : 0; for (i = 0; i < 3; ++i) for (unsigned int j = 0; j < u; ++j) @@ -5758,7 +5757,7 @@ expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2, unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a)); - gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode) + gcc_assert (known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (cmp_op_mode)) && known_eq (GET_MODE_NUNITS (mode), GET_MODE_NUNITS (cmp_op_mode))); @@ -5887,7 +5886,7 @@ expand_mult_highpart (machine_mode mode, rtx op0, rtx op1, wmode = insn_data[icode].operand[0].mode; gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode), GET_MODE_NUNITS (mode))); - gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode)); + gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode))); create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode); create_input_operand (&eops[1], op0, mode); @@ -7035,10 +7034,12 @@ bool valid_multiword_target_p (rtx target) { machine_mode mode; - int i; + int i, size; mode = GET_MODE (target); - for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD) + if (!GET_MODE_SIZE (mode).is_constant (&size)) + return false; + for (i = 0; i < size; i += UNITS_PER_WORD) if (!validate_subreg (word_mode, mode, target, i)) return false; return true; diff --git a/gcc/recog.c b/gcc/recog.c index 4edda0c9e1e..d6aa9036f57 100644 --- a/gcc/recog.c +++ b/gcc/recog.c @@ -1945,7 +1945,7 @@ offsettable_address_addr_space_p (int strictp, machine_mode mode, rtx y, int (*addressp) (machine_mode, rtx, addr_space_t) = (strictp ? strict_memory_address_addr_space_p : memory_address_addr_space_p); - unsigned int mode_sz = GET_MODE_SIZE (mode); + poly_int64 mode_sz = GET_MODE_SIZE (mode); if (CONSTANT_ADDRESS_P (y)) return 1; @@ -1967,7 +1967,7 @@ offsettable_address_addr_space_p (int strictp, machine_mode mode, rtx y, Clearly that depends on the situation in which it's being used. However, the current situation in which we test 0xffffffff is less than ideal. Caveat user. 
*/ - if (mode_sz == 0) + if (known_eq (mode_sz, 0)) mode_sz = BIGGEST_ALIGNMENT / BITS_PER_UNIT; /* If the expression contains a constant term, @@ -1998,7 +1998,7 @@ offsettable_address_addr_space_p (int strictp, machine_mode mode, rtx y, go inside a LO_SUM here, so we do so as well. */ if (GET_CODE (y) == LO_SUM && mode != BLKmode - && mode_sz <= GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT) + && known_le (mode_sz, GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT)) z = gen_rtx_LO_SUM (address_mode, XEXP (y, 0), plus_constant (address_mode, XEXP (y, 1), mode_sz - 1)); diff --git a/gcc/regcprop.c b/gcc/regcprop.c index a9fbcae6f00..e2a78a2b0e1 100644 --- a/gcc/regcprop.c +++ b/gcc/regcprop.c @@ -406,8 +406,11 @@ maybe_mode_change (machine_mode orig_mode, machine_mode copy_mode, { int copy_nregs = hard_regno_nregs (copy_regno, copy_mode); int use_nregs = hard_regno_nregs (copy_regno, new_mode); - int copy_offset - = GET_MODE_SIZE (copy_mode) / copy_nregs * (copy_nregs - use_nregs); + poly_uint64 bytes_per_reg; + if (!can_div_trunc_p (GET_MODE_SIZE (copy_mode), + copy_nregs, &bytes_per_reg)) + return NULL_RTX; + poly_uint64 copy_offset = bytes_per_reg * (copy_nregs - use_nregs); poly_uint64 offset = subreg_size_lowpart_offset (GET_MODE_SIZE (new_mode) + copy_offset, GET_MODE_SIZE (orig_mode)); diff --git a/gcc/reginfo.c b/gcc/reginfo.c index ee2fd44c3f0..f4071dac8b4 100644 --- a/gcc/reginfo.c +++ b/gcc/reginfo.c @@ -631,14 +631,16 @@ choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED, /* We first look for the largest integer mode that can be validly held in REGNO. If none, we look for the largest floating-point mode. - If we still didn't find a valid mode, try CCmode. */ + If we still didn't find a valid mode, try CCmode. + The tests use maybe_gt rather than known_gt because we want (for example) + N V4SFs to win over plain V4SF even though N might be 1. 
*/ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT) if (hard_regno_nregs (regno, mode) == nregs && targetm.hard_regno_mode_ok (regno, mode) && (!call_saved || !targetm.hard_regno_call_part_clobbered (regno, mode)) - && GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode)) + && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) found_mode = mode; FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT) @@ -646,7 +648,7 @@ choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED, && targetm.hard_regno_mode_ok (regno, mode) && (!call_saved || !targetm.hard_regno_call_part_clobbered (regno, mode)) - && GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode)) + && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) found_mode = mode; FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT) @@ -654,7 +656,7 @@ choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED, && targetm.hard_regno_mode_ok (regno, mode) && (!call_saved || !targetm.hard_regno_call_part_clobbered (regno, mode)) - && GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode)) + && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) found_mode = mode; FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT) @@ -662,7 +664,7 @@ choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED, && targetm.hard_regno_mode_ok (regno, mode) && (!call_saved || !targetm.hard_regno_call_part_clobbered (regno, mode)) - && GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode)) + && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) found_mode = mode; if (found_mode != VOIDmode) @@ -1299,8 +1301,8 @@ record_subregs_of_mode (rtx subreg, bool partial_def) The size of the outer mode must ordered wrt the size of the inner mode's registers, since otherwise we wouldn't know at compile time how many registers the outer mode occupies. 
*/ - poly_uint64 size = MAX (REGMODE_NATURAL_SIZE (shape.inner_mode), - GET_MODE_SIZE (shape.outer_mode)); + poly_uint64 size = ordered_max (REGMODE_NATURAL_SIZE (shape.inner_mode), + GET_MODE_SIZE (shape.outer_mode)); gcc_checking_assert (known_lt (size, GET_MODE_SIZE (shape.inner_mode))); if (known_ge (shape.offset, size)) shape.offset -= size; diff --git a/gcc/regrename.c b/gcc/regrename.c index c3c4d2e9bdb..f930a65e0c1 100644 --- a/gcc/regrename.c +++ b/gcc/regrename.c @@ -1697,9 +1697,11 @@ build_def_use (basic_block bb) not already tracking such a reg, we won't start here, and we must instead make sure to make the operand visible to the machinery that tracks hard registers. */ + machine_mode i_mode = recog_data.operand_mode[i]; + machine_mode matches_mode = recog_data.operand_mode[matches]; if (matches >= 0 - && (GET_MODE_SIZE (recog_data.operand_mode[i]) - != GET_MODE_SIZE (recog_data.operand_mode[matches])) + && maybe_ne (GET_MODE_SIZE (i_mode), + GET_MODE_SIZE (matches_mode)) && !verify_reg_in_set (op, &live_in_chains)) { untracked_operands |= 1 << i; diff --git a/gcc/regstat.c b/gcc/regstat.c index e3141d6e344..beeea1f0cd5 100644 --- a/gcc/regstat.c +++ b/gcc/regstat.c @@ -436,8 +436,12 @@ dump_reg_info (FILE *file) else if (REG_N_CALLS_CROSSED (i)) fprintf (file, "; crosses %d calls", REG_N_CALLS_CROSSED (i)); if (regno_reg_rtx[i] != NULL - && PSEUDO_REGNO_BYTES (i) != UNITS_PER_WORD) - fprintf (file, "; %d bytes", PSEUDO_REGNO_BYTES (i)); + && maybe_ne (PSEUDO_REGNO_BYTES (i), UNITS_PER_WORD)) + { + fprintf (file, "; "); + print_dec (PSEUDO_REGNO_BYTES (i), file, SIGNED); + fprintf (file, " bytes"); + } rclass = reg_preferred_class (i); altclass = reg_alternate_class (i); diff --git a/gcc/reload.c b/gcc/reload.c index ec0c05a13db..88299a8a905 100644 --- a/gcc/reload.c +++ b/gcc/reload.c @@ -823,9 +823,11 @@ static bool complex_word_subreg_p (machine_mode outer_mode, rtx reg) { machine_mode inner_mode = GET_MODE (reg); - return (GET_MODE_SIZE 
(outer_mode) <= UNITS_PER_WORD - && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD - && GET_MODE_SIZE (inner_mode) / UNITS_PER_WORD != REG_NREGS (reg)); + poly_uint64 reg_words = REG_NREGS (reg) * UNITS_PER_WORD; + return (known_le (GET_MODE_SIZE (outer_mode), UNITS_PER_WORD) + && maybe_gt (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD) + && !known_equal_after_align_up (GET_MODE_SIZE (inner_mode), + reg_words, UNITS_PER_WORD)); } /* Return true if X is a SUBREG that will need reloading of its SUBREG_REG @@ -1061,7 +1063,7 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, && REGNO (SUBREG_REG (in)) >= FIRST_PSEUDO_REGISTER) || MEM_P (SUBREG_REG (in))) && (paradoxical_subreg_p (inmode, GET_MODE (SUBREG_REG (in))) - || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD + || (known_le (GET_MODE_SIZE (inmode), UNITS_PER_WORD) && is_a (GET_MODE (SUBREG_REG (in)), &inner_mode) && GET_MODE_SIZE (inner_mode) <= UNITS_PER_WORD @@ -1069,9 +1071,10 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, && LOAD_EXTEND_OP (inner_mode) != UNKNOWN) || (WORD_REGISTER_OPERATIONS && partial_subreg_p (inmode, GET_MODE (SUBREG_REG (in))) - && ((GET_MODE_SIZE (inmode) - 1) / UNITS_PER_WORD == - ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) - 1) - / UNITS_PER_WORD))))) + && (known_equal_after_align_down + (GET_MODE_SIZE (inmode) - 1, + GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) - 1, + UNITS_PER_WORD))))) || (REG_P (SUBREG_REG (in)) && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER /* The case where out is nonzero @@ -1099,7 +1102,8 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, && MEM_P (in)) /* This is supposed to happen only for paradoxical subregs made by combine.c. (SUBREG (MEM)) isn't supposed to occur other ways. 
*/ - gcc_assert (GET_MODE_SIZE (GET_MODE (in)) <= GET_MODE_SIZE (inmode)); + gcc_assert (known_le (GET_MODE_SIZE (GET_MODE (in)), + GET_MODE_SIZE (inmode))); inmode = GET_MODE (in); } @@ -1158,16 +1162,17 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, && (paradoxical_subreg_p (outmode, GET_MODE (SUBREG_REG (out))) || (WORD_REGISTER_OPERATIONS && partial_subreg_p (outmode, GET_MODE (SUBREG_REG (out))) - && ((GET_MODE_SIZE (outmode) - 1) / UNITS_PER_WORD == - ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) - 1) - / UNITS_PER_WORD))))) + && (known_equal_after_align_down + (GET_MODE_SIZE (outmode) - 1, + GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) - 1, + UNITS_PER_WORD))))) || (REG_P (SUBREG_REG (out)) && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER /* The case of a word mode subreg is handled differently in the following statement. */ - && ! (GET_MODE_SIZE (outmode) <= UNITS_PER_WORD - && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) - > UNITS_PER_WORD)) + && ! (known_le (GET_MODE_SIZE (outmode), UNITS_PER_WORD) + && maybe_gt (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))), + UNITS_PER_WORD)) && !targetm.hard_regno_mode_ok (subreg_regno (out), outmode)) || (secondary_reload_class (0, rclass, outmode, out) != NO_REGS && (secondary_reload_class (0, rclass, GET_MODE (SUBREG_REG (out)), @@ -1185,8 +1190,8 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, outloc = &SUBREG_REG (out); out = *outloc; gcc_assert (WORD_REGISTER_OPERATIONS || !MEM_P (out) - || GET_MODE_SIZE (GET_MODE (out)) - <= GET_MODE_SIZE (outmode)); + || known_le (GET_MODE_SIZE (GET_MODE (out)), + GET_MODE_SIZE (outmode))); outmode = GET_MODE (out); } @@ -1593,13 +1598,13 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, What's going on here. 
*/ && (in != out || (GET_CODE (in) == SUBREG - && (((GET_MODE_SIZE (GET_MODE (in)) + (UNITS_PER_WORD - 1)) - / UNITS_PER_WORD) - == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) - + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))) + && (known_equal_after_align_up + (GET_MODE_SIZE (GET_MODE (in)), + GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))), + UNITS_PER_WORD)))) /* Make sure the operand fits in the reg that dies. */ - && (GET_MODE_SIZE (rel_mode) - <= GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))) + && known_le (GET_MODE_SIZE (rel_mode), + GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))) && targetm.hard_regno_mode_ok (regno, inmode) && targetm.hard_regno_mode_ok (regno, outmode)) { @@ -1937,9 +1942,9 @@ find_dummy_reload (rtx real_in, rtx real_out, rtx *inloc, rtx *outloc, /* If operands exceed a word, we can't use either of them unless they have the same size. */ - if (GET_MODE_SIZE (outmode) != GET_MODE_SIZE (inmode) - && (GET_MODE_SIZE (outmode) > UNITS_PER_WORD - || GET_MODE_SIZE (inmode) > UNITS_PER_WORD)) + if (maybe_ne (GET_MODE_SIZE (outmode), GET_MODE_SIZE (inmode)) + && (maybe_gt (GET_MODE_SIZE (outmode), UNITS_PER_WORD) + || maybe_gt (GET_MODE_SIZE (inmode), UNITS_PER_WORD))) return 0; /* Note that {in,out}_offset are needed only when 'in' or 'out' @@ -2885,8 +2890,8 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known, if (replace && MEM_P (op) && REG_P (reg) - && (GET_MODE_SIZE (GET_MODE (reg)) - >= GET_MODE_SIZE (GET_MODE (op))) + && known_ge (GET_MODE_SIZE (GET_MODE (reg)), + GET_MODE_SIZE (GET_MODE (op))) && reg_equiv_constant (REGNO (reg)) == 0) set_unique_reg_note (emit_insn_before (gen_rtx_USE (VOIDmode, reg), insn), @@ -3127,8 +3132,8 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known, && (paradoxical_subreg_p (operand_mode[i], GET_MODE (operand))))) || BYTES_BIG_ENDIAN - || ((GET_MODE_SIZE (operand_mode[i]) - <= UNITS_PER_WORD) + || (known_le (GET_MODE_SIZE (operand_mode[i]), + UNITS_PER_WORD) && (is_a (GET_MODE 
(operand), &inner_mode)) && (GET_MODE_SIZE (inner_mode) @@ -3625,7 +3630,7 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known, if (! win && ! did_match && this_alternative[i] != NO_REGS - && GET_MODE_SIZE (operand_mode[i]) <= UNITS_PER_WORD + && known_le (GET_MODE_SIZE (operand_mode[i]), UNITS_PER_WORD) && reg_class_size [(int) preferred_class[i]] > 0 && ! small_register_class_p (preferred_class[i])) { @@ -6146,8 +6151,9 @@ find_reloads_subreg_address (rtx x, int opnum, enum reload_type type, if (WORD_REGISTER_OPERATIONS && partial_subreg_p (outer_mode, inner_mode) - && ((GET_MODE_SIZE (outer_mode) - 1) / UNITS_PER_WORD - == (GET_MODE_SIZE (inner_mode) - 1) / UNITS_PER_WORD)) + && known_equal_after_align_down (GET_MODE_SIZE (outer_mode) - 1, + GET_MODE_SIZE (inner_mode) - 1, + UNITS_PER_WORD)) return NULL; /* Since we don't attempt to handle paradoxical subregs, we can just diff --git a/gcc/reload1.c b/gcc/reload1.c index 85936a86bde..a4cc3ee02ea 100644 --- a/gcc/reload1.c +++ b/gcc/reload1.c @@ -2829,8 +2829,8 @@ eliminate_regs_1 (rtx x, machine_mode mem_mode, rtx insn, if (new_rtx != SUBREG_REG (x)) { - int x_size = GET_MODE_SIZE (GET_MODE (x)); - int new_size = GET_MODE_SIZE (GET_MODE (new_rtx)); + poly_int64 x_size = GET_MODE_SIZE (GET_MODE (x)); + poly_int64 new_size = GET_MODE_SIZE (GET_MODE (new_rtx)); if (MEM_P (new_rtx) && ((partial_subreg_p (GET_MODE (x), GET_MODE (new_rtx)) @@ -2842,9 +2842,10 @@ eliminate_regs_1 (rtx x, machine_mode mem_mode, rtx insn, So if the number of words is the same, preserve the subreg so that push_reload can see it. 
*/ && !(WORD_REGISTER_OPERATIONS - && (x_size - 1) / UNITS_PER_WORD - == (new_size -1 ) / UNITS_PER_WORD)) - || x_size == new_size) + && known_equal_after_align_down (x_size - 1, + new_size - 1, + UNITS_PER_WORD))) + || known_eq (x_size, new_size)) ) return adjust_address_nv (new_rtx, GET_MODE (x), SUBREG_BYTE (x)); else if (insn && GET_CODE (insn) == DEBUG_INSN) diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c index 54b14d8ab3d..b7283910f74 100644 --- a/gcc/rtlanal.c +++ b/gcc/rtlanal.c @@ -3346,7 +3346,7 @@ for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data) case PRE_INC: case POST_INC: { - int size = GET_MODE_SIZE (GET_MODE (mem)); + poly_int64 size = GET_MODE_SIZE (GET_MODE (mem)); rtx r1 = XEXP (x, 0); rtx c = gen_int_mode (size, GET_MODE (r1)); return fn (mem, x, r1, r1, c, data); @@ -3355,7 +3355,7 @@ for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data) case PRE_DEC: case POST_DEC: { - int size = GET_MODE_SIZE (GET_MODE (mem)); + poly_int64 size = GET_MODE_SIZE (GET_MODE (mem)); rtx r1 = XEXP (x, 0); rtx c = gen_int_mode (-size, GET_MODE (r1)); return fn (mem, x, r1, r1, c, data); @@ -4194,7 +4194,7 @@ rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code, /* A size N times larger than UNITS_PER_WORD likely needs N times as many insns, taking N times as long. */ - factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD; + factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD; if (factor == 0) factor = 1; @@ -4225,7 +4225,7 @@ rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code, /* A SET doesn't have a mode, so let's look at the SET_DEST to get the mode for the factor. 
*/ mode = GET_MODE (SET_DEST (x)); - factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD; + factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD; if (factor == 0) factor = 1; /* FALLTHRU */ diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c index a20782ac02c..e7273a4fdb4 100644 --- a/gcc/simplify-rtx.c +++ b/gcc/simplify-rtx.c @@ -263,7 +263,7 @@ avoid_constant_pool_reference (rtx x) If that fails we have no choice but to return the original memory. */ if (offset == 0 && cmode == GET_MODE (x)) return c; - else if (offset >= 0 && offset < GET_MODE_SIZE (cmode)) + else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode))) { rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset); if (tem && CONSTANT_P (tem)) @@ -3821,13 +3821,13 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode, && GET_CODE (trueop0) == VEC_CONCAT) { rtx vec = trueop0; - int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode); + offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode); /* Try to find the element in the VEC_CONCAT. 
*/ while (GET_MODE (vec) != mode && GET_CODE (vec) == VEC_CONCAT) { - HOST_WIDE_INT vec_size; + poly_int64 vec_size; if (CONST_INT_P (XEXP (vec, 0))) { @@ -3842,13 +3842,15 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode, else vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0))); - if (offset < vec_size) + if (known_lt (offset, vec_size)) vec = XEXP (vec, 0); - else + else if (known_ge (offset, vec_size)) { offset -= vec_size; vec = XEXP (vec, 1); } + else + break; vec = avoid_constant_pool_reference (vec); } @@ -3917,8 +3919,9 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode, : GET_MODE_INNER (mode)); gcc_assert (VECTOR_MODE_P (mode)); - gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode) - == GET_MODE_SIZE (mode)); + gcc_assert (known_eq (GET_MODE_SIZE (op0_mode) + + GET_MODE_SIZE (op1_mode), + GET_MODE_SIZE (mode))); if (VECTOR_MODE_P (op0_mode)) gcc_assert (GET_MODE_INNER (mode) @@ -6315,10 +6318,12 @@ simplify_subreg (machine_mode outermode, rtx op, gcc_assert (GET_MODE (op) == innermode || GET_MODE (op) == VOIDmode); - if (!multiple_p (byte, GET_MODE_SIZE (outermode))) + poly_uint64 outersize = GET_MODE_SIZE (outermode); + if (!multiple_p (byte, outersize)) return NULL_RTX; - if (maybe_ge (byte, GET_MODE_SIZE (innermode))) + poly_uint64 innersize = GET_MODE_SIZE (innermode); + if (maybe_ge (byte, innersize)) return NULL_RTX; if (outermode == innermode && known_eq (byte, 0U)) @@ -6363,6 +6368,7 @@ simplify_subreg (machine_mode outermode, rtx op, if (GET_CODE (op) == SUBREG) { machine_mode innermostmode = GET_MODE (SUBREG_REG (op)); + poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode); rtx newx; if (outermode == innermostmode @@ -6380,12 +6386,10 @@ simplify_subreg (machine_mode outermode, rtx op, /* See whether resulting subreg will be paradoxical. */ if (!paradoxical_subreg_p (outermode, innermostmode)) { - /* In nonparadoxical subregs we can't handle negative offsets. 
*/ - if (maybe_lt (final_offset, 0)) - return NULL_RTX; /* Bail out in case resulting subreg would be incorrect. */ - if (!multiple_p (final_offset, GET_MODE_SIZE (outermode)) - || maybe_ge (final_offset, GET_MODE_SIZE (innermostmode))) + if (maybe_lt (final_offset, 0) + || maybe_ge (poly_uint64 (final_offset), innermostsize) + || !multiple_p (final_offset, outersize)) return NULL_RTX; } else @@ -6410,9 +6414,8 @@ simplify_subreg (machine_mode outermode, rtx op, if (SUBREG_PROMOTED_VAR_P (op) && SUBREG_PROMOTED_SIGN (op) >= 0 && GET_MODE_CLASS (outermode) == MODE_INT - && IN_RANGE (GET_MODE_SIZE (outermode), - GET_MODE_SIZE (innermode), - GET_MODE_SIZE (innermostmode)) + && known_ge (outersize, innersize) + && known_le (outersize, innermostsize) && subreg_lowpart_p (newx)) { SUBREG_PROMOTED_VAR_P (newx) = 1; @@ -6462,7 +6465,7 @@ simplify_subreg (machine_mode outermode, rtx op, have instruction to move the whole thing. */ && (! MEM_VOLATILE_P (op) || ! have_insn_for (SET, innermode)) - && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op))) + && known_le (outersize, innersize)) return adjust_address_nv (op, outermode, byte); /* Handle complex or vector values represented as CONCAT or VEC_CONCAT @@ -6470,14 +6473,13 @@ simplify_subreg (machine_mode outermode, rtx op, if (GET_CODE (op) == CONCAT || GET_CODE (op) == VEC_CONCAT) { - unsigned int part_size; poly_uint64 final_offset; rtx part, res; machine_mode part_mode = GET_MODE (XEXP (op, 0)); if (part_mode == VOIDmode) part_mode = GET_MODE_INNER (GET_MODE (op)); - part_size = GET_MODE_SIZE (part_mode); + poly_uint64 part_size = GET_MODE_SIZE (part_mode); if (known_lt (byte, part_size)) { part = XEXP (op, 0); @@ -6491,7 +6493,7 @@ simplify_subreg (machine_mode outermode, rtx op, else return NULL_RTX; - if (maybe_gt (final_offset + GET_MODE_SIZE (outermode), part_size)) + if (maybe_gt (final_offset + outersize, part_size)) return NULL_RTX; part_mode = GET_MODE (part); diff --git a/gcc/targhooks.c 
b/gcc/targhooks.c index ebe43e99a64..e064dd8983a 100644 --- a/gcc/targhooks.c +++ b/gcc/targhooks.c @@ -795,7 +795,9 @@ default_function_arg_padding (machine_mode mode, const_tree type) size = int_size_in_bytes (type); } else - size = GET_MODE_SIZE (mode); + /* Targets with variable-sized modes must override this hook + and handle variable-sized modes explicitly. */ + size = GET_MODE_SIZE (mode).to_constant (); if (size < (PARM_BOUNDARY / BITS_PER_UNIT)) return PAD_DOWNWARD; @@ -1520,7 +1522,9 @@ default_addr_space_convert (rtx op ATTRIBUTE_UNUSED, unsigned int default_hard_regno_nregs (unsigned int, machine_mode mode) { - return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD); + /* Targets with variable-sized modes must provide their own definition + of this hook. */ + return CEIL (GET_MODE_SIZE (mode).to_constant (), UNITS_PER_WORD); } bool @@ -1846,7 +1850,10 @@ default_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED, return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass, MACRO_MODE (mode)); #else - return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD); + /* Targets with variable-sized modes must provide their own definition + of this hook. 
*/ + unsigned int size = GET_MODE_SIZE (mode).to_constant (); + return (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD; #endif } diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c index a0cc1edbfc5..eb7a89a9885 100644 --- a/gcc/tree-cfg.c +++ b/gcc/tree-cfg.c @@ -4177,8 +4177,8 @@ verify_gimple_assign_binary (gassign *stmt) || (!INTEGRAL_TYPE_P (lhs_type) && !SCALAR_FLOAT_TYPE_P (lhs_type)))) || !useless_type_conversion_p (lhs_type, rhs2_type) - || (GET_MODE_SIZE (element_mode (rhs2_type)) - < 2 * GET_MODE_SIZE (element_mode (rhs1_type)))) + || maybe_lt (GET_MODE_SIZE (element_mode (rhs2_type)), + 2 * GET_MODE_SIZE (element_mode (rhs1_type)))) { error ("type mismatch in widening sum reduction"); debug_generic_expr (lhs_type); @@ -4197,8 +4197,8 @@ verify_gimple_assign_binary (gassign *stmt) if (TREE_CODE (rhs1_type) != VECTOR_TYPE || TREE_CODE (lhs_type) != VECTOR_TYPE || !types_compatible_p (rhs1_type, rhs2_type) - || (GET_MODE_SIZE (element_mode (lhs_type)) - != 2 * GET_MODE_SIZE (element_mode (rhs1_type)))) + || maybe_ne (GET_MODE_SIZE (element_mode (lhs_type)), + 2 * GET_MODE_SIZE (element_mode (rhs1_type)))) { error ("type mismatch in vector widening multiplication"); debug_generic_expr (lhs_type); @@ -4231,8 +4231,8 @@ verify_gimple_assign_binary (gassign *stmt) || (INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type)) == INTEGRAL_TYPE_P (TREE_TYPE (lhs_type)))) || !types_compatible_p (rhs1_type, rhs2_type) - || (GET_MODE_SIZE (element_mode (rhs1_type)) - != 2 * GET_MODE_SIZE (element_mode (lhs_type)))) + || maybe_ne (GET_MODE_SIZE (element_mode (rhs1_type)), + 2 * GET_MODE_SIZE (element_mode (lhs_type)))) { error ("type mismatch in vector pack expression"); debug_generic_expr (lhs_type); @@ -4532,8 +4532,8 @@ verify_gimple_assign_ternary (gassign *stmt) && !SCALAR_FLOAT_TYPE_P (lhs_type)))) || !types_compatible_p (rhs1_type, rhs2_type) || !useless_type_conversion_p (lhs_type, rhs3_type) - || (GET_MODE_SIZE (element_mode (rhs3_type)) - < 2 * GET_MODE_SIZE (element_mode 
(rhs1_type)))) + || maybe_lt (GET_MODE_SIZE (element_mode (rhs3_type)), + 2 * GET_MODE_SIZE (element_mode (rhs1_type)))) { error ("type mismatch in dot product reduction"); debug_generic_expr (lhs_type); diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c index c290de28fdc..5910745d096 100644 --- a/gcc/tree-inline.c +++ b/gcc/tree-inline.c @@ -3808,10 +3808,11 @@ estimate_move_cost (tree type, bool ARG_UNUSED (speed_p)) if (TREE_CODE (type) == VECTOR_TYPE) { scalar_mode inner = SCALAR_TYPE_MODE (TREE_TYPE (type)); - machine_mode simd - = targetm.vectorize.preferred_simd_mode (inner); - int simd_mode_size = GET_MODE_SIZE (simd); - return ((GET_MODE_SIZE (TYPE_MODE (type)) + simd_mode_size - 1) + machine_mode simd = targetm.vectorize.preferred_simd_mode (inner); + int orig_mode_size + = estimated_poly_value (GET_MODE_SIZE (TYPE_MODE (type))); + int simd_mode_size = estimated_poly_value (GET_MODE_SIZE (simd)); + return ((orig_mode_size + simd_mode_size - 1) / simd_mode_size); } diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c index 8ddef997fff..1aad176ccbd 100644 --- a/gcc/tree-ssa-forwprop.c +++ b/gcc/tree-ssa-forwprop.c @@ -2044,8 +2044,8 @@ simplify_vector_constructor (gimple_stmt_iterator *gsi) op1 = gimple_assign_rhs1 (def_stmt); if (conv_code == ERROR_MARK) { - if (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (elt->value))) - != GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op1)))) + if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (elt->value))), + GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op1))))) return false; conv_code = code; } @@ -2119,8 +2119,8 @@ simplify_vector_constructor (gimple_stmt_iterator *gsi) = build_vector_type (build_nonstandard_integer_type (elem_size, 1), nelts); if (GET_MODE_CLASS (TYPE_MODE (mask_type)) != MODE_VECTOR_INT - || GET_MODE_SIZE (TYPE_MODE (mask_type)) - != GET_MODE_SIZE (TYPE_MODE (type))) + || maybe_ne (GET_MODE_SIZE (TYPE_MODE (mask_type)), + GET_MODE_SIZE (TYPE_MODE (type)))) return false; op2 = vec_perm_indices_to_tree (mask_type, 
indices); if (conv_code == ERROR_MARK) diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c index 17838c26644..63f01d22344 100644 --- a/gcc/tree-ssa-loop-ivopts.c +++ b/gcc/tree-ssa-loop-ivopts.c @@ -3148,10 +3148,10 @@ add_autoinc_candidates (struct ivopts_data *data, tree base, tree step, mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p)); if (((USE_LOAD_PRE_INCREMENT (mem_mode) || USE_STORE_PRE_INCREMENT (mem_mode)) - && GET_MODE_SIZE (mem_mode) == cstepi) + && known_eq (GET_MODE_SIZE (mem_mode), cstepi)) || ((USE_LOAD_PRE_DECREMENT (mem_mode) || USE_STORE_PRE_DECREMENT (mem_mode)) - && GET_MODE_SIZE (mem_mode) == -cstepi)) + && known_eq (GET_MODE_SIZE (mem_mode), -cstepi))) { enum tree_code code = MINUS_EXPR; tree new_base; @@ -3170,10 +3170,10 @@ add_autoinc_candidates (struct ivopts_data *data, tree base, tree step, } if (((USE_LOAD_POST_INCREMENT (mem_mode) || USE_STORE_POST_INCREMENT (mem_mode)) - && GET_MODE_SIZE (mem_mode) == cstepi) + && known_eq (GET_MODE_SIZE (mem_mode), cstepi)) || ((USE_LOAD_POST_DECREMENT (mem_mode) || USE_STORE_POST_DECREMENT (mem_mode)) - && GET_MODE_SIZE (mem_mode) == -cstepi)) + && known_eq (GET_MODE_SIZE (mem_mode), -cstepi))) { add_candidate_1 (data, base, step, important, IP_AFTER_USE, use, use->stmt); @@ -4295,7 +4295,7 @@ get_address_cost_ainc (poly_int64 ainc_step, poly_int64 ainc_offset, ainc_cost_data_list[idx] = data; } - HOST_WIDE_INT msize = GET_MODE_SIZE (mem_mode); + poly_int64 msize = GET_MODE_SIZE (mem_mode); if (known_eq (ainc_offset, 0) && known_eq (msize, ainc_step)) return comp_cost (data->costs[AINC_POST_INC], 0); if (known_eq (ainc_offset, 0) && known_eq (msize, -ainc_step)) diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c index c1005eec269..26431203f11 100644 --- a/gcc/tree-vect-data-refs.c +++ b/gcc/tree-vect-data-refs.c @@ -2139,11 +2139,22 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo) vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); gcc_assert (vectype); 
+ /* At present we don't support versioning for alignment + with variable VF, since there's no guarantee that the + VF is a power of two. We could relax this if we added + a way of enforcing a power-of-two size. */ + unsigned HOST_WIDE_INT size; + if (!GET_MODE_SIZE (TYPE_MODE (vectype)).is_constant (&size)) + { + do_versioning = false; + break; + } + /* The rightmost bits of an aligned address must be zeros. Construct the mask needed for this test. For example, GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the mask must be 15 = 0xf. */ - mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1; + mask = size - 1; /* FORNOW: use the same mask to test all potentially unaligned references in the loop. The vectorizer currently supports @@ -6078,8 +6089,8 @@ vect_supportable_dr_alignment (struct data_reference *dr, ; else if (!loop_vinfo || (nested_in_vect_loop - && (TREE_INT_CST_LOW (DR_STEP (dr)) - != GET_MODE_SIZE (TYPE_MODE (vectype))))) + && maybe_ne (TREE_INT_CST_LOW (DR_STEP (dr)), + GET_MODE_SIZE (TYPE_MODE (vectype))))) return dr_explicit_realign; else return dr_explicit_realign_optimized; diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c index a74992bbc9c..2fd11df6c09 100644 --- a/gcc/tree-vect-loop.c +++ b/gcc/tree-vect-loop.c @@ -527,8 +527,8 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo) return false; } - if ((GET_MODE_SIZE (TYPE_MODE (vectype)) - != GET_MODE_SIZE (TYPE_MODE (vf_vectype)))) + if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)), + GET_MODE_SIZE (TYPE_MODE (vf_vectype)))) { if (dump_enabled_p ()) { @@ -6156,7 +6156,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, if (dump_enabled_p ()) dump_printf (MSG_NOTE, "op not supported by target.\n"); - if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD + if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD) || !vect_worthwhile_without_simd_p (loop_vinfo, code)) return false; diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c index 
341d772fe8f..27a8f7bb7ae 100644 --- a/gcc/tree-vect-stmts.c +++ b/gcc/tree-vect-stmts.c @@ -4776,8 +4776,8 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi, || code == VIEW_CONVERT_EXPR) && (!vectype_in || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits) - || (GET_MODE_SIZE (TYPE_MODE (vectype)) - != GET_MODE_SIZE (TYPE_MODE (vectype_in))))) + || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)), + GET_MODE_SIZE (TYPE_MODE (vectype_in))))) return false; /* We do not handle bit-precision changes. */ @@ -5147,7 +5147,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi, dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "op not supported by target.\n"); /* Check only during analysis. */ - if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD + if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD) || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code))) return false; @@ -5471,7 +5471,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi, dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "op not supported by target.\n"); /* Check only during analysis. */ - if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD + if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD) || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code))) return false; if (dump_enabled_p ()) @@ -7483,7 +7483,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, nested within an outer-loop that is being vectorized. 
*/ if (nested_in_vect_loop - && (DR_STEP_ALIGNMENT (dr) % GET_MODE_SIZE (TYPE_MODE (vectype))) != 0) + && !multiple_p (DR_STEP_ALIGNMENT (dr), + GET_MODE_SIZE (TYPE_MODE (vectype)))) { gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized); compute_in_loop = true; diff --git a/gcc/tree.c b/gcc/tree.c index d2631294662..92376d179ac 100644 --- a/gcc/tree.c +++ b/gcc/tree.c @@ -10570,9 +10570,9 @@ build_same_sized_truth_vector_type (tree vectype) if (VECTOR_BOOLEAN_TYPE_P (vectype)) return vectype; - unsigned HOST_WIDE_INT size = GET_MODE_SIZE (TYPE_MODE (vectype)); + poly_uint64 size = GET_MODE_SIZE (TYPE_MODE (vectype)); - if (!size) + if (known_eq (size, 0U)) size = tree_to_uhwi (TYPE_SIZE_UNIT (vectype)); return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype), size); diff --git a/gcc/valtrack.c b/gcc/valtrack.c index 20d1296f32b..4741d001071 100644 --- a/gcc/valtrack.c +++ b/gcc/valtrack.c @@ -94,13 +94,15 @@ cleanup_auto_inc_dec (rtx src, machine_mode mem_mode ATTRIBUTE_UNUSED) case PRE_INC: case PRE_DEC: - gcc_assert (mem_mode != VOIDmode && mem_mode != BLKmode); - return gen_rtx_PLUS (GET_MODE (x), - cleanup_auto_inc_dec (XEXP (x, 0), mem_mode), - gen_int_mode (code == PRE_INC - ? 
GET_MODE_SIZE (mem_mode) - : -GET_MODE_SIZE (mem_mode), - GET_MODE (x))); + { + gcc_assert (mem_mode != VOIDmode && mem_mode != BLKmode); + poly_int64 offset = GET_MODE_SIZE (mem_mode); + if (code == PRE_DEC) + offset = -offset; + return gen_rtx_PLUS (GET_MODE (x), + cleanup_auto_inc_dec (XEXP (x, 0), mem_mode), + gen_int_mode (offset, GET_MODE (x))); + } case POST_INC: case POST_DEC: diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c index 98d09c0544c..fbb63dbc305 100644 --- a/gcc/var-tracking.c +++ b/gcc/var-tracking.c @@ -8685,7 +8685,7 @@ emit_note_insn_var_location (variable **varp, emit_note_data *data) { machine_mode mode, wider_mode; rtx loc2; - HOST_WIDE_INT offset; + HOST_WIDE_INT offset, size, wider_size; if (i == 0 && var->onepart) { @@ -8740,7 +8740,14 @@ emit_note_insn_var_location (variable **varp, emit_note_data *data) mode = GET_MODE (var->var_part[i].cur_loc); if (mode == VOIDmode && var->onepart) mode = DECL_MODE (decl); - last_limit = offsets[n_var_parts] + GET_MODE_SIZE (mode); + /* We ony track subparts of constant-sized objects, since at present + there's no representation for polynomial pieces. */ + if (!GET_MODE_SIZE (mode).is_constant (&size)) + { + complete = false; + continue; + } + last_limit = offsets[n_var_parts] + size; /* Attempt to merge adjacent registers or memory. 
*/ for (j = i + 1; j < var->n_var_parts; j++) @@ -8748,6 +8755,7 @@ emit_note_insn_var_location (variable **varp, emit_note_data *data) break; if (j < var->n_var_parts && GET_MODE_WIDER_MODE (mode).exists (&wider_mode) + && GET_MODE_SIZE (wider_mode).is_constant (&wider_size) && var->var_part[j].cur_loc && mode == GET_MODE (var->var_part[j].cur_loc) && (REG_P (loc[n_var_parts]) || MEM_P (loc[n_var_parts])) @@ -8785,14 +8793,12 @@ emit_note_insn_var_location (variable **varp, emit_note_data *data) if ((REG_P (XEXP (loc[n_var_parts], 0)) && rtx_equal_p (XEXP (loc[n_var_parts], 0), XEXP (XEXP (loc2, 0), 0)) - && INTVAL (XEXP (XEXP (loc2, 0), 1)) - == GET_MODE_SIZE (mode)) + && INTVAL (XEXP (XEXP (loc2, 0), 1)) == size) || (GET_CODE (XEXP (loc[n_var_parts], 0)) == PLUS && CONST_INT_P (XEXP (XEXP (loc[n_var_parts], 0), 1)) && rtx_equal_p (XEXP (XEXP (loc[n_var_parts], 0), 0), XEXP (XEXP (loc2, 0), 0)) - && INTVAL (XEXP (XEXP (loc[n_var_parts], 0), 1)) - + GET_MODE_SIZE (mode) + && INTVAL (XEXP (XEXP (loc[n_var_parts], 0), 1)) + size == INTVAL (XEXP (XEXP (loc2, 0), 1)))) new_loc = adjust_address_nv (loc[n_var_parts], wider_mode, 0); @@ -8802,7 +8808,7 @@ emit_note_insn_var_location (variable **varp, emit_note_data *data) { loc[n_var_parts] = new_loc; mode = wider_mode; - last_limit = offsets[n_var_parts] + GET_MODE_SIZE (mode); + last_limit = offsets[n_var_parts] + wider_size; i = j; } } -- 2.30.2