+2017-09-12 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * regs.h (hard_regno_nregs): Turn into a function.
+ (end_hard_regno): Update accordingly.
+ * caller-save.c (setup_save_areas): Likewise.
+ (save_call_clobbered_regs): Likewise.
+ (replace_reg_with_saved_mem): Likewise.
+ (insert_restore): Likewise.
+ (insert_save): Likewise.
+ * combine.c (can_change_dest_mode): Likewise.
+ (move_deaths): Likewise.
+ (distribute_notes): Likewise.
+ * config/mips/mips.c (mips_hard_regno_call_part_clobbered): Likewise.
+ * config/powerpcspe/powerpcspe.c (rs6000_cannot_change_mode_class)
+ (rs6000_split_multireg_move): Likewise.
+ (rs6000_register_move_cost): Likewise.
+ (rs6000_memory_move_cost): Likewise.
+ * config/rs6000/rs6000.c (rs6000_cannot_change_mode_class): Likewise.
+ (rs6000_split_multireg_move): Likewise.
+ (rs6000_register_move_cost): Likewise.
+ (rs6000_memory_move_cost): Likewise.
+ * cselib.c (cselib_reset_table): Likewise.
+ (cselib_lookup_1): Likewise.
+ * emit-rtl.c (set_mode_and_regno): Likewise.
+ * function.c (aggregate_value_p): Likewise.
+ * ira-color.c (setup_profitable_hard_regs): Likewise.
+ (check_hard_reg_p): Likewise.
+ (calculate_saved_nregs): Likewise.
+ (assign_hard_reg): Likewise.
+ (improve_allocation): Likewise.
+ (calculate_spill_cost): Likewise.
+ * ira-emit.c (modify_move_list): Likewise.
+ * ira-int.h (ira_hard_reg_set_intersection_p): Likewise.
+ (ira_hard_reg_in_set_p): Likewise.
+ * ira.c (setup_reg_mode_hard_regset): Likewise.
+ (clarify_prohibited_class_mode_regs): Likewise.
+ (check_allocation): Likewise.
+ * lra-assigns.c (find_hard_regno_for_1): Likewise.
+ (lra_setup_reg_renumber): Likewise.
+ (setup_try_hard_regno_pseudos): Likewise.
+ (spill_for): Likewise.
+ (assign_hard_regno): Likewise.
+ (setup_live_pseudos_and_spill_after_risky_transforms): Likewise.
+ * lra-constraints.c (in_class_p): Likewise.
+ (lra_constraint_offset): Likewise.
+ (simplify_operand_subreg): Likewise.
+ (lra_constraints): Likewise.
+ (split_reg): Likewise.
+ (split_if_necessary): Likewise.
+ (invariant_p): Likewise.
+ (inherit_in_ebb): Likewise.
+ * lra-lives.c (process_bb_lives): Likewise.
+ * lra-remat.c (reg_overlap_for_remat_p): Likewise.
+ (get_hard_regs): Likewise.
+ (do_remat): Likewise.
+ * lra-spills.c (assign_spill_hard_regs): Likewise.
+ * mode-switching.c (create_pre_exit): Likewise.
+ * postreload.c (reload_combine_recognize_pattern): Likewise.
+ * recog.c (peep2_find_free_register): Likewise.
+ * regcprop.c (kill_value_regno): Likewise.
+ (set_value_regno): Likewise.
+ (copy_value): Likewise.
+ (maybe_mode_change): Likewise.
+ (find_oldest_value_reg): Likewise.
+ (copyprop_hardreg_forward_1): Likewise.
+ * regrename.c (check_new_reg_p): Likewise.
+ (regrename_do_replace): Likewise.
+ * reload.c (push_reload): Likewise.
+ (combine_reloads): Likewise.
+ (find_dummy_reload): Likewise.
+ (operands_match_p): Likewise.
+ (find_reloads): Likewise.
+ (find_equiv_reg): Likewise.
+ (reload_adjust_reg_for_mode): Likewise.
+ * reload1.c (count_pseudo): Likewise.
+ (count_spilled_pseudo): Likewise.
+ (find_reg): Likewise.
+ (clear_reload_reg_in_use): Likewise.
+ (free_for_value_p): Likewise.
+ (allocate_reload_reg): Likewise.
+ (choose_reload_regs): Likewise.
+ (reload_adjust_reg_for_temp): Likewise.
+ (emit_reload_insns): Likewise.
+ (delete_output_reload): Likewise.
+ * rtlanal.c (subreg_get_info): Likewise.
+ * sched-deps.c (sched_analyze_reg): Likewise.
+ * sel-sched.c (init_regs_for_mode): Likewise.
+ (mark_unavailable_hard_regs): Likewise.
+ (choose_best_reg_1): Likewise.
+ (verify_target_availability): Likewise.
+ * valtrack.c (dead_debug_insert_temp): Likewise.
+ * var-tracking.c (track_loc_p): Likewise.
+ (emit_note_insn_var_location): Likewise.
+ * varasm.c (make_decl_rtl): Likewise.
+ * reginfo.c (choose_hard_reg_mode): Likewise.
+ (init_reg_modes_target): Refer directly to
+ this_target_regs->x_hard_regno_nregs.
+
2017-09-12 Richard Sandiford <richard.sandiford@linaro.org>
* ira-costs.c (record_operand_costs): Use in_hard_reg_set_p
if (r < 0 || regno_reg_rtx[regno] == cheap)
continue;
- bound = r + hard_regno_nregs[r][PSEUDO_REGNO_MODE (regno)];
+ bound = r + hard_regno_nregs (r, PSEUDO_REGNO_MODE (regno));
for (; r < bound; r++)
if (TEST_HARD_REG_BIT (used_regs, r))
{
if (r < 0 || regno_reg_rtx[regno] == cheap)
continue;
- bound = r + hard_regno_nregs[r][PSEUDO_REGNO_MODE (regno)];
+ bound = r + hard_regno_nregs (r, PSEUDO_REGNO_MODE (regno));
for (; r < bound; r++)
if (TEST_HARD_REG_BIT (used_regs, r))
call_saved_regs[call_saved_regs_num++] = hard_reg_map[r];
if (r < 0 || regno_reg_rtx[regno] == cheap)
continue;
- nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (regno)];
+ nregs = hard_regno_nregs (r, PSEUDO_REGNO_MODE (regno));
mode = HARD_REGNO_CALLER_SAVE_MODE
(r, nregs, PSEUDO_REGNO_MODE (regno));
if (partial_subreg_p (save_mode[r], mode))
int regno,
void *arg)
{
- unsigned int i, nregs = hard_regno_nregs [regno][mode];
+ unsigned int i, nregs = hard_regno_nregs (regno, mode);
rtx mem;
machine_mode *save_mode = (machine_mode *)arg;
{
mem = copy_rtx (regno_save_mem[regno][nregs]);
- if (nregs == (unsigned int) hard_regno_nregs[regno][save_mode[regno]])
+ if (nregs == hard_regno_nregs (regno, save_mode[regno]))
mem = adjust_address_nv (mem, save_mode[regno], 0);
if (GET_MODE (mem) != mode)
{
machine_mode smode = save_mode[regno];
gcc_assert (smode != VOIDmode);
- if (hard_regno_nregs [regno][smode] > 1)
+ if (hard_regno_nregs (regno, smode) > 1)
smode = mode_for_size (GET_MODE_SIZE (mode) / nregs,
GET_MODE_CLASS (mode), 0).require ();
XVECEXP (mem, 0, i) = gen_rtx_REG (smode, regno + i);
mem = regno_save_mem [regno][numregs];
if (save_mode [regno] != VOIDmode
&& save_mode [regno] != GET_MODE (mem)
- && numregs == (unsigned int) hard_regno_nregs[regno][save_mode [regno]]
+ && numregs == hard_regno_nregs (regno, save_mode [regno])
/* Check that insn to restore REGNO in save_mode[regno] is
correct. */
&& reg_save_code (regno, save_mode[regno]) >= 0)
mem = regno_save_mem [regno][numregs];
if (save_mode [regno] != VOIDmode
&& save_mode [regno] != GET_MODE (mem)
- && numregs == (unsigned int) hard_regno_nregs[regno][save_mode [regno]]
+ && numregs == hard_regno_nregs (regno, save_mode [regno])
/* Check that insn to save REGNO in save_mode[regno] is
correct. */
&& reg_save_code (regno, save_mode[regno]) >= 0)
registers than the old mode. */
if (regno < FIRST_PSEUDO_REGISTER)
return (targetm.hard_regno_mode_ok (regno, mode)
- && REG_NREGS (x) >= hard_regno_nregs[regno][mode]);
+ && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
/* Or a pseudo that is only used once. */
return (regno < reg_n_sets_max
rtx oldnotes = 0;
if (note)
- offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
+ offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
else
offset = 1;
not already dead or set. */
for (i = regno; i < endregno;
- i += hard_regno_nregs[i][reg_raw_mode[i]])
+ i += hard_regno_nregs (i, reg_raw_mode[i]))
{
rtx piece = regno_reg_rtx[i];
basic_block bb = this_basic_block;
mips_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
{
if (TARGET_FLOATXX
- && hard_regno_nregs[regno][mode] == 1
+ && hard_regno_nregs (regno, mode) == 1
&& FP_REG_P (regno)
&& (regno & 1) != 0)
return true;
if (reg_classes_intersect_p (xclass, rclass))
{
- unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
- unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
+ unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
+ unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
{
unsigned num_regs = (from_size + 15) / 16;
- if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
- || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
+ if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
+ || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
return true;
return (from_size != 8 && from_size != 16);
reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
mode = GET_MODE (dst);
- nregs = hard_regno_nregs[reg][mode];
+ nregs = hard_regno_nregs (reg, mode);
if (FP_REGNO_P (reg))
reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
|| rs6000_cpu == PROCESSOR_POWER8
|| rs6000_cpu == PROCESSOR_POWER9)
&& reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
- ret = 6 * hard_regno_nregs[0][mode];
+ ret = 6 * hard_regno_nregs (0, mode);
else
/* A move will cost one instruction per GPR moved. */
- ret = 2 * hard_regno_nregs[0][mode];
+ ret = 2 * hard_regno_nregs (0, mode);
}
/* If we have VSX, we can easily move between FPR or Altivec registers. */
else if (VECTOR_MEM_VSX_P (mode)
&& reg_classes_intersect_p (to, VSX_REGS)
&& reg_classes_intersect_p (from, VSX_REGS))
- ret = 2 * hard_regno_nregs[FIRST_FPR_REGNO][mode];
+ ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
/* Moving between two similar registers is just one instruction. */
else if (reg_classes_intersect_p (to, from))
dbg_cost_ctrl++;
if (reg_classes_intersect_p (rclass, GENERAL_REGS))
- ret = 4 * hard_regno_nregs[0][mode];
+ ret = 4 * hard_regno_nregs (0, mode);
else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
|| reg_classes_intersect_p (rclass, VSX_REGS)))
- ret = 4 * hard_regno_nregs[32][mode];
+ ret = 4 * hard_regno_nregs (32, mode);
else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
- ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
+ ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
else
ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
if (reg_classes_intersect_p (xclass, rclass))
{
- unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
- unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
+ unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
+ unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
{
unsigned num_regs = (from_size + 15) / 16;
- if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
- || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
+ if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
+ || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
return true;
return (from_size != 8 && from_size != 16);
reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
mode = GET_MODE (dst);
- nregs = hard_regno_nregs[reg][mode];
+ nregs = hard_regno_nregs (reg, mode);
if (FP_REGNO_P (reg))
reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
|| rs6000_cpu == PROCESSOR_POWER8
|| rs6000_cpu == PROCESSOR_POWER9)
&& reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
- ret = 6 * hard_regno_nregs[0][mode];
+ ret = 6 * hard_regno_nregs (0, mode);
else
/* A move will cost one instruction per GPR moved. */
- ret = 2 * hard_regno_nregs[0][mode];
+ ret = 2 * hard_regno_nregs (0, mode);
}
/* If we have VSX, we can easily move between FPR or Altivec registers. */
else if (VECTOR_MEM_VSX_P (mode)
&& reg_classes_intersect_p (to, VSX_REGS)
&& reg_classes_intersect_p (from, VSX_REGS))
- ret = 2 * hard_regno_nregs[FIRST_FPR_REGNO][mode];
+ ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
/* Moving between two similar registers is just one instruction. */
else if (reg_classes_intersect_p (to, from))
dbg_cost_ctrl++;
if (reg_classes_intersect_p (rclass, GENERAL_REGS))
- ret = 4 * hard_regno_nregs[0][mode];
+ ret = 4 * hard_regno_nregs (0, mode);
else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
|| reg_classes_intersect_p (rclass, VSX_REGS)))
- ret = 4 * hard_regno_nregs[32][mode];
+ ret = 4 * hard_regno_nregs (32, mode);
else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
- ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
+ ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
else
ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
n_used_regs = new_used_regs;
used_regs[0] = regno;
max_value_regs
- = hard_regno_nregs[regno][GET_MODE (cfa_base_preserved_val->locs->loc)];
+ = hard_regno_nregs (regno,
+ GET_MODE (cfa_base_preserved_val->locs->loc));
}
else
{
if (i < FIRST_PSEUDO_REGISTER)
{
- unsigned int n = hard_regno_nregs[i][mode];
+ unsigned int n = hard_regno_nregs (i, mode);
if (n > max_value_regs)
max_value_regs = n;
{
struct elt_loc_list *el;
if (i < FIRST_PSEUDO_REGISTER
- && hard_regno_nregs[i][lmode] != 1)
+ && hard_regno_nregs (i, lmode) != 1)
continue;
for (el = l->elt->locs; el; el = el->next)
if (!REG_P (el->loc))
set_mode_and_regno (rtx x, machine_mode mode, unsigned int regno)
{
unsigned int nregs = (HARD_REGISTER_NUM_P (regno)
- ? hard_regno_nregs[regno][mode]
+ ? hard_regno_nregs (regno, mode)
: 1);
PUT_MODE_RAW (x, mode);
set_regno_raw (x, regno, nregs);
return 0;
regno = REGNO (reg);
- nregs = hard_regno_nregs[regno][TYPE_MODE (type)];
+ nregs = hard_regno_nregs (regno, TYPE_MODE (type));
for (i = 0; i < nregs; i++)
if (! call_used_regs[regno + i])
return 1;
|| (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
continue;
mode = ALLOCNO_MODE (a);
- nregs = hard_regno_nregs[hard_regno][mode];
+ nregs = hard_regno_nregs (hard_regno, mode);
nobj = ALLOCNO_NUM_OBJECTS (a);
for (k = 0; k < nobj; k++)
{
/* Checking only profitable hard regs. */
if (! TEST_HARD_REG_BIT (profitable_regs, hard_regno))
return false;
- nregs = hard_regno_nregs[hard_regno][mode];
+ nregs = hard_regno_nregs (hard_regno, mode);
nwords = ALLOCNO_NUM_OBJECTS (a);
for (j = 0; j < nregs; j++)
{
int nregs = 0;
ira_assert (hard_regno >= 0);
- for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
+ for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
if (!allocated_hardreg_p[hard_regno + i]
&& !TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + i)
&& !LOCAL_REGNO (hard_regno + i))
int conflict_nregs;
mode = ALLOCNO_MODE (conflict_a);
- conflict_nregs = hard_regno_nregs[hard_regno][mode];
+ conflict_nregs = hard_regno_nregs (hard_regno, mode);
if (conflict_nregs == n_objects && conflict_nregs > 1)
{
int num = OBJECT_SUBWORD (conflict_obj);
rclass = REGNO_REG_CLASS (hard_regno);
add_cost = ((ira_memory_move_cost[mode][rclass][0]
+ ira_memory_move_cost[mode][rclass][1])
- * saved_nregs / hard_regno_nregs[hard_regno][mode] - 1);
+ * saved_nregs / hard_regno_nregs (hard_regno,
+ mode) - 1);
cost += add_cost;
full_cost += add_cost;
}
fail:
if (best_hard_regno >= 0)
{
- for (i = hard_regno_nregs[best_hard_regno][mode] - 1; i >= 0; i--)
+ for (i = hard_regno_nregs (best_hard_regno, mode) - 1; i >= 0; i--)
allocated_hardreg_p[best_hard_regno + i] = true;
}
if (! retry_p)
spill_cost -= ALLOCNO_UPDATED_CLASS_COST (conflict_a);
spill_cost
+= allocno_copy_cost_saving (conflict_a, conflict_hregno);
- conflict_nregs
- = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
+ conflict_nregs = hard_regno_nregs (conflict_hregno,
+ ALLOCNO_MODE (conflict_a));
for (r = conflict_hregno;
r >= 0 && (int) end_hard_regno (mode, r) > conflict_hregno;
r--)
by spilling some conflicting allocnos does not improve the
allocation cost. */
continue;
- nregs = hard_regno_nregs[best][mode];
+ nregs = hard_regno_nregs (best, mode);
/* Now spill conflicting allocnos which contain a hard register
of A when we assign the best chosen hard register to it. */
for (word = 0; word < nwords; word++)
if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
continue;
- conflict_nregs
- = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
+ conflict_nregs = hard_regno_nregs (conflict_hregno,
+ ALLOCNO_MODE (conflict_a));
if (best + nregs <= conflict_hregno
|| conflict_hregno + conflict_nregs <= best)
/* No intersection. */
a = ira_regno_allocno_map[regno];
length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a);
- nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
+ nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (a));
for (j = 0; j < nregs; j++)
if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
break;
to = move->to;
if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
continue;
- nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
+ nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (to));
for (i = 0; i < nregs; i++)
{
hard_regno_last_set[hard_regno + i] = move;
to = move->to;
if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
{
- nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
+ nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (from));
for (n = i = 0; i < nregs; i++)
if (hard_regno_last_set_check[hard_regno + i] == curr_tick
&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
to = move->to;
if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
{
- nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
+ nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (from));
for (i = 0; i < nregs; i++)
if (hard_regno_last_set_check[hard_regno + i] == curr_tick
&& ALLOCNO_HARD_REGNO
}
if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
continue;
- nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
+ nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (to));
for (i = 0; i < nregs; i++)
{
hard_regno_last_set[hard_regno + i] = move;
int i;
gcc_assert (hard_regno >= 0);
- for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
+ for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
if (TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
return true;
return false;
int i;
ira_assert (hard_regno >= 0);
- for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
+ for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
if (!TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
return false;
return true;
for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
{
CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]);
- for (i = hard_regno_nregs[hard_regno][m] - 1; i >= 0; i--)
+ for (i = hard_regno_nregs (hard_regno, (machine_mode) m) - 1;
+ i >= 0; i--)
if (hard_regno + i < FIRST_PSEUDO_REGISTER)
SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m],
hard_regno + i);
hard_regno = ira_class_hard_regs[cl][k];
if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j], hard_regno))
continue;
- nregs = hard_regno_nregs[hard_regno][j];
+ nregs = hard_regno_nregs (hard_regno, (machine_mode) j);
if (hard_regno + nregs > FIRST_PSEUDO_REGISTER)
{
SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
if (ALLOCNO_CAP_MEMBER (a) != NULL
|| (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
continue;
- nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
+ nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (a));
if (nregs == 1)
/* We allocated a single hard register. */
n = 1;
if (conflict_hard_regno < 0)
continue;
- conflict_nregs
- = (hard_regno_nregs
- [conflict_hard_regno][ALLOCNO_MODE (conflict_a)]);
+ conflict_nregs = hard_regno_nregs (conflict_hard_regno,
+ ALLOCNO_MODE (conflict_a));
if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1
&& conflict_nregs == ALLOCNO_NUM_OBJECTS (conflict_a))
if (lra_reg_val_equal_p (conflict_regno, val, offset))
{
conflict_hr = live_pseudos_reg_renumber[conflict_regno];
- nregs = (hard_regno_nregs[conflict_hr]
- [lra_reg_info[conflict_regno].biggest_mode]);
+ nregs = hard_regno_nregs (conflict_hr,
+ lra_reg_info[conflict_regno].biggest_mode);
/* Remember about multi-register pseudos. For example, 2
hard register pseudos can start on the same hard register
but can not start on HR and HR+1/HR-1. */
machine_mode biggest_conflict_mode
= lra_reg_info[conflict_regno].biggest_mode;
int biggest_conflict_nregs
- = hard_regno_nregs[conflict_hr][biggest_conflict_mode];
+ = hard_regno_nregs (conflict_hr, biggest_conflict_mode);
- nregs_diff = (biggest_conflict_nregs
- - (hard_regno_nregs
- [conflict_hr]
- [PSEUDO_REGNO_MODE (conflict_regno)]));
+ nregs_diff
+ = (biggest_conflict_nregs
+ - hard_regno_nregs (conflict_hr,
+ PSEUDO_REGNO_MODE (conflict_regno)));
add_to_hard_reg_set (&conflict_set,
biggest_conflict_mode,
conflict_hr
rclass_size = ira_class_hard_regs_num[rclass];
best_hard_regno = -1;
hard_regno = ira_class_hard_regs[rclass][0];
- biggest_nregs = hard_regno_nregs[hard_regno][biggest_mode];
+ biggest_nregs = hard_regno_nregs (hard_regno, biggest_mode);
nregs_diff = (biggest_nregs
- - hard_regno_nregs[hard_regno][PSEUDO_REGNO_MODE (regno)]);
+ - hard_regno_nregs (hard_regno, PSEUDO_REGNO_MODE (regno)));
COPY_HARD_REG_SET (available_regs, reg_class_contents[rclass]);
AND_COMPL_HARD_REG_SET (available_regs, lra_no_alloc_regs);
for (i = 0; i < rclass_size; i++)
hard_regno_costs[hard_regno] = 0;
}
for (j = 0;
- j < hard_regno_nregs[hard_regno][PSEUDO_REGNO_MODE (regno)];
+ j < hard_regno_nregs (hard_regno, PSEUDO_REGNO_MODE (regno));
j++)
if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j)
&& ! df_regs_ever_live_p (hard_regno + j))
hr = reg_renumber[regno];
reg_renumber[regno] = hard_regno;
lra_assert (hr >= 0);
- for (i = 0; i < hard_regno_nregs[hr][PSEUDO_REGNO_MODE (regno)]; i++)
+ for (i = 0; i < hard_regno_nregs (hr, PSEUDO_REGNO_MODE (regno)); i++)
if (hard_regno < 0)
lra_hard_reg_usage[hr + i] -= lra_reg_info[regno].freq;
else
if (overlaps_hard_reg_set_p (reg_class_contents[rclass],
mode, hard_regno))
{
- for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
+ for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
{
if (try_hard_reg_pseudos_check[hard_regno + i]
!= curr_pseudo_check)
{
hard_regno = ira_class_hard_regs[rclass][i];
bitmap_clear (&spill_pseudos_bitmap);
- for (j = hard_regno_nregs[hard_regno][mode] - 1; j >= 0; j--)
+ for (j = hard_regno_nregs (hard_regno, mode) - 1; j >= 0; j--)
{
if (try_hard_reg_pseudos_check[hard_regno + j] != curr_pseudo_check)
continue;
lra_setup_reg_renumber (regno, hard_regno, true);
update_lives (regno, false);
for (i = 0;
- i < hard_regno_nregs[hard_regno][lra_reg_info[regno].biggest_mode];
+ i < hard_regno_nregs (hard_regno, lra_reg_info[regno].biggest_mode);
i++)
df_set_regs_ever_live (hard_regno + i, true);
}
{
int conflict_hard_regno = reg_renumber[conflict_regno];
machine_mode biggest_mode = lra_reg_info[conflict_regno].biggest_mode;
- int biggest_nregs = hard_regno_nregs[conflict_hard_regno][biggest_mode];
- int nregs_diff = (biggest_nregs
- - (hard_regno_nregs
- [conflict_hard_regno]
- [PSEUDO_REGNO_MODE (conflict_regno)]));
+ int biggest_nregs = hard_regno_nregs (conflict_hard_regno,
+ biggest_mode);
+ int nregs_diff
+ = (biggest_nregs
+ - hard_regno_nregs (conflict_hard_regno,
+ PSEUDO_REGNO_MODE (conflict_regno)));
add_to_hard_reg_set (&conflict_set,
biggest_mode,
conflict_hard_regno
}
bitmap_set_bit (spilled_pseudo_bitmap, regno);
for (j = 0;
- j < hard_regno_nregs[hard_regno][PSEUDO_REGNO_MODE (regno)];
+ j < hard_regno_nregs (hard_regno, PSEUDO_REGNO_MODE (regno));
j++)
lra_hard_reg_usage[hard_regno + j] -= lra_reg_info[regno].freq;
reg_renumber[regno] = -1;
for (i = 0; i < class_size; i++)
{
hard_regno = ira_class_hard_regs[common_class][i];
- nregs = hard_regno_nregs[hard_regno][reg_mode];
+ nregs = hard_regno_nregs (hard_regno, reg_mode);
if (nregs == 1)
return true;
for (j = 0; j < nregs; j++)
if (WORDS_BIG_ENDIAN
&& is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_SIZE (int_mode) > UNITS_PER_WORD)
- return hard_regno_nregs[regno][mode] - 1;
+ return hard_regno_nregs (regno, mode) - 1;
return 0;
}
&& (hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0
/* Don't reload paradoxical subregs because we could be looping
having repeatedly final regno out of hard regs range. */
- && (hard_regno_nregs[hard_regno][innermode]
- >= hard_regno_nregs[hard_regno][mode])
+ && (hard_regno_nregs (hard_regno, innermode)
+ >= hard_regno_nregs (hard_regno, mode))
&& simplify_subreg_regno (hard_regno, innermode,
SUBREG_BYTE (operand), mode) < 0
/* Don't reload subreg for matching reload. It is actually
else if (REG_P (reg)
&& REGNO (reg) >= FIRST_PSEUDO_REGISTER
&& (hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0
- && (hard_regno_nregs[hard_regno][innermode]
- < hard_regno_nregs[hard_regno][mode])
+ && (hard_regno_nregs (hard_regno, innermode)
+ < hard_regno_nregs (hard_regno, mode))
&& (regclass = lra_get_allocno_class (REGNO (reg)))
&& (type != OP_IN
|| !in_hard_reg_set_p (reg_class_contents[regclass],
{
int j, nregs;
- nregs = hard_regno_nregs[hard_regno][lra_reg_info[i].biggest_mode];
+ nregs = hard_regno_nregs (hard_regno, lra_reg_info[i].biggest_mode);
for (j = 0; j < nregs; j++)
df_set_regs_ever_live (hard_regno + j, true);
}
if (lra_reg_info[i].nrefs != 0
&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
{
- int j, nregs = hard_regno_nregs[hard_regno][PSEUDO_REGNO_MODE (i)];
+ int j, nregs = hard_regno_nregs (hard_regno,
+ PSEUDO_REGNO_MODE (i));
for (j = 0; j < nregs; j++)
lra_assert (df_regs_ever_live_p (hard_regno + j));
{
mode = PSEUDO_REGNO_MODE (original_regno);
hard_regno = reg_renumber[original_regno];
- nregs = hard_regno_nregs[hard_regno][mode];
+ nregs = hard_regno_nregs (hard_regno, mode);
rclass = lra_get_allocno_class (original_regno);
original_reg = regno_reg_rtx[original_regno];
call_save_p = need_for_call_save_p (original_regno);
if (call_save_p)
{
mode = HARD_REGNO_CALLER_SAVE_MODE (hard_regno,
- hard_regno_nregs[hard_regno][mode],
+ hard_regno_nregs (hard_regno, mode),
mode);
new_reg = lra_create_new_reg (mode, NULL_RTX, NO_REGS, "save");
}
rtx next_usage_insns;
if (regno < FIRST_PSEUDO_REGISTER)
- nregs = hard_regno_nregs[regno][mode];
+ nregs = hard_regno_nregs (regno, mode);
for (i = 0; i < nregs; i++)
if (usage_insns[regno + i].check == curr_usage_insns_check
&& (next_usage_insns = usage_insns[regno + i].insns) != NULL_RTX
|| TEST_HARD_REG_BIT (eliminable_regset, regno)
|| GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
return false;
- nregs = hard_regno_nregs[regno][mode];
+ nregs = hard_regno_nregs (regno, mode);
for (i = 0; i < nregs; i++)
if (! fixed_regs[regno + i]
/* A hard register may be clobbered in the current insn
usage_insns[dst_regno].check = -(int) INSN_UID (curr_insn);
else
{
- nregs = hard_regno_nregs[dst_regno][reg->biggest_mode];
+ nregs = hard_regno_nregs (dst_regno,
+ reg->biggest_mode);
for (i = 0; i < nregs; i++)
usage_insns[dst_regno + i].check
= -(int) INSN_UID (curr_insn);
but implicitly it can be used in natural mode as a
part of multi-register group. Process this case
here. */
- for (i = 1; i < hard_regno_nregs[regno][reg->biggest_mode]; i++)
+ for (i = 1; i < hard_regno_nregs (regno, reg->biggest_mode); i++)
if (partial_subreg_p (lra_reg_info[regno + i].biggest_mode,
GET_MODE (regno_reg_rtx[regno + i])))
lra_reg_info[regno + i].biggest_mode
if (regno >= FIRST_PSEUDO_REGISTER)
nregs = 1;
else
- nregs = hard_regno_nregs[regno][reg->biggest_mode];
+ nregs = hard_regno_nregs (regno, reg->biggest_mode);
struct lra_insn_reg *reg2;
if (regno2 >= FIRST_PSEUDO_REGISTER)
nregs2 = 1;
else
- nregs2 = hard_regno_nregs[regno2][reg->biggest_mode];
+ nregs2 = hard_regno_nregs (regno2, reg->biggest_mode);
if ((regno2 + nregs2 - 1 >= regno && regno2 < regno + nregs)
|| (regno + nregs - 1 >= regno2 && regno < regno2 + nregs2))
int hard_regno = regno < FIRST_PSEUDO_REGISTER ? regno : reg_renumber[regno];
if (hard_regno >= 0)
- nregs = hard_regno_nregs[hard_regno][reg->biggest_mode];
+ nregs = hard_regno_nregs (hard_regno, reg->biggest_mode);
return hard_regno;
}
? dst_regno : reg_renumber[dst_regno];
gcc_assert (dst_hard_regno >= 0);
machine_mode mode = GET_MODE (SET_DEST (set));
- dst_nregs = hard_regno_nregs[dst_hard_regno][mode];
+ dst_nregs = hard_regno_nregs (dst_hard_regno, mode);
for (reg = cand_id->regs; reg != NULL; reg = reg->next)
if (reg->type != OP_IN && reg->regno != ignore_regno)
spill_hard_reg[regno]
= gen_raw_REG (PSEUDO_REGNO_MODE (regno), hard_regno);
for (nr = 0;
- nr < hard_regno_nregs[hard_regno][lra_reg_info[regno].biggest_mode];
+ nr < hard_regno_nregs (hard_regno,
+ lra_reg_info[regno].biggest_mode);
nr++)
/* Just loop. */
df_set_regs_ever_live (hard_regno + nr, true);
if (!targetm.calls.function_value_regno_p (copy_start))
copy_num = 0;
else
- copy_num
- = hard_regno_nregs[copy_start][GET_MODE (copy_reg)];
+ copy_num = hard_regno_nregs (copy_start,
+ GET_MODE (copy_reg));
/* If the return register is not likely spilled, - as is
the case for floating point on SH4 - then it might
&& (call_used_regs[i] || df_regs_ever_live_p (i))
&& (!frame_pointer_needed || i != HARD_FRAME_POINTER_REGNUM)
&& !fixed_regs[i] && !global_regs[i]
- && hard_regno_nregs[i][GET_MODE (reg)] == 1
+ && hard_regno_nregs (i, GET_MODE (reg)) == 1
&& targetm.hard_regno_scratch_ok (i))
{
index_reg = gen_rtx_REG (GET_MODE (reg), i);
continue;
success = 1;
- for (j = 0; success && j < hard_regno_nregs[regno][mode]; j++)
+ for (j = 0; success && j < hard_regno_nregs (regno, mode); j++)
{
/* Don't allocate fixed registers. */
if (fixed_regs[regno + j])
unsigned int i, n;
if (vd->e[j].mode == VOIDmode)
continue;
- n = hard_regno_nregs[j][vd->e[j].mode];
+ n = hard_regno_nregs (j, vd->e[j].mode);
if (j + n > regno)
for (i = 0; i < n; ++i)
kill_value_one_regno (j + i, vd);
vd->e[regno].mode = mode;
- nregs = hard_regno_nregs[regno][mode];
+ nregs = hard_regno_nregs (regno, mode);
if (nregs > vd->max_value_regs)
vd->max_value_regs = nregs;
}
We can't properly represent the latter case in our tables, so don't
record anything then. */
- else if (sn < (unsigned int) hard_regno_nregs[sr][vd->e[sr].mode]
+ else if (sn < hard_regno_nregs (sr, vd->e[sr].mode)
&& (GET_MODE_SIZE (vd->e[sr].mode) > UNITS_PER_WORD
? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
return;
/* If SRC had been assigned a mode narrower than the copy, we can't
link DEST into the chain, because not all of the pieces of the
copy came from oldest_regno. */
- else if (sn > (unsigned int) hard_regno_nregs[sr][vd->e[sr].mode])
+ else if (sn > hard_regno_nregs (sr, vd->e[sr].mode))
return;
/* Link DR at the end of the value chain used by SR. */
return gen_raw_REG (new_mode, regno);
else if (mode_change_ok (orig_mode, new_mode, regno))
{
- int copy_nregs = hard_regno_nregs[copy_regno][copy_mode];
- int use_nregs = hard_regno_nregs[copy_regno][new_mode];
+ int copy_nregs = hard_regno_nregs (copy_regno, copy_mode);
+ int use_nregs = hard_regno_nregs (copy_regno, new_mode);
int copy_offset
= GET_MODE_SIZE (copy_mode) / copy_nregs * (copy_nregs - use_nregs);
int offset
(set (...) (reg:DI r9))
Replacing r9 with r11 is invalid. */
if (mode != vd->e[regno].mode
- && REG_NREGS (reg) > hard_regno_nregs[regno][vd->e[regno].mode])
+ && REG_NREGS (reg) > hard_regno_nregs (regno, vd->e[regno].mode))
return NULL_RTX;
for (i = vd->e[regno].oldest_regno; i != regno; i = vd->e[i].next_regno)
if (mode != vd->e[regno].mode)
{
if (REG_NREGS (src)
- > hard_regno_nregs[regno][vd->e[regno].mode])
+ > hard_regno_nregs (regno, vd->e[regno].mode))
goto no_move_special_case;
/* And likewise, if we are narrowing on big endian the transformation
is also invalid. */
- if (REG_NREGS (src) < hard_regno_nregs[regno][vd->e[regno].mode]
+ if (REG_NREGS (src) < hard_regno_nregs (regno, vd->e[regno].mode)
&& (GET_MODE_SIZE (vd->e[regno].mode) > UNITS_PER_WORD
? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
goto no_move_special_case;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
for (j = 0; j < MAX_MACHINE_MODE; j++)
- hard_regno_nregs[i][j] = HARD_REGNO_NREGS (i, (machine_mode)j);
+ this_target_regs->x_hard_regno_nregs[i][j]
+ = HARD_REGNO_NREGS (i, (machine_mode)j);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
if it is suitable, otherwise fall back on word_mode. */
if (reg_raw_mode[i] == VOIDmode)
{
- if (i > 0 && hard_regno_nregs[i][reg_raw_mode[i - 1]] == 1)
+ if (i > 0 && hard_regno_nregs (i, reg_raw_mode[i - 1]) == 1)
reg_raw_mode[i] = reg_raw_mode[i - 1];
else
reg_raw_mode[i] = word_mode;
If we still didn't find a valid mode, try CCmode. */
FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
- if ((unsigned) hard_regno_nregs[regno][mode] == nregs
+ if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
|| !targetm.hard_regno_call_part_clobbered (regno, mode))
found_mode = mode;
FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
- if ((unsigned) hard_regno_nregs[regno][mode] == nregs
+ if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
|| !targetm.hard_regno_call_part_clobbered (regno, mode))
found_mode = mode;
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT)
- if ((unsigned) hard_regno_nregs[regno][mode] == nregs
+ if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
|| !targetm.hard_regno_call_part_clobbered (regno, mode))
found_mode = mode;
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
- if ((unsigned) hard_regno_nregs[regno][mode] == nregs
+ if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
|| !targetm.hard_regno_call_part_clobbered (regno, mode))
for (m = (unsigned int) CCmode; m < (unsigned int) NUM_MACHINE_MODES; ++m)
{
mode = (machine_mode) m;
- if ((unsigned) hard_regno_nregs[regno][mode] == nregs
+ if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
|| !targetm.hard_regno_call_part_clobbered (regno, mode)))
struct du_head *this_head, HARD_REG_SET this_unavailable)
{
machine_mode mode = GET_MODE (*this_head->first->loc);
- int nregs = hard_regno_nregs[new_reg][mode];
+ int nregs = hard_regno_nregs (new_reg, mode);
int i;
struct du_chain *tmp;
mode = GET_MODE (*head->first->loc);
head->renamed = 1;
head->regno = reg;
- head->nregs = hard_regno_nregs[reg][mode];
+ head->nregs = hard_regno_nregs (reg, mode);
return true;
}
#else
#define this_target_regs (&default_target_regs)
#endif
-
-#define hard_regno_nregs \
- (this_target_regs->x_hard_regno_nregs)
#define reg_raw_mode \
(this_target_regs->x_reg_raw_mode)
#define have_regs_of_mode \
#define float_extend_from_mem \
(this_target_regs->x_float_extend_from_mem)
+/* Return the number of hard registers in (reg:MODE REGNO). */
+
+ALWAYS_INLINE unsigned char
+hard_regno_nregs (unsigned int regno, machine_mode mode)
+{
+ return this_target_regs->x_hard_regno_nregs[regno][mode];
+}
+
/* Return an exclusive upper bound on the registers occupied by hard
register (reg:MODE REGNO). */
static inline unsigned int
end_hard_regno (machine_mode mode, unsigned int regno)
{
- return regno + hard_regno_nregs[regno][(int) mode];
+ return regno + hard_regno_nregs (regno, mode);
}
/* Add to REGS all the registers required to store a value of mode MODE
&& targetm.hard_regno_mode_ok (regno, outmode))
{
unsigned int offs;
- unsigned int nregs = MAX (hard_regno_nregs[regno][inmode],
- hard_regno_nregs[regno][outmode]);
+ unsigned int nregs = MAX (hard_regno_nregs (regno, inmode),
+ hard_regno_nregs (regno, outmode));
for (offs = 0; offs < nregs; offs++)
if (fixed_regs[regno + offs]
&& targetm.hard_regno_mode_ok (regno, rld[output_reload].outmode)
&& TEST_HARD_REG_BIT (reg_class_contents[(int) rld[output_reload].rclass],
regno)
- && (hard_regno_nregs[regno][rld[output_reload].outmode]
+ && (hard_regno_nregs (regno, rld[output_reload].outmode)
<= REG_NREGS (XEXP (note, 0)))
/* Ensure that a secondary or tertiary reload for this output
won't want this register. */
&& REGNO (out) < FIRST_PSEUDO_REGISTER)
{
unsigned int regno = REGNO (out) + out_offset;
- unsigned int nwords = hard_regno_nregs[regno][outmode];
+ unsigned int nwords = hard_regno_nregs (regno, outmode);
rtx saved_rtx;
/* When we consider whether the insn uses OUT,
&& REG_NREGS (in) == 1)))
{
unsigned int regno = REGNO (in) + in_offset;
- unsigned int nwords = hard_regno_nregs[regno][inmode];
+ unsigned int nwords = hard_regno_nregs (regno, inmode);
if (! refers_to_regno_for_reload_p (regno, regno + nwords, out, (rtx*) 0)
&& ! hard_reg_set_here_p (regno, regno + nwords,
&& is_a <scalar_int_mode> (GET_MODE (x), &xmode)
&& GET_MODE_SIZE (xmode) > UNITS_PER_WORD
&& i < FIRST_PSEUDO_REGISTER)
- i += hard_regno_nregs[i][xmode] - 1;
+ i += hard_regno_nregs (i, xmode) - 1;
scalar_int_mode ymode;
if (REG_WORDS_BIG_ENDIAN
&& is_a <scalar_int_mode> (GET_MODE (y), &ymode)
&& GET_MODE_SIZE (ymode) > UNITS_PER_WORD
&& j < FIRST_PSEUDO_REGISTER)
- j += hard_regno_nregs[j][ymode] - 1;
+ j += hard_regno_nregs (j, ymode) - 1;
return i == j;
}
&& TEST_HARD_REG_BIT (reg_class_contents[rld[i].rclass], regno)
&& targetm.hard_regno_mode_ok (regno, rld[i].mode))
{
- int nr = hard_regno_nregs[regno][rld[i].mode];
+ int nr = hard_regno_nregs (regno, rld[i].mode);
int ok = 1, nri;
for (nri = 1; nri < nr; nri ++)
/* Reject registers that overlap GOAL. */
if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER)
- nregs = hard_regno_nregs[regno][mode];
+ nregs = hard_regno_nregs (regno, mode);
else
nregs = 1;
- valuenregs = hard_regno_nregs[valueno][mode];
+ valuenregs = hard_regno_nregs (valueno, mode);
if (!goal_mem && !goal_const
&& regno + nregs > valueno && regno < valueno + valuenregs)
regno = REGNO (reloadreg);
if (REG_WORDS_BIG_ENDIAN)
- regno += (int) REG_NREGS (reloadreg) - (int) hard_regno_nregs[regno][mode];
+ regno += ((int) REG_NREGS (reloadreg)
+ - (int) hard_regno_nregs (regno, mode));
return gen_rtx_REG (mode, regno);
}
gcc_assert (r >= 0);
spill_add_cost[r] += freq;
- nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (reg)];
+ nregs = hard_regno_nregs (r, PSEUDO_REGNO_MODE (reg));
while (nregs-- > 0)
{
hard_regno_to_pseudo_regno[r + nregs] = reg;
gcc_assert (r >= 0);
- nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (reg)];
+ nregs = hard_regno_nregs (r, PSEUDO_REGNO_MODE (reg));
if (REGNO_REG_SET_P (&spilled_pseudos, reg)
|| spilled + spilled_nregs <= r || r + nregs <= spilled)
{
int this_cost = spill_cost[regno];
int ok = 1;
- unsigned int this_nregs = hard_regno_nregs[regno][rl->mode];
+ unsigned int this_nregs = hard_regno_nregs (regno, rl->mode);
for (j = 1; j < this_nregs; j++)
{
if (dump_file)
fprintf (dump_file, "Using reg %d for reload %d\n", best_reg, rnum);
- rl->nregs = hard_regno_nregs[best_reg][rl->mode];
+ rl->nregs = hard_regno_nregs (best_reg, rl->mode);
rl->regno = best_reg;
EXECUTE_IF_SET_IN_REG_SET
clear_reload_reg_in_use (unsigned int regno, int opnum,
enum reload_type type, machine_mode mode)
{
- unsigned int nregs = hard_regno_nregs[regno][mode];
+ unsigned int nregs = hard_regno_nregs (regno, mode);
unsigned int start_regno, end_regno, r;
int i;
/* A complication is that for some reload types, inheritance might
enum reload_type type, rtx value, rtx out, int reloadnum,
int ignore_address_reloads)
{
- int nregs = hard_regno_nregs[regno][mode];
+ int nregs = hard_regno_nregs (regno, mode);
while (nregs-- > 0)
if (! reload_reg_free_for_value_p (regno, regno + nregs, opnum, type,
value, out, reloadnum,
&& ! TEST_HARD_REG_BIT (reload_reg_used_for_inherit,
regnum))))
{
- int nr = hard_regno_nregs[regnum][rld[r].mode];
+ int nr = hard_regno_nregs (regnum, rld[r].mode);
/* During the second pass we want to avoid reload registers
which are "bad" for this reload. */
{
/* If a group is needed, verify that all the subsequent
registers still have their values intact. */
- int nr = hard_regno_nregs[i][rld[r].mode];
+ int nr = hard_regno_nregs (i, rld[r].mode);
int k;
for (k = 1; k < nr; k++)
&& (regno != HARD_FRAME_POINTER_REGNUM
|| !frame_pointer_needed))
{
- int nr = hard_regno_nregs[regno][rld[r].mode];
+ int nr = hard_regno_nregs (regno, rld[r].mode);
int k;
rld[r].reg_rtx = equiv;
reload_spill_index[r] = regno;
int nr = 1;
if (nregno < FIRST_PSEUDO_REGISTER)
- nr = hard_regno_nregs[nregno][rld[r].mode];
+ nr = hard_regno_nregs (nregno, rld[r].mode);
while (--nr >= 0)
SET_REGNO_REG_SET (&reg_has_output_reload,
{
if (!targetm.hard_regno_mode_ok (regno, new_mode))
continue;
- if (hard_regno_nregs[regno][new_mode] > REG_NREGS (reg))
+ if (hard_regno_nregs (regno, new_mode) > REG_NREGS (reg))
continue;
reg = reload_adjust_reg_for_mode (reg, new_mode);
}
if (i >= 0 && rld[r].reg_rtx != 0)
{
- int nr = hard_regno_nregs[i][GET_MODE (rld[r].reg_rtx)];
+ int nr = hard_regno_nregs (i, GET_MODE (rld[r].reg_rtx));
int k;
/* For a multi register reload, we need to check if all or part
/* AUTO_INC */ : XEXP (rld[r].in_reg, 0));
int out_regno = REGNO (out);
int out_nregs = (!HARD_REGISTER_NUM_P (out_regno) ? 1
- : hard_regno_nregs[out_regno][mode]);
+ : hard_regno_nregs (out_regno, mode));
bool piecemeal;
spill_reg_store[regno] = new_spill_reg_store[regno];
in_regno = REGNO (in);
in_nregs = (!HARD_REGISTER_NUM_P (in_regno) ? 1
- : hard_regno_nregs[in_regno][mode]);
+ : hard_regno_nregs (in_regno, mode));
reg_last_reload_reg[in_regno] = reg;
gcc_assert (GET_MODE (src_reg) == mode);
src_regno = REGNO (src_reg);
- src_nregs = hard_regno_nregs[src_regno][mode];
+ src_nregs = hard_regno_nregs (src_regno, mode);
/* The place where to find a death note varies with
PRESERVE_DEATH_INFO_REGNO_P . The condition is not
necessarily checked exactly in the code that moves
}
else
{
- int k, out_nregs = hard_regno_nregs[out_regno][mode];
+ int k, out_nregs = hard_regno_nregs (out_regno, mode);
for (k = 0; k < out_nregs; k++)
reg_last_reload_reg[out_regno + k] = 0;
}
/* We will be deleting the insn. Remove the spill reg information. */
- for (k = hard_regno_nregs[last_reload_reg][GET_MODE (reg)]; k-- > 0; )
+ for (k = hard_regno_nregs (last_reload_reg, GET_MODE (reg)); k-- > 0; )
{
spill_reg_store[last_reload_reg + k] = 0;
spill_reg_stored_to[last_reload_reg + k] = 0;
gcc_assert (nregs_xmode
== (nunits
* HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
- gcc_assert (hard_regno_nregs[xregno][xmode]
- == hard_regno_nregs[xregno][xmode_unit] * nunits);
+ gcc_assert (hard_regno_nregs (xregno, xmode)
+ == hard_regno_nregs (xregno, xmode_unit) * nunits);
/* You can only ask for a SUBREG of a value with holes in the middle
if you don't cross the holes. (Such a SUBREG should be done by
}
}
else
- nregs_xmode = hard_regno_nregs[xregno][xmode];
+ nregs_xmode = hard_regno_nregs (xregno, xmode);
- nregs_ymode = hard_regno_nregs[xregno][ymode];
+ nregs_ymode = hard_regno_nregs (xregno, ymode);
/* Paradoxical subregs are otherwise valid. */
if (!rknown && offset == 0 && ysize > xsize)
If so, mark all of them just like the first. */
if (regno < FIRST_PSEUDO_REGISTER)
{
- int i = hard_regno_nregs[regno][mode];
+ int i = hard_regno_nregs (regno, mode);
if (ref == SET)
{
while (--i >= 0)
if (!targetm.hard_regno_mode_ok (cur_reg, mode))
continue;
- nregs = hard_regno_nregs[cur_reg][mode];
+ nregs = hard_regno_nregs (cur_reg, mode);
for (i = nregs - 1; i >= 0; --i)
if (fixed_regs[cur_reg + i]
int nregs;
int i;
- nregs = hard_regno_nregs[cur_reg][mode];
+ nregs = hard_regno_nregs (cur_reg, mode);
gcc_assert (nregs > 0);
for (i = nregs - 1; i >= 0; --i)
if (! TEST_HARD_REG_BIT (hard_regs_used, cur_reg))
{
/* Check that all hard regs for mode are available. */
- for (i = 1, n = hard_regno_nregs[cur_reg][mode]; i < n; i++)
+ for (i = 1, n = hard_regno_nregs (cur_reg, mode); i < n; i++)
if (TEST_HARD_REG_BIT (hard_regs_used, cur_reg + i)
|| !TEST_HARD_REG_BIT (reg_rename_p->available_for_renaming,
cur_reg + i))
regno = expr_dest_regno (expr);
mode = GET_MODE (EXPR_LHS (expr));
target_available = EXPR_TARGET_AVAILABLE (expr) == 1;
- n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs[regno][mode] : 1;
+ n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs (regno, mode) : 1;
live_available = hard_available = true;
for (i = 0; i < n; i++)
the debug temp to. */
else if (REGNO (reg) < FIRST_PSEUDO_REGISTER
&& (REG_NREGS (reg)
- != hard_regno_nregs[REGNO (reg)][GET_MODE (dest)]))
+ != hard_regno_nregs (REGNO (reg), GET_MODE (dest))))
breg = NULL;
/* Yay, we can use SRC, just adjust its mode. */
else
if ((paradoxical_subreg_p (mode, DECL_MODE (expr))
|| (store_reg_p
&& !COMPLEX_MODE_P (DECL_MODE (expr))
- && hard_regno_nregs[REGNO (loc)][DECL_MODE (expr)] == 1))
+ && hard_regno_nregs (REGNO (loc), DECL_MODE (expr)) == 1))
&& offset + byte_lowpart_offset (DECL_MODE (expr), mode) == 0)
{
mode = DECL_MODE (expr);
rtx new_loc = NULL;
if (REG_P (loc[n_var_parts])
- && hard_regno_nregs[REGNO (loc[n_var_parts])][mode] * 2
- == hard_regno_nregs[REGNO (loc[n_var_parts])][wider_mode]
+ && hard_regno_nregs (REGNO (loc[n_var_parts]), mode) * 2
+ == hard_regno_nregs (REGNO (loc[n_var_parts]), wider_mode)
&& end_hard_regno (mode, REGNO (loc[n_var_parts]))
== REGNO (loc2))
{
name = IDENTIFIER_POINTER (DECL_NAME (decl));
ASM_DECLARE_REGISTER_GLOBAL (asm_out_file, decl, reg_number, name);
#endif
- nregs = hard_regno_nregs[reg_number][mode];
+ nregs = hard_regno_nregs (reg_number, mode);
while (nregs > 0)
globalize_reg (decl, reg_number + --nregs);
}