This patch replaces "COPY_HARD_REG_SET (x, y)" with "x = y".
2019-09-09 Richard Sandiford <richard.sandiford@arm.com>
gcc/
* hard-reg-set.h (COPY_HARD_REG_SET): Delete.
* caller-save.c (save_call_clobbered_regs): Use assignment instead
of COPY_HARD_REG_SET.
* config/epiphany/epiphany.c (epiphany_compute_frame_size): Likewise.
(epiphany_conditional_register_usage): Likewise.
* config/frv/frv.c (frv_ifcvt_modify_tests): Likewise.
* config/gcn/gcn.c (gcn_md_reorg): Likewise.
* config/ia64/ia64.c (ia64_compute_frame_size): Likewise.
* config/m32c/m32c.c (m32c_register_move_cost): Likewise.
* config/m68k/m68k.c (m68k_conditional_register_usage): Likewise.
* config/mips/mips.c (mips_class_max_nregs): Likewise.
* config/pdp11/pdp11.c (pdp11_conditional_register_usage): Likewise.
* config/rs6000/rs6000.c (rs6000_register_move_cost): Likewise.
* config/sh/sh.c (output_stack_adjust): Likewise.
* final.c (collect_fn_hard_reg_usage): Likewise.
(get_call_reg_set_usage): Likewise.
* ira-build.c (ira_create_object, remove_low_level_allocnos)
(ira_flattening): Likewise.
* ira-color.c (add_allocno_hard_regs, add_allocno_hard_regs_to_forest)
(setup_left_conflict_sizes_p, setup_profitable_hard_regs)
(get_conflict_and_start_profitable_regs, allocno_reload_assign)
(ira_reassign_pseudos): Likewise.
* ira-conflicts.c (print_allocno_conflicts): Likewise.
(ira_build_conflicts): Likewise.
* ira-costs.c (restrict_cost_classes): Likewise.
(setup_regno_cost_classes_by_aclass): Likewise.
* ira.c (setup_class_hard_regs, setup_alloc_regs): Likewise.
(setup_reg_subclasses, setup_class_subset_and_memory_move_costs)
(setup_stack_reg_pressure_class, setup_pressure_classes)
(setup_allocno_and_important_classes, setup_class_translate_array)
(setup_reg_class_relations, setup_prohibited_class_mode_regs)
(ira_setup_eliminable_regset): Likewise.
* lra-assigns.c (find_hard_regno_for_1): Likewise.
(setup_live_pseudos_and_spill_after_risky_transforms): Likewise.
* lra-constraints.c (prohibited_class_reg_set_mode_p): Likewise.
(process_alt_operands, inherit_in_ebb): Likewise.
* lra-lives.c (process_bb_lives): Likewise.
* lra-spills.c (assign_spill_hard_regs): Likewise.
* lra.c (lra): Likewise.
* mode-switching.c (new_seginfo): Likewise.
* postreload.c (reload_combine): Likewise.
* reg-stack.c (straighten_stack): Likewise.
* reginfo.c (save_register_info, restore_register_info): Likewise.
(init_reg_sets_1, record_subregs_of_mode): Likewise.
* regrename.c (create_new_chain, rename_chains): Likewise.
* reload1.c (order_regs_for_reload, find_reg): Likewise.
(find_reload_regs): Likewise.
* resource.c (find_dead_or_set_registers): Likewise.
(mark_target_live_regs): Likewise.
* sel-sched.c (mark_unavailable_hard_regs): Likewise.
From-SVN: r275528
+2019-09-09 Richard Sandiford <richard.sandiford@arm.com>
+
+ * hard-reg-set.h (COPY_HARD_REG_SET): Delete.
+ * caller-save.c (save_call_clobbered_regs): Use assignment instead
+ of COPY_HARD_REG_SET.
+ * config/epiphany/epiphany.c (epiphany_compute_frame_size): Likewise.
+ (epiphany_conditional_register_usage): Likewise.
+ * config/frv/frv.c (frv_ifcvt_modify_tests): Likewise.
+ * config/gcn/gcn.c (gcn_md_reorg): Likewise.
+ * config/ia64/ia64.c (ia64_compute_frame_size): Likewise.
+ * config/m32c/m32c.c (m32c_register_move_cost): Likewise.
+ * config/m68k/m68k.c (m68k_conditional_register_usage): Likewise.
+ * config/mips/mips.c (mips_class_max_nregs): Likewise.
+ * config/pdp11/pdp11.c (pdp11_conditional_register_usage): Likewise.
+ * config/rs6000/rs6000.c (rs6000_register_move_cost): Likewise.
+ * config/sh/sh.c (output_stack_adjust): Likewise.
+ * final.c (collect_fn_hard_reg_usage): Likewise.
+ (get_call_reg_set_usage): Likewise.
+ * ira-build.c (ira_create_object, remove_low_level_allocnos)
+ (ira_flattening): Likewise.
+ * ira-color.c (add_allocno_hard_regs, add_allocno_hard_regs_to_forest)
+ (setup_left_conflict_sizes_p, setup_profitable_hard_regs)
+ (get_conflict_and_start_profitable_regs, allocno_reload_assign)
+ (ira_reassign_pseudos): Likewise.
+ * ira-conflicts.c (print_allocno_conflicts): Likewise.
+ (ira_build_conflicts): Likewise.
+ * ira-costs.c (restrict_cost_classes): Likewise.
+ (setup_regno_cost_classes_by_aclass): Likewise.
+ * ira.c (setup_class_hard_regs, setup_alloc_regs): Likewise.
+ (setup_reg_subclasses, setup_class_subset_and_memory_move_costs)
+ (setup_stack_reg_pressure_class, setup_pressure_classes)
+ (setup_allocno_and_important_classes, setup_class_translate_array)
+ (setup_reg_class_relations, setup_prohibited_class_mode_regs)
+ (ira_setup_eliminable_regset): Likewise.
+ * lra-assigns.c (find_hard_regno_for_1): Likewise.
+ (setup_live_pseudos_and_spill_after_risky_transforms): Likewise.
+ * lra-constraints.c (prohibited_class_reg_set_mode_p): Likewise.
+ (process_alt_operands, inherit_in_ebb): Likewise.
+ * lra-lives.c (process_bb_lives): Likewise.
+ * lra-spills.c (assign_spill_hard_regs): Likewise.
+ * lra.c (lra): Likewise.
+ * mode-switching.c (new_seginfo): Likewise.
+ * postreload.c (reload_combine): Likewise.
+ * reg-stack.c (straighten_stack): Likewise.
+ * reginfo.c (save_register_info, restore_register_info): Likewise.
+ (init_reg_sets_1, record_subregs_of_mode): Likewise.
+ * regrename.c (create_new_chain, rename_chains): Likewise.
+ * reload1.c (order_regs_for_reload, find_reg): Likewise.
+ (find_reload_regs): Likewise.
+ * resource.c (find_dead_or_set_registers): Likewise.
+ (mark_target_live_regs): Likewise.
+ * sel-sched.c (mark_unavailable_hard_regs): Likewise.
+
2019-09-09 Richard Sandiford <richard.sandiford@arm.com>
* rtl.h (CALL_INSN_FUNCTION_USAGE): Document what SETs mean.
if (code == JUMP_INSN)
/* Restore all registers if this is a JUMP_INSN. */
- COPY_HARD_REG_SET (referenced_regs, hard_regs_saved);
+ referenced_regs = hard_regs_saved;
else
{
CLEAR_HARD_REG_SET (referenced_regs);
current_frame_info.var_size = var_size;
current_frame_info.args_size = args_size;
current_frame_info.reg_size = reg_size;
- COPY_HARD_REG_SET (current_frame_info.gmask, gmask);
+ current_frame_info.gmask = gmask;
current_frame_info.first_slot = first_slot;
current_frame_info.last_slot = last_slot;
current_frame_info.first_slot_offset = first_slot_offset;
}
if (!TARGET_PREFER_SHORT_INSN_REGS)
CLEAR_HARD_REG_SET (reg_class_contents[SHORT_INSN_REGS]);
- COPY_HARD_REG_SET (reg_class_contents[SIBCALL_REGS],
- reg_class_contents[GENERAL_REGS]);
+ reg_class_contents[SIBCALL_REGS] = reg_class_contents[GENERAL_REGS];
/* It would be simpler and quicker if we could just use
AND_COMPL_HARD_REG_SET, alas, call_used_reg_set is yet uninitialized;
it is set up later by our caller. */
not fixed. However, allow the ICC/ICR temporary registers to be allocated
if we did not need to use them in reloading other registers. */
memset (&tmp_reg->regs, 0, sizeof (tmp_reg->regs));
- COPY_HARD_REG_SET (tmp_reg->regs, call_used_reg_set);
+ tmp_reg->regs = call_used_reg_set;
AND_COMPL_HARD_REG_SET (tmp_reg->regs, fixed_reg_set);
SET_HARD_REG_BIT (tmp_reg->regs, ICC_TEMP);
SET_HARD_REG_BIT (tmp_reg->regs, ICR_TEMP);
&& gcn_vmem_insn_p (itype))
{
HARD_REG_SET regs;
- COPY_HARD_REG_SET (regs, prev_insn->writes);
+ regs = prev_insn->writes;
AND_HARD_REG_SET (regs, ireads);
if (hard_reg_set_intersect_p
(regs, reg_class_contents[(int) SGPR_REGS]))
&& get_attr_laneselect (insn) == LANESELECT_YES)
{
HARD_REG_SET regs;
- COPY_HARD_REG_SET (regs, prev_insn->writes);
+ regs = prev_insn->writes;
AND_HARD_REG_SET (regs, ireads);
if (hard_reg_set_intersect_p
(regs, reg_class_contents[(int) SGPR_REGS])
&& itype == TYPE_VOP_DPP)
{
HARD_REG_SET regs;
- COPY_HARD_REG_SET (regs, prev_insn->writes);
+ regs = prev_insn->writes;
AND_HARD_REG_SET (regs, ireads);
if (hard_reg_set_intersect_p
(regs, reg_class_contents[(int) VGPR_REGS]))
back[oldest].insn = insn;
back[oldest].unit = iunit;
back[oldest].delayeduse = idelayeduse;
- COPY_HARD_REG_SET (back[oldest].writes, iwrites);
- COPY_HARD_REG_SET (back[oldest].reads, ireads);
+ back[oldest].writes = iwrites;
+ back[oldest].reads = ireads;
back[oldest].age = 0;
oldest = (oldest + 1) % max_waits;
current_frame_info.spill_cfa_off = pretend_args_size - 16;
current_frame_info.spill_size = spill_size;
current_frame_info.extra_spill_size = extra_spill_size;
- COPY_HARD_REG_SET (current_frame_info.mask, mask);
+ current_frame_info.mask = mask;
current_frame_info.n_spilled = n_spilled;
current_frame_info.initialized = reload_completed;
}
HARD_REG_SET cc;
/* FIXME: pick real values, but not 2 for now. */
- COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
+ cc = reg_class_contents[from];
IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
if (mode == QImode
HARD_REG_SET x;
if (!TARGET_HARD_FLOAT)
{
- COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
+ x = reg_class_contents[FP_REGS];
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (x, i))
fixed_regs[i] = call_used_regs[i] = 1;
HARD_REG_SET left;
size = 0x8000;
- COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
+ left = reg_class_contents[rclass];
if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
{
if (mips_hard_regno_mode_ok (ST_REG_FIRST, mode))
HARD_REG_SET x;
if (!TARGET_FPU)
{
- COPY_HARD_REG_SET (x, reg_class_contents[(int)FPU_REGS]);
+ x = reg_class_contents[FPU_REGS];
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++ )
if (TEST_HARD_REG_BIT (x, i))
fixed_regs[i] = call_used_regs[i] = 1;
Do this first so we give best-case answers for union classes
containing both gprs and vsx regs. */
HARD_REG_SET to_vsx, from_vsx;
- COPY_HARD_REG_SET (to_vsx, reg_class_contents[to]);
+ to_vsx = reg_class_contents[to];
AND_HARD_REG_SET (to_vsx, reg_class_contents[VSX_REGS]);
- COPY_HARD_REG_SET (from_vsx, reg_class_contents[from]);
+ from_vsx = reg_class_contents[from];
AND_HARD_REG_SET (from_vsx, reg_class_contents[VSX_REGS]);
if (!hard_reg_set_empty_p (to_vsx)
&& !hard_reg_set_empty_p (from_vsx)
if (temp < 0 && ! current_function_interrupt && epilogue_p >= 0)
{
HARD_REG_SET temps;
- COPY_HARD_REG_SET (temps, call_used_reg_set);
+ temps = call_used_reg_set;
AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
if (epilogue_p > 0)
{
{
HARD_REG_SET temps;
- COPY_HARD_REG_SET (temps, *live_regs_mask);
+ temps = *live_regs_mask;
CLEAR_HARD_REG_BIT (temps, REGNO (reg));
temp = scavenge_reg (&temps);
}
node = cgraph_node::rtl_info (current_function_decl);
gcc_assert (node != NULL);
- COPY_HARD_REG_SET (node->function_used_regs, function_used_regs);
+ node->function_used_regs = function_used_regs;
node->function_used_regs_valid = 1;
}
if (node != NULL
&& node->function_used_regs_valid)
{
- COPY_HARD_REG_SET (*reg_set, node->function_used_regs);
+ *reg_set = node->function_used_regs;
AND_HARD_REG_SET (*reg_set, default_set);
return true;
}
}
- COPY_HARD_REG_SET (*reg_set, default_set);
+ *reg_set = default_set;
targetm.remove_extra_call_preserved_regs (insn, reg_set);
return false;
}
CLEAR_HARD_REG_SET and SET_HARD_REG_SET.
These take just one argument.
- Also define macros for copying hard reg sets:
- COPY_HARD_REG_SET and COMPL_HARD_REG_SET.
- These take two arguments TO and FROM; they read from FROM
- and store into TO. COMPL_HARD_REG_SET complements each bit.
+ Also define macros for copying the complement of a hard reg set:
+ COMPL_HARD_REG_SET.
+ This takes two arguments TO and FROM; it reads from FROM
+ and stores into TO.
Also define macros for combining hard reg sets:
IOR_HARD_REG_SET and AND_HARD_REG_SET.
#define CLEAR_HARD_REG_SET(TO) ((TO) = HARD_CONST (0))
#define SET_HARD_REG_SET(TO) ((TO) = ~ HARD_CONST (0))
-#define COPY_HARD_REG_SET(TO, FROM) ((TO) = (FROM))
#define COMPL_HARD_REG_SET(TO, FROM) ((TO) = ~(FROM))
#define IOR_HARD_REG_SET(TO, FROM) ((TO) |= (FROM))
set.elts[i] = -1;
}
-inline void
-COPY_HARD_REG_SET (HARD_REG_SET &to, const_hard_reg_set from)
-{
- to = from;
-}
-
inline void
COMPL_HARD_REG_SET (HARD_REG_SET &to, const_hard_reg_set from)
{
OBJECT_CONFLICT_VEC_P (obj) = false;
OBJECT_CONFLICT_ARRAY (obj) = NULL;
OBJECT_NUM_CONFLICTS (obj) = 0;
- COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
- COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
+ OBJECT_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
reg_class_contents[aclass]);
IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
ALLOCNO_CAP_MEMBER (a) = NULL;
FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
- COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
- OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
+ OBJECT_CONFLICT_HARD_REGS (obj)
+ = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj);
#ifdef STACK_REGS
if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
ALLOCNO_NO_STACK_REG_P (a) = true;
flattening. */
continue;
FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
- COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- OBJECT_CONFLICT_HARD_REGS (obj));
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
+ = OBJECT_CONFLICT_HARD_REGS (obj);
#ifdef STACK_REGS
ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
allocno_hard_regs_t hv;
gcc_assert (! hard_reg_set_empty_p (set));
- COPY_HARD_REG_SET (temp.set, set);
+ temp.set = set;
if ((hv = find_hard_regs (&temp)) != NULL)
hv->cost += cost;
else
{
hv = ((struct allocno_hard_regs *)
ira_allocate (sizeof (struct allocno_hard_regs)));
- COPY_HARD_REG_SET (hv->set, set);
+ hv->set = set;
hv->cost = cost;
allocno_hard_regs_vec.safe_push (hv);
insert_hard_regs (hv);
hard_regs_node_vec.safe_push (node);
else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
{
- COPY_HARD_REG_SET (temp_set, hv->set);
+ temp_set = hv->set;
AND_HARD_REG_SET (temp_set, node->hard_regs->set);
hv2 = add_allocno_hard_regs (temp_set, hv->cost);
add_allocno_hard_regs_to_forest (&node->first, hv2);
nobj = ALLOCNO_NUM_OBJECTS (a);
data = ALLOCNO_COLOR_DATA (a);
subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
- COPY_HARD_REG_SET (profitable_hard_regs, data->profitable_hard_regs);
+ profitable_hard_regs = data->profitable_hard_regs;
node = data->hard_regs_node;
node_preorder_num = node->preorder_num;
- COPY_HARD_REG_SET (node_set, node->hard_regs->set);
+ node_set = node->hard_regs->set;
node_check_tick++;
for (k = 0; k < nobj; k++)
{
->profitable_hard_regs))
continue;
conflict_node = conflict_data->hard_regs_node;
- COPY_HARD_REG_SET (conflict_node_set, conflict_node->hard_regs->set);
+ conflict_node_set = conflict_node->hard_regs->set;
if (hard_reg_set_subset_p (node_set, conflict_node_set))
temp_node = node;
else
int j, n, hard_regno;
enum reg_class aclass;
- COPY_HARD_REG_SET (temp_set, temp_node->hard_regs->set);
+ temp_set = temp_node->hard_regs->set;
AND_HARD_REG_SET (temp_set, profitable_hard_regs);
aclass = ALLOCNO_CLASS (a);
for (n = 0, j = ira_class_hard_regs_num[aclass] - 1; j >= 0; j--)
else
{
mode = ALLOCNO_MODE (a);
- COPY_HARD_REG_SET (data->profitable_hard_regs,
- ira_useful_class_mode_regs[aclass][mode]);
+ data->profitable_hard_regs
+ = ira_useful_class_mode_regs[aclass][mode];
nobj = ALLOCNO_NUM_OBJECTS (a);
for (k = 0; k < nobj; k++)
{
for (i = 0; i < nwords; i++)
{
obj = ALLOCNO_OBJECT (a, i);
- COPY_HARD_REG_SET (conflict_regs[i],
- OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
+ conflict_regs[i] = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj);
}
if (retry_p)
{
- COPY_HARD_REG_SET (*start_profitable_regs,
- reg_class_contents[ALLOCNO_CLASS (a)]);
+ *start_profitable_regs = reg_class_contents[ALLOCNO_CLASS (a)];
AND_COMPL_HARD_REG_SET (*start_profitable_regs,
ira_prohibited_class_mode_regs
[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
}
else
- COPY_HARD_REG_SET (*start_profitable_regs,
- ALLOCNO_COLOR_DATA (a)->profitable_hard_regs);
+ *start_profitable_regs = ALLOCNO_COLOR_DATA (a)->profitable_hard_regs;
}
/* Return true if HARD_REGNO is ok for assigning to allocno A with
for (i = 0; i < n; i++)
{
ira_object_t obj = ALLOCNO_OBJECT (a, i);
- COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
+ saved[i] = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj);
IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
for (i = 0; i < n; i++)
{
ira_object_t obj = ALLOCNO_OBJECT (a, i);
- COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) = saved[i];
}
return reg_renumber[regno] >= 0;
}
for (i = 0; i < num; i++)
{
regno = spilled_pseudo_regs[i];
- COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
+ forbidden_regs = bad_spill_regs;
IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
gcc_assert (reg_renumber[regno] < 0);
putc (')', file);
}
}
- COPY_HARD_REG_SET (conflicting_hard_regs, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
+ conflicting_hard_regs = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj);
AND_COMPL_HARD_REG_SET (conflicting_hard_regs, ira_no_alloc_regs);
AND_HARD_REG_SET (conflicting_hard_regs,
reg_class_contents[ALLOCNO_CLASS (a)]);
print_hard_reg_set (file, "\n;; total conflict hard regs:",
conflicting_hard_regs);
- COPY_HARD_REG_SET (conflicting_hard_regs, OBJECT_CONFLICT_HARD_REGS (obj));
+ conflicting_hard_regs = OBJECT_CONFLICT_HARD_REGS (obj);
AND_COMPL_HARD_REG_SET (conflicting_hard_regs, ira_no_alloc_regs);
AND_HARD_REG_SET (conflicting_hard_regs,
reg_class_contents[ALLOCNO_CLASS (a)]);
CLEAR_HARD_REG_SET (temp_hard_reg_set);
else
{
- COPY_HARD_REG_SET (temp_hard_reg_set, reg_class_contents[base]);
+ temp_hard_reg_set = reg_class_contents[base];
AND_COMPL_HARD_REG_SET (temp_hard_reg_set, ira_no_alloc_regs);
AND_HARD_REG_SET (temp_hard_reg_set, call_used_reg_set);
}
/* Calculate the set of registers in CL that belong to REGS and
are valid for MODE. */
HARD_REG_SET valid_for_cl;
- COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
+ valid_for_cl = reg_class_contents[cl];
AND_HARD_REG_SET (valid_for_cl, regs);
AND_COMPL_HARD_REG_SET (valid_for_cl,
ira_prohibited_class_mode_regs[cl][mode]);
if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
{
- COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
+ temp = reg_class_contents[aclass];
AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
/* We exclude classes from consideration which are subsets of
ACLASS only if ACLASS is an uniform class. */
{
/* Exclude non-uniform classes which are subsets of
ACLASS. */
- COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
+ temp2 = reg_class_contents[cl];
AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
continue;
ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
+ temp_hard_regset = reg_class_contents[cl];
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
CLEAR_HARD_REG_SET (processed_hard_reg_set);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
#ifdef ADJUST_REG_ALLOC_ORDER
ADJUST_REG_ALLOC_ORDER;
#endif
- COPY_HARD_REG_SET (no_unit_alloc_regs, fixed_nonglobal_reg_set);
+ no_unit_alloc_regs = fixed_nonglobal_reg_set;
if (! use_hard_frame_p)
SET_HARD_REG_BIT (no_unit_alloc_regs, HARD_FRAME_POINTER_REGNUM);
setup_class_hard_regs ();
if (i == (int) NO_REGS)
continue;
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
+ temp_hard_regset = reg_class_contents[i];
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
if (hard_reg_set_empty_p (temp_hard_regset))
continue;
{
enum reg_class *p;
- COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[j]);
+ temp_hard_regset2 = reg_class_contents[j];
AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
if (! hard_reg_set_subset_p (temp_hard_regset,
temp_hard_regset2))
for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
+ temp_hard_regset = reg_class_contents[cl];
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
- COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
+ temp_hard_regset2 = reg_class_contents[cl2];
AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
ira_class_subset_p[cl][cl2]
= hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2);
for (i = 0; i < ira_pressure_classes_num; i++)
{
cl = ira_pressure_classes[i];
- COPY_HARD_REG_SET (temp_hard_regset2, temp_hard_regset);
+ temp_hard_regset2 = temp_hard_regset;
AND_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
size = hard_reg_set_size (temp_hard_regset2);
if (best < size)
register pressure class. */
for (m = 0; m < NUM_MACHINE_MODES; m++)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
+ temp_hard_regset = reg_class_contents[cl];
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
AND_COMPL_HARD_REG_SET (temp_hard_regset,
ira_prohibited_class_mode_regs[cl][m]);
}
curr = 0;
insert_p = true;
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
+ temp_hard_regset = reg_class_contents[cl];
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
/* Remove so far added pressure classes which are subset of the
current candidate class. Prefer GENERAL_REGS as a pressure
for (i = 0; i < n; i++)
{
cl2 = pressure_classes[i];
- COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
+ temp_hard_regset2 = reg_class_contents[cl2];
AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
if (hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2)
&& (! hard_reg_set_equal_p (temp_hard_regset,
registers available for the allocation. */
CLEAR_HARD_REG_SET (temp_hard_regset);
CLEAR_HARD_REG_SET (temp_hard_regset2);
- COPY_HARD_REG_SET (ignore_hard_regs, no_unit_alloc_regs);
+ ignore_hard_regs = no_unit_alloc_regs;
for (cl = 0; cl < LIM_REG_CLASSES; cl++)
{
/* For some targets (like MIPS with MD_REGS), there are some
same set of hard registers. */
for (i = 0; i < LIM_REG_CLASSES; i++)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
+ temp_hard_regset = reg_class_contents[i];
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
for (j = 0; j < n; j++)
{
cl = classes[j];
- COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
+ temp_hard_regset2 = reg_class_contents[cl];
AND_COMPL_HARD_REG_SET (temp_hard_regset2,
no_unit_alloc_regs);
if (hard_reg_set_equal_p (temp_hard_regset,
for (cl = 0; cl < N_REG_CLASSES; cl++)
if (ira_class_hard_regs_num[cl] > 0)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
+ temp_hard_regset = reg_class_contents[cl];
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
set_p = false;
for (j = 0; j < ira_allocno_classes_num; j++)
{
- COPY_HARD_REG_SET (temp_hard_regset2,
- reg_class_contents[ira_allocno_classes[j]]);
+ temp_hard_regset2 = reg_class_contents[ira_allocno_classes[j]];
AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
if ((enum reg_class) cl == ira_allocno_classes[j])
break;
for (i = 0; i < classes_num; i++)
{
aclass = classes[i];
- COPY_HARD_REG_SET (temp_hard_regset,
- reg_class_contents[aclass]);
+ temp_hard_regset = reg_class_contents[aclass];
AND_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
if (! hard_reg_set_empty_p (temp_hard_regset))
ira_reg_classes_intersect_p[cl1][cl2] = false;
ira_reg_class_intersect[cl1][cl2] = NO_REGS;
ira_reg_class_subset[cl1][cl2] = NO_REGS;
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl1]);
+ temp_hard_regset = reg_class_contents[cl1];
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
- COPY_HARD_REG_SET (temp_set2, reg_class_contents[cl2]);
+ temp_set2 = reg_class_contents[cl2];
AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
if (hard_reg_set_empty_p (temp_hard_regset)
&& hard_reg_set_empty_p (temp_set2))
}
ira_reg_class_subunion[cl1][cl2] = NO_REGS;
ira_reg_class_superunion[cl1][cl2] = NO_REGS;
- COPY_HARD_REG_SET (intersection_set, reg_class_contents[cl1]);
+ intersection_set = reg_class_contents[cl1];
AND_HARD_REG_SET (intersection_set, reg_class_contents[cl2]);
AND_COMPL_HARD_REG_SET (intersection_set, no_unit_alloc_regs);
- COPY_HARD_REG_SET (union_set, reg_class_contents[cl1]);
+ union_set = reg_class_contents[cl1];
IOR_HARD_REG_SET (union_set, reg_class_contents[cl2]);
AND_COMPL_HARD_REG_SET (union_set, no_unit_alloc_regs);
for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl3]);
+ temp_hard_regset = reg_class_contents[cl3];
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
if (hard_reg_set_subset_p (temp_hard_regset, intersection_set))
{
of CL1 and CL2. */
if (important_class_p[cl3])
{
- COPY_HARD_REG_SET
- (temp_set2,
- reg_class_contents
- [(int) ira_reg_class_intersect[cl1][cl2]]);
+ temp_set2
+ = (reg_class_contents
+ [ira_reg_class_intersect[cl1][cl2]]);
AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
/* If the allocatable hard register sets are
ira_reg_class_intersect[cl1][cl2]])))))
ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
}
- COPY_HARD_REG_SET
- (temp_set2,
- reg_class_contents[(int) ira_reg_class_subset[cl1][cl2]]);
+ temp_set2
+ = reg_class_contents[ira_reg_class_subset[cl1][cl2]];
AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
/* Ignore unavailable hard registers and prefer
/* CL3 allocatable hard register set is inside of
union of allocatable hard register sets of CL1
and CL2. */
- COPY_HARD_REG_SET
- (temp_set2,
- reg_class_contents[(int) ira_reg_class_subunion[cl1][cl2]]);
+ temp_set2
+ = reg_class_contents[ira_reg_class_subunion[cl1][cl2]];
AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
if (ira_reg_class_subunion[cl1][cl2] == NO_REGS
|| (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
/* CL3 allocatable hard register set contains union
of allocatable hard register sets of CL1 and
CL2. */
- COPY_HARD_REG_SET
- (temp_set2,
- reg_class_contents[(int) ira_reg_class_superunion[cl1][cl2]]);
+ temp_set2
+ = reg_class_contents[ira_reg_class_superunion[cl1][cl2]];
AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
if (ira_reg_class_superunion[cl1][cl2] == NO_REGS
|| (hard_reg_set_subset_p (temp_hard_regset, temp_set2)
for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
{
- COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
+ temp_hard_regset = reg_class_contents[cl];
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
for (j = 0; j < NUM_MACHINE_MODES; j++)
{
if (frame_pointer_needed)
df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
- COPY_HARD_REG_SET (ira_no_alloc_regs, no_unit_alloc_regs);
+ ira_no_alloc_regs = no_unit_alloc_regs;
CLEAR_HARD_REG_SET (eliminable_regset);
compute_regs_asm_clobbered ();
HARD_REG_SET impossible_start_hard_regs, available_regs;
if (hard_reg_set_empty_p (regno_set))
- COPY_HARD_REG_SET (conflict_set, lra_no_alloc_regs);
+ conflict_set = lra_no_alloc_regs;
else
{
COMPL_HARD_REG_SET (conflict_set, regno_set);
biggest_nregs = hard_regno_nregs (hard_regno, biggest_mode);
nregs_diff = (biggest_nregs
- hard_regno_nregs (hard_regno, PSEUDO_REGNO_MODE (regno)));
- COPY_HARD_REG_SET (available_regs, reg_class_contents[rclass]);
+ available_regs = reg_class_contents[rclass];
AND_COMPL_HARD_REG_SET (available_regs, lra_no_alloc_regs);
for (i = 0; i < rclass_size; i++)
{
sparseset_set_bit (live_range_hard_reg_pseudos, r2->regno);
}
}
- COPY_HARD_REG_SET (conflict_set, lra_no_alloc_regs);
+ conflict_set = lra_no_alloc_regs;
IOR_HARD_REG_SET (conflict_set, lra_reg_info[regno].conflict_hard_regs);
val = lra_reg_info[regno].val;
offset = lra_reg_info[regno].offset;
HARD_REG_SET temp;
lra_assert (hard_reg_set_subset_p (reg_class_contents[rclass], set));
- COPY_HARD_REG_SET (temp, set);
+ temp = set;
AND_COMPL_HARD_REG_SET (temp, lra_no_alloc_regs);
return (hard_reg_set_subset_p
(temp, ira_prohibited_class_mode_regs[rclass][mode]));
reloads. */
badop = false;
this_alternative = curr_alt[m];
- COPY_HARD_REG_SET (this_alternative_set, curr_alt_set[m]);
+ this_alternative_set = curr_alt_set[m];
winreg = this_alternative != NO_REGS;
break;
}
{
HARD_REG_SET available_regs;
- COPY_HARD_REG_SET (available_regs,
- reg_class_contents[this_alternative]);
+ available_regs = reg_class_contents[this_alternative];
AND_COMPL_HARD_REG_SET
(available_regs,
ira_prohibited_class_mode_regs[this_alternative][mode]);
goto fail;
}
curr_alt[nop] = this_alternative;
- COPY_HARD_REG_SET (curr_alt_set[nop], this_alternative_set);
+ curr_alt_set[nop] = this_alternative_set;
curr_alt_win[nop] = this_alternative_win;
curr_alt_match_win[nop] = this_alternative_match_win;
curr_alt_offmemok[nop] = this_alternative_offmemok;
bitmap_clear (&invalid_invariant_regs);
last_processed_bb = NULL;
CLEAR_HARD_REG_SET (potential_reload_hard_regs);
- COPY_HARD_REG_SET (live_hard_regs, eliminable_regset);
+ live_hard_regs = eliminable_regset;
IOR_HARD_REG_SET (live_hard_regs, lra_no_alloc_regs);
/* We don't process new insns generated in the loop. */
for (curr_insn = tail; curr_insn != PREV_INSN (head); curr_insn = prev_insn)
{
call_insn = curr_insn;
if (! flag_ipa_ra && ! targetm.return_call_with_max_clobbers)
- COPY_HARD_REG_SET(last_call_used_reg_set, call_used_reg_set);
+ last_call_used_reg_set = call_used_reg_set;
else
{
HARD_REG_SET this_call_used_reg_set;
last_call_used_reg_set,
last_call_insn);
}
- COPY_HARD_REG_SET(last_call_used_reg_set, this_call_used_reg_set);
+ last_call_used_reg_set = this_call_used_reg_set;
last_call_insn = call_insn;
}
/* Set up reserved hard regs for every program point. */
reserved_hard_regs = XNEWVEC (HARD_REG_SET, lra_live_max_point);
for (p = 0; p < lra_live_max_point; p++)
- COPY_HARD_REG_SET (reserved_hard_regs[p], lra_no_alloc_regs);
+ reserved_hard_regs[p] = lra_no_alloc_regs;
for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
if (lra_reg_info[i].nrefs != 0
&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
continue;
}
lra_assert (spill_class != NO_REGS);
- COPY_HARD_REG_SET (conflict_hard_regs,
- lra_reg_info[regno].conflict_hard_regs);
+ conflict_hard_regs = lra_reg_info[regno].conflict_hard_regs;
for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
for (p = r->start; p <= r->finish; p++)
IOR_HARD_REG_SET (conflict_hard_regs, reserved_hard_regs[p]);
need it. */
emit_note (NOTE_INSN_DELETED);
- COPY_HARD_REG_SET (lra_no_alloc_regs, ira_no_alloc_regs);
+ lra_no_alloc_regs = ira_no_alloc_regs;
init_reg_info ();
expand_reg_info ();
ptr->insn_ptr = insn;
ptr->bbnum = bb;
ptr->next = NULL;
- COPY_HARD_REG_SET (ptr->regs_live, regs_live);
+ ptr->regs_live = regs_live;
return ptr;
}
REG_SET_TO_HARD_REG_SET (live, live_in);
compute_use_by_pseudos (&live, live_in);
- COPY_HARD_REG_SET (LABEL_LIVE (insn), live);
+ LABEL_LIVE (insn) = live;
IOR_HARD_REG_SET (ever_live_at_start, live);
}
}
if (regstack->top <= 0)
return;
- COPY_HARD_REG_SET (temp_stack.reg_set, regstack->reg_set);
+ temp_stack.reg_set = regstack->reg_set;
for (top = temp_stack.top = regstack->top; top >= 0; top--)
temp_stack.reg[top] = FIRST_STACK_REG + temp_stack.top - top;
/* And similarly for reg_names. */
gcc_assert (sizeof reg_names == sizeof saved_reg_names);
memcpy (saved_reg_names, reg_names, sizeof reg_names);
- COPY_HARD_REG_SET (saved_accessible_reg_set, accessible_reg_set);
- COPY_HARD_REG_SET (saved_operand_reg_set, operand_reg_set);
+ saved_accessible_reg_set = accessible_reg_set;
+ saved_operand_reg_set = operand_reg_set;
}
/* Restore the register information. */
#endif
memcpy (reg_names, saved_reg_names, sizeof reg_names);
- COPY_HARD_REG_SET (accessible_reg_set, saved_accessible_reg_set);
- COPY_HARD_REG_SET (operand_reg_set, saved_operand_reg_set);
+ accessible_reg_set = saved_accessible_reg_set;
+ operand_reg_set = saved_operand_reg_set;
}
/* After switches have been processed, which perhaps alter
HARD_REG_SET c;
int k;
- COPY_HARD_REG_SET (c, reg_class_contents[i]);
+ c = reg_class_contents[i];
IOR_HARD_REG_SET (c, reg_class_contents[j]);
for (k = 0; k < N_REG_CLASSES; k++)
if (hard_reg_set_subset_p (reg_class_contents[k], c)
HARD_REG_SET c;
int k;
- COPY_HARD_REG_SET (c, reg_class_contents[i]);
+ c = reg_class_contents[i];
IOR_HARD_REG_SET (c, reg_class_contents[j]);
for (k = 0; k < N_REG_CLASSES; k++)
if (hard_reg_set_subset_p (c, reg_class_contents[k]))
}
}
- COPY_HARD_REG_SET (call_fixed_reg_set, fixed_reg_set);
- COPY_HARD_REG_SET (fixed_nonglobal_reg_set, fixed_reg_set);
+ call_fixed_reg_set = fixed_reg_set;
+ fixed_nonglobal_reg_set = fixed_reg_set;
/* Preserve global registers if called more than once. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
valid_mode_changes[regno]
= XOBNEW (&valid_mode_changes_obstack, HARD_REG_SET);
- COPY_HARD_REG_SET (*valid_mode_changes[regno],
- simplifiable_subregs (shape));
+ *valid_mode_changes[regno] = simplifiable_subregs (shape);
}
}
CLEAR_HARD_REG_BIT (live_hard_regs, head->regno + nregs);
}
- COPY_HARD_REG_SET (head->hard_conflicts, live_hard_regs);
+ head->hard_conflicts = live_hard_regs;
bitmap_set_bit (&open_chains_set, head->id);
open_chains = head;
&& reg == FRAME_POINTER_REGNUM))
continue;
- COPY_HARD_REG_SET (this_unavailable, unavailable);
+ this_unavailable = unavailable;
reg_class super_class = regrename_find_superclass (this_head, &n_uses,
&this_unavailable);
HARD_REG_SET used_by_pseudos2;
reg_set_iterator rsi;
- COPY_HARD_REG_SET (bad_spill_regs, fixed_reg_set);
+ bad_spill_regs = fixed_reg_set;
memset (spill_cost, 0, sizeof spill_cost);
memset (spill_add_cost, 0, sizeof spill_add_cost);
static int regno_pseudo_regs[FIRST_PSEUDO_REGISTER];
static int best_regno_pseudo_regs[FIRST_PSEUDO_REGISTER];
- COPY_HARD_REG_SET (not_usable, bad_spill_regs);
+ not_usable = bad_spill_regs;
IOR_HARD_REG_SET (not_usable, bad_spill_regs_global);
IOR_COMPL_HARD_REG_SET (not_usable, reg_class_contents[rl->rclass]);
}
}
- COPY_HARD_REG_SET (chain->used_spill_regs, used_spill_regs_local);
+ chain->used_spill_regs = used_spill_regs_local;
IOR_HARD_REG_SET (used_spill_regs, used_spill_regs_local);
memcpy (chain->rld, rld, n_reloads * sizeof (struct reload));
}
target_res = *res;
- COPY_HARD_REG_SET (scratch, target_set.regs);
+ scratch = target_set.regs;
AND_COMPL_HARD_REG_SET (scratch, needed.regs);
AND_COMPL_HARD_REG_SET (target_res.regs, scratch);
fallthrough_res = *res;
- COPY_HARD_REG_SET (scratch, set.regs);
+ scratch = set.regs;
AND_COMPL_HARD_REG_SET (scratch, needed.regs);
AND_COMPL_HARD_REG_SET (fallthrough_res.regs, scratch);
mark_referenced_resources (insn, &needed, true);
mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
- COPY_HARD_REG_SET (scratch, set.regs);
+ scratch = set.regs;
AND_COMPL_HARD_REG_SET (scratch, needed.regs);
AND_COMPL_HARD_REG_SET (res->regs, scratch);
}
update it below. */
if (b == tinfo->block && b != -1 && tinfo->bb_tick == bb_ticks[b])
{
- COPY_HARD_REG_SET (res->regs, tinfo->live_regs);
+ res->regs = tinfo->live_regs;
return;
}
}
IOR_HARD_REG_SET (current_live_regs, start_of_epilogue_needs.regs);
}
- COPY_HARD_REG_SET (res->regs, current_live_regs);
+ res->regs = current_live_regs;
if (tinfo != NULL)
{
tinfo->block = b;
{
mark_referenced_resources (insn, &needed, true);
- COPY_HARD_REG_SET (scratch, needed.regs);
+ scratch = needed.regs;
AND_COMPL_HARD_REG_SET (scratch, set.regs);
IOR_HARD_REG_SET (new_resources.regs, scratch);
}
if (tinfo != NULL)
- {
- COPY_HARD_REG_SET (tinfo->live_regs, res->regs);
- }
+ tinfo->live_regs = res->regs;
}
\f
/* Initialize the resources required by mark_target_live_regs ().
/* Leave regs as 'available' only from the current
register class. */
- COPY_HARD_REG_SET (reg_rename_p->available_for_renaming,
- reg_class_contents[cl]);
+ reg_rename_p->available_for_renaming = reg_class_contents[cl];
mode = GET_MODE (orig_dest);