+2019-09-09 Richard Sandiford <richard.sandiford@arm.com>
+
+ * hard-reg-set.h (HARD_REG_SET::operator|): New function.
+ (HARD_REG_SET::operator|=): Likewise.
+ (IOR_HARD_REG_SET): Delete.
+ * config/gcn/gcn.c (gcn_md_reorg): Use "|" instead of
+ IOR_HARD_REG_SET.
+ * config/m32c/m32c.c (m32c_register_move_cost): Likewise.
+ * config/s390/s390.c (s390_adjust_loop_scan_osc): Likewise.
+ * final.c (collect_fn_hard_reg_usage): Likewise.
+ * hw-doloop.c (scan_loop, optimize_loop): Likewise.
+ * ira-build.c (merge_hard_reg_conflicts): Likewise.
+ (ior_hard_reg_conflicts, create_cap_allocno, propagate_allocno_info)
+ (propagate_some_info_from_allocno): Likewise.
+ (copy_info_to_removed_store_destinations): Likewise.
+ * ira-color.c (add_allocno_hard_regs_to_forest, assign_hard_reg)
+ (allocno_reload_assign, ira_reassign_pseudos): Likewise.
+ (fast_allocation): Likewise.
+ * ira-conflicts.c (ira_build_conflicts): Likewise.
+ * ira-lives.c (make_object_dead, process_single_reg_class_operands)
+ (process_bb_node_lives): Likewise.
+ * ira.c (setup_pressure_classes, setup_reg_class_relations): Likewise.
+ * lra-assigns.c (find_hard_regno_for_1): Likewise.
+ (setup_live_pseudos_and_spill_after_risky_transforms): Likewise.
+ * lra-constraints.c (process_alt_operands, inherit_in_ebb): Likewise.
+ * lra-eliminations.c (spill_pseudos, update_reg_eliminate): Likewise.
+ * lra-lives.c (mark_pseudo_dead, check_pseudos_live_through_calls)
+ (process_bb_lives): Likewise.
+ * lra-spills.c (assign_spill_hard_regs): Likewise.
+ * postreload.c (reload_combine): Likewise.
+ * reginfo.c (init_reg_sets_1): Likewise.
+ * regrename.c (merge_overlapping_regs, find_rename_reg)
+ (merge_chains): Likewise.
+ * reload1.c (maybe_fix_stack_asms, order_regs_for_reload, find_reg)
+ (find_reload_regs, finish_spills, choose_reload_regs_init)
+ (emit_reload_insns): Likewise.
+ * reorg.c (redundant_insn): Likewise.
+ * resource.c (find_dead_or_set_registers, mark_set_resources)
+ (mark_target_live_regs): Likewise.
+ * rtlanal.c (find_all_hard_reg_sets): Likewise.
+ * sched-deps.c (sched_analyze_insn): Likewise.
+ * sel-sched.c (mark_unavailable_hard_regs): Likewise.
+ (find_best_reg_for_expr): Likewise.
+ * shrink-wrap.c (try_shrink_wrapping): Likewise.
+
2019-09-09 Richard Sandiford <richard.sandiford@arm.com>
* hard-reg-set.h (HARD_REG_SET::operator&): New function.
not publish the cycle times for instructions. */
prev_insn->age += 1 + nops_rqd;
- IOR_HARD_REG_SET (written, iwrites);
+ written |= iwrites;
AND_COMPL_HARD_REG_SET (prev_insn->writes, written);
}
HARD_REG_SET cc;
/* FIXME: pick real values, but not 2 for now. */
- cc = reg_class_contents[from];
- IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
+ cc = reg_class_contents[from] | reg_class_contents[(int) to];
if (mode == QImode
&& hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
return false;
find_all_hard_reg_sets (insn, &newregs, true);
- IOR_HARD_REG_SET (modregs, newregs);
+ modregs |= newregs;
set = single_set (insn);
if (!set)
return false;
find_all_hard_reg_sets (insn, &newregs, true);
- IOR_HARD_REG_SET (modregs, newregs);
+ modregs |= newregs;
set = single_set (insn);
if (!set)
call_used_reg_set))
return;
- IOR_HARD_REG_SET (function_used_regs, insn_used_regs);
+ function_used_regs |= insn_used_regs;
}
find_all_hard_reg_sets (insn, &insn_used_regs, false);
- IOR_HARD_REG_SET (function_used_regs, insn_used_regs);
+ function_used_regs |= insn_used_regs;
}
/* Be conservative - mark fixed and global registers as used. */
- IOR_HARD_REG_SET (function_used_regs, fixed_reg_set);
+ function_used_regs |= fixed_reg_set;
#ifdef STACK_REGS
/* Handle STACK_REGS conservatively, since the df-framework does not
return *this;
}
+ HARD_REG_SET
+ operator| (const HARD_REG_SET &other) const
+ {
+ HARD_REG_SET res;
+ for (unsigned int i = 0; i < ARRAY_SIZE (elts); ++i)
+ res.elts[i] = elts[i] | other.elts[i];
+ return res;
+ }
+
+ HARD_REG_SET &
+ operator|= (const HARD_REG_SET &other)
+ {
+ for (unsigned int i = 0; i < ARRAY_SIZE (elts); ++i)
+ elts[i] |= other.elts[i];
+ return *this;
+ }
+
HARD_REG_ELT_TYPE elts[HARD_REG_SET_LONGS];
};
typedef const HARD_REG_SET &const_hard_reg_set;
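The new operators let call sites read as ordinary bitwise expressions.  A
minimal before/after sketch of the two patterns being converted (variable
names here are illustrative only, not taken from the patch):

    HARD_REG_SET to, from, res;

    /* Before: the macro mutated its first argument in place.  */
    IOR_HARD_REG_SET (to, from);
    res = to;
    IOR_HARD_REG_SET (res, from);

    /* After: the same operations written as operators.  */
    to |= from;
    res = to | from;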
CLEAR_HARD_REG_SET and SET_HARD_REG_SET.
These take just one argument.
- Also define a macro for combining hard reg sets:
- IOR_HARD_REG_SET
- This takes two arguments TO and FROM; it reads from FROM
- and combines bitwise into TO. Define also
+ Also define:
IOR_COMPL_HARD_REG_SET and AND_COMPL_HARD_REG_SET
- which use the complement of the set FROM.
+ These take two arguments TO and FROM; they read from FROM
+   and combine its complement bitwise into TO.
Also define:
#define CLEAR_HARD_REG_SET(TO) ((TO) = HARD_CONST (0))
#define SET_HARD_REG_SET(TO) ((TO) = ~ HARD_CONST (0))
-#define IOR_HARD_REG_SET(TO, FROM) ((TO) |= (FROM))
#define IOR_COMPL_HARD_REG_SET(TO, FROM) ((TO) |= ~ (FROM))
#define AND_COMPL_HARD_REG_SET(TO, FROM) ((TO) &= ~ (FROM))
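Note that in this single-word configuration, where HARD_REG_SET is a plain
integer type, the deleted macro already expanded to the operator form:

    /* Old single-word definition -- textually the same as the new syntax.  */
    #define IOR_HARD_REG_SET(TO, FROM) ((TO) |= (FROM))

so the change is purely syntactic here; only the multi-word struct case needs
the operator overloads defined above.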
to.elts[i] &= ~from.elts[i];
}
-inline void
-IOR_HARD_REG_SET (HARD_REG_SET &to, const_hard_reg_set from)
-{
- for (unsigned int i = 0; i < ARRAY_SIZE (to.elts); ++i)
- to.elts[i] |= from.elts[i];
-}
-
inline void
IOR_COMPL_HARD_REG_SET (HARD_REG_SET &to, const_hard_reg_set from)
{
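The body of the deleted inline IOR_HARD_REG_SET is the same element-wise loop
that now lives in HARD_REG_SET::operator|=.  The surviving
IOR_COMPL_HARD_REG_SET could in principle be spelled the same way, since a
complement operator is already available from the earlier patch in this
series (this patch itself relies on it in the lra-assigns.c hunk below); a
sketch under that assumption:

    /* Would be equivalent to IOR_COMPL_HARD_REG_SET (to, from), assuming
       HARD_REG_SET::operator~ and operator|= are both in scope.  */
    to |= ~from;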
CLEAR_HARD_REG_BIT (set_this_insn, REGNO (loop->iter_reg));
else if (reg_mentioned_p (loop->iter_reg, PATTERN (insn)))
loop->iter_reg_used = true;
- IOR_HARD_REG_SET (loop->regs_set_in_loop, set_this_insn);
+ loop->regs_set_in_loop |= set_this_insn;
}
}
}
inner_depth = inner->depth;
/* The set of registers may be changed while optimizing the inner
loop. */
- IOR_HARD_REG_SET (loop->regs_set_in_loop, inner->regs_set_in_loop);
+ loop->regs_set_in_loop |= inner->regs_set_in_loop;
}
loop->depth = inner_depth + 1;
ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
if (!total_only)
- IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
- OBJECT_CONFLICT_HARD_REGS (from_obj));
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
- OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
+ OBJECT_CONFLICT_HARD_REGS (to_obj)
+ |= OBJECT_CONFLICT_HARD_REGS (from_obj);
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj)
+ |= OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj);
}
#ifdef STACK_REGS
if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
{
- IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
+ OBJECT_CONFLICT_HARD_REGS (obj) |= *set;
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= *set;
}
}
ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
- IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap),
- ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap)
+ |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
{
fprintf (ira_dump_file, " Creating cap ");
+= ALLOCNO_CALLS_CROSSED_NUM (a);
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
- IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
- ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a)
+ |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
aclass = ALLOCNO_CLASS (a);
ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
- IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
- ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a));
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a)
+ |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a);
ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
+= ALLOCNO_CALLS_CROSSED_NUM (a);
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
- IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
- ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a)
+ |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
merged_p = true;
i++)
{
node = hard_regs_node_vec[i];
- IOR_HARD_REG_SET (temp_set, node->hard_regs->set);
+ temp_set |= node->hard_regs->set;
}
hv = add_allocno_hard_regs (temp_set, hv->cost);
new_node = create_new_allocno_hard_regs_node (hv);
hard_regno + num);
}
else
- IOR_HARD_REG_SET
- (conflicting_regs[word],
- ira_reg_mode_hard_regset[hard_regno][mode]);
+ conflicting_regs[word]
+ |= ira_reg_mode_hard_regset[hard_regno][mode];
if (hard_reg_set_subset_p (profitable_hard_regs,
conflicting_regs[word]))
goto fail;
{
ira_object_t obj = ALLOCNO_OBJECT (a, i);
saved[i] = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj);
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= forbidden_regs;
if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- call_used_reg_set);
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= call_used_reg_set;
}
ALLOCNO_ASSIGNED_P (a) = false;
aclass = ALLOCNO_CLASS (a);
for (i = 0; i < num; i++)
{
regno = spilled_pseudo_regs[i];
- forbidden_regs = bad_spill_regs;
- IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
- IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
+ forbidden_regs = (bad_spill_regs
+ | pseudo_forbidden_regs[regno]
+ | pseudo_previous_regs[regno]);
gcc_assert (reg_renumber[regno] < 0);
a = ira_regno_allocno_map[regno];
ira_mark_allocation_change (regno);
for (l = 0; l < nr; l++)
{
ira_object_t obj = ALLOCNO_OBJECT (a, l);
- IOR_HARD_REG_SET (conflict_hard_regs,
- OBJECT_CONFLICT_HARD_REGS (obj));
+ conflict_hard_regs |= OBJECT_CONFLICT_HARD_REGS (obj);
for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
for (j = r->start; j <= r->finish; j++)
- IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
+ conflict_hard_regs |= used_hard_regs[j];
}
aclass = ALLOCNO_CLASS (a);
ALLOCNO_ASSIGNED_P (a) = true;
ira_object_t obj = ALLOCNO_OBJECT (a, l);
for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
for (k = r->start; k <= r->finish; k++)
- IOR_HARD_REG_SET (used_hard_regs[k],
- ira_reg_mode_hard_regset[hard_regno][mode]);
+ used_hard_regs[k] |= ira_reg_mode_hard_regset[hard_regno][mode];
}
}
ira_free (sorted_allocnos);
&& REG_USERVAR_P (allocno_reg)
&& ! reg_is_parm_p (allocno_reg)))
{
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- call_used_reg_set);
- IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
- call_used_reg_set);
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= call_used_reg_set;
+ OBJECT_CONFLICT_HARD_REGS (obj) |= call_used_reg_set;
}
else if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
{
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- no_caller_save_reg_set);
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- temp_hard_reg_set);
- IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
- no_caller_save_reg_set);
- IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
- temp_hard_reg_set);
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= no_caller_save_reg_set;
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= temp_hard_reg_set;
+ OBJECT_CONFLICT_HARD_REGS (obj) |= no_caller_save_reg_set;
+ OBJECT_CONFLICT_HARD_REGS (obj) |= temp_hard_reg_set;
}
/* Now we deal with paradoxical subreg cases where certain registers
}
}
- IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), hard_regs_live);
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), hard_regs_live);
+ OBJECT_CONFLICT_HARD_REGS (obj) |= hard_regs_live;
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= hard_regs_live;
/* If IGNORE_REG_FOR_CONFLICTS did not already conflict with OBJ, make
sure it still doesn't. */
/* We could increase costs of A instead of making it
conflicting with the hard register. But it works worse
because it will be spilled in reload in anyway. */
- IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
- reg_class_contents[cl]);
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- reg_class_contents[cl]);
+ OBJECT_CONFLICT_HARD_REGS (obj) |= reg_class_contents[cl];
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= reg_class_contents[cl];
}
}
}
}
if (can_throw_internal (insn))
{
- IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
- this_call_used_reg_set);
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- this_call_used_reg_set);
+ OBJECT_CONFLICT_HARD_REGS (obj)
+ |= this_call_used_reg_set;
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
+ |= this_call_used_reg_set;
}
if (sparseset_bit_p (allocnos_processed, num))
/* Mark it as saved at the next call. */
allocno_saved_at_call[num] = last_call_num + 1;
ALLOCNO_CALLS_CROSSED_NUM (a)++;
- IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
- this_call_used_reg_set);
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a)
+ |= this_call_used_reg_set;
if (cheap_reg != NULL_RTX
&& ALLOCNO_REGNO (a) == (int) REGNO (cheap_reg))
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)++;
break;
if (m >= NUM_MACHINE_MODES)
{
- IOR_HARD_REG_SET (ignore_hard_regs, reg_class_contents[cl]);
+ ignore_hard_regs |= reg_class_contents[cl];
continue;
}
for (i = 0; i < n; i++)
if ((int) pressure_classes[i] == cl)
break;
- IOR_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
+ temp_hard_regset2 |= reg_class_contents[cl];
if (i < n)
- IOR_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
+ temp_hard_regset |= reg_class_contents[cl];
}
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
/* Some targets (like SPARC with ICC reg) have allocatable regs
intersection_set = (reg_class_contents[cl1]
& reg_class_contents[cl2]);
AND_COMPL_HARD_REG_SET (intersection_set, no_unit_alloc_regs);
- union_set = reg_class_contents[cl1];
- IOR_HARD_REG_SET (union_set, reg_class_contents[cl2]);
+ union_set = reg_class_contents[cl1] | reg_class_contents[cl2];
AND_COMPL_HARD_REG_SET (union_set, no_unit_alloc_regs);
for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++)
{
if (hard_reg_set_empty_p (regno_set))
conflict_set = lra_no_alloc_regs;
else
- {
- conflict_set = ~regno_set;
- IOR_HARD_REG_SET (conflict_set, lra_no_alloc_regs);
- }
+ conflict_set = ~regno_set | lra_no_alloc_regs;
rclass = regno_allocno_class_array[regno];
rclass_intersect_p = ira_reg_classes_intersect_p[rclass];
curr_hard_regno_costs_check++;
sparseset_clear (conflict_reload_and_inheritance_pseudos);
sparseset_clear (live_range_hard_reg_pseudos);
- IOR_HARD_REG_SET (conflict_set, lra_reg_info[regno].conflict_hard_regs);
+ conflict_set |= lra_reg_info[regno].conflict_hard_regs;
biggest_mode = lra_reg_info[regno].biggest_mode;
for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
{
}
}
conflict_set = lra_no_alloc_regs;
- IOR_HARD_REG_SET (conflict_set, lra_reg_info[regno].conflict_hard_regs);
+ conflict_set |= lra_reg_info[regno].conflict_hard_regs;
val = lra_reg_info[regno].val;
offset = lra_reg_info[regno].offset;
EXECUTE_IF_SET_IN_SPARSESET (live_range_hard_reg_pseudos, conflict_regno)
if (mode == BLKmode)
break;
this_alternative = reg_class_subunion[this_alternative][cl];
- IOR_HARD_REG_SET (this_alternative_set,
- reg_class_contents[cl]);
+ this_alternative_set |= reg_class_contents[cl];
if (costly_p)
{
this_costly_alternative
= reg_class_subunion[this_costly_alternative][cl];
- IOR_HARD_REG_SET (this_costly_alternative_set,
- reg_class_contents[cl]);
+ this_costly_alternative_set |= reg_class_contents[cl];
}
winreg = true;
if (REG_P (op))
bitmap_clear (&invalid_invariant_regs);
last_processed_bb = NULL;
CLEAR_HARD_REG_SET (potential_reload_hard_regs);
- live_hard_regs = eliminable_regset;
- IOR_HARD_REG_SET (live_hard_regs, lra_no_alloc_regs);
+ live_hard_regs = eliminable_regset | lra_no_alloc_regs;
/* We don't process new insns generated in the loop. */
for (curr_insn = tail; curr_insn != PREV_INSN (head); curr_insn = prev_insn)
{
else
setup_next_usage_insn (src_regno, curr_insn, reloads_num, false);
if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
- IOR_HARD_REG_SET (potential_reload_hard_regs,
- reg_class_contents[cl]);
+ potential_reload_hard_regs |= reg_class_contents[cl];
}
else if (src_regno < 0
&& dst_regno >= lra_constraint_new_regno_start
if (process_invariant_for_inheritance (SET_DEST (curr_set), SET_SRC (curr_set)))
change_p = true;
if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
- IOR_HARD_REG_SET (potential_reload_hard_regs,
- reg_class_contents[cl]);
+ potential_reload_hard_regs |= reg_class_contents[cl];
}
else if (src_regno >= lra_constraint_new_regno_start
&& dst_regno < lra_constraint_new_regno_start
/* Invalidate. */
usage_insns[dst_regno].check = 0;
if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
- IOR_HARD_REG_SET (potential_reload_hard_regs,
- reg_class_contents[cl]);
+ potential_reload_hard_regs |= reg_class_contents[cl];
}
else if (INSN_P (curr_insn))
{
if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num)
reloads_num++;
if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
- IOR_HARD_REG_SET (potential_reload_hard_regs,
- reg_class_contents[cl]);
+ potential_reload_hard_regs |= reg_class_contents[cl];
}
}
if (NONDEBUG_INSN_P (curr_insn))
reg_renumber[i] = -1;
bitmap_ior_into (&to_process, &lra_reg_info[i].insn_bitmap);
}
- IOR_HARD_REG_SET (lra_no_alloc_regs, set);
+ lra_no_alloc_regs |= set;
for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
if (bitmap_bit_p (&to_process, INSN_UID (insn)))
{
result = true;
}
}
- IOR_HARD_REG_SET (lra_no_alloc_regs, temp_hard_reg_set);
+ lra_no_alloc_regs |= temp_hard_reg_set;
AND_COMPL_HARD_REG_SET (eliminable_regset, temp_hard_reg_set);
spill_pseudos (temp_hard_reg_set);
return result;
mark_pseudo_dead (int regno)
{
lra_assert (!HARD_REGISTER_NUM_P (regno));
- IOR_HARD_REG_SET (lra_reg_info[regno].conflict_hard_regs, hard_regs_live);
+ lra_reg_info[regno].conflict_hard_regs |= hard_regs_live;
if (!sparseset_bit_p (pseudos_live, regno))
return;
lra_reg_info[regno].call_insn = call_insn;
sparseset_clear_bit (pseudos_live_through_calls, regno);
- IOR_HARD_REG_SET (lra_reg_info[regno].conflict_hard_regs,
- last_call_used_reg_set);
+ lra_reg_info[regno].conflict_hard_regs |= last_call_used_reg_set;
for (hr = 0; HARD_REGISTER_NUM_P (hr); hr++)
if (targetm.hard_regno_call_part_clobbered (call_insn, hr,
EXECUTE_IF_SET_IN_SPARSESET (pseudos_live, j)
{
- IOR_HARD_REG_SET (lra_reg_info[j].actual_call_used_reg_set,
- this_call_used_reg_set);
+ lra_reg_info[j].actual_call_used_reg_set
+ |= this_call_used_reg_set;
if (flush)
check_pseudos_live_through_calls (j,
conflict_hard_regs = lra_reg_info[regno].conflict_hard_regs;
for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
for (p = r->start; p <= r->finish; p++)
- IOR_HARD_REG_SET (conflict_hard_regs, reserved_hard_regs[p]);
+ conflict_hard_regs |= reserved_hard_regs[p];
spill_class_size = ira_class_hard_regs_num[spill_class];
mode = lra_reg_info[regno].biggest_mode;
for (k = 0; k < spill_class_size; k++)
REG_SET_TO_HARD_REG_SET (live, live_in);
compute_use_by_pseudos (&live, live_in);
LABEL_LIVE (insn) = live;
- IOR_HARD_REG_SET (ever_live_at_start, live);
+ ever_live_at_start |= live;
}
}
HARD_REG_SET c;
int k;
- c = reg_class_contents[i];
- IOR_HARD_REG_SET (c, reg_class_contents[j]);
+ c = reg_class_contents[i] | reg_class_contents[j];
for (k = 0; k < N_REG_CLASSES; k++)
if (hard_reg_set_subset_p (reg_class_contents[k], c)
&& !hard_reg_set_subset_p (reg_class_contents[k],
HARD_REG_SET c;
int k;
- c = reg_class_contents[i];
- IOR_HARD_REG_SET (c, reg_class_contents[j]);
+ c = reg_class_contents[i] | reg_class_contents[j];
for (k = 0; k < N_REG_CLASSES; k++)
if (hard_reg_set_subset_p (c, reg_class_contents[k]))
break;
{
bitmap_iterator bi;
unsigned i;
- IOR_HARD_REG_SET (*pset, head->hard_conflicts);
+ *pset |= head->hard_conflicts;
EXECUTE_IF_SET_IN_BITMAP (&head->conflicts, 0, i, bi)
{
du_head_p other = regrename_chain_from_id (i);
If the chain needs a call-saved register, mark the call-used
registers as unavailable. */
if (this_head->need_caller_save_reg)
- IOR_HARD_REG_SET (*unavailable, call_used_reg_set);
+ *unavailable |= call_used_reg_set;
/* Mark registers that overlap this chain's lifetime as unavailable. */
merge_overlapping_regs (unavailable, this_head);
c2->first = c2->last = NULL;
c2->id = c1->id;
- IOR_HARD_REG_SET (c1->hard_conflicts, c2->hard_conflicts);
+ c1->hard_conflicts |= c2->hard_conflicts;
bitmap_ior_into (&c1->conflicts, &c2->conflicts);
c1->need_caller_save_reg |= c2->need_caller_save_reg;
{
/* End of one alternative - mark the regs in the current
class, and reset the class. */
- IOR_HARD_REG_SET (allowed, reg_class_contents[cls]);
+ allowed |= reg_class_contents[cls];
cls = NO_REGS;
p++;
if (c == '#')
REG_SET_TO_HARD_REG_SET (used_by_pseudos, &chain->live_throughout);
REG_SET_TO_HARD_REG_SET (used_by_pseudos2, &chain->dead_or_set);
- IOR_HARD_REG_SET (bad_spill_regs, used_by_pseudos);
- IOR_HARD_REG_SET (bad_spill_regs, used_by_pseudos2);
+ bad_spill_regs |= used_by_pseudos;
+ bad_spill_regs |= used_by_pseudos2;
/* Now find out which pseudos are allocated to it, and update
hard_reg_n_uses. */
static int regno_pseudo_regs[FIRST_PSEUDO_REGISTER];
static int best_regno_pseudo_regs[FIRST_PSEUDO_REGISTER];
- not_usable = bad_spill_regs;
- IOR_HARD_REG_SET (not_usable, bad_spill_regs_global);
+ not_usable = bad_spill_regs | bad_spill_regs_global;
IOR_COMPL_HARD_REG_SET (not_usable, reg_class_contents[rl->rclass]);
CLEAR_HARD_REG_SET (used_by_other_reload);
}
chain->used_spill_regs = used_spill_regs_local;
- IOR_HARD_REG_SET (used_spill_regs, used_spill_regs_local);
+ used_spill_regs |= used_spill_regs_local;
memcpy (chain->rld, rld, n_reloads * sizeof (struct reload));
}
EXECUTE_IF_SET_IN_REG_SET
(&chain->live_throughout, FIRST_PSEUDO_REGISTER, i, rsi)
{
- IOR_HARD_REG_SET (pseudo_forbidden_regs[i],
- chain->used_spill_regs);
+ pseudo_forbidden_regs[i] |= chain->used_spill_regs;
}
EXECUTE_IF_SET_IN_REG_SET
(&chain->dead_or_set, FIRST_PSEUDO_REGISTER, i, rsi)
{
- IOR_HARD_REG_SET (pseudo_forbidden_regs[i],
- chain->used_spill_regs);
+ pseudo_forbidden_regs[i] |= chain->used_spill_regs;
}
}
{
REG_SET_TO_HARD_REG_SET (used_by_pseudos, &chain->live_throughout);
REG_SET_TO_HARD_REG_SET (used_by_pseudos2, &chain->dead_or_set);
- IOR_HARD_REG_SET (used_by_pseudos, used_by_pseudos2);
+ used_by_pseudos |= used_by_pseudos2;
compute_use_by_pseudos (&used_by_pseudos, &chain->live_throughout);
compute_use_by_pseudos (&used_by_pseudos, &chain->dead_or_set);
{
HARD_REG_SET tmp;
REG_SET_TO_HARD_REG_SET (tmp, &chain->live_throughout);
- IOR_HARD_REG_SET (reg_used_in_insn, tmp);
+ reg_used_in_insn |= tmp;
REG_SET_TO_HARD_REG_SET (tmp, &chain->dead_or_set);
- IOR_HARD_REG_SET (reg_used_in_insn, tmp);
+ reg_used_in_insn |= tmp;
 compute_use_by_pseudos (&reg_used_in_insn, &chain->live_throughout);
 compute_use_by_pseudos (&reg_used_in_insn, &chain->dead_or_set);
}
}
}
}
- IOR_HARD_REG_SET (reg_reloaded_dead, reg_reloaded_died);
+ reg_reloaded_dead |= reg_reloaded_died;
}
\f
/* Go through the motions to emit INSN and test if it is strictly valid.
/* Insns we pass may not set either NEEDED or SET, so merge them for
simpler tests. */
needed.memory |= set.memory;
- IOR_HARD_REG_SET (needed.regs, set.regs);
+ needed.regs |= set.regs;
/* This insn isn't redundant if it conflicts with an insn that either is
or will be in a delay slot of TARGET. */
find_dead_or_set_registers (next_insn,
&fallthrough_res, 0, jump_count,
set, needed);
- IOR_HARD_REG_SET (fallthrough_res.regs, target_res.regs);
+ fallthrough_res.regs |= target_res.regs;
res->regs &= fallthrough_res.regs;
break;
}
res->cc = res->memory = 1;
 get_call_reg_set_usage (call_insn, &regs, regs_invalidated_by_call);
- IOR_HARD_REG_SET (res->regs, regs);
+ res->regs |= regs;
for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
link; link = XEXP (link, 1))
HARD_REG_SET extra_live;
REG_SET_TO_HARD_REG_SET (extra_live, DF_LR_IN (bb));
- IOR_HARD_REG_SET (current_live_regs, extra_live);
+ current_live_regs |= extra_live;
}
}
are implicitly required at that point. */
else if (NOTE_P (real_insn)
&& NOTE_KIND (real_insn) == NOTE_INSN_EPILOGUE_BEG)
- IOR_HARD_REG_SET (current_live_regs, start_of_epilogue_needs.regs);
+ current_live_regs |= start_of_epilogue_needs.regs;
}
res->regs = current_live_regs;
scratch = needed.regs;
AND_COMPL_HARD_REG_SET (scratch, set.regs);
- IOR_HARD_REG_SET (new_resources.regs, scratch);
+ new_resources.regs |= scratch;
mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
}
- IOR_HARD_REG_SET (res->regs, new_resources.regs);
+ res->regs |= new_resources.regs;
}
if (tinfo != NULL)
CLEAR_HARD_REG_SET (*pset);
note_stores (insn, record_hard_reg_sets, pset);
if (CALL_P (insn) && implicit)
- IOR_HARD_REG_SET (*pset, call_used_reg_set);
+ *pset |= call_used_reg_set;
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_INC)
record_hard_reg_sets (XEXP (link, 0), NULL, pset);
{
HARD_REG_SET temp;
get_implicit_reg_pending_clobbers (&temp, insn);
- IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
+ implicit_reg_pending_clobbers |= temp;
}
can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
The HARD_REGNO_RENAME_OK covers other cases in condition below. */
if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG)
&& REGNO_REG_SET_P (used_regs, FIRST_STACK_REG))
- IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
- sel_hrd.stack_regs);
+ reg_rename_p->unavailable_hard_regs |= sel_hrd.stack_regs;
#endif
/* If there's a call on this path, make regs from call_used_reg_set
unavailable. */
if (def->crosses_call)
- IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
- call_used_reg_set);
+ reg_rename_p->unavailable_hard_regs |= call_used_reg_set;
/* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call,
but not register classes. */
/* Join hard registers unavailable due to register class
restrictions and live range intersection. */
- IOR_HARD_REG_SET (hard_regs_used,
- reg_rename_data.unavailable_hard_regs);
+ hard_regs_used |= reg_rename_data.unavailable_hard_regs;
 best_reg = choose_best_reg (hard_regs_used, &reg_rename_data,
original_insns, is_orig_reg_p);
CLEAR_HARD_REG_SET (this_used);
note_uses (&PATTERN (insn), record_hard_reg_uses, &this_used);
AND_COMPL_HARD_REG_SET (this_used, prologue_clobbered);
- IOR_HARD_REG_SET (prologue_used, this_used);
+ prologue_used |= this_used;
note_stores (insn, record_hard_reg_sets, &prologue_clobbered);
}
CLEAR_HARD_REG_BIT (prologue_clobbered, STACK_POINTER_REGNUM);