+2004-07-01  Jerry Quinn  <jlquinn@optonline.net>
+
+ * alias.c (get_alias_set, canon_rtx, get_addr,
+ nonoverlapping_memrefs_p, nonlocal_referenced_p_1, memory_modified_1):
+ Use MEM_P.
+ * builtins.c (expand_builtin_prefetch, expand_builtin_profile_func,
+ expand_builtin): Likewise.
+ * calls.c (expand_call, emit_library_call_value_1, store_one_arg):
+ Likewise.
+ * combine.c (can_combine_p, combinable_i3pat, try_combine,
+ find_split_point, combine_simplify_rtx, simplify_set, make_extraction,
+ rtx_equal_for_field_assignment_p, gen_lowpart_for_combine,
+ record_dead_and_set_regs_1, get_last_value_validate,
+ mark_used_regs_combine, move_deaths, unmentioned_reg_p_1): Likewise.
+ * cse.c (check_dependence, canon_hash, equiv_constant,
+ gen_lowpart_if_possible, cse_insn, invalidate_from_clobbers,
+ cse_around_loop, cse_check_loop_start, cse_set_around_loop,
+ count_reg_usage): Likewise.
+ * cselib.c (rtx_equal_for_cselib_p, add_mem_for_addr, cselib_lookup,
+ cselib_invalidate_mem, cselib_invalidate_rtx, cselib_record_set,
+ cselib_record_sets): Likewise.
+ * dbxout.c (PARM_PASSED_IN_MEMORY, dbxout_symbol,
+ dbxout_symbol_location, dbxout_parms, dbxout_reg_parms): Likewise.
+ * ddg.c (mark_mem_use, mark_mem_store, rtx_mem_access_p): Likewise.
+ * df.c (df_uses_record): Likewise.
+	* dojump.c (do_jump): Likewise.
+ * dwarf2out.c (stack_adjust_offset, mem_loc_descriptor,
+ loc_descriptor_from_tree, rtl_for_decl_location, add_bound_info,
+ decl_start_label): Likewise.
+ * emit-rtl.c (gen_complex_constant_part, gen_highpart,
+ operand_subword, change_address_1, make_safe_from): Likewise.
+ * explow.c (break_out_memory_refs, copy_all_regs, validize_mem,
+ stabilize, force_not_mem): Likewise.
+ * expmed.c (store_bit_field, store_split_bit_field, extract_bit_field,
+ expand_mult_const, expand_divmod, emit_store_flag): Likewise.
+ * expr.c (convert_move, convert_modes, emit_block_move,
+ emit_group_load, emit_group_store, clear_storage, emit_move_insn,
+ emit_move_insn_1, expand_assignment, store_expr,
+ store_constructor_field, store_constructor, store_field,
+ force_operand, safe_from_p, expand_expr_real_1, expand_increment):
+ Likewise.
+ * final.c (cleanup_subreg_operands, alter_subreg,
+ get_mem_expr_from_op): Likewise.
+ * flow.c (notice_stack_pointer_modification_1,
+ init_propagate_block_info, insn_dead_p, mark_set_1, mark_used_regs):
+ Likewise.
+ * function.c (mark_temp_addr_taken, preserve_temp_slots,
+ preserve_rtl_expr_result, put_var_into_stack, fixup_var_refs_1,
+ optimize_bit_field, flush_addressof, purge_addressof_1,
+ instantiate_decl, instantiate_virtual_regs_1, assign_parms,
+ setjmp_protect, setjmp_protect_args, fix_lexical_addr,
+ keep_stack_depressed): Likewise.
+ * ifcvt.c (noce_try_cmove_arith, noce_try_abs, noce_operand_ok,
+ noce_process_if_block, find_memory): Likewise.
+ * integrate.c (subst_constants, allocate_initial_values): Likewise.
+ * local-alloc.c (validate_equiv_mem_from_store, memref_referenced_p,
+ update_equiv_regs): Likewise.
+ * loop.c (scan_loop, prescan_loop, note_addr_stored, check_store,
+ maybe_eliminate_biv_1, find_mem_in_note_1): Likewise.
+ * optabs.c (expand_abs, emit_unop_insn): Likewise.
+ * passes.c (rest_of_handle_final): Likewise.
+ * postreload.c (reload_cse_simplify_set, reload_cse_simplify_operands,
+ move2add_note_store): Likewise.
+ * ra-build.c (detect_remat_webs): Likewise.
+ * ra-debug.c (dump_static_insn_cost): Likewise.
+ * ra-rewrite.c (slots_overlap_p, insert_stores): Likewise.
+ * recog.c (validate_change, apply_change_group, cancel_changes,
+ validate_replace_rtx_1, general_operand, register_operand,
+ nonmemory_operand, push_operand, pop_operand, memory_operand,
+ indirect_operand, asm_operand_ok, offsettable_memref_p,
+ offsettable_nonstrict_memref_p, constrain_operands,
+ store_data_bypass_p): Likewise.
+ * reg-stack.c (subst_stack_regs_pat): Likewise.
+ * regclass.c (record_operand_costs, scan_one_insn, record_reg_classes,
+ copy_cost, reg_scan_mark_refs): Likewise.
+ * regmove.c (optimize_reg_copy_3, stack_memref_p,
+ combine_stack_adjustments_for_block): Likewise.
+ * regrename.c (copyprop_hardreg_forward_1): Likewise.
+ * reload.c (can_reload_into, push_reload, decompose, immune_p,
+ find_reloads, find_reloads_address, find_reloads_address_1,
+ reg_overlap_mentioned_for_reload_p, refers_to_mem_for_reload_p,
+ find_equiv_reg): Likewise.
+ * reload1.c (reload, eliminate_regs, eliminate_regs_in_insn,
+ reload_as_needed, choose_reload_regs, emit_input_reload_insns,
+ do_input_reload, emit_reload_insns, gen_reload, delete_output_reload,
+ delete_address_reloads): Likewise.
+ * resource.c (mark_referenced_resources): Likewise.
+ * rtlanal.c (get_jump_table_offset, count_occurrences,
+ reg_referenced_p, reg_set_p, set_of_1, set_noop_p,
+ reg_overlap_mentioned_p, note_uses, replace_regs, nonzero_bits1,
+ num_sign_bit_copies1): Likewise.
+ * rtlhooks.c (gen_lowpart_general): Likewise.
+ * sched-deps.c (sched_analyze_1, sched_analyze_2): Likewise.
+ * sdbout.c (PARM_PASSED_IN_MEMORY, sdbout_symbol,
+ sdbout_toplevel_data, sdbout_parms, sdbout_reg_parms,
+ sdbout_global_decl): Likewise.
+ * simplify-rtx.c (simplify_subreg): Likewise.
+ * stmt.c (expand_asm_operands, expand_expr_stmt_value, expand_decl,
+ expand_anon_union_decl, expand_end_case_type): Likewise.
+ * unroll.c (calculate_giv_inc): Likewise.
+ * var-tracking.c (stack_adjust_offset_pre_post,
+ bb_stack_adjust_offset, track_expr_p, count_uses, add_uses,
+ add_stores, compute_bb_dataflow, vt_get_decl_and_offset,
+ vt_add_function_parameters): Likewise.
+ * varasm.c (make_var_volatile, notice_global_symbol,
+ assemble_external, decode_addr_const, mark_weak,
+ default_encode_section_info): Likewise.
+
2004-07-01  Steven Bosscher  <stevenb@suse.de>

* stmt.c (check_seenlabel): Remove.
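
[Note on the predicate used throughout the hunks below: MEM_P is assumed to be
the rtx predicate macro from gcc/rtl.h, along the lines of

    /* Nonzero iff X is a MEM rtx (assumed rtl.h definition).  */
    #define MEM_P(X) (GET_CODE (X) == MEM)

so each replacement, e.g. GET_CODE (op0) == MEM becoming !/MEM_P (op0) as
appropriate, is a purely textual change with identical behavior given that
definition.]
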
it. This is necessary for C++ anonymous unions, whose component
variables don't look like union members (boo!). */
if (TREE_CODE (t) == VAR_DECL
- && DECL_RTL_SET_P (t) && GET_CODE (DECL_RTL (t)) == MEM)
+ && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
return MEM_ALIAS_SET (DECL_RTL (t));
/* Now all we care about is the type. */
the loop optimizer. Note we want to leave the original
MEM alone, but need to return the canonicalized MEM with
all the flags with their original values. */
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
return x;
if (CONSTANT_P (l->loc))
return l->loc;
for (l = v->locs; l; l = l->next)
- if (!REG_P (l->loc) && GET_CODE (l->loc) != MEM)
+ if (!REG_P (l->loc) && !MEM_P (l->loc))
return l->loc;
if (v->locs)
return v->locs->loc;
/* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
can't overlap unless they are the same because we never reuse that part
of the stack frame used for locals for spilled pseudos. */
- if ((GET_CODE (rtlx) != MEM || GET_CODE (rtly) != MEM)
+ if ((!MEM_P (rtlx) || !MEM_P (rtly))
&& ! rtx_equal_p (rtlx, rtly))
return 1;
know both are and are the same, so use that as the base. The only way
we can avoid overlap is if we can deduce that they are nonoverlapping
pieces of that decl, which is very rare. */
- basex = GET_CODE (rtlx) == MEM ? XEXP (rtlx, 0) : rtlx;
+ basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
if (GET_CODE (basex) == PLUS && GET_CODE (XEXP (basex, 1)) == CONST_INT)
offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
- basey = GET_CODE (rtly) == MEM ? XEXP (rtly, 0) : rtly;
+ basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
if (GET_CODE (basey) == PLUS && GET_CODE (XEXP (basey, 1)) == CONST_INT)
offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
|| (CONSTANT_P (basey) && REG_P (basex)
&& REGNO_PTR_FRAME_P (REGNO (basex))));
- sizex = (GET_CODE (rtlx) != MEM ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
+ sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
: MEM_SIZE (rtlx) ? INTVAL (MEM_SIZE (rtlx))
: -1);
- sizey = (GET_CODE (rtly) != MEM ? (int) GET_MODE_SIZE (GET_MODE (rtly))
+ sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
: MEM_SIZE (rtly) ? INTVAL (MEM_SIZE (rtly)) :
-1);
if (nonlocal_mentioned_p (SET_SRC (x)))
return 1;
- if (GET_CODE (SET_DEST (x)) == MEM)
+ if (MEM_P (SET_DEST (x)))
return nonlocal_mentioned_p (XEXP (SET_DEST (x), 0));
/* If the destination is anything other than a CC0, PC,
return 0;
case CLOBBER:
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
return nonlocal_mentioned_p (XEXP (XEXP (x, 0), 0));
return 0;
static void
memory_modified_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
{
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
{
if (anti_dependence (x, (rtx)data) || output_dependence (x, (rtx)data))
memory_modified = true;
op0 = protect_from_queue (op0, 0);
/* Don't do anything with direct references to volatile memory, but
generate code to handle other side effects. */
- if (GET_CODE (op0) != MEM && side_effects_p (op0))
+ if (!MEM_P (op0) && side_effects_p (op0))
emit_insn (op0);
}
rtx this, which;
this = DECL_RTL (current_function_decl);
- if (GET_CODE (this) == MEM)
+ if (MEM_P (this))
this = XEXP (this, 0);
else
abort ();
case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
if (arglist != 0
|| ! AGGREGATE_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl)))
- || GET_CODE (DECL_RTL (DECL_RESULT (current_function_decl))) != MEM)
+ || !MEM_P (DECL_RTL (DECL_RESULT (current_function_decl))))
return const0_rtx;
else
return XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0);
structure_value_addr = expand_expr (return_arg, NULL_RTX,
VOIDmode, EXPAND_NORMAL);
}
- else if (target && GET_CODE (target) == MEM)
+ else if (target && MEM_P (target))
structure_value_addr = XEXP (target, 0);
else
{
target = const0_rtx;
else if (structure_value_addr)
{
- if (target == 0 || GET_CODE (target) != MEM)
+ if (target == 0 || !MEM_P (target))
{
target
= gen_rtx_MEM (TYPE_MODE (TREE_TYPE (exp)),
/* If we are setting a MEM, this code must be executed. Since it is
emitted after the call insn, sibcall optimization cannot be
performed in that case. */
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
sibcall_failure = 1;
}
else if (TYPE_MODE (TREE_TYPE (exp)) == BLKmode)
adding to call_fusage before the call to emit_call_1 because TARGET
may be modified in the meantime. */
if (structure_value_addr != 0 && target != 0
- && GET_CODE (target) == MEM && RTX_UNCHANGING_P (target))
+ && MEM_P (target) && RTX_UNCHANGING_P (target))
add_function_usage_to
(last_call_insn (),
gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_CLOBBER (VOIDmode, target),
value = gen_reg_rtx (outmode);
#else /* not PCC_STATIC_STRUCT_RETURN */
struct_value_size = GET_MODE_SIZE (outmode);
- if (value != 0 && GET_CODE (value) == MEM)
+ if (value != 0 && MEM_P (value))
mem_value = value;
else
mem_value = assign_temp (tfom, 0, 1, 1);
nargs++;
/* Make sure it is a reasonable operand for a move or push insn. */
- if (!REG_P (addr) && GET_CODE (addr) != MEM
+ if (!REG_P (addr) && !MEM_P (addr)
&& ! (CONSTANT_P (addr) && LEGITIMATE_CONSTANT_P (addr)))
addr = force_operand (addr, NULL_RTX);
either emit_move_insn or emit_push_insn will do that. */
/* Make sure it is a reasonable operand for a move or push insn. */
- if (!REG_P (val) && GET_CODE (val) != MEM
+ if (!REG_P (val) && !MEM_P (val)
&& ! (CONSTANT_P (val) && LEGITIMATE_CONSTANT_P (val)))
val = force_operand (val, NULL_RTX);
}
}
- if ((flags & ECF_SIBCALL) && GET_CODE (arg->value) == MEM)
+ if ((flags & ECF_SIBCALL) && MEM_P (arg->value))
{
/* emit_push_insn might not work properly if arg->value and
argblock + arg->locate.offset areas overlap. */
are intervening stores. Also, don't move a volatile asm or
UNSPEC_VOLATILE across any other insns. */
|| (! all_adjacent
- && (((GET_CODE (src) != MEM
+ && (((!MEM_P (src)
|| ! find_reg_note (insn, REG_EQUIV, src))
&& use_crosses_set_p (src, INSN_CUID (insn)))
|| (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
into the address of a MEM, so only prevent the combination if
i1 or i2 set the same MEM. */
if ((inner_dest != dest &&
- (GET_CODE (inner_dest) != MEM
+ (!MEM_P (inner_dest)
|| rtx_equal_p (i2dest, inner_dest)
|| (i1dest && rtx_equal_p (i1dest, inner_dest)))
&& (reg_overlap_mentioned_p (i2dest, inner_dest)
#if 0
if (!(GET_CODE (PATTERN (i3)) == SET
&& REG_P (SET_SRC (PATTERN (i3)))
- && GET_CODE (SET_DEST (PATTERN (i3))) == MEM
+ && MEM_P (SET_DEST (PATTERN (i3)))
&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
|| GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
/* It's not the exception. */
#ifdef INSN_SCHEDULING
/* If *SPLIT is a paradoxical SUBREG, when we split it, it should
be written as a ZERO_EXTEND. */
- if (split_code == SUBREG && GET_CODE (SUBREG_REG (*split)) == MEM)
+ if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
{
#ifdef LOAD_EXTEND_OP
/* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
#ifdef INSN_SCHEDULING
/* If we are making a paradoxical SUBREG invalid, it becomes a split
point. */
- if (GET_CODE (SUBREG_REG (x)) == MEM)
+ if (MEM_P (SUBREG_REG (x)))
return loc;
#endif
return find_split_point (&SUBREG_REG (x), insn);
/* Don't change the mode of the MEM if that would change the meaning
of the address. */
- if (GET_CODE (SUBREG_REG (x)) == MEM
+ if (MEM_P (SUBREG_REG (x))
&& (MEM_VOLATILE_P (SUBREG_REG (x))
|| mode_dependent_address_p (XEXP (SUBREG_REG (x), 0))))
return gen_rtx_CLOBBER (mode, const0_rtx);
&& SUBREG_BYTE (src) == 0
&& (GET_MODE_SIZE (GET_MODE (src))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
- && GET_CODE (SUBREG_REG (src)) == MEM)
+ && MEM_P (SUBREG_REG (src)))
{
SUBST (SET_SRC (x),
gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
The subreg adds or removes high bits; its mode is
irrelevant to the meaning of this extraction,
since POS and LEN count from the lsb. */
- if (GET_CODE (SUBREG_REG (inner)) == MEM)
+ if (MEM_P (SUBREG_REG (inner)))
is_mode = GET_MODE (SUBREG_REG (inner));
inner = SUBREG_REG (inner);
}
if (tmode != BLKmode
&& ! (spans_byte && inner_mode != tmode)
&& ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
- && GET_CODE (inner) != MEM
+ && !MEM_P (inner)
&& (! in_dest
|| (REG_P (inner)
&& have_insn_for (STRICT_LOW_PART, tmode))))
- || (GET_CODE (inner) == MEM && pos_rtx == 0
+ || (MEM_P (inner) && pos_rtx == 0
&& (pos
% (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
: BITS_PER_UNIT)) == 0
If INNER is not a MEM, get a piece consisting of just the field
of interest (in this case POS % BITS_PER_WORD must be 0). */
- if (GET_CODE (inner) == MEM)
+ if (MEM_P (inner))
{
HOST_WIDE_INT offset;
make a STRICT_LOW_PART unless we made a MEM. */
if (in_dest)
- return (GET_CODE (new) == MEM ? new
+ return (MEM_P (new) ? new
: (GET_CODE (new) != SUBREG
? gen_rtx_CLOBBER (tmode, const0_rtx)
: gen_rtx_STRICT_LOW_PART (VOIDmode, new)));
length is not 1. In all other cases, we would only be going outside
our object in cases when an original shift would have been
undefined. */
- if (! spans_byte && GET_CODE (inner) == MEM
+ if (! spans_byte && MEM_P (inner)
&& ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
|| (pos_rtx != 0 && len != 1)))
return 0;
/* If this is not from memory, the desired mode is wanted_inner_reg_mode;
if we have to change the mode of memory and cannot, the desired mode is
EXTRACTION_MODE. */
- if (GET_CODE (inner) != MEM)
+ if (!MEM_P (inner))
wanted_inner_mode = wanted_inner_reg_mode;
else if (inner_mode != wanted_inner_mode
&& (mode_dependent_address_p (XEXP (inner, 0))
If it's a MEM we need to recompute POS relative to that.
However, if we're extracting from (or inserting into) a register,
we want to recompute POS relative to wanted_inner_mode. */
- int width = (GET_CODE (inner) == MEM
+ int width = (MEM_P (inner)
? GET_MODE_BITSIZE (is_mode)
: GET_MODE_BITSIZE (wanted_inner_mode));
pos_rtx
= gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx);
/* POS may be less than 0 now, but we check for that below.
- Note that it can only be less than 0 if GET_CODE (inner) != MEM. */
+ Note that it can only be less than 0 if !MEM_P (inner). */
}
/* If INNER has a wider mode, make it smaller. If this is a constant
the value. */
if (wanted_inner_mode != VOIDmode
&& GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
- && ((GET_CODE (inner) == MEM
+ && ((MEM_P (inner)
&& (inner_mode == wanted_inner_mode
|| (! mode_dependent_address_p (XEXP (inner, 0))
&& ! MEM_VOLATILE_P (inner))))))
/* If INNER is not memory, we can always get it into the proper mode. If we
are changing its mode, POS must be a constant and smaller than the size
of the new mode. */
- else if (GET_CODE (inner) != MEM)
+ else if (!MEM_P (inner))
{
if (GET_MODE (inner) != wanted_inner_mode
&& (pos_rtx != 0
/* Check for a paradoxical SUBREG of a MEM compared with the MEM.
Note that all SUBREGs of MEM are paradoxical; otherwise they
would have been rewritten. */
- if (GET_CODE (x) == MEM && GET_CODE (y) == SUBREG
- && GET_CODE (SUBREG_REG (y)) == MEM
+ if (MEM_P (x) && GET_CODE (y) == SUBREG
+ && MEM_P (SUBREG_REG (y))
&& rtx_equal_p (SUBREG_REG (y),
gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
return 1;
- if (GET_CODE (y) == MEM && GET_CODE (x) == SUBREG
- && GET_CODE (SUBREG_REG (x)) == MEM
+ if (MEM_P (y) && GET_CODE (x) == SUBREG
+ && MEM_P (SUBREG_REG (x))
&& rtx_equal_p (SUBREG_REG (x),
gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
return 1;
/* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
won't know what to do. So we will strip off the SUBREG here and
process normally. */
- if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == MEM)
+ if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
{
x = SUBREG_REG (x);
if (GET_MODE (x) == mode)
if (result)
return result;
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
{
int offset = 0;
else
record_value_for_reg (dest, record_dead_insn, NULL_RTX);
}
- else if (GET_CODE (dest) == MEM
+ else if (MEM_P (dest)
/* Ignore pushes, they clobber nothing. */
&& ! push_operand (dest, GET_MODE (dest)))
mem_last_set = INSN_CUID (record_dead_insn);
/* If this is a memory reference, make sure that there were
no stores after it that might have clobbered the value. We don't
have alias info, so we assume any store invalidates it. */
- else if (GET_CODE (x) == MEM && ! RTX_UNCHANGING_P (x)
+ else if (MEM_P (x) && ! RTX_UNCHANGING_P (x)
&& INSN_CUID (insn) <= mem_last_set)
{
if (replace)
case CLOBBER:
/* If we are clobbering a MEM, mark any hard registers inside the
address as used. */
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
return;
|| GET_CODE (testreg) == STRICT_LOW_PART)
testreg = XEXP (testreg, 0);
- if (GET_CODE (testreg) == MEM)
+ if (MEM_P (testreg))
mark_used_regs_combine (XEXP (testreg, 0));
mark_used_regs_combine (SET_SRC (x));
For a REG (the only other possibility), the entire value is
being replaced so the old value is not used in this insn. */
- if (GET_CODE (dest) == MEM)
+ if (MEM_P (dest))
move_deaths (XEXP (dest, 0), maybe_kill_insn, from_cuid,
to_insn, pnotes);
return;
rtx x = *loc;
if (x != NULL_RTX
- && (REG_P (x) || GET_CODE (x) == MEM)
+ && (REG_P (x) || MEM_P (x))
&& ! reg_mentioned_p (x, (rtx) expr))
return 1;
return 0;
check_dependence (rtx *x, void *data)
{
struct check_dependence_data *d = (struct check_dependence_data *) data;
- if (*x && GET_CODE (*x) == MEM)
+ if (*x && MEM_P (*x))
return canon_true_dependence (d->exp, d->mode, d->addr, *x,
cse_rtx_varies_p);
else
handling since the MEM may be BLKmode which normally
prevents an entry from being made. Pure calls are
marked by a USE which mentions BLKmode memory. */
- if (GET_CODE (XEXP (x, 0)) == MEM
+ if (MEM_P (XEXP (x, 0))
&& ! MEM_VOLATILE_P (XEXP (x, 0)))
{
hash += (unsigned) USE;
is a constant-pool reference. Then try to look it up in the hash table
in case it is something whose value we have seen before. */
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
{
struct table_elt *elt;
if (result)
return result;
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
{
/* This is the only other case we handle. */
int offset = 0;
/* If we clobber memory, canon the address.
This does nothing when a register is clobbered
because we have already invalidated the reg. */
- if (GET_CODE (XEXP (y, 0)) == MEM)
+ if (MEM_P (XEXP (y, 0)))
canon_reg (XEXP (y, 0), NULL_RTX);
}
else if (GET_CODE (y) == USE
}
else if (GET_CODE (x) == CLOBBER)
{
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
canon_reg (XEXP (x, 0), NULL_RTX);
}
|| GET_CODE (dest) == SIGN_EXTRACT)
dest = XEXP (dest, 0);
- if (GET_CODE (dest) == MEM)
+ if (MEM_P (dest))
canon_reg (dest, insn);
}
RTL would be referring to SRC, so we don't lose any optimization
opportunities by not having SRC in the hash table. */
- if (GET_CODE (src) == MEM
+ if (MEM_P (src)
&& find_reg_note (insn, REG_EQUIV, NULL_RTX) != 0
&& REG_P (dest)
&& REGNO (dest) >= FIRST_PSEUDO_REGISTER)
if (flag_expensive_optimizations && src_related == 0
&& (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
&& GET_MODE_CLASS (mode) == MODE_INT
- && GET_CODE (src) == MEM && ! do_not_record
+ && MEM_P (src) && ! do_not_record
&& LOAD_EXTEND_OP (mode) != NIL)
{
enum machine_mode tmode;
if (libcall_insn
&& (REG_P (sets[i].orig_src)
|| GET_CODE (sets[i].orig_src) == SUBREG
- || GET_CODE (sets[i].orig_src) == MEM))
+ || MEM_P (sets[i].orig_src)))
{
rtx note = find_reg_equal_equiv_note (libcall_insn);
if (note != 0)
&& GET_CODE (XEXP (XEXP (trial, 0), 0)) == LABEL_REF
&& GET_CODE (XEXP (XEXP (trial, 0), 1)) == LABEL_REF)
&& (src_folded == 0
- || (GET_CODE (src_folded) != MEM
+ || (!MEM_P (src_folded)
&& ! src_folded_force_flag))
&& GET_MODE_CLASS (mode) != MODE_CC
&& mode != VOIDmode)
sets[i].inner_dest = dest;
- if (GET_CODE (dest) == MEM)
+ if (MEM_P (dest))
{
#ifdef PUSH_ROUNDING
/* Stack pushes invalidate the stack pointer. */
{
if (REG_P (dest) || GET_CODE (dest) == SUBREG)
invalidate (dest, VOIDmode);
- else if (GET_CODE (dest) == MEM)
+ else if (MEM_P (dest))
{
/* Outgoing arguments for a libcall don't
affect any recorded expressions. */
we have just done an invalidate_memory that covers even those. */
if (REG_P (dest) || GET_CODE (dest) == SUBREG)
invalidate (dest, VOIDmode);
- else if (GET_CODE (dest) == MEM)
+ else if (MEM_P (dest))
{
/* Outgoing arguments for a libcall don't
affect any recorded expressions. */
floating-point values in registers that might be wider than
memory. */
if ((flag_float_store
- && GET_CODE (dest) == MEM
+ && MEM_P (dest)
&& FLOAT_MODE_P (GET_MODE (dest)))
/* Don't record BLKmode values, because we don't know the
size of it, and can't be sure that other BLKmode values
sets[i].dest_hash = HASH (dest, GET_MODE (dest));
}
- if (GET_CODE (inner_dest) == MEM
+ if (MEM_P (inner_dest)
&& GET_CODE (XEXP (inner_dest, 0)) == ADDRESSOF)
/* Given (SET (MEM (ADDRESSOF (X))) Y) we don't want to say
that (MEM (ADDRESSOF (X))) is equivalent to Y.
elt = insert (dest, sets[i].src_elt,
sets[i].dest_hash, GET_MODE (dest));
- elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
+ elt->in_memory = (MEM_P (sets[i].inner_dest)
&& (! RTX_UNCHANGING_P (sets[i].inner_dest)
|| fixed_base_plus_p (XEXP (sets[i].inner_dest,
0))));
if (ref)
{
if (REG_P (ref) || GET_CODE (ref) == SUBREG
- || GET_CODE (ref) == MEM)
+ || MEM_P (ref))
invalidate (ref, VOIDmode);
else if (GET_CODE (ref) == STRICT_LOW_PART
|| GET_CODE (ref) == ZERO_EXTRACT)
{
rtx ref = XEXP (y, 0);
if (REG_P (ref) || GET_CODE (ref) == SUBREG
- || GET_CODE (ref) == MEM)
+ || MEM_P (ref))
invalidate (ref, VOIDmode);
else if (GET_CODE (ref) == STRICT_LOW_PART
|| GET_CODE (ref) == ZERO_EXTRACT)
|| GET_CODE (x) == CC0 || GET_CODE (x) == PC)
return;
- if ((GET_CODE (x) == MEM && GET_CODE (*cse_check_loop_start_value) == MEM)
+ if ((MEM_P (x) && MEM_P (*cse_check_loop_start_value))
|| reg_overlap_mentioned_p (x, *cse_check_loop_start_value))
*cse_check_loop_start_value = NULL_RTX;
}
/* See comment on similar code in cse_insn for explanation of these
tests. */
if (REG_P (SET_DEST (x)) || GET_CODE (SET_DEST (x)) == SUBREG
- || GET_CODE (SET_DEST (x)) == MEM)
+ || MEM_P (SET_DEST (x)))
invalidate (SET_DEST (x), VOIDmode);
else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
|| GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
case CLOBBER:
/* If we are clobbering a MEM, mark any registers inside the address
as being used. */
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
count_reg_usage (XEXP (XEXP (x, 0), 0), counts, incr);
return;
rtx t = l->loc;
/* Avoid infinite recursion. */
- if (REG_P (t) || GET_CODE (t) == MEM)
+ if (REG_P (t) || MEM_P (t))
continue;
else if (rtx_equal_for_cselib_p (t, y))
return 1;
{
rtx t = l->loc;
- if (REG_P (t) || GET_CODE (t) == MEM)
+ if (REG_P (t) || MEM_P (t))
continue;
else if (rtx_equal_for_cselib_p (x, t))
return 1;
/* Avoid duplicates. */
for (l = mem_elt->locs; l; l = l->next)
- if (GET_CODE (l->loc) == MEM
+ if (MEM_P (l->loc)
&& CSELIB_VAL_PTR (XEXP (l->loc, 0)) == addr_elt)
return;
return e;
}
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
return cselib_lookup_mem (x, create);
hashval = hash_rtx (x, mode, create);
/* MEMs may occur in locations only at the top level; below
that every MEM or REG is substituted by its VALUE. */
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
{
p = &(*p)->next;
continue;
if (REG_P (dest))
cselib_invalidate_regno (REGNO (dest), GET_MODE (dest));
- else if (GET_CODE (dest) == MEM)
+ else if (MEM_P (dest))
cselib_invalidate_mem (dest);
/* Some machines don't define AUTO_INC_DEC, but they still use push
n_useless_values--;
src_elt->locs = new_elt_loc_list (src_elt->locs, dest);
}
- else if (GET_CODE (dest) == MEM && dest_addr_elt != 0
+ else if (MEM_P (dest) && dest_addr_elt != 0
&& cselib_record_memory)
{
if (src_elt->locs == 0)
/* We don't know how to record anything but REG or MEM. */
if (REG_P (dest)
- || (GET_CODE (dest) == MEM && cselib_record_memory))
+ || (MEM_P (dest) && cselib_record_memory))
{
rtx src = sets[i].src;
if (cond)
src = gen_rtx_IF_THEN_ELSE (GET_MODE (src), cond, src, dest);
sets[i].src_elt = cselib_lookup (src, GET_MODE (dest), 1);
- if (GET_CODE (dest) == MEM)
+ if (MEM_P (dest))
sets[i].dest_addr_elt = cselib_lookup (XEXP (dest, 0), Pmode, 1);
else
sets[i].dest_addr_elt = 0;
for (i = 0; i < n_sets; i++)
{
rtx dest = sets[i].dest;
- if (REG_P (dest) || GET_CODE (dest) == MEM)
+ if (REG_P (dest) || MEM_P (dest))
{
int j;
for (j = i + 1; j < n_sets; j++)
{
rtx dest = sets[i].dest;
if (REG_P (dest)
- || (GET_CODE (dest) == MEM && cselib_record_memory))
+ || (MEM_P (dest) && cselib_record_memory))
cselib_record_set (dest, sets[i].src_elt, sets[i].dest_addr_elt);
}
}
/* 1 if PARM is passed to this function in memory. */
#define PARM_PASSED_IN_MEMORY(PARM) \
- (GET_CODE (DECL_INCOMING_RTL (PARM)) == MEM)
+ (MEM_P (DECL_INCOMING_RTL (PARM)))
/* A C expression for the integer offset value of an automatic variable
(N_LSYM) having address X (an RTX). */
context = decl_function_context (decl);
if (context == current_function_decl)
break;
- if (GET_CODE (DECL_RTL (decl)) != MEM
+ if (!MEM_P (DECL_RTL (decl))
|| GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
break;
FORCE_TEXT;
no letter at all, and N_LSYM, for auto variable,
r and N_RSYM for register variable. */
- if (GET_CODE (home) == MEM
+ if (MEM_P (home)
&& GET_CODE (XEXP (home, 0)) == SYMBOL_REF)
{
if (TREE_PUBLIC (decl))
current_sym_code = N_RSYM;
current_sym_value = DBX_REGISTER_NUMBER (regno);
}
- else if (GET_CODE (home) == MEM
- && (GET_CODE (XEXP (home, 0)) == MEM
+ else if (MEM_P (home)
+ && (MEM_P (XEXP (home, 0))
|| (REG_P (XEXP (home, 0))
&& REGNO (XEXP (home, 0)) != HARD_FRAME_POINTER_REGNUM
&& REGNO (XEXP (home, 0)) != STACK_POINTER_REGNUM
type = make_node (POINTER_TYPE);
TREE_TYPE (type) = TREE_TYPE (decl);
}
- else if (GET_CODE (home) == MEM
+ else if (MEM_P (home)
&& REG_P (XEXP (home, 0)))
{
current_sym_code = N_LSYM;
current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (home, 0));
}
- else if (GET_CODE (home) == MEM
+ else if (MEM_P (home)
&& GET_CODE (XEXP (home, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (home, 0), 1)) == CONST_INT)
{
We want the value of that CONST_INT. */
current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (home, 0));
}
- else if (GET_CODE (home) == MEM
+ else if (MEM_P (home)
&& GET_CODE (XEXP (home, 0)) == CONST)
{
/* Handle an obscure case which can arise when optimizing and
dbxout_type (parm_type, 0);
dbxout_finish_symbol (parms);
}
- else if (GET_CODE (DECL_RTL (parms)) == MEM
+ else if (MEM_P (DECL_RTL (parms))
&& REG_P (XEXP (DECL_RTL (parms), 0))
&& REGNO (XEXP (DECL_RTL (parms), 0)) != HARD_FRAME_POINTER_REGNUM
&& REGNO (XEXP (DECL_RTL (parms), 0)) != STACK_POINTER_REGNUM
dbxout_type (TREE_TYPE (parms), 0);
dbxout_finish_symbol (parms);
}
- else if (GET_CODE (DECL_RTL (parms)) == MEM
- && GET_CODE (XEXP (DECL_RTL (parms), 0)) == MEM)
+ else if (MEM_P (DECL_RTL (parms))
+ && MEM_P (XEXP (DECL_RTL (parms), 0)))
{
/* Parm was passed via invisible reference, with the reference
living on the stack. DECL_RTL looks like
dbxout_type (TREE_TYPE (parms), 0);
dbxout_finish_symbol (parms);
}
- else if (GET_CODE (DECL_RTL (parms)) == MEM
+ else if (MEM_P (DECL_RTL (parms))
&& XEXP (DECL_RTL (parms), 0) != const0_rtx
/* ??? A constant address for a parm can happen
when the reg it lives in is equiv to a constant in memory.
dbxout_symbol_location (parms, TREE_TYPE (parms),
0, DECL_RTL (parms));
/* Report parms that live in memory but not where they were passed. */
- else if (GET_CODE (DECL_RTL (parms)) == MEM
+ else if (MEM_P (DECL_RTL (parms))
&& ! rtx_equal_p (DECL_RTL (parms), DECL_INCOMING_RTL (parms)))
dbxout_symbol_location (parms, TREE_TYPE (parms),
0, DECL_RTL (parms));
case CLOBBER:
/* If we are clobbering a MEM, mark any registers inside the address
as being used. */
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
df_uses_record (df, &XEXP (XEXP (x, 0), 0),
DF_REF_REG_MEM_STORE, bb, insn, flags);
sequences. */
/* Copy to register to avoid generating bad insns by cse
from (set (mem ...) (arithop)) (set (cc0) (mem ...)). */
- if (!cse_not_expected && GET_CODE (temp) == MEM)
+ if (!cse_not_expected && MEM_P (temp))
temp = copy_to_reg (temp);
#endif
do_pending_stack_adjust ();
if (code == PLUS)
offset = -offset;
}
- else if (GET_CODE (dest) == MEM)
+ else if (MEM_P (dest))
{
/* (set (mem (pre_dec (reg sp))) (foo)) */
src = XEXP (dest, 0);
case ADDRESSOF:
/* If this is a MEM, return its address. Otherwise, we can't
represent this. */
- if (GET_CODE (XEXP (rtl, 0)) == MEM)
+ if (MEM_P (XEXP (rtl, 0)))
return mem_loc_descriptor (XEXP (XEXP (rtl, 0), 0), mode,
can_use_fbreg);
else
if (rtl == NULL_RTX)
return 0;
- if (GET_CODE (rtl) != MEM)
+ if (!MEM_P (rtl))
return 0;
rtl = XEXP (rtl, 0);
if (! CONSTANT_P (rtl))
{
enum machine_mode mode = GET_MODE (rtl);
- if (GET_CODE (rtl) == MEM)
+ if (MEM_P (rtl))
{
indirect_p = 1;
rtl = XEXP (rtl, 0);
rtx rtl = lookup_constant_def (loc);
enum machine_mode mode;
- if (GET_CODE (rtl) != MEM)
+ if (!MEM_P (rtl))
return 0;
mode = GET_MODE (rtl);
rtl = XEXP (rtl, 0);
{
if (rtl
&& (CONSTANT_P (rtl)
- || (GET_CODE (rtl) == MEM
+ || (MEM_P (rtl)
&& CONSTANT_P (XEXP (rtl, 0)))
|| (REG_P (rtl)
&& TREE_CODE (decl) == VAR_DECL
we reach the big endian correction code there. It isn't clear if all
of these checks are necessary here, but keeping them all is the safe
thing to do. */
- else if (GET_CODE (rtl) == MEM
+ else if (MEM_P (rtl)
&& XEXP (rtl, 0) != const0_rtx
&& ! CONSTANT_P (XEXP (rtl, 0))
/* Not passed in memory. */
- && GET_CODE (DECL_INCOMING_RTL (decl)) != MEM
+ && !MEM_P (DECL_INCOMING_RTL (decl))
/* Not passed by invisible reference. */
&& (!REG_P (XEXP (rtl, 0))
|| REGNO (XEXP (rtl, 0)) == HARD_FRAME_POINTER_REGNUM
}
else if (TREE_CODE (decl) == VAR_DECL
&& rtl
- && GET_CODE (rtl) == MEM
+ && MEM_P (rtl)
&& GET_MODE (rtl) != TYPE_MODE (TREE_TYPE (decl))
&& BYTES_BIG_ENDIAN)
{
rtl = expand_expr (DECL_INITIAL (decl), NULL_RTX, VOIDmode,
EXPAND_INITIALIZER);
/* If expand_expr returns a MEM, it wasn't immediate. */
- if (rtl && GET_CODE (rtl) == MEM)
+ if (rtl && MEM_P (rtl))
abort ();
}
}
value there unless it was going to be used repeatedly in the
function, i.e. for cleanups. */
if (SAVE_EXPR_RTL (bound)
- && (! optimize || GET_CODE (SAVE_EXPR_RTL (bound)) == MEM))
+ && (! optimize || MEM_P (SAVE_EXPR_RTL (bound))))
{
dw_die_ref ctx = lookup_decl_die (current_function_decl);
dw_die_ref decl_die = new_die (DW_TAG_variable, ctx, bound);
/* If the RTL for the SAVE_EXPR is memory, handle the case where
it references an outer function's frame. */
- if (GET_CODE (loc) == MEM)
+ if (MEM_P (loc))
{
rtx new_addr = fix_lexical_addr (XEXP (loc, 0), bound);
const char *fnname;
x = DECL_RTL (decl);
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
abort ();
x = XEXP (x, 0);
{
tree decl, part;
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
{
decl = SYMBOL_REF_DECL (XEXP (x, 0));
/* simplify_gen_subreg is not guaranteed to return a valid operand for
the target if we have a MEM. gen_highpart must return a valid operand,
emitting code if necessary to do so. */
- if (result != NULL_RTX && GET_CODE (result) == MEM)
+ if (result != NULL_RTX && MEM_P (result))
result = validize_mem (result);
if (!result)
return const0_rtx;
/* Form a new MEM at the requested address. */
- if (GET_CODE (op) == MEM)
+ if (MEM_P (op))
{
rtx new = adjust_address_nv (op, word_mode, offset * UNITS_PER_WORD);
{
rtx new;
- if (GET_CODE (memref) != MEM)
+ if (!MEM_P (memref))
abort ();
if (mode == VOIDmode)
mode = GET_MODE (memref);
goto done;
}
done:
- if ((GET_CODE (other) == MEM
+ if ((MEM_P (other)
&& ! CONSTANT_P (x)
&& !REG_P (x)
&& GET_CODE (x) != SUBREG)
static rtx
break_out_memory_refs (rtx x)
{
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
|| (CONSTANT_P (x) && CONSTANT_ADDRESS_P (x)
&& GET_MODE (x) != VOIDmode))
x = force_reg (GET_MODE (x), x);
)
x = copy_to_reg (x);
}
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
x = copy_to_reg (x);
else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
|| GET_CODE (x) == MULT)
rtx
validize_mem (rtx ref)
{
- if (GET_CODE (ref) != MEM)
+ if (!MEM_P (ref))
return ref;
if (! (flag_force_addr && CONSTANT_ADDRESS_P (XEXP (ref, 0)))
&& memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
rtx
stabilize (rtx x)
{
- if (GET_CODE (x) != MEM
+ if (!MEM_P (x)
|| ! rtx_unstable_p (XEXP (x, 0)))
return x;
{
rtx temp;
- if (GET_CODE (x) != MEM || GET_MODE (x) == BLKmode)
+ if (!MEM_P (x) || GET_MODE (x) == BLKmode)
return x;
temp = gen_reg_rtx (GET_MODE (x));
rtx value, HOST_WIDE_INT total_size)
{
unsigned int unit
- = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
+ = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
unsigned HOST_WIDE_INT offset = bitnum / unit;
unsigned HOST_WIDE_INT bitpos = bitnum % unit;
rtx op0 = str_rtx;
/* Use vec_set patterns for inserting parts of vectors whenever
available. */
if (VECTOR_MODE_P (GET_MODE (op0))
- && GET_CODE (op0) != MEM
+ && !MEM_P (op0)
&& (vec_set_optab->handlers[GET_MODE (op0)].insn_code
!= CODE_FOR_nothing)
&& fieldmode == GET_MODE_INNER (GET_MODE (op0))
if (bitpos == 0
&& bitsize == GET_MODE_BITSIZE (fieldmode)
- && (GET_CODE (op0) != MEM
+ && (!MEM_P (op0)
? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
|| GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
&& byte_offset % GET_MODE_SIZE (fieldmode) == 0)
enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
if (imode != GET_MODE (op0))
{
- if (GET_CODE (op0) == MEM)
+ if (MEM_P (op0))
op0 = adjust_address (op0, imode, 0);
else if (imode != BLKmode)
op0 = gen_lowpart (imode, op0);
/* We may be accessing data outside the field, which means
we can alias adjacent data. */
- if (GET_CODE (op0) == MEM)
+ if (MEM_P (op0))
{
op0 = shallow_copy_rtx (op0);
set_mem_alias_set (op0, 0);
But as we have it, it counts within whatever size OP0 now has.
On a bigendian machine, these are not the same, so convert. */
if (BYTES_BIG_ENDIAN
- && GET_CODE (op0) != MEM
+ && !MEM_P (op0)
&& unit > GET_MODE_BITSIZE (GET_MODE (op0)))
bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
/* Storing an lsb-aligned field in a register
can be done with a movestrict instruction. */
- if (GET_CODE (op0) != MEM
+ if (!MEM_P (op0)
&& (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
&& bitsize == GET_MODE_BITSIZE (fieldmode)
&& (movstrict_optab->handlers[fieldmode].insn_code
/* OFFSET is the number of words or bytes (UNIT says which)
from STR_RTX to the first word or byte containing part of the field. */
- if (GET_CODE (op0) != MEM)
+ if (!MEM_P (op0))
{
if (offset != 0
|| GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
into a register and save it back later. */
/* This used to check flag_force_mem, but that was a serious
de-optimization now that flag_force_mem is enabled by -O2. */
- if (GET_CODE (op0) == MEM
+ if (MEM_P (op0)
&& ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
(op0, VOIDmode)))
{
volatile_ok = save_volatile_ok;
/* Add OFFSET into OP0's address. */
- if (GET_CODE (xop0) == MEM)
+ if (MEM_P (xop0))
xop0 = adjust_address (xop0, byte_mode, offset);
/* If xop0 is a register, we need it in MAXMODE
/* We have been counting XBITPOS within UNIT.
Count instead within the size of the register. */
- if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
+ if (BITS_BIG_ENDIAN && !MEM_P (xop0))
xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
unit = GET_MODE_BITSIZE (maxmode);
/* We must do an endian conversion exactly the same way as it is
done in extract_bit_field, so that the two calls to
extract_fixed_bit_field will have comparable arguments. */
- if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
+ if (!MEM_P (value) || GET_MODE (value) == BLKmode)
total_bits = BITS_PER_WORD;
else
total_bits = GET_MODE_BITSIZE (GET_MODE (value));
HOST_WIDE_INT total_size)
{
unsigned int unit
- = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
+ = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
unsigned HOST_WIDE_INT offset = bitnum / unit;
unsigned HOST_WIDE_INT bitpos = bitnum % unit;
rtx op0 = str_rtx;
/* Use vec_extract patterns for extracting parts of vectors whenever
available. */
if (VECTOR_MODE_P (GET_MODE (op0))
- && GET_CODE (op0) != MEM
+ && !MEM_P (op0)
&& (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
!= CODE_FOR_nothing)
&& ((bitsize + bitnum) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
if (imode != GET_MODE (op0))
{
- if (GET_CODE (op0) == MEM)
+ if (MEM_P (op0))
op0 = adjust_address (op0, imode, 0);
else if (imode != BLKmode)
op0 = gen_lowpart (imode, op0);
/* We may be accessing data outside the field, which means
we can alias adjacent data. */
- if (GET_CODE (op0) == MEM)
+ if (MEM_P (op0))
{
op0 = shallow_copy_rtx (op0);
set_mem_alias_set (op0, 0);
But as we have it, it counts within whatever size OP0 now has.
On a bigendian machine, these are not the same, so convert. */
if (BYTES_BIG_ENDIAN
- && GET_CODE (op0) != MEM
+ && !MEM_P (op0)
&& unit > GET_MODE_BITSIZE (GET_MODE (op0)))
bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
&& (BYTES_BIG_ENDIAN
? bitpos + bitsize == BITS_PER_WORD
: bitpos == 0)))
- && ((GET_CODE (op0) != MEM
+ && ((!MEM_P (op0)
&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
GET_MODE_BITSIZE (GET_MODE (op0)))
&& GET_MODE_SIZE (mode1) != 0
&& byte_offset % GET_MODE_SIZE (mode1) == 0)
- || (GET_CODE (op0) == MEM
+ || (MEM_P (op0)
&& (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
|| (offset * BITS_PER_UNIT % bitsize == 0
&& MEM_ALIGN (op0) % bitsize == 0)))))
/* OFFSET is the number of words or bytes (UNIT says which)
from STR_RTX to the first word or byte containing part of the field. */
- if (GET_CODE (op0) != MEM)
+ if (!MEM_P (op0))
{
if (offset != 0
|| GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
rtx pat;
enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);
- if (GET_CODE (xop0) == MEM)
+ if (MEM_P (xop0))
{
int save_volatile_ok = volatile_ok;
volatile_ok = 1;
xbitpos = unit - bitsize - xbitpos;
/* Now convert from counting within UNIT to counting in MAXMODE. */
- if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
+ if (BITS_BIG_ENDIAN && !MEM_P (xop0))
xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
unit = GET_MODE_BITSIZE (maxmode);
if (xtarget == 0
- || (flag_force_mem && GET_CODE (xtarget) == MEM))
+ || (flag_force_mem && MEM_P (xtarget)))
xtarget = xspec_target = gen_reg_rtx (tmode);
if (GET_MODE (xtarget) != maxmode)
rtx pat;
enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);
- if (GET_CODE (xop0) == MEM)
+ if (MEM_P (xop0))
{
/* Is the memory operand acceptable? */
if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
/* XBITPOS counts within a size of UNIT.
Adjust to count within a size of MAXMODE. */
- if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
+ if (BITS_BIG_ENDIAN && !MEM_P (xop0))
xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
unit = GET_MODE_BITSIZE (maxmode);
if (xtarget == 0
- || (flag_force_mem && GET_CODE (xtarget) == MEM))
+ || (flag_force_mem && MEM_P (xtarget)))
xtarget = xspec_target = gen_reg_rtx (tmode);
if (GET_MODE (xtarget) != maxmode)
/* Avoid referencing memory over and over.
For speed, but also for correctness when mem is volatile. */
- if (GET_CODE (op0) == MEM)
+ if (MEM_P (op0))
op0 = force_reg (mode, op0);
/* ACCUM starts out either as OP0 or as a zero, depending on
/* Don't clobber an operand while doing a multi-step calculation. */
|| ((rem_flag || op1_is_constant)
&& (reg_mentioned_p (target, op0)
- || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
+ || (MEM_P (op0) && MEM_P (target))))
|| reg_mentioned_p (target, op1)
- || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
+ || (MEM_P (op1) && MEM_P (target))))
target = 0;
/* Get the mode in which to perform this computation. Normally it will
/* If one of the operands is a volatile MEM, copy it into a register. */
- if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
+ if (MEM_P (op0) && MEM_VOLATILE_P (op0))
op0 = force_reg (compute_mode, op0);
- if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
+ if (MEM_P (op1) && MEM_VOLATILE_P (op1))
op1 = force_reg (compute_mode, op1);
/* If we need the remainder or if OP1 is constant, we need to
if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
&& GET_MODE_CLASS (mode) == MODE_INT
&& op1 == const0_rtx
- && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
+ && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
{
if (code == EQ || code == NE)
{
if (GET_MODE_BITSIZE (from_mode) > BITS_PER_WORD
&& GET_MODE_BITSIZE (to_mode) <= BITS_PER_WORD)
{
- if (!((GET_CODE (from) == MEM
+ if (!((MEM_P (from)
&& ! MEM_VOLATILE_P (from)
&& direct_load[(int) to_mode]
&& ! mode_dependent_address_p (XEXP (from, 0)))
&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode),
GET_MODE_BITSIZE (from_mode)))
{
- if (!((GET_CODE (from) == MEM
+ if (!((MEM_P (from)
&& ! MEM_VOLATILE_P (from)
&& direct_load[(int) to_mode]
&& ! mode_dependent_address_p (XEXP (from, 0)))
&& GET_MODE_CLASS (oldmode) == MODE_INT
&& (GET_CODE (x) == CONST_DOUBLE
|| (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (oldmode)
- && ((GET_CODE (x) == MEM && ! MEM_VOLATILE_P (x)
+ && ((MEM_P (x) && ! MEM_VOLATILE_P (x)
&& direct_load[(int) mode])
|| (REG_P (x)
&& (! HARD_REGISTER_P (x)
y = protect_from_queue (y, 0);
size = protect_from_queue (size, 0);
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
abort ();
- if (GET_CODE (y) != MEM)
+ if (!MEM_P (y))
abort ();
if (size == 0)
abort ();
from strange tricks we might play; but make sure that the source can
be loaded directly into the destination. */
src = orig_src;
- if (GET_CODE (orig_src) != MEM
+ if (!MEM_P (orig_src)
&& (!CONSTANT_P (orig_src)
|| (GET_MODE (orig_src) != mode
&& GET_MODE (orig_src) != VOIDmode)))
}
/* Optimize the access just a bit. */
- if (GET_CODE (src) == MEM
+ if (MEM_P (src)
&& (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (src))
|| MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
&& bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
emit_group_load (dst, temp, type, ssize);
return;
}
- else if (GET_CODE (dst) != MEM && GET_CODE (dst) != CONCAT)
+ else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
{
dst = gen_reg_rtx (GET_MODE (orig_dst));
/* Make life a bit easier for combine. */
}
/* Optimize the access just a bit. */
- if (GET_CODE (dest) == MEM
+ if (MEM_P (dest)
&& (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (dest))
|| MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
&& bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
clear_storage (rtx object, rtx size)
{
rtx retval = 0;
- unsigned int align = (GET_CODE (object) == MEM ? MEM_ALIGN (object)
+ unsigned int align = (MEM_P (object) ? MEM_ALIGN (object)
: GET_MODE_ALIGNMENT (GET_MODE (object)));
/* If OBJECT is not BLKmode and SIZE is the same size as its mode,
/* If X or Y are memory references, verify that their addresses are valid
for the machine. */
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
&& ! push_operand (x, GET_MODE (x)))
|| (flag_force_addr
&& CONSTANT_ADDRESS_P (XEXP (x, 0)))))
x = validize_mem (x);
- if (GET_CODE (y) == MEM
+ if (MEM_P (y)
&& (! memory_address_p (GET_MODE (y), XEXP (y, 0))
|| (flag_force_addr
&& CONSTANT_ADDRESS_P (XEXP (y, 0)))))
if (reload_in_progress)
{
x = gen_lowpart_common (tmode, x1);
- if (x == 0 && GET_CODE (x1) == MEM)
+ if (x == 0 && MEM_P (x1))
{
x = adjust_address_nv (x1, tmode, 0);
copy_replacements (x1, x);
}
y = gen_lowpart_common (tmode, y1);
- if (y == 0 && GET_CODE (y1) == MEM)
+ if (y == 0 && MEM_P (y1))
{
y = adjust_address_nv (y1, tmode, 0);
copy_replacements (y1, y);
/* If we are in reload, see if either operand is a MEM whose address
is scheduled for replacement. */
- if (reload_in_progress && GET_CODE (x) == MEM
+ if (reload_in_progress && MEM_P (x)
&& (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
x = replace_equiv_address_nv (x, inner);
- if (reload_in_progress && GET_CODE (y) == MEM
+ if (reload_in_progress && MEM_P (y)
&& (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
y = replace_equiv_address_nv (y, inner);
{
rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
- if (GET_CODE (to_rtx) != MEM)
+ if (!MEM_P (to_rtx))
abort ();
#ifdef POINTERS_EXTEND_UNSIGNED
/* A constant address in TO_RTX can have VOIDmode, we must not try
to call force_reg for that case. Avoid that case. */
- if (GET_CODE (to_rtx) == MEM
+ if (MEM_P (to_rtx)
&& GET_MODE (to_rtx) == BLKmode
&& GET_MODE (XEXP (to_rtx, 0)) != VOIDmode
&& bitsize > 0
offset));
}
- if (GET_CODE (to_rtx) == MEM)
+ if (MEM_P (to_rtx))
{
/* If the field is at offset zero, we could have been given the
DECL_RTX of the parent struct. Don't munge it. */
/* Deal with volatile and readonly fields. The former is only done
for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */
- if (volatilep && GET_CODE (to_rtx) == MEM)
+ if (volatilep && MEM_P (to_rtx))
{
if (to_rtx == orig_to_rtx)
to_rtx = copy_rtx (to_rtx);
/* We can't assert that a MEM won't be set more than once
if the component is not addressable because another
non-addressable component may be referenced by the same MEM. */
- && ! (GET_CODE (to_rtx) == MEM && ! can_address_p (to)))
+ && ! (MEM_P (to_rtx) && ! can_address_p (to)))
{
if (to_rtx == orig_to_rtx)
to_rtx = copy_rtx (to_rtx);
RTX_UNCHANGING_P (to_rtx) = 1;
}
- if (GET_CODE (to_rtx) == MEM && ! can_address_p (to))
+ if (MEM_P (to_rtx) && ! can_address_p (to))
{
if (to_rtx == orig_to_rtx)
to_rtx = copy_rtx (to_rtx);
dont_return_target = 1;
}
else if ((want_value & 1) != 0
- && GET_CODE (target) == MEM
+ && MEM_P (target)
&& ! MEM_VOLATILE_P (target)
&& GET_MODE (target) != BLKmode)
/* If target is in memory and caller wants value in a register instead,
only necessary if the MEM is volatile, or if the address
overlaps TARGET. But not performing the load twice also
reduces the amount of rtl we generate and then have to CSE. */
- if (GET_CODE (temp) == MEM && (want_value & 1) != 0)
+ if (MEM_P (temp) && (want_value & 1) != 0)
temp = copy_to_reg (temp);
/* If TEMP is a VOIDmode constant, use convert_modes to make
or if we really want the correct value. */
if (!(target && REG_P (target)
&& REGNO (target) < FIRST_PSEUDO_REGISTER)
- && !(GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ && !(MEM_P (target) && MEM_VOLATILE_P (target))
&& ! rtx_equal_p (temp, target)
&& (CONSTANT_P (temp) || (want_value & 1) != 0))
dont_return_target = 1;
/* If we are supposed to return TEMP, do so as long as it isn't a MEM.
??? The latter test doesn't seem to make sense. */
- else if (dont_return_target && GET_CODE (temp) != MEM)
+ else if (dont_return_target && !MEM_P (temp))
return temp;
/* Return TARGET itself if it is a hard register. */
/* If we have a nonzero bitpos for a register target, then we just
let store_field do the bitfield handling. This is unlikely to
generate unnecessary clear instructions anyways. */
- && (bitpos == 0 || GET_CODE (target) == MEM))
+ && (bitpos == 0 || MEM_P (target)))
{
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
target
= adjust_address (target,
GET_MODE (target) == BLKmode
/* Update the alias set, if required. */
- if (GET_CODE (target) == MEM && ! MEM_KEEP_ALIAS_SET_P (target)
+ if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
&& MEM_ALIAS_SET (target) != 0)
{
target = copy_rtx (target);
target));
offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
- if (GET_CODE (to_rtx) != MEM)
+ if (!MEM_P (to_rtx))
abort ();
#ifdef POINTERS_EXTEND_UNSIGNED
if (TREE_READONLY (field))
{
- if (GET_CODE (to_rtx) == MEM)
+ if (MEM_P (to_rtx))
to_rtx = copy_rtx (to_rtx);
RTX_UNCHANGING_P (to_rtx) = 1;
}
#endif
- if (GET_CODE (to_rtx) == MEM && !MEM_KEEP_ALIAS_SET_P (to_rtx)
+ if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx)
&& DECL_NONADDRESSABLE_P (field))
{
to_rtx = copy_rtx (to_rtx);
&& (lo = tree_low_cst (lo_index, 0),
hi = tree_low_cst (hi_index, 0),
count = hi - lo + 1,
- (GET_CODE (target) != MEM
+ (!MEM_P (target)
|| count <= 2
|| (host_integerp (TYPE_SIZE (elttype), 1)
&& (tree_low_cst (TYPE_SIZE (elttype), 1) * count
{
bitpos = lo * tree_low_cst (TYPE_SIZE (elttype), 0);
- if (GET_CODE (target) == MEM
+ if (MEM_P (target)
&& !MEM_KEEP_ALIAS_SET_P (target)
&& TREE_CODE (type) == ARRAY_TYPE
&& TYPE_NONALIASED_COMPONENT (type))
else
bitpos = (i * tree_low_cst (TYPE_SIZE (elttype), 1));
- if (GET_CODE (target) == MEM && !MEM_KEEP_ALIAS_SET_P (target)
+ if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
&& TREE_CODE (type) == ARRAY_TYPE
&& TYPE_NONALIASED_COMPONENT (type))
{
/* The assumption here is that it is safe to use
XEXP if the set is multi-word, but not if
it's single-word. */
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
to_rtx = adjust_address (target, mode, offset);
else if (offset == 0)
to_rtx = target;
emit_move_insn (targetx, target);
}
- else if (GET_CODE (target) == MEM)
+ else if (MEM_P (target))
targetx = target;
else
abort ();
boundary. If so, we simply do a block copy. */
if (GET_MODE (target) == BLKmode && GET_MODE (temp) == BLKmode)
{
- if (GET_CODE (target) != MEM || GET_CODE (temp) != MEM
+ if (!MEM_P (target) || !MEM_P (temp)
|| bitpos % BITS_PER_UNIT != 0)
abort ();
/* The caller wants an rtx for the value.
If possible, avoid refetching from the bitfield itself. */
if (width_mask != 0
- && ! (GET_CODE (target) == MEM && MEM_VOLATILE_P (target)))
+ && ! (MEM_P (target) && MEM_VOLATILE_P (target)))
{
tree count;
enum machine_mode tmode;
/* Check for subreg applied to an expression produced by loop optimizer. */
if (code == SUBREG
&& !REG_P (SUBREG_REG (value))
- && GET_CODE (SUBREG_REG (value)) != MEM)
+ && !MEM_P (SUBREG_REG (value)))
{
value = simplify_gen_subreg (GET_MODE (value),
force_reg (GET_MODE (SUBREG_REG (value)),
#ifdef INSN_SCHEDULING
/* On machines that have insn scheduling, we want all memory references to be
explicit, so we need to deal with such paradoxical SUBREGs. */
- if (GET_CODE (value) == SUBREG && GET_CODE (SUBREG_REG (value)) == MEM
+ if (GET_CODE (value) == SUBREG && MEM_P (SUBREG_REG (value))
&& (GET_MODE_SIZE (GET_MODE (value))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (value)))))
value
!= INTEGER_CST)
&& GET_MODE (x) == BLKmode)
/* If X is in the outgoing argument area, it is always safe. */
- || (GET_CODE (x) == MEM
+ || (MEM_P (x)
&& (XEXP (x, 0) == virtual_outgoing_args_rtx
|| (GET_CODE (XEXP (x, 0)) == PLUS
&& XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx))))
if (DECL_P (exp))
{
if (!DECL_RTL_SET_P (exp)
- || GET_CODE (DECL_RTL (exp)) != MEM)
+ || !MEM_P (DECL_RTL (exp)))
return 0;
else
exp_rtl = XEXP (DECL_RTL (exp), 0);
break;
case INDIRECT_REF:
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& alias_sets_conflict_p (MEM_ALIAS_SET (x),
get_alias_set (exp)))
return 0;
/* Assume that the call will clobber all hard registers and
all of memory. */
if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
- || GET_CODE (x) == MEM)
+ || MEM_P (x))
return 0;
break;
/* If the rtl is X, then it is not safe. Otherwise, it is unless both
are memory and they conflict. */
return ! (rtx_equal_p (x, exp_rtl)
- || (GET_CODE (x) == MEM && GET_CODE (exp_rtl) == MEM
+ || (MEM_P (x) && MEM_P (exp_rtl)
&& true_dependence (exp_rtl, VOIDmode, x,
rtx_addr_varies_p)));
}
&& modifier != EXPAND_CONST_ADDRESS)
{
temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
- if (GET_CODE (temp) == MEM)
+ if (MEM_P (temp))
temp = copy_to_reg (temp);
return const0_rtx;
}
if (context != 0 && context != current_function_decl
/* If var is static, we don't need a static chain to access it. */
- && ! (GET_CODE (DECL_RTL (exp)) == MEM
+ && ! (MEM_P (DECL_RTL (exp))
&& CONSTANT_P (XEXP (DECL_RTL (exp), 0))))
{
rtx addr;
if (DECL_NO_STATIC_CHAIN (current_function_decl))
abort ();
lang_hooks.mark_addressable (exp);
- if (GET_CODE (DECL_RTL (exp)) != MEM)
+ if (!MEM_P (DECL_RTL (exp)))
abort ();
addr = XEXP (DECL_RTL (exp), 0);
- if (GET_CODE (addr) == MEM)
+ if (MEM_P (addr))
addr
= replace_equiv_address (addr,
fix_lexical_addr (XEXP (addr, 0), exp));
from its initializer, while the initializer is still being parsed.
See expand_decl. */
- else if (GET_CODE (DECL_RTL (exp)) == MEM
+ else if (MEM_P (DECL_RTL (exp))
&& REG_P (XEXP (DECL_RTL (exp), 0)))
temp = validize_mem (DECL_RTL (exp));
the address is not valid or it is not a register and -fforce-addr
is specified, get the address into a register. */
- else if (GET_CODE (DECL_RTL (exp)) == MEM
+ else if (MEM_P (DECL_RTL (exp))
&& modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_SUM
&& modifier != EXPAND_INITIALIZER
if the address is a register. */
if (temp != 0)
{
- if (GET_CODE (temp) == MEM && REG_P (XEXP (temp, 0)))
+ if (MEM_P (temp) && REG_P (XEXP (temp, 0)))
mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp));
return temp;
put_var_into_stack (exp, /*rescan=*/true);
temp = SAVE_EXPR_RTL (exp);
}
- if (temp == 0 || GET_CODE (temp) != MEM)
+ if (temp == 0 || !MEM_P (temp))
abort ();
return
replace_equiv_address (temp,
C, but can in Ada if we have unchecked conversion of an expression
from a scalar type to an array or record type or for an
ARRAY_RANGE_REF whose type is BLKmode. */
- else if (GET_CODE (op0) != MEM
+ else if (!MEM_P (op0)
&& (offset != 0
|| (code == ARRAY_RANGE_REF && mode == BLKmode)))
{
rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
EXPAND_SUM);
- if (GET_CODE (op0) != MEM)
+ if (!MEM_P (op0))
abort ();
#ifdef POINTERS_EXTEND_UNSIGNED
/* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
record its alignment as BIGGEST_ALIGNMENT. */
- if (GET_CODE (op0) == MEM && bitpos == 0 && offset != 0
+ if (MEM_P (op0) && bitpos == 0 && offset != 0
&& is_aligning_offset (offset, tem))
set_mem_align (op0, BIGGEST_ALIGNMENT);
/* Don't forget about volatility even if this is a bitfield. */
- if (GET_CODE (op0) == MEM && volatilep && ! MEM_VOLATILE_P (op0))
+ if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0))
{
if (op0 == orig_op0)
op0 = copy_rtx (op0);
|| (mode1 != BLKmode
&& (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
|| (bitpos % GET_MODE_ALIGNMENT (mode) != 0)
- || (GET_CODE (op0) == MEM
+ || (MEM_P (op0)
&& (MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
|| (bitpos % GET_MODE_ALIGNMENT (mode1) != 0))))
&& ((modifier == EXPAND_CONST_ADDRESS
enum machine_mode ext_mode = mode;
if (ext_mode == BLKmode
- && ! (target != 0 && GET_CODE (op0) == MEM
- && GET_CODE (target) == MEM
+ && ! (target != 0 && MEM_P (op0)
+ && MEM_P (target)
&& bitpos % BITS_PER_UNIT == 0))
ext_mode = mode_for_size (bitsize, MODE_INT, 1);
/* In this case, BITPOS must start at a byte boundary and
TARGET, if specified, must be a MEM. */
- if (GET_CODE (op0) != MEM
- || (target != 0 && GET_CODE (target) != MEM)
+ if (!MEM_P (op0)
+ || (target != 0 && !MEM_P (target))
|| bitpos % BITS_PER_UNIT != 0)
abort ();
op0 = validize_mem (op0);
- if (GET_CODE (op0) == MEM && REG_P (XEXP (op0, 0)))
+ if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
target = assign_temp (type, 0, 1, 1);
}
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
/* Store data into beginning of memory target. */
store_expr (TREE_OPERAND (exp, 0),
adjust_address (target, TYPE_MODE (valtype), 0),
&& GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD
&& GET_MODE_SIZE (GET_MODE (op0)) <= UNITS_PER_WORD)
op0 = gen_lowpart (TYPE_MODE (type), op0);
- else if (GET_CODE (op0) != MEM)
+ else if (!MEM_P (op0))
{
/* If the operand is not a MEM, force it into memory. Since we
are going to be changing the mode of the MEM, don't call
that the operand is known to be aligned, indicate that it is.
Otherwise, we need only be concerned about alignment for non-BLKmode
results. */
- if (GET_CODE (op0) == MEM)
+ if (MEM_P (op0))
{
op0 = copy_rtx (op0);
target = original_target;
if (target == 0
|| modifier == EXPAND_STACK_PARM
- || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ || (MEM_P (target) && MEM_VOLATILE_P (target))
|| GET_MODE (target) != mode
|| (REG_P (target)
&& REGNO (target) < FIRST_PSEUDO_REGISTER))
/* At this point, a MEM target is no longer useful; we will get better
code without it. */
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
target = gen_reg_rtx (mode);
/* If op1 was placed in target, swap op0 and op1. */
|| REG_P (original_target)
|| TREE_ADDRESSABLE (type))
#endif
- && (GET_CODE (original_target) != MEM
+ && (!MEM_P (original_target)
|| TREE_ADDRESSABLE (type)))
temp = original_target;
else if (TREE_ADDRESSABLE (type))
}
}
- if (GET_CODE (op0) != MEM)
+ if (!MEM_P (op0))
abort ();
mark_temp_addr_taken (op0);
return enqueue_insn (op0, GEN_FCN (icode) (op0, op0, op1));
}
- if (icode != (int) CODE_FOR_nothing && GET_CODE (op0) == MEM)
+ if (icode != (int) CODE_FOR_nothing && MEM_P (op0))
{
rtx addr = (general_operand (XEXP (op0, 0), mode)
? force_reg (Pmode, XEXP (op0, 0))
recog_data.operand[i] = alter_subreg (recog_data.operand_loc[i]);
else if (GET_CODE (recog_data.operand[i]) == PLUS
|| GET_CODE (recog_data.operand[i]) == MULT
- || GET_CODE (recog_data.operand[i]) == MEM)
+ || MEM_P (recog_data.operand[i]))
recog_data.operand[i] = walk_alter_subreg (recog_data.operand_loc[i]);
}
*recog_data.dup_loc[i] = alter_subreg (recog_data.dup_loc[i]);
else if (GET_CODE (*recog_data.dup_loc[i]) == PLUS
|| GET_CODE (*recog_data.dup_loc[i]) == MULT
- || GET_CODE (*recog_data.dup_loc[i]) == MEM)
+ || MEM_P (*recog_data.dup_loc[i]))
*recog_data.dup_loc[i] = walk_alter_subreg (recog_data.dup_loc[i]);
}
}
/* simplify_subreg does not remove subreg from volatile references.
We are required to. */
- if (GET_CODE (y) == MEM)
+ if (MEM_P (y))
*xp = adjust_address (y, GET_MODE (x), SUBREG_BYTE (x));
else
{
if (REG_P (op))
return REG_EXPR (op);
- else if (GET_CODE (op) != MEM)
+ else if (!MEM_P (op))
return 0;
if (MEM_EXPR (op) != 0)
/* The stack pointer is only modified indirectly as the result
of a push until later in flow. See the comments in rtl.texi
regarding Embedded Side-Effects on Addresses. */
- || (GET_CODE (x) == MEM
+ || (MEM_P (x)
&& GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_AUTOINC
&& XEXP (XEXP (x, 0), 0) == stack_pointer_rtx))
current_function_sp_is_unchanging = 0;
for (insn = BB_END (bb); insn != BB_HEAD (bb); insn = PREV_INSN (insn))
if (GET_CODE (insn) == INSN
&& (set = single_set (insn))
- && GET_CODE (SET_DEST (set)) == MEM)
+ && MEM_P (SET_DEST (set)))
{
rtx mem = SET_DEST (set);
rtx canon_mem = canon_rtx (mem);
else if (volatile_refs_p (SET_SRC (x)))
return 0;
- if (GET_CODE (r) == MEM)
+ if (MEM_P (r))
{
rtx temp, canon_r;
|| GET_CODE (reg) == ZERO_EXTRACT
|| GET_CODE (reg) == SIGN_EXTRACT
|| GET_CODE (reg) == STRICT_LOW_PART);
- if (GET_CODE (reg) == MEM)
+ if (MEM_P (reg))
break;
not_dead = (unsigned long) REGNO_REG_SET_P (pbi->reg_live, REGNO (reg));
/* Fall through. */
/* If the memory reference had embedded side effects (autoincrement
address modes. Then we may need to kill some entries on the
memory set list. */
- if (insn && GET_CODE (reg) == MEM)
+ if (insn && MEM_P (reg))
for_each_rtx (&PATTERN (insn), invalidate_mems_from_autoinc, pbi);
- if (GET_CODE (reg) == MEM && ! side_effects_p (reg)
+ if (MEM_P (reg) && ! side_effects_p (reg)
/* ??? With more effort we could track conditional memory life. */
&& ! cond)
add_to_mem_set_list (pbi, canon_rtx (reg));
case CLOBBER:
/* If we are clobbering a MEM, mark any registers inside the address
as being used. */
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
mark_used_regs (pbi, XEXP (XEXP (x, 0), 0), cond, insn);
return;
/* If storing into MEM, don't show it as being used. But do
show the address as being used. */
- if (GET_CODE (testreg) == MEM)
+ if (MEM_P (testreg))
{
#ifdef AUTO_INC_DEC
if (flags & PROP_AUTOINC)
/* If X is not in memory or is at a constant address, it cannot be in
a temporary slot. */
- if (GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0)))
+ if (!MEM_P (x) || CONSTANT_P (XEXP (x, 0)))
return;
p = find_temp_slot_from_address (XEXP (x, 0));
/* If X is not in memory or is at a constant address, it cannot be in
a temporary slot, but it can contain something whose address was
taken. */
- if (p == 0 && (GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0))))
+ if (p == 0 && (!MEM_P (x) || CONSTANT_P (XEXP (x, 0))))
{
for (p = *temp_slots_at_level (temp_slot_level); p; p = next)
{
/* If X is not in memory or is at a constant address, it cannot be in
a temporary slot. */
- if (x == 0 || GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0)))
+ if (x == 0 || !MEM_P (x) || CONSTANT_P (XEXP (x, 0)))
return;
/* If we can find a match, move it to our level unless it is already at
reference, with a pseudo to address it, put that pseudo into the stack
if the var is non-local. */
if (TREE_CODE (decl) != SAVE_EXPR && DECL_NONLOCAL (decl)
- && GET_CODE (reg) == MEM
+ && MEM_P (reg)
&& REG_P (XEXP (reg, 0))
&& REGNO (XEXP (reg, 0)) > LAST_VIRTUAL_REGISTER)
{
/* If we can't use ADDRESSOF, make sure we see through one we already
generated. */
if (! can_use_addressof_p
- && GET_CODE (reg) == MEM
+ && MEM_P (reg)
&& GET_CODE (XEXP (reg, 0)) == ADDRESSOF)
reg = XEXP (XEXP (reg, 0), 0);
/* If this was previously a MEM but we've removed the ADDRESSOF,
set this address into that MEM so we always use the same
rtx for this variable. */
- if (orig_reg != reg && GET_CODE (orig_reg) == MEM)
+ if (orig_reg != reg && MEM_P (orig_reg))
XEXP (orig_reg, 0) = XEXP (reg, 0);
}
else if (GET_CODE (reg) == CONCAT)
means that the insn may have become invalid again. We can't
in this case make a new replacement since we already have one
and we must deal with MATCH_DUPs. */
- if (GET_CODE (replacement->new) == MEM)
+ if (MEM_P (replacement->new))
{
INSN_CODE (insn) = -1;
if (recog_memoized (insn) >= 0)
/* Now check that the containing word is memory, not a register,
and that it is safe to change the machine mode. */
- if (GET_CODE (XEXP (bitfield, 0)) == MEM)
+ if (MEM_P (XEXP (bitfield, 0)))
memref = XEXP (bitfield, 0);
else if (REG_P (XEXP (bitfield, 0))
&& equiv_mem != 0)
memref = equiv_mem[REGNO (XEXP (bitfield, 0))];
else if (GET_CODE (XEXP (bitfield, 0)) == SUBREG
- && GET_CODE (SUBREG_REG (XEXP (bitfield, 0))) == MEM)
+ && MEM_P (SUBREG_REG (XEXP (bitfield, 0))))
memref = SUBREG_REG (XEXP (bitfield, 0));
else if (GET_CODE (XEXP (bitfield, 0)) == SUBREG
&& equiv_mem != 0
{
if ((TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == VAR_DECL)
&& DECL_RTL (decl) != 0
- && GET_CODE (DECL_RTL (decl)) == MEM
+ && MEM_P (DECL_RTL (decl))
&& GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF
&& REG_P (XEXP (XEXP (DECL_RTL (decl), 0), 0)))
put_addressof_into_stack (XEXP (DECL_RTL (decl), 0), 0);
{
rtx sub, insns;
- if (GET_CODE (XEXP (x, 0)) != MEM)
+ if (!MEM_P (XEXP (x, 0)))
put_addressof_into_stack (x, ht);
/* We must create a copy of the rtx because it was created by
{
rtx sub = XEXP (XEXP (x, 0), 0);
- if (GET_CODE (sub) == MEM)
+ if (MEM_P (sub))
sub = adjust_address_nv (sub, GET_MODE (x), 0);
else if (REG_P (sub)
&& (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode))
/* If this is not a MEM, no need to do anything. Similarly if the
address is a constant or a register that is not a virtual register. */
- if (x == 0 || GET_CODE (x) != MEM)
+ if (x == 0 || !MEM_P (x))
return;
addr = XEXP (x, 0);
go ahead and make the invalid one, but do it to a copy. For a REG,
just make the recursive call, since there's no chance of a problem. */
- if ((GET_CODE (XEXP (x, 0)) == MEM
+ if ((MEM_P (XEXP (x, 0))
&& instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 0), XEXP (x, 0),
0))
|| (REG_P (XEXP (x, 0))
if (REG_P (XEXP (x, 0)))
return 1;
- else if (GET_CODE (XEXP (x, 0)) == MEM)
+ else if (MEM_P (XEXP (x, 0)))
{
/* If we have a (addressof (mem ..)), do any instantiation inside
since we know we'll be making the inside valid when we finally
if (nominal_mode == passed_mode
&& ! did_conversion
&& stack_parm != 0
- && GET_CODE (stack_parm) == MEM
+ && MEM_P (stack_parm)
&& locate.offset.var == 0
&& reg_mentioned_p (virtual_incoming_args_rtx,
XEXP (stack_parm, 0)))
/* Set MEM_EXPR to the original decl, i.e. to PARM,
instead of the copy of decl, i.e. FNARGS. */
if (DECL_INCOMING_RTL (parm)
- && GET_CODE (DECL_INCOMING_RTL (parm)) == MEM)
+ && MEM_P (DECL_INCOMING_RTL (parm)))
set_mem_expr (DECL_INCOMING_RTL (parm), parm);
}
fnargs = TREE_CHAIN (fnargs);
|| TREE_CODE (decl) == PARM_DECL)
&& DECL_RTL (decl) != 0
&& (REG_P (DECL_RTL (decl))
- || (GET_CODE (DECL_RTL (decl)) == MEM
+ || (MEM_P (DECL_RTL (decl))
&& GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF))
/* If this variable came from an inline function, it must be
that its life doesn't overlap the setjmp. If there was a
|| TREE_CODE (decl) == PARM_DECL)
&& DECL_RTL (decl) != 0
&& (REG_P (DECL_RTL (decl))
- || (GET_CODE (DECL_RTL (decl)) == MEM
+ || (MEM_P (DECL_RTL (decl))
&& GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF))
&& (
/* If longjmp doesn't restore the registers,
fp = find_function_data (context);
- if (GET_CODE (addr) == ADDRESSOF && GET_CODE (XEXP (addr, 0)) == MEM)
+ if (GET_CODE (addr) == ADDRESSOF && MEM_P (XEXP (addr, 0)))
addr = XEXP (XEXP (addr, 0), 0);
/* Decode given address as base reg plus displacement. */
insn = next;
continue;
}
- else if (GET_CODE (retaddr) == MEM
+ else if (MEM_P (retaddr)
&& REG_P (XEXP (retaddr, 0)))
base = gen_rtx_REG (Pmode, REGNO (XEXP (retaddr, 0))), offset = 0;
- else if (GET_CODE (retaddr) == MEM
+ else if (MEM_P (retaddr)
&& GET_CODE (XEXP (retaddr, 0)) == PLUS
&& REG_P (XEXP (XEXP (retaddr, 0), 0))
&& GET_CODE (XEXP (XEXP (retaddr, 0), 1)) == CONST_INT)
early because it'll screw alias analysis. Note that we've
already checked for no side effects. */
if (! no_new_pseudos && cse_not_expected
- && GET_CODE (a) == MEM && GET_CODE (b) == MEM
+ && MEM_P (a) && MEM_P (b)
&& BRANCH_COST >= 5)
{
a = XEXP (a, 0);
return FALSE;
c = XEXP (note, 0);
}
- if (GET_CODE (c) == MEM
+ if (MEM_P (c)
&& GET_CODE (XEXP (c, 0)) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (XEXP (c, 0)))
c = get_pool_constant (XEXP (c, 0));
{
/* We special-case memories, so handle any of them with
no address side effects. */
- if (GET_CODE (op) == MEM)
+ if (MEM_P (op))
return ! side_effects_p (XEXP (op, 0));
if (side_effects_p (op))
for most optimizations if writing to x may trap, i.e. it's a memory
other than a static var or a stack slot. */
if (! set_b
- && GET_CODE (orig_x) == MEM
+ && MEM_P (orig_x)
&& ! MEM_NOTRAP_P (orig_x)
&& rtx_addr_can_trap_p (XEXP (orig_x, 0)))
{
static int
find_memory (rtx *px, void *data ATTRIBUTE_UNUSED)
{
- return GET_CODE (*px) == MEM;
+ return MEM_P (*px);
}
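/* Aside (illustration only, not part of the patch): the MEM_P predicate
   substituted throughout these hunks is assumed to be the rtl.h
   convenience macro that simply wraps the code test it replaces,
   along the lines of:

     #define MEM_P(X)  (GET_CODE (X) == MEM)

   so each hunk is a purely mechanical, behavior-preserving rewrite of
   "GET_CODE (x) == MEM" into "MEM_P (x)" (and of the negated form
   into "!MEM_P (x)").  */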
/* Used by the code above to perform the actual rtl transformations.
case CLOBBER:
/* The only thing we can do with a USE or CLOBBER is possibly do
some substitutions in a MEM within it. */
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
subst_constants (&XEXP (XEXP (x, 0), 0), insn, map, 0);
return;
}
/* Do substitute in the address of a destination in memory. */
- if (GET_CODE (*dest_loc) == MEM)
+ if (MEM_P (*dest_loc))
subst_constants (&XEXP (*dest_loc, 0), insn, map, 0);
/* Check for the case of DEST a SUBREG, both it and the underlying
if (x == NULL_RTX || REG_N_SETS (REGNO (ivs->entries[i].pseudo)) > 1)
; /* Do nothing. */
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
reg_equiv_memory_loc[regno] = x;
else if (REG_P (x))
{
{
if ((REG_P (dest)
&& reg_overlap_mentioned_p (dest, equiv_mem))
- || (GET_CODE (dest) == MEM
+ || (MEM_P (dest)
&& true_dependence (dest, VOIDmode, equiv_mem, rtx_varies_p)))
equiv_mem_modified = 1;
}
case SET:
/* If we are setting a MEM, it doesn't count (its address does), but any
other SET_DEST that has a MEM in it is referencing the MEM. */
- if (GET_CODE (SET_DEST (x)) == MEM)
+ if (MEM_P (SET_DEST (x)))
{
if (memref_referenced_p (memref, XEXP (SET_DEST (x), 0)))
return 1;
|| (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
|| reg_equiv[regno].init_insns == const0_rtx
|| (CLASS_LIKELY_SPILLED_P (reg_preferred_class (regno))
- && GET_CODE (src) == MEM))
+ && MEM_P (src)))
{
/* This might be setting a SUBREG of a pseudo, a pseudo that is
also set somewhere else to a constant. */
note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
if (note == 0 && REG_BASIC_BLOCK (regno) >= 0
- && GET_CODE (SET_SRC (set)) == MEM
+ && MEM_P (SET_SRC (set))
&& validate_equiv_mem (insn, dest, SET_SRC (set)))
REG_NOTES (insn) = note = gen_rtx_EXPR_LIST (REG_EQUIV, SET_SRC (set),
REG_NOTES (insn));
= gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
dependencies);
else if (GET_CODE (x) == CLOBBER
- && GET_CODE (XEXP (x, 0)) == MEM)
+ && MEM_P (XEXP (x, 0)))
dependencies = find_regs_nested (dependencies,
XEXP (XEXP (x, 0), 0));
}
rtx fusage = XEXP (fusage_entry, 0);
if (GET_CODE (fusage) == CLOBBER
- && GET_CODE (XEXP (fusage, 0)) == MEM
+ && MEM_P (XEXP (fusage, 0))
&& RTX_UNCHANGING_P (XEXP (fusage, 0)))
{
note_stores (fusage, note_addr_stored, loop_info);
{
struct loop_info *loop_info = data;
- if (x == 0 || GET_CODE (x) != MEM)
+ if (x == 0 || !MEM_P (x))
return;
/* Count number of memory writes.
{
struct check_store_data *d = (struct check_store_data *) data;
- if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
+ if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
d->mem_write = 1;
}
\f
return 1;
}
}
- else if (REG_P (arg) || GET_CODE (arg) == MEM)
+ else if (REG_P (arg) || MEM_P (arg))
{
if (loop_invariant_p (loop, arg) == 1)
{
static int
find_mem_in_note_1 (rtx *x, void *data)
{
- if (*x != NULL_RTX && GET_CODE (*x) == MEM)
+ if (*x != NULL_RTX && MEM_P (*x))
{
rtx *res = (rtx *) data;
*res = *x;
op1 = gen_label_rtx ();
if (target == 0 || ! safe
|| GET_MODE (target) != mode
- || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ || (MEM_P (target) && MEM_VOLATILE_P (target))
|| (REG_P (target)
&& REGNO (target) < FIRST_PSEUDO_REGISTER))
target = gen_reg_rtx (mode);
op0 = copy_to_mode_reg (mode0, op0);
if (! (*insn_data[icode].operand[0].predicate) (temp, GET_MODE (temp))
- || (flag_force_mem && GET_CODE (temp) == MEM))
+ || (flag_force_mem && MEM_P (temp)))
temp = gen_reg_rtx (GET_MODE (temp));
pat = GEN_FCN (icode) (temp, op0);
different from the DECL_NAME name used in the source file. */
x = DECL_RTL (current_function_decl);
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
abort ();
x = XEXP (x, 0);
if (GET_CODE (x) != SYMBOL_REF)
that combine made wrt the contents of sign bits. We'll do this by
generating an extend instruction instead of a reg->reg copy. Thus
the destination must be a register that we can widen. */
- if (GET_CODE (src) == MEM
+ if (MEM_P (src)
&& GET_MODE_BITSIZE (GET_MODE (src)) < BITS_PER_WORD
&& (extend_op = LOAD_EXTEND_OP (GET_MODE (src))) != NIL
&& !REG_P (SET_DEST (set)))
return 0;
/* If memory loads are cheaper than register copies, don't change them. */
- if (GET_CODE (src) == MEM)
+ if (MEM_P (src))
old_cost = MEMORY_MOVE_COST (GET_MODE (src), dclass, 1);
else if (REG_P (src))
old_cost = REGISTER_MOVE_COST (GET_MODE (src),
op = recog_data.operand[i];
mode = GET_MODE (op);
#ifdef LOAD_EXTEND_OP
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& GET_MODE_BITSIZE (mode) < BITS_PER_WORD
&& LOAD_EXTEND_OP (mode) != NIL)
{
extension applies.
Also, if there is an explicit extension, we don't have to
worry about an implicit one. */
- else if (GET_CODE (SET_DEST (set)) == MEM
+ else if (MEM_P (SET_DEST (set))
|| GET_CODE (SET_DEST (set)) == STRICT_LOW_PART
|| GET_CODE (SET_SRC (set)) == ZERO_EXTEND
|| GET_CODE (SET_SRC (set)) == SIGN_EXTEND)
/* Some targets do argument pushes without adding REG_INC notes. */
- if (GET_CODE (dst) == MEM)
+ if (MEM_P (dst))
{
dst = XEXP (dst, 0);
if (GET_CODE (dst) == PRE_INC || GET_CODE (dst) == POST_INC
we created them ourself. They might not have set their
unchanging flag set, but nevertheless they are stable across
the livetime in question. */
- || (GET_CODE (src) == MEM
+ || (MEM_P (src)
&& INSN_UID (insn) >= orig_max_uid
&& memref_is_stack_slot (src)))
/* And we must be able to construct an insn without
src = SUBREG_REG (src);
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
- if (GET_CODE (src) == MEM && GET_CODE (dest) != MEM
+ if (MEM_P (src) && !MEM_P (dest)
&& memref_is_stack_slot (src))
pcost = &load;
- else if (GET_CODE (src) != MEM && GET_CODE (dest) == MEM
+ else if (!MEM_P (src) && MEM_P (dest)
&& memref_is_stack_slot (dest))
pcost = &store;
}
return 0;
return 1;
}
- if (GET_CODE (s1) != MEM || GET_CODE (s2) != MEM)
+ if (!MEM_P (s1) || !MEM_P (s2))
abort ();
s1 = XEXP (s1, 0);
s2 = XEXP (s2, 0);
slots = NULL;
else
{
- if (1 || GET_CODE (SET_SRC (set)) == MEM)
+ if (1 || MEM_P (SET_SRC (set)))
delete_overlapping_slots (&slots, SET_SRC (set));
}
}
changes[num_changes].loc = loc;
changes[num_changes].old = old;
- if (object && GET_CODE (object) != MEM)
+ if (object && !MEM_P (object))
{
/* Set INSN_CODE to force rerecognition of insn. Save old code in
case invalid. */
if (object == 0 || object == last_validated)
continue;
- if (GET_CODE (object) == MEM)
+ if (MEM_P (object))
{
if (! memory_address_p (GET_MODE (object), XEXP (object, 0)))
break;
for (i = num_changes - 1; i >= num; i--)
{
*changes[i].loc = changes[i].old;
- if (changes[i].object && GET_CODE (changes[i].object) != MEM)
+ if (changes[i].object && !MEM_P (changes[i].object))
INSN_CODE (changes[i].object) = changes[i].old_code;
}
num_changes = num;
likely to be an insertion operation; if it was, nothing bad will
happen, we might just fail in some cases). */
- if (GET_CODE (XEXP (x, 0)) == MEM
+ if (MEM_P (XEXP (x, 0))
&& GET_CODE (XEXP (x, 1)) == CONST_INT
&& GET_CODE (XEXP (x, 2)) == CONST_INT
&& !mode_dependent_address_p (XEXP (XEXP (x, 0), 0))
#ifdef INSN_SCHEDULING
/* On machines that have insn scheduling, we want all memory
reference to be explicit, so outlaw paradoxical SUBREGs. */
- if (GET_CODE (sub) == MEM
+ if (MEM_P (sub)
&& GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (sub)))
return 0;
#endif
??? This is a kludge. */
if (!reload_completed && SUBREG_BYTE (op) != 0
- && GET_CODE (sub) == MEM)
+ && MEM_P (sub))
return 0;
/* FLOAT_MODE subregs can't be paradoxical. Combine will occasionally
(Ideally, (SUBREG (MEM)...) should not exist after reload,
but currently it does result from (SUBREG (REG)...) where the
reg went on the stack.) */
- if (! reload_completed && GET_CODE (sub) == MEM)
+ if (! reload_completed && MEM_P (sub))
return general_operand (op, mode);
#ifdef CANNOT_CHANGE_MODE_CLASS
(Ideally, (SUBREG (MEM)...) should not exist after reload,
but currently it does result from (SUBREG (REG)...) where the
reg went on the stack.) */
- if (! reload_completed && GET_CODE (SUBREG_REG (op)) == MEM)
+ if (! reload_completed && MEM_P (SUBREG_REG (op)))
return general_operand (op, mode);
op = SUBREG_REG (op);
}
rounded_size = PUSH_ROUNDING (rounded_size);
#endif
- if (GET_CODE (op) != MEM)
+ if (!MEM_P (op))
return 0;
if (mode != VOIDmode && GET_MODE (op) != mode)
int
pop_operand (rtx op, enum machine_mode mode)
{
- if (GET_CODE (op) != MEM)
+ if (!MEM_P (op))
return 0;
if (mode != VOIDmode && GET_MODE (op) != mode)
if (! reload_completed)
/* Note that no SUBREG is a memory operand before end of reload pass,
because (SUBREG (MEM...)) forces reloading into a register. */
- return GET_CODE (op) == MEM && general_operand (op, mode);
+ return MEM_P (op) && general_operand (op, mode);
if (mode != VOIDmode && GET_MODE (op) != mode)
return 0;
if (GET_CODE (inner) == SUBREG)
inner = SUBREG_REG (inner);
- return (GET_CODE (inner) == MEM && general_operand (op, mode));
+ return (MEM_P (inner) && general_operand (op, mode));
}
/* Return 1 if OP is a valid indirect memory reference with mode MODE;
{
/* Before reload, a SUBREG isn't in memory (see memory_operand, above). */
if (! reload_completed
- && GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == MEM)
+ && GET_CODE (op) == SUBREG && MEM_P (SUBREG_REG (op)))
{
int offset = SUBREG_BYTE (op);
rtx inner = SUBREG_REG (op);
&& general_operand (XEXP (XEXP (inner, 0), 0), Pmode)));
}
- return (GET_CODE (op) == MEM
+ return (MEM_P (op)
&& memory_operand (op, mode)
&& general_operand (XEXP (op, 0), Pmode));
}
Match any memory and hope things are resolved after reload. */
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& (1
|| GET_CODE (XEXP (op, 0)) == PRE_DEC
|| GET_CODE (XEXP (op, 0)) == POST_DEC))
break;
case '>':
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& (1
|| GET_CODE (XEXP (op, 0)) == PRE_INC
|| GET_CODE (XEXP (op, 0)) == POST_INC))
int
offsettable_memref_p (rtx op)
{
- return ((GET_CODE (op) == MEM)
+ return ((MEM_P (op))
&& offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)));
}
int
offsettable_nonstrict_memref_p (rtx op)
{
- return ((GET_CODE (op) == MEM)
+ return ((MEM_P (op))
&& offsettable_address_p (0, GET_MODE (op), XEXP (op, 0)));
}
case 'm':
/* Memory operands must be valid, to the extent
required by STRICT. */
- if (GET_CODE (op) == MEM)
+ if (MEM_P (op))
{
if (strict > 0
&& !strict_memory_address_p (GET_MODE (op),
break;
case '<':
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& (GET_CODE (XEXP (op, 0)) == PRE_DEC
|| GET_CODE (XEXP (op, 0)) == POST_DEC))
win = 1;
break;
case '>':
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& (GET_CODE (XEXP (op, 0)) == PRE_INC
|| GET_CODE (XEXP (op, 0)) == POST_INC))
win = 1;
break;
case 'V':
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& ((strict > 0 && ! offsettable_memref_p (op))
|| (strict < 0
- && !(CONSTANT_P (op) || GET_CODE (op) == MEM))
+ && !(CONSTANT_P (op) || MEM_P (op)))
|| (reload_in_progress
&& !(REG_P (op)
&& REGNO (op) >= FIRST_PSEUDO_REGISTER))))
|| (strict == 0 && offsettable_nonstrict_memref_p (op))
/* Before reload, accept what reload can handle. */
|| (strict < 0
- && (CONSTANT_P (op) || GET_CODE (op) == MEM))
+ && (CONSTANT_P (op) || MEM_P (op)))
/* During reload, accept a pseudo */
|| (reload_in_progress && REG_P (op)
&& REGNO (op) >= FIRST_PSEUDO_REGISTER))
else if (EXTRA_MEMORY_CONSTRAINT (c, p)
/* Every memory operand can be reloaded to fit. */
- && ((strict < 0 && GET_CODE (op) == MEM)
+ && ((strict < 0 && MEM_P (op))
/* Before reload, accept what reload can turn
into mem. */
|| (strict < 0 && CONSTANT_P (op))
if (earlyclobber[eopno]
&& REG_P (recog_data.operand[eopno]))
for (opno = 0; opno < recog_data.n_operands; opno++)
- if ((GET_CODE (recog_data.operand[opno]) == MEM
+ if ((MEM_P (recog_data.operand[opno])
|| recog_data.operand_type[opno] != OP_OUT)
&& opno != eopno
/* Ignore things like match_operator operands. */
if (! in_set)
abort ();
- if (GET_CODE (SET_DEST (in_set)) != MEM)
+ if (!MEM_P (SET_DEST (in_set)))
return false;
out_set = single_set (out_insn);
/* See if this is a `movM' pattern, and handle elsewhere if so. */
if (STACK_REG_P (*src)
|| (STACK_REG_P (*dest)
- && (REG_P (*src) || GET_CODE (*src) == MEM
+ && (REG_P (*src) || MEM_P (*src)
|| GET_CODE (*src) == CONST_DOUBLE)))
{
control_flow_insn_deleted |= move_for_stack_reg (insn, regstack, pat);
if (GET_CODE (recog_data.operand[i]) == SUBREG)
recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
- if (GET_CODE (recog_data.operand[i]) == MEM)
+ if (MEM_P (recog_data.operand[i]))
record_address_regs (XEXP (recog_data.operand[i], 0),
MODE_BASE_REG_CLASS (modes[i]), frequency * 2);
else if (constraints[i][0] == 'p'
parameter is stored in memory. Record this fact. */
if (set != 0 && REG_P (SET_DEST (set))
- && GET_CODE (SET_SRC (set)) == MEM
+ && MEM_P (SET_SRC (set))
&& (note = find_reg_note (insn, REG_EQUIV,
NULL_RTX)) != 0
- && GET_CODE (XEXP (note, 0)) == MEM)
+ && MEM_P (XEXP (note, 0)))
{
costs[REGNO (SET_DEST (set))].mem_cost
-= (MEMORY_MOVE_COST (GET_MODE (SET_DEST (set)),
/* It doesn't seem worth distinguishing between offsettable
and non-offsettable addresses here. */
allows_mem[i] = 1;
- if (GET_CODE (op) == MEM)
+ if (MEM_P (op))
win = 1;
break;
case '<':
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& (GET_CODE (XEXP (op, 0)) == PRE_DEC
|| GET_CODE (XEXP (op, 0)) == POST_DEC))
win = 1;
break;
case '>':
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& (GET_CODE (XEXP (op, 0)) == PRE_INC
|| GET_CODE (XEXP (op, 0)) == POST_INC))
win = 1;
break;
case 'g':
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
|| (CONSTANT_P (op)
#ifdef LEGITIMATE_PIC_OPERAND_P
&& (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
{
/* Every MEM can be reloaded to fit. */
allows_mem[i] = 1;
- if (GET_CODE (op) == MEM)
+ if (MEM_P (op))
win = 1;
}
if (EXTRA_ADDRESS_CONSTRAINT (c, p))
cost to move between the register classes, and use 2 for everything
else (constants). */
- if (GET_CODE (x) == MEM || class == NO_REGS)
+ if (MEM_P (x) || class == NO_REGS)
return MEMORY_MOVE_COST (mode, class, to_p);
else if (REG_P (x))
REG_N_SETS (REGNO (reg))++;
REG_N_REFS (REGNO (reg))++;
}
- else if (GET_CODE (reg) == MEM)
+ else if (MEM_P (reg))
reg_scan_mark_refs (XEXP (reg, 0), insn, note_flag, min_regno);
}
break;
if (!REG_ATTRS (dest) && REG_P (src))
REG_ATTRS (dest) = REG_ATTRS (src);
- if (!REG_ATTRS (dest) && GET_CODE (src) == MEM)
+ if (!REG_ATTRS (dest) && MEM_P (src))
set_reg_attrs_from_mem (dest, src);
}
return;
if (! (set = single_set (p))
- || GET_CODE (SET_SRC (set)) != MEM
+ || !MEM_P (SET_SRC (set))
/* If there's a REG_EQUIV note, this must be an insn that loads an
argument. Prefer keeping the note over doing this optimization. */
|| find_reg_note (p, REG_EQUIV, NULL_RTX)
static int
stack_memref_p (rtx x)
{
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
return 0;
x = XEXP (x, 0);
turn it into a direct store. Obviously we can't do this if
there were any intervening uses of the stack pointer. */
if (memlist == NULL
- && GET_CODE (dest) == MEM
+ && MEM_P (dest)
&& ((GET_CODE (XEXP (dest, 0)) == PRE_DEC
&& (last_sp_adjust
== (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (dest))))
= replace_oldest_value_reg (recog_data.operand_loc[i],
recog_op_alt[i][alt].class,
insn, vd);
- else if (GET_CODE (recog_data.operand[i]) == MEM)
+ else if (MEM_P (recog_data.operand[i]))
replaced = replace_oldest_value_mem (recog_data.operand[i],
insn, vd);
}
- else if (GET_CODE (recog_data.operand[i]) == MEM)
+ else if (MEM_P (recog_data.operand[i]))
replaced = replace_oldest_value_mem (recog_data.operand[i],
insn, vd);
that are already scheduled, which can become quite complicated.
And since we've already handled address reloads for this MEM, it
should always succeed anyway. */
- if (GET_CODE (in) == MEM)
+ if (MEM_P (in))
return 1;
/* If we can make a simple SET insn that does the job, everything should
/* If we have a read-write operand with an address side-effect,
change either IN or OUT so the side-effect happens only once. */
- if (in != 0 && out != 0 && GET_CODE (in) == MEM && rtx_equal_p (in, out))
+ if (in != 0 && out != 0 && MEM_P (in) && rtx_equal_p (in, out))
switch (GET_CODE (XEXP (in, 0)))
{
case POST_INC: case POST_DEC: case POST_MODIFY:
|| strict_low
|| (((REG_P (SUBREG_REG (in))
&& REGNO (SUBREG_REG (in)) >= FIRST_PSEUDO_REGISTER)
- || GET_CODE (SUBREG_REG (in)) == MEM)
+ || MEM_P (SUBREG_REG (in)))
&& ((GET_MODE_SIZE (inmode)
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))))
#ifdef LOAD_EXTEND_OP
inloc = &SUBREG_REG (in);
in = *inloc;
#if ! defined (LOAD_EXTEND_OP) && ! defined (WORD_REGISTER_OPERATIONS)
- if (GET_CODE (in) == MEM)
+ if (MEM_P (in))
/* This is supposed to happen only for paradoxical subregs made by
combine.c. (SUBREG (MEM)) isn't supposed to occur other ways. */
if (GET_MODE_SIZE (GET_MODE (in)) > GET_MODE_SIZE (inmode))
|| strict_low
|| (((REG_P (SUBREG_REG (out))
&& REGNO (SUBREG_REG (out)) >= FIRST_PSEUDO_REGISTER)
- || GET_CODE (SUBREG_REG (out)) == MEM)
+ || MEM_P (SUBREG_REG (out)))
&& ((GET_MODE_SIZE (outmode)
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))))
#ifdef WORD_REGISTER_OPERATIONS
outloc = &SUBREG_REG (out);
out = *outloc;
#if ! defined (LOAD_EXTEND_OP) && ! defined (WORD_REGISTER_OPERATIONS)
- if (GET_CODE (out) == MEM
+ if (MEM_P (out)
&& GET_MODE_SIZE (GET_MODE (out)) > GET_MODE_SIZE (outmode))
abort ();
#endif
}
/* If IN appears in OUT, we can't share any input-only reload for IN. */
- if (in != 0 && out != 0 && GET_CODE (out) == MEM
- && (REG_P (in) || GET_CODE (in) == MEM)
+ if (in != 0 && out != 0 && MEM_P (out)
+ && (REG_P (in) || MEM_P (in))
&& reg_overlap_mentioned_for_reload_p (in, XEXP (out, 0)))
dont_share = 1;
memset (&val, 0, sizeof (val));
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
{
rtx base = NULL_RTX, offset = 0;
rtx addr = XEXP (x, 0);
if (ydata.safe)
return 1;
- if (GET_CODE (y) != MEM)
+ if (!MEM_P (y))
abort ();
/* If Y is memory and X is not, Y can't affect X. */
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
return 1;
xdata = decompose (x);
wider reload. */
if (replace
- && GET_CODE (op) == MEM
+ && MEM_P (op)
&& REG_P (reg)
&& (GET_MODE_SIZE (GET_MODE (reg))
>= GET_MODE_SIZE (GET_MODE (op))))
??? When is it right at this stage to have a subreg
of a mem that is _not_ to be handled specially? IMO
those should have been reduced to just a mem. */
- || ((GET_CODE (operand) == MEM
+ || ((MEM_P (operand)
|| (REG_P (operand)
&& REGNO (operand) >= FIRST_PSEUDO_REGISTER))
#ifndef WORD_REGISTER_OPERATIONS
offsettable address was expected, then we must reject
this combination, because we can't reload it. */
if (this_alternative_offmemok[m]
- && GET_CODE (recog_data.operand[m]) == MEM
+ && MEM_P (recog_data.operand[m])
&& this_alternative[m] == (int) NO_REGS
&& ! this_alternative_win[m])
bad = 1;
case 'm':
if (force_reload)
break;
- if (GET_CODE (operand) == MEM
+ if (MEM_P (operand)
|| (REG_P (operand)
&& REGNO (operand) >= FIRST_PSEUDO_REGISTER
&& reg_renumber[REGNO (operand)] < 0))
break;
case '<':
- if (GET_CODE (operand) == MEM
+ if (MEM_P (operand)
&& ! address_reloaded[i]
&& (GET_CODE (XEXP (operand, 0)) == PRE_DEC
|| GET_CODE (XEXP (operand, 0)) == POST_DEC))
break;
case '>':
- if (GET_CODE (operand) == MEM
+ if (MEM_P (operand)
&& ! address_reloaded[i]
&& (GET_CODE (XEXP (operand, 0)) == PRE_INC
|| GET_CODE (XEXP (operand, 0)) == POST_INC))
case 'V':
if (force_reload)
break;
- if (GET_CODE (operand) == MEM
+ if (MEM_P (operand)
&& ! (ind_levels ? offsettable_memref_p (operand)
: offsettable_nonstrict_memref_p (operand))
/* Certain mem addresses will become offsettable
case 'o':
if (force_reload)
break;
- if ((GET_CODE (operand) == MEM
+ if ((MEM_P (operand)
/* If IND_LEVELS, find_reloads_address won't reload a
pseudo that didn't get a hard reg, so we have to
reject that case. */
|| (reg_equiv_address[REGNO (operand)] != 0))))
win = 1;
if (CONST_POOL_OK_P (operand)
- || GET_CODE (operand) == MEM)
+ || MEM_P (operand))
badop = 0;
constmemok = 1;
offmemok = 1;
win = 1;
/* If the address was already reloaded,
we win as well. */
- else if (GET_CODE (operand) == MEM
+ else if (MEM_P (operand)
&& address_reloaded[i])
win = 1;
/* Likewise if the address will be reloaded because
constants via force_const_mem, and other
MEMs by reloading the address like for 'o'. */
if (CONST_POOL_OK_P (operand)
- || GET_CODE (operand) == MEM)
+ || MEM_P (operand))
badop = 0;
constmemok = 1;
offmemok = 1;
for (j = 0; j < noperands; j++)
/* Is this an input operand or a memory ref? */
- if ((GET_CODE (recog_data.operand[j]) == MEM
+ if ((MEM_P (recog_data.operand[j])
|| modified[j] != RELOAD_WRITE)
&& j != i
/* Ignore things like match_operator operands. */
so we don't bother with it. It may not be worth doing. */
else if (goal_alternative_matched[i] == -1
&& goal_alternative_offmemok[i]
- && GET_CODE (recog_data.operand[i]) == MEM)
+ && MEM_P (recog_data.operand[i]))
{
operand_reloadnum[i]
= push_reload (XEXP (recog_data.operand[i], 0), NULL_RTX,
while (GET_CODE (operand) == SUBREG)
operand = SUBREG_REG (operand);
- if ((GET_CODE (operand) == MEM
+ if ((MEM_P (operand)
|| (REG_P (operand)
&& REGNO (operand) >= FIRST_PSEUDO_REGISTER))
/* If this is only for an output, the optional reload would not
we then need to emit a USE and/or a CLOBBER so that reload
inheritance will do the right thing. */
else if (replace
- && (GET_CODE (operand) == MEM
+ && (MEM_P (operand)
|| (REG_P (operand)
&& REGNO (operand) >= FIRST_PSEUDO_REGISTER
&& reg_renumber [REGNO (operand)] < 0)))
while (GET_CODE (operand) == SUBREG)
operand = SUBREG_REG (operand);
- if ((GET_CODE (operand) == MEM
+ if ((MEM_P (operand)
|| (REG_P (operand)
&& REGNO (operand) >= FIRST_PSEUDO_REGISTER))
&& ((enum reg_class) goal_alternative[goal_alternative_matches[i]]
frame and stack pointers is not its initial value. In that case the
pseudo will have been replaced by a MEM referring to the
stack pointer. */
- if (GET_CODE (ad) == MEM)
+ if (MEM_P (ad))
{
/* First ensure that the address in this MEM is valid. Then, unless
indirect addresses are valid, reload the MEM into a register. */
if (ind_levels == 0
|| (GET_CODE (XEXP (tem, 0)) == SYMBOL_REF && ! indirect_symref_ok)
- || GET_CODE (XEXP (tem, 0)) == MEM
+ || MEM_P (XEXP (tem, 0))
|| ! (REG_P (XEXP (tem, 0))
|| (GET_CODE (XEXP (tem, 0)) == PLUS
&& REG_P (XEXP (XEXP (tem, 0), 0))
memory location, since this will make it harder to
reuse address reloads, and increases register pressure.
Also don't do this if we can probably update x directly. */
- rtx equiv = (GET_CODE (XEXP (x, 0)) == MEM
+ rtx equiv = (MEM_P (XEXP (x, 0))
? XEXP (x, 0)
: reg_equiv_mem[regno]);
int icode = (int) add_optab->handlers[(int) Pmode].insn_code;
return value;
}
- else if (GET_CODE (XEXP (x, 0)) == MEM)
+ else if (MEM_P (XEXP (x, 0)))
{
/* This is probably the result of a substitution, by eliminate_regs,
of an equivalent address for a pseudo that was not allocated to a
abort ();
}
}
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
return refers_to_mem_for_reload_p (in);
else if (GET_CODE (x) == SCRATCH || GET_CODE (x) == PC
|| GET_CODE (x) == CC0)
(plus (sp) (const_int 64)), since that can lead to incorrect reload
allocation when spuriously changing a RELOAD_FOR_OUTPUT_ADDRESS
into a RELOAD_OTHER on behalf of another RELOAD_OTHER. */
- while (GET_CODE (in) == MEM)
+ while (MEM_P (in))
in = XEXP (in, 0);
if (REG_P (in))
return 0;
const char *fmt;
int i;
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
return 1;
if (REG_P (x))
fmt = GET_RTX_FORMAT (GET_CODE (x));
for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
if (fmt[i] == 'e'
- && (GET_CODE (XEXP (x, i)) == MEM
+ && (MEM_P (XEXP (x, i))
|| refers_to_mem_for_reload_p (XEXP (x, i))))
return 1;
regno = goalreg;
else if (REG_P (goal))
regno = REGNO (goal);
- else if (GET_CODE (goal) == MEM)
+ else if (MEM_P (goal))
{
enum rtx_code code = GET_CODE (XEXP (goal, 0));
if (MEM_VOLATILE_P (goal))
if (xregno == STACK_POINTER_REGNUM && need_stable_sp)
return 0;
}
- else if (goal_mem && GET_CODE (dest) == MEM
+ else if (goal_mem && MEM_P (dest)
&& ! push_operand (dest, GET_MODE (dest)))
return 0;
- else if (GET_CODE (dest) == MEM && regno >= FIRST_PSEUDO_REGISTER
+ else if (MEM_P (dest) && regno >= FIRST_PSEUDO_REGISTER
&& reg_equiv_memory_loc[regno] != 0)
return 0;
else if (need_stable_sp && push_operand (dest, GET_MODE (dest)))
if (xregno == STACK_POINTER_REGNUM && need_stable_sp)
return 0;
}
- else if (goal_mem && GET_CODE (dest) == MEM
+ else if (goal_mem && MEM_P (dest)
&& ! push_operand (dest, GET_MODE (dest)))
return 0;
- else if (GET_CODE (dest) == MEM && regno >= FIRST_PSEUDO_REGISTER
+ else if (MEM_P (dest) && regno >= FIRST_PSEUDO_REGISTER
&& reg_equiv_memory_loc[regno] != 0)
return 0;
else if (need_stable_sp
return 0;
}
- else if (goal_mem && GET_CODE (dest) == MEM
+ else if (goal_mem && MEM_P (dest)
&& ! push_operand (dest, GET_MODE (dest)))
return 0;
else if (need_stable_sp
and the MEM is not SET_SRC, the equivalencing insn
is one with the MEM as a SET_DEST and it occurs later.
So don't mark this insn now. */
- if (GET_CODE (x) != MEM
+ if (!MEM_P (x)
|| rtx_equal_p (SET_SRC (set), x))
reg_equiv_init[i]
= gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[i]);
/* If this insn is setting a MEM from a register equivalent to it,
this is the equivalencing insn. */
- else if (set && GET_CODE (SET_DEST (set)) == MEM
+ else if (set && MEM_P (SET_DEST (set))
&& REG_P (SET_SRC (set))
&& reg_equiv_memory_loc[REGNO (SET_SRC (set))]
&& rtx_equal_p (SET_DEST (set),
&& (GET_MODE (insn) == QImode
|| find_reg_note (insn, REG_EQUAL, NULL_RTX)))
|| (GET_CODE (PATTERN (insn)) == CLOBBER
- && (GET_CODE (XEXP (PATTERN (insn), 0)) != MEM
+ && (!MEM_P (XEXP (PATTERN (insn), 0))
|| GET_MODE (XEXP (PATTERN (insn), 0)) != BLKmode
|| (GET_CODE (XEXP (XEXP (PATTERN (insn), 0), 0)) != SCRATCH
&& XEXP (XEXP (PATTERN (insn), 0), 0)
eliminate_regs on DECL_RTL; any ADDRESSOFs in the actual insns are
removed after CSE. */
new = eliminate_regs (XEXP (x, 0), 0, insn);
- if (GET_CODE (new) == MEM)
+ if (MEM_P (new))
return XEXP (new, 0);
return x;
int x_size = GET_MODE_SIZE (GET_MODE (x));
int new_size = GET_MODE_SIZE (GET_MODE (new));
- if (GET_CODE (new) == MEM
+ if (MEM_P (new)
&& ((x_size < new_size
#ifdef WORD_REGISTER_OPERATIONS
/* On these machines, combine can create rtl of the form
insn, write a CLOBBER insn. */
if (recog_data.operand_type[i] != OP_IN
&& REG_P (orig_operand[i])
- && GET_CODE (substed_operand[i]) == MEM
+ && MEM_P (substed_operand[i])
&& replace)
emit_insn_after (gen_rtx_CLOBBER (VOIDmode, orig_operand[i]),
insn);
the MEM in recog_data.operand to the one in the insn.
If they are not equal, then rerecognize the insn. */
|| (old_set != 0
- && ((GET_CODE (SET_SRC (old_set)) == MEM
+ && ((MEM_P (SET_SRC (old_set))
&& SET_SRC (old_set) != recog_data.operand[1])
- || (GET_CODE (SET_DEST (old_set)) == MEM
+ || (MEM_P (SET_DEST (old_set))
&& SET_DEST (old_set) != recog_data.operand[0])))
/* If this was an add insn before, rerecognize. */
|| GET_CODE (SET_SRC (old_set)) == PLUS))
if ((GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER)
- && GET_CODE (XEXP (PATTERN (insn), 0)) == MEM)
+ && MEM_P (XEXP (PATTERN (insn), 0)))
XEXP (XEXP (PATTERN (insn), 0), 0)
= eliminate_regs (XEXP (XEXP (PATTERN (insn), 0), 0),
GET_MODE (XEXP (PATTERN (insn), 0)),
if (rld[r].in != 0 && rld[r].reg_rtx != 0
&& (rtx_equal_p (rld[r].in, rld[r].reg_rtx)
|| (rtx_equal_p (rld[r].out, rld[r].reg_rtx)
- && GET_CODE (rld[r].in) != MEM
+ && !MEM_P (rld[r].in)
&& true_regnum (rld[r].in) < FIRST_PSEUDO_REGISTER)))
continue;
&& (CONSTANT_P (rld[r].in)
|| GET_CODE (rld[r].in) == PLUS
|| REG_P (rld[r].in)
- || GET_CODE (rld[r].in) == MEM)
+ || MEM_P (rld[r].in))
&& (rld[r].nregs == max_group_size
|| ! reg_classes_intersect_p (rld[r].class, group_class)))
search_equiv = rld[r].in;
because we will use this equiv reg right away. */
if (oldequiv == 0 && optimize
- && (GET_CODE (old) == MEM
+ && (MEM_P (old)
|| (REG_P (old)
&& REGNO (old) >= FIRST_PSEUDO_REGISTER
&& reg_renumber[REGNO (old)] < 0)))
do_input_reload (struct insn_chain *chain, struct reload *rl, int j)
{
rtx insn = chain->insn;
- rtx old = (rl->in && GET_CODE (rl->in) == MEM
+ rtx old = (rl->in && MEM_P (rl->in)
? rl->in_reg : rl->in);
if (old != 0
e.g. inheriting a SImode output reload for
(mem:HI (plus:SI (reg:SI 14 fp) (const_int 10))) */
if (optimize && reload_inherited[j] && rl->in
- && GET_CODE (rl->in) == MEM
- && GET_CODE (rl->in_reg) == MEM
+ && MEM_P (rl->in)
+ && MEM_P (rl->in_reg)
&& reload_spill_index[j] >= 0
&& TEST_HARD_REG_BIT (reg_reloaded_valid, reload_spill_index[j]))
rl->in = regno_reg_rtx[reg_reloaded_contents[reload_spill_index[j]]];
it thinks only about the original insn. So invalidate it here. */
if (i < 0 && rld[r].out != 0
&& (REG_P (rld[r].out)
- || (GET_CODE (rld[r].out) == MEM
+ || (MEM_P (rld[r].out)
&& REG_P (rld[r].out_reg))))
{
rtx out = (REG_P (rld[r].out)
if (GET_CODE (in) == PLUS
&& (REG_P (XEXP (in, 0))
|| GET_CODE (XEXP (in, 0)) == SUBREG
- || GET_CODE (XEXP (in, 0)) == MEM)
+ || MEM_P (XEXP (in, 0)))
&& (REG_P (XEXP (in, 1))
|| GET_CODE (XEXP (in, 1)) == SUBREG
|| CONSTANT_P (XEXP (in, 1))
- || GET_CODE (XEXP (in, 1)) == MEM))
+ || MEM_P (XEXP (in, 1))))
{
/* We need to compute the sum of a register or a MEM and another
register, constant, or MEM, and put it into the reload
code = (int) add_optab->handlers[(int) GET_MODE (out)].insn_code;
- if (CONSTANT_P (op1) || GET_CODE (op1) == MEM || GET_CODE (op1) == SUBREG
+ if (CONSTANT_P (op1) || MEM_P (op1) || GET_CODE (op1) == SUBREG
|| (REG_P (op1)
&& REGNO (op1) >= FIRST_PSEUDO_REGISTER)
|| (code != CODE_FOR_nothing
rtx reg2 = rld[k].in;
if (! reg2)
continue;
- if (GET_CODE (reg2) == MEM || reload_override_in[k])
+ if (MEM_P (reg2) || reload_override_in[k])
reg2 = rld[k].in_reg;
#ifdef AUTO_INC_DEC
if (rld[k].out && ! rld[k].out_reg)
if (set)
{
rtx dst = SET_DEST (set);
- if (GET_CODE (dst) == MEM)
+ if (MEM_P (dst))
delete_address_reloads_1 (dead_insn, XEXP (dst, 0), current_insn);
}
/* If we deleted the store from a reloaded post_{in,de}c expression,
mark_referenced_resources (x, res, 0);
else if (GET_CODE (x) == SUBREG)
x = SUBREG_REG (x);
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
mark_referenced_resources (XEXP (x, 0), res, 0);
return;
}
/* If X isn't a MEM then this isn't a tablejump we understand. */
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
return NULL_RTX;
/* Strip off the MEM. */
return 0;
case MEM:
- if (GET_CODE (find) == MEM && rtx_equal_p (x, find))
+ if (MEM_P (find) && rtx_equal_p (x, find))
return 1;
break;
return 0;
case CLOBBER:
- if (GET_CODE (XEXP (body, 0)) == MEM)
+ if (MEM_P (XEXP (body, 0)))
if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
return 1;
return 0;
information holds all clobbered registers. */
&& ((REG_P (reg)
&& REGNO (reg) < FIRST_PSEUDO_REGISTER)
- || GET_CODE (reg) == MEM
+ || MEM_P (reg)
|| find_reg_fusage (insn, CLOBBER, reg)))))
return 1;
{
struct set_of_data *data = (struct set_of_data *) (data1);
if (rtx_equal_p (x, data->pat)
- || (GET_CODE (x) != MEM && reg_overlap_mentioned_p (data->pat, x)))
+ || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
data->found = pat;
}
if (dst == pc_rtx && src == pc_rtx)
return 1;
- if (GET_CODE (dst) == MEM && GET_CODE (src) == MEM)
+ if (MEM_P (dst) && MEM_P (src))
return rtx_equal_p (dst, src) && !side_effects_p (dst);
if (GET_CODE (dst) == SIGN_EXTRACT
const char *fmt;
int i;
- if (GET_CODE (in) == MEM)
+ if (MEM_P (in))
return 1;
fmt = GET_RTX_FORMAT (GET_CODE (in));
return;
case CLOBBER:
- if (GET_CODE (XEXP (body, 0)) == MEM)
+ if (MEM_P (XEXP (body, 0)))
(*fun) (&XEXP (XEXP (body, 0), 0), data);
return;
while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
dest = XEXP (dest, 0);
- if (GET_CODE (dest) == MEM)
+ if (MEM_P (dest))
(*fun) (&XEXP (dest, 0), data);
}
return;
rtx u, m;
if (GET_CODE (u = XEXP (link, 0)) == USE
- && GET_CODE (m = XEXP (u, 0)) == MEM && GET_MODE (m) == BLKmode
+ && MEM_P (m = XEXP (u, 0)) && GET_MODE (m) == BLKmode
&& GET_CODE (XEXP (m, 0)) == SCRATCH)
return 1;
}
if (replace_dest)
SET_DEST (x) = replace_regs (SET_DEST (x), reg_map, nregs, 0);
- else if (GET_CODE (SET_DEST (x)) == MEM
+ else if (MEM_P (SET_DEST (x))
|| GET_CODE (SET_DEST (x)) == STRICT_LOW_PART)
/* Even if we are not to replace destinations, replace register if it
is CONTAINED in destination (destination is memory or
<< (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
!= 0))
: LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
- || GET_CODE (SUBREG_REG (x)) != MEM)
+ || !MEM_P (SUBREG_REG (x)))
#endif
{
/* On many CISC machines, accessing an object in a wider mode
if ((GET_MODE_SIZE (GET_MODE (x))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
&& LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
- && GET_CODE (SUBREG_REG (x)) == MEM)
+ && MEM_P (SUBREG_REG (x)))
return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
known_x, known_mode, known_ret);
#endif
abort ();
return result;
}
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
{
/* The only additional case we can do is MEM. */
int offset = 0;
if (!reload_completed && get_reg_known_equiv_p (regno))
{
rtx t = get_reg_known_value (regno);
- if (GET_CODE (t) == MEM)
+ if (MEM_P (t))
sched_analyze_2 (deps, XEXP (t, 0), insn);
}
add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
}
}
- else if (GET_CODE (dest) == MEM)
+ else if (MEM_P (dest))
{
/* Writing memory. */
rtx t = dest;
if (!reload_completed && get_reg_known_equiv_p (regno))
{
rtx t = get_reg_known_value (regno);
- if (GET_CODE (t) == MEM)
+ if (MEM_P (t))
sched_analyze_2 (deps, XEXP (t, 0), insn);
}
/* 1 if PARM is passed to this function in memory. */
#define PARM_PASSED_IN_MEMORY(PARM) \
- (GET_CODE (DECL_INCOMING_RTL (PARM)) == MEM)
+ (MEM_P (DECL_INCOMING_RTL (PARM)))
/* A C expression for the integer offset value of an automatic variable
(C_AUTO) having address X (an RTX). */
a DECL_INITIAL value of 0. */
if (! DECL_INITIAL (decl))
return;
- if (GET_CODE (DECL_RTL (decl)) != MEM
+ if (!MEM_P (DECL_RTL (decl))
|| GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
return;
PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
/* Don't output anything if an auto variable
gets RTL that is static.
GAS version 2.2 can't handle such output. */
- else if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0))
+ else if (MEM_P (value) && CONSTANT_P (XEXP (value, 0))
&& ! TREE_STATIC (decl))
return;
/* Defer SDB information for top-level initialized variables! */
if (! local
- && GET_CODE (value) == MEM
+ && MEM_P (value)
&& DECL_INITIAL (decl))
return;
else
name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
- if (GET_CODE (value) == MEM
+ if (MEM_P (value)
&& GET_CODE (XEXP (value, 0)) == SYMBOL_REF)
{
PUT_SDB_DEF (name);
PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (regno));
PUT_SDB_SCL (C_REG);
}
- else if (GET_CODE (value) == MEM
- && (GET_CODE (XEXP (value, 0)) == MEM
+ else if (MEM_P (value)
+ && (MEM_P (XEXP (value, 0))
|| (REG_P (XEXP (value, 0))
&& REGNO (XEXP (value, 0)) != HARD_FRAME_POINTER_REGNUM
&& REGNO (XEXP (value, 0)) != STACK_POINTER_REGNUM)))
type = make_node (POINTER_TYPE);
TREE_TYPE (type) = TREE_TYPE (decl);
}
- else if (GET_CODE (value) == MEM
+ else if (MEM_P (value)
&& ((GET_CODE (XEXP (value, 0)) == PLUS
&& REG_P (XEXP (XEXP (value, 0), 0))
&& GET_CODE (XEXP (XEXP (value, 0), 1)) == CONST_INT)
return;
if (! (TREE_CODE (decl) == VAR_DECL
- && GET_CODE (DECL_RTL (decl)) == MEM
+ && MEM_P (DECL_RTL (decl))
&& DECL_INITIAL (decl)))
abort ();
(GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms)))
- GET_MODE_SIZE (GET_MODE (DECL_RTL (parms))));
- if (GET_CODE (DECL_RTL (parms)) == MEM
+ if (MEM_P (DECL_RTL (parms))
&& GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS
&& (GET_CODE (XEXP (XEXP (DECL_RTL (parms), 0), 1))
== CONST_INT)
PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
PUT_SDB_ENDEF;
}
- else if (GET_CODE (DECL_RTL (parms)) == MEM
+ else if (MEM_P (DECL_RTL (parms))
&& XEXP (DECL_RTL (parms), 0) != const0_rtx)
{
/* Parm was passed in registers but lives on the stack. */
or (MEM (REG ...)) or (MEM (MEM ...)),
in which case we use a value of zero. */
if (REG_P (XEXP (DECL_RTL (parms), 0))
- || GET_CODE (XEXP (DECL_RTL (parms), 0)) == MEM)
+ || MEM_P (XEXP (DECL_RTL (parms), 0)))
current_sym_value = 0;
else
current_sym_value = INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1));
PUT_SDB_ENDEF;
}
/* Report parms that live in memory but not where they were passed. */
- else if (GET_CODE (DECL_RTL (parms)) == MEM
+ else if (MEM_P (DECL_RTL (parms))
&& GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS
&& GET_CODE (XEXP (XEXP (DECL_RTL (parms), 0), 1)) == CONST_INT
&& PARM_PASSED_IN_MEMORY (parms)
/* Output COFF information for non-global file-scope initialized
variables. */
- if (DECL_INITIAL (decl) && GET_CODE (DECL_RTL (decl)) == MEM)
+ if (DECL_INITIAL (decl) && MEM_P (DECL_RTL (decl)))
sdbout_toplevel_data (decl);
}
}
SUBREG with it. Don't do this if the MEM has a mode-dependent address
or if we would be widening it. */
- if (GET_CODE (op) == MEM
+ if (MEM_P (op)
&& ! mode_dependent_address_p (XEXP (op, 0))
/* Allow splitting of volatile memory references in case we don't
have instruction to move the whole thing. */
|| is_inout)
{
op = expand_expr (val, NULL_RTX, VOIDmode, EXPAND_WRITE);
- if (GET_CODE (op) == MEM)
+ if (MEM_P (op))
op = validize_mem (op);
- if (! allows_reg && GET_CODE (op) != MEM)
+ if (! allows_reg && !MEM_P (op))
error ("output number %d not directly addressable", i);
- if ((! allows_mem && GET_CODE (op) == MEM)
+ if ((! allows_mem && MEM_P (op))
|| GET_CODE (op) == CONCAT)
{
real_output_rtx[i] = protect_from_queue (op, 1);
/* Never pass a CONCAT to an ASM. */
if (GET_CODE (op) == CONCAT)
op = force_reg (GET_MODE (op), op);
- else if (GET_CODE (op) == MEM)
+ else if (MEM_P (op))
op = validize_mem (op);
if (asm_operand_ok (op, constraint) <= 0)
else if (!allows_mem)
warning ("asm operand %d probably doesn't match constraints",
i + noutputs);
- else if (GET_CODE (op) == MEM)
+ else if (MEM_P (op))
{
/* We won't recognize either volatile memory or memory
with a queued address as available a memory_operand
/* If all we do is reference a volatile value in memory,
copy it to a register to be sure it is actually touched. */
- if (value && GET_CODE (value) == MEM && TREE_THIS_VOLATILE (exp))
+ if (value && MEM_P (value) && TREE_THIS_VOLATILE (exp))
{
if (TYPE_MODE (type) == VOIDmode)
;
to the proper address. */
if (DECL_RTL_SET_P (decl))
{
- if (GET_CODE (DECL_RTL (decl)) != MEM
+ if (!MEM_P (DECL_RTL (decl))
|| !REG_P (XEXP (DECL_RTL (decl), 0)))
abort ();
oldaddr = XEXP (DECL_RTL (decl), 0);
/* (SUBREG (MEM ...)) at RTL generation time is invalid, so we
instead create a new MEM rtx with the proper mode. */
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
{
if (mode == GET_MODE (x))
SET_DECL_RTL (decl_elt, x);
do_pending_stack_adjust ();
index = protect_from_queue (index, 0);
- if (GET_CODE (index) == MEM)
+ if (MEM_P (index))
index = copy_to_reg (index);
if (GET_CODE (index) == CONST_INT
|| TREE_CODE (index_expr) == INTEGER_CST)
/* Some ports store large constants in memory and add a REG_EQUAL
note to the store insn. */
- else if (GET_CODE (increment) == MEM)
+ else if (MEM_P (increment))
{
rtx note = find_reg_note (src_insn, REG_EQUAL, 0);
if (note)
else
*post -= INTVAL (XEXP (src, 1));
}
- else if (GET_CODE (dest) == MEM)
+ else if (MEM_P (dest))
{
/* (set (mem (pre_dec (reg sp))) (foo)) */
src = XEXP (dest, 0);
offset += VTI (bb)->mos[i].u.adjust;
else if (VTI (bb)->mos[i].type != MO_CALL)
{
- if (GET_CODE (VTI (bb)->mos[i].u.loc) == MEM)
+ if (MEM_P (VTI (bb)->mos[i].u.loc))
{
VTI (bb)->mos[i].u.loc
= adjust_stack_reference (VTI (bb)->mos[i].u.loc, -offset);
extern char **_dl_argv_internal __attribute__ ((alias ("_dl_argv")));
char **_dl_argv;
*/
- if (GET_CODE (decl_rtl) == MEM
+ if (MEM_P (decl_rtl)
&& contains_symbol_ref (XEXP (decl_rtl, 0)))
return 0;
/* If RTX is a memory it should not be very large (because it would be
an array or struct). */
- if (GET_CODE (decl_rtl) == MEM)
+ if (MEM_P (decl_rtl))
{
/* Do not track structures and arrays. */
if (GET_MODE (decl_rtl) == BLKmode)
#endif
VTI (bb)->n_mos++;
}
- else if (GET_CODE (*loc) == MEM
+ else if (MEM_P (*loc)
&& MEM_EXPR (*loc)
&& track_expr_p (MEM_EXPR (*loc)))
{
mo->u.loc = *loc;
mo->insn = (rtx) insn;
}
- else if (GET_CODE (*loc) == MEM
+ else if (MEM_P (*loc)
&& MEM_EXPR (*loc)
&& track_expr_p (MEM_EXPR (*loc)))
{
mo->u.loc = loc;
mo->insn = (rtx) insn;
}
- else if (GET_CODE (loc) == MEM
+ else if (MEM_P (loc)
&& MEM_EXPR (loc)
&& track_expr_p (MEM_EXPR (loc)))
{
if (REG_P (loc))
var_reg_delete_and_set (out, loc);
- else if (GET_CODE (loc) == MEM)
+ else if (MEM_P (loc))
var_mem_delete_and_set (out, loc);
}
break;
if (REG_P (loc))
var_reg_delete (out, loc);
- else if (GET_CODE (loc) == MEM)
+ else if (MEM_P (loc))
var_mem_delete (out, loc);
}
break;
return true;
}
}
- else if (GET_CODE (rtl) == MEM)
+ else if (MEM_P (rtl))
{
if (MEM_ATTRS (rtl))
{
#endif
incoming = eliminate_regs (incoming, 0, NULL_RTX);
- if (!frame_pointer_needed && GET_CODE (incoming) == MEM)
+ if (!frame_pointer_needed && MEM_P (incoming))
incoming = adjust_stack_reference (incoming, -stack_adjust);
out = &VTI (ENTRY_BLOCK_PTR)->out;
parm, offset, incoming);
set_variable_part (out, incoming, parm, offset);
}
- else if (GET_CODE (incoming) == MEM)
+ else if (MEM_P (incoming))
{
set_variable_part (out, incoming, parm, offset);
}
void
make_var_volatile (tree var)
{
- if (GET_CODE (DECL_RTL (var)) != MEM)
+ if (!MEM_P (DECL_RTL (var)))
abort ();
MEM_VOLATILE_P (DECL_RTL (var)) = 1;
|| (DECL_COMMON (decl)
&& (DECL_INITIAL (decl) == 0
|| DECL_INITIAL (decl) == error_mark_node))))
- || GET_CODE (DECL_RTL (decl)) != MEM)
+ || !MEM_P (DECL_RTL (decl)))
return;
/* We win when global object is found, but it is useful to know about weak
{
rtx rtl = DECL_RTL (decl);
- if (GET_CODE (rtl) == MEM && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
+ if (MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
&& !SYMBOL_REF_USED (XEXP (rtl, 0))
&& !incorporeal_function_p (decl))
{
abort ();
}
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
abort ();
x = XEXP (x, 0);
DECL_WEAK (decl) = 1;
if (DECL_RTL_SET_P (decl)
- && GET_CODE (DECL_RTL (decl)) == MEM
+ && MEM_P (DECL_RTL (decl))
&& XEXP (DECL_RTL (decl), 0)
&& GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF)
SYMBOL_REF_WEAK (XEXP (DECL_RTL (decl), 0)) = 1;
int flags;
/* Careful not to prod global register variables. */
- if (GET_CODE (rtl) != MEM)
+ if (!MEM_P (rtl))
return;
symbol = XEXP (rtl, 0);
if (GET_CODE (symbol) != SYMBOL_REF)