/* Analyze RTL for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software
- Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011 Free Software Foundation, Inc.
This file is part of GCC.
#include "system.h"
#include "coretypes.h"
#include "tm.h"
-#include "toplev.h"
-#include "rtl.h"
+#include "diagnostic-core.h"
#include "hard-reg-set.h"
+#include "rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "target.h"
#include "output.h"
#include "tm_p.h"
#include "flags.h"
-#include "real.h"
#include "regs.h"
#include "function.h"
#include "df.h"
#include "tree.h"
-
-/* Information about a subreg of a hard register. */
-struct subreg_info
-{
- /* Offset of first hard register involved in the subreg. */
- int offset;
- /* Number of hard registers involved in the subreg. */
- int nregs;
- /* Whether this subreg can be represented as a hard reg with the new
- mode. */
- bool representable_p;
-};
+#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);
-static void subreg_get_info (unsigned int, enum machine_mode,
- unsigned int, enum machine_mode,
- struct subreg_info *);
static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
const_rtx, enum machine_mode,
-1 if a code has no such operand. */
static int non_rtx_starting_operands[NUM_RTX_CODE];
-/* Bit flags that specify the machine subtype we are compiling for.
- Bits are tested using macros TARGET_... defined in the tm.h file
- and set by `-m...' switches. Must be defined in rtlanal.c. */
-
-int target_flags;
-
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
SIGN_EXTEND then while narrowing we also have to enforce the
/* The arg pointer varies if it is not a fixed register. */
|| (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
return 0;
-#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
/* ??? When call-clobbered, the value is stable modulo the restore
that must happen after a call. This currently screws up local-alloc
into believing that the restore is not needed. */
- if (x == pic_offset_table_rtx)
+ if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
return 0;
-#endif
return 1;
case ASM_OPERANDS:
|| (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
return 0;
if (x == pic_offset_table_rtx
-#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
/* ??? When call-clobbered, the value is stable modulo the restore
that must happen after a call. This currently screws up
local-alloc into believing that the restore is not needed, so we
must return 0 only if we are called from alias analysis. */
- && for_alias
-#endif
- )
+ && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
return 0;
return 1;
alignment machines. */
static int
-rtx_addr_can_trap_p_1 (const_rtx x, enum machine_mode mode, bool unaligned_mems)
+rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
+ enum machine_mode mode, bool unaligned_mems)
{
enum rtx_code code = GET_CODE (x);
+ if (STRICT_ALIGNMENT
+ && unaligned_mems
+ && GET_MODE_SIZE (mode) != 0)
+ {
+ HOST_WIDE_INT actual_offset = offset;
+#ifdef SPARC_STACK_BOUNDARY_HACK
+ /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
+ the real alignment of %sp. However, when it does this, the
+ alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
+ if (SPARC_STACK_BOUNDARY_HACK
+ && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
+ actual_offset -= STACK_POINTER_OFFSET;
+#endif
+
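+      /* For example, on a STRICT_ALIGNMENT target an SImode access whose
+	 accumulated offset is 2 fails the test below (2 % 4 != 0) and is
+	 reported as possibly trapping.  */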
+ if (actual_offset % GET_MODE_SIZE (mode) != 0)
+ return 1;
+ }
+
switch (code)
{
case SYMBOL_REF:
- return SYMBOL_REF_WEAK (x);
+ if (SYMBOL_REF_WEAK (x))
+ return 1;
+ if (!CONSTANT_POOL_ADDRESS_P (x))
+ {
+ tree decl;
+ HOST_WIDE_INT decl_size;
+
+ if (offset < 0)
+ return 1;
+ if (size == 0)
+ size = GET_MODE_SIZE (mode);
+ if (size == 0)
+ return offset != 0;
+
+ /* If the size of the access or of the symbol is unknown,
+ assume the worst. */
+ decl = SYMBOL_REF_DECL (x);
+
+ /* Else check that the access is in bounds. TODO: restructure
+ expr_size/tree_expr_size/int_expr_size and just use the latter. */
+ if (!decl)
+ decl_size = -1;
+ else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
+ decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
+ ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
+ : -1);
+ else if (TREE_CODE (decl) == STRING_CST)
+ decl_size = TREE_STRING_LENGTH (decl);
+ else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
+ decl_size = int_size_in_bytes (TREE_TYPE (decl));
+ else
+ decl_size = -1;
+
+ return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
+ }
+
+ return 0;
case LABEL_REF:
return 0;
return 1;
case CONST:
- return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
+ return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
+ mode, unaligned_mems);
case PLUS:
/* An address is assumed not to trap if:
- - it is an address that can't trap plus a constant integer,
+ - it is the pic register plus a constant. */
+ if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
+ return 0;
+
+ /* - or it is an address that can't trap plus a constant integer,
with the proper remainder modulo the mode size if we are
considering unaligned memory references. */
- if (!rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems)
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
- {
- HOST_WIDE_INT offset;
-
- if (!STRICT_ALIGNMENT
- || !unaligned_mems
- || GET_MODE_SIZE (mode) == 0)
- return 0;
-
- offset = INTVAL (XEXP (x, 1));
-
-#ifdef SPARC_STACK_BOUNDARY_HACK
- /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
- the real alignment of %sp. However, when it does this, the
- alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
- if (SPARC_STACK_BOUNDARY_HACK
- && (XEXP (x, 0) == stack_pointer_rtx
- || XEXP (x, 0) == hard_frame_pointer_rtx))
- offset -= STACK_POINTER_OFFSET;
-#endif
-
- return offset % GET_MODE_SIZE (mode) != 0;
- }
-
- /* - or it is the pic register plus a constant. */
- if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
+ if (CONST_INT_P (XEXP (x, 1))
+ && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
+ size, mode, unaligned_mems))
return 0;
return 1;
case LO_SUM:
case PRE_MODIFY:
- return rtx_addr_can_trap_p_1 (XEXP (x, 1), mode, unaligned_mems);
+ return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
+ mode, unaligned_mems);
case PRE_DEC:
case PRE_INC:
case POST_DEC:
case POST_INC:
case POST_MODIFY:
- return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
+ return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
+ mode, unaligned_mems);
default:
break;
int
rtx_addr_can_trap_p (const_rtx x)
{
- return rtx_addr_can_trap_p_1 (x, VOIDmode, false);
+ return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
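
/* An illustrative sketch (not part of the patch; mem_addr_can_trap_p is
   a hypothetical name): within this file, a caller that knows the size
   of an access can now ask the stricter question, much as may_trap_p_1
   does for MEMs further down.  */

static int
mem_addr_can_trap_p (const_rtx mem)
{
  HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (mem) ? MEM_SIZE (mem) : 0;
  return rtx_addr_can_trap_p_1 (XEXP (mem, 0), 0, size,
				GET_MODE (mem), false);
}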
/* Return true if X is an address that is known to not be zero. */
return nonzero_address_p (XEXP (x, 0));
case PLUS:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if (CONST_INT_P (XEXP (x, 1)))
return nonzero_address_p (XEXP (x, 0));
/* Handle PIC references. */
else if (XEXP (x, 0) == pic_offset_table_rtx
/* Similar to the above; allow positive offsets. Further, since
auto-inc is only allowed in memories, the register must be a
pointer. */
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) > 0)
return true;
return nonzero_address_p (XEXP (x, 0));
x = XEXP (x, 0);
if (GET_CODE (x) == MINUS
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (x, 1)))
return - INTVAL (XEXP (x, 1));
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (x, 1)))
return INTVAL (XEXP (x, 1));
return 0;
}
return 0;
x = XEXP (x, 0);
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (x, 1)))
return XEXP (x, 0);
else if (GET_CODE (x) == MINUS
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (x, 1)))
return XEXP (x, 0);
return 0;
}
if (GET_CODE (x) == CONST)
{
x = XEXP (x, 0);
- if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
{
*base_out = XEXP (x, 0);
*offset_out = XEXP (x, 1);
if (XEXP (x, 1))
count += count_occurrences (XEXP (x, 1), find, count_dest);
return count;
-
+
case MEM:
if (MEM_P (find) && rtx_equal_p (x, find))
return 1;
return 0;
for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
- if (INSN_P (insn)
+ if (NONDEBUG_INSN_P (insn)
&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
|| (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
return 1;
note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
return data.found;
}
+
+/* This function, called through note_stores, collects sets and
+ clobbers of hard registers in a HARD_REG_SET, which is pointed to
+ by DATA. */
+void
+record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
+{
+ HARD_REG_SET *pset = (HARD_REG_SET *)data;
+ if (REG_P (x) && HARD_REGISTER_P (x))
+ add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
+}
+
+/* Examine INSN, and compute the set of hard registers written by it.
+ Store it in *PSET. Should only be called after reload. */
+void
+find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset)
+{
+ rtx link;
+
+ CLEAR_HARD_REG_SET (*pset);
+ note_stores (PATTERN (insn), record_hard_reg_sets, pset);
+ if (CALL_P (insn))
+ IOR_HARD_REG_SET (*pset, call_used_reg_set);
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC)
+ record_hard_reg_sets (XEXP (link, 0), NULL, pset);
+}
+
+/* A for_each_rtx subroutine of record_hard_reg_uses. */
+static int
+record_hard_reg_uses_1 (rtx *px, void *data)
+{
+ rtx x = *px;
+ HARD_REG_SET *pused = (HARD_REG_SET *)data;
+
+ if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ {
+ int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
+ while (nregs-- > 0)
+ SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
+ }
+ return 0;
+}
+
+/* Like record_hard_reg_sets, but called through note_uses. */
+void
+record_hard_reg_uses (rtx *px, void *data)
+{
+ for_each_rtx (px, record_hard_reg_uses_1, data);
+}
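
/* Illustrative use of the new helpers (a sketch, not part of the patch;
   insn_writes_hard_reg_p is a hypothetical name): test whether INSN
   writes a given hard register after reload.  */

static bool
insn_writes_hard_reg_p (const_rtx insn, unsigned int regno)
{
  HARD_REG_SET written;

  find_all_hard_reg_sets (insn, &written);
  return TEST_HARD_REG_BIT (written, regno) != 0;
}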
\f
/* Given an INSN, return a SET expression if this insn has only a single SET.
It may also have CLOBBERs, USEs, or SET whose output
{
rtx link;
- gcc_assert (insn);
+ gcc_checking_assert (insn);
/* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
if (! INSN_P (insn))
}
\f
-/* Add register note with kind KIND and datum DATUM to INSN. */
+/* Allocate a register note with kind KIND and datum DATUM. LIST is
+ stored as the pointer to the next register note. */
-void
-add_reg_note (rtx insn, enum reg_note kind, rtx datum)
+rtx
+alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
rtx note;
case REG_CC_USER:
case REG_LABEL_TARGET:
case REG_LABEL_OPERAND:
+ case REG_TM:
/* These types of register notes use an INSN_LIST rather than an
EXPR_LIST, so that copying is done right and dumps look
better. */
- note = alloc_INSN_LIST (datum, REG_NOTES (insn));
+ note = alloc_INSN_LIST (datum, list);
PUT_REG_NOTE_KIND (note, kind);
break;
default:
- note = alloc_EXPR_LIST (kind, datum, REG_NOTES (insn));
+ note = alloc_EXPR_LIST (kind, datum, list);
break;
}
- REG_NOTES (insn) = note;
+ return note;
+}
+
+/* Add register note with kind KIND and datum DATUM to INSN. */
+
+void
+add_reg_note (rtx insn, enum reg_note kind, rtx datum)
+{
+ REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
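
/* A sketch of what the split buys us (illustrative only; REG1 and REG2
   are hypothetical): a caller can build a note chain first and attach
   it in one step.  */

static void
attach_two_dead_notes (rtx insn, rtx reg1, rtx reg2)
{
  rtx notes = NULL_RTX;

  notes = alloc_reg_note (REG_DEAD, reg1, notes);
  notes = alloc_reg_note (REG_DEAD, reg2, notes);
  /* Equivalent to two add_reg_note calls, modulo note order.  */
  REG_NOTES (insn) = notes;
}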
/* Remove register note NOTE from the REG_NOTES of INSN. */
}
}
+/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
+
+void
+remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
+{
+ df_ref eq_use;
+
+ if (!df)
+ return;
+
+ /* This loop is a little tricky. We cannot just go down the chain because
+ it is being modified by some actions in the loop. So we just iterate
+ over the head. We plan to drain the list anyway. */
+ while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
+ {
+ rtx insn = DF_REF_INSN (eq_use);
+ rtx note = find_reg_equal_equiv_note (insn);
+
+ /* This assert is generally triggered when someone deletes a REG_EQUAL
+ or REG_EQUIV note by hacking the list manually rather than calling
+ remove_note. */
+ gcc_assert (note);
+
+ remove_note (insn, note);
+ }
+}
+
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
return 1 if it is found. A simple equality test is used to determine if
NODE matches. */
case SCRATCH:
case ADDR_VEC:
case ADDR_DIFF_VEC:
+ case VAR_LOCATION:
return 0;
case CLOBBER:
return 0;
}
\f
-enum may_trap_p_flags
-{
- MTP_UNALIGNED_MEMS = 1,
- MTP_AFTER_MOVE = 2
-};
/* Return nonzero if evaluating rtx X might cause a trap.
- (FLAGS & MTP_UNALIGNED_MEMS) controls whether nonzero is returned for
- unaligned memory accesses on strict alignment machines. If
- (FLAGS & AFTER_MOVE) is true, returns nonzero even in case the expression
- cannot trap at its current location, but it might become trapping if moved
- elsewhere. */
+ FLAGS controls how to consider MEMs. A nonzero value means the
+ context of the access may have changed from the original, such that
+ the address may have become invalid. */
int
may_trap_p_1 (const_rtx x, unsigned flags)
int i;
enum rtx_code code;
const char *fmt;
- bool unaligned_mems = (flags & MTP_UNALIGNED_MEMS) != 0;
+
+ /* We make no distinction currently, but this function is part of
+ the internal target-hooks ABI so we keep the parameter as
+ "unsigned flags". */
+ bool code_changed = flags != 0;
if (x == 0)
return 0;
/* Memory ref can trap unless it's a static var or a stack slot. */
case MEM:
+ /* Recognize the specific pattern of stack-checking probes. */
+ if (flag_stack_check
+ && MEM_VOLATILE_P (x)
+ && XEXP (x, 0) == stack_pointer_rtx)
+ return 1;
if (/* MEM_NOTRAP_P only relates to the actual position of the memory
- reference; moving it out of condition might cause its address
- become invalid. */
- !(flags & MTP_AFTER_MOVE)
- && MEM_NOTRAP_P (x)
- && (!STRICT_ALIGNMENT || !unaligned_mems))
- return 0;
- return
- rtx_addr_can_trap_p_1 (XEXP (x, 0), GET_MODE (x), unaligned_mems);
+ reference; moving it out of context such as when moving code
+ when optimizing, might cause its address to become invalid. */
+ code_changed
+ || !MEM_NOTRAP_P (x))
+ {
+ HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
+ return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
+ GET_MODE (x), code_changed);
+ }
+
+ return 0;
/* Division by a non-constant might trap. */
case DIV:
return may_trap_p_1 (x, 0);
}
-/* Return nonzero if evaluating rtx X might cause a trap, when the expression
- is moved from its current location by some optimization. */
-
-int
-may_trap_after_code_motion_p (const_rtx x)
-{
- return may_trap_p_1 (x, MTP_AFTER_MOVE);
-}
-
/* Same as above, but additionally return nonzero if evaluating rtx X might
cause a fault. We define a fault for the purpose of this function as an
erroneous execution condition that cannot be encountered during the normal
int
may_trap_or_fault_p (const_rtx x)
{
- return may_trap_p_1 (x, MTP_UNALIGNED_MEMS);
+ return may_trap_p_1 (x, 1);
}
\f
/* Return nonzero if X contains a comparison that is not either EQ or NE,
{
rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
- if (GET_CODE (new_rtx) == CONST_INT)
+ if (CONST_INT_P (new_rtx))
{
x = simplify_subreg (GET_MODE (x), new_rtx,
GET_MODE (SUBREG_REG (x)),
{
rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
- if (GET_CODE (new_rtx) == CONST_INT)
+ if (CONST_INT_P (new_rtx))
{
x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
new_rtx, GET_MODE (XEXP (x, 0)));
{
rtx label, table;
- if (JUMP_P (insn)
- && (label = JUMP_LABEL (insn)) != NULL_RTX
+ if (!JUMP_P (insn))
+ return false;
+
+ label = JUMP_LABEL (insn);
+ if (label != NULL_RTX && !ANY_RETURN_P (label)
&& (table = next_active_insn (label)) != NULL_RTX
- && JUMP_P (table)
- && (GET_CODE (PATTERN (table)) == ADDR_VEC
- || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
+ && JUMP_TABLE_DATA_P (table))
{
if (labelp)
*labelp = label;
else if (result != 0)
/* Stop the traversal. */
return result;
-
+
if (*x == NULL_RTX)
/* There are no sub-expressions. */
continue;
-
+
i = non_rtx_starting_operands[GET_CODE (*x)];
if (i >= 0)
{
else if (result != 0)
/* Stop the traversal. */
return result;
-
+
if (*x == NULL_RTX)
/* There are no sub-expressions. */
continue;
-
+
i = non_rtx_starting_operands[GET_CODE (*x)];
if (i >= 0)
{
return for_each_rtx_1 (*x, i, f, data);
}
+\f
+
+/* Data structure that holds the internal state communicated between
+ for_each_inc_dec, for_each_inc_dec_find_mem and
+ for_each_inc_dec_find_inc_dec. */
+
+struct for_each_inc_dec_ops {
+ /* The function to be called for each autoinc operation found. */
+ for_each_inc_dec_fn fn;
+ /* The opaque argument to be passed to it. */
+ void *arg;
+ /* The MEM we're visiting, if any. */
+ rtx mem;
+};
+
+static int for_each_inc_dec_find_mem (rtx *r, void *d);
+
+/* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
+ operands of the equivalent add insn and pass the result to the
+ operator specified by *D. */
+
+static int
+for_each_inc_dec_find_inc_dec (rtx *r, void *d)
+{
+ rtx x = *r;
+ struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;
+
+ switch (GET_CODE (x))
+ {
+ case PRE_INC:
+ case POST_INC:
+ {
+ int size = GET_MODE_SIZE (GET_MODE (data->mem));
+ rtx r1 = XEXP (x, 0);
+ rtx c = gen_int_mode (size, GET_MODE (r1));
+ return data->fn (data->mem, x, r1, r1, c, data->arg);
+ }
+
+ case PRE_DEC:
+ case POST_DEC:
+ {
+ int size = GET_MODE_SIZE (GET_MODE (data->mem));
+ rtx r1 = XEXP (x, 0);
+ rtx c = gen_int_mode (-size, GET_MODE (r1));
+ return data->fn (data->mem, x, r1, r1, c, data->arg);
+ }
+
+ case PRE_MODIFY:
+ case POST_MODIFY:
+ {
+ rtx r1 = XEXP (x, 0);
+ rtx add = XEXP (x, 1);
+ return data->fn (data->mem, x, r1, add, NULL, data->arg);
+ }
+
+ case MEM:
+ {
+ rtx save = data->mem;
+ int ret = for_each_inc_dec_find_mem (r, d);
+ data->mem = save;
+ return ret;
+ }
+
+ default:
+ return 0;
+ }
+}
+
+/* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
+ address, extract the operands of the equivalent add insn and pass
+ the result to the operator specified by *D. */
+
+static int
+for_each_inc_dec_find_mem (rtx *r, void *d)
+{
+ rtx x = *r;
+ if (x != NULL_RTX && MEM_P (x))
+ {
+ struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
+ int result;
+
+ data->mem = x;
+
+ result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
+ data);
+ if (result)
+ return result;
+
+ return -1;
+ }
+ return 0;
+}
+
+/* Traverse *X looking for MEMs, and for autoinc operations within
+ them. For each such autoinc operation found, call FN, passing it
+ the innermost enclosing MEM, the operation itself, the RTX modified
+ by the operation, two RTXs (the second may be NULL) that, once
+ added, represent the value to be held by the modified RTX
+ afterwards, and ARG. FN is to return -1 to skip looking for other
+ autoinc operations within the visited operation, 0 to continue the
+ traversal, or any other value to have it returned to the caller of
+ for_each_inc_dec. */
+
+int
+for_each_inc_dec (rtx *x,
+ for_each_inc_dec_fn fn,
+ void *arg)
+{
+ struct for_each_inc_dec_ops data;
+ data.fn = fn;
+ data.arg = arg;
+ data.mem = NULL;
+
+ return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
+}
+
+\f
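
/* An illustrative caller (a sketch, not part of the patch; the names are
   hypothetical): count the auto-inc/dec operations in a pattern.  For
   (mem:SI (post_inc (reg:SI R))), the callback sees DEST = SRC = R and
   SRCOFF = (const_int 4).  */

static int
count_inc_dec_1 (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		 rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
		 rtx srcoff ATTRIBUTE_UNUSED, void *arg)
{
  ++*(int *) arg;
  return 0;	/* Continue the traversal.  */
}

static int
count_inc_dec (rtx *x)
{
  int count = 0;

  for_each_inc_dec (x, count_inc_dec_1, &count);
  return count;
}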
/* Searches X for any reference to REGNO, returning the rtx of the
reference found if any. Otherwise, returns NULL_RTX. */
commutative_operand_precedence (rtx op)
{
enum rtx_code code = GET_CODE (op);
-
+
/* Constants always come second. Prefer "nice" constants.
if (code == CONST_INT)
return -8;
operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
is canonical, although it will usually be further simplified. */
return 2;
-
+
case RTX_UNARY:
/* Then prefer NEG and NOT. */
if (code == NEG || code == NOT)
unsigned int word;
/* A paradoxical subreg begins at bit position 0. */
- if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
+ if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
return 0;
if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
offset - The byte offset.
ymode - The mode of a top level SUBREG (or what may become one).
info - Pointer to structure to fill in. */
-static void
+void
subreg_get_info (unsigned int xregno, enum machine_mode xmode,
unsigned int offset, enum machine_mode ymode,
struct subreg_info *info)
picking a different register class, or doing it in memory if
necessary.) An example of a value with holes is XCmode on 32-bit
x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
- 3 for each part, but in memory it's two 128-bit parts.
+ 3 for each part, but in memory it's two 128-bit parts.
Padding is assumed to be at the end (not necessarily the 'high part')
of each unit. */
- if ((offset / GET_MODE_SIZE (xmode_unit) + 1
+ if ((offset / GET_MODE_SIZE (xmode_unit) + 1
< GET_MODE_NUNITS (xmode))
&& (offset / GET_MODE_SIZE (xmode_unit)
!= ((offset + GET_MODE_SIZE (ymode) - 1)
}
else
nregs_xmode = hard_regno_nregs[xregno][xmode];
-
+
nregs_ymode = hard_regno_nregs[xregno][ymode];
/* Paradoxical subregs are otherwise valid. */
if (!rknown
&& offset == 0
- && GET_MODE_SIZE (ymode) > GET_MODE_SIZE (xmode))
+ && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
{
info->representable_p = true;
/* If this is a big endian paradoxical subreg, which uses more
return a negative offset so that we find the proper highpart
of the register. */
if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
- ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
+ ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
info->offset = nregs_xmode - nregs_ymode;
else
info->offset = 0;
gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
gcc_assert ((nregs_xmode % nregs_ymode) == 0);
+ if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
+ && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
+ {
+ HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
+ HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
+ HOST_WIDE_INT off_low = offset & (ysize - 1);
+ HOST_WIDE_INT off_high = offset & ~(ysize - 1);
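+      /* For example, with xmode DImode (xsize 8) and ymode SImode
+	 (ysize 4), an offset of 0 is rewritten below to
+	 (8 - 4 - 0) | 0 == 4, i.e. the other word of the register.  */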
+ offset = (xsize - ysize - off_high) | off_low;
+ }
/* The XMODE value can be seen as a vector of NREGS_XMODE
values. The subreg must represent a lowpart of given field.
Compute what field it is. */
return info.representable_p;
}
+/* Return the number of a YMODE register to which
+
+ (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
+
+ can be simplified. Return -1 if the subreg can't be simplified.
+
+ XREGNO is a hard register number. */
+
+int
+simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
+ unsigned int offset, enum machine_mode ymode)
+{
+ struct subreg_info info;
+ unsigned int yregno;
+
+#ifdef CANNOT_CHANGE_MODE_CLASS
+ /* Give the backend a chance to disallow the mode change. */
+ if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
+ && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
+ && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode))
+ return -1;
+#endif
+
+ /* We shouldn't simplify stack-related registers. */
+ if ((!reload_completed || frame_pointer_needed)
+ && xregno == FRAME_POINTER_REGNUM)
+ return -1;
+
+ if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && xregno == ARG_POINTER_REGNUM)
+ return -1;
+
+ if (xregno == STACK_POINTER_REGNUM)
+ return -1;
+
+ /* Try to get the register offset. */
+ subreg_get_info (xregno, xmode, offset, ymode, &info);
+ if (!info.representable_p)
+ return -1;
+
+ /* Make sure that the offsetted register value is in range. */
+ yregno = xregno + info.offset;
+ if (!HARD_REGISTER_NUM_P (yregno))
+ return -1;
+
+ /* See whether (reg:YMODE YREGNO) is valid.
+
+ ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
+ This is a kludge to work around how complex FP arguments are passed
+ on IA-64 and should be fixed. See PR target/49226. */
+ if (!HARD_REGNO_MODE_OK (yregno, ymode)
+ && HARD_REGNO_MODE_OK (xregno, xmode))
+ return -1;
+
+ return (int) yregno;
+}
+
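/* Illustrative use (a sketch, not part of the patch; try_hard_subreg is
   a hypothetical name): fold a subreg of a hard register into a plain
   REG when the simplification is valid.  */

static rtx
try_hard_subreg (unsigned int xregno, enum machine_mode xmode,
		 unsigned int offset, enum machine_mode ymode)
{
  int yregno = simplify_subreg_regno (xregno, xmode, offset, ymode);

  return yregno >= 0 ? gen_rtx_REG (ymode, yregno) : NULL_RTX;
}
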
/* Return the final regno that a subreg expression refers to. */
unsigned int
subreg_regno (const_rtx x)
&& general_operand (SET_SRC (set), VOIDmode))
return true;
if (REG_P (SET_SRC (set))
- && FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set)))
+ && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
&& REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
return true;
if (SET_DEST (set) == stack_pointer_rtx)
{
/* This CONST_CAST is okay because next_nonnote_insn just
- returns it's argument and we assign it to a const_rtx
+ returns its argument and we assign it to a const_rtx
variable. */
const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX(insn));
if (i2 && keep_with_call_p (i2))
/* Return an estimate of the cost of computing rtx X.
One use is in cse, to decide which expression to keep in the hash table.
Another is in rtl generation, to pick the cheapest way to multiply.
- Other uses like the latter are expected in the future. */
+ Other uses like the latter are expected in the future.
+
+ X appears as operand OPNO in an expression with code OUTER_CODE.
+ SPEED specifies whether costs optimized for speed or size should
+ be returned. */
int
-rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED)
+rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
{
int i, j;
enum rtx_code code;
break;
default:
- if (targetm.rtx_costs (x, code, outer_code, &total))
+ if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
return total;
break;
}
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
if (fmt[i] == 'e')
- total += rtx_cost (XEXP (x, i), code);
+ total += rtx_cost (XEXP (x, i), code, i, speed);
else if (fmt[i] == 'E')
for (j = 0; j < XVECLEN (x, i); j++)
- total += rtx_cost (XVECEXP (x, i, j), code);
+ total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
return total;
}
+
+/* Fill in the structure C with information about both speed and size rtx
+ costs for X, which is operand OPNO in an expression with code OUTER. */
+
+void
+get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
+ struct full_rtx_costs *c)
+{
+ c->speed = rtx_cost (x, outer, opno, true);
+ c->size = rtx_cost (x, outer, opno, false);
+}
+
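/* A sketch of how both costs might be consumed (illustrative only;
   cheaper_p is a hypothetical name): prefer A over B when it is no
   worse for speed and strictly smaller.  */

static bool
cheaper_p (rtx a, rtx b, enum rtx_code outer, int opno)
{
  struct full_rtx_costs ca, cb;

  get_full_rtx_cost (a, outer, opno, &ca);
  get_full_rtx_cost (b, outer, opno, &cb);
  return ca.speed <= cb.speed && ca.size < cb.size;
}
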
\f
/* Return cost of address expression X.
- Expect that X is properly formed address reference. */
+ Expect that X is a properly formed address reference.
+
+ The SPEED parameter specifies whether costs optimized for speed or
+ size should be returned. */
int
-address_cost (rtx x, enum machine_mode mode)
+address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
{
/* We may be asked for cost of various unusual addresses, such as operands
of push instruction. It is not worthwhile to complicate writing
of the target hook by such cases. */
- if (!memory_address_p (mode, x))
+ if (!memory_address_addr_space_p (mode, x, as))
return 1000;
- return targetm.address_cost (x);
+ return targetm.address_cost (x, speed);
}
/* If the target doesn't override, compute the cost as with arithmetic. */
int
-default_address_cost (rtx x)
+default_address_cost (rtx x, bool speed)
{
- return rtx_cost (x, MEM);
+ return rtx_cost (x, MEM, 0, speed);
}
\f
unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
unsigned HOST_WIDE_INT inner_nz;
enum rtx_code code;
- unsigned int mode_width = GET_MODE_BITSIZE (mode);
+ enum machine_mode inner_mode;
+ unsigned int mode_width = GET_MODE_PRECISION (mode);
- /* For floating-point values, assume all bits are needed. */
- if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode))
+ /* For floating-point and vector values, assume all bits are needed. */
+ if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
+ || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
return nonzero;
/* If X is wider than MODE, use its mode instead. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
+ if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
{
mode = GET_MODE (x);
nonzero = GET_MODE_MASK (mode);
- mode_width = GET_MODE_BITSIZE (mode);
+ mode_width = GET_MODE_PRECISION (mode);
}
if (mode_width > HOST_BITS_PER_WIDE_INT)
not known to be zero. */
if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
- && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
- && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
- && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
+ && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
+ && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
{
nonzero &= cached_nonzero_bits (x, GET_MODE (x),
known_x, known_mode, known_ret);
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
/* If pointers extend unsigned and this is a pointer in Pmode, say that
all the bits above ptr_mode are known to be zero. */
- if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
+ /* As we do not know which address space the pointer is referring to,
+ we can do this only if the target does not support different pointer
+ or address modes depending on the address space. */
+ if (target_default_pointer_address_modes_p ()
+ && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
&& REG_POINTER (x))
nonzero &= GET_MODE_MASK (ptr_mode);
#endif
case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
/* If X is negative in MODE, sign-extend the value. */
- if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
- && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
- return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
+ if (INTVAL (x) > 0
+ && mode_width < BITS_PER_WORD
+ && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
+ != 0)
+ return UINTVAL (x) | ((unsigned HOST_WIDE_INT) (-1) << mode_width);
#endif
- return INTVAL (x);
+ return UINTVAL (x);
case MEM:
#ifdef LOAD_EXTEND_OP
/* If this produces an integer result, we know which bits are set.
Code here used to clear bits outside the mode of X, but that is
now done above. */
- /* Mind that MODE is the mode the caller wants to look at this
- operation in, and not the actual operation mode. We can wind
+ /* Mind that MODE is the mode the caller wants to look at this
+ operation in, and not the actual operation mode. We can wind
up with (subreg:DI (gt:V4HI x y)), and we don't have anything
that describes the results of a vector compare. */
if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
/* Disabled to avoid exponential mutual recursion between nonzero_bits
and num_sign_bit_copies. */
if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
- == GET_MODE_BITSIZE (GET_MODE (x)))
+ == GET_MODE_PRECISION (GET_MODE (x)))
nonzero = 1;
#endif
- if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
+ if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
break;
/* Disabled to avoid exponential mutual recursion between nonzero_bits
and num_sign_bit_copies. */
if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
- == GET_MODE_BITSIZE (GET_MODE (x)))
+ == GET_MODE_PRECISION (GET_MODE (x)))
nonzero = 1;
#endif
break;
if (GET_MODE (XEXP (x, 0)) != VOIDmode)
{
inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
- if (inner_nz
- & (((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
+ if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
inner_nz |= (GET_MODE_MASK (mode)
& ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
}
case XOR: case IOR:
case UMIN: case UMAX: case SMIN: case SMAX:
{
- unsigned HOST_WIDE_INT nonzero0 =
- cached_nonzero_bits (XEXP (x, 0), mode,
- known_x, known_mode, known_ret);
+ unsigned HOST_WIDE_INT nonzero0
+ = cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
/* Don't call nonzero_bits for the second time if it cannot change
anything. */
computing the width (position of the highest-order nonzero bit)
and the number of low-order zero bits for each value. */
{
- unsigned HOST_WIDE_INT nz0 =
- cached_nonzero_bits (XEXP (x, 0), mode,
- known_x, known_mode, known_ret);
- unsigned HOST_WIDE_INT nz1 =
- cached_nonzero_bits (XEXP (x, 1), mode,
- known_x, known_mode, known_ret);
- int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
+ unsigned HOST_WIDE_INT nz0
+ = cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ unsigned HOST_WIDE_INT nz1
+ = cached_nonzero_bits (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
int width0 = floor_log2 (nz0) + 1;
int width1 = floor_log2 (nz1) + 1;
int low0 = floor_log2 (nz0 & -nz0);
int low1 = floor_log2 (nz1 & -nz1);
- HOST_WIDE_INT op0_maybe_minusp
- = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
- HOST_WIDE_INT op1_maybe_minusp
- = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
+ unsigned HOST_WIDE_INT op0_maybe_minusp
+ = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
+ unsigned HOST_WIDE_INT op1_maybe_minusp
+ = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
unsigned int result_width = mode_width;
int result_low = 0;
case DIV:
if (width1 == 0)
break;
- if (! op0_maybe_minusp && ! op1_maybe_minusp)
+ if (!op0_maybe_minusp && !op1_maybe_minusp)
result_width = width0;
break;
case UDIV:
case MOD:
if (width1 == 0)
break;
- if (! op0_maybe_minusp && ! op1_maybe_minusp)
+ if (!op0_maybe_minusp && !op1_maybe_minusp)
result_width = MIN (width0, width1);
result_low = MIN (low0, low1);
break;
}
if (result_width < mode_width)
- nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
+ nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
if (result_low > 0)
- nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);
-
-#ifdef POINTERS_EXTEND_UNSIGNED
- /* If pointers extend unsigned and this is an addition or subtraction
- to a pointer in Pmode, all the bits above ptr_mode are known to be
- zero. */
- if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
- && (code == PLUS || code == MINUS)
- && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
- nonzero &= GET_MODE_MASK (ptr_mode);
-#endif
+ nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
}
break;
case ZERO_EXTRACT:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
- nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
+ nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
break;
case SUBREG:
& cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
known_x, known_mode, known_ret);
+ inner_mode = GET_MODE (SUBREG_REG (x));
/* If the inner mode is a single word for both the host and target
machines, we can compute this from which bits of the inner
object might be nonzero. */
- if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
- && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
- <= HOST_BITS_PER_WIDE_INT))
+ if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
+ && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
{
nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
known_x, known_mode, known_ret);
#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
/* If this is a typical RISC machine, we only have to worry
about the way loads are extended. */
- if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
- ? (((nonzero
- & (((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
- != 0))
- : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
+ if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
+ ? val_signbit_known_set_p (inner_mode, nonzero)
+ : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
|| !MEM_P (SUBREG_REG (x)))
#endif
{
/* On many CISC machines, accessing an object in a wider mode
causes the high-order bits to become undefined. So they are
not known to be zero. */
- if (GET_MODE_SIZE (GET_MODE (x))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ if (GET_MODE_PRECISION (GET_MODE (x))
+ > GET_MODE_PRECISION (inner_mode))
nonzero |= (GET_MODE_MASK (GET_MODE (x))
- & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
+ & ~GET_MODE_MASK (inner_mode));
}
}
break;
the shift when shifted the appropriate number of bits. This
shows that high-order bits are cleared by the right shift and
low-order bits by left shifts. */
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
- && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
+ && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
{
enum machine_mode inner_mode = GET_MODE (x);
- unsigned int width = GET_MODE_BITSIZE (inner_mode);
+ unsigned int width = GET_MODE_PRECISION (inner_mode);
int count = INTVAL (XEXP (x, 1));
unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
- unsigned HOST_WIDE_INT op_nonzero =
- cached_nonzero_bits (XEXP (x, 0), mode,
- known_x, known_mode, known_ret);
+ unsigned HOST_WIDE_INT op_nonzero
+ = cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
unsigned HOST_WIDE_INT outer = 0;
/* If the sign bit may have been nonzero before the shift, we
need to mark all the places it could have been copied to
by the shift as possibly nonzero. */
- if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
- inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
+ if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
+ inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
+ << (width - count);
}
else if (code == ASHIFT)
inner <<= count;
case FFS:
case POPCOUNT:
/* This is at most the number of bits in the mode. */
- nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
+ nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
break;
case CLZ:
/* If CLZ has a known value at zero, then the nonzero bits are
that value, plus the number of bits in the mode minus one. */
if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
- nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
+ nonzero
+ |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
else
nonzero = -1;
break;
/* If CTZ has a known value at zero, then the nonzero bits are
that value, plus the number of bits in the mode minus one. */
if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
- nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
+ nonzero
+ |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
else
nonzero = -1;
break;
+ case CLRSB:
+ /* This is at most the number of bits in the mode minus 1. */
+ nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
+ break;
+
case PARITY:
nonzero = 1;
break;
case IF_THEN_ELSE:
{
- unsigned HOST_WIDE_INT nonzero_true =
- cached_nonzero_bits (XEXP (x, 1), mode,
- known_x, known_mode, known_ret);
+ unsigned HOST_WIDE_INT nonzero_true
+ = cached_nonzero_bits (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
/* Don't call nonzero_bits for the second time if it cannot change
anything. */
unsigned int known_ret)
{
enum rtx_code code = GET_CODE (x);
- unsigned int bitwidth = GET_MODE_BITSIZE (mode);
+ unsigned int bitwidth = GET_MODE_PRECISION (mode);
int num0, num1, result;
unsigned HOST_WIDE_INT nonzero;
if (mode == VOIDmode)
mode = GET_MODE (x);
- if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x)))
+ if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
+ || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
return 1;
/* For a smaller object, just ignore the high bits. */
- if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
+ if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
{
num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
known_x, known_mode, known_ret);
return MAX (1,
- num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
+ num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
}
- if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
+ if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
{
#ifndef WORD_REGISTER_OPERATIONS
- /* If this machine does not do all register operations on the entire
- register and MODE is wider than the mode of X, we can say nothing
- at all about the high-order bits. */
+ /* If this machine does not do all register operations on the entire
+ register and MODE is wider than the mode of X, we can say nothing
+ at all about the high-order bits. */
return 1;
#else
/* Likewise on machines that do, if the mode of the object is smaller
than a word and loads of that size don't sign extend, we can say
nothing about the high order bits. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
+ if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
#ifdef LOAD_EXTEND_OP
&& LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
#endif
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
/* If pointers extend signed and this is a pointer in Pmode, say that
all the bits above ptr_mode are known to be sign bit copies. */
- if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
- && REG_POINTER (x))
- return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
+ /* As we do not know which address space the pointer is referring to,
+ we can do this only if the target does not support different pointer
+ or address modes depending on the address space. */
+ if (target_default_pointer_address_modes_p ()
+ && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
+ && mode == Pmode && REG_POINTER (x))
+ return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif
{
/* Some RISC machines sign-extend all loads of smaller than a word. */
if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
return MAX (1, ((int) bitwidth
- - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
+ - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
#endif
break;
case CONST_INT:
/* If the constant is negative, take its 1's complement and remask.
Then see how many zero bits we have. */
- nonzero = INTVAL (x) & GET_MODE_MASK (mode);
+ nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
if (bitwidth <= HOST_BITS_PER_WIDE_INT
- && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
nonzero = (~nonzero) & GET_MODE_MASK (mode);
return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
known_x, known_mode, known_ret);
return MAX ((int) bitwidth
- - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
+ - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
num0);
}
/* For a smaller object, just ignore the high bits. */
- if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
+ if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
{
num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
known_x, known_mode, known_ret);
return MAX (1, (num0
- - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
- bitwidth)));
}
then we lose all sign bit copies that existed before the store
to the stack. */
- if ((GET_MODE_SIZE (GET_MODE (x))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ if (paradoxical_subreg_p (x)
&& LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
&& MEM_P (SUBREG_REG (x)))
return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
break;
case SIGN_EXTRACT:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if (CONST_INT_P (XEXP (x, 1)))
return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
break;
case SIGN_EXTEND:
- return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
+ cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
known_x, known_mode, known_ret));
/* For a smaller object, just ignore the high bits. */
num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
known_x, known_mode, known_ret);
- return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
- bitwidth)));
case NOT:
/* If we are rotating left by a number of bits less than the number
of sign bit copies, we can just subtract that amount from the
number. */
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
&& INTVAL (XEXP (x, 1)) < (int) bitwidth)
{
return bitwidth;
if (num0 > 1
- && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
+ && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
num0--;
return num0;
if (code == AND
&& num1 > 1
&& bitwidth <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (XEXP (x, 1)) == CONST_INT
- && !(INTVAL (XEXP (x, 1)) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))))
+ && CONST_INT_P (XEXP (x, 1))
+ && (UINTVAL (XEXP (x, 1))
+ & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
return num1;
/* Similarly for IOR when setting high-order bits. */
if (code == IOR
&& num1 > 1
&& bitwidth <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (XEXP (x, 1)) == CONST_INT
- && (INTVAL (XEXP (x, 1)) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))))
+ && CONST_INT_P (XEXP (x, 1))
+ && (UINTVAL (XEXP (x, 1))
+ & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
return num1;
return MIN (num0, num1);
&& bitwidth <= HOST_BITS_PER_WIDE_INT)
{
nonzero = nonzero_bits (XEXP (x, 0), mode);
- if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
+ if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
return (nonzero == 1 || nonzero == 0 ? bitwidth
: bitwidth - floor_log2 (nonzero) - 1);
}
known_x, known_mode, known_ret);
result = MAX (1, MIN (num0, num1) - 1);
-#ifdef POINTERS_EXTEND_UNSIGNED
- /* If pointers extend signed and this is an addition or subtraction
- to a pointer in Pmode, all the bits above ptr_mode are known to be
- sign bit copies. */
- if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
- && (code == PLUS || code == MINUS)
- && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
- result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
- - GET_MODE_BITSIZE (ptr_mode) + 1),
- result);
-#endif
return result;
case MULT:
if (result > 0
&& (bitwidth > HOST_BITS_PER_WIDE_INT
|| (((nonzero_bits (XEXP (x, 0), mode)
- & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
&& ((nonzero_bits (XEXP (x, 1), mode)
- & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
+ & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
+ != 0))))
result--;
return MAX (1, result);
if (bitwidth > HOST_BITS_PER_WIDE_INT)
return 1;
else if ((nonzero_bits (XEXP (x, 0), mode)
- & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
return 1;
else
return cached_num_sign_bit_copies (XEXP (x, 0), mode,
known_x, known_mode, known_ret);
case UMOD:
- /* The result must be <= the second operand. */
- return cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ /* The result must be <= the second operand. If the second operand
+ has (or just might have) the high bit set, we know nothing about
+ the number of sign bit copies. */
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return 1;
+ else if ((nonzero_bits (XEXP (x, 1), mode)
+ & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ return 1;
+ else
+ return cached_num_sign_bit_copies (XEXP (x, 1), mode,
known_x, known_mode, known_ret);
case DIV:
if (result > 1
&& (bitwidth > HOST_BITS_PER_WIDE_INT
|| (nonzero_bits (XEXP (x, 1), mode)
- & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
+ & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
result--;
return result;
if (result > 1
&& (bitwidth > HOST_BITS_PER_WIDE_INT
|| (nonzero_bits (XEXP (x, 1), mode)
- & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
+ & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
result--;
return result;
sign bit. */
num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
known_x, known_mode, known_ret);
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
- && INTVAL (XEXP (x, 1)) > 0)
+ if (CONST_INT_P (XEXP (x, 1))
+ && INTVAL (XEXP (x, 1)) > 0
+ && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
return num0;
case ASHIFT:
/* Left shifts destroy copies. */
- if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ if (!CONST_INT_P (XEXP (x, 1))
|| INTVAL (XEXP (x, 1)) < 0
- || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
+ || INTVAL (XEXP (x, 1)) >= (int) bitwidth
+ || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
return 1;
num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
Then see how many zero bits we have. */
nonzero = STORE_FLAG_VALUE;
if (bitwidth <= HOST_BITS_PER_WIDE_INT
- && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
nonzero = (~nonzero) & GET_MODE_MASK (mode);
return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
count those bits and return one less than that amount. If we can't
safely compute the mask for this mode, always return BITWIDTH. */
- bitwidth = GET_MODE_BITSIZE (mode);
+ bitwidth = GET_MODE_PRECISION (mode);
if (bitwidth > HOST_BITS_PER_WIDE_INT)
return 1;
nonzero = nonzero_bits (x, mode);
- return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
+ return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
zero indicates an instruction pattern without a known cost. */
int
-insn_rtx_cost (rtx pat)
+insn_rtx_cost (rtx pat, bool speed)
{
int i, cost;
rtx set;
else
return 0;
- cost = rtx_cost (SET_SRC (set), SET);
+ cost = set_src_cost (SET_SRC (set), speed);
return cost > 0 ? cost : COSTS_N_INSNS (1);
}
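
/* set_src_cost above is assumed to be the thin rtl.h convenience wrapper
   introduced alongside this interface change, equivalent to
   rtx_cost (SET_SRC (set), SET, 1, speed).  */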
If WANT_REG is nonzero, we wish the condition to be relative to that
register, if possible. Therefore, do not canonicalize the condition
- further. If ALLOW_CC_MODE is nonzero, allow the condition returned
+ further. If ALLOW_CC_MODE is nonzero, allow the condition returned
to be a compare to a CC mode register.
If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
stop if it isn't a single set or if it has a REG_INC note because
we don't want to bother dealing with it. */
- if ((prev = prev_nonnote_insn (prev)) == 0
+ prev = prev_nonnote_nondebug_insn (prev);
+
+ if (prev == 0
|| !NONJUMP_INSN_P (prev)
|| FIND_REG_INC_NOTE (prev, NULL_RTX)
/* In cfglayout mode, there do not have to be labels at the
if ((GET_CODE (SET_SRC (set)) == COMPARE
|| (((code == NE
|| (code == LT
- && GET_MODE_CLASS (inner_mode) == MODE_INT
- && (GET_MODE_BITSIZE (inner_mode)
- <= HOST_BITS_PER_WIDE_INT)
- && (STORE_FLAG_VALUE
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (inner_mode) - 1))))
+ && val_signbit_known_set_p (inner_mode,
+ STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
|| (code == LT
&& SCALAR_FLOAT_MODE_P (inner_mode)
x = SET_SRC (set);
else if (((code == EQ
|| (code == GE
- && (GET_MODE_BITSIZE (inner_mode)
- <= HOST_BITS_PER_WIDE_INT)
- && GET_MODE_CLASS (inner_mode) == MODE_INT
- && (STORE_FLAG_VALUE
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (inner_mode) - 1))))
+ && val_signbit_known_set_p (inner_mode,
+ STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
|| (code == GE
&& SCALAR_FLOAT_MODE_P (inner_mode)
overflow. */
if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
- && GET_CODE (op1) == CONST_INT
+ && CONST_INT_P (op1)
&& GET_MODE (op0) != VOIDmode
- && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
{
HOST_WIDE_INT const_val = INTVAL (op1);
unsigned HOST_WIDE_INT uconst_val = const_val;
/* When cross-compiling, const_val might be sign-extended from
BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
case GE:
- if ((HOST_WIDE_INT) (const_val & max_val)
- != (((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
+ if ((const_val & max_val)
+ != ((unsigned HOST_WIDE_INT) 1
+ << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
break;
have to be sign-bit copies too. */
|| num_sign_bit_copies_in_rep [in_mode][mode])
num_sign_bit_copies_in_rep [in_mode][mode]
- += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
+ += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
}
}
}
x = avoid_constant_pool_reference (x);
return GET_CODE (x) == CONST_DOUBLE;
}
+\f
+/* If M is a bitmask that selects a field of low-order bits within an item but
+ not the entire word, return the length of the field. Return -1 otherwise.
+ M is used in machine mode MODE. */
+int
+low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
+{
+ if (mode != VOIDmode)
+ {
+ if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
+ return -1;
+ m &= GET_MODE_MASK (mode);
+ }
+
+ return exact_log2 (m + 1);
+}
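+
+/* For example, low_bitmask_len (SImode, 0xff) is 8, since the mask selects
+   the low 8 bits, while low_bitmask_len (SImode, 0xf0) is -1 because the
+   field does not start at the low-order bit.  */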