+2004-08-23 Eric Christopher <echristo@redhat.com>
+
+ * defaults.h (VECTOR_MODE_SUPPORTED_P): Remove macro.
+ * system.h (VECTOR_MODE_SUPPORTED_P): Poison.
+ * target-def.h (TARGET_VECTOR_MODE_SUPPORTED_P): Define.
+ * target.h: Ditto.
+ * hooks.h: Include machmode.h.
+ (hook_bool_mode_false): Declare.
+ * hooks.c (hook_bool_mode_false): Define.
+ * expr.c (vector_mode_valid_p): Use targetm.vector_mode_supported_p.
+ * stor-layout.c (layout_type): Ditto.
+ * config/alpha/alpha.c (alpha_vector_mode_supported_p): New function.
+ Define to target macro.
+ * config/alpha/alpha.h (VECTOR_MODE_SUPPORTED_P): Delete.
+ * config/arm/arm.c: Ditto. Use.
+ * config/arm/arm.h: Ditto.
+ * config/arm/arm-protos.h: Ditto.
+ * config/i386/i386.c: Ditto.
+ * config/i386/i386.h: Ditto.
+ * config/rs6000/rs6000.c: Ditto.
+ * config/rs6000/rs6000.h: Ditto.
+ * config/sh/sh.c: Ditto.
+ * config/sh/sh.h: Ditto.
+ * config/sh/sh-protos.h: Ditto.
+ * config/sh/sh.md: Use.
+ * doc/tm.texi: Move documentation for VECTOR_MODE_SUPPORTED_P
+ to TARGET_VECTOR_MODE_SUPPORTED_P.
+
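A minimal sketch of the hook plumbing the entry above describes (illustrative only, not one of the patch hunks; it assumes the usual target-hook conventions of the 2004 tree): hooks.c gains a default that rejects every vector mode, and target-def.h points TARGET_VECTOR_MODE_SUPPORTED_P at that default unless a backend overrides the macro first, as alpha.c does further down.

/* hooks.c (sketch) -- generic default: no vector mode is supported.
   hooks.h now includes machmode.h so this signature is visible.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hooks.h"

bool
hook_bool_mode_false (enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return false;
}

/* target-def.h (sketch) -- inherited default, overridable per target.  */
#ifndef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P hook_bool_mode_false
#endif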
2004-08-23 Nathan Sidwell <nathan@codesourcery.com>
* Makefile.in (BUILD_ERRORS): Set to build-errors.
(copyprop_hardreg_forward_1): Update call to kill_value_regno.
2004-08-20 Daniel Berlin <dberlin@dberlin.org>
-
+
Fix PR tree-optimization/17111
* tree-ssa-pre.c (create_value_expr_from): Don't change the types
of non-value_handles.
(override_options): Added checks for the new options.
(s390_emit_prologue): Emit stack check and trap code and perform
compile time stack size checking.
-
- * config/s390/s390.h (TARGET_OPTIONS): Added new options
+
+ * config/s390/s390.h (TARGET_OPTIONS): Added new options
"warn-framesize", "warn-dynamicstack", "stack-size" and
"stack-guard".
/* Subroutines used for code generation on the DEC Alpha.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
/* Specify which cpu to schedule for. */
enum processor_type alpha_cpu;
-static const char * const alpha_cpu_name[] =
+static const char * const alpha_cpu_name[] =
{
"ev4", "ev5", "ev6"
};
{ "21264a", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
{ 0, 0, 0 }
};
-
+
/* Unicos/Mk doesn't have shared libraries. */
if (TARGET_ABI_UNICOSMK && flag_pic)
{
flag_pic = 0;
}
- /* On Unicos/Mk, the native compiler consistently generates /d suffices for
+ /* On Unicos/Mk, the native compiler consistently generates /d suffices for
floating-point instructions. Make that the default for this target. */
if (TARGET_ABI_UNICOSMK)
alpha_fprm = ALPHA_FPRM_DYN;
alpha_tp = ALPHA_TP_PROG;
alpha_fptm = ALPHA_FPTM_N;
- /* We cannot use su and sui qualifiers for conversion instructions on
+ /* We cannot use su and sui qualifiers for conversion instructions on
Unicos/Mk. I'm not sure if this is due to assembler or hardware
limitations. Right now, we issue a warning if -mieee is specified
and then ignore it; eventually, we should either get it right or
&& ISDIGIT ((unsigned char)alpha_mlat_string[1])
&& alpha_mlat_string[2] == '\0')
{
- static int const cache_latency[][4] =
+ static int const cache_latency[][4] =
{
{ 3, 30, -1 }, /* ev4 -- Bcache is a guess */
{ 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
return 1;
}
-/* Return true if OP is valid for a particular TLS relocation.
+/* Return true if OP is valid for a particular TLS relocation.
We are already guaranteed that OP is a CONST. */
int
}
}
+/* Implements target hook vector_mode_supported_p. */
+static bool
+alpha_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_MAX
+ && ((mode == V8QImode)
+ || (mode == V4HImode)
+ || (mode == V2SImode)))
+ return true;
+
+ return false;
+}
+
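For context, a simplified shape of how generic code reaches the hook just added above (not the exact expr.c code; the real vector_mode_valid_p also falls back to checking whether the inner scalar mode can be emulated):

/* expr.c (simplified sketch) -- ask the backend through targetm
   instead of the now-poisoned VECTOR_MODE_SUPPORTED_P macro.  */
bool
vector_mode_valid_p (enum machine_mode mode)
{
  enum mode_class class = GET_MODE_CLASS (mode);

  /* Only integer and float vector classes can possibly be valid.  */
  if (class != MODE_VECTOR_INT && class != MODE_VECTOR_FLOAT)
    return false;

  /* Hardware support is the backend's decision via the new hook;
     targets without an override inherit hook_bool_mode_false.  */
  return targetm.vector_mode_supported_p (mode);
}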
/* Return 1 if this function can directly return via $26. */
int
tga = get_tls_get_addr ();
dest = gen_reg_rtx (Pmode);
seq = GEN_INT (alpha_next_sequence_number++);
-
+
emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
insn = gen_call_value_osf_tlsgd (r0, tga, seq);
insn = emit_call_insn (insn);
small symbolic operand until after reload. At which point we need
to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref))
so that sched2 has the proper dependency information. */
-/*
+/*
{"some_small_symbolic_operand", {SET, PARALLEL, PREFETCH, UNSPEC, \
UNSPEC_VOLATILE}},
*/
return false;
}
-
+
/* Try a machine-dependent way of reloading an illegitimate address
operand. If we find one, push the reload and return the new rtx. */
-
+
rtx
alpha_legitimize_reload_address (rtx x,
enum machine_mode mode ATTRIBUTE_UNUSED,
else
*total = COSTS_N_INSNS (2);
return true;
-
+
case CONST:
case SYMBOL_REF:
case LABEL_REF:
/* Otherwise we do a load from the GOT. */
*total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
return true;
-
+
case PLUS:
case MINUS:
if (float_mode_p)
*pbitnum = GEN_INT ((offset & 3) * 8);
}
-/* Similar, but just get the address. Handle the two reload cases.
+/* Similar, but just get the address. Handle the two reload cases.
Add EXTRA_OFFSET to the address we return. */
rtx
}
/* On the Alpha, all (non-symbolic) constants except zero go into
- a floating-point register via memory. Note that we cannot
+ a floating-point register via memory. Note that we cannot
return anything that is not a subset of CLASS, and that some
symbolic constants cannot be dropped to memory. */
/* Loading and storing HImode or QImode values to and from memory
usually requires a scratch register. The exceptions are loading
QImode and HImode from an aligned address to a general register
- unless byte instructions are permitted.
+ unless byte instructions are permitted.
We also cannot load an unaligned address or a paradoxical SUBREG
- into an FP register.
+ into an FP register.
We also cannot do integral arithmetic into FP regs, as might result
from register elimination into a DImode fp register. */
if (GET_CODE (ref) != MEM)
return;
- /* This is only called from alpha.md, after having had something
+ /* This is only called from alpha.md, after having had something
generated from one of the insn patterns. So if everything is
zero, the pattern is already up-to-date. */
if (!MEM_VOLATILE_P (ref)
{
/* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
but that meant that we can't handle INT_MIN on 32-bit machines
- (like NT/Alpha), because we recurse indefinitely through
+ (like NT/Alpha), because we recurse indefinitely through
emit_move_insn to gen_movdi. So instead, since we know exactly
what we want, create it explicitly. */
else
{
/* ??? We mark the branch mode to be CCmode to prevent the
- compare and branch from being combined, since the compare
+ compare and branch from being combined, since the compare
insn follows IEEE rules that the branch does not. */
branch_mode = CCmode;
}
rtx libcall;
};
-static GTY(()) struct xfloating_op xfloating_ops[] =
+static GTY(()) struct xfloating_op xfloating_ops[] =
{
{ PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
{ MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
Note that these functions do not follow normal calling conventions:
TFmode arguments are passed in two integer registers (as opposed to
- indirect); TFmode return values appear in R16+R17.
+ indirect); TFmode return values appear in R16+R17.
FUNC is the function to call.
TARGET is where the output belongs.
out_operands[0] = operands[1];
out_operands[1] = operands[2];
out_operands[2] = GEN_INT (mode);
- alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
+ alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
gen_rtx_fmt_ee (code, TFmode, operands[1],
operands[2]));
}
abort ();
}
-/* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
- op2 is a register containing the sign bit, operation is the
+/* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
+ op2 is a register containing the sign bit, operation is the
logical operation to be performed. */
void
mema = force_reg (Pmode, mema);
/* AND addresses cannot be in any alias set, since they may implicitly
- alias surrounding code. Ideally we'd have some alias set that
+ alias surrounding code. Ideally we'd have some alias set that
covered all types except those with alignment 8 or higher. */
tmp = change_address (mem, DImode,
- gen_rtx_AND (DImode,
+ gen_rtx_AND (DImode,
plus_constant (mema, ofs),
GEN_INT (-8)));
set_mem_alias_set (tmp, 0);
emit_move_insn (meml, tmp);
tmp = change_address (mem, DImode,
- gen_rtx_AND (DImode,
+ gen_rtx_AND (DImode,
plus_constant (mema, ofs + size - 1),
GEN_INT (-8)));
set_mem_alias_set (tmp, 0);
addr for the target, because addr is marked as a pointer and combine
knows that pointers are always sign-extended 32 bit values. */
addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
- addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
+ addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
addr, 1, OPTAB_WIDEN);
}
else
HOST_WIDE_INT size, HOST_WIDE_INT ofs)
{
rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
-
+
dstl = gen_reg_rtx (DImode);
dsth = gen_reg_rtx (DImode);
insl = gen_reg_rtx (DImode);
dsta = force_reg (Pmode, dsta);
/* AND addresses cannot be in any alias set, since they may implicitly
- alias surrounding code. Ideally we'd have some alias set that
+ alias surrounding code. Ideally we'd have some alias set that
covered all types except those with alignment 8 or higher. */
meml = change_address (dst, DImode,
- gen_rtx_AND (DImode,
+ gen_rtx_AND (DImode,
plus_constant (dsta, ofs),
GEN_INT (-8)));
set_mem_alias_set (meml, 0);
memh = change_address (dst, DImode,
- gen_rtx_AND (DImode,
+ gen_rtx_AND (DImode,
plus_constant (dsta, ofs + size - 1),
GEN_INT (-8)));
set_mem_alias_set (memh, 0);
dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
}
-
+
if (WORDS_BIG_ENDIAN)
{
emit_move_insn (meml, dstl);
if (ofs != 0)
smem = adjust_address (smem, GET_MODE (smem), ofs);
-
+
/* Load up all of the source data. */
for (i = 0; i < words; ++i)
{
emit_move_insn (data_regs[words], tmp);
/* Extract the half-word fragments. Unfortunately DEC decided to make
- extxh with offset zero a noop instead of zeroing the register, so
+ extxh with offset zero a noop instead of zeroing the register, so
we must take care of that edge condition ourselves with cmov. */
sreg = copy_addr_to_reg (smema);
- areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
+ areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
1, OPTAB_WIDEN);
if (WORDS_BIG_ENDIAN)
emit_move_insn (sreg, plus_constant (sreg, 7));
ins_tmps[i] = gen_reg_rtx(DImode);
st_tmp_1 = gen_reg_rtx(DImode);
st_tmp_2 = gen_reg_rtx(DImode);
-
+
if (ofs != 0)
dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
rtx data_regs[2 * MAX_MOVE_WORDS + 16];
rtx tmp;
unsigned int i, words, ofs, nregs = 0;
-
+
if (orig_bytes <= 0)
return 1;
else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
src_align = 16;
}
}
-
+
tmp = XEXP (orig_dst, 0);
if (GET_CODE (tmp) == REG)
dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
else
alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
words, ofs);
-
+
i += words;
ofs += words * 8;
}
rtx orig_dst = operands[0];
rtx tmp;
int i, words, ofs = 0;
-
+
if (orig_bytes <= 0)
return 1;
if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
static struct machine_function *
alpha_init_machine_status (void)
{
- return ((struct machine_function *)
+ return ((struct machine_function *)
ggc_alloc_cleared (sizeof (struct machine_function)));
}
{
case ALPHA_FPRM_NORM:
return NULL;
- case ALPHA_FPRM_MINF:
+ case ALPHA_FPRM_MINF:
return "m";
case ALPHA_FPRM_CHOP:
return "c";
if (GET_CODE (x) != CONST_INT
|| (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
? 56
- : 64)
+ : 64)
|| (INTVAL (x) & 7) != 0)
output_operand_lossage ("invalid %%s value");
if (offset)
fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
-
+
addr = XEXP (addr, 0);
if (GET_CODE (addr) == REG)
basereg = REGNO (addr);
code. CXT is an RTX for the static chain value for the function.
The three offset parameters are for the individual template's
- layout. A JMPOFS < 0 indicates that the trampoline does not
+ layout. A JMPOFS < 0 indicates that the trampoline does not
contain instructions at all.
We assume here that a function will be called many more times than
break;
default:
- /* ??? We get called on all sorts of random stuff from
+ /* ??? We get called on all sorts of random stuff from
aggregate_value_p. We can't abort, but it's not clear
what's safe to return. Pretend it's a struct I guess. */
return true;
return gen_rtx_REG (mode, regnum);
}
-/* TCmode complex values are passed by invisible reference. We
+/* TCmode complex values are passed by invisible reference. We
should not split these values. */
static bool
in order to account for the integer arg registers which are counted
in argsize above, but which are not actually stored on the stack.
Must further be careful here about structures straddling the last
- integer argument register; that futzes with pretend_args_size,
+ integer argument register; that futzes with pretend_args_size,
which changes the meaning of AP. */
if (NUM_ARGS <= 6)
zero in the prologue of _Unwind_RaiseException et al. */
imask |= 1UL << 31;
}
-
+
/* If any register spilled, then spill the return address also. */
/* ??? This is required by the Digital stack unwind specification
and isn't needed if we're doing Dwarf2 unwinding. */
if (current_function_has_nonlocal_goto)
return 1;
- /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
+ /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
Even if we are a static function, we still need to do this in case
our address is taken and passed to something like qsort. */
frame_size = get_frame_size ();
if (TARGET_ABI_OPEN_VMS)
- frame_size = ALPHA_ROUND (sa_size
+ frame_size = ALPHA_ROUND (sa_size
+ (alpha_procedure_type == PT_STACK ? 8 : 0)
+ frame_size
+ current_function_pretend_args_size);
4096 bytes (we can probably get away without the latter test) and
every 8192 bytes in between. If the frame size is > 32768, we
do this in a loop. Otherwise, we generate the explicit probe
- instructions.
+ instructions.
Note that we are only allowed to adjust sp once in the prologue. */
/* For NT stack unwind (done by 'reverse execution'), it's
not OK to take the result of a loop, even though the value
is already in ptr, so we reload it via a single operation
- and subtract it to sp.
+ and subtract it to sp.
Yes, that's correct -- we have to reload the whole constant
into a temporary via ldah+lda then subtract from sp. */
if (low + sa_size <= 0x8000)
bias = reg_offset - low, reg_offset = low;
- else
+ else
bias = reg_offset, reg_offset = 0;
sa_reg = gen_rtx_REG (DImode, 24);
FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx,
GEN_INT (bias))));
}
-
+
/* Save regs in stack order. Beginning with VMS PV. */
if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
{
if (current_function_outgoing_args_size != 0)
{
rtx seq
- = emit_move_insn (stack_pointer_rtx,
+ = emit_move_insn (stack_pointer_rtx,
plus_constant
(hard_frame_pointer_rtx,
- (ALPHA_ROUND
(current_function_outgoing_args_size))));
-
+
/* Only set FRAME_RELATED_P on the stack adjustment we just emitted
if ! frame_pointer_needed. Setting the bit will change the CFA
computation rule to use sp again, which would be wrong if we had
(clobber:BLK (scratch)), but this doesn't work for fp insns. So we
have to prevent all such scheduling with a blockage.
- Linux, on the other hand, never bothered to implement OSF/1's
+ Linux, on the other hand, never bothered to implement OSF/1's
exception handling, and so doesn't care about such things. Anyone
planning to use dwarf2 frame-unwind info can also omit the blockage. */
frame_size = get_frame_size ();
if (TARGET_ABI_OPEN_VMS)
- frame_size = ALPHA_ROUND (sa_size
+ frame_size = ALPHA_ROUND (sa_size
+ (alpha_procedure_type == PT_STACK ? 8 : 0)
+ frame_size
+ current_function_pretend_args_size);
/* Write function epilogue. */
-/* ??? At some point we will want to support full unwind, and so will
+/* ??? At some point we will want to support full unwind, and so will
need to mark the epilogue as well. At the moment, we just confuse
dwarf2out. */
#undef FRP
frame_size = get_frame_size ();
if (TARGET_ABI_OPEN_VMS)
- frame_size = ALPHA_ROUND (sa_size
+ frame_size = ALPHA_ROUND (sa_size
+ (alpha_procedure_type == PT_STACK ? 8 : 0)
+ frame_size
+ current_function_pretend_args_size);
if (low + sa_size <= 0x8000)
bias = reg_offset - low, reg_offset = low;
- else
+ else
bias = reg_offset, reg_offset = 0;
sa_reg = gen_rtx_REG (DImode, 22);
FRP (emit_move_insn (sa_reg, sa_reg_exp));
}
-
+
/* Restore registers in order, excepting a true frame pointer. */
mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
FRP (emit_move_insn (stack_pointer_rtx,
gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
}
- else
+ else
{
if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
{
case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
- case SQRT: case FFS:
+ case SQRT: case FFS:
summarize_insn (XEXP (x, 0), sum, 0);
break;
shadow.used.fp = 0;
shadow.used.mem = 0;
shadow.defd = shadow.used;
-
+
for (i = get_insns (); i ; i = NEXT_INSN (i))
{
if (GET_CODE (i) == NOTE)
}
}
-/* IN_USE is a mask of the slots currently filled within the insn group.
+/* IN_USE is a mask of the slots currently filled within the insn group.
The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
- the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
+ the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
LEN is, of course, the length of the group in bytes. */
abort();
}
len += 4;
-
+
/* Haifa doesn't do well scheduling branches. */
if (GET_CODE (insn) == JUMP_INSN)
goto next_and_done;
return insn;
}
-/* IN_USE is a mask of the slots currently filled within the insn group.
+/* IN_USE is a mask of the slots currently filled within the insn group.
The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
- the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
+ the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
LEN is, of course, the length of the group in bytes. */
len = get_attr_length (insn);
goto next_and_done;
- /* ??? Most of the places below, we would like to abort, as
- it would indicate an error either in Haifa, or in the
- scheduling description. Unfortunately, Haifa never
+ /* ??? Most of the places below, we would like to abort, as
+ it would indicate an error either in Haifa, or in the
+ scheduling description. Unfortunately, Haifa never
schedules the last instruction of the BB, so we don't
have an accurate TI bit to go off. */
case EV5_E01:
abort();
}
len += 4;
-
+
/* Haifa doesn't do well scheduling branches. */
/* ??? If this is predicted not-taken, slotting continues, except
that no more IBR, FBR, or JSR insns may be slotted. */
else
where = i;
- do
+ do
emit_insn_before ((*next_nop)(&prev_in_use), where);
while (--nop_count);
ofs = 0;
if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
fprintf (asm_out_file,
"\t.arch %s\n",
- TARGET_CPU_EV6 ? "ev6"
+ TARGET_CPU_EV6 ? "ev6"
: (TARGET_CPU_EV5
? (TARGET_MAX ? "pca56" : TARGET_BWX ? "ev56" : "ev5")
: "ev4"));
if (!alpha_funcs_tree)
alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
splay_tree_compare_pointers);
-
+
cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
cfaf->links = 0;
al->rkind = KIND_CODEADDR;
else
al->rkind = KIND_LINKAGE;
-
+
if (lflag)
return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
else
/* Record an element in the table of global constructors. SYMBOL is
a SYMBOL_REF of the function to be called; PRIORITY is a number
- between 0 and MAX_INIT_PRIORITY.
+ between 0 and MAX_INIT_PRIORITY.
Differs from default_ctors_section_asm_out_constructor in that the
width of the .ctors entry is always 64 bits, rather than the 32 bits
unicosmk_initial_elimination_offset (int from, int to)
{
int fixed_size;
-
+
fixed_size = alpha_sa_size();
if (fixed_size != 0)
fixed_size += 48;
if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
- return -fixed_size;
+ return -fixed_size;
else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
return 0;
else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ ALPHA_ROUND (get_frame_size()));
else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
return (ALPHA_ROUND (fixed_size)
- + ALPHA_ROUND (get_frame_size()
+ + ALPHA_ROUND (get_frame_size()
+ current_function_outgoing_args_size));
else
abort ();
unsigned len = strlen (name);
char *clean_name = alloca (len + 2);
char *ptr = clean_name;
-
+
/* CAM only accepts module names that start with a letter or '$'. We
prefix the module name with a '$' if necessary. */
const char *name;
int len;
- if (!decl)
+ if (!decl)
abort ();
name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
{
char *string;
- /* It is essential that we prefix the section name here because
- otherwise the section names generated for constructors and
+ /* It is essential that we prefix the section name here because
+ otherwise the section names generated for constructors and
destructors confuse collect2. */
string = alloca (len + 6);
/* Output an alignment directive. We have to use the macro 'gcc@code@align'
in code sections because .align fills unused space with zeroes. */
-
+
void
unicosmk_output_align (FILE *file, int align)
{
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
struct machine_function *machine = cfun->machine;
-
+
vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
- machine->addr_list);
+ machine->addr_list);
}
/* Output a case vector. */
static const char *
unicosmk_ssib_name (void)
{
- /* This is ok since CAM won't be able to deal with names longer than that
+ /* This is ok since CAM won't be able to deal with names longer than that
anyway. */
static char name[256];
return name;
}
-/* Set up the dynamic subprogram information block (DSIB) and update the
- frame pointer register ($15) for subroutines which have a frame. If the
+/* Set up the dynamic subprogram information block (DSIB) and update the
+ frame pointer register ($15) for subroutines which have a frame. If the
subroutine doesn't have a frame, simply increment $15. */
static void
unicosmk_text_section (void)
{
static int count = 0;
- sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
+ sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
count++);
return unicosmk_section_buf;
}
unicosmk_data_section (void)
{
static int count = 1;
- sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
+ sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
count++);
return unicosmk_section_buf;
}
len = strlen (user_label_prefix);
for (p = unicosmk_extern_head; p != 0; p = p->next)
{
- /* We have to strip the encoding and possibly remove user_label_prefix
+ /* We have to strip the encoding and possibly remove user_label_prefix
from the identifier in order to handle -fleading-underscore and
explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
real_name = default_strip_name_encoding (p->name);
if (len && p->name[0] == '*'
&& !memcmp (real_name, user_label_prefix, len))
real_name += len;
-
+
name_tree = get_identifier (real_name);
if (! TREE_ASM_WRITTEN (name_tree))
{
}
}
}
-
+
/* Record an extern. */
void
const char *name;
};
-/* List of identifiers which have been replaced by DEX expressions. The DEX
+/* List of identifiers which have been replaced by DEX expressions. The DEX
number is determined by the position in the list. */
-static struct unicosmk_dex *unicosmk_dex_list = NULL;
+static struct unicosmk_dex *unicosmk_dex_list = NULL;
/* The number of elements in the DEX list. */
struct unicosmk_dex *dex;
const char *name;
int i;
-
+
if (GET_CODE (x) != SYMBOL_REF)
return 0;
return i;
--i;
}
-
+
dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
dex->name = name;
dex->next = unicosmk_dex_list;
putc ('\n', file);
--i;
}
-
+
fprintf (file, "\t.dexend\n");
}
/* Output text to appear at the beginning of an assembler file. */
-static void
+static void
unicosmk_file_start (void)
{
int i;
unicosmk_output_externs (asm_out_file);
- /* Output dex definitions used for functions whose names conflict with
+ /* Output dex definitions used for functions whose names conflict with
register names. */
unicosmk_output_dex (asm_out_file);
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
\f
#include "gt-alpha.h"
-
#define MASK_SMALL_TEXT (1 << 15)
#define TARGET_SMALL_TEXT (target_flags & MASK_SMALL_TEXT)
-/* This means use IEEE quad-format for long double. Assumes the
+/* This means use IEEE quad-format for long double. Assumes the
presence of the GEM support library routines. */
#define MASK_LONG_DOUBLE_128 (1 << 16)
#define TARGET_LONG_DOUBLE_128 (target_flags & MASK_LONG_DOUBLE_128)
#define WCHAR_TYPE_SIZE 32
/* Define this macro if it is advisable to hold scalars in registers
- in a wider mode than that declared by the program. In such cases,
+ in a wider mode than that declared by the program. In such cases,
the value is constrained to be within the bounds of the declared
type, but kept valid in the wider mode. The signedness of the
extension may differ from that of the type.
We define all 32 integer registers, even though $31 is always zero,
and all 32 floating-point registers, even though $f31 is also
always zero. We do not bother defining the FP status register and
- there are no other registers.
+ there are no other registers.
Since $31 is always zero, we will use register number 31 as the
argument pointer. It will never appear in the generated code
? (MODE) == SFmode || (MODE) == DFmode || (MODE) == DImode \
: 1)
-/* Value is 1 if MODE is a supported vector mode. */
-
-#define VECTOR_MODE_SUPPORTED_P(MODE) \
- (TARGET_MAX \
- && ((MODE) == V8QImode || (MODE) == V4HImode || (MODE) == V2SImode))
-
/* A C expression that is nonzero if a value of mode
MODE1 is accessible in mode MODE2 without copying.
/* Base register for access to local variables of function. */
#define FRAME_POINTER_REGNUM 63
-/* Register in which static-chain is passed to a function.
+/* Register in which static-chain is passed to a function.
For the Alpha, this is based on an example; the calling sequence
doesn't seem to specify this. */
For any two classes, it is very desirable that there be another
class that represents their union. */
-
+
enum reg_class {
NO_REGS, R0_REG, R24_REG, R25_REG, R27_REG,
GENERAL_REGS, FLOAT_REGS, ALL_REGS,
`R' is a SYMBOL_REF that has SYMBOL_REF_FLAG set or is the current
function.
- 'S' is a 6-bit constant (valid for a shift insn).
+ 'S' is a 6-bit constant (valid for a shift insn).
'T' is a HIGH.
? reg_classes_intersect_p (FLOAT_REGS, CLASS) : 0)
/* Define the cost of moving between registers of various classes. Moving
- between FLOAT_REGS and anything else except float regs is expensive.
+ between FLOAT_REGS and anything else except float regs is expensive.
In fact, we make it quite expensive because we really don't want to
do these moves unless it is clearly worth it. Optimizations may
reduce the impact of not being able to allocate a pseudo to a
#define ASM_DECLARE_FUNCTION_SIZE(FILE,NAME,DECL) \
alpha_end_function(FILE,NAME,DECL)
-
+
/* Output any profiling code before the prologue. */
#define PROFILE_BEFORE_PROLOGUE 1
of a trampoline, leaving space for the variable parts.
The trampoline should set the static chain pointer to value placed
- into the trampoline and should branch to the specified routine.
+ into the trampoline and should branch to the specified routine.
Note that $27 has been set to the address of the trampoline, so we can
use it for addressability of the two data items. */
/* Try a machine-dependent way of reloading an illegitimate address
operand. If we find one, push the reload and jump to WIN. This
macro is used in only one place: `find_reloads_address' in reload.c. */
-
+
#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_L,WIN) \
do { \
rtx new_x = alpha_legitimize_reload_address (X, MODE, OPNUM, TYPE, IND_L); \
/* Nonzero if access to memory by bytes is no faster than for words.
Also nonzero if doing byte operations (specifically shifts) in registers
- is undesirable.
+ is undesirable.
On the Alpha, we want to not use the byte operation and instead use
masking operations to access fields; these will save instructions. */
extern void arm_encode_call_attribute (tree, int);
#endif
#ifdef RTX_CODE
+extern bool arm_vector_mode_supported_p (enum machine_mode);
extern int arm_hard_regno_mode_ok (unsigned int, enum machine_mode);
extern int const_ok_for_arm (HOST_WIDE_INT);
extern int arm_split_constant (RTX_CODE, enum machine_mode, rtx,
extern rtx arm_function_value(tree, tree);
#endif
-#if defined AOF_ASSEMBLER
+#if defined AOF_ASSEMBLER
extern rtx aof_pic_entry (rtx);
extern char *aof_text_section (void);
extern char *aof_data_section (void);
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-
+
#include "config.h"
#include "system.h"
#include "coretypes.h"
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
+
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
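The arm_vector_mode_supported_p body itself does not appear in this excerpt; a plausible shape, inferred from the iWMMXt vector modes the removed arm.h macro accepted, is sketched below (treat the exact set of modes as an assumption, not the committed code):

/* Sketch only -- assumed to mirror the old VECTOR_MODE_SUPPORTED_P
   macro from arm.h, which accepted the iWMMXt vector modes.  */
bool
arm_vector_mode_supported_p (enum machine_mode mode)
{
  if ((mode == V8QImode) || (mode == V4HImode) || (mode == V2SImode))
    return true;

  return false;
}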
int thumb_code = 0;
/* Nonzero if we should define __THUMB_INTERWORK__ in the
- preprocessor.
+ preprocessor.
XXX This is a bit of a hack, it's intended to help work around
problems in GLD which doesn't understand that armv5t code is
interworking clean. */
/* ARM Architectures */
/* We don't specify rtx_costs here as it will be figured out
from the core. */
-
+
{"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
{"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
{"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
struct arm_cpu_select arm_select[] =
{
- /* string name processors */
+ /* string name processors */
{ NULL, "-mcpu=", all_cores },
{ NULL, "-march=", all_architectures },
{ NULL, "-mtune=", all_cores }
bit_count (unsigned long value)
{
unsigned long count = 0;
-
+
while (value)
{
count++;
set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
-
+
 /* Single-precision comparisons. Table 5. */
set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
set_optab_libfunc (ne_optab, SFmode, NULL);
for (i = ARRAY_SIZE (arm_select); i--;)
{
struct arm_cpu_select * ptr = arm_select + i;
-
+
if (ptr->string != NULL && ptr->string[0] != '\0')
{
const struct processors * sel;
if (insn_flags != 0 && (insn_flags ^ sel->flags))
warning ("switch -mcpu=%s conflicts with -march= switch",
ptr->string);
-
+
insn_flags = sel->flags;
}
-
+
break;
}
error ("bad value (%s) for %s switch", ptr->string, ptr->name);
}
}
-
+
/* If the user did not specify a processor, choose one for them. */
if (insn_flags == 0)
{
/* Now check to see if the user has specified some command line
switch that require certain abilities from the cpu. */
sought = 0;
-
+
if (TARGET_INTERWORK || TARGET_THUMB)
{
sought |= (FL_THUMB | FL_MODE32);
-
+
/* There are no ARM processors that support both APCS-26 and
interworking. Therefore we force FL_MODE26 to be removed
from insn_flags here (if it was set), so that the search
below will always be able to find a compatible processor. */
insn_flags &= ~FL_MODE26;
}
-
+
if (sought != 0 && ((sought & insn_flags) != sought))
{
/* Try to locate a CPU type that supports all of the abilities
{
unsigned current_bit_count = 0;
const struct processors * best_fit = NULL;
-
+
/* Ideally we would like to issue an error message here
saying that it was not possible to find a CPU compatible
with the default CPU, but which also supports the command
if (arm_tune == arm_none)
arm_tune = (enum processor_type) (sel - all_cores);
}
-
+
/* The processor for which we should tune should now have been
chosen. */
if (arm_tune == arm_none)
abort ();
-
+
tune_flags = all_cores[(int)arm_tune].flags;
if (optimize_size)
targetm.rtx_costs = arm_size_rtx_costs;
warning ("target CPU does not support interworking" );
target_flags &= ~ARM_FLAG_INTERWORK;
}
-
+
if (TARGET_THUMB && !(insn_flags & FL_THUMB))
{
warning ("target CPU does not support THUMB instructions");
warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
target_flags |= ARM_FLAG_APCS_FRAME;
}
-
+
if (TARGET_POKE_FUNCTION_NAME)
target_flags |= ARM_FLAG_APCS_FRAME;
-
+
if (TARGET_APCS_REENT && flag_pic)
error ("-fpic and -mapcs-reent are incompatible");
-
+
if (TARGET_APCS_REENT)
warning ("APCS reentrant code not supported. Ignored");
-
+
/* If this target is normally configured to use APCS frames, warn if they
are turned off and debugging is turned on. */
if (TARGET_ARM
&& !TARGET_APCS_FRAME
&& (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
warning ("-g with -mno-apcs-frame may not give sensible debugging");
-
+
/* If stack checking is disabled, we can use r10 as the PIC register,
which keeps r9 available. */
if (flag_pic)
arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
-
+
if (TARGET_APCS_FLOAT)
warning ("passing floating point arguments in fp regs not yet supported");
-
+
/* Initialize boolean versions of the flags, for use in the arm.md file. */
arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
arm_arch4 = (insn_flags & FL_ARCH4) != 0;
/* If soft-float is specified then don't use FPU. */
if (TARGET_SOFT_FLOAT)
arm_fpu_arch = FPUTYPE_NONE;
-
+
/* For arm2/3 there is no need to do any scheduling if there is only
a floating point emulator, or we are doing software floating-point. */
if ((TARGET_SOFT_FLOAT
|| arm_fpu_tune == FPUTYPE_FPA_EMU3)
&& (tune_flags & FL_MODE32) == 0)
flag_schedule_insns = flag_schedule_insns_after_reload = 0;
-
+
/* Override the default structure alignment for AAPCS ABI. */
if (arm_abi == ARM_ABI_AAPCS)
arm_structure_size_boundary = 8;
unsigned long type = ARM_FT_UNKNOWN;
tree a;
tree attr;
-
+
if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
abort ();
&& TREE_NOTHROW (current_function_decl)
&& TREE_THIS_VOLATILE (current_function_decl))
type |= ARM_FT_VOLATILE;
-
+
if (cfun->static_chain_decl != NULL)
type |= ARM_FT_NESTED;
attr = DECL_ATTRIBUTES (current_function_decl);
-
+
a = lookup_attribute ("naked", attr);
if (a != NULL_TREE)
type |= ARM_FT_NAKED;
a = lookup_attribute ("isr", attr);
if (a == NULL_TREE)
a = lookup_attribute ("interrupt", attr);
-
+
if (a == NULL_TREE)
type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
else
type |= arm_isr_value (TREE_VALUE (a));
-
+
return type;
}
return cfun->machine->func_type;
}
\f
-/* Return 1 if it is possible to return using a single instruction.
+/* Return 1 if it is possible to return using a single instruction.
If SIBLING is non-null, this is a test for a return before a sibling
call. SIBLING is the call insn, so we can examine its register usage. */
pointer won't be correctly restored if the instruction takes a
page fault. We work around this problem by popping r3 along with
the other registers, since that is never slower than executing
- another instruction.
+ another instruction.
We test for !arm_arch5 here, because code for any architecture
less than this could potentially be run on one of the buggy
taken and multiple registers have been stacked. */
if (iscond && arm_is_strong)
{
- /* Conditional return when just the LR is stored is a simple
+ /* Conditional return when just the LR is stored is a simple
conditional-load instruction, that's not expensive. */
if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
return 0;
{
unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
- /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
+ /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
be all zero, or all one. */
if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
&& ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
!= ((~(unsigned HOST_WIDE_INT) 0)
& ~(unsigned HOST_WIDE_INT) 0xffffffff)))
return FALSE;
-
+
/* Fast return for 0 and powers of 2 */
if ((i & (i - 1)) == 0)
return TRUE;
*/
if (!after_arm_reorg
&& !cond
- && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
+ && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1, 0)
> arm_constant_limit + (code != SET)))
{
}
}
- return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
+ return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1);
}
do
{
int end;
-
+
if (i <= 0)
i += 32;
if (remainder & (3 << (i - 2)))
{
if (generate)
emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, target,
+ gen_rtx_SET (VOIDmode, target,
gen_rtx_MINUS (mode, GEN_INT (val),
source)));
return 1;
if (generate)
emit_constant_insn (cond,
gen_rtx_SET (VOIDmode, target,
- (source
+ (source
? gen_rtx_fmt_ee (code, mode, source,
GEN_INT (val))
: GEN_INT (val))));
if (set_sign_bit_copies > 1)
{
if (const_ok_for_arm
- (temp1 = ARM_SIGN_EXTEND (remainder
+ (temp1 = ARM_SIGN_EXTEND (remainder
<< (set_sign_bit_copies - 1))))
{
if (generate)
{
rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, new_src,
+ gen_rtx_SET (VOIDmode, new_src,
GEN_INT (temp1)));
emit_constant_insn (cond,
- gen_ashrsi3 (target, new_src,
+ gen_ashrsi3 (target, new_src,
GEN_INT (set_sign_bit_copies - 1)));
}
return 2;
gen_rtx_SET (VOIDmode, new_src,
GEN_INT (temp1)));
emit_constant_insn (cond,
- gen_ashrsi3 (target, new_src,
+ gen_ashrsi3 (target, new_src,
GEN_INT (set_sign_bit_copies - 1)));
}
return 2;
source, subtargets, generate);
source = new_src;
if (generate)
- emit_constant_insn
+ emit_constant_insn
(cond,
gen_rtx_SET
(VOIDmode, target,
rtx sub = subtargets ? gen_reg_rtx (mode) : target;
emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, sub,
+ gen_rtx_SET (VOIDmode, sub,
GEN_INT (val)));
emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, target,
+ gen_rtx_SET (VOIDmode, target,
gen_rtx_fmt_ee (code, mode,
source, sub)));
}
rtx sub = subtargets ? gen_reg_rtx (mode) : target;
rtx shift = GEN_INT (set_sign_bit_copies);
- emit_constant_insn
+ emit_constant_insn
(cond,
gen_rtx_SET (VOIDmode, sub,
- gen_rtx_NOT (mode,
+ gen_rtx_NOT (mode,
gen_rtx_ASHIFT (mode,
- source,
+ source,
shift))));
- emit_constant_insn
+ emit_constant_insn
(cond,
gen_rtx_SET (VOIDmode, target,
gen_rtx_NOT (mode,
gen_rtx_LSHIFTRT (mode,
source,
shift))));
- emit_constant_insn
+ emit_constant_insn
(cond,
gen_rtx_SET (VOIDmode, target,
gen_rtx_NOT (mode,
sub = gen_reg_rtx (mode);
emit_constant_insn (cond,
gen_rtx_SET (VOIDmode, sub,
- gen_rtx_AND (mode, source,
+ gen_rtx_AND (mode, source,
GEN_INT (temp1))));
emit_constant_insn (cond,
gen_rtx_SET (VOIDmode, target,
if (generate)
{
rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
- insns = arm_gen_constant (AND, mode, cond,
+ insns = arm_gen_constant (AND, mode, cond,
remainder | shift_mask,
new_src, source, subtargets, 1);
source = new_src;
if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
{
HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
-
+
if ((remainder | shift_mask) != 0xffffffff)
{
if (generate)
rather than having to synthesize both large constants from scratch.
Therefore, we calculate how many insns would be required to emit
- the constant starting from `best_start', and also starting from
- zero (ie with bit 31 first to be output). If `best_start' doesn't
+ the constant starting from `best_start', and also starting from
+ zero (ie with bit 31 first to be output). If `best_start' doesn't
yield a shorter sequence, we may as well use zero. */
if (best_start != 0
&& ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
- && (count_insns_for_constant (remainder, 0) <=
+ && (count_insns_for_constant (remainder, 0) <=
count_insns_for_constant (remainder, best_start)))
best_start = 0;
temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, new_src,
+ gen_rtx_SET (VOIDmode, new_src,
temp1_rtx));
source = new_src;
}
int unsignedp ATTRIBUTE_UNUSED;
rtx r ATTRIBUTE_UNUSED;
-
+
mode = TYPE_MODE (type);
/* Promote integer types. */
if (INTEGRAL_TYPE_P (type))
larger than a word (or are variable size). */
return (size < 0 || size > UNITS_PER_WORD);
}
-
+
/* For the arm-wince targets we choose to be compatible with Microsoft's
ARM and Thumb compilers, which always return aggregates in memory. */
#ifndef ARM_WINCE
we will want to return it via memory and not in a register. */
if (size < 0 || size > UNITS_PER_WORD)
return 1;
-
+
if (TREE_CODE (type) == RECORD_TYPE)
{
tree field;
has an offset of zero. For practical purposes this means
that the structure can have at most one non bit-field element
and that this element must be the first one in the structure. */
-
+
/* Find the first field, ignoring non FIELD_DECL things which will
have been created by C++. */
for (field = TYPE_FIELDS (type);
field && TREE_CODE (field) != FIELD_DECL;
field = TREE_CHAIN (field))
continue;
-
+
if (field == NULL)
return 0; /* An empty structure. Allowed by an extension to ANSI C. */
{
if (TREE_CODE (field) != FIELD_DECL)
continue;
-
+
if (!DECL_BIT_FIELD_TYPE (field))
return 1;
}
return 0;
}
-
+
if (TREE_CODE (type) == UNION_TYPE)
{
tree field;
if (FLOAT_TYPE_P (TREE_TYPE (field)))
return 1;
-
+
if (RETURN_IN_MEMORY (TREE_TYPE (field)))
return 1;
}
-
+
return 0;
}
-#endif /* not ARM_WINCE */
-
+#endif /* not ARM_WINCE */
+
/* Return all other types in memory. */
return 1;
}
for a call to a function whose data type is FNTYPE.
For a library call, FNTYPE is NULL. */
void
-arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
+arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
rtx libname ATTRIBUTE_UNUSED,
tree fndecl ATTRIBUTE_UNUSED)
{
pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
pcum->iwmmxt_nregs = 0;
pcum->can_split = true;
-
+
pcum->call_cookie = CALL_NORMAL;
if (TARGET_LONG_CALLS)
pcum->call_cookie = CALL_LONG;
-
+
/* Check for long call/short call attributes. The attributes
override any command line option. */
if (fntype)
/* Varargs vectors are treated the same as long long.
named_count avoids having to change the way arm handles 'named' */
if (TARGET_IWMMXT_ABI
- && VECTOR_MODE_SUPPORTED_P (mode)
+ && arm_vector_mode_supported_p (mode)
&& pcum->named_count > pcum->nargs + 1)
{
if (pcum->iwmmxt_nregs <= 9)
if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
return NULL_RTX;
-
+
return gen_rtx_REG (mode, pcum->nregs);
}
/* Whereas these functions are always known to reside within the 26 bit
addressing range. */
{ "short_call", 0, 0, false, true, true, NULL },
- /* Interrupt Service Routines have special prologue and epilogue requirements. */
+ /* Interrupt Service Routines have special prologue and epilogue requirements. */
{ "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
{ "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
{ "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
arm_comp_type_attributes (tree type1, tree type2)
{
int l1, l2, s1, s2;
-
+
/* Check for mismatch of non-default calling convention. */
if (TREE_CODE (type1) != FUNCTION_TYPE)
return 1;
if ((l1 & s2) || (l2 & s1))
return 0;
}
-
+
/* Check for mismatched ISR attribute. */
l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
if (! l1)
or 3. the target function has __attribute__ ((section))
However we do not generate a long call if the function:
-
+
d. has an __attribute__ ((short_call))
or e. is inside the scope of a #pragma no_long_calls
or f. is defined within the current compilation unit.
-
+
This function will be called by C fragments contained in the machine
description file. SYM_REF and CALL_COOKIE correspond to the matched
rtl operands. CALL_SYMBOL is used to distinguish between
if (current_file_function_operand (sym_ref))
return 0;
-
+
return (call_cookie & CALL_LONG)
|| ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
|| TARGET_LONG_CALLS;
return false;
/* If we are interworking and the function is not declared static
- then we can't tail-call it unless we know that it exists in this
+ then we can't tail-call it unless we know that it exists in this
compilation unit (since it might be a Thumb routine). */
if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
return false;
emit_insn (gen_pic_load_addr_thumb (address, orig));
if ((GET_CODE (orig) == LABEL_REF
- || (GET_CODE (orig) == SYMBOL_REF &&
+ || (GET_CODE (orig) == SYMBOL_REF &&
SYMBOL_REF_LOCAL_P (orig)))
&& NEED_GOT_RELOC)
pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
-
+
if (TARGET_ARM)
{
emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
{
bool use_ldrd;
enum rtx_code code = GET_CODE (x);
-
+
if (arm_address_register_rtx_p (x, strict_p))
return 1;
}
/* Return nonzero if x is a legitimate Thumb-state address.
-
+
The AP may be eliminated to either the SP or the FP, so we use the
least common denominator, e.g. SImode, and offsets from 0 to 64.
case ASHIFT:
case ASHIFTRT:
case LSHIFTRT:
- case ROTATERT:
+ case ROTATERT:
case PLUS:
case MINUS:
case COMPARE:
case NEG:
- case NOT:
+ case NOT:
return COSTS_N_INSNS (1);
-
- case MULT:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- {
- int cycles = 0;
+
+ case MULT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ int cycles = 0;
unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
-
- while (i)
- {
- i >>= 2;
- cycles++;
- }
- return COSTS_N_INSNS (2) + cycles;
+
+ while (i)
+ {
+ i >>= 2;
+ cycles++;
+ }
+ return COSTS_N_INSNS (2) + cycles;
}
return COSTS_N_INSNS (1) + 16;
-
- case SET:
- return (COSTS_N_INSNS (1)
- + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
+
+ case SET:
+ return (COSTS_N_INSNS (1)
+ + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
+ GET_CODE (SET_DEST (x)) == MEM));
-
- case CONST_INT:
- if (outer == SET)
- {
- if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
- return 0;
- if (thumb_shiftable_const (INTVAL (x)))
- return COSTS_N_INSNS (2);
- return COSTS_N_INSNS (3);
- }
+
+ case CONST_INT:
+ if (outer == SET)
+ {
+ if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
+ return 0;
+ if (thumb_shiftable_const (INTVAL (x)))
+ return COSTS_N_INSNS (2);
+ return COSTS_N_INSNS (3);
+ }
else if ((outer == PLUS || outer == COMPARE)
- && INTVAL (x) < 256 && INTVAL (x) > -256)
+ && INTVAL (x) < 256 && INTVAL (x) > -256)
return 0;
else if (outer == AND
&& INTVAL (x) < 256 && INTVAL (x) >= -256)
return COSTS_N_INSNS (1);
- else if (outer == ASHIFT || outer == ASHIFTRT
- || outer == LSHIFTRT)
- return 0;
+ else if (outer == ASHIFT || outer == ASHIFTRT
+ || outer == LSHIFTRT)
+ return 0;
return COSTS_N_INSNS (2);
-
- case CONST:
- case CONST_DOUBLE:
- case LABEL_REF:
- case SYMBOL_REF:
+
+ case CONST:
+ case CONST_DOUBLE:
+ case LABEL_REF:
+ case SYMBOL_REF:
return COSTS_N_INSNS (3);
-
+
case UDIV:
case UMOD:
case DIV:
case AND:
case XOR:
- case IOR:
+ case IOR:
/* XXX guess. */
return 8;
case QImode:
return (1 + (mode == DImode ? 4 : 0)
+ (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
-
+
case HImode:
return (4 + (mode == DImode ? 4 : 0)
+ (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
-
+
case SImode:
return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
-
+
default:
return 99;
}
-
+
default:
return 99;
}
case ASHIFT: case LSHIFTRT: case ASHIFTRT:
if (mode == DImode)
return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
- + ((GET_CODE (XEXP (x, 0)) == REG
+ + ((GET_CODE (XEXP (x, 0)) == REG
|| (GET_CODE (XEXP (x, 0)) == SUBREG
&& GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
? 0 : 8));
return 1;
/* Fall through */
- case PLUS:
+ case PLUS:
if (GET_MODE_CLASS (mode) == MODE_FLOAT)
return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ ((REG_OR_SUBREG_REG (XEXP (x, 1))
? 0 : 8));
/* Fall through */
- case AND: case XOR: case IOR:
+ case AND: case XOR: case IOR:
extra_cost = 0;
/* Normally the frame registers will be spilt into reg+const during
}
abort ();
- case CONST_INT:
- if (const_ok_for_arm (INTVAL (x)))
- return outer == SET ? 2 : -1;
- else if (outer == AND
- && const_ok_for_arm (~INTVAL (x)))
- return -1;
- else if ((outer == COMPARE
- || outer == PLUS || outer == MINUS)
- && const_ok_for_arm (-INTVAL (x)))
- return -1;
- else
+ case CONST_INT:
+ if (const_ok_for_arm (INTVAL (x)))
+ return outer == SET ? 2 : -1;
+ else if (outer == AND
+ && const_ok_for_arm (~INTVAL (x)))
+ return -1;
+ else if ((outer == COMPARE
+ || outer == PLUS || outer == MINUS)
+ && const_ok_for_arm (-INTVAL (x)))
+ return -1;
+ else
return 5;
-
- case CONST:
- case LABEL_REF:
- case SYMBOL_REF:
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
return 6;
-
- case CONST_DOUBLE:
+
+ case CONST_DOUBLE:
if (arm_const_double_rtx (x))
- return outer == SET ? 2 : -1;
- else if ((outer == COMPARE || outer == PLUS)
- && neg_const_double_rtx_ok_for_fpa (x))
- return -1;
+ return outer == SET ? 2 : -1;
+ else if ((outer == COMPARE || outer == PLUS)
+ && neg_const_double_rtx_ok_for_fpa (x))
+ return -1;
return 7;
-
+
default:
return 99;
}
switch (code)
{
case MEM:
- /* A memory access costs 1 insn if the mode is small, or the address is
+ /* A memory access costs 1 insn if the mode is small, or the address is
a single register, otherwise it costs one insn per word. */
if (REG_P (XEXP (x, 0)))
*total = COSTS_N_INSNS (1);
*total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
return false;
- case PLUS:
+ case PLUS:
if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
{
*total = COSTS_N_INSNS (1);
case HImode:
*total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
-
+
case SImode:
break;
return false;
- case CONST_INT:
- if (const_ok_for_arm (INTVAL (x)))
+ case CONST_INT:
+ if (const_ok_for_arm (INTVAL (x)))
*total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
else if (const_ok_for_arm (~INTVAL (x)))
*total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
else
*total = COSTS_N_INSNS (2);
return true;
-
- case CONST:
- case LABEL_REF:
- case SYMBOL_REF:
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
*total = COSTS_N_INSNS (2);
return true;
-
+
case CONST_DOUBLE:
*total = COSTS_N_INSNS (4);
return true;
*total = thumb_rtx_costs (x, code, outer_code);
return true;
}
-
+
switch (code)
{
case MULT:
int cost, const_ok = const_ok_for_arm (i);
int j, booth_unit_size;
- /* Tune as appropriate. */
+ /* Tune as appropriate. */
cost = const_ok ? 4 : 8;
booth_unit_size = 2;
for (j = 0; i && j < 32; j += booth_unit_size)
*total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
return true;
-
+
default:
*total = arm_rtx_costs_1 (x, code, outer_code);
return true;
*total = thumb_rtx_costs (x, code, outer_code);
return true;
}
-
+
switch (code)
{
case MULT:
*total = 8;
return true;
}
-
+
if (GET_MODE_CLASS (mode) == MODE_FLOAT
|| mode == DImode)
int cost, const_ok = const_ok_for_arm (i);
int j, booth_unit_size;
- /* Tune as appropriate. */
+ /* Tune as appropriate. */
cost = const_ok ? 4 : 8;
booth_unit_size = 8;
for (j = 0; i && j < 32; j += booth_unit_size)
*total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
return true;
-
+
default:
*total = arm_rtx_costs_1 (x, code, outer_code);
return true;
*total = thumb_rtx_costs (x, code, outer_code);
return true;
}
-
+
switch (code)
{
case MULT:
*total = 8;
return true;
}
-
+
if (GET_MODE_CLASS (mode) == MODE_FLOAT
|| mode == DImode)
*total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
return true;
-
+
default:
*total = arm_rtx_costs_1 (x, code, outer_code);
return true;
enum machine_mode mode = GET_MODE (x);
int nonreg_cost;
int cost;
-
+
if (TARGET_THUMB)
{
switch (code)
case MULT:
*total = COSTS_N_INSNS (3);
return true;
-
+
default:
*total = thumb_rtx_costs (x, code, outer_code);
return true;
}
}
-
+
switch (code)
{
case MULT:
*total = 3;
return true;
}
-
+
if (GET_MODE_CLASS (mode) == MODE_FLOAT)
{
*total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
+ (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
return true;
-
+
default:
*total = arm_rtx_costs_1 (x, code, outer_code);
return true;
{
rtx shifted_operand;
int opno;
-
+
/* Get the shifted operand. */
extract_insn (insn);
shifted_operand = recog_data.operand[shift_opnum];
rtx src_mem = XEXP (SET_SRC (i_pat), 0);
/* This is a load after a store, there is no conflict if the load reads
from a cached area. Assume that loads from the stack, and from the
- constant pool are cached, and that others will miss. This is a
+ constant pool are cached, and that others will miss. This is a
hack. */
-
+
if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
|| reg_mentioned_p (stack_pointer_rtx, src_mem)
|| reg_mentioned_p (frame_pointer_rtx, src_mem)
{
REAL_VALUE_TYPE r;
int i;
-
+
if (!fp_consts_inited)
init_fp_table ();
-
+
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
if (REAL_VALUE_MINUS_ZERO (r))
return 0;
{
REAL_VALUE_TYPE r;
int i;
-
+
if (!fp_consts_inited)
init_fp_table ();
-
+
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
r = REAL_VALUE_NEGATE (r);
if (REAL_VALUE_MINUS_ZERO (r))
return (GET_CODE (op) == REG
&& REGNO (op) < FIRST_PSEUDO_REGISTER);
}
-
+
/* An arm register operand. */
int
arm_general_register_operand (rtx op, enum machine_mode mode)
{
if (TARGET_THUMB)
return thumb_cmp_operand (op, mode);
-
+
return (s_register_operand (op, mode)
|| (GET_CODE (op) == CONST_INT
&& (const_ok_for_arm (INTVAL (op))
&& REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
}
-/* Similar to s_register_operand, but does not allow hard integer
+/* Similar to s_register_operand, but does not allow hard integer
registers. */
int
f_register_operand (rtx op, enum machine_mode mode)
if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
return FALSE;
-
+
if (GET_CODE (op) == SUBREG)
op = SUBREG_REG (op);
-
+
switch (GET_CODE (op))
{
case CONST_DOUBLE:
if (mode == VOIDmode)
{
mode = GET_MODE (x);
-
+
if (GET_MODE_CLASS (mode) != MODE_CC)
return FALSE;
}
if (mode == VOIDmode)
{
mode = GET_MODE (x);
-
+
if (GET_MODE_CLASS (mode) != MODE_CC)
return FALSE;
}
return 1;
fmt = GET_RTX_FORMAT (GET_CODE (x));
-
+
for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'E')
{
int val0 = 0, val1 = 0;
int reg0, reg1;
-
+
if (GET_CODE (XEXP (a, 0)) == PLUS)
{
reg0 = REGNO (XEXP (XEXP (a, 0), 0));
arith_adjacentmem pattern to output an overlong sequence. */
if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
return 0;
-
+
return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
}
return 0;
: REGNO (SUBREG_REG (operands[i])));
order[0] = 0;
}
- else
+ else
{
if (base_reg != (int) REGNO (reg))
/* Not addressed from the same base register. */
scratch register (one of the result regs) and then doing a load
multiple actually becomes slower (and no smaller in code size).
That is the transformation
-
+
ldr rd1, [rbase + offset]
ldr rd2, [rbase + offset + 4]
-
+
to
-
+
add rd1, rbase, offset
ldmia rd1, {rd1, rd2}
-
+
produces worse code -- '3 cycles + any stalls on rd2' instead of
'2 cycles + any stalls on rd2'. On ARMs with only one cache
access per cycle, the first sequence could never complete in less
/* Can't do it without setting up the offset, only do this if it takes
no more than one insn. */
- return (const_ok_for_arm (unsorted_offsets[order[0]])
+ return (const_ok_for_arm (unsorted_offsets[order[0]])
|| const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
}
abort ();
}
- sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
for (i = 1; i < nops; i++)
: REGNO (SUBREG_REG (operands[i])));
order[0] = 0;
}
- else
+ else
{
if (base_reg != (int) REGNO (reg))
/* Not addressed from the same base register. */
abort ();
}
- sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
for (i = 1; i < nops; i++)
if (arm_tune_xscale && count <= 2 && ! optimize_size)
{
rtx seq;
-
+
start_sequence ();
-
+
for (i = 0; i < count; i++)
{
addr = plus_constant (from, i * 4 * sign);
seq = get_insns ();
end_sequence ();
-
+
return seq;
}
if (arm_tune_xscale && count <= 2 && ! optimize_size)
{
rtx seq;
-
+
start_sequence ();
-
+
for (i = 0; i < count; i++)
{
addr = plus_constant (to, i * 4 * sign);
seq = get_insns ();
end_sequence ();
-
+
return seq;
}
emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
srcbase, &srcoffset));
else
- emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
+ emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
FALSE, srcbase, &srcoffset));
if (out_words_to_go)
dstbase, &dstoffset));
else if (out_words_to_go != 1)
emit_insn (arm_gen_store_multiple (0, out_words_to_go,
- dst, TRUE,
+ dst, TRUE,
(last_bytes == 0
? FALSE : TRUE),
dstbase, &dstoffset));
if (out_words_to_go)
{
rtx sreg;
-
+
mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
sreg = copy_to_reg (mem);
mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
emit_move_insn (mem, sreg);
in_words_to_go--;
-
+
if (in_words_to_go) /* Sanity check */
abort ();
}
emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
GEN_INT (8 * (4 - last_bytes))));
part_bytes_reg = tmp;
-
+
while (last_bytes)
{
mem = adjust_automodify_address (dstbase, QImode,
part_bytes_reg = tmp;
}
}
-
+
}
else
{
dstoffset += 2;
}
}
-
+
if (last_bytes)
{
mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
/* Select a dominance comparison mode if possible for a test of the general
form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
- COND_OR == DOM_CC_X_AND_Y => (X && Y)
+ COND_OR == DOM_CC_X_AND_Y => (X && Y)
COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
- COND_OR == DOM_CC_X_OR_Y => (X || Y)
+ COND_OR == DOM_CC_X_OR_Y => (X || Y)
In all cases OP will be either EQ or NE, but we don't need to know which
- here. If we are unable to support a dominance comparison we return
+ here. If we are unable to support a dominance comparison we return
CC mode. This will then fail to match for the RTL expressions that
generate this call. */
enum machine_mode
/* If the comparisons are not equal, and one doesn't dominate the other,
then we can't do this. */
- if (cond1 != cond2
+ if (cond1 != cond2
&& !comparison_dominates_p (cond1, cond2)
&& (swapped = 1, !comparison_dominates_p (cond2, cond1)))
return CCmode;
if (cond2 == NE)
return CC_DNEmode;
break;
-
+
case LTU:
if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
return CC_DLTUmode;
abort ();
}
}
-
+
/* A compare with a shifted operand. Because of canonicalization, the
comparison will have to be swapped when we emit the assembler. */
if (GET_MODE (y) == SImode && GET_CODE (y) == REG
|| GET_CODE (x) == ROTATERT))
return CC_SWPmode;
- /* This is a special case that is used by combine to allow a
+ /* This is a special case that is used by combine to allow a
comparison of a shifted byte load to be split into a zero-extend
followed by a comparison of the shifted integer (only valid for
equalities and unsigned inequalities). */
|| XEXP (x, 2) == const1_rtx)
&& COMPARISON_P (XEXP (x, 0))
&& COMPARISON_P (XEXP (x, 1)))
- return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
+ return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
INTVAL (XEXP (x, 2)));
/* Alternate canonicalizations of the above. These are somewhat cleaner. */
plus_constant (base,
offset))));
emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
- gen_rtx_MEM (QImode,
+ gen_rtx_MEM (QImode,
plus_constant (base,
offset + 1))));
if (!BYTES_BIG_ENDIAN)
emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
- gen_rtx_IOR (SImode,
+ gen_rtx_IOR (SImode,
gen_rtx_ASHIFT
(SImode,
gen_rtx_SUBREG (SImode, operands[0], 0),
scratch)));
else
emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
- gen_rtx_IOR (SImode,
+ gen_rtx_IOR (SImode,
gen_rtx_ASHIFT (SImode, scratch,
GEN_INT (8)),
gen_rtx_SUBREG (SImode, operands[0],
if (BYTES_BIG_ENDIAN)
{
- emit_insn (gen_movqi (gen_rtx_MEM (QImode,
+ emit_insn (gen_movqi (gen_rtx_MEM (QImode,
plus_constant (base, offset + 1)),
gen_lowpart (QImode, outval)));
emit_insn (gen_lshrsi3 (scratch,
is_jump_table (rtx insn)
{
rtx table;
-
+
if (GET_CODE (insn) == JUMP_INSN
&& JUMP_LABEL (insn) != NULL
&& ((table = next_real_insn (JUMP_LABEL (insn)))
mp->next = max_mp;
mp->prev = max_mp->prev;
max_mp->prev = mp;
-
+
if (mp->prev != NULL)
mp->prev->next = mp;
else
Mnode * max_mp = NULL;
HOST_WIDE_INT max_address = fix->address + fix->forwards;
Mnode * mp;
-
+
/* If this fix's address is greater than the address of the first
entry, then we can't put the fix in this pool. We subtract the
size of the current fix to ensure that if the table is fully
}
return min_mp;
-}
+}
/* Add a constant to the minipool for a backward reference. Returns the
- node added or NULL if the constant will not fit in this pool.
+ node added or NULL if the constant will not fit in this pool.
Note that the code for insertion for a backwards reference can be
somewhat confusing because the calculated offsets for each fix do
&& rtx_equal_p (fix->value, mp->value)
/* Check that there is enough slack to move this entry to the
end of the table (this is conservative). */
- && (mp->max_address
- > (minipool_barrier->address
+ && (mp->max_address
+ > (minipool_barrier->address
+ minipool_vector_tail->offset
+ minipool_vector_tail->fix_size)))
{
mp->next = min_mp->next;
mp->prev = min_mp;
min_mp->next = mp;
-
+
if (mp->next != NULL)
mp->next->prev = mp;
else
for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
{
mp->offset = offset;
-
+
if (mp->refcount > 0)
offset += mp->fix_size;
}
{
if (dump_file)
{
- fprintf (dump_file,
+ fprintf (dump_file,
";; Offset %u, min %ld, max %ld ",
(unsigned) mp->offset, (unsigned long) mp->min_address,
(unsigned long) mp->max_address);
case CODE_LABEL:
/* It will always be better to place the table before the label, rather
than after it. */
- return 50;
+ return 50;
case INSN:
case CALL_INSN:
}
new_cost = arm_barrier_cost (from);
-
+
if (count < max_count && new_cost <= selected_cost)
{
selected = from;
fprintf (dump_file,
";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
GET_MODE_NAME (mode),
- INSN_UID (insn), (unsigned long) address,
+ INSN_UID (insn), (unsigned long) address,
-1 * (long)fix->backwards, (long)fix->forwards);
arm_print_value (dump_file, fix->value);
fprintf (dump_file, "\n");
/* Add it to the chain of fixes. */
fix->next = NULL;
-
+
if (minipool_fix_head != NULL)
minipool_fix_tail->next = fix;
else
/* Casting the address of something to a mode narrower
than a word can cause avoid_constant_pool_reference()
to return the pool reference itself. That's no good to
- us here. Lets just hope that we can use the
+ us here. Lets just hope that we can use the
constant pool value directly. */
if (op == cop)
cop = get_pool_constant (XEXP (op, 0));
}
fix = minipool_fix_head;
-
+
/* Now scan the fixups and perform the required changes. */
while (fix)
{
the next mini-pool. */
if (last_barrier != NULL)
{
- /* Reduce the refcount for those fixes that won't go into this
+ /* Reduce the refcount for those fixes that won't go into this
pool after all. */
for (fdel = last_barrier->next;
fdel && fdel != ftmp;
if (GET_CODE (this_fix->insn) != BARRIER)
{
rtx addr
- = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
+ = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
minipool_vector_label),
this_fix->minipool->offset);
*this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
{
REAL_VALUE_TYPE r;
int i;
-
+
if (!fp_consts_inited)
init_fp_table ();
-
+
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
for (i = 0; i < 8; i++)
if (REAL_VALUES_EQUAL (r, values_fp[i]))
fputc ('\t', stream);
asm_fprintf (stream, instr, reg);
fputs (", {", stream);
-
+
for (i = 0; i <= LAST_ARM_REGNUM; i++)
if (mask & (1 << i))
{
if (not_first)
fprintf (stream, ", ");
-
+
asm_fprintf (stream, "%r", i);
not_first = TRUE;
}
operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
output_asm_insn ("mov%?\t%0, %|lr", operands);
}
-
+
output_asm_insn ("mov%?\t%|lr, %|pc", operands);
-
+
if (TARGET_INTERWORK || arm_arch4t)
output_asm_insn ("bx%?\t%0", operands);
else
output_asm_insn ("mov%?\t%|pc, %0", operands);
-
+
return "";
}
ops[0] = gen_rtx_REG (SImode, arm_reg0);
ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
-
+
output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
-
+
return "";
}
if (arm_reg0 == IP_REGNUM)
abort ();
-
+
ops[0] = gen_rtx_REG (SImode, arm_reg0);
ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
int reg0 = REGNO (operands[0]);
otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
-
+
if (code1 == REG)
{
int reg1 = REGNO (operands[1]);
operands[1] = GEN_INT (hint);
break;
-
+
default:
abort ();
}
otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
}
-
+
output_mov_immediate (operands);
output_mov_immediate (otherops);
}
avoid a conflict. */
otherops[1] = XEXP (XEXP (operands[1], 0), 1);
otherops[2] = XEXP (XEXP (operands[1], 0), 0);
-
+
}
/* If both registers conflict, it will usually
have been fixed by a splitter. */
}
else
output_asm_insn ("sub%?\t%0, %1, %2", otherops);
-
+
return "ldm%?ia\t%0, %M0";
}
else
}
}
}
-
+
return "";
}
shift. >=32 is not a valid shift for "asl", so we must try and
output a shift that produces the correct arithmetical result.
Using lsr #32 is identical except for the fact that the carry bit
- is not set correctly if we set the flags; but we never use the
+ is not set correctly if we set the flags; but we never use the
carry bit from such an operation, so we can ignore that. */
if (code == ROTATERT)
/* Rotate is just modulo 32. */
/* Shifts of 0 are no-ops. */
if (*amountp == 0)
return NULL;
- }
+ }
return mnem;
}
int len_so_far = 0;
fputs ("\t.ascii\t\"", stream);
-
+
for (i = 0; i < len; i++)
{
int c = p[i];
switch (c)
{
- case TARGET_TAB:
+ case TARGET_TAB:
fputs ("\\t", stream);
- len_so_far += 2;
+ len_so_far += 2;
break;
-
+
case TARGET_FF:
fputs ("\\f", stream);
len_so_far += 2;
break;
-
+
case TARGET_BS:
fputs ("\\b", stream);
len_so_far += 2;
break;
-
+
case TARGET_CR:
fputs ("\\r", stream);
len_so_far += 2;
break;
-
+
case TARGET_NEWLINE:
fputs ("\\n", stream);
c = p [i + 1];
else
len_so_far += 2;
break;
-
+
case '\"':
case '\\':
putc ('\\', stream);
max_reg = 7;
else
max_reg = 12;
-
+
for (reg = 0; reg <= max_reg; reg++)
if (regs_ever_live[reg]
|| (! current_function_is_leaf && call_used_regs [reg]))
/* If we aren't loading the PIC register,
don't stack it even though it may be live. */
if (flag_pic
- && ! TARGET_SINGLE_PIC_BASE
+ && ! TARGET_SINGLE_PIC_BASE
&& regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
}
if (current_function_calls_eh_return)
{
unsigned int i;
-
+
for (i = 0; ; i++)
{
reg = EH_RETURN_DATA_REGNO (i);
if (really_return)
{
rtx ops[2];
-
+
/* Otherwise, trap an attempted return by aborting. */
ops[0] = operand;
- ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
+ ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
: "abort");
assemble_external_libcall (ops[1]);
output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
}
-
+
return "";
}
{
const char * return_reg;
- /* If we do not have any special requirements for function exit
- (eg interworking, or ISR) then we can load the return address
+ /* If we do not have any special requirements for function exit
+ (eg interworking, or ISR) then we can load the return address
directly into the PC. Otherwise we must load it into LR. */
if (really_return
&& ! TARGET_INTERWORK)
}
if (reg <= LAST_ARM_REGNUM
&& (reg != LR_REGNUM
- || ! really_return
+ || ! really_return
|| ! IS_INTERRUPT (func_type)))
{
- sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
+ sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
(reg == LR_REGNUM) ? return_reg : reg_names[reg]);
}
else
memcpy (p + 2, reg_names[reg], l);
p += l + 2;
}
-
+
if (live_regs_mask & (1 << LR_REGNUM))
{
sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
default:
/* Use bx if it's available. */
if (arm_arch5 || arm_arch4t)
- sprintf (instr, "bx%s\t%%|lr", conditional);
+ sprintf (instr, "bx%s\t%%|lr", conditional);
else
sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
break;
length = strlen (name) + 1;
alignlength = ROUND_UP_WORD (length);
-
+
ASM_OUTPUT_ASCII (stream, name, length);
ASM_OUTPUT_ALIGN (stream, 2);
x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
thumb_output_function_prologue (f, frame_size);
return;
}
-
+
/* Sanity check. */
if (arm_ccfsm_state || arm_target_insn)
abort ();
func_type = arm_current_func_type ();
-
+
switch ((int) ARM_FUNC_TYPE (func_type))
{
default:
asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
break;
}
-
+
if (IS_NAKED (func_type))
asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
if (IS_NESTED (func_type))
asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
-
+
asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
current_function_args_size,
current_function_pretend_args_size, frame_size);
asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
#endif
- return_used_this_function = 0;
+ return_used_this_function = 0;
}
const char *
int reg;
unsigned long saved_regs_mask;
unsigned long func_type;
- /* Floats_offset is the offset from the "virtual" frame. In an APCS
+ /* Floats_offset is the offset from the "virtual" frame. In an APCS
frame that is $fp + 4 for a non-variadic function. */
int floats_offset = 0;
rtx operands[3];
if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
{
rtx op;
-
+
/* A volatile function should never return. Call abort. */
op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
assemble_external_libcall (op);
output_asm_insn ("bl\t%a0", &op);
-
+
return "";
}
/* If we are throwing an exception, then we really must
be doing a return, so we can't tail-call. */
abort ();
-
+
offsets = arm_get_frame_offsets ();
saved_regs_mask = arm_compute_save_reg_mask ();
for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
if (saved_regs_mask & (1 << reg))
floats_offset += 4;
-
+
if (frame_pointer_needed)
{
/* This variable is for the Virtual Frame Pointer, not VFP regs. */
if (regs_ever_live[reg] && !call_used_regs[reg])
{
floats_offset += 12;
- asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
+ asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
reg, FP_REGNUM, floats_offset - vfp_offset);
}
}
if (regs_ever_live[reg] && !call_used_regs[reg])
{
floats_offset += 12;
-
+
/* We can't unstack more than four registers at once. */
if (start_reg - reg == 3)
{
We can ignore floats_offset since that was already included in
the live_regs_mask. */
lrm_count += (lrm_count % 2 ? 2 : 1);
-
+
for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
if (regs_ever_live[reg] && !call_used_regs[reg])
{
- asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
+ asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
reg, FP_REGNUM, lrm_count * 4);
- lrm_count += 2;
+ lrm_count += 2;
}
}
asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
start_reg, reg - start_reg,
SP_REGNUM);
-
+
start_reg = reg + 1;
}
}
/* Stack adjustment for exception handler. */
if (current_function_calls_eh_return)
- asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
+ asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
ARM_EH_STACKADJ_REGNUM);
/* Generate the return instruction. */
by the push_multi pattern in the arm.md file. The insn looks
something like this:
- (parallel [
+ (parallel [
(set (mem:BLK (pre_dec:BLK (reg:SI sp)))
(unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
(use (reg:SI 11 fp))
stack decrement per instruction. The RTL we generate for the note looks
something like this:
- (sequence [
+ (sequence [
(set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
(set (mem:SI (reg:SI sp)) (reg:SI r4))
(set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
This sequence is used both by the code to support stack unwinding for
exceptions handlers and the code to generate dwarf2 frame debugging. */
-
+
par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
dwarf_par_index = 1;
}
par = emit_insn (par);
-
+
tmp = gen_rtx_SET (SImode,
stack_pointer_rtx,
gen_rtx_PLUS (SImode,
GEN_INT (-4 * num_regs)));
RTX_FRAME_RELATED_P (tmp) = 1;
XVECEXP (dwarf, 0, 0) = tmp;
-
+
REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
REG_NOTES (par));
return par;
reg = gen_rtx_REG (XFmode, base_reg++);
XVECEXP (par, 0, 0)
- = gen_rtx_SET (VOIDmode,
+ = gen_rtx_SET (VOIDmode,
gen_rtx_MEM (BLKmode,
gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
gen_rtx_UNSPEC (BLKmode,
gen_rtvec (1, reg),
UNSPEC_PUSH_MULT));
- tmp = gen_rtx_SET (VOIDmode,
+ tmp = gen_rtx_SET (VOIDmode,
gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
RTX_FRAME_RELATED_P (tmp) = 1;
- XVECEXP (dwarf, 0, 1) = tmp;
-
+ XVECEXP (dwarf, 0, 1) = tmp;
+
for (i = 1; i < count; i++)
{
reg = gen_rtx_REG (XFmode, base_reg++);
XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
- tmp = gen_rtx_SET (VOIDmode,
+ tmp = gen_rtx_SET (VOIDmode,
gen_rtx_MEM (XFmode,
plus_constant (stack_pointer_rtx,
i * 12)),
reg);
RTX_FRAME_RELATED_P (tmp) = 1;
- XVECEXP (dwarf, 0, i + 1) = tmp;
+ XVECEXP (dwarf, 0, i + 1) = tmp;
}
tmp = gen_rtx_SET (VOIDmode,
HOST_WIDE_INT frame_size;
offsets = &cfun->machine->stack_offsets;
-
+
/* We need to know if we are a leaf function. Unfortunately, it
is possible to be called after start_sequence has been called,
which causes get_insns to return the insns for the sequence,
/* Make a copy of c_f_p_a_s as we may need to modify it locally. */
args_to_push = current_function_pretend_args_size;
-
+
/* Compute which register we will have to save onto the stack. */
live_regs_mask = arm_compute_save_reg_mask ();
To get around this need to find somewhere to store IP
whilst the frame is being created. We try the following
places in order:
-
+
1. The last argument register.
2. A slot on the stack above the frame. (This only
works if the function is not a varargs function).
((0xf0 >> (args_to_push / 4)) & 0xf);
else
insn = emit_insn
- (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
GEN_INT (- args_to_push)));
RTX_FRAME_RELATED_P (insn) = 1;
}
else
insn = gen_movsi (ip_rtx, stack_pointer_rtx);
-
+
insn = emit_insn (insn);
RTX_FRAME_RELATED_P (insn) = 1;
}
((0xf0 >> (args_to_push / 4)) & 0xf);
else
insn = emit_insn
- (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
GEN_INT (- args_to_push)));
RTX_FRAME_RELATED_P (insn) = 1;
}
if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
&& (live_regs_mask & (1 << LR_REGNUM)) != 0
&& ! frame_pointer_needed)
- emit_insn (gen_rtx_SET (SImode,
+ emit_insn (gen_rtx_SET (SImode,
gen_rtx_REG (SImode, LR_REGNUM),
gen_rtx_PLUS (SImode,
gen_rtx_REG (SImode, LR_REGNUM),
insn = GEN_INT (-(4 + args_to_push + fp_offset));
insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
RTX_FRAME_RELATED_P (insn) = 1;
-
+
if (IS_NESTED (func_type))
{
/* Recover the static chain register. */
case '_':
fputs (user_label_prefix, stream);
return;
-
+
case '|':
fputs (REGISTER_PREFIX, stream);
return;
return;
/* An explanation of the 'Q', 'R' and 'H' register operands:
-
+
In a pair of registers containing a DI or DF value the 'Q'
operand returns the register number of the register containing
the least significant part of the value. The 'R' operand returns
the register number of the register containing the most
significant part of the value.
-
+
The 'H' operand returns the higher of the two register numbers.
On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
same as the 'Q' operand, since the most significant part of the
value is held in the lower number register. The reverse is true
on systems where WORDS_BIG_ENDIAN is false.
-
+
The purpose of these operands is to distinguish between cases
where the endian-ness of the values is important (for example
when they are added together), and cases where the endian-ness
return;
case 'm':
- asm_fprintf (stream, "%r",
+ asm_fprintf (stream, "%r",
GET_CODE (XEXP (x, 0)) == REG
? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
return;
/* CONST_TRUE_RTX means always -- that's the default. */
if (x == const_true_rtx)
return;
-
+
fputs (arm_condition_codes[get_arm_condition_code (x)],
stream);
return;
"wCGR0", "wCGR1", "wCGR2", "wCGR3",
"wC12", "wC13", "wC14", "wC15"
};
-
+
fprintf (stream, wc_reg_names [INTVAL (x)]);
}
return;
return true;
}
- if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
+ if (arm_vector_mode_supported_p (GET_MODE (x)))
{
int i, units;
case GEU: return ARM_CC;
default: abort ();
}
-
+
case CCmode:
switch (comp_code)
{
means that we have to grub around within the jump expression to find
out what the conditions are when the jump isn't taken. */
int jump_clobbers = 0;
-
+
/* If we start with a return insn, we only succeed if we find another one. */
int seeking_return = 0;
-
+
/* START_INSN will hold the insn from where we start looking. This is the
first insn after the following code_label if REVERSE is true. */
rtx start_insn = insn;
if (GET_CODE (insn) != JUMP_INSN)
return;
- /* This jump might be paralleled with a clobber of the condition codes
+ /* This jump might be paralleled with a clobber of the condition codes
the jump should always come first */
if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
body = XVECEXP (body, 0, 0);
int then_not_else = TRUE;
rtx this_insn = start_insn, label = 0;
- /* If the jump cannot be done with one instruction, we cannot
+ /* If the jump cannot be done with one instruction, we cannot
conditionally execute the instruction in the inverse case. */
if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
{
jump_clobbers = 1;
return;
}
-
+
/* Register the insn jumped to. */
if (reverse)
{
case BARRIER:
/* Succeed if the following insn is the target label.
- Otherwise fail.
- If return insns are used then the last insn in a function
+ Otherwise fail.
+ If return insns are used then the last insn in a function
will be a barrier. */
this_insn = next_nonnote_insn (this_insn);
if (this_insn && this_insn == label)
{
if (reverse)
abort ();
- arm_current_cc =
+ arm_current_cc =
get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
0), 0), 1));
if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
if (reverse || then_not_else)
arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
}
-
+
/* Restore recog_data (getting the attributes of other insns can
destroy this array, but final.c assumes that it remains intact
across this call; since the insn has been recognized already we
{
if (GET_MODE_CLASS (mode) == MODE_CC)
return regno == CC_REGNUM || regno == VFPCC_REGNUM;
-
+
if (TARGET_THUMB)
/* For the Thumb we only allow values bigger than SImode in
registers 0 - 6, so that there is always a second low
|| regno == FRAME_POINTER_REGNUM
|| regno == ARG_POINTER_REGNUM)
return GENERAL_REGS;
-
+
if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
return NO_REGS;
an offset of 0 is correct. */
if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
return 0;
-
+
/* If we are using the stack pointer to point at the
argument, then an offset of 0 is correct. */
if ((TARGET_THUMB || !frame_pointer_needed)
&& REGNO (addr) == SP_REGNUM)
return 0;
-
+
/* Oh dear. The argument is pointed to by a register rather
than being held in a register, or being stored at a known
offset from the frame pointer. Since GDB only understands
looking to see where this register gets its value. If the
register is initialized from the frame pointer plus an offset
then we are in luck and we can continue, otherwise we give up.
-
+
This code is exercised by producing debugging information
for a function with arguments like this:
-
+
double func (double a, double b, int c, double d) {return d;}
-
+
Without this code the stab for parameter 'd' will be set to
an offset of 0 from the frame pointer, rather than 8. */
a constant integer
then... */
-
+
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
- if ( GET_CODE (insn) == INSN
+ if ( GET_CODE (insn) == INSN
&& GET_CODE (PATTERN (insn)) == SET
&& REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
&& GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
)
{
value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
-
+
break;
}
}
-
+
if (value == 0)
{
debug_rtx (addr);
#define IWMMXT_BUILTIN2(code, builtin) \
{ FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
-
+
IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
return 0;
emit_insn (pat);
return target;
-
+
case ARM_BUILTIN_WZERO:
target = gen_reg_rtx (DImode);
emit_insn (gen_iwmmxt_clrdi (target));
for (; block; block = BLOCK_CHAIN (block))
{
tree sym;
-
+
if (!TREE_USED (block))
continue;
SET_DECL_RTL (sym, new);
}
-
+
replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
}
}
/* If we have any popping registers left over, remove them. */
if (available > 0)
regs_available_for_popping &= ~available;
-
+
/* Otherwise if we need another popping register we can use
the fourth argument register. */
else if (pops_needed)
/* Register a4 is being used to hold part of the return value,
but we have dire need of a free, low register. */
restore_a4 = TRUE;
-
+
asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
}
-
+
if (reg_containing_return_addr != LAST_ARG_REGNUM)
{
/* The fourth argument register is available. */
regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
-
+
--pops_needed;
}
}
{
/* The return address was popped into the lowest numbered register. */
regs_to_pop &= ~(1 << LR_REGNUM);
-
+
reg_containing_return_addr =
number_of_first_bit_set (regs_available_for_popping);
if (regs_available_for_popping)
{
int frame_pointer;
-
+
/* Work out which register currently contains the frame pointer. */
frame_pointer = number_of_first_bit_set (regs_available_for_popping);
/* (Temporarily) remove it from the mask of popped registers. */
regs_available_for_popping &= ~(1 << frame_pointer);
regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
-
+
if (regs_available_for_popping)
{
int stack_pointer;
-
+
/* We popped the stack pointer as well,
find the register that contains it. */
stack_pointer = number_of_first_bit_set (regs_available_for_popping);
/* Move it into the stack register. */
asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
-
+
/* At this point we have popped all necessary registers, so
do not worry about restoring regs_available_for_popping
to its correct value:
regs_available_for_popping |= (1 << frame_pointer);
}
}
-
+
/* If we still have registers left on the stack, but we no longer have
any registers into which we can pop them, then we must move the return
address into the link register and make available the register that
if (regs_available_for_popping == 0 && pops_needed > 0)
{
regs_available_for_popping |= 1 << reg_containing_return_addr;
-
+
asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
reg_containing_return_addr);
-
+
reg_containing_return_addr = LR_REGNUM;
}
{
int popped_into;
int move_to;
-
+
thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
regs_available_for_popping);
--pops_needed;
}
-
+
/* If we still have not popped everything then we must have only
had one register available to us and we are now popping the SP. */
if (pops_needed > 0)
{
int popped_into;
-
+
thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
regs_available_for_popping);
asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
reg_containing_return_addr = LR_REGNUM;
}
-
+
asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
}
thumb_exit (f, -1);
return;
}
-
+
fprintf (f, "\t%s\t{", push ? "push" : "pop");
/* Look at the low registers first. */
if (lo_mask & 1)
{
asm_fprintf (f, "%r", regno);
-
+
if ((lo_mask & ~1) != 0)
fprintf (f, ", ");
pushed_words++;
}
}
-
+
if (push && (mask & (1 << LR_REGNUM)))
{
/* Catch pushing the LR. */
if (mask & 0xFF)
fprintf (f, ", ");
-
+
asm_fprintf (f, "%r", LR_REGNUM);
pushed_words++;
{
if (mask & 0xFF)
fprintf (f, ", ");
-
+
asm_fprintf (f, "%r", PC_REGNUM);
}
}
-
+
fprintf (f, "}\n");
if (push && pushed_words && dwarf2out_do_frame ())
if (val == 0) /* XXX */
return 0;
-
+
for (i = 0; i < 25; i++)
if ((val & (mask << i)) == val)
return 1;
/* This test is only important for leaf functions. */
/* assert (!leaf_function_p ()); */
-
+
/* If we have already decided that far jumps may be used,
do not bother checking again, and always return true even if
it turns out that they are not being used. Once we have made
return 1;
}
}
-
+
return 0;
}
if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
return TRUE;
-#ifdef ARM_PE
+#ifdef ARM_PE
return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
#else
return FALSE;
mov r6, r8
push {r6, r7}
as part of the prolog. We have to undo that pushing here. */
-
+
if (high_regs_pushed)
{
int mask = live_regs_mask & 0xff;
high registers! */
internal_error
("no low registers available for popping high registers");
-
+
for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
if (live_regs_mask & (1 << next_hi_reg))
break;
{
asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
regno);
-
+
for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
if (live_regs_mask & (1 << next_hi_reg))
break;
if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
{
- /* Pop the return address into the PC. */
+ /* Pop the return address into the PC. */
if (had_to_push_lr)
live_regs_mask |= 1 << PC_REGNUM;
}
else
regno = LR_REGNUM;
-
+
/* Remove the argument registers that were pushed onto the stack. */
asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
SP_REGNUM, SP_REGNUM,
current_function_pretend_args_size);
-
+
thumb_exit (asm_out_file, regno);
}
struct machine_function *machine;
machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
-#if ARM_FT_UNKNOWN != 0
+#if ARM_FT_UNKNOWN != 0
machine->func_type = ARM_FT_UNKNOWN;
#endif
return machine;
unsigned long live_regs_mask;
func_type = arm_current_func_type ();
-
+
/* Naked functions don't have prologues. */
if (IS_NAKED (func_type))
return;
/* Restore the low register's original value. */
emit_insn (gen_movsi (reg, spare));
-
+
/* Emit a USE of the restored scratch register, so that flow
analysis will not consider the restore redundant. The
register won't be used again in this function and isn't
emit_insn (gen_stack_tie (stack_pointer_rtx,
hard_frame_pointer_rtx));
}
-
+
if (current_function_profile || TARGET_NO_SCHED_PRO)
emit_insn (gen_blockage ());
emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
}
}
-
+
/* Emit a USE (stack_pointer_rtx), so that
the stack adjustment will not be deleted. */
emit_insn (gen_prologue_use (stack_pointer_rtx));
if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
abort ();
name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
-
+
/* Generate code sequence to switch us into Thumb mode. */
/* The .code 32 directive has already been emitted by
ASM_DECLARE_FUNCTION_NAME. */
is called from a Thumb encoded function elsewhere in the
same file. Hence the definition of STUB_NAME here must
agree with the definition in gas/config/tc-arm.c. */
-
+
#define STUB_NAME ".real_start_of"
-
+
fprintf (f, "\t.code\t16\n");
#ifdef ARM_PE
if (arm_dllexport_name_p (name))
name = arm_strip_name_encoding (name);
-#endif
+#endif
asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
fprintf (f, "\t.thumb_func\n");
asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
}
-
+
if (current_function_pretend_args_size)
{
if (cfun->machine->uses_anonymous_args)
{
int num_pushes;
-
+
fprintf (f, "\tpush\t{");
num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
-
+
for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
regno <= LAST_ARG_REGNUM;
regno++)
fprintf (f, "}\n");
}
else
- asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
+ asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
SP_REGNUM, SP_REGNUM,
current_function_pretend_args_size);
{
int offset;
int work_register;
-
+
/* We have been asked to create a stack backtrace structure.
The code looks like this:
-
+
0 .align 2
0 func:
0 sub SP, #16 Reserve space for 4 registers.
22 mov FP, R7 Put this value into the frame pointer. */
work_register = thumb_find_work_register (live_regs_mask);
-
+
asm_fprintf
(f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
SP_REGNUM, SP_REGNUM);
}
else
offset = 0;
-
+
asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
offset + 16 + current_function_pretend_args_size);
-
+
asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
offset + 4);
asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
offset + 12);
}
-
+
asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
offset + 8);
if (pushable_regs & (1 << regno))
{
asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
-
+
high_regs_pushed--;
real_regs_mask |= (1 << next_hi_reg);
-
+
if (high_regs_pushed)
{
for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
rtx offset;
rtx arg1;
rtx arg2;
-
+
if (GET_CODE (operands[0]) != REG)
abort ();
-
+
if (GET_CODE (operands[1]) != MEM)
abort ();
/* Get the memory address. */
addr = XEXP (operands[1], 0);
-
+
/* Work out how the memory address is computed. */
switch (GET_CODE (addr))
{
output_asm_insn ("ldr\t%H0, %2", operands);
}
break;
-
+
case CONST:
/* Compute <address> + 4 for the high order load. */
operands[2] = gen_rtx_MEM (SImode,
plus_constant (XEXP (operands[1], 0), 4));
-
+
output_asm_insn ("ldr\t%0, %1", operands);
output_asm_insn ("ldr\t%H0, %2", operands);
break;
-
+
case PLUS:
arg1 = XEXP (addr, 0);
arg2 = XEXP (addr, 1);
-
+
if (CONSTANT_P (arg1))
base = arg2, offset = arg1;
else
base = arg1, offset = arg2;
-
+
if (GET_CODE (base) != REG)
abort ();
int reg_offset = REGNO (offset);
int reg_base = REGNO (base);
int reg_dest = REGNO (operands[0]);
-
+
/* Add the base and offset registers together into the
higher destination register. */
asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
reg_dest + 1, reg_base, reg_offset);
-
+
/* Load the lower destination register from the address in
the higher destination register. */
asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
reg_dest, reg_dest + 1);
-
+
/* Load the higher destination register from its own address
plus 4. */
asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
/* Compute <address> + 4 for the high order load. */
operands[2] = gen_rtx_MEM (SImode,
plus_constant (XEXP (operands[1], 0), 4));
-
+
/* If the computed address is held in the low order register
then load the high order register first, otherwise always
load the low order register first. */
directly. */
operands[2] = gen_rtx_MEM (SImode,
plus_constant (XEXP (operands[1], 0), 4));
-
+
output_asm_insn ("ldr\t%H0, %2", operands);
output_asm_insn ("ldr\t%0, %1", operands);
break;
-
+
default:
abort ();
break;
}
-
+
return "";
}
operands[4] = operands[5];
operands[5] = tmp;
}
-
+
output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
break;
emit_insn (gen_movmem12b (out, in, out, in));
len -= 12;
}
-
+
if (len >= 8)
{
emit_insn (gen_movmem8b (out, in, out, in));
len -= 8;
}
-
+
if (len >= 4)
{
rtx reg = gen_reg_rtx (SImode);
len -= 4;
offset += 4;
}
-
+
if (len >= 2)
{
rtx reg = gen_reg_rtx (HImode);
- emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
+ emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
plus_constant (in, offset))));
emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
reg));
len -= 2;
offset += 2;
}
-
+
if (len)
{
rtx reg = gen_reg_rtx (QImode);
&& memory_operand (op, mode)));
}
-/* Handle storing a half-word to memory during reload. */
+/* Handle storing a half-word to memory during reload. */
void
thumb_reload_out_hi (rtx *operands)
{
emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}
-/* Handle reading a half-word from memory during reload. */
+/* Handle reading a half-word from memory during reload. */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
switch (c)
{
ARM_NAME_ENCODING_LENGTHS
- default: return 0;
+ default: return 0;
}
}
arm_strip_name_encoding (const char *name)
{
int skip;
-
+
while ((skip = arm_get_strip_length (* name)))
name += skip;
PIC_OFFSET_TABLE_REGNUM,
PIC_OFFSET_TABLE_REGNUM);
fputs ("|x$adcons|\n", f);
-
+
for (chain = aof_pic_chain; chain; chain = chain->next)
{
fputs ("\tDCD\t", f);
rtx offset;
rtx wcgr;
rtx sum;
-
+
if (GET_CODE (operands [1]) != MEM
|| GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
|| GET_CODE (reg = XEXP (sum, 0)) != REG
|| GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
|| ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
return "wldrw%?\t%0, %1";
-
- /* Fix up an out-of-range load of a GR register. */
+
+ /* Fix up an out-of-range load of a GR register. */
output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
wcgr = operands[0];
operands[0] = reg;
if (GET_CODE (addr) == PARALLEL)
addr = XVECEXP (addr, 0, 0);
addr = XEXP (addr, 0);
-
+
return !reg_overlap_mentioned_p (value, addr);
}
if (GET_CODE (op) == PARALLEL)
op = XVECEXP (op, 0, 0);
op = XEXP (op, 1);
-
+
early_op = XEXP (op, 0);
/* This is either an actual independent shift, or a shift applied to
the first operand of another operation. We want the whole shift
if (GET_CODE (op) == PARALLEL)
op = XVECEXP (op, 0, 0);
op = XEXP (op, 1);
-
+
early_op = XEXP (op, 0);
/* This is either an actual independent shift, or a shift applied to
shifted, in either case. */
if (GET_CODE (early_op) != REG)
early_op = XEXP (early_op, 0);
-
+
return !reg_overlap_mentioned_p (value, early_op);
}
if (GET_CODE (op) == PARALLEL)
op = XVECEXP (op, 0, 0);
op = XEXP (op, 1);
-
+
return (GET_CODE (op) == PLUS
&& !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}
offsets = arm_get_frame_offsets ();
delta = offsets->outgoing_args - (offsets->frame + 4);
-
+
if (delta >= 4096)
{
emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
delta -= 16;
/* The link register is always the first saved register. */
delta -= 4;
-
+
/* Construct the address. */
addr = gen_rtx_REG (SImode, reg);
if ((reg != SP_REGNUM && delta >= 128)
emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
}
+/* Implements target hook vector_mode_supported_p. */
+bool
+arm_vector_mode_supported_p (enum machine_mode mode)
+{
+ if ((mode == V2SImode)
+ || (mode == V4HImode)
+ || (mode == V8QImode))
+ return true;
+
+ return false;
+}
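The matching hook registration for arm.c is not visible in this excerpt; as a sketch only, assuming it follows the same pattern as the i386 and rs6000 registrations shown later in this patch, it would look like:

/* Sketch only -- mirrors the TARGET_VECTOR_MODE_SUPPORTED_P
   registrations visible in the i386 and rs6000 hunks below; the
   actual arm.c hunk is not reproduced here.  */
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p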
/* Nonzero if we need to protect the prolog from scheduling */
#define ARM_FLAG_NO_SCHED_PRO (1 << 12)
-/* Nonzero if a call to abort should be generated if a noreturn
+/* Nonzero if a call to abort should be generated if a noreturn
function tries to return. */
#define ARM_FLAG_ABORT_NORETURN (1 << 13)
/* Nonzero if all call instructions should be indirect. */
#define ARM_FLAG_LONG_CALLS (1 << 15)
-
+
/* Nonzero means that the target ISA is the THUMB, not the ARM. */
#define ARM_FLAG_THUMB (1 << 16)
etc., in addition to just the AAPCS calling conventions. */
#ifndef TARGET_BPABI
#define TARGET_BPABI false
-#endif
+#endif
/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
#ifndef SUBTARGET_SWITCHES
extern int arm_is_6_or_7;
/* Nonzero if we should define __THUMB_INTERWORK__ in the
- preprocessor.
+ preprocessor.
XXX This is a bit of a hack, it's intended to help work around
problems in GLD which doesn't understand that armv5t code is
interworking clean. */
/* Nonzero if we need to refer to the GOT with a PC-relative
offset. In other words, generate
- .word _GLOBAL_OFFSET_TABLE_ - [. - (.Lxx + 8)]
+ .word _GLOBAL_OFFSET_TABLE_ - [. - (.Lxx + 8)]
rather than
.word _GLOBAL_OFFSET_TABLE_ - (.Lxx + 8)
- The default is true, which matches NetBSD. Subtargets can
+ The default is true, which matches NetBSD. Subtargets can
override this if required. */
#ifndef GOT_PCREL
#define GOT_PCREL 1
in instructions that operate on numbered bit-fields. */
#define BITS_BIG_ENDIAN 0
-/* Define this if most significant byte of a word is the lowest numbered.
+/* Define this if most significant byte of a word is the lowest numbered.
Most ARM processors are run in little endian mode, so that is the default.
If you want to have it run-time selectable, change the definition in a
cover file to be TARGET_BIG_ENDIAN. */
/* Make strings word-aligned so strcpy from constants will be faster. */
#define CONSTANT_ALIGNMENT_FACTOR (TARGET_THUMB || ! arm_tune_xscale ? 1 : 2)
-
+
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
((TREE_CODE (EXP) == STRING_CST \
&& (ALIGN) < BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR) \
r4-r8 S register variable
r9 S (rfp) register variable (real frame pointer)
-
+
r10 F S (sl) stack limit (used by -mapcs-stack-check)
r11 F S (fp) argument pointer
r12 (ip) temp workspace
The latter must include the registers where values are returned
and the register where structure-value addresses are passed.
Aside from that, you can include as many other registers as you like.
- The CC is not preserved over function calls on the ARM 6, so it is
+ The CC is not preserved over function calls on the ARM 6, so it is
easier to assume this for all. SFP is preserved, since FP is. */
#define CALL_USED_REGISTERS \
{ \
} \
SUBTARGET_CONDITIONAL_REGISTER_USAGE \
}
-
+
/* These are a couple of extensions to the formats accepted
by asm_fprintf:
%@ prints out ASM_COMMENT_START
#define MUST_USE_SJLJ_EXCEPTIONS 1
/* We can generate DWARF2 Unwind info, even though we don't use it. */
#define DWARF2_UNWIND_INFO 1
-
+
/* Use r0 and r1 to pass exception handling information. */
#define EH_RETURN_DATA_REGNO(N) (((N) < 2) ? N : INVALID_REGNUM)
/* Value should be nonzero if functions must have frame pointers.
Zero means the frame pointer need not be set up (and parms may be accessed
- via the stack pointer) in functions that seem suitable.
+ via the stack pointer) in functions that seem suitable.
If we have to have a frame pointer we might as well make use of it.
APCS says that the frame pointer does not need to be pushed in leaf
functions, or simple tail call functions. */
#define MODES_TIEABLE_P(MODE1, MODE2) \
(GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
-#define VECTOR_MODE_SUPPORTED_P(MODE) \
- ((MODE) == V2SImode || (MODE) == V4HImode || (MODE) == V8QImode)
-
#define VALID_IWMMXT_REG_MODE(MODE) \
- (VECTOR_MODE_SUPPORTED_P (MODE) || (MODE) == DImode)
+ (arm_vector_mode_supported_p (MODE) || (MODE) == DImode)
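To make the rewritten macro concrete, an illustrative expansion (not part of the patch) using the arm_vector_mode_supported_p definition added above:

/* Illustration only:
   VALID_IWMMXT_REG_MODE (V4HImode)
     -> arm_vector_mode_supported_p (V4HImode) || V4HImode == DImode
     -> true, since V4HImode is one of the modes the new hook accepts;
   VALID_IWMMXT_REG_MODE (DImode) is likewise true via the DImode test.  */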
/* The order in which register should be allocated. It is good to use ip
since no saving is required (though calls clobber it) and it never contains
function parameters. It is quite good to use lr since other calls may
- clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
least likely to contain a function parameter; in addition results are
returned in r0. */
#define CLASS_LIKELY_SPILLED_P(CLASS) \
((TARGET_THUMB && (CLASS) == LO_REGS) \
|| (CLASS) == CC_REG)
-
+
/* The class value for index registers, and the one for base regs. */
#define INDEX_REG_CLASS (TARGET_THUMB ? LO_REGS : GENERAL_REGS)
#define BASE_REG_CLASS (TARGET_THUMB ? LO_REGS : GENERAL_REGS)
C is the letter, and VALUE is a constant value.
Return 1 if VALUE is in the range specified by C.
I: immediate arithmetic operand (i.e. 8 bits shifted as required).
- J: valid indexing constants.
+ J: valid indexing constants.
K: ~value ok in rhs argument of data operand.
- L: -value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
#define CONST_OK_FOR_ARM_LETTER(VALUE, C) \
((C) == 'I' ? const_ok_for_arm (VALUE) : \
#define CONST_OK_FOR_LETTER_P(VALUE, C) \
(TARGET_ARM ? \
CONST_OK_FOR_ARM_LETTER (VALUE, C) : CONST_OK_FOR_THUMB_LETTER (VALUE, C))
-
+
/* Constant letter 'G' for the FP immediate constants.
'H' means the same constant negated. */
#define CONST_DOUBLE_OK_FOR_ARM_LETTER(X, C) \
CONST_DOUBLE_OK_FOR_ARM_LETTER (X, C) : 0)
/* For the ARM, `Q' means that this is a memory operand that is just
- an offset from a register.
+ an offset from a register.
`S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
address. This means that the symbol is in the text segment and can be
accessed without using a load.
'U' Prefixes an extended memory constraint where:
- 'Uv' is an address valid for VFP load/store insns.
- 'Uy' is an address valid for iwmmxt load/store insns.
+ 'Uv' is an address valid for VFP load/store insns.
+ 'Uy' is an address valid for iwmmxt load/store insns.
'Uq' is an address valid for ldrsb. */
#define EXTRA_CONSTRAINT_STR_ARM(OP, C, STR) \
? (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
? GENERAL_REGS : NO_REGS) \
: THUMB_SECONDARY_OUTPUT_RELOAD_CLASS (CLASS, MODE, X))
-
+
/* If we need to load shorts byte-at-a-time, then we need a scratch. */
#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
/* Restrict which direct reloads are allowed for VFP regs. */ \
ARM_LEGITIMIZE_RELOAD_ADDRESS (X, MODE, OPNUM, TYPE, IND_LEVELS, WIN); \
else \
THUMB_LEGITIMIZE_RELOAD_ADDRESS (X, MODE, OPNUM, TYPE, IND_LEVELS, WIN)
-
+
/* Return the maximum number of consecutive registers
needed to represent mode MODE in a register of class CLASS.
ARM regs are UNITS_PER_WORD bits while FPA regs can hold any FP mode */
: TARGET_ARM && TARGET_HARD_FLOAT && TARGET_MAVERICK \
&& GET_MODE_CLASS (MODE) == MODE_FLOAT \
? gen_rtx_REG (MODE, FIRST_CIRRUS_FP_REGNUM) \
- : TARGET_IWMMXT_ABI && VECTOR_MODE_SUPPORTED_P (MODE) \
+ : TARGET_IWMMXT_ABI && arm_vector_mode_supported_p (MODE) \
? gen_rtx_REG (MODE, FIRST_IWMMXT_REGNUM) \
: gen_rtx_REG (MODE, ARG_REGISTER (1)))
/* For an arg passed partly in registers and partly in memory,
this is the number of registers used.
For args passed entirely in registers or entirely in memory, zero. */
-#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
- (VECTOR_MODE_SUPPORTED_P (MODE) ? 0 : \
- NUM_ARG_REGS > (CUM).nregs \
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ (arm_vector_mode_supported_p (MODE) ? 0 : \
+ NUM_ARG_REGS > (CUM).nregs \
&& (NUM_ARG_REGS < ((CUM).nregs + ARM_NUM_REGS2 (MODE, TYPE)) \
- && (CUM).can_split) \
+ && (CUM).can_split) \
? NUM_ARG_REGS - (CUM).nregs : 0)
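A worked example may help here; the figures below are hypothetical and assume the usual four ARM argument registers (NUM_ARG_REGS == 4):

/* Hypothetical example of the macro above.  Suppose a DImode argument
   arrives with (CUM).nregs == 3 and (CUM).can_split set:
     ARM_NUM_REGS2 (DImode, type) == 2, 4 > 3 and 4 < 3 + 2,
   so the macro yields 4 - 3 == 1: one word is passed in r3 and the
   remaining word goes on the stack.  For any mode accepted by
   arm_vector_mode_supported_p the result is 0, i.e. such an argument
   is never split between registers and memory.  */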
/* Initialize a variable CUM of type CUMULATIVE_ARGS
(TYPE is null for libcalls where that information may not be available.) */
#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
(CUM).nargs += 1; \
- if (VECTOR_MODE_SUPPORTED_P (MODE) \
+ if (arm_vector_mode_supported_p (MODE) \
&& (CUM).named_count > (CUM).nargs) \
(CUM).iwmmxt_nregs += 1; \
else \
/* Special case handling of the location of arguments passed on the stack. */
#define DEBUGGER_ARG_OFFSET(value, addr) value ? value : arm_debugger_arg_offset (value, addr)
-
+
/* Initialize data used by insn expanders. This is called from insn_emit,
once for every function before code is generated. */
#define INIT_EXPANDERS arm_init_expanders ()
ARM_TRAMPOLINE_TEMPLATE (FILE) \
else \
THUMB_TRAMPOLINE_TEMPLATE (FILE)
-
+
/* Length in units of the trampoline for entering a nested function. */
#define TRAMPOLINE_SIZE (TARGET_ARM ? 16 : 24)
On the ARM, allow any integer (invalid ones are removed later by insn
patterns), nice doubles and symbol_refs which refer to the function's
constant pool XXX.
-
+
When generating pic allow anything. */
#define ARM_LEGITIMATE_CONSTANT_P(X) (flag_pic || ! label_mentioned_p (X))
case SHORT_CALL_FLAG_CHAR: return 1; \
case LONG_CALL_FLAG_CHAR: return 1; \
case '*': return 1; \
- SUBTARGET_NAME_ENCODING_LENGTHS
+ SUBTARGET_NAME_ENCODING_LENGTHS
/* This is how to output a reference to a user-level label named NAME.
`assemble_name' uses this. */
that is a valid memory address for an instruction.
The MODE argument is the machine mode for the MEM expression
that wants to use this address. */
-
+
#define ARM_BASE_REGISTER_RTX_P(X) \
(GET_CODE (X) == REG && ARM_REG_OK_FOR_BASE_P (X))
if (TARGET_ARM) \
ARM_GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN) \
else /* if (TARGET_THUMB) */ \
- THUMB_GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN)
+ THUMB_GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN)
\f
/* Try machine-dependent ways of modifying an illegitimate address
if (memory_address_p (MODE, X)) \
goto WIN; \
} while (0)
-
+
/* Go to LABEL if ADDR (a legitimate address expression)
has an effect that depends on the machine mode it is used for. */
#define ARM_GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
/* Nothing helpful to do for the Thumb */
#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
if (TARGET_ARM) \
- ARM_GO_IF_MODE_DEPENDENT_ADDRESS (ADDR, LABEL)
+ ARM_GO_IF_MODE_DEPENDENT_ADDRESS (ADDR, LABEL)
\f
/* Specify the machine mode that this machine uses
#define SLOW_BYTE_ACCESS 0
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1
-
+
/* Immediate shift counts are truncated by the output routines (or was it
the assembler?). Shift counts in a register are truncated by ARM. Note
that the native compiler puts too large (> 32) immediate shift counts
(TARGET_ARM ? 10 : \
((GET_MODE_SIZE (M) < 4 ? 8 : 2 * GET_MODE_SIZE (M)) \
* (CLASS == LO_REGS ? 1 : 2)))
-
+
/* Try to generate sequences that don't involve branches, we can then use
conditional instructions */
#define BRANCH_COST \
#define RETURN_ADDR_RTX(COUNT, FRAME) \
arm_return_addr (COUNT, FRAME)
-/* Mask of the bits in the PC that contain the real return address
+/* Mask of the bits in the PC that contain the real return address
when running in 26-bit mode. */
#define RETURN_ADDR_MASK26 (0x03fffffc)
static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
tree, int *, int);
static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
+static bool ix86_vector_mode_supported_p (enum machine_mode);
static int ix86_address_cost (rtx);
static bool ix86_cannot_force_const_mem (rtx);
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
+
#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
return regparm;
}
-/* Return true if EAX is live at the start of the function. Used by
+/* Return true if EAX is live at the start of the function. Used by
ix86_expand_prologue to determine if we need special help before
calling allocate_stack_worker. */
t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
t = build2 (MODIFY_EXPR, void_type_node, addr, t);
gimplify_and_add (t, pre_p);
-
+
for (i = 0; i < XVECLEN (container, 0); i++)
{
rtx slot = XVECEXP (container, 0, i);
GEN_INT ((count >> (size == 4 ? 2 : 3))
& (TARGET_64BIT ? -1 : 0x3fffffff)));
countreg = ix86_zero_extend_to_Pmode (countreg);
-
+
destexp = gen_rtx_ASHIFT (Pmode, countreg,
GEN_INT (size == 4 ? 2 : 3));
srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
if (is_mulwiden)
op0 = XEXP (op0, 0), mode = GET_MODE (op0);
}
-
+
*total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
+ nbits * ix86_cost->mult_bit)
+ rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
if (TARGET_MACHO)
{
rtx sym_ref = XEXP (DECL_RTL (function), 0);
- tmp = (gen_rtx_SYMBOL_REF
- (Pmode,
+ tmp = (gen_rtx_SYMBOL_REF
+ (Pmode,
machopic_indirection_name (sym_ref, /*stub_p=*/true)));
tmp = gen_rtx_MEM (QImode, tmp);
xops[0] = tmp;
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
int n_elts = (GET_MODE_SIZE (mode) / elt_size);
int i;
-
+
for (i = n_elts - 1; i >= 0; i--)
if (GET_CODE (XVECEXP (vals, 0, i)) != CONST_INT
&& GET_CODE (XVECEXP (vals, 0, i)) != CONST_DOUBLE)
break;
- /* Few special cases first...
+ /* Few special cases first...
... constants are best loaded from constant pool. */
if (i < 0)
{
}
}
+/* Implements target hook vector_mode_supported_p. */
+static bool
+ix86_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_SSE
+ && VALID_SSE_REG_MODE (mode))
+ return true;
+
+ else if (TARGET_MMX
+ && VALID_MMX_REG_MODE (mode))
+ return true;
+
+ else if (TARGET_3DNOW
+ && VALID_MMX_REG_MODE_3DNOW (mode))
+ return true;
+
+ else
+ return false;
+}
+
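For orientation only: generic code reaches this predicate through the target vector rather than through the VECTOR_MODE_SUPPORTED_P macro deleted later in this patch; a minimal, hypothetical caller sketch:

/* Hypothetical caller -- not part of this patch.  */
static bool
example_vector_mode_ok_p (enum machine_mode mode)
{
  return targetm.vector_mode_supported_p (mode);
}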
/* Worker function for TARGET_MD_ASM_CLOBBERS.
We do this in the new i386 backend to maintain source compatibility
static tree
ix86_md_asm_clobbers (tree clobbers)
{
- clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
- clobbers);
- clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
- clobbers);
- clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
- clobbers);
+ clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
+ clobbers);
+ clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
+ clobbers);
+ clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
+ clobbers);
return clobbers;
}
{
emit_insn (gen_x86_sahf_1 (reg));
- temp = gen_rtx_REG (CCmode, FLAGS_REG);
+ temp = gen_rtx_REG (CCmode, FLAGS_REG);
temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
}
else
{
emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
- temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
+ temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
}
-
+
temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
gen_rtx_LABEL_REF (VOIDmode, label),
pc_rtx);
emit_label (label2);
}
-
+
#include "gt-i386.h"
const int lea; /* cost of a lea instruction */
const int shift_var; /* variable shift costs */
const int shift_const; /* constant shift costs */
- const int mult_init[5]; /* cost of starting a multiply
+ const int mult_init[5]; /* cost of starting a multiply
in QImode, HImode, SImode, DImode, TImode*/
const int mult_bit; /* cost of multiply per each bit set */
- const int divide[5]; /* cost of a divide/mod
+ const int divide[5]; /* cost of a divide/mod
in QImode, HImode, SImode, DImode, TImode*/
int movsx; /* The cost of movsx operation. */
int movzx; /* The cost of movzx operation. */
((MODE) == DImode || (MODE) == V8QImode || (MODE) == V4HImode \
|| (MODE) == V2SImode || (MODE) == SImode)
-#define VECTOR_MODE_SUPPORTED_P(MODE) \
- (VALID_SSE_REG_MODE (MODE) && TARGET_SSE ? 1 \
- : VALID_MMX_REG_MODE (MODE) && TARGET_MMX ? 1 \
- : VALID_MMX_REG_MODE_3DNOW (MODE) && TARGET_3DNOW ? 1 : 0)
-
#define UNITS_PER_SIMD_WORD \
(TARGET_SSE ? 16 : TARGET_MMX || TARGET_3DNOW ? 8 : 0)
|| ((CLASS) == FP_SECOND_REG))
/* Return a class of registers that cannot change FROM mode to TO mode.
-
+
x87 registers can't do subreg as all values are reformated to extended
precision. XMM registers does not support with nonzero offsets equal
to 4, 8 and 12 otherwise valid for integer registers. Since we can't
/* Subroutines used for code generation on IBM RS/6000.
- Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
/* Schedule instructions for group formation. */
static GTY(()) bool rs6000_sched_groups;
-/* Support adjust_priority scheduler hook
+/* Support adjust_priority scheduler hook
and -mprioritize-restricted-insns= option. */
const char *rs6000_sched_restricted_insns_priority_str;
int rs6000_sched_restricted_insns_priority;
static rtx altivec_expand_st_builtin (tree, rtx, bool *);
static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
-static rtx altivec_expand_predicate_builtin (enum insn_code,
+static rtx altivec_expand_predicate_builtin (enum insn_code,
const char *, tree, rtx);
static rtx altivec_expand_lv_builtin (enum insn_code, tree, rtx);
static rtx altivec_expand_stv_builtin (enum insn_code, tree);
static tree rs6000_build_builtin_va_list (void);
static tree rs6000_gimplify_va_arg (tree, tree, tree *, tree *);
static bool rs6000_must_pass_in_stack (enum machine_mode, tree);
+static bool rs6000_vector_mode_supported_p (enum machine_mode);
static enum machine_mode rs6000_eh_return_filter_mode (void);
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
-#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
+#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* This table occasionally claims that a processor does not support
a particular feature even though it does, but the feature is slower
than the alternative. Thus, it shouldn't be relied on as a
- complete description of the processor's support.
+ complete description of the processor's support.
Please keep this list in order, and don't forget to update the
documentation in invoke.texi when adding a new processor or
enum {
POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
- POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT
+ POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT
| MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
| MASK_MFCRF)
};
= (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);
if (rs6000_sched_costly_dep_str)
{
- if (! strcmp (rs6000_sched_costly_dep_str, "no"))
+ if (! strcmp (rs6000_sched_costly_dep_str, "no"))
rs6000_sched_costly_dep = no_dep_costly;
else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
rs6000_sched_costly_dep = all_deps_costly;
rs6000_sched_costly_dep = true_store_to_load_dep_costly;
else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
rs6000_sched_costly_dep = store_to_load_dep_costly;
- else
+ else
rs6000_sched_costly_dep = atoi (rs6000_sched_costly_dep_str);
}
/* Allocate an alias set for register saves & restores from stack. */
rs6000_sr_alias_set = new_alias_set ();
- if (TARGET_TOC)
+ if (TARGET_TOC)
ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
/* We can only guarantee the availability of DI pseudo-ops when
if (!TARGET_SPE_ABI)
error ("not configured for ABI: '%s'", rs6000_abi_string);
}
-
+
else if (! strcmp (rs6000_abi_string, "no-spe"))
rs6000_spe_abi = 0;
else
/* Returns 1 always. */
int
-any_operand (rtx op ATTRIBUTE_UNUSED,
+any_operand (rtx op ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED)
{
return 1;
/* Returns 1 always. */
int
-any_parallel_operand (rtx op ATTRIBUTE_UNUSED,
+any_parallel_operand (rtx op ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED)
{
return 1;
int
altivec_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
-
+
return (register_operand (op, mode)
&& (GET_CODE (op) != REG
|| REGNO (op) > FIRST_PSEUDO_REGISTER
{
return (register_operand (op, mode)
&& (GET_CODE (op) != REG
- || (REGNO (op) >= ARG_POINTER_REGNUM
+ || (REGNO (op) >= ARG_POINTER_REGNUM
&& !XER_REGNO_P (REGNO (op)))
|| REGNO (op) < MQ_REGNO));
}
return CONST_DOUBLE_HIGH (op) == 0;
}
- else
+ else
return gpc_reg_operand (op, mode);
}
static int
easy_vector_splat_const (int cst, enum machine_mode mode)
{
- switch (mode)
+ switch (mode)
{
case V4SImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
+ if (EASY_VECTOR_15 (cst)
+ || EASY_VECTOR_15_ADD_SELF (cst))
return cst;
if ((cst & 0xffff) != ((cst >> 16) & 0xffff))
break;
cst = cst >> 16;
case V8HImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
+ if (EASY_VECTOR_15 (cst)
+ || EASY_VECTOR_15_ADD_SELF (cst))
return cst;
if ((cst & 0xff) != ((cst >> 8) & 0xff))
break;
cst = cst >> 8;
case V16QImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
+ if (EASY_VECTOR_15 (cst)
+ || EASY_VECTOR_15_ADD_SELF (cst))
return cst;
- default:
+ default:
break;
}
return 0;
&& cst2 >= -0x7fff && cst2 <= 0x7fff)
return 1;
- if (TARGET_ALTIVEC
+ if (TARGET_ALTIVEC
&& easy_vector_same (op, mode))
{
cst = easy_vector_splat_const (cst, mode);
- if (EASY_VECTOR_15_ADD_SELF (cst)
+ if (EASY_VECTOR_15_ADD_SELF (cst)
|| EASY_VECTOR_15 (cst))
return 1;
- }
+ }
return 0;
}
{
cst = easy_vector_splat_const (INTVAL (CONST_VECTOR_ELT (op, 0)), mode);
if (EASY_VECTOR_15_ADD_SELF (cst))
- return 1;
+ return 1;
}
return 0;
}
 /* Generate easy_vector_constant out of an easy_vector_constant_add_self. */
-rtx
+rtx
gen_easy_vector_constant_add_self (rtx op)
{
int i, units;
v = rtvec_alloc (units);
for (i = 0; i < units; i++)
- RTVEC_ELT (v, i) =
+ RTVEC_ELT (v, i) =
GEN_INT (INTVAL (CONST_VECTOR_ELT (op, i)) >> 1);
return gen_rtx_raw_CONST_VECTOR (GET_MODE (op), v);
}
if (reload_completed && GET_CODE (inner) == SUBREG)
inner = SUBREG_REG (inner);
-
+
return gpc_reg_operand (inner, mode)
|| (memory_operand (inner, mode)
&& GET_CODE (XEXP (inner, 0)) != PRE_INC
this file. */
int
-current_file_function_operand (rtx op,
+current_file_function_operand (rtx op,
enum machine_mode mode ATTRIBUTE_UNUSED)
{
return (GET_CODE (op) == SYMBOL_REF
/* Return 1 for an operand in small memory on V.4/eabi. */
int
-small_data_operand (rtx op ATTRIBUTE_UNUSED,
+small_data_operand (rtx op ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
\f
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address. */
-static int
-constant_pool_expr_1 (rtx op, int *have_sym, int *have_toc)
+static int
+constant_pool_expr_1 (rtx op, int *have_sym, int *have_toc)
{
- switch (GET_CODE(op))
+ switch (GET_CODE(op))
{
case SYMBOL_REF:
if (RS6000_SYMBOL_REF_TLS_P (op))
return rs6000_legitimize_tls_address (x, model);
}
- if (GET_CODE (x) == PLUS
+ if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == REG
&& GET_CODE (XEXP (x, 1)) == CONST_INT
&& (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000) >= 0x10000)
- {
+ {
HOST_WIDE_INT high_int, low_int;
rtx sum;
low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
GEN_INT (high_int)), 0);
return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int));
}
- else if (GET_CODE (x) == PLUS
+ else if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == REG
&& GET_CODE (XEXP (x, 1)) != CONST_INT
&& GET_MODE_NUNITS (mode) == 1
&& TARGET_NO_TOC
&& ! flag_pic
&& GET_CODE (x) != CONST_INT
- && GET_CODE (x) != CONST_DOUBLE
+ && GET_CODE (x) != CONST_DOUBLE
&& CONSTANT_P (x)
&& GET_MODE_NUNITS (mode) == 1
&& (GET_MODE_BITSIZE (mode) <= 32
&& ! MACHO_DYNAMIC_NO_PIC_P
#endif
&& GET_CODE (x) != CONST_INT
- && GET_CODE (x) != CONST_DOUBLE
+ && GET_CODE (x) != CONST_DOUBLE
&& CONSTANT_P (x)
&& ((TARGET_HARD_FLOAT && TARGET_FPRS) || mode != DFmode)
- && mode != DImode
+ && mode != DImode
&& mode != TImode)
{
rtx reg = gen_reg_rtx (Pmode);
emit_insn (gen_macho_high (reg, x));
return gen_rtx_LO_SUM (Pmode, reg, x);
}
- else if (TARGET_TOC
+ else if (TARGET_TOC
&& constant_pool_expr_p (x)
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
{
rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
- }
+ }
return rs6000_got_symbol;
}
The Darwin code is inside #if TARGET_MACHO because only then is
machopic_function_base_name() defined. */
rtx
-rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
+rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
int opnum, int type, int ind_levels ATTRIBUTE_UNUSED, int *win)
{
- /* We must recognize output that we have already generated ourselves. */
+ /* We must recognize output that we have already generated ourselves. */
if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
}
*win = 0;
return x;
-}
+}
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
that is a valid memory address for an instruction.
??? Except that due to conceptual problems in offsettable_address_p
we can't really report the problems of integral offsets. So leave
- this assuming that the adjustable offset must be valid for the
+ this assuming that the adjustable offset must be valid for the
sub-words of a TFmode operand, which is what we had before. */
bool
   insns, zero is returned and no insns are emitted.  */
rtx
-rs6000_emit_set_const (rtx dest, enum machine_mode mode,
+rs6000_emit_set_const (rtx dest, enum machine_mode mode,
rtx source, int n ATTRIBUTE_UNUSED)
{
rtx result, insn, set;
ud3 = c2 & 0xffff;
ud4 = (c2 & 0xffff0000) >> 16;
- if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
+ if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
|| (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
{
if (ud1 & 0x8000)
emit_move_insn (dest, GEN_INT (ud1));
}
- else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
+ else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
|| (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
{
if (ud2 & 0x8000)
- emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
+ emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
- 0x80000000));
else
emit_move_insn (dest, GEN_INT (ud2 << 16));
if (ud1 != 0)
emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
}
- else if ((ud4 == 0xffff && (ud3 & 0x8000))
+ else if ((ud4 == 0xffff && (ud3 & 0x8000))
|| (ud4 == 0 && ! (ud3 & 0x8000)))
{
if (ud3 & 0x8000)
- emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
+ emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
- 0x80000000));
else
emit_move_insn (dest, GEN_INT (ud3 << 16));
if (ud1 != 0)
emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
}
- else
+ else
{
if (ud4 & 0x8000)
- emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
+ emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
- 0x80000000));
else
emit_move_insn (dest, GEN_INT (ud4 << 16));
emit_move_insn (dest, gen_rtx_ASHIFT (DImode, dest, GEN_INT (32)));
if (ud2 != 0)
- emit_move_insn (dest, gen_rtx_IOR (DImode, dest,
- GEN_INT (ud2 << 16)));
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest,
+ GEN_INT (ud2 << 16)));
if (ud1 != 0)
emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
}
rtx operands[2];
operands[0] = dest;
operands[1] = source;
-
+
/* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
if (GET_CODE (operands[1]) == CONST_DOUBLE
&& ! FLOAT_MODE_P (mode)
&& ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
? 32 : MEM_ALIGN (operands[0])))
|| SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
- ? 32
+ ? 32
: MEM_ALIGN (operands[1]))))
&& ! MEM_VOLATILE_P (operands [0])
&& ! MEM_VOLATILE_P (operands [1]))
regnum = REGNO (operands[1]);
else
regnum = -1;
-
+
/* If operands[1] is a register, on POWER it may have
double-precision data in it, so truncate it to single
precision. */
case DFmode:
case SFmode:
- if (CONSTANT_P (operands[1])
+ if (CONSTANT_P (operands[1])
&& ! easy_fp_constant (operands[1], mode))
operands[1] = force_const_mem (mode, operands[1]);
break;
-
+
case V16QImode:
case V8HImode:
case V4SFmode:
&& !easy_vector_constant (operands[1], mode))
operands[1] = force_const_mem (mode, operands[1]);
break;
-
+
case SImode:
case DImode:
/* Use default pattern for address of ELF small data */
if (TARGET_ELF
&& mode == Pmode
&& DEFAULT_ABI == ABI_V4
- && (GET_CODE (operands[1]) == SYMBOL_REF
+ && (GET_CODE (operands[1]) == SYMBOL_REF
|| GET_CODE (operands[1]) == CONST)
&& small_data_operand (operands[1], mode))
{
operands[1] = force_const_mem (mode, operands[1]);
- if (TARGET_TOC
+ if (TARGET_TOC
&& constant_pool_expr_p (XEXP (operands[1], 0))
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
get_pool_constant (XEXP (operands[1], 0)),
so we never return a PARALLEL. */
void
-init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
+init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
rtx libname ATTRIBUTE_UNUSED, int incoming,
int libcall, int n_named_args)
{
fprintf (stderr, " proto = %d, nargs = %d\n",
cum->prototype, cum->nargs_prototype);
}
-
- if (fntype
- && !TARGET_ALTIVEC
+
+ if (fntype
+ && !TARGET_ALTIVEC
&& TARGET_ALTIVEC_ABI
&& ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
{
}
/* If defined, a C expression that gives the alignment boundary, in bits,
- of an argument with the specified mode and type. If it is not defined,
+ of an argument with the specified mode and type. If it is not defined,
PARM_BOUNDARY is used for all arguments.
-
+
V.4 wants long longs to be double word aligned. */
int
itself. */
void
-function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int named)
{
cum->nargs_prototype--;
" to enable them.");
/* PowerPC64 Linux and AIX allocate GPRs for a vector argument
- even if it is going to be passed in a vector register.
+ even if it is going to be passed in a vector register.
Darwin does the same for variable-argument functions. */
if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
|| (cum->stdarg && DEFAULT_ABI != ABI_V4))
if (stack)
{
int align;
-
+
/* Vector parameters must be 16-byte aligned. This places
them at 2 mod 4 in terms of words in 32-bit mode, since
the parameter save area starts at offset 24 from the
else
align = cum->words & 1;
cum->words += align + rs6000_arg_size (mode, type);
-
+
if (TARGET_DEBUG_ARG)
{
- fprintf (stderr, "function_adv: words = %2d, align=%d, ",
+ fprintf (stderr, "function_adv: words = %2d, align=%d, ",
cum->words, align);
fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
- cum->nargs_prototype, cum->prototype,
+ cum->nargs_prototype, cum->prototype,
GET_MODE_NAME (mode));
}
}
/* Determine where to put a SIMD argument on the SPE. */
static rtx
-rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type)
{
if (cum->stdarg)
itself. */
struct rtx_def *
-function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int named)
{
enum rs6000_abi abi = DEFAULT_ABI;
number of registers used by the first element of the PARALLEL. */
int
-function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int named)
{
int ret = 0;
reference. */
static bool
-rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
+rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
tree type, bool named ATTRIBUTE_UNUSED)
{
if ((DEFAULT_ABI == ABI_V4
if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
tem = NULL_RTX;
else
- tem = simplify_gen_subreg (reg_mode, x, BLKmode,
+ tem = simplify_gen_subreg (reg_mode, x, BLKmode,
i * GET_MODE_SIZE(reg_mode));
}
else
\f
 /* Perform any actions needed for a function that is receiving a
- variable number of arguments.
+ variable number of arguments.
CUM is as above.
stack and set PRETEND_SIZE to the length of the registers pushed. */
static void
-setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int *pretend_size ATTRIBUTE_UNUSED, int no_rtl)
{
CUMULATIVE_ARGS next_cum;
set_mem_alias_set (mem, set);
set_mem_align (mem, BITS_PER_WORD);
- rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
+ rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
GP_ARG_NUM_REG - first_reg_offset);
}
record = (*lang_hooks.types.make_type) (RECORD_TYPE);
type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
- f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
+ f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
unsigned_char_type_node);
- f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
+ f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
unsigned_char_type_node);
/* Give the two bytes of padding a name, so that -Wpadded won't warn on
every user file. */
{ MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
{ MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
- { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
{ MASK_ALTIVEC, CODE_FOR_altivec_vperm_4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
{ MASK_ALTIVEC, CODE_FOR_altivec_vperm_4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
{ MASK_ALTIVEC, CODE_FOR_altivec_vperm_8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
}
static rtx
-altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
+altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
tree arglist, rtx target)
{
rtx pat, scratch;
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
- op1 = copy_to_mode_reg (mode1, op1);
+ op1 = copy_to_mode_reg (mode1, op1);
if (op0 == const0_rtx)
{
if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
op0 = copy_to_mode_reg (tmode, op0);
- op2 = copy_to_mode_reg (mode2, op2);
+ op2 = copy_to_mode_reg (mode2, op2);
if (op1 == const0_rtx)
{
/* Expand the stvx builtins. */
static rtx
-altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
bool *expandedp)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
/* Expand the dst builtins. */
static rtx
-altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
bool *expandedp)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
|| GET_MODE (target) != tmode
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
-
+
pat = GEN_FCN (icode) (target);
if (! pat)
return 0;
emit_insn (gen_altivec_dss (op0));
return NULL_RTX;
-
+
case ALTIVEC_BUILTIN_COMPILETIME_ERROR:
arg0 = TREE_VALUE (arglist);
while (TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == ADDR_EXPR)
|| GET_MODE (target) != tmode
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
-
+
pat = GEN_FCN (icode) (target);
if (! pat)
return 0;
static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
int ignore ATTRIBUTE_UNUSED)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
size_t i;
rtx ret;
bool success;
-
+
if (TARGET_ALTIVEC)
{
ret = altivec_expand_builtin (exp, target, &success);
END is the builtin enum at which to end. */
static void
enable_mask_for_builtins (struct builtin_description *desc, int size,
- enum rs6000_builtins start,
+ enum rs6000_builtins start,
enum rs6000_builtins end)
{
int i;
opaque_V2SI_type_node));
/* Initialize irregular SPE builtins. */
-
+
def_builtin (target_flags, "__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
def_builtin (target_flags, "__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
def_builtin (target_flags, "__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
default:
abort ();
}
-
+
def_builtin (dp->mask, dp->name, type, dp->code);
}
default:
abort ();
}
-
+
def_builtin (d->mask, d->name, type, d->code);
}
}
tree v2sf_ftype_v2sf
= build_function_type_list (opaque_V2SF_type_node,
opaque_V2SF_type_node, NULL_TREE);
-
+
tree v2sf_ftype_v2si
= build_function_type_list (opaque_V2SF_type_node,
opaque_V2SI_type_node, NULL_TREE);
= build_function_type_list (V4SF_type_node,
V4SF_type_node, V4SF_type_node,
V4SF_type_node, NULL_TREE);
- tree v4si_ftype_v4si_v4si_v4si
+ tree v4si_ftype_v4si_v4si_v4si
= build_function_type_list (V4SI_type_node,
V4SI_type_node, V4SI_type_node,
V4SI_type_node, NULL_TREE);
d = (struct builtin_description *) bdesc_3arg;
for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
{
-
+
enum machine_mode mode0, mode1, mode2, mode3;
tree type;
if (d->name == 0 || d->icode == CODE_FOR_nothing)
continue;
-
+
mode0 = insn_data[d->icode].operand[0].mode;
mode1 = insn_data[d->icode].operand[1].mode;
mode2 = insn_data[d->icode].operand[2].mode;
mode3 = insn_data[d->icode].operand[3].mode;
-
+
/* When all four are of the same mode. */
if (mode0 == mode1 && mode1 == mode2 && mode2 == mode3)
{
break;
case V8HImode:
type = v8hi_ftype_v8hi_v8hi_v8hi;
- break;
+ break;
case V16QImode:
type = v16qi_ftype_v16qi_v16qi_v16qi;
- break;
+ break;
default:
- abort();
+ abort();
}
}
else if (mode0 == mode1 && mode1 == mode2 && mode3 == V16QImode)
break;
case V8HImode:
type = v8hi_ftype_v8hi_v8hi_v16qi;
- break;
+ break;
case V16QImode:
type = v16qi_ftype_v16qi_v16qi_v16qi;
- break;
+ break;
default:
- abort();
+ abort();
}
}
- else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
&& mode3 == V4SImode)
type = v4si_ftype_v16qi_v16qi_v4si;
- else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
&& mode3 == V4SImode)
type = v4si_ftype_v8hi_v8hi_v4si;
- else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
+ else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
&& mode3 == V4SImode)
type = v4sf_ftype_v4sf_v4sf_v4si;
if (d->name == 0 || d->icode == CODE_FOR_nothing)
continue;
-
+
mode0 = insn_data[d->icode].operand[0].mode;
mode1 = insn_data[d->icode].operand[1].mode;
mode2 = insn_data[d->icode].operand[2].mode;
/* vint, vshort, vint. */
else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V4SImode)
type = v4si_ftype_v8hi_v4si;
-
+
/* vint, vint, 5 bit literal. */
else if (mode0 == V4SImode && mode1 == V4SImode && mode2 == QImode)
type = v4si_ftype_v4si_int;
-
+
/* vshort, vshort, 5 bit literal. */
else if (mode0 == V8HImode && mode1 == V8HImode && mode2 == QImode)
type = v8hi_ftype_v8hi_int;
-
+
/* vchar, vchar, 5 bit literal. */
else if (mode0 == V16QImode && mode1 == V16QImode && mode2 == QImode)
type = v16qi_ftype_v16qi_int;
/* vfloat, vint, 5 bit literal. */
else if (mode0 == V4SFmode && mode1 == V4SImode && mode2 == QImode)
type = v4sf_ftype_v4si_int;
-
+
/* vint, vfloat, 5 bit literal. */
else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == QImode)
type = v4si_ftype_v4sf_int;
if (d->name == 0 || d->icode == CODE_FOR_nothing)
continue;
-
+
mode0 = insn_data[d->icode].operand[0].mode;
mode1 = insn_data[d->icode].operand[1].mode;
rtx (*mov) (rtx, rtx);
enum machine_mode mode = BLKmode;
rtx dest;
-
+
if (bytes >= 8 && TARGET_POWERPC64
/* 64-bit loads and stores require word-aligned
displacements. */
mode = QImode;
mov = gen_movqi;
}
-
+
dest = adjust_address (orig_dest, mode, offset);
-
+
emit_insn ((*mov) (dest, const0_rtx));
}
return 1;
/* store_one_arg depends on expand_block_move to handle at least the size of
- reg_parm_stack_space. */
+ reg_parm_stack_space. */
if (bytes > (TARGET_POWERPC64 ? 64 : 32))
return 0;
} gen_func;
enum machine_mode mode = BLKmode;
rtx src, dest;
-
+
if (TARGET_STRING
&& bytes > 24 /* move up to 32 bytes at a time */
&& ! fixed_regs[5]
mode = QImode;
gen_func.mov = gen_movqi;
}
-
+
src = adjust_address (orig_src, mode, offset);
dest = adjust_address (orig_dest, mode, offset);
-
- if (mode != BLKmode)
+
+ if (mode != BLKmode)
{
rtx tmp_reg = gen_reg_rtx (mode);
-
+
emit_insn ((*gen_func.mov) (tmp_reg, src));
stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
}
src = replace_equiv_address (src, src_reg);
}
set_mem_size (src, GEN_INT (move_bytes));
-
+
if (!REG_P (XEXP (dest, 0)))
{
rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
dest = replace_equiv_address (dest, dest_reg);
}
set_mem_size (dest, GEN_INT (move_bytes));
-
+
emit_insn ((*gen_func.movmemsi) (dest, src,
GEN_INT (move_bytes & 31),
align_rtx));
|| XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
return 0;
src_reg = XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 0);
-
+
if (GET_CODE (src_reg) != REG
|| GET_MODE (src_reg) != SImode
|| ! INT_REGNO_P (REGNO (src_reg)))
rtx exp = XVECEXP (op, 0, i);
rtx unspec;
int maskval;
-
+
if (GET_CODE (exp) != SET
|| GET_CODE (SET_DEST (exp)) != REG
|| GET_MODE (SET_DEST (exp)) != CCmode
return 0;
unspec = SET_SRC (exp);
maskval = 1 << (MAX_CR_REGNO - REGNO (SET_DEST (exp)));
-
+
if (GET_CODE (unspec) != UNSPEC
|| XINT (unspec, 1) != UNSPEC_MOVESI_TO_CR
|| XVECLEN (unspec, 0) != 2
|| code == UNGT || code == UNLT
|| code == UNGE || code == UNLE))
abort ();
-
- /* These should never be generated except for
+
+ /* These should never be generated except for
flag_finite_math_only. */
if (mode == CCFPmode
&& ! flag_finite_math_only
abort ();
/* These are invalid; the information is not there. */
- if (mode == CCEQmode
+ if (mode == CCEQmode
&& code != EQ && code != NE)
abort ();
}
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
/* We might have been passed a SUBREG. */
- if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
return 0;
-
+
/* We might have been passed non floating point registers. */
if (!FP_REGNO_P (REGNO (reg1))
|| !FP_REGNO_P (REGNO (reg2)))
/* The mems cannot be volatile. */
if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
return 0;
-
+
addr1 = XEXP (mem1, 0);
addr2 = XEXP (mem2, 0);
}
/* Make sure the second address is a (mem (plus (reg) (const_int)))
- or if it is (mem (reg)) then make sure that offset1 is -8 and the same
+ or if it is (mem (reg)) then make sure that offset1 is -8 and the same
register as addr1. */
if (offset1 == -8 && GET_CODE (addr2) == REG && reg1 == REGNO (addr2))
return 1;
NO_REGS is returned. */
enum reg_class
-secondary_reload_class (enum reg_class class,
+secondary_reload_class (enum reg_class class,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx in)
{
other than BASE_REGS for TARGET_ELF. So indicate that a
register from BASE_REGS is needed as an intermediate
register.
-
+
On Darwin, pic addresses require a load from memory, which
needs a base register. */
if (class != BASE_REGS
}
\f
/* Given a comparison operation, return the bit number in CCR to test. We
- know this is a valid comparison.
+ know this is a valid comparison.
SCC_P is 1 if this is for an scc. That means that %D will have been
used instead of %C, so the bits will be in different places.
if (scc_p && code != EQ && code != GT && code != LT && code != UNORDERED
&& code != GTU && code != LTU)
abort ();
-
+
switch (code)
{
case NE:
reg_names[SMALL_DATA_REG]);
}
return;
-
+
case 'm':
/* MB value for a mask operand. */
if (! mask_operand (x, SImode))
else
s = t[1];
}
-
+
fputs (s, file);
}
return;
if (! INT_P (x))
output_operand_lossage ("invalid %%u value");
else
- fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
(INT_LOWPART (x) >> 16) & 0xffff);
return;
/* If constant, low-order 16 bits of constant, signed. Otherwise, write
normally. */
if (INT_P (x))
- fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
else
print_operand (file, x, 0);
reg_names[SMALL_DATA_REG]);
}
return;
-
+
case 'z':
/* X is a SYMBOL_REF. Write out the name preceded by a
period and without any trailing data in brackets. Used for function
abort ();
break;
}
-
+
case 0:
if (GET_CODE (x) == REG)
fprintf (file, "%s", reg_names[REGNO (x)]);
rtx contains_minus = XEXP (x, 1);
rtx minus, symref;
const char *name;
-
+
/* Find the (minus (sym) (toc)) buried in X, and temporarily
turn it into (sym) for output_addr_const. */
while (GET_CODE (XEXP (contains_minus, 0)) != MINUS)
{
extern int in_toc_section (void);
static int recurse = 0;
-
+
/* For -mrelocatable, we mark all addresses that need to be fixed up
in the .fixup section. */
if (TARGET_RELOCATABLE
{
/* Reversal of FP compares takes care -- an ordered compare
becomes an unordered compare and vice versa. */
- if (mode == CCFPmode
+ if (mode == CCFPmode
&& (!flag_finite_math_only
|| code == UNLT || code == UNLE || code == UNGT || code == UNGE
|| code == UNEQ || code == LTGT))
else
emit_insn (gen_rtx_SET (VOIDmode, compare_result,
gen_rtx_COMPARE (comp_mode,
- rs6000_compare_op0,
+ rs6000_compare_op0,
rs6000_compare_op1)));
-
+
/* Some kinds of FP comparisons need an OR operation;
under flag_finite_math_only we don't bother. */
if (rs6000_compare_fp_p
enum rtx_code or1, or2;
rtx or1_rtx, or2_rtx, compare2_rtx;
rtx or_result = gen_reg_rtx (CCEQmode);
-
+
switch (code)
{
case LE: or1 = LT; or2 = EQ; break;
}
validate_condition_mode (code, GET_MODE (compare_result));
-
+
return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
rtx not_result = gen_reg_rtx (CCEQmode);
rtx not_op, rev_cond_rtx;
enum machine_mode cc_mode;
-
+
cc_mode = GET_MODE (XEXP (condition_rtx, 0));
rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
/* Return the string to output a conditional branch to LABEL, which is
the operand number of the label, or -1 if the branch is really a
- conditional return.
+ conditional return.
OP is the conditional expression. XEXP (OP, 0) is assumed to be a
condition code register and its mode specifies what kind of
ccode = "ne"; break;
case EQ: case UNEQ:
ccode = "eq"; break;
- case GE: case GEU:
+ case GE: case GEU:
ccode = "ge"; break;
- case GT: case GTU: case UNGT:
+ case GT: case GTU: case UNGT:
ccode = "gt"; break;
- case LE: case LEU:
+ case LE: case LEU:
ccode = "le"; break;
- case LT: case LTU: case UNLT:
+ case LT: case LTU: case UNLT:
ccode = "lt"; break;
case UNORDERED: ccode = "un"; break;
case ORDERED: ccode = "nu"; break;
default:
abort ();
}
-
- /* Maybe we have a guess as to how likely the branch is.
+
+ /* Maybe we have a guess as to how likely the branch is.
The old mnemonics don't have a way to specify this information. */
pred = "";
note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
prediction. For older cpus we may as well always hint, but
assume not taken for branches that are very close to 50% as a
mispredicted taken branch is more expensive than a
- mispredicted not-taken branch. */
+ mispredicted not-taken branch. */
if (rs6000_always_hint
|| abs (prob) > REG_BR_PROB_BASE / 100 * 48)
{
it'll probably be faster to use a branch here too. */
if (code == UNEQ && HONOR_NANS (compare_mode))
return 0;
-
+
if (GET_CODE (op1) == CONST_DOUBLE)
REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
-
+
/* We're going to try to implement comparisons by performing
a subtract, then comparing against zero. Unfortunately,
Inf - Inf is NaN which is not zero, and so if we don't
&& (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
/* Constructs of the form (a OP b ? a : b) are safe. */
&& ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
- || (! rtx_equal_p (op0, true_cond)
+ || (! rtx_equal_p (op0, true_cond)
&& ! rtx_equal_p (op1, true_cond))))
return 0;
/* At this point we know we can use fsel. */
case EQ:
temp = gen_reg_rtx (compare_mode);
- emit_insn (gen_rtx_SET (VOIDmode, temp,
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
gen_rtx_NEG (compare_mode,
gen_rtx_ABS (compare_mode, op0))));
op0 = temp;
/* a GT 0 <-> (a GE 0 && -a UNLT 0) */
temp = gen_reg_rtx (result_mode);
emit_insn (gen_rtx_SET (VOIDmode, temp,
- gen_rtx_IF_THEN_ELSE (result_mode,
+ gen_rtx_IF_THEN_ELSE (result_mode,
gen_rtx_GE (VOIDmode,
op0, op1),
true_cond, false_cond)));
c = GEU;
if (code == SMAX || code == UMAX)
- target = emit_conditional_move (dest, c, op0, op1, mode,
+ target = emit_conditional_move (dest, c, op0, op1, mode,
op0, op1, mode, 0);
else
- target = emit_conditional_move (dest, c, op0, op1, mode,
+ target = emit_conditional_move (dest, c, op0, op1, mode,
op1, op0, mode, 0);
if (target == NULL_RTX)
abort ();
else
reg_mode = word_mode;
reg_mode_size = GET_MODE_SIZE (reg_mode);
-
+
if (reg_mode_size * nregs != GET_MODE_SIZE (mode))
abort ();
-
+
if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
{
/* Move register range backwards, if we might have destructive
overlap. */
int i;
for (i = nregs - 1; i >= 0; i--)
- emit_insn (gen_rtx_SET (VOIDmode,
+ emit_insn (gen_rtx_SET (VOIDmode,
simplify_gen_subreg (reg_mode, dst, mode,
i * reg_mode_size),
simplify_gen_subreg (reg_mode, src, mode,
{
rtx delta_rtx;
breg = XEXP (XEXP (src, 0), 0);
- delta_rtx = GET_CODE (XEXP (src, 0)) == PRE_INC
- ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
- : GEN_INT (-GET_MODE_SIZE (GET_MODE (src)));
+ delta_rtx = GET_CODE (XEXP (src, 0)) == PRE_INC
+ ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
+ : GEN_INT (-GET_MODE_SIZE (GET_MODE (src)));
emit_insn (TARGET_32BIT
? gen_addsi3 (breg, breg, delta_rtx)
: gen_adddi3 (breg, breg, delta_rtx));
}
	      /* We now have an address involving a base register only.
- If we use one of the registers to address memory,
+ If we use one of the registers to address memory,
		 we have to change that register last.  */
breg = (GET_CODE (XEXP (src, 0)) == PLUS
if (!REG_P (breg))
abort();
- if (REGNO (breg) >= REGNO (dst)
+ if (REGNO (breg) >= REGNO (dst)
&& REGNO (breg) < REGNO (dst) + nregs)
j = REGNO (breg) - REGNO (dst);
}
{
rtx delta_rtx;
breg = XEXP (XEXP (dst, 0), 0);
- delta_rtx = GET_CODE (XEXP (dst, 0)) == PRE_INC
- ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
- : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst)));
+ delta_rtx = GET_CODE (XEXP (dst, 0)) == PRE_INC
+ ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
+ : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst)));
/* We have to update the breg before doing the store.
Use store with update, if available. */
}
for (i = 0; i < nregs; i++)
- {
+ {
/* Calculate index to next subword. */
++j;
- if (j == nregs)
+ if (j == nregs)
j = 0;
- /* If compiler already emited move of first word by
+	      /* If the compiler already emitted a move of the first word by
store with update, no need to do anything. */
if (j == 0 && used_update)
continue;
-
+
emit_insn (gen_rtx_SET (VOIDmode,
simplify_gen_subreg (reg_mode, dst, mode,
j * reg_mode_size),
/* Find lowest numbered live register. */
for (first_reg = 13; first_reg <= 31; first_reg++)
- if (regs_ever_live[first_reg]
+ if (regs_ever_live[first_reg]
&& (! call_used_regs[first_reg]
|| (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
&& info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
&& info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
&& info_ptr->cr_save_p;
-
+
/* This will not work in conjunction with sibcalls. Make sure there
are none. (This check is expensive, but seldom executed.) */
if ( info_ptr->world_save_p )
- {
+ {
rtx insn;
for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
if ( GET_CODE (insn) == CALL_INSN
&& SIBLING_CALL_P (insn))
- {
+ {
info_ptr->world_save_p = 0;
break;
}
}
-
+
if (info_ptr->world_save_p)
{
/* Even if we're not touching VRsave, make sure there's room on the
/* Because the Darwin register save/restore routines only handle
     F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
     check and abort if there's something wrong.  */
- if (info_ptr->first_fp_reg_save < FIRST_SAVED_FP_REGNO
+ if (info_ptr->first_fp_reg_save < FIRST_SAVED_FP_REGNO
|| info_ptr->first_altivec_reg_save < FIRST_SAVED_ALTIVEC_REGNO)
abort ();
}
- return;
+ return;
}
| Parameter save area (P) | 8
+---------------------------------------+
| Alloca space (A) | 8+P
- +---------------------------------------+
+ +---------------------------------------+
| Varargs save area (V) | 8+P+A
- +---------------------------------------+
+ +---------------------------------------+
| Local variable space (L) | 8+P+A+V
- +---------------------------------------+
+ +---------------------------------------+
| Float/int conversion temporary (X) | 8+P+A+V+L
+---------------------------------------+
| Save area for AltiVec registers (W) | 8+P+A+V+L+X
| SPE alignment padding |
+---------------------------------------+
| saved CR (C) | 8+P+A+V+L+X+W+Y+Z
- +---------------------------------------+
+ +---------------------------------------+
| Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
- +---------------------------------------+
+ +---------------------------------------+
| Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
+---------------------------------------+
old SP->| back chain to caller's caller |
/* Calculate which registers need to be saved & save area size. */
info_ptr->first_gp_reg_save = first_reg_to_save ();
- /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
+ /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
even if it currently looks like we won't. */
if (((TARGET_TOC && TARGET_MINIMAL_TOC)
|| (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
}
/* Determine if we need to save the condition code registers. */
- if (regs_ever_live[CR2_REGNO]
+ if (regs_ever_live[CR2_REGNO]
|| regs_ever_live[CR3_REGNO]
|| regs_ever_live[CR4_REGNO])
{
When we're called from the epilogue, we need to avoid counting
this as a store. */
-
+
push_topmost_sequence ();
top = get_insns ();
pop_topmost_sequence ();
{
if (FIND_REG_INC_NOTE (insn, reg))
return 1;
- else if (GET_CODE (insn) == CALL_INSN
+ else if (GET_CODE (insn) == CALL_INSN
&& !SIBLING_CALL_P (insn))
return 1;
else if (set_of (reg, insn) != NULL_RTX
static GTY(()) int set = -1;
-int
+int
get_TOC_alias_set (void)
{
if (set == -1)
set = new_alias_set ();
return set;
-}
+}
/* This returns nonzero if the current function uses the TOC. This is
determined by the presence of (use (unspec ... UNSPEC_TOC)), which
is generated by the ABI_V4 load_toc_* patterns. */
#if TARGET_ELF
static int
-uses_TOC (void)
+uses_TOC (void)
{
rtx insn;
rtx pat = PATTERN (insn);
int i;
- if (GET_CODE (pat) == PARALLEL)
+ if (GET_CODE (pat) == PARALLEL)
for (i = 0; i < XVECLEN (pat, 0); i++)
{
rtx sub = XVECEXP (pat, 0, i);
#endif
rtx
-create_TOC_reference (rtx symbol)
+create_TOC_reference (rtx symbol)
{
- return gen_rtx_PLUS (Pmode,
+ return gen_rtx_PLUS (Pmode,
gen_rtx_REG (Pmode, TOC_REGISTER),
- gen_rtx_CONST (Pmode,
- gen_rtx_MINUS (Pmode, symbol,
+ gen_rtx_CONST (Pmode,
+ gen_rtx_MINUS (Pmode, symbol,
gen_rtx_SYMBOL_REF (Pmode, toc_label_name))));
}
if (current_function_limit_stack)
{
if (REG_P (stack_limit_rtx)
- && REGNO (stack_limit_rtx) > 1
+ && REGNO (stack_limit_rtx) > 1
&& REGNO (stack_limit_rtx) <= 31)
{
emit_insn (TARGET_32BIT
&& DEFAULT_ABI == ABI_V4)
{
rtx toload = gen_rtx_CONST (VOIDmode,
- gen_rtx_PLUS (Pmode,
- stack_limit_rtx,
+ gen_rtx_PLUS (Pmode,
+ stack_limit_rtx,
GEN_INT (size)));
emit_insn (gen_elf_high (tmp_reg, toload));
insn = emit_insn (TARGET_32BIT
? gen_movsi_update (stack_reg, stack_reg,
todec, stack_reg)
- : gen_movdi_update (stack_reg, stack_reg,
+ : gen_movdi_update (stack_reg, stack_reg,
todec, stack_reg));
}
else
emit_move_insn (gen_rtx_MEM (Pmode, stack_reg),
gen_rtx_REG (Pmode, 12));
}
-
+
RTX_FRAME_RELATED_P (insn) = 1;
- REG_NOTES (insn) =
+ REG_NOTES (insn) =
gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode, stack_reg,
+ gen_rtx_SET (VOIDmode, stack_reg,
gen_rtx_PLUS (Pmode, stack_reg,
GEN_INT (-size))),
REG_NOTES (insn));
its hand so much. */
static void
-rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
+rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
rtx reg2, rtx rreg)
{
rtx real, temp;
if (reg2 != NULL_RTX)
real = replace_rtx (real, reg2, rreg);
-
- real = replace_rtx (real, reg,
+
+ real = replace_rtx (real, reg,
gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
STACK_POINTER_REGNUM),
GEN_INT (val)));
-
+
/* We expect that 'real' is either a SET or a PARALLEL containing
SETs (and possibly other stuff). In a PARALLEL, all the SETs
are important so they all have to be marked RTX_FRAME_RELATED_P. */
if (GET_CODE (real) == SET)
{
rtx set = real;
-
+
temp = simplify_rtx (SET_SRC (set));
if (temp)
SET_SRC (set) = temp;
if (GET_CODE (XVECEXP (real, 0, i)) == SET)
{
rtx set = XVECEXP (real, 0, i);
-
+
temp = simplify_rtx (SET_SRC (set));
if (temp)
SET_SRC (set) = temp;
Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
static void
-emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
+emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
unsigned int regno, int offset, HOST_WIDE_INT total_size)
{
rtx reg, offset_rtx, insn, mem, addr, int_rtx;
int saving_FPRs_inline;
int using_store_multiple;
HOST_WIDE_INT sp_offset = 0;
-
+
if (TARGET_FIX_AND_CONTINUE)
{
/* gdb on darwin arranges to forward a function from the old
sp_offset = info->total_size;
else
frame_reg_rtx = frame_ptr_rtx;
- rs6000_emit_allocate_stack (info->total_size,
+ rs6000_emit_allocate_stack (info->total_size,
(frame_reg_rtx != sp_reg_rtx
&& (info->cr_save_p
|| info->lr_save_p
/* AltiVec addressing mode is [reg+reg]. */
mem = gen_rtx_MEM (V4SImode,
gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
-
+
set_mem_alias_set (mem, rs6000_sr_alias_set);
insn = emit_move_insn (mem, savereg);
if (! info->world_save_p && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
{
rtx set;
-
+
cr_save_rtx = gen_rtx_REG (SImode, 12);
insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
RTX_FRAME_RELATED_P (insn) = 1;
{
int i;
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
- if ((regs_ever_live[info->first_fp_reg_save+i]
+ if ((regs_ever_live[info->first_fp_reg_save+i]
&& ! call_used_regs[info->first_fp_reg_save+i]))
emit_frame_save (frame_reg_rtx, frame_ptr_rtx, DFmode,
info->first_fp_reg_save + i,
const char *alloc_rname;
rtvec p;
p = rtvec_alloc (2 + 64 - info->first_fp_reg_save);
-
- RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (Pmode,
+
+ RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode,
LINK_REGISTER_REGNUM));
sprintf (rname, "%s%d%s", SAVE_FP_PREFIX,
info->first_fp_reg_save - 32, SAVE_FP_SUFFIX);
rtx addr, reg, mem;
reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->fp_save_offset
+ GEN_INT (info->fp_save_offset
+ sp_offset + 8*i));
mem = gen_rtx_MEM (DFmode, addr);
set_mem_alias_set (mem, rs6000_sr_alias_set);
RTVEC_ELT (p, i + 2) = gen_rtx_SET (VOIDmode, mem, reg);
}
insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
}
{
rtx addr, reg, mem;
reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + sp_offset
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ reg_size * i));
mem = gen_rtx_MEM (reg_mode, addr);
set_mem_alias_set (mem, rs6000_sr_alias_set);
RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
}
insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
}
else if (! info->world_save_p)
{
int i;
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
- if ((regs_ever_live[info->first_gp_reg_save+i]
+ if ((regs_ever_live[info->first_gp_reg_save+i]
&& (! call_used_regs[info->first_gp_reg_save+i]
|| (i+info->first_gp_reg_save
== RS6000_PIC_OFFSET_TABLE_REGNUM
}
else
{
- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + sp_offset
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ reg_size * i));
mem = gen_rtx_MEM (reg_mode, addr);
set_mem_alias_set (mem, rs6000_sr_alias_set);
insn = emit_move_insn (mem, reg);
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
}
}
set_mem_alias_set (mem, rs6000_sr_alias_set);
insn = emit_move_insn (mem, reg);
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
PATTERN (insn) = gen_blockage ();
}
rtx mem = gen_rtx_MEM (Pmode, addr);
/* This should not be of rs6000_sr_alias_set, because of
__builtin_return_address. */
-
+
insn = emit_move_insn (mem, reg);
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
}
REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
set,
REG_NOTES (insn));
-
+
}
insn = emit_move_insn (mem, cr_save_rtx);
- rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
}
- /* Update stack and set back pointer unless this is V.4,
+ /* Update stack and set back pointer unless this is V.4,
for which it was done previously. */
if (! info->world_save_p && info->push_p
&& !(DEFAULT_ABI == ABI_V4 || current_function_calls_eh_return))
/* Set frame pointer, if needed. */
if (frame_pointer_needed)
{
- insn = emit_move_insn (gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM),
+ insn = emit_move_insn (gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM),
sp_reg_rtx);
RTX_FRAME_RELATED_P (insn) = 1;
}
rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (lr, src)));
- insn = emit_move_insn (gen_rtx_REG (Pmode,
+ insn = emit_move_insn (gen_rtx_REG (Pmode,
RS6000_PIC_OFFSET_TABLE_REGNUM),
lr);
rs6000_maybe_dead (insn);
/* Write function prologue. */
static void
-rs6000_output_function_prologue (FILE *file,
+rs6000_output_function_prologue (FILE *file,
HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
rs6000_stack_t *info = rs6000_stack_info ();
rs6000_pic_labelno++;
}
-
+
/* Emit function epilogue as insns.
At present, dwarf2out_frame_debug_expr doesn't understand
|| current_function_calls_eh_return
|| info->first_fp_reg_save == 64
|| FP_SAVE_INLINE (info->first_fp_reg_save));
- use_backchain_to_restore_sp = (frame_pointer_needed
+ use_backchain_to_restore_sp = (frame_pointer_needed
|| current_function_calls_alloca
|| info->total_size > 32767);
using_mfcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
p = rtvec_alloc (9
+ 1
- + 32 - info->first_gp_reg_save
+ + 32 - info->first_gp_reg_save
+ LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
+ 63 + 1 - info->first_fp_reg_save);
emit_move_insn (frame_reg_rtx,
gen_rtx_MEM (Pmode, sp_reg_rtx));
-
+
}
else if (info->push_p)
{
GEN_INT (info->total_size)));
}
}
-
+
/* Restore AltiVec registers if needed. */
if (TARGET_ALTIVEC_ABI && info->altivec_size != 0)
{
emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
}
-
+
/* Get the old cr if we saved it. */
if (info->cr_save_p)
{
emit_move_insn (gen_rtx_REG (SImode, 12), mem);
}
-
+
/* Set LR here to try to overlap restores below. */
if (info->lr_save_p)
emit_move_insn (gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM),
gen_rtx_REG (Pmode, 0));
-
+
/* Load exception handler data registers, if needed. */
if (current_function_calls_eh_return)
{
emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
}
}
-
+
/* Restore GPRs. This is done as a PARALLEL if we are using
the load-multiple instructions. */
if (using_load_multiple)
p = rtvec_alloc (32 - info->first_gp_reg_save);
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
{
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + sp_offset
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ reg_size * i));
rtx mem = gen_rtx_MEM (reg_mode, addr);
set_mem_alias_set (mem, rs6000_sr_alias_set);
- RTVEC_ELT (p, i) =
+ RTVEC_ELT (p, i) =
gen_rtx_SET (VOIDmode,
gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
mem);
}
else
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
- if ((regs_ever_live[info->first_gp_reg_save+i]
+ if ((regs_ever_live[info->first_gp_reg_save+i]
&& (! call_used_regs[info->first_gp_reg_save+i]
|| (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& TARGET_TOC && TARGET_MINIMAL_TOC)))
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
|| (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
{
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + sp_offset
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ reg_size * i));
rtx mem = gen_rtx_MEM (reg_mode, addr);
set_mem_alias_set (mem, rs6000_sr_alias_set);
- emit_move_insn (gen_rtx_REG (reg_mode,
+ emit_move_insn (gen_rtx_REG (reg_mode,
info->first_gp_reg_save + i), mem);
}
/* Restore fpr's if we need to do it without calling a function. */
if (restoring_FPRs_inline)
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
- if ((regs_ever_live[info->first_fp_reg_save+i]
+ if ((regs_ever_live[info->first_fp_reg_save+i]
&& ! call_used_regs[info->first_fp_reg_save+i]))
{
rtx addr, mem;
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->fp_save_offset
- + sp_offset
+ GEN_INT (info->fp_save_offset
+ + sp_offset
+ 8 * i));
mem = gen_rtx_MEM (DFmode, addr);
set_mem_alias_set (mem, rs6000_sr_alias_set);
- emit_move_insn (gen_rtx_REG (DFmode,
+ emit_move_insn (gen_rtx_REG (DFmode,
info->first_fp_reg_save + i),
mem);
}
{
rtx r12_rtx = gen_rtx_REG (SImode, 12);
int count = 0;
-
+
if (using_mfcr_multiple)
{
for (i = 0; i < 8; i++)
{
rtvec p;
int ndx;
-
+
p = rtvec_alloc (count);
ndx = 0;
RTVEC_ELT (r, 0) = r12_rtx;
RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
RTVEC_ELT (p, ndx) =
- gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
+ gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
ndx++;
}
for (i = 0; i < 8; i++)
if (regs_ever_live[CR0_REGNO+i] && ! call_used_regs[CR0_REGNO+i])
{
- emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
+ emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
CR0_REGNO+i),
r12_rtx));
}
p = rtvec_alloc (2);
RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
- RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
- gen_rtx_REG (Pmode,
+ RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode,
LINK_REGISTER_REGNUM));
/* If we have to restore more than two FP registers, branch to the
char rname[30];
const char *alloc_rname;
- sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX,
+ sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX,
info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
alloc_rname = ggc_strdup (rname);
RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode,
mem = gen_rtx_MEM (DFmode, addr);
set_mem_alias_set (mem, rs6000_sr_alias_set);
- RTVEC_ELT (p, i+3) =
+ RTVEC_ELT (p, i+3) =
gen_rtx_SET (VOIDmode,
gen_rtx_REG (DFmode, info->first_fp_reg_save + i),
mem);
}
}
-
+
emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
}
}
/* Write function epilogue. */
static void
-rs6000_output_function_epilogue (FILE *file,
+rs6000_output_function_epilogue (FILE *file,
HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
rs6000_stack_t *info = rs6000_stack_info ();
&& NOTE_P (insn)
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
insn = PREV_INSN (insn);
- if (insn
- && (LABEL_P (insn)
+ if (insn
+ && (LABEL_P (insn)
|| (NOTE_P (insn)
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
fputs ("\tnop\n", file);
not support varargs. */
static void
-rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
- HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
+rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
rtx this, insn, funexp;
/* A quick summary of the various types of 'constant-pool tables'
under PowerPC:
- Target Flags Name One table per
+ Target Flags Name One table per
AIX (none) AIX TOC object file
AIX -mfull-toc AIX TOC object file
AIX -mminimal-toc AIX minimal TOC translation unit
SVR4/EABI -fPIC SVR4 PIC translation unit
SVR4/EABI -mrelocatable EABI TOC function
SVR4/EABI -maix AIX TOC object file
- SVR4/EABI -maix -mminimal-toc
+ SVR4/EABI -maix -mminimal-toc
AIX minimal TOC translation unit
Name Reg. Set by entries contains:
unsigned result = (code << 3) ^ mode;
const char *format;
int flen, fidx;
-
+
format = GET_RTX_FORMAT (code);
flen = strlen (format);
fidx = 0;
static unsigned
toc_hash_function (const void *hash_entry)
{
- const struct toc_hash_struct *thc =
+ const struct toc_hash_struct *thc =
(const struct toc_hash_struct *) hash_entry;
return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}
|| strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
|| strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
|| strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
- || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
+ || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
void
rs6000_output_symbol_ref (FILE *file, rtx x)
/* Currently C++ toc references to vtables can be emitted before it
is decided whether the vtable is public or private. If this is
the case, then the linker will eventually complain that there is
- a reference to an unknown section. Thus, for vtables only,
+ a reference to an unknown section. Thus, for vtables only,
we emit the TOC reference to reference the symbol and not the
section. */
const char *name = XSTR (x, 0);
- if (VTABLE_NAME_P (name))
+ if (VTABLE_NAME_P (name))
{
RS6000_OUTPUT_BASENAME (file, name);
}
{
struct toc_hash_struct *h;
void * * found;
-
+
/* Create toc_hash_table. This can't be done at OVERRIDE_OPTIONS
time because GGC is not initialized at that point. */
if (toc_hash_table == NULL)
- toc_hash_table = htab_create_ggc (1021, toc_hash_function,
+ toc_hash_table = htab_create_ggc (1021, toc_hash_function,
toc_hash_eq, NULL);
h = ggc_alloc (sizeof (*h));
h->key = x;
h->key_mode = mode;
h->labelno = labelno;
-
+
found = htab_find_slot (toc_hash_table, h, 1);
if (*found == NULL)
*found = h;
- else /* This is indeed a duplicate.
+ else /* This is indeed a duplicate.
Set this label equal to that label. */
{
fputs ("\t.set ", file);
ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
fprintf (file, "%d,", labelno);
ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
- fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
+ fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
found)->labelno));
return;
}
base = XEXP (XEXP (x, 0), 0);
offset = INTVAL (XEXP (XEXP (x, 0), 1));
}
-
+
if (GET_CODE (base) == SYMBOL_REF)
name = XSTR (base, 0);
else if (GET_CODE (base) == LABEL_REF)
the name. */
void
-rs6000_gen_section_name (char **buf, const char *filename,
+rs6000_gen_section_name (char **buf, const char *filename,
const char *section_desc)
{
const char *q, *after_last_slash, *last_period = 0;
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
- if (NO_PROFILE_COUNTERS)
+ if (NO_PROFILE_COUNTERS)
emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 0);
else
{
assemble_name (file, buf);
fputs ("-.\n1:", file);
asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
- asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
+ asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
reg_names[0], reg_names[11]);
asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
reg_names[0], reg_names[0], reg_names[11]);
instructions to issue in this cycle. */
static int
-rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
- int verbose ATTRIBUTE_UNUSED,
+rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED,
rtx insn, int more)
{
if (GET_CODE (PATTERN (insn)) == USE
a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
static int
-rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn ATTRIBUTE_UNUSED,
+rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn ATTRIBUTE_UNUSED,
int cost)
{
if (! recog_memoized (insn))
{
enum attr_type type = get_attr_type (insn);
if (type == TYPE_BRANCH || type == TYPE_JMPREG)
- return true;
+ return true;
return false;
}
if (is_dispatch_slot_restricted (insn)
&& reload_completed
- && current_sched_info->sched_max_insns_priority
+ && current_sched_info->sched_max_insns_priority
&& rs6000_sched_restricted_insns_priority)
{
/* Prioritize insns that can be dispatched only in the first dispatch slot. */
if (rs6000_sched_restricted_insns_priority == 1)
- /* Attach highest priority to insn. This means that in
- haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
+ /* Attach highest priority to insn. This means that in
+ haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
precede 'priority' (critical path) considerations. */
- return current_sched_info->sched_max_insns_priority;
+ return current_sched_info->sched_max_insns_priority;
else if (rs6000_sched_restricted_insns_priority == 2)
- /* Increase priority of insn by a minimal amount. This means that in
+ /* Increase priority of insn by a minimal amount. This means that in
haifa-sched.c:ready_sort(), only 'priority' (critical path) considerations
precede dispatch-slot restriction considerations. */
- return (priority + 1);
- }
+ return (priority + 1);
+ }
return priority;
}
case CPU_PPC750:
case CPU_PPC7400:
case CPU_PPC8540:
- return 2;
+ return 2;
case CPU_RIOS2:
case CPU_PPC604:
case CPU_PPC604E:
}
/* Determine if PAT is a PATTERN of a load insn. */
-
+
static bool
is_load_insn1 (rtx pat)
{
static bool
rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost, int distance)
-{
+{
  /* If the flag is not enabled - no dependence is considered costly;
- allow all dependent insns in the same group.
+ allow all dependent insns in the same group.
This is the most aggressive option. */
if (rs6000_sched_costly_dep == no_dep_costly)
return false;
- /* If the flag is set to 1 - a dependence is always considered costly;
+ /* If the flag is set to 1 - a dependence is always considered costly;
do not allow dependent instructions in the same group.
This is the most conservative option. */
if (rs6000_sched_costly_dep == all_deps_costly)
- return true;
+ return true;
- if (rs6000_sched_costly_dep == store_to_load_dep_costly
- && is_load_insn (next)
+ if (rs6000_sched_costly_dep == store_to_load_dep_costly
+ && is_load_insn (next)
&& is_store_insn (insn))
/* Prevent load after store in the same group. */
return true;
if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
- && is_load_insn (next)
+ && is_load_insn (next)
&& is_store_insn (insn)
&& (!link || (int) REG_NOTE_KIND (link) == 0))
/* Prevent load after store in the same group if it is a true dependence. */
return true;
-
- /* The flag is set to X; dependences with latency >= X are considered costly,
+
+ /* The flag is set to X; dependences with latency >= X are considered costly,
and will not be scheduled in the same group. */
if (rs6000_sched_costly_dep <= max_dep_latency
&& ((cost - distance) >= (int)rs6000_sched_costly_dep))
return false;
}
-/* Return the next insn after INSN that is found before TAIL is reached,
+/* Return the next insn after INSN that is found before TAIL is reached,
skipping any "non-active" insns - insns that will not actually occupy
an issue slot. Return NULL_RTX if such an insn is not found. */
return false;
}
-/* Utility of the function redefine_groups.
+/* Utility of the function redefine_groups.
Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
in the same dispatch group. If so, insert nops before NEXT_INSN, in order
to keep it "far" (in a separate group) from GROUP_INSNS, following
-minsert_sched_nops = X:
(1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
in order to force NEXT_INSN into a separate group.
- (2) X < sched_finish_regroup_exact: insert exactly X nops.
- GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
+ (2) X < sched_finish_regroup_exact: insert exactly X nops.
+ GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
insertion (has a group just ended, how many vacant issue slots remain in the
last group, and how many dispatch groups were encountered so far). */
-static int
+static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns, rtx next_insn,
bool *group_end, int can_issue_more, int *group_count)
{
*group_end = true;
return 0;
- }
+ }
if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
{
int n_nops = rs6000_sched_insert_nops;
- /* Nops can't be issued from the branch slot, so the effective
+ /* Nops can't be issued from the branch slot, so the effective
issue_rate for nops is 'issue_rate - 1'. */
if (can_issue_more == 0)
can_issue_more = issue_rate;
for (i = 0; i < issue_rate; i++)
{
group_insns[i] = 0;
- }
- }
+ }
+ }
n_nops--;
}
/* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
- can_issue_more++;
+ can_issue_more++;
*group_end = /* Is next_insn going to start a new group? */
- (end
+ (end
|| (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
|| (can_issue_more <= 2 && is_cracked_insn (next_insn))
|| (can_issue_more < issue_rate &&
if (sched_verbose > 6)
fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
*group_count, can_issue_more);
- return can_issue_more;
- }
+ return can_issue_more;
+ }
return can_issue_more;
}
/* This function tries to synch the dispatch groups that the compiler "sees"
- with the dispatch groups that the processor dispatcher is expected to
+ with the dispatch groups that the processor dispatcher is expected to
form in practice. It tries to achieve this synchronization by forcing the
estimated processor grouping on the compiler (as opposed to the function
'pad_goups' which tries to force the scheduler's grouping on the processor).
- only the last 2 or less issue slots, including the branch slot, are vacant,
which means that a cracked insn (which occupies two issue slots) can't be
issued in this group.
- - less than 'issue_rate' slots are vacant, and the next insn always needs to
+ - less than 'issue_rate' slots are vacant, and the next insn always needs to
start a new group. */
static int
/* Initialize. */
issue_rate = rs6000_issue_rate ();
group_insns = alloca (issue_rate * sizeof (rtx));
- for (i = 0; i < issue_rate; i++)
+ for (i = 0; i < issue_rate; i++)
{
group_insns[i] = 0;
}
|| (can_issue_more < issue_rate &&
insn_terminates_group_p (next_insn, previous_group)));
- can_issue_more = force_new_group (sched_verbose, dump, group_insns,
+ can_issue_more = force_new_group (sched_verbose, dump, group_insns,
next_insn, &group_end, can_issue_more, &group_count);
if (group_end)
/* Handle the "altivec" attribute. The attribute may have
arguments as follows:
-
+
__attribute__((altivec(vector__)))
__attribute__((altivec(pixel__))) (always followed by 'unsigned short')
__attribute__((altivec(bool__))) (always followed by 'unsigned')
= ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
&& TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
? *IDENTIFIER_POINTER (TREE_VALUE (args))
- : '?');
+ : '?');
while (POINTER_TYPE_P (type)
|| TREE_CODE (type) == FUNCTION_TYPE
struct attribute_spec.handler. */
static tree
-rs6000_handle_longcall_attribute (tree *node, tree name,
- tree args ATTRIBUTE_UNUSED,
- int flags ATTRIBUTE_UNUSED,
+rs6000_handle_longcall_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
bool *no_add_attrs)
{
if (TREE_CODE (*node) != FUNCTION_TYPE
data section. */
static void
-rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
+rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
unsigned HOST_WIDE_INT align)
{
if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
the initial value of DECL requires link-time relocations. */
static void
-rs6000_elf_select_section (tree decl, int reloc,
+rs6000_elf_select_section (tree decl, int reloc,
unsigned HOST_WIDE_INT align)
{
/* Pretend that we're always building for a shared library when
/* Remember to generate a branch island for far calls to the given
function. */
-static void
+static void
add_compiler_branch_island (tree label_name, tree function_name, int line_number)
{
tree branch_island = build_tree_list (function_name, label_name);
strcat (tmp_buf, "_pic\n");
strcat (tmp_buf, label);
strcat (tmp_buf, "_pic:\n\tmflr r11\n");
-
+
strcat (tmp_buf, "\taddis r11,r11,ha16(");
strcat (tmp_buf, name_buf);
strcat (tmp_buf, " - ");
strcat (tmp_buf, label);
strcat (tmp_buf, "_pic)\n");
-
+
strcat (tmp_buf, "\tmtlr r0\n");
-
+
strcat (tmp_buf, "\taddi r12,r11,lo16(");
strcat (tmp_buf, name_buf);
strcat (tmp_buf, " - ");
strcat (tmp_buf, label);
strcat (tmp_buf, "_pic)\n");
-
+
strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
}
else
}
/* INSN is either a function call or a millicode call. It may have an
- unconditional jump in its delay slot.
+ unconditional jump in its delay slot.
CALL_DEST is the routine we are calling. */
{
tree labelname;
tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
-
+
if (no_previous_def (funname))
{
int line_number = 0;
label++;
local_label_0 = alloca (sizeof("\"L0000000000$spb\""));
sprintf (local_label_0, "\"L%011d$spb\"", label);
-
+
fprintf (file, "\tmflr r0\n");
fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
fprintf (file, "\tmtctr r12\n");
fprintf (file, "\tbctr\n");
}
-
+
machopic_lazy_symbol_ptr_section ();
fprintf (file, "%s:\n", lazy_ptr_name);
fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
#define SMALL_INT(X) ((unsigned) (INTVAL(X) + 0x8000) < 0x10000)
rtx
-rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
+rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
rtx reg)
{
rtx base, offset;
}
static void
-rs6000_xcoff_select_section (tree decl, int reloc,
+rs6000_xcoff_select_section (tree decl, int reloc,
unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
if (decl_readonly_section_1 (decl, reloc, 1))
toc entry. */
static void
-rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
+rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
scanned. In either case, *TOTAL contains the cost result. */
static bool
-rs6000_rtx_costs (rtx x, int code, int outer_code ATTRIBUTE_UNUSED,
+rs6000_rtx_costs (rtx x, int code, int outer_code ATTRIBUTE_UNUSED,
int *total)
{
enum machine_mode mode = GET_MODE (x);
CLASS1 to one of CLASS2. */
int
-rs6000_register_move_cost (enum machine_mode mode,
+rs6000_register_move_cost (enum machine_mode mode,
enum reg_class from, enum reg_class to)
{
/* Moves from/to GENERAL_REGS. */
/* Everything else has to go through GENERAL_REGS. */
else
- return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
+ return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
+ rs6000_register_move_cost (mode, from, GENERAL_REGS));
}
or from memory. */
int
-rs6000_memory_move_cost (enum machine_mode mode, enum reg_class class,
+rs6000_memory_move_cost (enum machine_mode mode, enum reg_class class,
int in ATTRIBUTE_UNUSED)
{
if (reg_classes_intersect_p (class, GENERAL_REGS))
}
/* target hook eh_return_filter_mode */
-static enum machine_mode
+static enum machine_mode
rs6000_eh_return_filter_mode (void)
{
return TARGET_32BIT ? SImode : word_mode;
}
+/* Target hook for vector_mode_supported_p. */
+static bool
+rs6000_vector_mode_supported_p (enum machine_mode mode)
+{
+
+ if (TARGET_SPE && SPE_VECTOR_MODE (mode))
+ return true;
+
+ else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
+ return true;
+
+ else
+ return false;
+}
+
#include "gt-rs6000.h"
Override the macro definitions when compiling libobjc to avoid undefined
reference to rs6000_alignment_flags due to library's use of GCC alignment
macros which use the macros below. */
-
+
#ifndef IN_TARGET_LIBS
#define MASK_ALIGN_POWER 0x00000000
#define MASK_ALIGN_NATURAL 0x00000001
avoid invalidating older SPE eh_frame info.
We must map them here to avoid huge unwinder tables mostly consisting
- of unused space. */
+ of unused space. */
#define DWARF_REG_TO_UNWIND_COLUMN(r) \
((r) > 1200 ? ((r) - 1200 + FIRST_PSEUDO_REGISTER) : (r))
(`CALL_USED_REGISTERS' must be a superset of `FIXED_REGISTERS').
This macro is optional. If not specified, it defaults to the value
of `CALL_USED_REGISTERS'. */
-
+
#define CALL_REALLY_USED_REGISTERS \
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, FIXED_R13, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
v19 - v14 (not saved or used for anything)
v31 - v20 (saved; order given to save least number)
*/
-
+
#if FIXED_R2 == 1
#define MAYBE_R2_AVAILABLE
#define MAYBE_R2_FIXED 2,
|| (MODE) == V1DImode \
|| (MODE) == V2SImode)
-/* Define this macro to be nonzero if the port is prepared to handle
- insns involving vector mode MODE. At the very least, it must have
- move patterns for this mode. */
-
-#define VECTOR_MODE_SUPPORTED_P(MODE) \
- ((TARGET_SPE && SPE_VECTOR_MODE (MODE)) \
- || (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (MODE)))
-
#define UNITS_PER_SIMD_WORD \
(TARGET_ALTIVEC ? 16 : (TARGET_SPE ? 8 : 0) )
'T' is a constant that can be placed into a 32-bit mask operand
'U' is for V.4 small data references.
'W' is a vector constant that can be easily generated (no mem refs).
- 'Y' is a indexed or word-aligned displacement memory operand.
+   'Y' is an indexed or word-aligned displacement memory operand.
't' is for AND masks that can be performed by two rldic{l,r} insns. */
#define EXTRA_CONSTRAINT(OP, C) \
: 0)
/* Define which constraints are memory constraints. Tell reload
- that any memory address can be reloaded by copying the
+ that any memory address can be reloaded by copying the
memory address into a base register if required. */
#define EXTRA_MEMORY_CONSTRAINT(C, STR) \
in some cases it is preferable to use a more restrictive class.
On the RS/6000, we have to return NO_REGS when we want to reload a
- floating-point CONST_DOUBLE to force it to be copied to memory.
+ floating-point CONST_DOUBLE to force it to be copied to memory.
We also don't want to reload integer values into floating-point
registers if we can at all help it. In fact, this can
operand. If we find one, push the reload and jump to WIN. This
macro is used in only one place: `find_reloads_address' in reload.c.
- Implemented on rs6000 by rs6000_legitimize_reload_address.
+ Implemented on rs6000 by rs6000_legitimize_reload_address.
Note that (X) is evaluated twice; this is safe in current usage. */
-
+
#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
do { \
int win; \
extern void sh_expand_unop_v2sf (enum rtx_code, rtx, rtx);
extern void sh_expand_binop_v2sf (enum rtx_code, rtx, rtx, rtx);
extern int sh_expand_t_scc (enum rtx_code code, rtx target);
+extern bool sh_vector_mode_supported_p (enum machine_mode);
#ifdef TREE_CODE
extern void sh_va_start (tree, rtx);
#endif /* TREE_CODE */
Copyright (C) 1993, 1994, 1995, 1997, 1997, 1998, 1999, 2000, 2001, 2002,
2003, 2004 Free Software Foundation, Inc.
Contributed by Steve Chamberlain (sac@cygnus.com).
- Improved by Jim Wilson (wilson@cygnus.com).
+ Improved by Jim Wilson (wilson@cygnus.com).
This file is part of GCC.
static int sh_reorder2 (FILE *, int, rtx *, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx, int);
-
+
static bool sh_function_ok_for_sibcall (tree, tree);
static bool sh_cannot_modify_jumps_p (void);
/* The next 5 hooks have been implemented for reenabling sched1. With the
help of these macros we are limiting the movement of insns in sched1 to
- reduce the register pressure. The overall idea is to keep count of SImode
+ reduce the register pressure. The overall idea is to keep count of SImode
and SFmode regs required by already scheduled insns. When these counts
cross some threshold values; give priority to insns that free registers.
The insn that frees registers is most likely to be the insn with lowest
- LUID (original insn order); but such an insn might be there in the stalled
+ LUID (original insn order); but such an insn might be there in the stalled
queue (Q) instead of the ready queue (R). To solve this, we skip cycles
upto a max of 8 cycles so that such insns may move from Q -> R.
scheduler; it is called inside the sched_init function just after
find_insn_reg_weights function call. It is used to calculate the SImode
and SFmode weights of insns of basic blocks; much similar to what
- find_insn_reg_weights does.
+ find_insn_reg_weights does.
TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.
TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p
+
#undef TARGET_PCH_VALID_P
#define TARGET_PCH_VALID_P sh_pch_valid_p
fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
break;
-
+
case 'N':
if (x == CONST0_RTX (GET_MODE (x)))
{
case MEM:
output_address (XEXP (x, 0));
break;
-
+
case CONST:
if (TARGET_SHMEDIA
&& GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
{
rtx tga_op1, tga_ret, tmp, tmp2;
-
+
switch (tls_kind)
{
case TLS_MODEL_GLOBAL_DYNAMIC:
int label = lf++;
/* The call to print_slot will clobber the operands. */
rtx op0 = operands[0];
-
+
/* If the instruction in the delay slot is annulled (true), then
there is no delay slot where we can put it now. The only safe
place for it is after the label. final will do that by default. */
-
+
if (final_sequence
&& ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0)))
{
}
else
asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
-
+
output_asm_insn ("bra\t%l0", &op0);
fprintf (asm_out_file, "\tnop\n");
(*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
-
+
return "";
}
/* When relaxing, handle this like a short branch. The linker
case 4:
{
char buffer[10];
-
+
sprintf (buffer, "b%s%ss\t%%l0",
logic ? "t" : "f",
ASSEMBLER_DIALECT ? "/" : ".");
fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
#endif
-
+
if (TARGET_ELF)
/* We need to show the text section with the proper
attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
/* Likewise, but for shift amounts < 16, up to three highmost bits
might be clobbered. This is typically used when combined with some
kind of sign or zero extension. */
-
+
static const char ext_shift_insns[] =
{ 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
/* Output RTL to split a constant shift into its component SH constant
shift instructions. */
-
+
void
gen_shifty_op (int code, rtx *operands)
{
/* Truncate the shift count in case it is out of bounds. */
value = value & 0x1f;
-
+
if (value == 31)
{
if (code == LSHIFTRT)
for (i = 0; i < max; i++)
gen_ashift (code, shift_amounts[value][i], operands[0]);
}
-
+
/* Same as above, but optimized for values where the topmost bits don't
matter. */
|| reload_in_progress || reload_completed)
{
rtx operands[3];
-
+
/* Cases 3 and 4 should be handled by this split
only while combining */
if (kind > 2)
gen_rtx_UNSPEC (GET_MODE (sym),
gen_rtvec (1, sym),
UNSPEC_DATALABEL));
-
+
if (GET_CODE (sym) != SYMBOL_REF)
abort ();
pool_size = 0;
}
-
+
for (i = 0; i < pool_size; i++)
{
pool_node *p = &pool_vector[i];
case SUBREG:
{
rtx y = SUBREG_REG (x);
-
+
if (GET_CODE (y) != REG)
break;
if (REGNO (y) < 16)
pass 1. Pass 2 if a definite blocking insn is needed.
-1 is used internally to avoid deep recursion.
If a blocking instruction is made or recognized, return it. */
-
+
static rtx
gen_block_redirect (rtx jump, int addr, int need_block)
{
it would cause trouble if an interrupt occurred. */
unsigned try = 0x7fff, used;
int jump_left = flag_expensive_optimizations + 1;
-
+
/* It is likely that the most recent eligible instruction is wanted for
the delay slot. Therefore, find out which registers it uses, and
try to avoid using them. */
-
+
for (scan = jump; (scan = PREV_INSN (scan)); )
{
enum rtx_code code;
threading with a jump beyond the delay slot insn.
Don't check if we are called recursively; the jump has been or will be
checked in a different invocation then. */
-
+
else if (optimize && need_block >= 0)
{
rtx next = next_active_insn (next_active_insn (dest));
Hence, after delay slot scheduling, we'll have to expect
NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
the jump. */
-
+
INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
return insn;
{
rtx next = next_real_insn (barrier_or_label), pat, prev;
int slot, credit, jump_to_next = 0;
-
+
if (! next)
return 0;
an alignment, against that of fetching unneeded insn in front of the
branch target when there is no alignment. */
- /* There are two delay_slot cases to consider. One is the simple case
- where the preceding branch is to the insn beyond the barrier (simple
- delay slot filling), and the other is where the preceding branch has
- a delay slot that is a duplicate of the insn after the barrier
- (fill_eager_delay_slots) and the branch is to the insn after the insn
+ /* There are two delay_slot cases to consider. One is the simple case
+ where the preceding branch is to the insn beyond the barrier (simple
+ delay slot filling), and the other is where the preceding branch has
+ a delay slot that is a duplicate of the insn after the barrier
+ (fill_eager_delay_slots) and the branch is to the insn after the insn
after the barrier. */
/* PREV is presumed to be the JUMP_INSN for the barrier under
if (GET_CODE (PATTERN (prev)) == SEQUENCE)
{
prev = XVECEXP (PATTERN (prev), 0, 1);
- if (INSN_UID (prev) == INSN_UID (next))
+ if (INSN_UID (prev) == INSN_UID (next))
{
/* Delay slot was filled with insn at jump target. */
jump_to_next = 1;
/* There is no upper bound on redundant instructions
that might have been skipped, but we must not put an
alignment where none had been before. */
- || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
- (INSN_P (x)
+ || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
+ (INSN_P (x)
&& (INSN_CODE (x) == CODE_FOR_block_branch_redirect
|| INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
|| INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
}
}
}
-
+
return align_jumps_log;
}
if (type == TYPE_CBRANCH)
{
rtx next, beyond;
-
+
if (get_attr_length (insn) > 4)
{
rtx src = SET_SRC (PATTERN (insn));
rtx label = 0;
int dest_uid = get_dest_uid (olabel, max_uid);
struct far_branch *bp = uid_branch[dest_uid];
-
+
/* redirect_jump needs a valid JUMP_LABEL, and it might delete
the label if the LABEL_NUSES count drops to zero. There is
always a jump_optimize pass that sets these values, but it
beyond
= next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
0));
-
+
if (beyond
&& (GET_CODE (beyond) == JUMP_INSN
|| ((beyond = next_active_insn (beyond))
gen_block_redirect (beyond,
INSN_ADDRESSES (INSN_UID (beyond)), 1);
}
-
+
next = next_active_insn (insn);
if ((GET_CODE (next) == JUMP_INSN
x = gen_pop_e (gen_rtx_REG (SFmode, rn));
else
x = gen_pop (gen_rtx_REG (SImode, rn));
-
+
x = emit_insn (x);
REG_NOTES (x)
= gen_rtx_EXPR_LIST (REG_INC,
stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
return stack_space;
}
-
+
/* Decide whether we should reserve space for callee-save target registers,
in case target register allocation wants to use them. REGS_SAVED is
the space, in bytes, that is already required for register saves.
use reverse order. Returns the last entry written to (not counting
the delimiter). OFFSET_BASE is a number to be added to all offset
entries. */
-
+
static save_entry *
sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
int offset_base)
int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
rtx insn;
- if (i >= (NPARM_REGS(SImode)
+ if (i >= (NPARM_REGS(SImode)
- current_function_args_info.arg_count[(int) SH_ARG_INT]
))
break;
that already happens to be at the function start into the prologue. */
if (target_flags != save_flags && ! current_function_interrupt)
emit_insn (gen_toggle_sz ());
-
+
if (TARGET_SH5)
{
int offset_base, offset;
GEN_INT (offset - offset_in_r0)));
offset_in_r0 += offset - offset_in_r0;
}
-
+
if (pre_dec != NULL_RTX)
{
if (! sp_in_r0)
insn = emit_move_insn (mem_rtx, reg_rtx);
RTX_FRAME_RELATED_P (insn) = 1;
- if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
+ if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
{
rtx reg_rtx = gen_rtx_REG (mode, reg);
rtx set, note_rtx;
post_inc = NULL_RTX;
break;
-
+
post_inc_ok:
mem_rtx = NULL_RTX;
}
while (0);
-
+
if (mem_rtx != NULL_RTX)
goto addr_ok;
GEN_INT (offset - offset_in_r0)));
offset_in_r0 += offset - offset_in_r0;
}
-
+
if (post_inc != NULL_RTX)
{
if (! sp_in_r0)
(Pmode, r0, stack_pointer_rtx));
sp_in_r0 = 1;
}
-
+
mem_rtx = post_inc;
offset_in_r0 += GET_MODE_SIZE (mode);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
int j = (FIRST_PSEUDO_REGISTER - 1) - i;
-
+
if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
&& hard_regs_intersect_p (&live_regs_mask,
				      &reg_class_contents[DF_REGS]))
int offset;
save_schedule schedule;
save_entry *entry;
-
+
entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
offset = entry[1].offset;
for (; entry->mode != VOIDmode; entry--)
if (TARGET_SHCOMPACT)
return const0_rtx;
}
-
+
if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
{
error ("__builtin_saveregs not supported by this subtarget");
GEN_INT (-2 * UNITS_PER_WORD)));
mem = gen_rtx_MEM (DFmode, fpregs);
set_mem_alias_set (mem, alias_set);
- emit_move_insn (mem,
+ emit_move_insn (mem,
gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
}
regno = first_floatreg;
return gen_rtx_REG (mode, regno);
}
-
+
if (TARGET_SH5)
{
if (mode == VOIDmode && TARGET_SHCOMPACT)
return 0;
}
-
+
/* Update the data in CUM to advance over an argument
of mode MODE and data type TYPE.
(TYPE is null for libcalls where that information may not be
int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
save_schedule schedule;
save_entry *entry;
-
+
n += total_auto_space;
/* If it wasn't saved, there's not much we can do. */
/* Symbian support adds three new attributes:
dllexport - for exporting a function/variable that will live in a dll
dllimport - for importing a function/variable from a dll
-
+
Microsoft allows multiple declspecs in one __declspec, separating
them with spaces. We do NOT support this. Instead, use __declspec
multiple times. */
int flag_mask
= (SH1_BIT | SH2_BIT | SH3_BIT | SH_E_BIT | HARD_SH4_BIT | FPU_SINGLE_BIT
| SH4_BIT | HITACHI_BIT | LITTLE_ENDIAN_BIT);
-
+
/* -fpic and -fpie also usually make a PCH invalid. */
if (data[0] != flag_pic)
return _("created and used with different settings of -fpic");
}
data += sizeof (target_flags);
len -= sizeof (target_flags);
-
+
/* Check string options. */
#ifdef TARGET_OPTIONS
for (i = 0; i < ARRAY_SIZE (target_options); i++)
#endif
return NULL;
-
+
make_message:
{
char *r;
}
\f
/* Return the destination address of a branch. */
-
+
static int
branch_dest (rtx branch)
{
remove assignments that are dead due to a following assignment in the
same basic block. */
-static void
+static void
mark_use (rtx x, rtx *reg_set_block)
{
enum rtx_code code;
abort ();
}
-/* This function will set the fpscr from memory.
+/* This function will set the fpscr from memory.
MODE is the mode we are setting it to. */
void
fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
return 2;
/* sh-dsp parallel processing insn take four bytes instead of two. */
-
+
if (GET_CODE (insn) == INSN)
{
int sum = 0;
/* Return true if it's possible to redirect BRANCH1 to the destination
of an unconditional jump BRANCH2. We only want to do this if the
resulting branch will have a short displacement. */
-int
+int
sh_can_redirect_branch (rtx branch1, rtx branch2)
{
if (flag_expensive_optimizations && simplejump_p (branch2))
rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
rtx insn;
int distance;
-
- for (distance = 0, insn = NEXT_INSN (branch1);
- insn && distance < 256;
+
+ for (distance = 0, insn = NEXT_INSN (branch1);
+ insn && distance < 256;
insn = PREV_INSN (insn))
{
- if (insn == dest)
+ if (insn == dest)
return 1;
else
distance += get_attr_length (insn);
}
- for (distance = 0, insn = NEXT_INSN (branch1);
- insn && distance < 256;
+ for (distance = 0, insn = NEXT_INSN (branch1);
+ insn && distance < 256;
insn = NEXT_INSN (insn))
{
- if (insn == dest)
+ if (insn == dest)
return 1;
else
distance += get_attr_length (insn);
if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
return SCHED_GROUP_P (tmp2) ? 1 : -1;
- /* If insns are equally good, sort by INSN_LUID (original insn order), This
+  /* If insns are equally good, sort by INSN_LUID (original insn order).  This
minimizes instruction movement, thus minimizing sched's effect on
register pressure. */
return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
/* Skip cycles if the current register pressure is high. */
-static int
+static int
sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
rtx *ready ATTRIBUTE_UNUSED,
if (reload_completed)
return cached_can_issue_more;
- if (high_pressure(SFmode) || high_pressure (SImode))
+ if (high_pressure(SFmode) || high_pressure (SImode))
skip_cycles = 1;
return cached_can_issue_more;
/* Skip cycles without sorting the ready queue. This will move insn from
Q->R. If this is the last cycle we are skipping; allow sorting of ready
- queue by sh_reorder. */
+ queue by sh_reorder. */
-/* Generally, skipping these many cycles are sufficient for all insns to move
- from Q -> R. */
-#define MAX_SKIPS 8
+/* Generally, skipping this many cycles is sufficient for all insns to move
+ from Q -> R. */
+#define MAX_SKIPS 8
static int
sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
if (reload_completed)
return 0;
- if (skip_cycles)
+ if (skip_cycles)
{
if ((clock_var - last_clock_var) < MAX_SKIPS)
{
return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
}
\f
-/*
+/*
On the SH1..SH4, the trampoline looks like
2 0002 D202 mov.l l2,r2
1 0000 D301 mov.l l1,r3
emit_move_insn (gen_rtx_MEM (Pmode,
plus_constant (tramp,
fixed_len
- + GET_MODE_SIZE (Pmode))),
+ + GET_MODE_SIZE (Pmode))),
cxt);
emit_insn (gen_ic_invalidate_line (tramp));
return;
static bool
sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
- return (decl
+ return (decl
&& (! TARGET_SHCOMPACT
|| current_function_args_info.stack_regs == 0)
&& ! sh_cfun_interrupt_handler_p ());
}
}
+/* Implements target hook vector_mode_supported_p. */
+bool
+sh_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_FPU_ANY
+ && ((mode == V2SFmode)
+ || (mode == V4SFmode)
+ || (mode == V16SFmode)))
+ return true;
+
+ else if (TARGET_SHMEDIA
+ && ((mode == V8QImode)
+ || (mode == V2HImode)
+ || (mode == V4HImode)
+ || (mode == V2SImode)))
+ return true;
+
+ return false;
+}
+
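For illustration (not part of the patch itself): unlike the static rs6000 hook
above, sh_vector_mode_supported_p is defined non-static and declared extern in
sh-protos.h, presumably so that the sh.md insn conditions further down can call
it directly in place of the removed VECTOR_MODE_SUPPORTED_P macro, e.g.

    && sh_vector_mode_supported_p (GET_MODE (operands[0]))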
static void
sh_init_builtins (void)
{
if ((dstclass == FPUL_REGS
&& (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
- || (srcclass == FPUL_REGS
+ || (srcclass == FPUL_REGS
&& (dstclass == PR_REGS || dstclass == MAC_REGS)))
return 7;
/* Find the "this" pointer. We have such a wide range of ABIs for the
SH that it's best to do this completely machine independently.
- "this" is passed as first argument, unless a structure return pointer
+ "this" is passed as first argument, unless a structure return pointer
comes first, in which case "this" comes second. */
INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
#ifndef PCC_STATIC_STRUCT_RETURN
structure_value_byref = 1;
#endif /* not PCC_STATIC_STRUCT_RETURN */
if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
- {
+ {
tree ptype = build_pointer_type (TREE_TYPE (funtype));
FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
break;
if (! INSN_P (insn))
continue;
-
+
if (GET_CODE (PATTERN (insn)) == SEQUENCE)
insn = XVECEXP (PATTERN (insn), 0, 0);
if (GET_CODE (PATTERN (insn)) != PARALLEL
return sh_fsca_sf2int_rtx;
}
-
+
/* This function returns a constant rtx that represents pi / 2**15 in
DFmode. it's used to scale DFmode angles, in radians, to a
fixed-point signed 16.16-bit fraction of a full circle, i.e., 2*pi
return sh_fsca_df2int_rtx;
}
-
+
/* This function returns a constant rtx that represents 2**15 / pi in
SFmode. it's used to scale a fixed-point signed 16.16-bit fraction
of a full circle back to a SFmode value, i.e., 0x10000 maps to
: (REGNO) == FPSCR_REG ? (MODE) == PSImode \
: 1)
-/* Value is 1 if MODE is a supported vector mode. */
-#define VECTOR_MODE_SUPPORTED_P(MODE) \
- ((TARGET_FPU_ANY \
- && ((MODE) == V2SFmode || (MODE) == V4SFmode || (MODE) == V16SFmode)) \
- || (TARGET_SHMEDIA \
- && ((MODE) == V8QImode || (MODE) == V2HImode || (MODE) == V4HImode \
- || (MODE) == V2SImode)))
-
/* Value is 1 if it is a good idea to tie two pseudo registers
when one has mode MODE1 and one has mode MODE2.
If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
(match_operand 1 "sh_rep_vec" ""))]
"TARGET_SHMEDIA && reload_completed
&& GET_MODE (operands[0]) == GET_MODE (operands[1])
- && VECTOR_MODE_SUPPORTED_P (GET_MODE (operands[0]))
+ && sh_vector_mode_supported_p (GET_MODE (operands[0]))
&& GET_MODE_SIZE (GET_MODE (operands[0])) == 8
&& (XVECEXP (operands[1], 0, 0) != const0_rtx
|| XVECEXP (operands[1], 0, 1) != const0_rtx)
(match_operand 1 "sh_const_vec" ""))]
"TARGET_SHMEDIA && reload_completed
&& GET_MODE (operands[0]) == GET_MODE (operands[1])
- && VECTOR_MODE_SUPPORTED_P (GET_MODE (operands[0]))
+ && sh_vector_mode_supported_p (GET_MODE (operands[0]))
&& operands[1] != CONST0_RTX (GET_MODE (operands[1]))"
[(set (match_dup 0) (match_dup 1))]
"
return \"\";
}"
[(set_attr "type" "other")])
-
archive's table of contents. Defining this macro to be nonzero has
the consequence that certain symbols will not be made weak that
otherwise would be. The C++ ABI requires this macro to be zero;
- see the documentation. */
+ see the documentation. */
#ifndef TARGET_WEAK_NOT_IN_ARCHIVE_TOC
#define TARGET_WEAK_NOT_IN_ARCHIVE_TOC 0
#endif
#endif
/* If the target supports weak symbols, define TARGET_ATTRIBUTE_WEAK to
- provide a weak attribute. Else define it to nothing.
+ provide a weak attribute. Else define it to nothing.
This would normally belong in ansidecl.h, but SUPPORTS_WEAK is
not available at that time.
/* By default, we generate a label at the beginning and end of the
text section, and compute the size of the text section by
- subtracting the two. However, on some platforms that doesn't
+ subtracting the two. However, on some platforms that doesn't
work, and we use the section itself, rather than a label at the
beginning of it, to indicate the start of the section. On such
platforms, define this to zero. */
/* By default, the C++ compiler will use function addresses in the
vtable entries. Setting this nonzero tells the compiler to use
function descriptors instead. The value of this macro says how
- many words wide the descriptor is (normally 2). It is assumed
+ many words wide the descriptor is (normally 2). It is assumed
that the address of a function descriptor may be treated as a
pointer to a function. */
#ifndef TARGET_VTABLE_USES_DESCRIPTORS
#define HAS_LONG_UNCOND_BRANCH 0
#endif
-#ifndef VECTOR_MODE_SUPPORTED_P
-#define VECTOR_MODE_SUPPORTED_P(MODE) 0
-#endif
-
#ifndef UNITS_PER_SIMD_WORD
#define UNITS_PER_SIMD_WORD 0
#endif
#endif
/* Indicate that CLZ and CTZ are undefined at zero. */
-#ifndef CLZ_DEFINED_VALUE_AT_ZERO
+#ifndef CLZ_DEFINED_VALUE_AT_ZERO
#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) 0
#endif
-#ifndef CTZ_DEFINED_VALUE_AT_ZERO
+#ifndef CTZ_DEFINED_VALUE_AT_ZERO
#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) 0
#endif
(DImode)} is assumed.
@end defmac
-@defmac VECTOR_MODE_SUPPORTED_P (@var{mode})
-Define this macro to be nonzero if the port is prepared to handle insns
-involving vector mode @var{mode}. At the very least, it must have move
-patterns for this mode.
-@end defmac
-
@defmac STACK_SAVEAREA_MODE (@var{save_level})
If defined, an expression of type @code{enum machine_mode} that
specifies the mode of the save area operand of a
Define this macro to map register numbers held in the call frame info
that GCC has collected using @code{DWARF_FRAME_REGNUM} to those that
should be output in .debug_frame (@code{@var{for_eh}} is zero) and
-.eh_frame (@code{@var{for_eh}} is nonzero). The default is to
+.eh_frame (@code{@var{for_eh}} is nonzero). The default is to
return @code{@var{regno}}.
@end defmac
@end defmac
@deftypefn {Target Hook} bool TARGET_PASS_BY_REFERENCE (CUMULATIVE_ARGS *@var{cum}, enum machine_mode @var{mode}, tree @var{type}, bool @var{named})
-This target hook should return @code{true} if an argument at the
+This target hook should return @code{true} if an argument at the
position indicated by @var{cum} should be passed by reference. This
-predicate is queried after target independent reasons for being
+predicate is queried after target independent reasons for being
passed by reference, such as @code{TREE_ADDRESSABLE (type)}.
If the hook returns true, a copy of that argument is made in memory and a
@code{gimplify.c:gimplify_expr}.
@end deftypefn
+@deftypefn {Target Hook} bool TARGET_VECTOR_MODE_SUPPORTED_P (enum machine_mode @var{mode})
+Define this to return nonzero if the port is prepared to handle
+insns involving vector mode @var{mode}. At the very least, it
+must have move patterns for this mode.
+@end deftypefn
+
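For illustration only, a minimal sketch (not part of the patch) of what a port
implementation of the new hook looks like, modelled on the sh.c and rs6000.c
hunks elsewhere in this patch.  The port prefix "foo" and the predicate
FOO_VECTOR_MODE_P are hypothetical placeholders:

    /* config/foo/foo.c (sketch).  Claim support only for vector modes the
       backend actually has move patterns for.  */
    static bool
    foo_vector_mode_supported_p (enum machine_mode mode)
    {
      return FOO_VECTOR_MODE_P (mode);
    }

    /* Register the hook in the port's target vector; without this the
       hook_bool_mode_false default from target-def.h rejects every mode.  */
    #undef TARGET_VECTOR_MODE_SUPPORTED_P
    #define TARGET_VECTOR_MODE_SUPPORTED_P foo_vector_mode_supported_p

Generic code then queries targetm.vector_mode_supported_p (mode) rather than
the poisoned VECTOR_MODE_SUPPORTED_P macro, as the expr.c and stor-layout.c
hunks below do.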
@node Scalar Return
@subsection How Scalar Function Values Are Returned
@cindex return values in registers
@defmac TARGET_WEAK_NOT_IN_ARCHIVE_TOC
A C expression that evaluates to true if the target's linker expects
that weak symbols do not appear in a static archive's table of contents.
-The default is @code{0}.
+The default is @code{0}.
Leaving weak symbols out of an archive's table of contents means that,
if a symbol will only have a definition in one translation unit and
@end deftypefn
@deftypefn {Target Hook} bool TARGET_CXX_GUARD_MASK_BIT (void)
-This hook determines how guard variables are used. It should return
+This hook determines how guard variables are used. It should return
@code{false} (the default) if first byte should be used. A return value of
@code{true} indicates the least significant bit should be used.
@end deftypefn
tree pred = TREE_OPERAND (exp, 0);
tree then_ = TREE_OPERAND (exp, 1);
tree else_ = TREE_OPERAND (exp, 2);
-
+
if (TREE_CODE (then_) != GOTO_EXPR
|| TREE_CODE (GOTO_DESTINATION (then_)) != LABEL_DECL
|| TREE_CODE (else_) != GOTO_EXPR
|| TREE_CODE (GOTO_DESTINATION (else_)) != LABEL_DECL)
abort ();
-
+
jumpif (pred, label_rtx (GOTO_DESTINATION (then_)));
return expand_expr (else_, const0_rtx, VOIDmode, 0);
}
-
+
/* Note that COND_EXPRs whose type is a structure or union
are required to be constructed to contain assignments of
a temporary variable, so that we can evaluate them here
|| TREE_TYPE (TREE_OPERAND (exp, 1)) == void_type_node
|| TREE_TYPE (TREE_OPERAND (exp, 2)) == void_type_node)
abort ();
-
+
/* If we are not to produce a result, we have no target. Otherwise,
if a target was specified use it; it will not be used as an
intermediate target unless it is safe. If no target, use a
temporary. */
-
+
if (modifier != EXPAND_STACK_PARM
&& original_target
&& safe_from_p (original_target, TREE_OPERAND (exp, 0), 1)
temp = original_target;
else
temp = assign_temp (type, 0, 0, 1);
-
+
do_pending_stack_adjust ();
NO_DEFER_POP;
op0 = gen_label_rtx ();
jumpifnot (TREE_OPERAND (exp, 0), op0);
store_expr (TREE_OPERAND (exp, 1), temp,
modifier == EXPAND_STACK_PARM ? 2 : 0);
-
+
emit_jump_insn (gen_jump (op1));
emit_barrier ();
emit_label (op0);
store_expr (TREE_OPERAND (exp, 2), temp,
modifier == EXPAND_STACK_PARM ? 2 : 0);
-
+
emit_label (op1);
OK_DEFER_POP;
return temp;
-
+
case MODIFY_EXPR:
{
/* If lhs is complex, expand calls in rhs before computing it.
return 0;
/* Hardware support. Woo hoo! */
- if (VECTOR_MODE_SUPPORTED_P (mode))
+ if (targetm.vector_mode_supported_p (mode))
return 1;
innermode = GET_MODE_INNER (mode);
+2004-08-23 Eric Christopher <echristo@redhat.com>

+
+	* trans-types.c (gfc_type_for_mode): Remove VECTOR_MODE_SUPPORTED_P
+ usage. Use build_vector_type_for_mode for vector types.
+
2004-08-22 Richard Henderson <rth@redhat.com>
PR 13465
2004-08-22 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
* check.c (gfc_check_reduction): Rename to ...
- (check_reduction): ... this. Make static. Don't check type of
+ (check_reduction): ... this. Make static. Don't check type of
first argument.
(gfc_check_minval_maxval, gfc_check_prodcut_sum): New functions.
* intrinsic.c (add_functions): Change MAXVAL, MINVAL, PRODUCT and
(gfc_match_simple_where, match_forall_header,
gfc_match_simple_forall): New functions.
(gfc_match_forall): Use match_forall_header.
-
+
2004-08-19 Paul Brook <paul@codesourcery.com>
PR fortran/17091
2004-08-19 Paul Brook <paul@codesourcery.com>
PR fortran/14976
- PR fortran/16228
+ PR fortran/16228
* data.c (assign_substring_data_value): Remove.
(create_character_intializer): New function.
(gfc_assign_data_value): Track the typespec for the current
PR fortran/17030
* f95-lang.c (gfc_init_builtin_functions): Initialize the builtins
for cabs{,f} and copysign{,f}.
- * trans-decl.c (gfor_fndecl_math_cabsf): Delete.
- (gfor_fndecl_math_cabs): Delete.
- (gfor_fndecl_math_sign4): Delete.
- (gfor_fndecl_math_sign8): Delete.
+ * trans-decl.c (gfor_fndecl_math_cabsf): Delete.
+ (gfor_fndecl_math_cabs): Delete.
+ (gfor_fndecl_math_sign4): Delete.
+ (gfor_fndecl_math_sign8): Delete.
(gfc_build_intrinsic_function_decls): Remove the
initializing of cabs{,f} and copysign{,f} functions.
* trans-intrinsic.c (gfc_conv_intrinsic_abs): Use the builtins
instead of the functions definitions.
(gfc_conv_intrinsic_sign): Likewise.
- * trans.h (gfor_fndecl_math_cabsf): Delete.
- (gfor_fndecl_math_cabs): Delete.
- (gfor_fndecl_math_sign4): Delete.
- (gfor_fndecl_math_sign8): Delete.
+ * trans.h (gfor_fndecl_math_cabsf): Delete.
+ (gfor_fndecl_math_cabs): Delete.
+ (gfor_fndecl_math_sign4): Delete.
+ (gfor_fndecl_math_sign8): Delete.
2004-08-15 Nathan Sidwell <nathan@codesourcery.com>
* trans-array.c (gfc_trans_array_constructor_value): Use
- build_int_cst.
+ build_int_cst.
* trans-const.c (gfc_build_string_const,
gfc_init_constants, gfc_conv_mpz_to_tree,
- gfc_conv_constant_to_tree): Likewise.
+ gfc_conv_constant_to_tree): Likewise.
* trans-decl.c (gfc_get_symbol_decl): Likewise.
* trans-intrinsic.c (gfc_conv_intrinsic_ibits,
gfc_conv_intrinsic_len, prepare_arg_info): Likewise.
* trans-expr.c (gfc_conv_structure): Handle array pointers.
2004-07-10 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
-
+
PR fortran/16336
* decl.c (gfc_match_save): Use-associated common block
doesn't collide.
* trans-decl.c (generate_local_decl): Remove workaround obsoleted
by fix for PR 15481.
-
+
2004-07-10 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
* trans-common.c: Fix whitespace issues, make variable names
* trans-types.c: Update comment.
2004-07-09 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
-
+
PR fortran/14077
* moduele.c (mio_symbol): Don't I/O initial values unless
symbol is a parameter.
2004-07-09 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
-
+
PR fortran/13201
* resolve.c (resolve_symbol): Verify that the shape of a
parameter array is not only explicit, but also constant.
gfc_set_default_type to issue error if no implicit type
can be found.
* trans-decl.c (gfc_create_module_variable): Remove workaround.
-
+
2004-07-08 Paul Brook <paul@codesourcery.com>
* intrinsic.c (add_sym_4s): New function.
2004-06-29 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
- * decl.c, interface.c, symbol.c, trans-common.c: Add 2004 to
+ * decl.c, interface.c, symbol.c, trans-common.c: Add 2004 to
copyright years.
2004-06-29 Steven Bosscher <stevenb@suse.de>
(gfc_clear_attr): Don't set removed attributes.
(gfc_copy_attr): Don't copy removed attributes.
(traverse_symtree): Remove.
- (gfc_traverse_symtree): Don't traverse symbol
+ (gfc_traverse_symtree): Don't traverse symbol
tree of the passed namespace, but require a symtree to be passed
instead. Unify with traverse_symtree.
(gfc_traverse_ns): Call gfc_traverse_symtree according to new
create_common.
(named_common): take 'gfc_symtree' instead of 'gfc_symbol'.
(gfc_trans_common): Adapt to new data structures.
- * trans-decl.c (gfc_create_module_variables): Remove test for
+ * trans-decl.c (gfc_create_module_variables): Remove test for
removed attribute.
2004-06-29 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
* scanner.c (load_line): Don't truncate preprocessor lines.
Reformat error message.
(preprocessor_line): Issue warning in case of malformed
- preprocessor line.
+ preprocessor line.
2004-06-21 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
* array.c (gfc_insert_constructor): Avoid redundant call to
mpz_comp. Add 2004 to copyright years.
-
+
2004-06-21 Joseph S. Myers <jsm@polyomino.org.uk>
* trans.h (stmtblock_t): Change has_scope to unsigned int.
PR fortran/15211
* trans-intrinsic.c (gfc_conv_intrinsic_len): Deal with arrays
- of strings.
+ of strings.
2004-06-14 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
* intrinsic.h (gfc_check_minloc_maxloc): ... adapt prototype.
* intrinsic.c (add_sym_3ml): New function.
(add_functions): Change to add_sym_3ml for MINLOC, MAXLOC.
- (check_specific): Catch special case MINLOC, MAXLOC.
+ (check_specific): Catch special case MINLOC, MAXLOC.
2004-06-14 Paul Brook <paul@codesourcery.com>
* intrinsic.c (add_sym_2s): New function.
* intrinsic.c: Add etime, dtime, irand, rand, second, srand.
* intrinsic.h: Function prototypes.
- * iresolve.c (gfc_resolve_etime_sub, gfc_resolve_second_sub
+ * iresolve.c (gfc_resolve_etime_sub, gfc_resolve_second_sub
gfc_resolve_srand): New functions.
2004-06-12 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
PR fortran/14957
* decl.c (gfc_match_end): Require END {SUBROUTINE|FUNCTION} for
contained procedure.
-
+
2004-06-12 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
PR fortran/12841
* interface.c (compare_parameter, compare_actual_formal): Don't
check types and array shapes for NULL()
* trans-expr.c (conv_function_call): No double indirection for
- NULL()
+ NULL()
2004-06-09 Toon Moene <toon@moene.indiv.nluug.nl>
2004-06-05 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
- * intrinsic.c (sort_actual): Keep track of type of missing
+ * intrinsic.c (sort_actual): Keep track of type of missing
arguments. (Missing from previous commit.)
2004-06-03 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
2004-05-17 Steve Kargl <kargls@comcast.net>
* arith.c (gfc_real2complex): Range checking wrong part of complex
- number.
-
+ number.
+
2004-05-16 Paul Brook <paul@codesourcery.com>
* options.c (gfc_handle_module_path_options): Fix buffer overrun.
not initialized in a disallowed fashion.
* match.c (gfc_match_common): Likewise.
(var_element): Verify that variable is not in the blank COMMON,
- if it is in a common.
+ if it is in a common.
2004-05-15 Joseph S. Myers <jsm@polyomino.org.uk>
2004-05-15 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
- PR fortran/13702
+ PR fortran/13702
(Port from g95)
* gfortran.h (gfc_linebuf): New typedef.
(linebuf): Remove.
* intrinsic.h: ... declare it here.
* intrinsic.c (add_functions): ... add it as resolving function
for NEAREST.
-
+
2004-05-14 Tobias Schlueter <tobias.schlueter@physik.uni-muenchen.de>
PR fortran/14066
PR fortran/14568
* trans-decl.c (generate_local_decl): Don't warn for unused
- variables which are in common blocks.
+ variables which are in common blocks.
2004-05-13 Diego Novillo <dnovillo@redhat.com>
2004-04-11 Feng Wang <fengwang@nudt.edu.cn>
- PR 14394
+ PR 14394
* trans-const.c (gfc_conv_mpf_to_tree): Loosen the maximum digits of
the real value when converting mpf to string.
* Make-lang.in (GFORTRAN_TEXI): Set it.
(fortran/dfortran.dvi): Use it. Add fortran to include paths.
- (fortran/gfortran.info): Ditto.
+ (fortran/gfortran.info): Ditto.
* gfortran.texi: Major update.
* invoke.texi: New file.
* gfortran.texi: Fix typos.
2004-02-07 Bud Davis <bdavis9659@comcast.net>
-
+
PR gfortran/13909
* intrinsic.c (add_conversions) Use logical conversion instead
of real.
2003-12-31 Huang Chun <chunhuang73@hotmail.com>
PR fortran/13434
- * trans-intrinsic.c (gfc_conv_intrinsic_minmaxval): Fixed bug in
+ * trans-intrinsic.c (gfc_conv_intrinsic_minmaxval): Fixed bug in
minval/maxval.
2003-12-22 Toon Moene <toon@moene.indiv.nluug.nl>
* primary.c (match_substring): Fix substring bug for start point
or end point is NULL.
* trans-expr.c (gfc_conv_substring): Ditto
- * trans-types.c (gfc_sym_type): Get correct type of scalar
+ * trans-types.c (gfc_sym_type): Get correct type of scalar
character variables.
- * trans-intrinsic.c (gfc_conv_intrinsic_len): Handle character in
+ * trans-intrinsic.c (gfc_conv_intrinsic_len): Handle character in
derived type.
2003-12-10 Richard Henderson <rth@redhat.com>
annotate_all_with_locus.
2003-11-11 Canqun Yang <canqun@nudt.edu.cn>
-
+
* options.c (gfc_init_options): Set flag_max_stack_var_size as 32768.
* trans-decl.c (gfc_finish_var_decl): Modified.
* data.c: New file.
2003-09-20 Kejia Zhao <kejia_zh@yahoo.com.cn>
-
+
* trans.h: Add declarations for gfor_fndecl_si_kind and
gfor_fndecl_sr_kind.
* trans-decl.c (g95_build_intrinsic_function_decls): Build them.
2003-08-24 XiaoQiang Zhang (zhangapache@yahoo.com>
- * trans-const.c (gfc_conv_mpz_to_tree): Fix bug, parameter for
+ * trans-const.c (gfc_conv_mpz_to_tree): Fix bug, parameter for
build_int_2 changed from (high, low) to (low, high).
* trans-io.c (ioparm_namelist_name, ioparm_namelist_name_len,
ioparm_namelist_read_mode, iocall_set_nml_val_int,
2003-09-07 Kejia Zhao <kejia_zh@yahoo.com.cn>
- * trans-intrinsic.c (gfc_conv_intrinsic_aint): Fix two bugs. One is
- about case_switch's break. The other is about building the condition
- statement tree, which judges the argument in the range of the
+ * trans-intrinsic.c (gfc_conv_intrinsic_aint): Fix two bugs. One is
+ about case_switch's break. The other is about building the condition
+ statement tree, which judges the argument in the range of the
corresponding integer type.
* trans-intrinsic.c (gfc_conv_intrinsic_mod): MOD and MODULO can work
for the large values.
-
+
2003-09-05 Paul Brook <paul@nowt.org>
* f95-lang.c (expand_function_body): Gimplify the function.
type components.
2003-08-10 Chun Huang <compiler@sohu.com>
-
+
* resolve.c (resolve_formal_arglist): Resolve STATEMENT function.
(resolve_symbol): Ditto.
* trans-expr.c (gfc_conv_statement_function): New function.
Rename g95_* to gfc_*.
2003-07-25 Paul Brook <paul@nowt.org>
-
+
* gfortran.h: Rename from g95.h.
* trans-types.c (boolean_type_node, booelan_true_node,
boolean_false_node): Remove.
(g95_conv_intrinsic_anyall): New function.
* iresolve.c (g95_resolve_any, g95_resolve_all): Include rank in
mangled name
-
assert (rank <= GFC_DTYPE_RANK_MASK);
size = TYPE_SIZE_UNIT (type);
-
+
i = rank | (n << GFC_DTYPE_TYPE_SHIFT);
if (size && INTEGER_CST_P (size))
{
if (mode == TYPE_MODE (build_pointer_type (integer_type_node)))
return build_pointer_type (integer_type_node);
-#ifdef VECTOR_MODE_SUPPORTED_P
- if (VECTOR_MODE_SUPPORTED_P (mode))
+ if (VECTOR_MODE_P (mode))
{
- switch (mode)
- {
- case V16QImode:
- return unsignedp ? unsigned_V16QI_type_node : V16QI_type_node;
- case V8HImode:
- return unsignedp ? unsigned_V8HI_type_node : V8HI_type_node;
- case V4SImode:
- return unsignedp ? unsigned_V4SI_type_node : V4SI_type_node;
- case V2DImode:
- return unsignedp ? unsigned_V2DI_type_node : V2DI_type_node;
- case V2SImode:
- return unsignedp ? unsigned_V2SI_type_node : V2SI_type_node;
- case V4HImode:
- return unsignedp ? unsigned_V4HI_type_node : V4HI_type_node;
- case V8QImode:
- return unsignedp ? unsigned_V8QI_type_node : V8QI_type_node;
- case V16SFmode:
- return V16SF_type_node;
- case V4SFmode:
- return V4SF_type_node;
- case V2SFmode:
- return V2SF_type_node;
- case V2DFmode:
- return V2DF_type_node;
- default:
- break;
- }
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ tree inner_type = gfc_type_for_mode (inner_mode, unsignedp);
+ if (inner_type != NULL_TREE)
+ return build_vector_type_for_mode (inner_type, mode);
}
-#endif
return 0;
}
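For illustration (not part of the patch), a quick walk-through of the new
gfc_type_for_mode path, assuming V4SImode as the incoming mode:

    /* Sketch of the replacement path for mode == V4SImode:
         inner_mode = GET_MODE_INNER (V4SImode);              -> SImode
         inner_type = gfc_type_for_mode (SImode, unsignedp);  -> 4-byte integer
         return build_vector_type_for_mode (inner_type, V4SImode);
       One generic call replaces the old hand-written switch over
       individual vector modes.  */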
return false;
}
+/* Generic hook that takes (enum machine_mode) and returns false. */
+bool
+hook_bool_mode_false (enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ return false;
+}
+
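For illustration (not part of the patch): hook_bool_mode_false is the default
that target-def.h installs for TARGET_VECTOR_MODE_SUPPORTED_P (see the
target-def.h hunk below), so a port that never overrides the hook reports every
vector mode as unsupported.  From generic code that looks roughly like this;
both called functions are hypothetical stand-ins:

    /* With the default in place, targetm.vector_mode_supported_p is
       hook_bool_mode_false, so this test is always false.  */
    if (targetm.vector_mode_supported_p (V4SImode))
      expand_with_vector_insns ();  /* hypothetical */
    else
      expand_as_scalars ();         /* hypothetical */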
/* Generic hook that takes (FILE *, const char *) and does nothing. */
void
hook_void_FILEptr_constcharptr (FILE *a ATTRIBUTE_UNUSED, const char *b ATTRIBUTE_UNUSED)
#ifndef GCC_HOOKS_H
#define GCC_HOOKS_H
+#include "machmode.h"
+
extern bool hook_bool_void_false (void);
extern bool hook_bool_bool_false (bool);
+extern bool hook_bool_mode_false (enum machine_mode);
extern bool hook_bool_tree_false (tree);
extern bool hook_bool_tree_true (tree);
extern bool hook_bool_tree_hwi_hwi_tree_false (tree, HOST_WIDE_INT, HOST_WIDE_INT,
enum machine_mode xmode
= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
- if (xmode != BLKmode
+ if (xmode != BLKmode
&& (known_align == 0
|| known_align >= GET_MODE_ALIGNMENT (xmode)))
{
for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
if (GET_MODE_NUNITS (mode) == nunits
&& GET_MODE_INNER (mode) == innermode
- && VECTOR_MODE_SUPPORTED_P (mode))
+ && targetm.vector_mode_supported_p (mode))
break;
/* For integers, try mapping it to a same-sized scalar mode. */
if (is_unsigned)
{
min_value = build_int_cst (type, 0, 0);
- max_value
+ max_value
= build_int_cst (type, precision - HOST_BITS_PER_WIDE_INT >= 0
? -1 : ((HOST_WIDE_INT) 1 << precision) - 1,
precision - HOST_BITS_PER_WIDE_INT > 0
}
else
{
- min_value
+ min_value
= build_int_cst (type,
(precision - HOST_BITS_PER_WIDE_INT > 0
? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)),
(((HOST_WIDE_INT) (-1)
<< (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
? precision - HOST_BITS_PER_WIDE_INT - 1
- : 0))));
+ : 0))));
max_value
= build_int_cst (type,
(precision - HOST_BITS_PER_WIDE_INT > 0
if (precision > HOST_BITS_PER_WIDE_INT * 2)
precision = HOST_BITS_PER_WIDE_INT * 2;
- set_min_and_max_values_for_integral_type (type, precision,
+ set_min_and_max_values_for_integral_type (type, precision,
/*is_unsigned=*/false);
/* Lay out the type: set its alignment, size, etc. */
precision = HOST_BITS_PER_WIDE_INT * 2;
TYPE_UNSIGNED (type) = 1;
-
- set_min_and_max_values_for_integral_type (type, precision,
+
+ set_min_and_max_values_for_integral_type (type, precision,
/*is_unsigned=*/true);
/* Lay out the type: set its alignment, size, etc. */
/* Provide a fake boolean type. We make no attempt to use the
C99 _Bool, as it may not be available in the bootstrap compiler,
- and even if it is, it is liable to be buggy.
+ and even if it is, it is liable to be buggy.
This must be after all inclusion of system headers, as some of
them will mess us up. */
SETUP_INCOMING_VARARGS EXPAND_BUILTIN_SAVEREGS \
DEFAULT_SHORT_ENUMS SPLIT_COMPLEX_ARGS MD_ASM_CLOBBERS \
HANDLE_PRAGMA_REDEFINE_EXTNAME HANDLE_PRAGMA_EXTERN_PREFIX \
- MUST_PASS_IN_STACK FUNCTION_ARG_PASS_BY_REFERENCE
+ MUST_PASS_IN_STACK FUNCTION_ARG_PASS_BY_REFERENCE \
+ VECTOR_MODE_SUPPORTED_P
/* Other obsolete target macros, or macros that used to be in target
headers and were not used, and may be obsolete or may never have
#define TARGET_VALID_POINTER_MODE default_valid_pointer_mode
#endif
+#ifndef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P hook_bool_mode_false
+#endif
+
#ifndef TARGET_VECTOR_OPAQUE_P
#define TARGET_VECTOR_OPAQUE_P hook_bool_tree_false
#endif
-/* In hook.c. */
+/* In hooks.c. */
#define TARGET_CANNOT_MODIFY_JUMPS_P hook_bool_void_false
#define TARGET_BRANCH_TARGET_REGISTER_CLASS hook_int_void_no_regs
#define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED hook_bool_bool_false
TARGET_ENCODE_SECTION_INFO, \
TARGET_STRIP_NAME_ENCODING, \
TARGET_VALID_POINTER_MODE, \
+ TARGET_VECTOR_MODE_SUPPORTED_P, \
TARGET_VECTOR_OPAQUE_P, \
TARGET_RTX_COSTS, \
TARGET_ADDRESS_COST, \
/* Tell assembler to switch to the readonly data section associated
with function DECL. */
- void (* function_rodata_section) (tree);
+ void (* function_rodata_section) (tree);
/* Output a constructor for a symbol with a given priority. */
void (* constructor) (rtx, int);
/* The following member value is a pointer to a function called
by the insn scheduler. It should return true if there exists a
- dependence which is considered costly by the target, between
- the insn passed as the first parameter, and the insn passed as
- the second parameter. The third parameter is the INSN_DEPEND
+ dependence which is considered costly by the target, between
+ the insn passed as the first parameter, and the insn passed as
+ the second parameter. The third parameter is the INSN_DEPEND
link that represents the dependence between the two insns. The
fourth argument is the cost of the dependence as estimated by
- the scheduler. The last argument is the distance in cycles
+ the scheduler. The last argument is the distance in cycles
between the already scheduled insn (first parameter) and the
the second insn (second parameter). */
bool (* is_costly_dependence) (rtx, rtx, rtx, int, int);
/* True if MODE is valid for a pointer in __attribute__((mode("MODE"))). */
bool (* valid_pointer_mode) (enum machine_mode mode);
+ /* True if MODE is valid for a vector. */
+ bool (* vector_mode_supported_p) (enum machine_mode mode);
+
/* True if a vector is opaque. */
bool (* vector_opaque_p) (tree);