#define CONST_OK_FOR_K(x) \
CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
- CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
+ CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
- CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
+ CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
- CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
+ CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
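/* Illustrative note (not from the patched sources): these wrappers feed the
   generic constraint machinery.  On s390, 'K' matches a 16-bit signed
   immediate (ahi/aghi) and "Os" a 32-bit signed immediate (afi/agfi), so
   callers can pick the shortest add form, as done later in this file:

     if (CONST_OK_FOR_K (delta))
       output_asm_insn ("aghi\t%1,%2", op);
     else if (CONST_OK_FOR_Os (delta))
       output_asm_insn ("agfi\t%1,%2", op);  */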
#define REGNO_PAIR_OK(REGNO, MODE) \
(s390_hard_regno_nregs ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
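/* Illustrative note (not from the patched sources): REGNO_PAIR_OK only
   requires an even register number when MODE occupies more than one hard
   register.  Assuming 64-bit GPRs, TImode needs a register pair, so:

     REGNO_PAIR_OK (6, TImode)   -> true   pair starts on an even register
     REGNO_PAIR_OK (7, TImode)   -> false  a pair may not start on an odd register
     REGNO_PAIR_OK (7, DImode)   -> true   single register, no pairing rule  */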
/* Record the vector mode used for an element selector. This assumes:
1. There is no builtin with two different vector modes and an element selector
- 2. The element selector comes after the vector type it is referring to.
+ 2. The element selector comes after the vector type it is referring to.
This is currently true for all the builtins, but FIXME: we
should check for that explicitly. */
if (VECTOR_MODE_P (insn_op->mode))
break;
case 1:
if (nonvoid)
- pat = GEN_FCN (icode) (target, op[0]);
+ pat = GEN_FCN (icode) (target, op[0]);
else
pat = GEN_FCN (icode) (op[0]);
break;
case E_CCZmode:
if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
|| m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
- return m2;
+ return m2;
return VOIDmode;
case E_CCSmode:
case E_CCVFHmode:
case E_CCVFHEmode:
if (req_mode != set_mode)
- return 0;
+ return 0;
break;
case E_CCZmode:
if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
&& req_mode != CCSRmode && req_mode != CCURmode
&& req_mode != CCZ1mode)
- return 0;
+ return 0;
break;
case E_CCAPmode:
case E_CCANmode:
if (req_mode != CCAmode)
- return 0;
+ return 0;
break;
default:
if (GET_CODE (PATTERN (insn)) == PARALLEL)
for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
- {
- rtx set = XVECEXP (PATTERN (insn), 0, i);
- if (GET_CODE (set) == SET)
- if (!s390_match_ccmode_set (set, req_mode))
- return false;
- }
+ {
+ rtx set = XVECEXP (PATTERN (insn), 0, i);
+ if (GET_CODE (set) == SET)
+ if (!s390_match_ccmode_set (set, req_mode))
+ return false;
+ }
return true;
}
bit1 = exact_log2 (INTVAL (op2));
bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
if (bit0 != -1 && bit1 != -1)
- return bit0 > bit1 ? CCT1mode : CCT2mode;
+ return bit0 > bit1 ? CCT1mode : CCT2mode;
}
return VOIDmode;
{
/* Relax CCTmode to CCZmode to allow fall-back to AND
if that turns out to be beneficial. */
- return ccmode == CCTmode ? CCZmode : ccmode;
+ return ccmode == CCTmode ? CCZmode : ccmode;
}
}
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
return CCAPmode;
- /* If constants are involved in an add instruction it is possible to use
- the resulting cc for comparisons with zero. Knowing the sign of the
+ /* If constants are involved in an add instruction it is possible to use
+ the resulting cc for comparisons with zero. Knowing the sign of the
constant, the overflow behavior becomes predictable, e.g.:
- int a, b; if ((b = a + c) > 0)
- with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
+ int a, b; if ((b = a + c) > 0)
+ with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
&& (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
|| (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
&& (GET_MODE_SIZE (GET_MODE (inner))
>= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
&& ((INTVAL (mask)
- & GET_MODE_MASK (GET_MODE (inner))
- & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
+ & GET_MODE_MASK (GET_MODE (inner))
+ & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
== 0))
inner = SUBREG_REG (inner);
{
/* For CCRAWmode put the required cc mask into the second
operand. */
- if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
- && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
+ if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
+ && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
*op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
*op0 = XVECEXP (*op0, 0, 0);
*code = new_code;
&& *op1 == const0_rtx)
{
if ((*code == EQ && GET_CODE (*op0) == NE)
- || (*code == NE && GET_CODE (*op0) == EQ))
+ || (*code == NE && GET_CODE (*op0) == EQ))
*code = EQ;
else
*code = NE;
case E_CCZmode:
case E_CCZ1mode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
+ {
+ case EQ: return CC0;
case NE: return CC1 | CC2 | CC3;
default: return -1;
- }
+ }
break;
case E_CCT1mode:
switch (GET_CODE (code))
- {
- case EQ: return CC1;
+ {
+ case EQ: return CC1;
case NE: return CC0 | CC2 | CC3;
default: return -1;
- }
+ }
break;
case E_CCT2mode:
switch (GET_CODE (code))
- {
- case EQ: return CC2;
+ {
+ case EQ: return CC2;
case NE: return CC0 | CC1 | CC3;
default: return -1;
- }
+ }
break;
case E_CCT3mode:
switch (GET_CODE (code))
- {
- case EQ: return CC3;
+ {
+ case EQ: return CC3;
case NE: return CC0 | CC1 | CC2;
default: return -1;
- }
+ }
break;
case E_CCLmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0 | CC2;
+ {
+ case EQ: return CC0 | CC2;
case NE: return CC1 | CC3;
default: return -1;
- }
+ }
break;
case E_CCL1mode:
switch (GET_CODE (code))
- {
+ {
case LTU: return CC2 | CC3; /* carry */
case GEU: return CC0 | CC1; /* no carry */
default: return -1;
- }
+ }
break;
case E_CCL2mode:
switch (GET_CODE (code))
- {
+ {
case GTU: return CC0 | CC1; /* borrow */
case LEU: return CC2 | CC3; /* no borrow */
default: return -1;
- }
+ }
break;
case E_CCL3mode:
case E_CCUmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC1 | CC2 | CC3;
- case LTU: return CC1;
- case GTU: return CC2;
- case LEU: return CC0 | CC1;
- case GEU: return CC0 | CC2;
+ {
+ case EQ: return CC0;
+ case NE: return CC1 | CC2 | CC3;
+ case LTU: return CC1;
+ case GTU: return CC2;
+ case LEU: return CC0 | CC1;
+ case GEU: return CC0 | CC2;
default: return -1;
- }
+ }
break;
case E_CCURmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC2 | CC1 | CC3;
- case LTU: return CC2;
- case GTU: return CC1;
- case LEU: return CC0 | CC2;
- case GEU: return CC0 | CC1;
+ {
+ case EQ: return CC0;
+ case NE: return CC2 | CC1 | CC3;
+ case LTU: return CC2;
+ case GTU: return CC1;
+ case LEU: return CC0 | CC2;
+ case GEU: return CC0 | CC1;
default: return -1;
- }
+ }
break;
case E_CCAPmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC1 | CC2 | CC3;
- case LT: return CC1 | CC3;
- case GT: return CC2;
- case LE: return CC0 | CC1 | CC3;
- case GE: return CC0 | CC2;
+ {
+ case EQ: return CC0;
+ case NE: return CC1 | CC2 | CC3;
+ case LT: return CC1 | CC3;
+ case GT: return CC2;
+ case LE: return CC0 | CC1 | CC3;
+ case GE: return CC0 | CC2;
default: return -1;
- }
+ }
break;
case E_CCANmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC1 | CC2 | CC3;
- case LT: return CC1;
- case GT: return CC2 | CC3;
- case LE: return CC0 | CC1;
- case GE: return CC0 | CC2 | CC3;
+ {
+ case EQ: return CC0;
+ case NE: return CC1 | CC2 | CC3;
+ case LT: return CC1;
+ case GT: return CC2 | CC3;
+ case LE: return CC0 | CC1;
+ case GE: return CC0 | CC2 | CC3;
default: return -1;
- }
+ }
break;
case E_CCSmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC1 | CC2 | CC3;
- case LT: return CC1;
- case GT: return CC2;
- case LE: return CC0 | CC1;
- case GE: return CC0 | CC2;
+ {
+ case EQ: return CC0;
+ case NE: return CC1 | CC2 | CC3;
+ case LT: return CC1;
+ case GT: return CC2;
+ case LE: return CC0 | CC1;
+ case GE: return CC0 | CC2;
case UNORDERED: return CC3;
case ORDERED: return CC0 | CC1 | CC2;
case UNEQ: return CC0 | CC3;
- case UNLT: return CC1 | CC3;
- case UNGT: return CC2 | CC3;
- case UNLE: return CC0 | CC1 | CC3;
- case UNGE: return CC0 | CC2 | CC3;
+ case UNLT: return CC1 | CC3;
+ case UNGT: return CC2 | CC3;
+ case UNLE: return CC0 | CC1 | CC3;
+ case UNGE: return CC0 | CC2 | CC3;
case LTGT: return CC1 | CC2;
default: return -1;
- }
+ }
break;
case E_CCSRmode:
switch (GET_CODE (code))
- {
- case EQ: return CC0;
- case NE: return CC2 | CC1 | CC3;
- case LT: return CC2;
- case GT: return CC1;
- case LE: return CC0 | CC2;
- case GE: return CC0 | CC1;
+ {
+ case EQ: return CC0;
+ case NE: return CC2 | CC1 | CC3;
+ case LT: return CC2;
+ case GT: return CC1;
+ case LE: return CC0 | CC2;
+ case GE: return CC0 | CC1;
case UNORDERED: return CC3;
case ORDERED: return CC0 | CC2 | CC1;
case UNEQ: return CC0 | CC3;
- case UNLT: return CC2 | CC3;
- case UNGT: return CC1 | CC3;
- case UNLE: return CC0 | CC2 | CC3;
- case UNGE: return CC0 | CC1 | CC3;
+ case UNLT: return CC2 | CC3;
+ case UNGT: return CC1 | CC3;
+ case UNLE: return CC0 | CC2 | CC3;
+ case UNGE: return CC0 | CC1 | CC3;
case LTGT: return CC2 | CC1;
default: return -1;
- }
+ }
break;
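/* Illustrative note (not from the patched sources): the value returned here
   is the 4-bit condition-code mask of a branch instruction, with CC0 as the
   most significant bit (CC0 = 8, CC1 = 4, CC2 = 2, CC3 = 1).  For an
   unsigned comparison (CCUmode) the mapping above gives, for example:

     EQ  -> CC0              mask 8
     LTU -> CC1              mask 4
     LEU -> CC0 | CC1        mask 12
     NE  -> CC1 | CC2 | CC3  mask 7  */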
/* Vector comparison modes. */
{
rtx subreg = operand_subword (dst, first_subword, 0, mode);
if (reg_overlap_mentioned_p (subreg, src))
- return false;
+ return false;
}
return true;
else if (REG_P (dst))
dst = gen_rtx_SUBREG (wmode, dst, 0);
else
- dst = gen_reg_rtx (wmode);
+ dst = gen_reg_rtx (wmode);
if (GET_CODE (src1) == SUBREG
&& (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
if (GET_CODE (disp) == CONST
&& GET_CODE (XEXP (disp, 0)) == UNSPEC
&& (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
- || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
+ || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
return false;
/* All other symbolic constants are literal pool references,
&& frame_pointer_needed
&& REGNO (base) == HARD_FRAME_POINTER_REGNUM)
|| REGNO (base) == ARG_POINTER_REGNUM
- || (flag_pic
- && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
- pointer = base_ptr = true;
+ || (flag_pic
+ && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
+ pointer = base_ptr = true;
if ((reload_completed || reload_in_progress)
&& base == cfun->machine->base_reg)
- pointer = base_ptr = literal_pool = true;
+ pointer = base_ptr = literal_pool = true;
}
/* Validate index register. */
&& frame_pointer_needed
&& REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
|| REGNO (indx) == ARG_POINTER_REGNUM
- || (flag_pic
- && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
- pointer = indx_ptr = true;
+ || (flag_pic
+ && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
+ pointer = indx_ptr = true;
if ((reload_completed || reload_in_progress)
&& indx == cfun->machine->base_reg)
- pointer = indx_ptr = literal_pool = true;
+ pointer = indx_ptr = literal_pool = true;
}
/* Prefer to use pointer as base, not index. */
pointer = true;
/* In the small-PIC case, the linker converts @GOT
- and @GOTNTPOFF offsets to possible displacements. */
+ and @GOTNTPOFF offsets to possible displacements. */
if (GET_CODE (disp) == UNSPEC
- && (XINT (disp, 1) == UNSPEC_GOT
+ && (XINT (disp, 1) == UNSPEC_GOT
|| XINT (disp, 1) == UNSPEC_GOTNTPOFF)
&& flag_pic == 1)
- {
+ {
;
- }
+ }
/* Accept pool label offsets. */
else if (GET_CODE (disp) == UNSPEC
/* Accept literal pool references. */
else if (GET_CODE (disp) == UNSPEC
&& XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
- {
+ {
/* In case CSE pulled a non literal pool reference out of
the pool we have to reject the address. This is
especially important when loading the GOT pointer on non
if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
return false;
- orig_disp = plus_constant (Pmode, orig_disp, offset);
+ orig_disp = plus_constant (Pmode, orig_disp, offset);
}
- }
+ }
else
return false;
static int
s390_register_move_cost (machine_mode mode,
- reg_class_t from, reg_class_t to)
+ reg_class_t from, reg_class_t to)
{
/* On s390, copy between fprs and gprs is expensive. */
case UDIV:
case UMOD:
- if (mode == TImode) /* 128 bit division */
+ if (mode == TImode) /* 128 bit division */
*total = s390_cost->dlgr;
else if (mode == DImode)
{
rtx right = XEXP (x, 1);
if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
*total = s390_cost->dlr;
- else /* 64 by 64 bit division */
+ else /* 64 by 64 bit division */
*total = s390_cost->dlgr;
}
else if (mode == SImode) /* 32 bit division */
*total = s390_cost->dsgfr;
else
*total = s390_cost->dr;
- else /* 64 by 64 bit division */
+ else /* 64 by 64 bit division */
*total = s390_cost->dsgr;
}
else if (mode == SImode) /* 32 bit division */
case SYMBOL_REF:
/* 'Naked' TLS symbol references are never OK;
- non-TLS symbols are OK iff we are non-PIC. */
+ non-TLS symbols are OK iff we are non-PIC. */
if (tls_symbolic_operand (x))
return true;
else
case CONST:
/* Symrefs cannot be pushed into the literal pool with -fPIC
so we *MUST NOT* return NO_REGS for these cases
- (s390_cannot_force_const_mem will return true).
+ (s390_cannot_force_const_mem will return true).
On the other hand we MUST return NO_REGS for symrefs with
invalid addend which might have been pushed to the literal
case LABEL_REF:
case SYMBOL_REF:
if (!legitimate_reload_constant_p (op))
- return NO_REGS;
+ return NO_REGS;
/* fallthrough */
case PLUS:
/* load address will be used. */
if (GET_CODE (symref) == SYMBOL_REF)
{
/* We have load-relative instructions for 2-byte, 4-byte, and
- 8-byte alignment so allow only these. */
+ 8-byte alignment so allow only these. */
switch (alignment)
{
case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
case E_##M##mode: \
if (TARGET_64BIT) \
sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
- CODE_FOR_reload##m##di_tomem_z10; \
+ CODE_FOR_reload##m##di_tomem_z10; \
else \
- sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
- CODE_FOR_reload##m##si_tomem_z10; \
+ sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
+ CODE_FOR_reload##m##si_tomem_z10; \
break;
switch (GET_MODE (x))
|| (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
{
/* Otherwise, one of the operands cannot be an address register;
- we reload its value into the scratch register. */
+ we reload its value into the scratch register. */
if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
{
emit_move_insn (scratch, sum1);
}
/* According to the way these invalid addresses are generated
- in reload.c, it should never happen (at least on s390) that
- *neither* of the PLUS components, after find_replacements
- was applied, is an address register. */
+ in reload.c, it should never happen (at least on s390) that
+ *neither* of the PLUS components, after find_replacements
+ was applied, is an address register. */
if (sum1 == scratch && sum2 == scratch)
{
debug_rtx (src);
that case. So no need to do it here. */
if (reg == 0)
- reg = gen_reg_rtx (Pmode);
+ reg = gen_reg_rtx (Pmode);
if (TARGET_Z10)
{
new_rtx = reg;
}
else if (flag_pic == 1)
- {
- /* Assume GOT offset is a valid displacement operand (< 4k
- or < 512k with z990). This is handled the same way in
- both 31- and 64-bit code (@GOT).
- lg <target>, sym@GOT(r12) */
+ {
+ /* Assume GOT offset is a valid displacement operand (< 4k
+ or < 512k with z990). This is handled the same way in
+ both 31- and 64-bit code (@GOT).
+ lg <target>, sym@GOT(r12) */
if (reload_in_progress || reload_completed)
df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
- new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
- new_rtx = gen_rtx_CONST (Pmode, new_rtx);
- new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
- new_rtx = gen_const_mem (Pmode, new_rtx);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
+ new_rtx = gen_const_mem (Pmode, new_rtx);
emit_move_insn (reg, new_rtx);
new_rtx = reg;
}
/* If the GOT offset might be >= 4k, we determine the position
of the GOT entry via a PC-relative LARL (@GOTENT).
larl temp, sym@GOTENT
- lg <target>, 0(temp) */
+ lg <target>, 0(temp) */
- rtx temp = reg ? reg : gen_reg_rtx (Pmode);
+ rtx temp = reg ? reg : gen_reg_rtx (Pmode);
gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
|| REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
- new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
- new_rtx = gen_rtx_CONST (Pmode, new_rtx);
+ new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
+ new_rtx = gen_rtx_CONST (Pmode, new_rtx);
emit_move_insn (temp, new_rtx);
new_rtx = gen_const_mem (Pmode, temp);
emit_move_insn (reg, new_rtx);
new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
new_rtx = gen_rtx_CONST (Pmode, new_rtx);
new_rtx = force_const_mem (Pmode, new_rtx);
- temp = gen_reg_rtx (Pmode);
+ temp = gen_reg_rtx (Pmode);
emit_move_insn (temp, new_rtx);
new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
else if (flag_pic)
{
if (SYMBOLIC_CONST (x)
- || (GET_CODE (x) == PLUS
- && (SYMBOLIC_CONST (XEXP (x, 0))
- || SYMBOLIC_CONST (XEXP (x, 1)))))
+ || (GET_CODE (x) == PLUS
+ && (SYMBOLIC_CONST (XEXP (x, 0))
+ || SYMBOLIC_CONST (XEXP (x, 1)))))
x = legitimize_pic_address (x, 0);
if (s390_legitimate_address_p (mode, x, FALSE))
mode = GET_MODE (len);
if (mode == VOIDmode)
- mode = Pmode;
+ mode = Pmode;
dst_addr = gen_reg_rtx (Pmode);
src_addr = gen_reg_rtx (Pmode);
temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
OPTAB_DIRECT);
if (temp != count)
- emit_move_insn (count, temp);
+ emit_move_insn (count, temp);
temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
OPTAB_DIRECT);
if (temp != blocks)
- emit_move_insn (blocks, temp);
+ emit_move_insn (blocks, temp);
emit_cmp_and_jump_insns (blocks, const0_rtx,
EQ, NULL_RTX, mode, 1, loop_end_label);
temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
OPTAB_DIRECT);
if (temp != blocks)
- emit_move_insn (blocks, temp);
+ emit_move_insn (blocks, temp);
emit_cmp_and_jump_insns (blocks, const0_rtx,
EQ, NULL_RTX, mode, 1, loop_end_label);
if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
{
if (INTVAL (len) > 0)
- {
- emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
- emit_insn (gen_cmpint (target, ccreg));
- }
+ {
+ emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
+ emit_insn (gen_cmpint (target, ccreg));
+ }
else
- emit_move_insn (target, const0_rtx);
+ emit_move_insn (target, const0_rtx);
}
else if (TARGET_MVCLE)
{
mode = GET_MODE (len);
if (mode == VOIDmode)
- mode = Pmode;
+ mode = Pmode;
addr0 = gen_reg_rtx (Pmode);
addr1 = gen_reg_rtx (Pmode);
temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
OPTAB_DIRECT);
if (temp != count)
- emit_move_insn (count, temp);
+ emit_move_insn (count, temp);
temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
OPTAB_DIRECT);
if (temp != blocks)
- emit_move_insn (blocks, temp);
+ emit_move_insn (blocks, temp);
emit_cmp_and_jump_insns (blocks, const0_rtx,
EQ, NULL_RTX, mode, 1, loop_end_label);
temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
OPTAB_DIRECT);
if (temp != blocks)
- emit_move_insn (blocks, temp);
+ emit_move_insn (blocks, temp);
emit_cmp_and_jump_insns (blocks, const0_rtx,
EQ, NULL_RTX, mode, 1, loop_end_label);
highest_index_to_load_reg, str_idx_reg));
add_reg_br_prob_note (s390_emit_jump (is_aligned_label, cond),
- profile_probability::very_unlikely ());
+ profile_probability::very_unlikely ());
expand_binop (Pmode, add_optab, str_idx_reg,
GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
p = rtvec_alloc (2);
RTVEC_ELT (p, 0) =
- gen_rtx_SET (dst, op_res);
+ gen_rtx_SET (dst, op_res);
RTVEC_ELT (p, 1) =
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
const0_rtx));
p = rtvec_alloc (2);
RTVEC_ELT (p, 0) =
- gen_rtx_SET (dst, op_res);
+ gen_rtx_SET (dst, op_res);
RTVEC_ELT (p, 1) =
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, output, cmp)));
}
s390_emit_jump (skip_cs_label, gen_rtx_NE (VOIDmode, cc, const0_rtx));
- add_reg_br_prob_note (get_last_insn (),
- profile_probability::very_unlikely ());
+ add_reg_br_prob_note (get_last_insn (),
+ profile_probability::very_unlikely ());
/* If the jump is not taken, OUTPUT is the expected value. */
cmp = output;
/* Reload newval to a register manually, *after* the compare and jump
/* Extract the symbol ref from:
(plus:SI (reg:SI 12 %r12)
- (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
- UNSPEC_GOTOFF/PLTOFF)))
+ (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
+ UNSPEC_GOTOFF/PLTOFF)))
and
(plus:SI (reg:SI 12 %r12)
- (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
- UNSPEC_GOTOFF/PLTOFF)
+ (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
+ UNSPEC_GOTOFF/PLTOFF)
(const_int 4 [0x4])))) */
if (GET_CODE (x) == PLUS
&& REG_P (XEXP (x, 0))
{
/* Extract the symbol ref from:
(mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
- UNSPEC_PLT/GOTENT))) */
+ UNSPEC_PLT/GOTENT))) */
y = XEXP (x, 0);
if (GET_CODE (y) == UNSPEC
if (ad.base && ad.indx)
fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
- reg_names[REGNO (ad.base)]);
+ reg_names[REGNO (ad.base)]);
else if (ad.base)
fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
}
'o': print integer X as if it's an unsigned 32bit word.
's': "start" of contiguous bitmask X in either DImode or vector inner mode.
't': CONST_INT: "start" of contiguous bitmask X in SImode.
- CONST_VECTOR: Generate a bitmask for vgbm instruction.
+ CONST_VECTOR: Generate a bitmask for vgbm instruction.
'x': print integer X as if it's an unsigned halfword.
'v': print register number as vector register (v1 instead of f1).
*/
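/* Illustrative note (not from the patched sources): these codes are used in
   insn output templates.  A hypothetical vector-copy alternative could be
   written as "vlr\t%v0,%v1" so that a floating-point hard register is
   printed under its vector name (v1 rather than f1), and "%x1" would print
   an integer operand as an unsigned halfword.  */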
case 'O':
{
- struct s390_address ad;
+ struct s390_address ad;
int ret;
ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
return;
}
- if (ad.disp)
- output_addr_const (file, ad.disp);
- else
- fprintf (file, "0");
+ if (ad.disp)
+ output_addr_const (file, ad.disp);
+ else
+ fprintf (file, "0");
}
return;
case 'R':
{
- struct s390_address ad;
+ struct s390_address ad;
int ret;
ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
return;
}
- if (ad.base)
- fprintf (file, "%s", reg_names[REGNO (ad.base)]);
- else
- fprintf (file, "0");
+ if (ad.base)
+ fprintf (file, "%s", reg_names[REGNO (ad.base)]);
+ else
+ fprintf (file, "0");
}
return;
case CONST_WIDE_INT:
if (code == 'b')
- fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
CONST_WIDE_INT_ELT (x, 0) & 0xff);
else if (code == 'x')
- fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
CONST_WIDE_INT_ELT (x, 0) & 0xffff);
else if (code == 'h')
- fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
else
{
priority = priority << 1;
break;
default:
- break;
+ break;
}
return priority;
}
for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- {
- annotate_constant_pool_refs (&XEXP (*x, i));
- }
+ {
+ annotate_constant_pool_refs (&XEXP (*x, i));
+ }
else if (fmt[i] == 'E')
- {
- for (j = 0; j < XVECLEN (*x, i); j++)
- annotate_constant_pool_refs (&XVECEXP (*x, i, j));
- }
+ {
+ for (j = 0; j < XVECLEN (*x, i); j++)
+ annotate_constant_pool_refs (&XVECEXP (*x, i, j));
+ }
}
}
return;
gcc_assert (GET_CODE (x) != SYMBOL_REF
- || !CONSTANT_POOL_ADDRESS_P (x));
+ || !CONSTANT_POOL_ADDRESS_P (x));
if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
{
rtx sym = XVECEXP (x, 0, 0);
gcc_assert (GET_CODE (sym) == SYMBOL_REF
- && CONSTANT_POOL_ADDRESS_P (sym));
+ && CONSTANT_POOL_ADDRESS_P (sym));
if (*ref == NULL_RTX)
*ref = sym;
for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- {
- find_constant_pool_ref (XEXP (x, i), ref);
- }
+ {
+ find_constant_pool_ref (XEXP (x, i), ref);
+ }
else if (fmt[i] == 'E')
- {
- for (j = 0; j < XVECLEN (x, i); j++)
- find_constant_pool_ref (XVECEXP (x, i, j), ref);
- }
+ {
+ for (j = 0; j < XVECLEN (x, i); j++)
+ find_constant_pool_ref (XVECEXP (x, i, j), ref);
+ }
}
}
for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- {
- replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
- }
+ {
+ replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
+ }
else if (fmt[i] == 'E')
- {
- for (j = 0; j < XVECLEN (*x, i); j++)
- replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
- }
+ {
+ for (j = 0; j < XVECLEN (*x, i); j++)
+ replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
+ }
}
}
for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- {
- rtx fnd = find_ltrel_base (XEXP (x, i));
+ {
+ rtx fnd = find_ltrel_base (XEXP (x, i));
if (fnd)
return fnd;
- }
+ }
else if (fmt[i] == 'E')
- {
- for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ for (j = 0; j < XVECLEN (x, i); j++)
{
- rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
+ rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
if (fnd)
return fnd;
}
- }
+ }
}
return NULL_RTX;
for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- {
- replace_ltrel_base (&XEXP (*x, i));
- }
+ {
+ replace_ltrel_base (&XEXP (*x, i));
+ }
else if (fmt[i] == 'E')
- {
- for (j = 0; j < XVECLEN (*x, i); j++)
- replace_ltrel_base (&XVECEXP (*x, i, j));
- }
+ {
+ for (j = 0; j < XVECLEN (*x, i); j++)
+ replace_ltrel_base (&XVECEXP (*x, i, j));
+ }
}
}
replace_ltrel_base (&PATTERN (insn));
if (NONJUMP_INSN_P (insn) || CALL_P (insn))
- {
- rtx addr, pool_ref = NULL_RTX;
- find_constant_pool_ref (PATTERN (insn), &pool_ref);
- if (pool_ref)
- {
+ {
+ rtx addr, pool_ref = NULL_RTX;
+ find_constant_pool_ref (PATTERN (insn), &pool_ref);
+ if (pool_ref)
+ {
if (s390_execute_label (insn))
addr = s390_find_execute (pool, insn);
else
addr = s390_find_constant (pool, get_pool_constant (pool_ref),
get_pool_mode (pool_ref));
- replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
- INSN_CODE (insn) = -1;
- }
- }
+ replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
+ INSN_CODE (insn) = -1;
+ }
+ }
}
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (LABEL_P (insn)
- && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
+ && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
{
struct constant_pool *pool = s390_find_pool (pool_list, insn);
if (pool)
continue;
if (NONJUMP_INSN_P (insn) || CALL_P (insn))
- {
- rtx addr, pool_ref = NULL_RTX;
- find_constant_pool_ref (PATTERN (insn), &pool_ref);
- if (pool_ref)
- {
+ {
+ rtx addr, pool_ref = NULL_RTX;
+ find_constant_pool_ref (PATTERN (insn), &pool_ref);
+ if (pool_ref)
+ {
if (s390_execute_label (insn))
addr = s390_find_execute (curr_pool, insn);
else
get_pool_constant (pool_ref),
get_pool_mode (pool_ref));
- replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
- INSN_CODE (insn) = -1;
- }
- }
+ replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
+ INSN_CODE (insn) = -1;
+ }
+ }
}
/* Dump out all literal pools. */
if (first == last)
{
if (TARGET_64BIT)
- insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
+ insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
else
- insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
+ insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
if (!global_not_special_regno_p (first))
RTX_FRAME_RELATED_P (insn) = 1;
if (first == last)
{
if (TARGET_64BIT)
- insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
+ insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
else
- insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
+ insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
RTX_FRAME_RELATED_P (insn) = 1;
return insn;
t, const0_rtx, const0_rtx));
}
}
- }
+ }
if (s390_warn_framesize > 0
&& cfun_frame_layout.frame_size >= s390_warn_framesize)
algorithms located at the branch target. */
/* Emit a blockage here so that all code
- lies between the profiling mechanisms. */
+ lies between the profiling mechanisms. */
emit_insn (gen_blockage ());
emit_insn (gen_epilogue_tpf ());
/* Nothing to restore. */
}
else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
- && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
+ && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
{
/* Area is in range. */
offset = cfun_frame_layout.frame_size;
LABEL_NUSES (call_done)++;
/* Mark the jump as very unlikely to be taken. */
- add_reg_br_prob_note (insn,
- profile_probability::very_unlikely ());
+ add_reg_br_prob_note (insn,
+ profile_probability::very_unlikely ());
if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
{
if (type)
{
if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
- return true;
+ return true;
if (TREE_CODE (type) == COMPLEX_TYPE
|| TREE_CODE (type) == VECTOR_TYPE)
static machine_mode
s390_promote_function_mode (const_tree type, machine_mode mode,
- int *punsignedp,
- const_tree fntype ATTRIBUTE_UNUSED,
- int for_return ATTRIBUTE_UNUSED)
+ int *punsignedp,
+ const_tree fntype ATTRIBUTE_UNUSED,
+ int for_return ATTRIBUTE_UNUSED)
{
if (INTEGRAL_MODE_P (mode)
&& GET_MODE_SIZE (mode) < UNITS_PER_LONG)
On S/390, va_list is an array type equivalent to
typedef struct __va_list_tag
- {
- long __gpr;
- long __fpr;
- void *__overflow_arg_area;
- void *__reg_save_area;
- } va_list[1];
+ {
+ long __gpr;
+ long __fpr;
+ void *__overflow_arg_area;
+ void *__reg_save_area;
+ } va_list[1];
where __gpr and __fpr hold the number of general purpose
or floating point arguments used up to now, respectively,
if (cfun->va_list_fpr_size)
{
t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
- build_int_cst (NULL_TREE, n_fpr));
+ build_int_cst (NULL_TREE, n_fpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
if (flag_split_stack
&& (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
- == NULL)
+ == NULL)
&& cfun->machine->split_stack_varargs_pointer == NULL_RTX)
{
rtx reg;
|| TARGET_VX_ABI)
{
if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
- t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
+ t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
else
- t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
+ t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
off = INTVAL (crtl->args.arg_offset_rtx);
off = off < 0 ? 0 : off;
if (integral value) {
if (size <= 4 && args.gpr < 5 ||
- size > 4 && args.gpr < 4 )
+ size > 4 && args.gpr < 4 )
ret = args.reg_save_area[args.gpr+8]
else
ret = *args.overflow_arg_area++;
{
/*
if (reg > ((typeof (reg))max_reg))
- goto lab_false;
+ goto lab_false;
- addr = sav + sav_ofs + reg * save_scale;
+ addr = sav + sav_ofs + reg * save_scale;
goto lab_over;
- lab_false:
+ lab_false:
*/
lab_false = create_artificial_label (UNKNOWN_LOCATION);
expanders.
DEST - Register location where CC will be stored.
TDB - Pointer to a 256-byte area in which to store the transaction
- diagnostic block. NULL if TDB is not needed.
+ diagnostic block. NULL if TDB is not needed.
RETRY - Retry count value. If non-NULL a retry loop for CC2
- is emitted
+ is emitted
CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
- of the tbegin instruction pattern. */
+ of the tbegin instruction pattern. */
void
s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
{
output_asm_insn ("brcl\t0,0", NULL);
hw -= 3;
- }
+ }
else if (hw >= 2)
- {
- output_asm_insn ("bc\t0,0", NULL);
- hw -= 2;
- }
+ {
+ output_asm_insn ("bc\t0,0", NULL);
+ hw -= 2;
+ }
else
- {
- output_asm_insn ("bcr\t0,0", NULL);
- hw -= 1;
- }
+ {
+ output_asm_insn ("bcr\t0,0", NULL);
+ hw -= 1;
+ }
}
}
if (flag_fentry)
{
if (flag_nop_mcount)
- output_asm_nops ("-mnop-mcount", /* brasl */ 3);
+ output_asm_nops ("-mnop-mcount", /* brasl */ 3);
else if (cfun->static_chain_decl)
- warning (OPT_Wcannot_profile, "nested functions cannot be profiled "
- "with -mfentry on s390");
+ warning (OPT_Wcannot_profile, "nested functions cannot be profiled "
+ "with -mfentry on s390");
else
- output_asm_insn ("brasl\t0,%4", op);
+ output_asm_insn ("brasl\t0,%4", op);
}
else if (TARGET_64BIT)
{
if (flag_nop_mcount)
- output_asm_nops ("-mnop-mcount", /* stg */ 3 + /* larl */ 3 +
- /* brasl */ 3 + /* lg */ 3);
+ output_asm_nops ("-mnop-mcount", /* stg */ 3 + /* larl */ 3 +
+ /* brasl */ 3 + /* lg */ 3);
else
- {
- output_asm_insn ("stg\t%0,%1", op);
- if (flag_dwarf2_cfi_asm)
- output_asm_insn (".cfi_rel_offset\t%0,%7", op);
- output_asm_insn ("larl\t%2,%3", op);
- output_asm_insn ("brasl\t%0,%4", op);
- output_asm_insn ("lg\t%0,%1", op);
- if (flag_dwarf2_cfi_asm)
- output_asm_insn (".cfi_restore\t%0", op);
- }
+ {
+ output_asm_insn ("stg\t%0,%1", op);
+ if (flag_dwarf2_cfi_asm)
+ output_asm_insn (".cfi_rel_offset\t%0,%7", op);
+ output_asm_insn ("larl\t%2,%3", op);
+ output_asm_insn ("brasl\t%0,%4", op);
+ output_asm_insn ("lg\t%0,%1", op);
+ if (flag_dwarf2_cfi_asm)
+ output_asm_insn (".cfi_restore\t%0", op);
+ }
}
else
{
if (flag_nop_mcount)
- output_asm_nops ("-mnop-mcount", /* st */ 2 + /* larl */ 3 +
- /* brasl */ 3 + /* l */ 2);
+ output_asm_nops ("-mnop-mcount", /* st */ 2 + /* larl */ 3 +
+ /* brasl */ 3 + /* l */ 2);
else
- {
- output_asm_insn ("st\t%0,%1", op);
- if (flag_dwarf2_cfi_asm)
- output_asm_insn (".cfi_rel_offset\t%0,%7", op);
- output_asm_insn ("larl\t%2,%3", op);
- output_asm_insn ("brasl\t%0,%4", op);
- output_asm_insn ("l\t%0,%1", op);
- if (flag_dwarf2_cfi_asm)
+ {
+ output_asm_insn ("st\t%0,%1", op);
+ if (flag_dwarf2_cfi_asm)
+ output_asm_insn (".cfi_rel_offset\t%0,%7", op);
+ output_asm_insn ("larl\t%2,%3", op);
+ output_asm_insn ("brasl\t%0,%4", op);
+ output_asm_insn ("l\t%0,%1", op);
+ if (flag_dwarf2_cfi_asm)
output_asm_insn (".cfi_restore\t%0", op);
}
}
output_asm_insn ("lay\t%1,%2(%1)", op);
else if (CONST_OK_FOR_K (delta))
output_asm_insn ("aghi\t%1,%2", op);
- else if (CONST_OK_FOR_Os (delta))
- output_asm_insn ("agfi\t%1,%2", op);
+ else if (CONST_OK_FOR_Os (delta))
+ output_asm_insn ("agfi\t%1,%2", op);
else
{
op[6] = gen_label_rtx ();
output_asm_insn ("ag\t%4,0(%1)", op);
output_asm_insn ("ag\t%1,0(%4)", op);
}
- else if (CONST_OK_FOR_Os (vcall_offset))
- {
- output_asm_insn ("lgfi\t%4,%3", op);
- output_asm_insn ("ag\t%4,0(%1)", op);
- output_asm_insn ("ag\t%1,0(%4)", op);
- }
+ else if (CONST_OK_FOR_Os (vcall_offset))
+ {
+ output_asm_insn ("lgfi\t%4,%3", op);
+ output_asm_insn ("ag\t%4,0(%1)", op);
+ output_asm_insn ("ag\t%1,0(%4)", op);
+ }
else
{
op[7] = gen_label_rtx ();
/* Setup base pointer if required. */
if (!vcall_offset
|| (!DISP_IN_RANGE (delta)
- && !CONST_OK_FOR_K (delta)
+ && !CONST_OK_FOR_K (delta)
&& !CONST_OK_FOR_Os (delta))
|| (!DISP_IN_RANGE (delta)
- && !CONST_OK_FOR_K (vcall_offset)
+ && !CONST_OK_FOR_K (vcall_offset)
&& !CONST_OK_FOR_Os (vcall_offset)))
{
op[5] = gen_label_rtx ();
else if (CONST_OK_FOR_K (delta))
output_asm_insn ("ahi\t%1,%2", op);
else if (CONST_OK_FOR_Os (delta))
- output_asm_insn ("afi\t%1,%2", op);
+ output_asm_insn ("afi\t%1,%2", op);
else
{
op[6] = gen_label_rtx ();
/* Perform vcall adjustment. */
if (vcall_offset)
- {
+ {
if (CONST_OK_FOR_J (vcall_offset))
{
output_asm_insn ("l\t%4,0(%1)", op);
output_asm_insn ("a\t%1,0(%4)", op);
}
else if (CONST_OK_FOR_Os (vcall_offset))
- {
- output_asm_insn ("iilf\t%4,%3", op);
- output_asm_insn ("a\t%4,0(%1)", op);
- output_asm_insn ("a\t%1,0(%4)", op);
- }
+ {
+ output_asm_insn ("iilf\t%4,%3", op);
+ output_asm_insn ("a\t%4,0(%1)", op);
+ output_asm_insn ("a\t%1,0(%4)", op);
+ }
else
{
op[7] = gen_label_rtx ();
named. This only has an impact on vector argument register
usage, none of which is call-saved. */
if (pass_by_reference (&cum_v, mode, type, true))
- {
- mode = Pmode;
- type = build_pointer_type (type);
- }
+ {
+ mode = Pmode;
+ type = build_pointer_type (type);
+ }
parm_rtx = s390_function_arg (cum, mode, type, true);
continue;
if (REG_P (parm_rtx))
- {
+ {
for (reg = 0; reg < REG_NREGS (parm_rtx); reg++)
if (!call_used_regs[reg + REGNO (parm_rtx)])
- return true;
+ return true;
}
if (GET_CODE (parm_rtx) == PARALLEL)
TLS_CALL the location of the thread-local symbol
RESULT_REG the register where the result of the call should be stored
RETADDR_REG the register where the return address should be stored.
- If this parameter is NULL_RTX the call is considered
- to be a sibling call. */
+ If this parameter is NULL_RTX the call is considered
+ to be a sibling call. */
rtx_insn *
s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
if (GET_CODE (addr_location) == SYMBOL_REF)
{
/* When calling a global routine in PIC mode, we must
- replace the symbol itself with the PLT stub. */
+ replace the symbol itself with the PLT stub. */
if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
- {
+ {
if (TARGET_64BIT || retaddr_reg != NULL_RTX)
{
addr_location = gen_rtx_UNSPEC (Pmode,
emit_move_insn (reg, target);
emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
- gen_rtx_REG (Pmode, RETURN_REGNUM));
+ gen_rtx_REG (Pmode, RETURN_REGNUM));
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
/* Use the alternative scheduling-pressure algorithm by default. */
maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
- opts->x_param_values,
- opts_set->x_param_values);
+ opts->x_param_values,
+ opts_set->x_param_values);
maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
opts->x_param_values,
{
/* Don't emit DWARF3/4 unless specifically selected. The TPF
debuggers do not yet support DWARF 3/4. */
- if (!global_options_set.x_dwarf_strict)
+ if (!global_options_set.x_dwarf_strict)
dwarf_strict = 1;
if (!global_options_set.x_dwarf_version)
dwarf_version = 2;