From 6cecf4ce3c74bc0b04b2de762de8b0123c1786b4 Mon Sep 17 00:00:00 2001 From: Chris Smith Date: Fri, 7 May 1993 11:54:24 +0000 Subject: [PATCH] *** empty log message *** From-SVN: r4377 --- gcc/config/convex/convex.c | 1019 +++++++++++++++++--- gcc/config/convex/convex.h | 883 ++++++++++++------ gcc/config/convex/convex.md | 1662 ++++++++++++++++++++++----------- gcc/config/convex/x-convex | 4 +- gcc/config/convex/xm-convex.h | 23 +- 5 files changed, 2665 insertions(+), 926 deletions(-) diff --git a/gcc/config/convex/convex.c b/gcc/config/convex/convex.c index e3a2badd4a2..90d430839a9 100644 --- a/gcc/config/convex/convex.c +++ b/gcc/config/convex/convex.c @@ -1,5 +1,5 @@ /* Subroutines for insn-output.c for Convex. - Copyright (C) 1989,1991 Free Software Foundation, Inc. + Copyright (C) 1988,1993 Free Software Foundation, Inc. This file is part of GNU CC. @@ -18,6 +18,7 @@ along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "config.h" +#include "tree.h" #include "rtl.h" #include "regs.h" #include "hard-reg-set.h" @@ -25,79 +26,235 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "insn-config.h" #include "conditions.h" #include "insn-flags.h" -#include "output.h" #include "insn-attr.h" +#include "output.h" +#include "expr.h" + +#undef NULL +#include + +/* Tables used in convex.h */ + +char regno_ok_for_index_p_base[1 + LAST_VIRTUAL_REGISTER + 1]; +enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER]; +enum reg_class reg_class_from_letter[256]; + +/* Target cpu index. */ + +int target_cpu; /* Boolean to keep track of whether the current section is .text or not. Used by .align handler in convex.h. */ int current_section_is_text; -/* set_cmp saves the operands of a "cmp" insn, along with the type character - * to be used in the compare instruction. - * - * gen_cmp finds out what comparison is to be performed and outputs the - * necessary instructions, e.g. - * "eq.w a1,a2\;jbra.t L5" - * for (cmpsi a1 a2) (beq L5) */ - -static rtx xop0, xop1; -static char typech, regch; +/* Communication between output_compare and output_condjump. */ + +static rtx cmp_operand0, cmp_operand1; +static char cmp_modech; + +/* Forwards */ + +static rtx frame_argblock; +static int frame_argblock_size; +static rtx convert_arg_pushes (); +static void expand_movstr_call (); + +/* Here from OVERRIDE_OPTIONS at startup. Initialize constant tables. */ + +init_convex () +{ + int regno; + + /* Set A and S reg classes. */ + for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) + if (A_REGNO_P (regno)) + { + regno_ok_for_index_p[regno] = 1; + regno_reg_class[regno] = INDEX_REGS; + } + else + { + regno_ok_for_index_p[regno] = 0; + regno_reg_class[regno] = S_REGS; + } + + /* Can't index off the stack pointer, register 0. */ + regno_ok_for_index_p[STACK_POINTER_REGNUM] = 0; + regno_reg_class[STACK_POINTER_REGNUM] = SP_REGS; + + /* Can't index off aliases of the stack pointer. 
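+     Of the virtual registers, the dynamic-area and outgoing-args pointers
+     instantiate to sp-relative addresses and so are excluded below; the
+     incoming-args and stack-vars pointers become ap- and fp-relative and
+     remain usable for indexing.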
*/ + regno_ok_for_index_p[VIRTUAL_INCOMING_ARGS_REGNUM] = 1; + regno_ok_for_index_p[VIRTUAL_STACK_VARS_REGNUM] = 1; + regno_ok_for_index_p[VIRTUAL_STACK_DYNAMIC_REGNUM] = 0; + regno_ok_for_index_p[VIRTUAL_OUTGOING_ARGS_REGNUM] = 0; + + /* Can't index off hard reg -1 == pseudos not assigned */ + regno_ok_for_index_p[-1] = 0; + + /* Set reg class letters */ + reg_class_from_letter['a'] = A_REGS; + reg_class_from_letter['A'] = INDEX_REGS; + reg_class_from_letter['d'] = S_REGS; + + /* Turn off floating point exception enables in the psw. */ + psw_disable_float (); +} + +psw_disable_float () +{ +#if __convex__ && __GNUC__ + register int *p; + asm ("mov fp,%0" : "=a" (p)); + while (p) + { + p[1] &= ~0x1000c400; + p = (int *) p[2]; + } +#endif +} + +/* Here to output code for a compare insn. Output nothing, just + record the operands and their mode. */ char * -set_cmp (op0, op1, typechr) - rtx op0, op1; - char typechr; -{ - xop0 = op0; - xop1 = op1; - typech = typechr; - if (GET_CODE (op0) == REG) - regch = A_REGNO_P (REGNO (op0)) ? 'a' : 's'; - else if (GET_CODE (op1) == REG) - regch = A_REGNO_P (REGNO (op1)) ? 'a' : 's'; - else abort (); +output_cmp (operand0, operand1, modech) + rtx operand0, operand1; + char modech; +{ + cmp_operand0 = operand0; + cmp_operand1 = operand1; + cmp_modech = modech; return ""; } +/* Output code for a conditional jump. The preceding instruction + is necessarily a compare. Output two instructions, for example + eq.w a1,a2 + jbra.t L5 + for + (cmpsi a1 a2) + (beq L5) + */ + char * -gen_cmp (label, cmpop, tf) +output_condjump (label, cond, jbr_sense) rtx label; - char *cmpop; - char tf; + char *cond; + char jbr_sense; { + rtx operands[3]; + char cmp_op[4]; char buf[80]; - char revop[4]; - rtx ops[3]; + char jbr_regch; + + strcpy (cmp_op, cond); + + /* [BL] mean the value is being compared against immediate 0. + Use neg.x, which produces the same carry that eq.x #0 would if it + existed. In this case operands[1] is a scratch register, not a + compare operand. */ - ops[2] = label; + if (cmp_modech == 'B' || cmp_modech == 'L') + { + cmp_modech = cmp_modech - 'A' + 'a'; + strcpy (cmp_op, "neg"); + } + + /* [WH] mean the value being compared resulted from "add.[wh] #-1,rk" + when rk was nonnegative -- we can omit equality compares against -1 + or inequality compares against 0. */ + + else if (cmp_modech == 'W' || cmp_modech == 'H') + { + if (! strcmp (cmp_op, "eq") && cmp_operand1 == constm1_rtx) + jbr_sense ^= 't' ^ 'f'; + else if (! strcmp (cmp_op, "lt") && cmp_operand1 == const0_rtx) + ; + else + cmp_modech = cmp_modech - 'A' + 'a'; + } /* Constant must be first; swap operands if necessary. If lt, le, ltu, leu are swapped, change to le, lt, leu, ltu and reverse the sense of the jump. */ - if (CONSTANT_P (xop1)) + if (! 
REG_P (cmp_operand1)) { - ops[0] = xop1; - ops[1] = xop0; - if (cmpop[0] == 'l') + operands[0] = cmp_operand1; + operands[1] = cmp_operand0; + if (cmp_op[0] == 'l') { - bcopy (cmpop, revop, sizeof revop); - revop[1] ^= 'e' ^ 't'; - tf ^= 't' ^ 'f'; - cmpop = revop; + cmp_op[1] ^= 'e' ^ 't'; + jbr_sense ^= 't' ^ 'f'; } } else { - ops[0] = xop0; - ops[1] = xop1; + operands[0] = cmp_operand0; + operands[1] = cmp_operand1; } - sprintf (buf, "%s.%c %%0,%%1\n\tjbr%c.%c %%l2", cmpop, typech, regch, tf); - output_asm_insn (buf, ops); + operands[2] = label; + + if (S_REG_P (operands[1])) + jbr_regch = 's'; + else if (A_REG_P (operands[1])) + jbr_regch = 'a'; + else + abort (); + + if (cmp_modech == 'W' || cmp_modech == 'H') + sprintf (buf, "jbr%c.%c %%l2", jbr_regch, jbr_sense); + else + sprintf (buf, "%s.%c %%0,%%1\n\tjbr%c.%c %%l2", + cmp_op, cmp_modech, jbr_regch, jbr_sense); + output_asm_insn (buf, operands); return ""; } + +/* Return 1 if OP is valid for cmpsf. + In IEEE mode, +/- zero compares are not handled by + the immediate versions of eq.s and on some machines, lt.s, and le.s. + So disallow 0.0 as the immediate operand of xx.s compares in IEEE mode. */ + +int +nonmemory_cmpsf_operand (op, mode) + rtx op; + enum machine_mode mode; +{ +#if _IEEE_FLOAT_ + if (op == CONST0_RTX (SFmode)) + return 0; +#endif + + return nonmemory_operand (op, mode); +} + +/* Convex /bin/as does not like unary minus in some contexts. + Simplify CONST addresses to remove it. */ + +rtx +simplify_for_convex (x) + rtx x; +{ + switch (GET_CODE (x)) + { + case MINUS: + if (GET_CODE (XEXP (x, 1)) == CONST_INT + && INTVAL (XEXP (x, 1)) < 0) + { + PUT_CODE (x, PLUS); + XEXP (x, 1) = GEN_INT (- INTVAL (XEXP (x, 1))); + } + break; + + case CONST: + return simplify_for_convex (XEXP (x, 0)); + } + + return x; +} /* Routines to separate CONST_DOUBLEs into component parts. */ @@ -121,100 +278,738 @@ const_double_low_int (x) return CONST_DOUBLE_LOW (x); } -/* Return the number of args in the call insn X. */ +/* Inline block copy. */ -static int -call_num_args (x) - rtx x; +void +expand_movstr (operands) + rtx *operands; { - if (GET_CODE (x) == CALL) - return INTVAL (x->fld[1].rtx); - if (GET_CODE (x) == SET) - return call_num_args (SET_SRC (x)); - abort (); + rtx dest = operands[0]; + rtx src = operands[1]; + int align = INTVAL (operands[3]); + int nregs, maxsize; + unsigned len; + enum machine_mode mode; + rtx reg, load, store, prev_store, prev_store_2; + int size; + + /* Decide how many regs to use, depending on load latency, and what + size pieces to move, depending on whether machine does unaligned + loads and stores efficiently. */ + + if (TARGET_C1) + { + /* ld.l latency is 4, no alignment problems. */ + nregs = 3, maxsize = 8; + } + else if (TARGET_C2) + { + /* loads are latency 2 if we avoid ld.l not at least word aligned. */ + if (align >= 4) + nregs = 2, maxsize = 8; + else + nregs = 2, maxsize = 4; + } + else if (TARGET_C34) + { + /* latency is 4 if aligned, horrible if not. */ + nregs = 3, maxsize = align; + } + else if (TARGET_C38) + { + /* latency is 2 if at least word aligned, 3 or 4 if unaligned. */ + if (align >= 4) + nregs = 2, maxsize = 8; + else + nregs = 3, maxsize = 8; + } + else + abort (); + + /* Caller is not necessarily prepared for us to fail in this + expansion. So fall back by generating memcpy call here. 
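+     The same path is taken when the length is not a compile-time constant
+     or is too large for the unrolled loop below, which needs a known byte
+     count.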
*/ + + if (GET_CODE (operands[2]) != CONST_INT + || (len = INTVAL (operands[2])) > (unsigned) 32 * maxsize) + { + expand_movstr_call (operands); + return; + } + + reg = 0; + prev_store = prev_store_2 = 0; + + while (len > 0) + { + if (len >= 8 && maxsize >= 8) + mode = DImode; + else if (len >= 4 && maxsize >= 4) + mode = SImode; + else if (len >= 2 && maxsize >= 2) + mode = HImode; + else + mode = QImode; + + /* If no temp pseudo to reuse, or not the right mode, make one */ + if (! reg || GET_MODE (reg) != mode) + reg = gen_reg_rtx (mode); + + /* Get src and dest in the right mode */ + if (GET_MODE (src) != mode) + src = change_address (src, mode, 0), + dest = change_address (dest, mode, 0); + + /* Make load and store patterns for this piece */ + load = gen_rtx (SET, VOIDmode, reg, src); + store = gen_rtx (SET, VOIDmode, dest, reg); + + /* Emit the load and the store from last time. + When we emit a store, we can reuse its temp reg. */ + emit_insn (load); + if (prev_store) + { + reg = SET_SRC (prev_store); + emit_insn (prev_store); + } + else + reg = 0; + + /* Queue up the store, for next time or the time after that. */ + if (nregs == 2) + prev_store = store; + else + prev_store = prev_store_2, prev_store_2 = store; + + /* Advance to next piece. */ + size = GET_MODE_SIZE (mode); + src = adj_offsettable_operand (src, size); + dest = adj_offsettable_operand (dest, size); + len -= size; + } + + /* Finally, emit the last stores. */ + if (prev_store) + emit_insn (prev_store); + if (prev_store_2) + emit_insn (prev_store_2); } -/* Scan forward from a call to decide whether we need to reload AP - from 12(FP) after it. We need to if there can be a reference to - arg_pointer_rtx before the next call, which will clobber AP. - Look forward in the instruction list until encountering a call - (don't need the load), or a reference to AP (do need it), or - a jump (don't know, do the load). */ +static void +expand_movstr_call (operands) + rtx *operands; +{ + emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0, + VOIDmode, 3, + XEXP (operands[0], 0), Pmode, + XEXP (operands[1], 0), Pmode, + operands[2], SImode); +} + +#if _IEEE_FLOAT_ +#define MAX_FLOAT 3.4028234663852886e+38 +#define MIN_FLOAT 1.1754943508222875e-38 +#else +#define MAX_FLOAT 1.7014117331926443e+38 +#define MIN_FLOAT 2.9387358770557188e-39 +#endif -static int -ap_reload_needed (insn) - rtx insn; +void +check_float_value (mode, dp) + enum machine_mode mode; + REAL_VALUE_TYPE *dp; { - for (;;) + REAL_VALUE_TYPE d = *dp; + + if (mode == SFmode) { - insn = NEXT_INSN (insn); - switch (GET_CODE (insn)) + if (d > MAX_FLOAT) { - case JUMP_INSN: - /* Basic block ends. If return, no AP needed, else assume it is. */ - return GET_CODE (PATTERN (insn)) != RETURN; - case CALL_INSN: - /* A subsequent call. AP isn't needed unless the call itself - requires it. But zero-arg calls don't clobber AP, so - don't terminate the search in that case. */ - if (reg_mentioned_p (arg_pointer_rtx, PATTERN (insn))) - return 1; - if (! TARGET_ARGCOUNT && call_num_args (PATTERN (insn)) == 0) - break; - return 0; - case BARRIER: - /* Barrier, don't need AP. */ - return 0; - case INSN: - /* Other insn may need AP; if not, keep looking. 
*/ - if (reg_mentioned_p (arg_pointer_rtx, PATTERN (insn))) - return 1; + error ("magnitude of constant too large for `float'"); + *dp = MAX_FLOAT; + } + else if (d < -MAX_FLOAT) + { + error ("magnitude of constant too large for `float'"); + *dp = -MAX_FLOAT; + } + else if ((d > 0 && d < MIN_FLOAT) || (d < 0 && d > -MIN_FLOAT)) + { + warning ("`float' constant truncated to zero"); + *dp = 0.0; } } } + +/* Output the label at the start of a function. + Precede it with the number of formal args so debuggers will have + some idea of how many args to print. */ -/* Output the insns needed to do a call. */ +void +asm_declare_function_name (file, name, decl) + FILE *file; + char *name; + tree decl; +{ + tree parms; + int nargs = list_length (DECL_ARGUMENTS (decl)); -char * -output_call (insn, address, argcount) - rtx insn, address, argcount; + char *p, c; + extern char *version_string; + static char vers[4]; + int i; + + p = version_string; + for (i = 0; i < 3; ) { + c = *p; + if (c - '0' < (unsigned) 10) + vers[i++] = c; + if (c == 0 || c == ' ') + vers[i++] = '0'; + else + p++; + } + fprintf (file, "\tds.b \"g%s\"\n", vers); + + if (nargs < 100) + fprintf (file, "\tds.b \"+%02d\\0\"\n", nargs); + else + fprintf (file, "\tds.b \"+00\\0\"\n"); + + ASM_OUTPUT_LABEL (file, name); +} + +/* Print an instruction operand X on file FILE. + CODE is the code from the %-spec that requested printing this operand; + if `%z3' was used to print operand 3, then CODE is 'z'. */ +/* Convex codes: + %u prints a CONST_DOUBLE's high word + %v prints a CONST_DOUBLE's low word + %z prints a CONST_INT shift count as a multiply operand -- viz. 1 << n. + */ + +print_operand (file, x, code) + FILE *file; + rtx x; + char code; { - int set_ap = TARGET_ARGCOUNT || argcount != const0_rtx; + long u[2]; + REAL_VALUE_TYPE d; - /* If AP is used by the call address, evaluate the address into a temp. */ - if (reg_mentioned_p (arg_pointer_rtx, address)) - if (set_ap) - { - address = XEXP (address, 0); - output_asm_insn ("ld.w %0,a1", &address); - address = gen_rtx (MEM, QImode, gen_rtx (REG, Pmode, 9)); + switch (GET_CODE (x)) + { + case REG: + fprintf (file, "%s", reg_names[REGNO (x)]); + break; + + case MEM: + output_address (XEXP (x, 0)); + break; + + case CONST_DOUBLE: + REAL_VALUE_FROM_CONST_DOUBLE (d, x); + switch (GET_MODE (x)) { + case DFmode: +#if 0 /* doesn't work, produces dfloats */ + REAL_VALUE_TO_TARGET_DOUBLE (d, u); +#else + { + union { double d; int i[2]; } t; + t.d = d; + u[0] = t.i[0]; + u[1] = t.i[1]; + } +#endif + if (code == 'u') + fprintf (file, "#%#x", u[0]); + else if (code == 'v') + fprintf (file, "#%#x", u[1]); + else + outfloat (file, d, "%.17e", "#", ""); + break; + case SFmode: + outfloat (file, d, "%.9e", "#", ""); + break; + default: + if (code == 'u') + fprintf (file, "#%d", CONST_DOUBLE_HIGH (x)); + else + fprintf (file, "#%d", CONST_DOUBLE_LOW (x)); } + break; - /* If there are args, point AP to them. */ - if (set_ap) - output_asm_insn ("mov sp,ap", 0); + default: + if (code == 'z') + { + if (GET_CODE (x) != CONST_INT) + abort (); + fprintf (file, "#%d", 1 << INTVAL (x)); + } + else + { + putc ('#', file); + output_addr_const (file, x); + } + } +} - /* If we are passing an arg count, convert it to words and push it. */ - if (TARGET_ARGCOUNT) +/* Print a memory operand whose address is X, on file FILE. 
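+   The accepted forms are a register, a constant, or register plus constant;
+   an outer MEM adds a leading `@' for indirection, the constant offset (if
+   any) is printed first, and the index register follows in parentheses.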
*/ + +print_operand_address (file, addr) + FILE *file; + rtx addr; +{ + rtx index = 0; + rtx offset = 0; + + if (GET_CODE (addr) == MEM) { - argcount = gen_rtx (CONST_INT, VOIDmode, (INTVAL (argcount) + 3) / 4); - output_asm_insn ("pshea %a0", &argcount); + fprintf (file, "@"); + addr = XEXP (addr, 0); } - /* The call. */ - output_asm_insn ("calls %0", &address); + switch (GET_CODE (addr)) + { + case REG: + index = addr; + break; - /* If we clobbered AP, reload it if it is live. */ - if (set_ap) - if (ap_reload_needed (insn)) - output_asm_insn ("ld.w 12(fp),ap", 0); + case PLUS: + index = XEXP (addr, 0); + if (REG_P (index)) + offset = XEXP (addr, 1); + else + { + offset = XEXP (addr, 0); + index = XEXP (addr, 1); + if (! REG_P (index)) + abort (); + } + break; - /* If we pushed an arg count, pop it and the args. */ - if (TARGET_ARGCOUNT) - { - argcount = gen_rtx (CONST_INT, VOIDmode, INTVAL (argcount) * 4 + 4); - output_asm_insn ("add.w %0,sp", &argcount); + default: + offset = addr; + break; } + + if (offset) + output_addr_const (file, offset); + + if (index) + fprintf (file, "(%s)", reg_names[REGNO (index)]); +} + +/* Output a float to FILE, value VALUE, format FMT, preceded by PFX + and followed by SFX. */ + +outfloat (file, value, fmt, pfx, sfx) + FILE *file; + REAL_VALUE_TYPE value; + char *fmt, *pfx, *sfx; +{ + char buf[64]; + fputs (pfx, file); + REAL_VALUE_TO_DECIMAL (value, fmt, buf); + fputs (buf, file); + fputs (sfx, file); +} + +/* Here during RTL generation of return. If we are at the final return + in a function, go through the function and replace pushes with stores + into a frame arg block. This is similar to what ACCUMULATE_OUTGOING_ARGS + does, but we must index off the frame pointer, not the stack pointer, + and the calling sequence does not require the arg block to be at the + top of the stack. */ + +replace_arg_pushes () +{ + end_sequence (); + replace_arg_pushes_1 (); + start_sequence (); +} + +replace_arg_pushes_1 () +{ + rtx insn, argblock; + int size; + int n; + + /* Look back to see if we are at the return at the end of the function. */ + n = 0; + for (insn = get_last_insn (); ; insn = PREV_INSN (insn)) + if (! insn || ++n > 5) + return; + else if (GET_CODE (insn) == NOTE + && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END) + break; + + /* Yes, we are. Find the max stack depth used by fixable arg pushes. */ + size = replace_pushes (0); + + /* Allocate block in frame to hold all arg lists. */ + argblock = assign_stack_local (BLKmode, size, STACK_BOUNDARY); + + /* Replace pushes with stores into the block. 
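+     The address passed is the end of the block; each push recorded at a
+     (negative) running offset OFFS becomes a store at that address plus
+     OFFS, e.g. (set (mem:SI (pre_dec (reg sp))) (reg X)) is rewritten as
+     (set (mem:SI (plus ARG_ADDR (const_int -4))) (reg X)), and each call's
+     arg-list operand is rewritten to point at its own area within the block.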
*/ + replace_pushes (plus_constant (XEXP (argblock, 0), size)); +} + +int +replace_pushes (arg_addr) + rtx arg_addr; +{ + struct slot_info { rtx insn; int offs; int size; }; +#define MAXSLOTS 1024 + struct slot_info slots[MAXSLOTS]; + rtx insn, pattern, dest; + enum machine_mode mode; + int offs, minoffs; + int nslot, islot; + int args_size, slots_size; + nslot = 0; + offs = 0; + minoffs = 0; + + for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) + switch (GET_CODE (insn)) + { + case INSN: + pattern = PATTERN (insn); + if (GET_CODE (pattern) == SET) + { + dest = SET_DEST (pattern); + mode = GET_MODE (dest); + if (push_operand (dest, mode)) + { + offs -= + slots[nslot].size = PUSH_ROUNDING (GET_MODE_SIZE (mode)); + slots[nslot].offs = offs; + slots[nslot].insn = insn; + nslot++; + } + else if (dest == stack_pointer_rtx) + { + rtx src = SET_SRC (pattern); + if (GET_CODE (src) == PLUS + && XEXP (src, 0) == stack_pointer_rtx + && GET_CODE (XEXP (src, 1)) == CONST_INT) + { + offs -= + slots[nslot].size = - INTVAL (XEXP (src, 1)); + slots[nslot].offs = 0; + slots[nslot].insn = insn; + nslot++; + } + else + { + slots[nslot].size = 0; + slots[nslot].offs = 0; + slots[nslot].insn = 0; + nslot++; + } + } + else if (reg_mentioned_p (stack_pointer_rtx, pattern)) + { + slots[nslot].size = 0; + slots[nslot].offs = 0; + slots[nslot].insn = 0; + nslot++; + } + else if (reg_mentioned_p (virtual_stack_dynamic_rtx, pattern) + || reg_mentioned_p (virtual_outgoing_args_rtx, pattern)) + { + slots[nslot].size = 0; + slots[nslot].offs = 0; + slots[nslot].insn = 0; + nslot++; + } + } + else + if (reg_mentioned_p (stack_pointer_rtx, pattern) + || reg_mentioned_p (virtual_stack_dynamic_rtx, pattern) + || reg_mentioned_p (virtual_outgoing_args_rtx, pattern) + || reg_mentioned_p (frame_pointer_rtx, pattern)) + abort (); + + break; + + case CALL_INSN: + { + pattern = PATTERN (insn); + if (GET_CODE (pattern) != PARALLEL) + abort (); + pattern = XVECEXP (pattern, 0, 0); + if (GET_CODE (pattern) == SET) + pattern = SET_SRC (pattern); + if (GET_CODE (pattern) != CALL) + abort (); + args_size = INTVAL (XEXP (pattern, 1)); + + slots_size = 0; + for (islot = nslot; islot > 0; islot--) + { + if (slots[islot - 1].insn == 0) + break; + if (slots_size >= args_size) + break; + slots_size += slots[islot - 1].size; + } + + if (slots_size != args_size) + { + offs += args_size; + if (offs > 0) + offs = 0; + slots[nslot].size = 0; + slots[nslot].offs = 0; + slots[nslot].insn = 0; + nslot++; + + if (arg_addr) + { + /* add insn to pop arg list if left on stack */ + rtx pop_size = XVECEXP (PATTERN (insn), 0, 2); + if (pop_size != const0_rtx) + emit_insn_after (gen_addsi3 (stack_pointer_rtx, + stack_pointer_rtx, + pop_size), + insn); + insn = NEXT_INSN (insn); + } + break; + } + + /* Record size of arg block */ + if (offs < minoffs) + minoffs = offs; + + /*printf ("call %d, args", INSN_UID (insn));*/ + if (arg_addr) + { + /* store arg block + offset as arg list address for call */ + XVECEXP (PATTERN (insn), 0, 3) = plus_constant (arg_addr, offs); + + /* rewrite arg instructions to use block */ + while (nslot > islot) + { + nslot--; + /*printf (" insn %d size %d offs %d", + INSN_UID(slots[nslot].insn), + slots[nslot].size, + slots[nslot].offs);*/ + + if (slots[nslot].offs == 0) + delete_insn (slots[nslot].insn); + else + { + rtx pattern = PATTERN (slots[nslot].insn); + enum machine_mode mode = GET_MODE (SET_DEST (pattern)); + if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (SImode)) + { + SET_SRC (pattern) = + gen_lowpart (SImode, SET_SRC 
(pattern)); + SET_DEST (pattern) = + gen_rtx (MEM, SImode, + plus_constant (arg_addr, + slots[nslot].offs)); + } + else + SET_DEST (pattern) = + gen_rtx (MEM, mode, + plus_constant (arg_addr, + slots[nslot].offs)); + } + } + /*printf ("\n");*/ + } + + nslot = islot; + + offs += args_size; + if (offs > 0) + abort (); + } + break; + + case CODE_LABEL: + case JUMP_INSN: + case BARRIER: + nslot = offs = 0; + } + + /*printf ("min offset %d\n", minoffs);*/ + return -minoffs; +} + +/* Output the insns needed to do a call. operands[] are + 0 - MEM, the place to call + 1 - CONST_INT, the number of bytes in the arg list + 2 - CONST_INT, the number of arguments + 3 - address of the arg list. + */ + +char * +output_call (insn, operands) + rtx insn, *operands; +{ + /*if (operands[3] == stack_pointer_rtx) + output_asm_insn ("mov sp,ap"); + else + output_asm_insn ("ldea %a4,ap", operands);*/ + + if (TARGET_ARGCOUNT) + output_asm_insn ("pshea %a2", operands); + + output_asm_insn ("calls %0", operands); + + /*output_asm_insn ("ld.w 12(fp),ap");*/ + + /*if (operands[3] == stack_pointer_rtx && operands[1] != const0_rtx) + output_asm_insn ("add.w %1,sp", operands);*/ + return ""; } + + +/* Here after reloading, before the second scheduling pass. + Insert explicit AP moves. */ + +emit_ap_optimizations () +{ + end_sequence (); + insert_ap_loads (); + start_sequence (); +} + +#define LABEL_DEAD_AP(INSN) ((INSN)->volatil) + +insert_ap_loads () +{ + rtx insn, pattern, src; + int ap_is_live, doagain; + + /* Check that code_label->volatil is not being used for something else */ + + for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) + if (GET_CODE (insn) == CODE_LABEL) + if (LABEL_DEAD_AP (insn)) + abort (); + + ap_is_live = 0; + + do + { + doagain = 0; + for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) + switch (GET_CODE (insn)) + { + case INSN: + pattern = PATTERN (insn); + if (! ap_is_live) + { + if (reg_mentioned_p (arg_pointer_rtx, pattern)) + ap_is_live = 1; + } + break; + + case CALL_INSN: + pattern = PATTERN (insn); + if (XVECEXP (pattern, 0, 2) != const0_rtx) + ap_is_live = reg_mentioned_p (arg_pointer_rtx, pattern); + break; + + case CODE_LABEL: + if (! ap_is_live) + { + if (! LABEL_DEAD_AP (insn)) + doagain = 1; + LABEL_DEAD_AP (insn) = 1; + } + break; + + case JUMP_INSN: + pattern = PATTERN (insn); + if (GET_CODE (pattern) == RETURN) + ap_is_live = 0; + else if (JUMP_LABEL (insn)) + { + if (simplejump_p (insn)) + ap_is_live = ! LABEL_DEAD_AP (JUMP_LABEL (insn)); + else if (! ap_is_live && condjump_p (insn)) + ap_is_live = ! LABEL_DEAD_AP (JUMP_LABEL (insn)); + else + ap_is_live = 1; + } + else + ap_is_live = 1; + break; + + case BARRIER: + ap_is_live = 0; + break; + } + } while (doagain); + + for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) + switch (GET_CODE (insn)) + { + case INSN: + pattern = PATTERN (insn); + if (! ap_is_live) + { + if (reg_mentioned_p (arg_pointer_rtx, pattern)) + ap_is_live = 1; + } + break; + + case CALL_INSN: + pattern = PATTERN (insn); + if (XVECEXP (pattern, 0, 2) != const0_rtx) + { + rtx arg_addr = XVECEXP (pattern, 0, 3); + emit_insn_before (gen_movsi (arg_pointer_rtx, arg_addr), insn); + if (ap_is_live) + emit_insn_after (gen_movsi (arg_pointer_rtx, + gen_rtx (MEM, SImode, + gen_rtx (PLUS, Pmode, + frame_pointer_rtx, + GEN_INT (12)))), + insn); + XVECEXP (pattern, 0, 3) = const0_rtx; + insn = PREV_INSN (insn); + ap_is_live = 0; + } + break; + + case CODE_LABEL: + if (ap_is_live != ! 
LABEL_DEAD_AP (insn)) + abort (); + break; + + case JUMP_INSN: + pattern = PATTERN (insn); + if (GET_CODE (pattern) == RETURN) + ap_is_live = 0; + else if (JUMP_LABEL (insn)) + { + if (simplejump_p (insn)) + ap_is_live = ! LABEL_DEAD_AP (JUMP_LABEL (insn)); + else if (! ap_is_live && condjump_p (insn)) + ap_is_live = ! LABEL_DEAD_AP (JUMP_LABEL (insn)); + else + ap_is_live = 1; + } + else + ap_is_live = 1; + break; + + case BARRIER: + ap_is_live = 0; + break; + } + + /* Clear code-label flag recording dead ap's. */ + + for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) + if (GET_CODE (insn) == CODE_LABEL) + LABEL_DEAD_AP (insn) = 0; +} diff --git a/gcc/config/convex/convex.h b/gcc/config/convex/convex.h index f31b03f520d..1400ab4988b 100644 --- a/gcc/config/convex/convex.h +++ b/gcc/config/convex/convex.h @@ -1,5 +1,5 @@ /* Definitions of target machine for GNU compiler. Convex version. - Copyright (C) 1992 Free Software Foundation, Inc. + Copyright (C) 1988, 1993 Free Software Foundation, Inc. This file is part of GNU CC. @@ -22,28 +22,75 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ extern int target_flags; -/* Interface to convex.c. */ +/* Convex machine-specific flags + -mc1 target instruction set, libraries, scheduling + -mc2 + -mc32 + -mc34 + -mc38 + -margcount use standard calling sequence, with arg count word + -mno-argcount don't push arg count, depend on symbol table + -margcount-nop place arg count in a nop instruction (faster than push) + -mvolatile-cache use data cache for volatile mem refs (default) + -mvolatile-nocache bypass data cache for volatile mem refs + -mlong32 cc- and libc-compatible 32-bit longs + -mlong64 64-bit longs +*/ -extern int current_section_is_text; -extern int const_double_low_int (); -extern int const_double_high_int (); -extern char *set_cmp (), *gen_cmp (); -extern char *output_call (); +/* Macro to define tables used to set -mXXX flags. + This is a list in braces of pairs in braces, + each pair being { "NAME", VALUE } + where VALUE is the bits to set or minus the bits to clear. + An empty string NAME is used to identify the default VALUE. */ -/* Use the proper incantation to search Posix-compliant libraries. */ +#ifndef TARGET_DEFAULT +#error Use one of convex1.h, convex2.h, etc. +#endif -#define LINK_SPEC \ -"%{!traditional:-Eposix}%{traditional:-Enoposix}\ - -A__iob=___ap$iob\ - -A_use_libc_sema=___ap$use_libc_sema\ - -L /usr/lib" +#define TARGET_SWITCHES \ + { { "c1", 001 }, \ + { "c2", 002 }, \ + { "c32", 004 }, \ + { "c34", 010 }, \ + { "c38", 020 }, \ + { "argcount", 0100 }, \ + { "argcount-nop", 0200 }, \ + { "no-argcount", -0300 }, \ + { "volatile-cache", -0400 }, \ + { "no-volatile-cache", 0400 }, \ + { "volatile-nocache", 0400 }, \ + { "long64", 01000 }, \ + { "long32", -01000 }, \ + { "", TARGET_DEFAULT }} -/* Use the matching startup files. */ +/* Macros used in the machine description to test the flags. 
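+   TARGET_C1 through TARGET_C38 test target_cpu, which OVERRIDE_OPTIONS
+   derives from the cpu bits of target_flags (c2 and c32 select the same
+   cpu); the remaining macros test individual flag bits directly.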
*/ -#define STARTFILE_SPEC \ -"%{pg:/usr/lib/crt/gcrt0.o}\ -%{!pg:%{p:/usr/lib/crt/mcrt0.o}\ -%{!p:/usr/lib/crt/crt0.o}}" +#define TARGET_C1 (target_cpu == 0) +#define TARGET_C2 (target_cpu == 1) +#define TARGET_C34 (target_cpu == 2) +#define TARGET_C38 (target_cpu == 3) +#define TARGET_ARGCOUNT (target_flags & 0100) +#define TARGET_ARGCOUNT_NOP (target_flags & 0200) +#define TARGET_LONG64 (target_flags & 01000) +#define TARGET_VOLATILE_NOCACHE (target_flags & 0400) + +#define OVERRIDE_OPTIONS \ +{ \ + extern int dollars_in_ident; \ + init_convex (); \ + /* To compile system header files, allow $ in identifiers even if -ansi */ \ + dollars_in_ident = 1; \ + if ((target_flags & 077) != (TARGET_DEFAULT & 077)) \ + target_flags &= ~TARGET_DEFAULT; \ + if (target_flags & 001) \ + target_cpu = 0; \ + else if (target_flags & 006) \ + target_cpu = 1; \ + else if (target_flags & 010) \ + target_cpu = 2; \ + else if (target_flags & 020) \ + target_cpu = 3; \ +} /* Names to predefine in the preprocessor for this target machine. */ @@ -53,52 +100,338 @@ extern char *output_call (); #define TARGET_VERSION fprintf (stderr, " (convex)"); -/* Macros used in the machine description to test the flags. */ +/* Target-dependent specs. + Some libraries come in c1 and c2+ versions; use the appropriate ones. + Make a target-dependent __convex_cxx__ define to relay the target cpu + to the program being compiled. */ + +#if TARGET_DEFAULT & 1 + +/* C1 default */ + +#if _IEEE_FLOAT_ + +#define CPP_SPEC \ +"%{!mc2:%{!mc32:%{!mc34:%{!mc38:-D__convex_c1__}}}} \ + %{mc2:-D__convex_c2__} \ + %{mc32:-D__convex_c32__} \ + %{mc34:-D__convex_c34__} \ + %{mc38:-D__convex_c38__} \ + %{fno-builtin:-D__NO_INLINE} \ + -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \ + -D_IEEE_FLOAT_ \ + %{.S:-P} \ + %{!traditional:-D__stdc__} \ + %{!traditional:-D_LONGLONG} \ + %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \ + %{!ansi:-D_POSIX_SOURCE} \ + %{!ansi:-D_CONVEX_SOURCE}" -/* - -mc1 C1 target (avoid C2-only instructions) - -mc2 C2 target - -mc32 vitesse - -mc34 javelin - -mc38 neptune - -margcount use standard calling sequence, with arg count word - -mnoargcount don't push arg count, depend on symbol table -*/ +#else -#define TARGET_C1 (target_flags & 1) -#define TARGET_C2 (target_flags & 2) -#define TARGET_C34 (target_flags & 4) -#define TARGET_C38 (target_flags & 010) -#define TARGET_INDIRECTS (1) -#define TARGET_ARGCOUNT (target_flags & 040) +#define CPP_SPEC \ +"%{!mc2:%{!mc32:%{!mc34:%{!mc38:-D__convex_c1__}}}} \ + %{mc2:-D__convex_c2__} \ + %{mc32:-D__convex_c32__} \ + %{mc34:-D__convex_c34__} \ + %{mc38:-D__convex_c38__} \ + %{fno-builtin:-D__NO_INLINE} \ + -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \ + -D_CONVEX_FLOAT_ \ + %{.S:-P} \ + %{!traditional:-D__stdc__} \ + %{!traditional:-D_LONGLONG} \ + %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \ + %{!ansi:-D_POSIX_SOURCE} \ + %{!ansi:-D_CONVEX_SOURCE}" -/* Macro to define tables used to set the flags. - This is a list in braces of pairs in braces, - each pair being { "NAME", VALUE } - where VALUE is the bits to set or minus the bits to clear. - An empty string NAME is used to identify the default VALUE. 
*/ +#endif -#define TARGET_SWITCHES \ - { { "c1", 021 }, \ - { "c2", 022 }, \ - { "c32", 022 }, \ - { "c34", 006 }, \ - { "c38", 012 }, \ - { "noc1", -001 }, \ - { "noc2", -002 }, \ - { "argcount", 040 }, \ - { "noargcount", -040 }, \ - { "", TARGET_DEFAULT }} +#define LIB_SPEC \ +"%{!mc2:%{!mc32:%{!mc34:%{!mc38:-lC1%{traditional:_old}%{p:_p}%{pg:_p}}}}} \ + %{mc2:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc32:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc34:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + -lc%{traditional:_old}%{p:_p}%{pg:_p}" -/* Default target_flags if no switches specified. */ +#endif + +#if TARGET_DEFAULT & 2 + +/* C2 default */ + +#if _IEEE_FLOAT_ + +#define CPP_SPEC \ +"%{mc1:-D__convex_c1__} \ + %{!mc1:%{!mc32:%{!mc34:%{!mc38:-D__convex_c2__}}}} \ + %{mc32:-D__convex_c32__} \ + %{mc34:-D__convex_c34__} \ + %{mc38:-D__convex_c38__} \ + %{fno-builtin:-D__NO_INLINE} \ + -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \ + -D_IEEE_FLOAT_ \ + %{.S:-P} \ + %{!traditional:-D__stdc__} \ + %{!traditional:-D_LONGLONG} \ + %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \ + %{!ansi:-D_POSIX_SOURCE} \ + %{!ansi:-D_CONVEX_SOURCE}" + +#else + +#define CPP_SPEC \ +"%{mc1:-D__convex_c1__} \ + %{!mc1:%{!mc32:%{!mc34:%{!mc38:-D__convex_c2__}}}} \ + %{mc32:-D__convex_c32__} \ + %{mc34:-D__convex_c34__} \ + %{mc38:-D__convex_c38__} \ + %{fno-builtin:-D__NO_INLINE} \ + -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \ + -D_CONVEX_FLOAT_ \ + %{.S:-P} \ + %{!traditional:-D__stdc__} \ + %{!traditional:-D_LONGLONG} \ + %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \ + %{!ansi:-D_POSIX_SOURCE} \ + %{!ansi:-D_CONVEX_SOURCE}" + +#endif + +#define LIB_SPEC \ +"%{mc1:-lC1%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{!mc1:%{!mc32:%{!mc34:%{!mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}}}}} \ + %{mc32:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc34:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + -lc%{traditional:_old}%{p:_p}%{pg:_p}" + +#endif + +#if TARGET_DEFAULT & 4 + +/* C32 default */ + +#if _IEEE_FLOAT_ + +#define CPP_SPEC \ +"%{mc1:-D__convex_c1__} \ + %{mc2:-D__convex_c2__} \ + %{!mc1:%{!mc2:%{!mc34:%{!mc38:-D__convex_c32__}}}} \ + %{mc34:-D__convex_c34__} \ + %{mc38:-D__convex_c38__} \ + %{fno-builtin:-D__NO_INLINE} \ + -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \ + -D_IEEE_FLOAT_ \ + %{.S:-P} \ + %{!traditional:-D__stdc__} \ + %{!traditional:-D_LONGLONG} \ + %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \ + %{!ansi:-D_POSIX_SOURCE} \ + %{!ansi:-D_CONVEX_SOURCE}" + +#else + +#define CPP_SPEC \ +"%{mc1:-D__convex_c1__} \ + %{mc2:-D__convex_c2__} \ + %{!mc1:%{!mc2:%{!mc34:%{!mc38:-D__convex_c32__}}}} \ + %{mc34:-D__convex_c34__} \ + %{mc38:-D__convex_c38__} \ + %{fno-builtin:-D__NO_INLINE} \ + -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \ + -D_CONVEX_FLOAT_ \ + %{.S:-P} \ + %{!traditional:-D__stdc__} \ + %{!traditional:-D_LONGLONG} \ + %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \ + %{!ansi:-D_POSIX_SOURCE} \ + %{!ansi:-D_CONVEX_SOURCE}" + +#endif + +#define LIB_SPEC \ +"%{mc1:-lC1%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc2:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{!mc1:%{!mc2:%{!mc34:%{!mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}}}}} \ + %{mc34:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + -lc%{traditional:_old}%{p:_p}%{pg:_p}" + +#endif + +#if TARGET_DEFAULT & 010 + +/* C34 
default */ + +#if _IEEE_FLOAT_ + +#define CPP_SPEC \ +"%{mc1:-D__convex_c1__} \ + %{mc2:-D__convex_c2__} \ + %{mc32:-D__convex_c32__} \ + %{!mc1:%{!mc2:%{!mc32:%{!mc38:-D__convex_c34__}}}} \ + %{mc38:-D__convex_c38__} \ + %{fno-builtin:-D__NO_INLINE} \ + -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \ + -D_IEEE_FLOAT_ \ + %{.S:-P} \ + %{!traditional:-D__stdc__} \ + %{!traditional:-D_LONGLONG} \ + %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \ + %{!ansi:-D_POSIX_SOURCE} \ + %{!ansi:-D_CONVEX_SOURCE}" + +#else + +#define CPP_SPEC \ +"%{mc1:-D__convex_c1__} \ + %{mc2:-D__convex_c2__} \ + %{mc32:-D__convex_c32__} \ + %{!mc1:%{!mc2:%{!mc32:%{!mc38:-D__convex_c34__}}}} \ + %{mc38:-D__convex_c38__} \ + %{fno-builtin:-D__NO_INLINE} \ + -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \ + -D_CONVEX_FLOAT_ \ + %{.S:-P} \ + %{!traditional:-D__stdc__} \ + %{!traditional:-D_LONGLONG} \ + %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \ + %{!ansi:-D_POSIX_SOURCE} \ + %{!ansi:-D_CONVEX_SOURCE}" + +#endif + +#define LIB_SPEC \ +"%{mc1:-lC1%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc2:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc32:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{!mc1:%{!mc2:%{!mc32:%{!mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}}}}} \ + %{mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + -lc%{traditional:_old}%{p:_p}%{pg:_p}" + +#endif + +#if TARGET_DEFAULT & 020 + +/* C38 default */ + +#if _IEEE_FLOAT_ + +#define CPP_SPEC \ +"%{mc1:-D__convex_c1__} \ + %{mc2:-D__convex_c2__} \ + %{mc32:-D__convex_c32__} \ + %{mc34:-D__convex_c34__} \ + %{fno-builtin:-D__NO_INLINE} \ + -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \ + -D_IEEE_FLOAT_ \ + %{!mc1:%{!mc2:%{!mc32:%{!mc34:-D__convex_c38__}}}} \ + %{.S:-P} \ + %{!traditional:-D__stdc__} \ + %{!traditional:-D_LONGLONG} \ + %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \ + %{!ansi:-D_POSIX_SOURCE} \ + %{!ansi:-D_CONVEX_SOURCE}" + +#else + +#define CPP_SPEC \ +"%{mc1:-D__convex_c1__} \ + %{mc2:-D__convex_c2__} \ + %{mc32:-D__convex_c32__} \ + %{mc34:-D__convex_c34__} \ + %{fno-builtin:-D__NO_INLINE} \ + -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \ + -D_CONVEX_FLOAT_ \ + %{!mc1:%{!mc2:%{!mc32:%{!mc34:-D__convex_c38__}}}} \ + %{.S:-P} \ + %{!traditional:-D__stdc__} \ + %{!traditional:-D_LONGLONG} \ + %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \ + %{!ansi:-D_POSIX_SOURCE} \ + %{!ansi:-D_CONVEX_SOURCE}" + +#endif + +#define LIB_SPEC \ +"%{mc1:-lC1%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc2:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc32:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{mc34:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \ + %{!mc1:%{!mc2:%{!mc32:%{!mc34:-lC2%{traditional:_old}%{p:_p}%{pg:_p}}}}} \ + -lc%{traditional:_old}%{p:_p}%{pg:_p}" + +#endif + +#if _IEEE_FLOAT_ + +/* ieee default */ + +#define ASM_SPEC "-fi" + +#define LINK_SPEC \ +"-E%{traditional:no}posix \ + -X \ + %{F} %{M*} %{y*} \ + -fi \ + -A__iob=___ap$iob \ + -A_use_libc_sema=___ap$use_libc_sema \ + %{traditional:-A__gcc_cleanup=__cleanup} \ + %{!traditional:-A__gcc_cleanup=___ap$do_registered_functions} \ + -L/usr/lib" + +#define STARTFILE_SPEC \ +"%{!pg:%{!p:/usr/lib/crt/crt0.o}} \ + %{!pg:%{p:/usr/lib/crt/mcrt0.o}} \ + %{pg:/usr/lib/crt/gcrt0.o} \ + /usr/lib/crt/fpmode_i.o" + +#else + +/* native default */ + +#define ASM_SPEC "-fn" + +#define LINK_SPEC \ +"-E%{traditional:no}posix \ + -X \ + %{F} %{M*} %{y*} \ + -fn \ + -A__iob=___ap$iob \ + -A_use_libc_sema=___ap$use_libc_sema \ + 
%{traditional:-A___gcc_cleanup=__cleanup} \ + %{!traditional:-A___gcc_cleanup=___ap$do_registered_functions} \ + -L/usr/lib" + +#define STARTFILE_SPEC \ +"%{!pg:%{!p:/usr/lib/crt/crt0.o}} \ + %{!pg:%{p:/usr/lib/crt/mcrt0.o}} \ + %{pg:/usr/lib/crt/gcrt0.o}" -#ifndef TARGET_DEFAULT -#define TARGET_DEFAULT 0 #endif +/* Use /path/libgcc.a instead of -lgcc, makes bootstrap work more smoothly. */ + +#define LINK_LIBGCC_SPECIAL + /* Allow $ in identifiers. */ #define DOLLARS_IN_IDENTIFIERS 2 + +/* Since IEEE support was added to gcc, most things seem to like it + better if we disable exceptions and check afterward for infinity. */ + +#if __convex__ +#if _IEEE_FLOAT_ +#define REAL_VALUE_ISNAN(x) 0 +#define REAL_VALUE_ISINF(x) ((*(short *) &(x) & 0x7ff0) == 0x7ff0) +#else +#define REAL_VALUE_ISNAN(x) 0 +#define REAL_VALUE_ISINF(x) ((*(short *) &(x) & 0xfff0) == 0x8000) +#endif +#endif /* Target machine storage layout */ @@ -132,7 +465,7 @@ extern char *output_call (); #define PARM_BOUNDARY 32 /* Boundary (in *bits*) on which stack pointer should be aligned. */ -#define STACK_BOUNDARY 32 +#define STACK_BOUNDARY 64 /* Allocation boundary (in *bits*) for the code of a function. */ #define FUNCTION_BOUNDARY 16 @@ -147,7 +480,7 @@ extern char *output_call (); #define PCC_BITFIELD_TYPE_MATTERS 1 /* No data type wants to be aligned rounder than this. */ -/* beware of doubles in structs -- 64 is incompatible with pcc */ +/* beware of doubles in structs -- 64 is incompatible with cc */ #define BIGGEST_ALIGNMENT 32 /* Set this nonzero if move instructions will actually fail to work @@ -159,7 +492,7 @@ extern char *output_call (); #define CHAR_TYPE_SIZE 8 #define SHORT_TYPE_SIZE 16 #define INT_TYPE_SIZE 32 -#define LONG_TYPE_SIZE 32 +#define LONG_TYPE_SIZE (TARGET_LONG64 ? 64 : 32) #define LONG_LONG_TYPE_SIZE 64 #define FLOAT_TYPE_SIZE 32 #define DOUBLE_TYPE_SIZE 64 @@ -183,7 +516,8 @@ extern char *output_call (); /* 1 for registers that have pervasive standard uses and are not available for the register allocator. For Convex, these are AP, FP, and SP. */ -#define FIXED_REGISTERS {0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1} +#define FIXED_REGISTERS \ + { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1 } /* 1 for registers not available across function calls. These must include the FIXED_REGISTERS and also any @@ -191,7 +525,14 @@ extern char *output_call (); The latter must include the registers where values are returned and the register where structure-value addresses are passed. Aside from that, you can include as many other registers as you like. */ -#define CALL_USED_REGISTERS {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} +#define CALL_USED_REGISTERS \ + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } + +/* List the order in which to allocate registers. Each register must be + listed once, even those in FIXED_REGISTERS. + For Convex, put S0 (the return register) last. */ +#define REG_ALLOC_ORDER \ + { 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 0, 8, 14, 15 } /* Return number of consecutive hard regs needed starting at reg REGNO to hold something of mode MODE. @@ -203,30 +544,28 @@ extern char *output_call (); /* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. On Convex, S registers can hold any type, A registers any nonfloat. 
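+   (For A registers the macro below is stricter still: only non-SFmode
+   values of at most four bytes, so DImode and DFmode values must live in
+   S registers.)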
*/ #define HARD_REGNO_MODE_OK(REGNO, MODE) \ - ((REGNO) < 8 || (GET_MODE_CLASS (MODE) != MODE_FLOAT && \ - GET_MODE_CLASS (MODE) != MODE_COMPLEX_FLOAT && \ - (MODE) != DImode)) + (S_REGNO_P (REGNO) \ + || (GET_MODE_SIZE (MODE) <= 4 && (MODE) != SFmode)) /* Value is 1 if it is a good idea to tie two pseudo registers when one has mode MODE1 and one has mode MODE2. If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2, for any hard reg, then this must be 0 for correct output. */ #define MODES_TIEABLE_P(MODE1, MODE2) \ - ((GET_MODE_CLASS (MODE1) == MODE_FLOAT \ - || GET_MODE_CLASS (MODE1) == MODE_COMPLEX_FLOAT \ - || (MODE1) == DImode) \ - == (GET_MODE_CLASS (MODE2) == MODE_FLOAT \ - || GET_MODE_CLASS (MODE2) == MODE_COMPLEX_FLOAT \ - || (MODE2) == DImode)) + ((GET_MODE_SIZE (MODE1) <= 4 && (MODE1) != SFmode) \ + == (GET_MODE_SIZE (MODE2) <= 4 && (MODE2) != SFmode)) /* Specify the registers used for certain standard purposes. The values of these macros are register numbers. */ +#define S0_REGNUM 0 +#define A0_REGNUM 8 + /* Register to use for pushing function arguments. */ -#define STACK_POINTER_REGNUM 8 +#define STACK_POINTER_REGNUM A0_REGNUM /* Base register for access to local variables of the function. */ -#define FRAME_POINTER_REGNUM 15 +#define FRAME_POINTER_REGNUM (A0_REGNUM + 7) /* Value should be nonzero if functions must have frame pointers. Zero means the frame pointer need not be set up (and parms @@ -235,16 +574,16 @@ extern char *output_call (); #define FRAME_POINTER_REQUIRED 1 /* Base register for access to arguments of the function. */ -#define ARG_POINTER_REGNUM 14 +#define ARG_POINTER_REGNUM (A0_REGNUM + 6) /* Register in which static-chain is passed to a function. Use S0, not an A reg, because this rare use would otherwise prevent an A reg from being available to global-alloc across calls. */ -#define STATIC_CHAIN_REGNUM 0 +#define STATIC_CHAIN_REGNUM S0_REGNUM /* Register in which address to store a structure value is passed to a function. */ -#define STRUCT_VALUE_REGNUM 9 +#define STRUCT_VALUE_REGNUM (A0_REGNUM + 1) /* Define the classes of registers for register constraints in the machine description. Also define ranges of constants. @@ -268,10 +607,11 @@ extern char *output_call (); /* Convex has classes A (address) and S (scalar). A is further divided into SP_REGS (stack pointer) and INDEX_REGS. - Seems to work better to put S first, here and in the md. */ + SI_REGS is S_REGS + INDEX_REGS -- all the regs except SP. */ enum reg_class { - NO_REGS, S_REGS, INDEX_REGS, SP_REGS, A_REGS, ALL_REGS, LIM_REG_CLASSES + NO_REGS, S_REGS, INDEX_REGS, SP_REGS, A_REGS, SI_REGS, + ALL_REGS, LIM_REG_CLASSES }; #define N_REG_CLASSES (int) LIM_REG_CLASSES @@ -284,13 +624,15 @@ enum reg_class { /* Give names of register classes as strings for dump file. */ #define REG_CLASS_NAMES \ - {"NO_REGS", "S_REGS", "INDEX_REGS", "SP_REGS", "A_REGS", "ALL_REGS" } + {"NO_REGS", "S_REGS", "INDEX_REGS", "SP_REGS", "A_REGS", "SI_REGS", \ + "ALL_REGS" } /* Define which registers fit in which classes. This is an initializer for a vector of HARD_REG_SET of length N_REG_CLASSES. */ -#define REG_CLASS_CONTENTS {0, 0x00ff, 0xfe00, 0x0100, 0xff00, 0xffff} +#define REG_CLASS_CONTENTS \ + { 0, 0x00ff, 0xfe00, 0x0100, 0xff00, 0xfeff, 0xffff } /* The same information, inverted: Return the class number of the smallest class containing @@ -298,10 +640,10 @@ enum reg_class { or could index an array. */ #define REGNO_REG_CLASS(REGNO) \ - (S_REGNO_P (REGNO) ? S_REGS : REGNO == 8 ? 
SP_REGS : INDEX_REGS) + ((REGNO) >= FIRST_PSEUDO_REGISTER ? abort() : regno_reg_class[REGNO]) -#define S_REGNO_P(REGNO) ((REGNO) < 8) -#define A_REGNO_P(REGNO) ((REGNO) >= 8) +#define S_REGNO_P(REGNO) (((REGNO) - S0_REGNUM) < (unsigned) 8) +#define A_REGNO_P(REGNO) (((REGNO) - A0_REGNUM) < (unsigned) 8) #define S_REG_P(X) (REG_P (X) && S_REGNO_P (REGNO (X))) #define A_REG_P(X) (REG_P (X) && A_REGNO_P (REGNO (X))) @@ -312,33 +654,32 @@ enum reg_class { #define BASE_REG_CLASS INDEX_REGS /* Get reg_class from a letter such as appears in the machine description. */ -/* S regs use the letter 'd' because 's' is taken. */ +/* a => A_REGS + d => S_REGS ('s' is taken) + A => INDEX_REGS (i.e., A_REGS except sp) */ #define REG_CLASS_FROM_LETTER(C) \ - ((C) == 'a' ? A_REGS : \ - (C) == 'd' ? S_REGS : \ - (C) == 'A' ? INDEX_REGS : \ - NO_REGS) + reg_class_from_letter[(unsigned char) (C)] /* The letters I, J, K, L and M in a register constraint string can be used to stand for particular ranges of immediate operands. This macro defines what the ranges are. C is the letter, and VALUE is a constant value. Return 1 if VALUE is in the range specified by C. */ +/* 'I' is used to pass any CONST_INT and reject any CONST_DOUBLE. + CONST_DOUBLE integers are handled by G and H constraint chars. */ -/* Convex uses only I: - 32-bit value with sign bit off, usable as immediate in DImode logical - instructions and, or, xor */ - -#define CONST_OK_FOR_LETTER_P(VALUE, C) ((VALUE) >= 0) +#define CONST_OK_FOR_LETTER_P(VALUE, C) 1 /* Similar, but for floating constants, and defining letters G and H. Here VALUE is the CONST_DOUBLE rtx itself. */ -/* Convex uses only G: +/* Convex uses G, H: value usable in ld.d (low word 0) or ld.l (high word all sign) */ -#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \ - (LD_D_P (VALUE) || LD_L_P (VALUE)) +#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \ + (((C) == 'G' && LD_D_P (VALUE)) || \ + ((C) == 'H' && LD_L_P (VALUE)) || \ + 0) #define LD_D_P(X) (const_double_low_int (X) == 0) @@ -346,22 +687,27 @@ enum reg_class { ? const_double_high_int (X) == 0 \ : const_double_high_int (X) == -1) +/* Optional extra constraints for this machine. + For Convex, 'Q' means that OP is a volatile MEM. + For volatile scalars, we use instructions that bypass the data cache. */ + +#define EXTRA_CONSTRAINT(OP, C) \ + ((C) == 'Q' ? (GET_CODE (OP) == MEM && MEM_VOLATILE_P (OP) \ + && ! TARGET_C1 && TARGET_VOLATILE_NOCACHE) \ + : 0) + /* Given an rtx X being reloaded into a reg required to be in class CLASS, return the class of reg to actually use. In general this is just CLASS; but on some machines in some cases it is preferable to use a more restrictive class. */ -/* CONST_DOUBLEs (constraint 'F') are passed by LEGITIMATE_CONSTANT_P - without regard to their value. Constraint 'G' is used by instructions - that need to reject non-immediate values. The rejected values are - dealt with by reload -- PREFERRED_RELOAD_CLASS returns NO_REGS for - nonimmediate values, causing reload to put them in memory. Every insn - that uses 'G' must have an alternative that accepts memory. */ +/* Put 2-word constants that can't be immediate operands into memory. */ #define PREFERRED_RELOAD_CLASS(X,CLASS) \ - (GET_CODE (X) != CONST_DOUBLE ? (CLASS) : \ - (GET_MODE (X) != TFmode && (LD_L_P (X) || LD_D_P (X))) ? (CLASS) : NO_REGS) - + ((GET_CODE (X) != CONST_DOUBLE \ + || GET_MODE (X) == SFmode \ + || LD_L_P (X) || LD_D_P (X)) ? 
(CLASS) : NO_REGS) + /* Return the maximum number of consecutive registers needed to represent mode MODE in a register of class CLASS. */ #define CLASS_MAX_NREGS(CLASS, MODE) ((GET_MODE_SIZE (MODE) + 7) / 8) @@ -399,27 +745,21 @@ enum reg_class { FUNTYPE is the data type of the function (as a tree), or for a library call it is an identifier node for the subroutine name. SIZE is the number of bytes of arguments passed on the stack. */ -/* The standard Convex call, with arg count word, includes popping the - args as part of the call template. We optionally omit the arg count - word and let gcc combine the arg pops. */ -#define RETURN_POPS_ARGS(FUNTYPE, SIZE) (TARGET_ARGCOUNT ? (SIZE) : 0) + +#define RETURN_POPS_ARGS(FUNTYPE, SIZE) (SIZE) /* Define how to find the value returned by a function. VALTYPE is the data type of the value (as a tree). If the precise function being called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0. */ -/* On Convex the return value is in S0 regardless. */ - -#define FUNCTION_VALUE(VALTYPE, FUNC) \ - gen_rtx (REG, TYPE_MODE (VALTYPE), 0) +#define FUNCTION_VALUE(VALTYPE, FUNC) \ + gen_rtx (REG, TYPE_MODE (VALTYPE), S0_REGNUM) /* Define how to find the value returned by a library function assuming the value has mode MODE. */ -/* On Convex the return value is in S0 regardless. */ - -#define LIBCALL_VALUE(MODE) gen_rtx (REG, MODE, 0) +#define LIBCALL_VALUE(MODE) gen_rtx (REG, MODE, S0_REGNUM) /* Define this if PCC uses the nonreentrant convention for returning structure and union values. */ @@ -429,7 +769,7 @@ enum reg_class { /* 1 if N is a possible register number for a function value. On the Convex, S0 is the only register thus used. */ -#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0) +#define FUNCTION_VALUE_REGNO_P(N) ((N) == S0_REGNUM) /* 1 if N is a possible register number for function argument passing. */ @@ -439,30 +779,24 @@ enum reg_class { during the scan of that argument list. This data type should hold all necessary information about the function itself and about the args processed so far, enough to enable macros - such as FUNCTION_ARG to determine where the next arg should go. - - On convex, this is a single integer, which is a number of bytes - of arguments scanned so far. */ + such as FUNCTION_ARG to determine where the next arg should go. */ +/* On convex, simply count the arguments in case TARGET_ARGCOUNT is set. */ #define CUMULATIVE_ARGS int /* Initialize a variable CUM of type CUMULATIVE_ARGS for a call to a function whose data type is FNTYPE. - For a library call, FNTYPE is 0. - - On Convex, the offset starts at 0. */ + For a library call, FNTYPE is 0. */ -#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME) \ - ((CUM) = 0) +#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME) \ + ((CUM) = 0) /* Update the data in CUM to advance over an argument of mode MODE and data type TYPE. (TYPE is null for libcalls where that information may not be available.) */ -#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \ - ((CUM) += ((MODE) != BLKmode \ - ? (GET_MODE_SIZE (MODE) + 3) & ~3 \ - : (int_size_in_bytes (TYPE) + 3) & ~3)) +#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \ + ((CUM) += 1) /* Define where to put the arguments to a function. Value is zero to push the argument on the stack, @@ -475,11 +809,13 @@ enum reg_class { CUM is a variable of type CUMULATIVE_ARGS which gives info about the preceding args and about the function being called. 
NAMED is nonzero if this argument is a named parameter - (otherwise it is an extra parameter matching an ellipsis). */ + (otherwise it is an extra parameter matching an ellipsis). -/* On Convex, all args are pushed. */ + Convex: all args go on the stack. But return the arg count + as the "next arg register" to be passed to gen_call. */ -#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) 0 +#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \ + ((MODE) == VOIDmode ? gen_rtx (CONST_INT, VOIDmode, (CUM)) : 0) /* This macro generates the assembly code for function entry. FILE is a stdio stream to output the code to. @@ -489,8 +825,23 @@ enum reg_class { is ever used in the function. This macro is responsible for knowing which registers should not be saved even if used. */ -#define FUNCTION_PROLOGUE(FILE, SIZE) \ -{ if ((SIZE) != 0) fprintf (FILE, "\tsub.w #%d,sp\n", ((SIZE) + 3) & -4);} +#define FUNCTION_PROLOGUE(FILE, SIZE) \ +{ \ + int size = ((SIZE) + 7) & -8; \ + if (size != 0) \ + fprintf (FILE, "\tsub.w #%d,sp\n", size); \ +} + +/* This macro generates the assembly code for function exit, + on machines that need it. If FUNCTION_EPILOGUE is not defined + then individual return instructions are generated for each + return statement. Args are same as for FUNCTION_PROLOGUE. */ + +#define FUNCTION_EPILOGUE(FILE, SIZE) \ +{ \ + /* Follow function with a zero to stop c34 icache prefetching. */ \ + fprintf (FILE, "\tds.h 0\n"); \ +} /* Output assembler code for a block containing the constant parts of a trampoline, leaving space for the variable parts. */ @@ -501,12 +852,8 @@ enum reg_class { #define TRAMPOLINE_TEMPLATE(FILE) \ { \ - ASM_OUTPUT_SHORT (FILE, gen_rtx (CONST_INT, VOIDmode, 0x11c8)); \ - ASM_OUTPUT_SHORT (FILE, const0_rtx); \ - ASM_OUTPUT_SHORT (FILE, const0_rtx); \ - ASM_OUTPUT_SHORT (FILE, gen_rtx (CONST_INT, VOIDmode, 0x0140)); \ - ASM_OUTPUT_SHORT (FILE, const0_rtx); \ - ASM_OUTPUT_SHORT (FILE, const0_rtx); \ + fprintf (FILE, "\tld.w #69696969,s0\n"); \ + fprintf (FILE, "\tjmp 52525252\n"); \ } /* Length in units of the trampoline for entering a nested function. */ @@ -521,10 +868,10 @@ enum reg_class { { \ emit_move_insn (gen_rtx (MEM, Pmode, plus_constant (TRAMP, 2)), CXT); \ emit_move_insn (gen_rtx (MEM, Pmode, plus_constant (TRAMP, 8)), FNADDR); \ - emit_call_insn (gen_call (gen_rtx (MEM, QImode, \ - gen_rtx (SYMBOL_REF, Pmode, \ - "__enable_execute_stack")), \ - const0_rtx)); \ + emit_call_insn (gen_call_pop (gen_rtx (MEM, QImode, \ + gen_rtx (SYMBOL_REF, Pmode, \ + "__enable_execute_stack")), \ + const0_rtx, const0_rtx, const0_rtx)); \ } /* Output assembler code to FILE to increment profiler label # LABELNO @@ -540,19 +887,12 @@ enum reg_class { #define EXIT_IGNORE_STACK 1 -/* This macro generates the assembly code for function exit, - on machines that need it. If FUNCTION_EPILOGUE is not defined - then individual return instructions are generated for each - return statement. Args are same as for FUNCTION_PROLOGUE. */ - -/* #define FUNCTION_EPILOGUE(FILE, SIZE) */ - /* Store in the variable DEPTH the initial difference between the frame pointer reg contents and the stack pointer reg contents, as of the start of the function body. This depends on the layout of the fixed parts of the stack frame and on how registers are saved. */ #define INITIAL_FRAME_POINTER_OFFSET(DEPTH) \ -{ (DEPTH) = get_frame_size (); } +{ (DEPTH) = (get_frame_size () + 7) & -8; } /* Addressing modes, and classification of registers for them. 
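+   Index-register validity is table-driven: regno_ok_for_index_p is filled
+   in by init_convex and also covers the virtual registers, and for pseudos
+   REGNO_OK_FOR_INDEX_P looks through reg_renumber.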
*/ @@ -571,8 +911,9 @@ enum reg_class { has been allocated, which happens in local-alloc.c. */ #define REGNO_OK_FOR_INDEX_P(regno) \ - ((((regno) ^ 010) < 8 || ((reg_renumber[regno] ^ 010) & -8) == 0) \ - && regno != 8) + ((regno) <= LAST_VIRTUAL_REGISTER \ + ? regno_ok_for_index_p[regno] \ + : regno_ok_for_index_p[reg_renumber[regno]]) #define REGNO_OK_FOR_BASE_P(regno) REGNO_OK_FOR_INDEX_P (regno) @@ -590,11 +931,12 @@ enum reg_class { /* Nonzero if the constant value X is a legitimate general operand. It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */ -/* For convex, any single-word constant is ok; the only contexts - allowing general_operand of mode DI or DF are movdi and movdf. */ +/* For convex, bounce 2-word constants that can't be immediate operands. */ #define LEGITIMATE_CONSTANT_P(X) \ - (GET_CODE (X) != CONST_DOUBLE ? 1 : (LD_D_P (X) || LD_L_P (X))) + (GET_CODE (X) != CONST_DOUBLE \ + || GET_MODE (X) == SFmode \ + || LD_L_P (X) || LD_D_P (X)) /* The macros REG_OK_FOR..._P assume that the arg is a REG rtx and check its validity for a certain class. @@ -614,10 +956,8 @@ enum reg_class { /* Nonzero if X is a hard reg that can be used as an index or if it is a pseudo reg. */ #define REG_OK_FOR_INDEX_P(X) \ - (REGNO (X) > 8 \ - && REGNO (X) != VIRTUAL_STACK_VARS_REGNUM \ - && REGNO (X) != VIRTUAL_STACK_DYNAMIC_REGNUM \ - && REGNO (X) != VIRTUAL_OUTGOING_ARGS_REGNUM) + (REGNO (X) > LAST_VIRTUAL_REGISTER || regno_ok_for_index_p[REGNO (X)]) + /* Nonzero if X is a hard reg that can be used as a base reg or if it is a pseudo reg. */ #define REG_OK_FOR_BASE_P(X) REG_OK_FOR_INDEX_P (X) @@ -626,6 +966,7 @@ enum reg_class { /* Nonzero if X is a hard reg that can be used as an index. */ #define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X)) + /* Nonzero if X is a hard reg that can be used as a base reg. */ #define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X)) @@ -641,8 +982,8 @@ enum reg_class { where indirectable is const, reg, (PLUS reg const) - On C3-series processors, we avoid indirection since it's substantially - slower. */ + We don't use indirection since with insn scheduling, load + indexing + is better. */ /* 1 if X is an address that we could indirect through. */ #define INDIRECTABLE_ADDRESS_P(X) \ @@ -662,14 +1003,9 @@ enum reg_class { { register rtx xfoob = (X); \ if (INDIRECTABLE_ADDRESS_P (xfoob)) \ goto ADDR; \ - xfoob = XEXP (X, 0); \ - if (GET_CODE (X) == MEM \ - && TARGET_INDIRECTS \ - && INDIRECTABLE_ADDRESS_P (xfoob)) \ + if (GET_CODE (xfoob) == PRE_DEC && XEXP (xfoob, 0) == stack_pointer_rtx) \ goto ADDR; \ - if (GET_CODE (X) == PRE_DEC && REG_P (xfoob) \ - && REGNO (xfoob) == STACK_POINTER_REGNUM) \ - goto ADDR; } +} /* Try machine-dependent ways of modifying an illegitimate address to be legitimate. If we find one, return the new, valid address. @@ -728,7 +1064,7 @@ enum reg_class { /* #define SLOW_ZERO_EXTEND */ /* Nonzero if access to memory by bytes is slow and undesirable. */ -#define SLOW_BYTE_ACCESS 0 +#define SLOW_BYTE_ACCESS (! TARGET_C2) /* Define if shifts truncate the shift count which implies one can omit a sign-extension or zero-extension @@ -766,24 +1102,29 @@ enum reg_class { case LABEL_REF: \ case SYMBOL_REF: \ case CONST_INT: \ - return 0; \ case CONST_DOUBLE: \ - return 2; + return 0; /* Provide the costs of a rtl expression. This is in the body of a - switch on CODE. - On C1 and C2, multiply is faster than shift. */ + switch on CODE. 
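+   The multiply and shift cases below keep one cost byte per cpu in a hex
+   constant indexed by target_cpu (e.g. the multiply byte for target_cpu 1
+   is 4, giving a cost of 16), and a PLUS of a register known to be a
+   pointer and a constant is free, since reg+constant is itself a valid
+   address.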
*/ #define RTX_COSTS(RTX,CODE,OUTER_CODE) \ + case PLUS: \ + if (regno_pointer_flag != 0 \ + && GET_CODE (XEXP (RTX, 0)) == REG \ + && REGNO_POINTER_FLAG (REGNO (XEXP (RTX, 0))) \ + && GET_CODE (XEXP (RTX, 1)) == CONST_INT) \ + return 0; \ + else break; \ case MULT: \ - total = COSTS_N_INSNS (4); \ - break; \ + return 4 * (char) (0x03060403 >> target_cpu * 8); \ case LSHIFT: \ case ASHIFT: \ case LSHIFTRT: \ case ASHIFTRT: \ - total = COSTS_N_INSNS (3); \ - break; + return 4 * (char) (0x03010403 >> target_cpu * 8); \ + case MEM: \ + return 5; /* Compute the cost of an address. This is meant to approximate the size and/or execution delay of an insn using that address. If the cost is @@ -793,31 +1134,53 @@ enum reg_class { this macro should be a constant. The value of this macro only matters for valid addresses. */ -#define ADDRESS_COST(RTX) (GET_CODE (RTX) == MEM ? 3 : 1) +#define ADDRESS_COST(RTX) 0 /* Specify the cost of a branch insn; roughly the number of extra insns that should be added to avoid a branch. */ #define BRANCH_COST 0 -/* Check a `double' value for validity for a particular machine mode. */ +/* Adjust the cost of dependences. */ + +#define ADJUST_COST(INSN,LINK,DEP,COST) \ +{ \ + /* Antidependencies don't block issue. */ \ + if (REG_NOTE_KIND (LINK) != 0) \ + (COST) = 0; \ + /* C38 situations where delay depends on context */ \ + else if (TARGET_C38 \ + && GET_CODE (PATTERN (INSN)) == SET \ + && GET_CODE (PATTERN (DEP)) == SET) \ + { \ + enum attr_type insn_type = get_attr_type (INSN); \ + enum attr_type dep_type = get_attr_type (DEP); \ + /* index register must be ready one cycle early */ \ + if (insn_type == TYPE_MLDW || insn_type == TYPE_MLDL \ + || (insn_type == TYPE_MST \ + && reg_mentioned_p (SET_DEST (PATTERN (DEP)), \ + SET_SRC (PATTERN (INSN))))) \ + (COST) += 1; \ + /* alu forwarding off alu takes two */ \ + if (dep_type == TYPE_ALU \ + && insn_type != TYPE_ALU \ + && ! (insn_type == TYPE_MST \ + && SET_DEST (PATTERN (DEP)) == SET_SRC (PATTERN (INSN)))) \ + (COST) += 1; \ + } \ +} +/* Convex uses Vax or IEEE floats. + Follow the host format. */ +#define TARGET_FLOAT_FORMAT HOST_FLOAT_FORMAT + +/* But must prevent real.c from constructing Vax dfloats */ +#define REAL_VALUE_ATOF(X,S) atof (X) +extern double atof(); + +/* Check a `double' value for validity for a particular machine mode. */ #define CHECK_FLOAT_VALUE(mode, d) \ - if ((mode) == SFmode) \ - { \ - if ((d) > 1.7014117331926443e+38) \ - { error ("magnitude of constant too large for `float'"); \ - (d) = 1.7014117331926443e+38; } \ - else if ((d) < -1.7014117331926443e+38) \ - { error ("magnitude of constant too large for `float'"); \ - (d) = -1.7014117331926443e+38; } \ - else if (((d) > 0) && ((d) < 2.9387358770557188e-39)) \ - { warning ("`float' constant truncated to zero"); \ - (d) = 0.0; } \ - else if (((d) < 0) && ((d) > -2.9387358770557188e-39)) \ - { warning ("`float' constant truncated to zero"); \ - (d) = 0.0; } \ - } + check_float_value ((mode), &(d)) /* Tell final.c how to eliminate redundant test instructions. */ @@ -835,7 +1198,11 @@ enum reg_class { /* Output at beginning of assembler file. */ -#define ASM_FILE_START(FILE) fprintf (FILE, ";NO_APP\n") +#if _IEEE_FLOAT_ +#define ASM_FILE_START(FILE) fprintf (FILE, ";NO_APP\n.fpmode ieee\n") +#else +#define ASM_FILE_START(FILE) fprintf (FILE, ";NO_APP\n.fpmode native\n") +#endif /* Output to assembler file text saying following lines may contain character constants, extra white space, comments, etc. 
*/ @@ -847,12 +1214,6 @@ enum reg_class { #define ASM_APP_OFF ";NO_APP\n" -/* Output something following the gcc2_compiled tag to keep that label from - hiding a real function name for tools like adb and prof. */ - -#define ASM_IDENTIFY_GCC(FILE) \ - fprintf (FILE, "gcc2_compiled.:\n\tds.h 0\n"); - /* Alignment with Convex's assembler goes like this: .text can be .aligned up to a halfword. .data and .bss can be .aligned up to a longword. @@ -904,9 +1265,11 @@ bss_section () \ /* How to refer to registers in assembler output. This sequence is indexed by compiler's hard-register-number (see above). */ -#define REGISTER_NAMES \ -{"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", \ - "sp", "a1", "a2", "a3", "a4", "a5", "ap", "fp"} +#define REGISTER_NAMES \ +{ \ + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", \ + "sp", "a1", "a2", "a3", "a4", "a5", "ap", "fp", \ +} /* This is BSD, so it wants DBX format. */ @@ -972,20 +1335,22 @@ bss_section () \ /* This is how to output an assembler line defining a `double' constant. */ -#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \ - fprintf (FILE, "\tds.d %.17e\n", (VALUE)) +#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \ + outfloat (FILE, VALUE, "%.17e", "\tds.d ", "\n") /* This is how to output an assembler line defining a `float' constant. */ -#define ASM_OUTPUT_FLOAT(FILE,VALUE) \ - fprintf (FILE, "\tds.s %.9e\n", (VALUE)) +#define ASM_OUTPUT_FLOAT(FILE,VALUE) \ + outfloat (FILE, VALUE, "%.9e", "\tds.s ", "\n") /* This is how to output an assembler line defining an `int' constant. */ #define ASM_OUTPUT_INT(FILE,VALUE) \ -( fprintf (FILE, "\tds.w "), \ - output_addr_const (FILE, (VALUE)), \ - fprintf (FILE, "\n")) +{ \ + fprintf (FILE, "\tds.w "); \ + output_addr_const (FILE, simplify_for_convex (VALUE)); \ + fprintf (FILE, "\n"); \ +} /* Likewise for a `long long int' constant. */ @@ -1090,6 +1455,11 @@ bss_section () \ ( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \ sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO))) +/* Output an arg count before function entries. */ + +#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \ + asm_declare_function_name (FILE, NAME, DECL) + /* Define the parentheses used to group arithmetic operations in assembler code. */ @@ -1110,60 +1480,12 @@ bss_section () \ if `%z3' was used to print operand 3, then CODE is 'z'. */ #define PRINT_OPERAND(FILE, X, CODE) \ -{ if (GET_CODE (X) == REG) \ - fprintf (FILE, "%s", reg_names[REGNO (X)]); \ - else if (GET_CODE (X) == MEM) \ - output_address (XEXP (X, 0)); \ - else if (GET_CODE (X) == CONST_DOUBLE \ - && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT) \ - { union { double d; int i[2]; } u; \ - u.i[0] = CONST_DOUBLE_LOW (X); u.i[1] = CONST_DOUBLE_HIGH (X); \ - fprintf (FILE, "#%.9e", u.d); } \ - else { putc ('#', FILE); output_addr_const (FILE, X); }} + print_operand (FILE, X, CODE) /* Print a memory operand whose address is X, on file FILE. */ #define PRINT_OPERAND_ADDRESS(FILE, ADDR) \ -{ \ - register rtx addr = ADDR; \ - register rtx index = 0; \ - register rtx offset = 0; \ - \ - if (GET_CODE (addr) == MEM) \ - { \ - fprintf (FILE, "@"); \ - addr = XEXP (addr, 0); \ - } \ - \ - switch (GET_CODE (addr)) \ - { \ - case REG: \ - index = addr; \ - break; \ - \ - case PLUS: \ - index = XEXP (addr, 0); \ - if (REG_P (index)) \ - offset = XEXP (addr, 1); \ - else \ - { \ - offset = XEXP (addr, 0); \ - index = XEXP (addr, 1); \ - if (! 
REG_P (index)) abort (); \ - } \ - break; \ - \ - default: \ - offset = addr; \ - break; \ - } \ - \ - if (offset) \ - output_addr_const (FILE, offset); \ - \ - if (index) \ - fprintf (FILE, "(%s)", reg_names[REGNO (index)]); \ -} + print_operand_address (FILE, ADDR) /* Definitions for g++. */ @@ -1180,17 +1502,38 @@ bss_section () \ #define SET_DECL_VINDEX(DECL, INDEX) \ (DECL_VINDEX (DECL) = (INDEX)) -#if 0 /* collect2.c should no longer need these. */ -/* Defs for compiling collect2.c in -pcc mode during bootstrap. */ +/* __gcc_cleanup is loader-aliased to __ap$do_registered_functions if we + are linking against standard libc, 0 if old (-traditional) libc. */ -#ifdef COLLECT - -#ifndef __STDC__ +#define EXIT_BODY \ +{ \ + extern void __gcc_cleanup (); \ + if (__gcc_cleanup != _cleanup) \ + __gcc_cleanup (); \ + _cleanup (); \ +} + +/* cexp.y uses LONG_TYPE_SIZE which depends on target_flags, which it + doesn't have. Until some better way exists, provide a def here. */ +#ifdef YYBISON +int target_flags; +#endif -#define WTERMSIG(x) (((union wait *) &(x))->w_termsig) -#define WEXITSTATUS(x) (((union wait *) &(x))->w_retcode) +/* Header for convex.c. + Here at the end so we can use types defined above. */ -#endif +extern int target_cpu; +extern int current_section_is_text; +extern enum reg_class regno_reg_class[]; +extern enum reg_class reg_class_from_letter[]; +extern char regno_ok_for_index_p_base[]; +#define regno_ok_for_index_p (regno_ok_for_index_p_base + 1) -#endif /* COLLECT */ -#endif /* 0 */ +extern int const_double_low_int (); +extern int const_double_high_int (); +extern char *output_cmp (); +extern char *output_condjump (); +extern char *output_call (); +extern void gen_ap_for_call (); +extern void check_float_value (); +extern void asm_declare_function_name (); diff --git a/gcc/config/convex/convex.md b/gcc/config/convex/convex.md index a8700bd87cd..4cc642d9e6f 100644 --- a/gcc/config/convex/convex.md +++ b/gcc/config/convex/convex.md @@ -1,6 +1,6 @@ ;;- Machine description for GNU compiler ;;- Convex Version -;; Copyright (C) 1991 Free Software Foundation, Inc. +;; Copyright (C) 1988, 1993 Free Software Foundation, Inc. ;; This file is part of GNU CC. @@ -17,350 +17,356 @@ ;; You should have received a copy of the GNU General Public License ;; along with GNU CC; see the file COPYING. If not, write to ;; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ +;; Attribute specifications + +; Target CPU +(define_attr "cpu" "c1,c32,c34,c38" + (const (symbol_ref "(enum attr_cpu) target_cpu"))) + +;; Instruction classification + +(define_attr "type" + "alu,xalu,mldw,mldl,mldb,mst,adds,addd,mulw,mull,muls,muld,divw,divl,divs,divd,shfw,shfl,cvts,cvtd" + (const_string "alu")) + +;; Instruction times + +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "mldw")) 2 0) +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "mldl")) 4 0) +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "mldw,mldl")) 2 0) +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "mldw,mldl")) 4 0) +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "mldw,mldl")) 2 0) + +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "mldb")) 9 0) +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "mldb")) 36 0) +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "mldb")) 21 0) + +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "xalu")) 1 0) +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "xalu")) 1 0) +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "xalu")) 5 0) +(define_function_unit "mem" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "xalu")) 2 0) + +(define_function_unit "add" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "adds,addd")) 3 2) +(define_function_unit "add" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "adds,addd")) 2 1) +(define_function_unit "add" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "adds,addd")) 5 2) +(define_function_unit "add" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "adds,addd")) 2 1) + +(define_function_unit "mul" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "mulw,muls")) 3 2) +(define_function_unit "mul" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "mulw,muls")) 4 2) +(define_function_unit "mul" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "mulw,muls")) 6 2) +(define_function_unit "mul" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "mulw,muls")) 3 2) + +(define_function_unit "mul" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "mull,muld")) 4 3) +(define_function_unit "mul" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "mull")) 10 7) +(define_function_unit "mul" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "muld")) 5 2) +(define_function_unit "mul" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "mull,muld")) 7 3) +(define_function_unit "mul" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "mull,muld")) 4 3) + +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "divw")) 24 24) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "divw")) 44 6) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "divw")) 14 10) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "divw")) 11 10) + +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "divl")) 41 42) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "divl")) 76 5) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "divl")) 22 18) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "divl")) 19 18) + +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" 
"divs")) 22 22) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "divs")) 8 6) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "divs")) 13 9) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "divs")) 10 9) + +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "divd")) 37 38) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "divd")) 12 8) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "divd")) 20 16) +(define_function_unit "div" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "divd")) 17 16) + +(define_function_unit "misc" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "cvts,cvtd")) 4 3) +(define_function_unit "misc" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "cvts")) 9 7) +(define_function_unit "misc" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "cvtd")) 9 6) +(define_function_unit "misc" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "cvts")) 6 2) +(define_function_unit "misc" 1 0 + (and (eq_attr "cpu" "c34") (eq_attr "type" "cvtd")) 6 1) +(define_function_unit "misc" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "cvts,cvtd")) 3 1) + +(define_function_unit "misc" 1 0 + (and (eq_attr "cpu" "c1") (eq_attr "type" "shfw,shfl")) 3 2) +(define_function_unit "misc" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "shfw")) 7 5) +(define_function_unit "misc" 1 0 + (and (eq_attr "cpu" "c32") (eq_attr "type" "shfl")) 7 4) +(define_function_unit "misc" 1 0 + (and (eq_attr "cpu" "c38") (eq_attr "type" "shfw,shfl")) 3 1) + +(define_function_unit "mystery_latch" 1 1 + (and (eq_attr "type" "!alu,mldw,mldl,adds,addd") (eq_attr "cpu" "c32")) 2 2) + +;(define_function_unit "ip" 1 1 +; (and (eq_attr "cpu" "c1") +; (eq_attr "type" "divw,divl,divs,divd,xalu")) 2 2) +;(define_function_unit "ip" 1 1 +; (and (eq_attr "cpu" "c1") +; (eq_attr "type" "!divw,divl,divs,divd,xalu")) 1 1) +;(define_function_unit "ip" 1 1 +; (and (eq_attr "cpu" "c32") +; (eq_attr "type" "mull,muld,divl,divd,shfl,cvtd,xalu")) 2 2) +;(define_function_unit "ip" 1 1 +; (and (eq_attr "cpu" "c32") +; (eq_attr "type" "!mull,muld,divl,divd,shfl,cvtd,xalu")) 1 1) +;(define_function_unit "ip" 1 1 +; (and (eq_attr "cpu" "c34") +; (eq_attr "type" "addd,mull,muld,divl,divd,cvtd,xalu")) 2 2) +;(define_function_unit "ip" 1 1 +; (and (eq_attr "cpu" "c34") +; (eq_attr "type" "!addd,mull,muld,divl,divd,cvtd,xalu")) 1 1) + +;; Make the first thing a real insn in case of genattrtab bug -;; Scheduling defs -;; -;; Insn scheduling is not used at present. Scheduling increases -;; register pressure so much that many spills are generated -;; even for very small functions. 
- -;; Compares - -(define_insn "tstsi" - [(set (cc0) - (match_operand:SI 0 "register_operand" "r"))] - "" - "* return set_cmp (operands[0], const0_rtx, 'w');") - -(define_insn "tsthi" - [(set (cc0) - (match_operand:HI 0 "register_operand" "r"))] - "" - "* return set_cmp (operands[0], const0_rtx, 'h');") - -(define_expand "tstqi" - [(set (match_dup 1) - (sign_extend:SI (match_operand:QI 0 "register_operand" "r"))) - (set (cc0) - (match_dup 1))] +(define_insn "nop" + [(const_int 0)] "" - "operands[1] = gen_reg_rtx (SImode);") + "nop") + +;; Moves -(define_expand "tstdi" - [(parallel [(set (cc0) (match_operand:DI 0 "register_operand" "d")) - (use (match_dup 1))])] +(define_expand "movdf" + [(set (match_operand:DF 0 "general_operand" "") + (match_operand:DF 1 "general_operand" ""))] "" - "operands[1] = force_reg (DImode, const0_rtx);") + "if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (DFmode, operands[1]);") (define_insn "" - [(set (cc0) (match_operand:DI 0 "register_operand" "d")) - (use (match_operand:DI 1 "register_operand" "d"))] - "" - "* return set_cmp (operands[0], operands[1], 'l');") - -(define_expand "tstdf" - [(set (cc0) - (compare (match_operand:DF 0 "register_operand" "d") - (match_dup 1)))] - "" - "operands[1] = force_reg (DFmode, CONST0_RTX (DFmode));") - -(define_insn "tstsf" - [(set (cc0) - (match_operand:SF 0 "register_operand" "d"))] - "" - "* return set_cmp (operands[0], CONST0_RTX (SFmode), 's');") - -(define_insn "cmpsi" - [(set (cc0) - (compare (match_operand:SI 0 "register_operand" "d,a,i,r") - (match_operand:SI 1 "nonmemory_operand" "d,a,r,i")))] - "" - "* return set_cmp (operands[0], operands[1], 'w');") - -(define_insn "cmphi" - [(set (cc0) - (compare (match_operand:HI 0 "register_operand" "d,a,r,i") - (match_operand:HI 1 "nonmemory_operand" "d,a,i,r")))] - "" - "* return set_cmp (operands[0], operands[1], 'h');") - -(define_insn "cmpqi" - [(set (cc0) - (compare (match_operand:QI 0 "register_operand" "d") - (match_operand:QI 1 "register_operand" "d")))] - "" - "* return set_cmp (operands[0], operands[1], 'b');") - -(define_insn "cmpdi" - [(set (cc0) - (compare (match_operand:DI 0 "register_operand" "d") - (match_operand:DI 1 "register_operand" "d")))] - "" - "* return set_cmp (operands[0], operands[1], 'l');") - -(define_insn "cmpdf" - [(set (cc0) - (compare (match_operand:DF 0 "register_operand" "d") - (match_operand:DF 1 "register_operand" "d")))] - "" - "* return set_cmp (operands[0], operands[1], 'd');") + [(set (match_operand:DF 0 "general_operand" "=d,d,d,d,d,<,m") + (match_operand:DF 1 "general_operand" "d,Q,m,G,H,d,d"))] + "register_operand (operands[0], DFmode) + || register_operand (operands[1], DFmode)" + "@ + mov %1,%0 + ldb.d %1,%0 + ld.d %1,%0 + ld.d %u1,%0 + ld.l %v1,%0 + psh.l %1 + st.d %1,%0" + [(set_attr "type" "alu,mldb,mldl,alu,alu,alu,mst")]) + +;; This is here so we can load any result of RTL constant folding +;; but do not use it on constants that can be loaded from memory. +;; It is never better and can be worse. 
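[Editor's note] The CONST_DOUBLE alternative above loads a 64-bit constant with "ld.u" (upper longword) followed by "ld.w" (lower longword); the %u1/%v1 modifiers are assumed to print those two halves. A minimal host-side sketch of that split, assuming a big-endian host whose double format matches the target; illustration only, not part of the compiler.

#include <stdio.h>

int main (void)
{
  union { double d; unsigned int w[2]; } u;
  u.d = 2.5;
  /* On a big-endian host, w[0] is the upper longword, w[1] the lower.  */
  printf ("ld.u #0x%08x ; ld.w #0x%08x\n", u.w[0], u.w[1]);
  return 0;
}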
-(define_insn "cmpsf" - [(set (cc0) - (compare (match_operand:SF 0 "nonmemory_operand" "dF,d") - (match_operand:SF 1 "nonmemory_operand" "d,F")))] - "" - "* return set_cmp (operands[0], operands[1], 's');") - -;; Moves +(define_insn "" + [(set (match_operand:DF 0 "register_operand" "=d") + (match_operand:DF 1 "const_double_operand" "F"))] + "CONST_DOUBLE_MEM (operands[1]) == const0_rtx" + "ld.u %u1,%0\;ld.w %v1,%0" + [(set_attr "type" "xalu")]) -;(define_insn "movtf" -; [(set (match_operand:TF 0 "general_operand" "=g,d") -; (match_operand:TF 1 "general_operand" "d,g"))] -; "" -; "* -;{ -; rtx opaddr = 0; -; rtx xoperands[4]; -; xoperands[0] = operands[0]; -; xoperands[2] = operands[1]; -; -; if (REG_P (operands[0])) -; xoperands[1] = gen_rtx (REG, TFmode, REGNO (operands[0]) + 1); -; else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC) -; xoperands[1] = 0; -; else if (offsettable_memref_p (operands[0])) -; xoperands[1] = adj_offsettable_operand (operands[0], 8); -; else -; { -; opaddr = XEXP (operands[0], 0); -; xoperands[0] = gen_rtx (MEM, TFmode, gen_rtx (REG, SImode, 13)); -; xoperands[1] = adj_offsettable_operand (xoperands[0], 8); -; } -; -; if (REG_P (operands[1])) -; xoperands[3] = gen_rtx (REG, TFmode, REGNO (operands[1]) + 1); -; else if (offsettable_memref_p (operands[1])) -; xoperands[3] = adj_offsettable_operand (operands[1], 8); -; else -; { -; opaddr = XEXP (operands[1], 0); -; xoperands[2] = gen_rtx (MEM, TFmode, gen_rtx (REG, SImode, 13)); -; xoperands[3] = adj_offsettable_operand (xoperands[2], 8); -; } -; -; if (opaddr) -; output_asm_insn (\"psh.w a5\;ld.w %0,a5\", &opaddr); -; if (push_operand (operands[0], TFmode)) -; output_asm_insn (\"psh.l %3\;psh.l %2\", xoperands); -; else if (GET_CODE (operands[0]) == MEM) -; output_asm_insn (\"st.l %2,%0\;st.l %3,%1\", xoperands); -; else if (GET_CODE (operands[1]) == REG) -; output_asm_insn (\"mov %2,%0\;mov %3,%1\", xoperands); -; else -; output_asm_insn (\"ld.l %2,%0\;ld.l %3,%1\", xoperands); -; if (opaddr) -; output_asm_insn (\"pop.w a5\"); -; return \"\"; -;}") - -(define_insn "movdf" - [(set (match_operand:DF 0 "general_operand" "=g,d") - (match_operand:DF 1 "general_operand" "d,dmG"))] +(define_expand "movsf" + [(set (match_operand:SF 0 "general_operand" "") + (match_operand:SF 1 "general_operand" ""))] "" - "* -{ - if (push_operand (operands[0], DFmode)) - return \"psh.l %1\"; - else if (GET_CODE (operands[0]) == MEM) - return \"st.l %1,%0\"; - else if (GET_CODE (operands[1]) == REG) - return \"mov %1,%0\"; - else if (GET_CODE (operands[1]) == CONST_DOUBLE && LD_D_P (operands[1])) - { - operands[1] = gen_rtx (CONST_INT, VOIDmode, - const_double_high_int (operands[1])); - return \"ld.d %1,%0\"; - } - else if (GET_CODE (operands[1]) == CONST_DOUBLE && LD_L_P (operands[1])) - { - operands[1] = gen_rtx (CONST_INT, VOIDmode, - const_double_low_int (operands[1])); - return \"ld.l %1,%0\"; - } - else - return \"ld.l %1,%0\"; -}") + "if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (SFmode, operands[1]);") -(define_insn "movsf" - [(set (match_operand:SF 0 "general_operand" "=g,d") - (match_operand:SF 1 "general_operand" "d,gF"))] - "" - "* -{ - if (push_operand (operands[0], SFmode)) - return \"psh.w %1\"; - else if (GET_CODE (operands[0]) == MEM) - return \"st.s %1,%0\"; - else if (GET_CODE (operands[1]) == REG) - return \"mov.s %1,%0\"; - else - return \"ld.s %1,%0\"; -}") +(define_insn "" + [(set (match_operand:SF 0 "general_operand" "=d,d,d,d,<,m") + (match_operand:SF 1 "general_operand" "d,Q,m,F,d,d"))] + 
"register_operand (operands[0], SFmode) + || register_operand (operands[1], SFmode)" + "@ + mov.s %1,%0 + ldb.s %1,%0 + ld.s %1,%0 + ld.s %1,%0 + psh.w %1 + st.s %1,%0" + [(set_attr "type" "alu,mldb,mldw,alu,alu,mst")]) -(define_insn "movdi" - [(set (match_operand:DI 0 "general_operand" "=g,d") - (match_operand:DI 1 "general_operand" "d,dmiG"))] +(define_expand "movdi" + [(set (match_operand:DI 0 "general_operand" "") + (match_operand:DI 1 "general_operand" ""))] "" - "* -{ - if (push_operand (operands[0], DImode)) - return \"psh.l %1\"; - else if (GET_CODE (operands[0]) == MEM) - return \"st.l %1,%0\"; - else if (GET_CODE (operands[1]) == REG) - return \"mov %1,%0\"; - else if (GET_CODE (operands[1]) == CONST_DOUBLE && LD_D_P (operands[1])) - { - operands[1] = gen_rtx (CONST_INT, VOIDmode, - const_double_high_int (operands[1])); - return \"ld.d %1,%0\"; - } - else - return \"ld.l %1,%0\"; -}") - -;; Special case of movsi, needed to express A-reg preference. + "if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (DImode, operands[1]);") (define_insn "" - [(set (match_operand:SI 0 "push_operand" "=<") - (plus:SI (match_operand:SI 1 "register_operand" "a") - (match_operand:SI 2 "immediate_operand" "i")))] - "operands[1] != stack_pointer_rtx" - "pshea %a2(%1)") + [(set (match_operand:DI 0 "general_operand" "=d,d,d,d,d,<,m") + (match_operand:DI 1 "general_operand" "d,Q,m,G,HI,d,d"))] + "register_operand (operands[0], DImode) + || register_operand (operands[1], DImode)" + "@ + mov %1,%0 + ldb.l %1,%0 + ld.l %1,%0 + ld.d %u1,%0 + ld.l %1,%0 + psh.l %1 + st.l %1,%0" + [(set_attr "type" "alu,mldb,mldl,alu,alu,alu,mst")]) + +;; This is here so we can load any result of RTL constant folding +;; but do not use it on constants that can be loaded from memory. +;; It is never better and can be worse. -;; General movsi. Constraints will be selected based on TARGET_INDIRECTS -;; to avoid indirect addressing on C3, where it is slow. 
+(define_insn "" + [(set (match_operand:DI 0 "register_operand" "=d") + (match_operand:DI 1 "const_double_operand" "F"))] + "CONST_DOUBLE_MEM (operands[1]) == const0_rtx" + "ld.u %u1,%0\;ld.w %v1,%0" + [(set_attr "type" "xalu")]) (define_expand "movsi" [(set (match_operand:SI 0 "general_operand" "") (match_operand:SI 1 "general_operand" ""))] "" - "") + "if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (SImode, operands[1]);") (define_insn "" [(set (match_operand:SI 0 "push_operand" "=<,<") - (match_operand:SI 1 "general_operand" "Ad,io"))] + (match_operand:SI 1 "nonmemory_operand" "Ad,i"))] "" "@ psh.w %1 pshea %a1") (define_insn "" - [(set (match_operand:SI 0 "general_operand" "=g,r,<") - (match_operand:SI 1 "general_operand" "r,g,io"))] - "TARGET_INDIRECTS" - "* -{ - if (push_operand (operands[0], SImode)) - { - if (GET_CODE (operands[1]) == REG) - return \"psh.w %1\"; - else - return \"pshea %a1\"; - } - if (GET_CODE (operands[0]) == MEM) - return \"st.w %1,%0\"; - if (GET_CODE (operands[1]) != REG) - return \"ld.w %1,%0\"; - if (S_REG_P (operands[0]) && S_REG_P (operands[1])) - return \"mov.w %1,%0\"; - return \"mov %1,%0\"; -}") + [(set (match_operand:SI 0 "general_operand" "=d,r,d,r,r,m") + (match_operand:SI 1 "general_operand" "d,r,Q,m,i,r"))] + "register_operand (operands[0], SImode) + || register_operand (operands[1], SImode)" + "@ + mov.w %1,%0 + mov %1,%0 + ldb.w %1,%0 + ld.w %1,%0 + ld.w %1,%0 + st.w %1,%0" + [(set_attr "type" "alu,alu,mldb,mldw,alu,mst")]) + +(define_expand "movstrictsi" + [(set (strict_low_part (match_operand:SI 0 "general_operand" "")) + (match_operand:SI 1 "general_operand" ""))] + "" + "if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (SImode, operands[1]);") (define_insn "" - [(set (match_operand:SI 0 "general_operand" "=g,r,<") - (match_operand:SI 1 "general_operand" "r,g,i"))] - "! 
TARGET_INDIRECTS" - "* -{ - if (push_operand (operands[0], SImode)) - { - if (GET_CODE (operands[1]) == REG) - return \"psh.w %1\"; - else - return \"pshea %a1\"; - } - if (GET_CODE (operands[0]) == MEM) - return \"st.w %1,%0\"; - if (GET_CODE (operands[1]) != REG) - return \"ld.w %1,%0\"; - if (S_REG_P (operands[0]) && S_REG_P (operands[1])) - return \"mov.w %1,%0\"; - return \"mov %1,%0\"; -}") + [(set (strict_low_part (match_operand:SI 0 "general_operand" "=d,r,d,r,r,m")) + (match_operand:SI 1 "general_operand" "d,r,Q,m,i,r"))] + "register_operand (operands[0], SImode) + || register_operand (operands[1], SImode)" + "@ + mov.w %1,%0 + mov %1,%0 + ldb.w %1,%0 + ld.w %1,%0 + ld.w %1,%0 + st.w %1,%0" + [(set_attr "type" "alu,alu,mldb,mldw,alu,mst")]) -(define_insn "movstrictsi" - [(set (strict_low_part (match_operand:SI 0 "general_operand" "+g,r")) - (match_operand:SI 1 "general_operand" "r,g"))] +(define_expand "movhi" + [(set (match_operand:HI 0 "general_operand" "") + (match_operand:HI 1 "general_operand" ""))] "" - "* -{ - if (GET_CODE (operands[0]) == MEM) - return \"st.w %1,%0\"; - if (GET_CODE (operands[1]) != REG) - return \"ld.w %1,%0\"; - if (S_REG_P (operands[0]) && S_REG_P (operands[1])) - return \"mov.w %1,%0\"; - return \"mov %1,%0\"; -}") + "if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (HImode, operands[1]);") + +(define_insn "" + [(set (match_operand:HI 0 "general_operand" "=d,r,d,r,r,<,m") + (match_operand:HI 1 "general_operand" "d,r,Q,m,i,Ad,r"))] + "register_operand (operands[0], HImode) + || register_operand (operands[1], HImode)" + "@ + mov.w %1,%0 + mov %1,%0 + ldb.h %1,%0 + ld.h %1,%0 + ld.w %1,%0 + psh.w %1 + st.h %1,%0" + [(set_attr "type" "alu,alu,mldb,mldw,alu,alu,mst")]) -(define_insn "movhi" - [(set (match_operand:HI 0 "general_operand" "=g,r") - (match_operand:HI 1 "general_operand" "r,g"))] +(define_expand "movqi" + [(set (match_operand:QI 0 "general_operand" "") + (match_operand:QI 1 "general_operand" ""))] "" - "* -{ - if (push_operand (operands[0], HImode)) - abort (); - else if (GET_CODE (operands[0]) == MEM) - return \"st.h %1,%0\"; - else if (GET_CODE (operands[1]) == REG) - { - if (S_REG_P (operands[0]) && S_REG_P (operands[1])) - return \"mov.w %1,%0\"; - else - return \"mov %1,%0\"; - } - else if (GET_CODE (operands[1]) == CONST_INT) - return \"ld.w %1,%0\"; - else - return \"ld.h %1,%0\"; -}") + "if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (QImode, operands[1]);") -(define_insn "movqi" - [(set (match_operand:QI 0 "general_operand" "=g,r") - (match_operand:QI 1 "general_operand" "r,g"))] +(define_insn "" + [(set (match_operand:QI 0 "general_operand" "=d,r,d,r,r,<,m") + (match_operand:QI 1 "general_operand" "d,r,Q,m,i,Ad,r"))] + "register_operand (operands[0], QImode) + || register_operand (operands[1], QImode)" + "@ + mov.w %1,%0 + mov %1,%0 + ldb.b %1,%0 + ld.b %1,%0 + ld.w %1,%0 + psh.w %1 + st.b %1,%0" + [(set_attr "type" "alu,alu,mldb,mldw,alu,alu,mst")]) + +;; Expand block moves manually to get code that pipelines the loads. 
+ +(define_expand "movstrsi" + [(set (match_operand:BLK 0 "memory_operand" "=m") + (match_operand:BLK 1 "memory_operand" "m")) + (use (match_operand:SI 2 "const_int_operand" "i")) + (use (match_operand:SI 3 "const_int_operand" "i"))] "" - "* -{ - if (push_operand (operands[0], QImode)) - abort (); - else if (GET_CODE (operands[0]) == MEM) - return \"st.b %1,%0\"; - else if (GET_CODE (operands[1]) == REG) - { - if (S_REG_P (operands[0]) && S_REG_P (operands[1])) - return \"mov.w %1,%0\"; - else - return \"mov %1,%0\"; - } - else if (GET_CODE (operands[1]) == CONST_INT) - return \"ld.w %1,%0\"; - else - return \"ld.b %1,%0\"; -}") + " expand_movstr (operands); DONE; ") ;; Extension and truncation insns. ;; Those for integer source operand @@ -418,13 +424,15 @@ [(set (match_operand:DF 0 "register_operand" "=d") (float_extend:DF (match_operand:SF 1 "register_operand" "d")))] "" - "cvts.d %1,%0") + "cvts.d %1,%0" + [(set_attr "type" "cvts")]) (define_insn "truncdfsf2" [(set (match_operand:SF 0 "register_operand" "=d") (float_truncate:SF (match_operand:DF 1 "register_operand" "d")))] "" - "cvtd.s %1,%0") + "cvtd.s %1,%0" + [(set_attr "type" "cvtd")]) (define_insn "zero_extendhisi2" [(set (match_operand:SI 0 "register_operand" "=r") @@ -461,25 +469,95 @@ [(set (match_operand:SF 0 "register_operand" "=d") (float:SF (match_operand:SI 1 "register_operand" "d")))] "" - "cvtw.s %1,%0") + "cvtw.s %1,%0" + [(set_attr "type" "cvts")]) (define_insn "floatdisf2" [(set (match_operand:SF 0 "register_operand" "=d") (float:SF (match_operand:DI 1 "register_operand" "d")))] "" - "cvtl.s %1,%0") + "cvtl.s %1,%0" + [(set_attr "type" "cvtd")]) (define_insn "floatsidf2" [(set (match_operand:DF 0 "register_operand" "=d") (float:DF (match_operand:SI 1 "register_operand" "d")))] - "TARGET_C2" - "cvtw.d %1,%0") + "! TARGET_C1" + "cvtw.d %1,%0" + [(set_attr "type" "cvts")]) (define_insn "floatdidf2" [(set (match_operand:DF 0 "register_operand" "=d") (float:DF (match_operand:DI 1 "register_operand" "d")))] "" - "cvtl.d %1,%0") + "cvtl.d %1,%0" + [(set_attr "type" "cvtd")]) + +;; These are a little slower than gcc's normal way of doing unsigned +;; DI floats (if the DI number is "negative") but they avoid double +;; rounding and they avoid explicit constants. 
+ +(define_expand "floatunsdidf2" + [(set (match_operand:DF 0 "register_operand" "=d") + (float:DF (match_operand:DI 1 "register_operand" "d"))) + (set (cc0) (compare:DI (match_dup 3) (match_dup 1))) + (set (pc) + (if_then_else (le (cc0) (const_int 0)) + (label_ref (match_dup 4)) + (pc))) + (set (match_dup 2) (lshiftrt:DI (match_dup 1) (const_int 1))) + (set (match_dup 0) (float:DF (match_dup 2))) + (set (match_dup 0) (plus:DF (match_dup 0) (match_dup 0))) + (match_dup 4) + (set (match_dup 0) (match_dup 0))] + "" + " +{ + operands[2] = gen_reg_rtx (DImode); + operands[3] = force_reg (DImode, const0_rtx); + operands[4] = gen_label_rtx (); +}") + +(define_expand "floatunsdisf2" + [(set (match_operand:SF 0 "register_operand" "=d") + (float:SF (match_operand:DI 1 "register_operand" "d"))) + (set (cc0) (compare:DI (match_dup 3) (match_dup 1))) + (set (pc) + (if_then_else (le (cc0) (const_int 0)) + (label_ref (match_dup 4)) + (pc))) + (set (match_dup 2) (lshiftrt:DI (match_dup 1) (const_int 1))) + (set (match_dup 0) (float:SF (match_dup 2))) + (set (match_dup 0) (plus:SF (match_dup 0) (match_dup 0))) + (match_dup 4) + (set (match_dup 0) (match_dup 0))] + "" + " +{ + operands[2] = gen_reg_rtx (DImode); + operands[3] = force_reg (DImode, const0_rtx); + operands[4] = gen_label_rtx (); +}") + +;; These patterns are identical to gcc's default action +;; if DI->DF and DI->SF are not present. There are here +;; only to prevent SI->*F from promoting to DI->*F. + +(define_expand "floatunssidf2" + [(set (match_dup 2) + (zero_extend:DI (match_operand:SI 1 "register_operand" ""))) + (set (match_operand:DF 0 "register_operand" "") + (float:DF (match_dup 2)))] + "" + "operands[2] = gen_reg_rtx (DImode);") + +(define_expand "floatunssisf2" + [(set (match_dup 2) + (zero_extend:DI (match_operand:SI 1 "register_operand" ""))) + (set (match_operand:SF 0 "register_operand" "") + (float:SF (match_dup 2)))] + "" + "operands[2] = gen_reg_rtx (DImode);") ;; Float-to-fix conversion insns. @@ -487,30 +565,29 @@ [(set (match_operand:SI 0 "register_operand" "=d") (fix:SI (fix:SF (match_operand:SF 1 "register_operand" "d"))))] "" - "cvts.w %1,%0") + "cvts.w %1,%0" + [(set_attr "type" "cvts")]) (define_insn "fix_truncsfdi2" [(set (match_operand:DI 0 "register_operand" "=d") (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "d"))))] "" - "cvts.l %1,%0") + "cvts.l %1,%0" + [(set_attr "type" "cvts")]) (define_insn "fix_truncdfsi2" [(set (match_operand:SI 0 "register_operand" "=d") (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "d"))))] "" - "* -{ - if (TARGET_C2) - return \"cvtd.w %1,%0\"; - return \"cvtd.l %1,%0\"; -}") + "cvtd.l %1,%0" + [(set_attr "type" "cvtd")]) (define_insn "fix_truncdfdi2" [(set (match_operand:DI 0 "register_operand" "=d") (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "d"))))] "" - "cvtd.l %1,%0") + "cvtd.l %1,%0" + [(set_attr "type" "cvtd")]) ;;- All kinds of add instructions. 
@@ -519,14 +596,16 @@ (plus:DF (match_operand:DF 1 "register_operand" "%0") (match_operand:DF 2 "register_operand" "d")))] "" - "add.d %2,%0") + "add.d %2,%0" + [(set_attr "type" "addd")]) (define_insn "addsf3" [(set (match_operand:SF 0 "register_operand" "=d") (plus:SF (match_operand:SF 1 "register_operand" "%0") (match_operand:SF 2 "nonmemory_operand" "dF")))] "" - "add.s %2,%0") + "add.s %2,%0" + [(set_attr "type" "adds")]) (define_insn "adddi3" [(set (match_operand:DI 0 "register_operand" "=d") @@ -535,33 +614,54 @@ "" "add.l %2,%0") -;; special case of addsi3, needed to specify an A reg for the destination -;; when the source is a sum involving FP or AP. +(define_expand "addsi3" + [(set (match_operand:SI 0 "register_operand" "") + (plus:SI (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "nonmemory_operand" "")))] + "" + "") (define_insn "" [(set (match_operand:SI 0 "register_operand" "=a") - (plus:SI (match_operand:SI 1 "register_operand" "%a") + (plus:SI (match_operand:SI 1 "register_operand" "%A") (match_operand:SI 2 "immediate_operand" "i")))] "operands[1] == frame_pointer_rtx || operands[1] == arg_pointer_rtx" "ldea %a2(%1),%0") -(define_insn "addsi3" +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=a") + (plus:SI (match_operand:SI 1 "register_operand" "%a") + (match_operand:SI 2 "nonmemory_operand" "ri")))] + "operands[1] == stack_pointer_rtx && operands[0] != stack_pointer_rtx" + "mov %1,%0\;add.w %2,%0") + +(define_insn "" + [(set (match_operand:SI 0 "push_operand" "=<") + (plus:SI (match_operand:SI 1 "register_operand" "A") + (match_operand:SI 2 "immediate_operand" "i")))] + "operands[1] != stack_pointer_rtx" + "pshea %a2(%1)" + [(set_attr "type" "mst")]) + +(define_insn "" [(set (match_operand:SI 0 "register_operand" "=d,a,a") - (plus:SI (match_operand:SI 1 "nonmemory_operand" "%0,0,a") + (plus:SI (match_operand:SI 1 "register_operand" "%0,0,A") + (match_operand:SI 2 "nonmemory_operand" "di,ri,i")))] + "TARGET_C1" + "@ + add.w %2,%0 + add.w %2,%0 + ldea %a2(%1),%0") + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d,a,r") + (plus:SI (match_operand:SI 1 "register_operand" "%0,0,A") (match_operand:SI 2 "nonmemory_operand" "di,ri,i")))] "" - "* switch (which_alternative) -{ - case 0: - case 1: - return \"add.w %2,%0\"; - case 2: - if ((TARGET_C2 || A_REG_P (operands[0])) - && operands[1] != stack_pointer_rtx) - return \"ldea %a2(%1),%0\"; - else - return \"mov %1,%0\;add.w %2,%0\"; -}") + "@ + add.w %2,%0 + add.w %2,%0 + ldea %a2(%1),%0") (define_insn "addhi3" [(set (match_operand:HI 0 "register_operand" "=d,a") @@ -571,11 +671,13 @@ "add.h %2,%0") (define_insn "addqi3" - [(set (match_operand:QI 0 "register_operand" "=d") - (plus:QI (match_operand:QI 1 "register_operand" "%0") - (match_operand:QI 2 "register_operand" "d")))] + [(set (match_operand:QI 0 "register_operand" "=d,d") + (plus:QI (match_operand:QI 1 "register_operand" "%0,0") + (match_operand:QI 2 "nonmemory_operand" "d,i")))] "" - "add.b %2,%0") + "@ + add.b %2,%0 + add.w %2,%0") ;;- All kinds of subtract instructions. 
@@ -584,14 +686,16 @@ (minus:DF (match_operand:DF 1 "register_operand" "0") (match_operand:DF 2 "register_operand" "d")))] "" - "sub.d %2,%0") + "sub.d %2,%0" + [(set_attr "type" "addd")]) (define_insn "subsf3" [(set (match_operand:SF 0 "register_operand" "=d") (minus:SF (match_operand:SF 1 "register_operand" "0") (match_operand:SF 2 "nonmemory_operand" "dF")))] "" - "sub.s %2,%0") + "sub.s %2,%0" + [(set_attr "type" "adds")]) (define_insn "subdi3" [(set (match_operand:DI 0 "register_operand" "=d") @@ -601,11 +705,15 @@ "sub.l %2,%0") (define_insn "subsi3" - [(set (match_operand:SI 0 "register_operand" "=d,a") - (minus:SI (match_operand:SI 1 "register_operand" "0,0") - (match_operand:SI 2 "nonmemory_operand" "di,ai")))] + [(set (match_operand:SI 0 "register_operand" "=d,a,?d,?a") + (minus:SI (match_operand:SI 1 "nonmemory_operand" "0,0,di,ai") + (match_operand:SI 2 "nonmemory_operand" "di,ai,0,0")))] "" - "sub.w %2,%0") + "@ + sub.w %2,%0 + sub.w %2,%0 + sub.w %1,%0\;neg.w %0,%0 + sub.w %1,%0\;neg.w %0,%0") (define_insn "subhi3" [(set (match_operand:HI 0 "register_operand" "=d,a") @@ -615,11 +723,13 @@ "sub.h %2,%0") (define_insn "subqi3" - [(set (match_operand:QI 0 "register_operand" "=d") - (minus:QI (match_operand:QI 1 "register_operand" "0") - (match_operand:QI 2 "register_operand" "d")))] + [(set (match_operand:QI 0 "register_operand" "=d,d") + (minus:QI (match_operand:QI 1 "register_operand" "0,0") + (match_operand:QI 2 "nonmemory_operand" "d,i")))] "" - "sub.b %2,%0") + "@ + sub.b %2,%0 + sub.w %2,%0") ;;- Multiply instructions. @@ -628,42 +738,50 @@ (mult:DF (match_operand:DF 1 "register_operand" "%0") (match_operand:DF 2 "register_operand" "d")))] "" - "mul.d %2,%0") + "mul.d %2,%0" + [(set_attr "type" "muld")]) (define_insn "mulsf3" [(set (match_operand:SF 0 "register_operand" "=d") (mult:SF (match_operand:SF 1 "register_operand" "%0") (match_operand:SF 2 "nonmemory_operand" "dF")))] "" - "mul.s %2,%0") + "mul.s %2,%0" + [(set_attr "type" "muls")]) (define_insn "muldi3" [(set (match_operand:DI 0 "register_operand" "=d") (mult:DI (match_operand:DI 1 "register_operand" "%0") (match_operand:DI 2 "register_operand" "d")))] "" - "mul.l %2,%0") + "mul.l %2,%0" + [(set_attr "type" "mull")]) (define_insn "mulsi3" [(set (match_operand:SI 0 "register_operand" "=d,a") (mult:SI (match_operand:SI 1 "register_operand" "%0,0") (match_operand:SI 2 "nonmemory_operand" "di,ai")))] "" - "mul.w %2,%0") + "mul.w %2,%0" + [(set_attr "type" "mulw")]) (define_insn "mulhi3" [(set (match_operand:HI 0 "register_operand" "=d,a") (mult:HI (match_operand:HI 1 "register_operand" "%0,0") (match_operand:HI 2 "nonmemory_operand" "di,ai")))] "" - "mul.h %2,%0") + "mul.h %2,%0" + [(set_attr "type" "mulw")]) (define_insn "mulqi3" - [(set (match_operand:QI 0 "register_operand" "=d") - (mult:QI (match_operand:QI 1 "register_operand" "%0") - (match_operand:QI 2 "register_operand" "d")))] + [(set (match_operand:QI 0 "register_operand" "=d,d") + (mult:QI (match_operand:QI 1 "register_operand" "%0,0") + (match_operand:QI 2 "nonmemory_operand" "d,i")))] "" - "mul.b %2,%0") + "@ + mul.b %2,%0 + mul.w %2,%0" + [(set_attr "type" "mulw,mulw")]) ;;- Divide instructions. 
@@ -672,21 +790,37 @@ (div:DF (match_operand:DF 1 "register_operand" "0") (match_operand:DF 2 "register_operand" "d")))] "" - "div.d %2,%0") + "div.d %2,%0" + [(set_attr "type" "divd")]) (define_insn "divsf3" [(set (match_operand:SF 0 "register_operand" "=d") (div:SF (match_operand:SF 1 "register_operand" "0") (match_operand:SF 2 "nonmemory_operand" "dF")))] "" - "div.s %2,%0") + "div.s %2,%0" + [(set_attr "type" "divs")]) (define_insn "divdi3" [(set (match_operand:DI 0 "register_operand" "=d") (div:DI (match_operand:DI 1 "register_operand" "0") (match_operand:DI 2 "register_operand" "d")))] "" - "div.l %2,%0") + "div.l %2,%0" + [(set_attr "type" "divl")]) + +(define_expand "udivsi3" + [(set (match_dup 3) + (zero_extend:DI (match_operand:SI 1 "register_operand" ""))) + (set (match_dup 4) + (zero_extend:DI (match_operand:SI 2 "register_operand" ""))) + (set (match_dup 3) + (div:DI (match_dup 3) (match_dup 4))) + (set (match_operand:SI 0 "register_operand" "") + (subreg:SI (match_dup 3) 0))] + "" + "operands[3] = gen_reg_rtx (DImode); + operands[4] = gen_reg_rtx (DImode); ") (define_insn "udivdi3" [(set (match_operand:DI 0 "register_operand" "=d") @@ -700,28 +834,31 @@ (div:SI (match_operand:SI 1 "register_operand" "0,0") (match_operand:SI 2 "nonmemory_operand" "di,ai")))] "" - "div.w %2,%0") + "div.w %2,%0" + [(set_attr "type" "divw")]) (define_insn "divhi3" [(set (match_operand:HI 0 "register_operand" "=d,a") (div:HI (match_operand:HI 1 "register_operand" "0,0") (match_operand:HI 2 "nonmemory_operand" "di,ai")))] "" - "div.h %2,%0") + "div.h %2,%0" + [(set_attr "type" "divw")]) (define_insn "divqi3" [(set (match_operand:QI 0 "register_operand" "=d") (div:QI (match_operand:QI 1 "register_operand" "0") (match_operand:QI 2 "register_operand" "d")))] "" - "div.b %2,%0") + "div.b %2,%0" + [(set_attr "type" "divw")]) -;; - and, or, xor +;;- Bit clear instructions. 
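[Editor's note] The udivsi3 expansion above gets an unsigned 32-bit divide out of the signed 64-bit divide instruction by zero-extending both operands: the widened values are nonnegative, so the signed quotient equals the unsigned one. A minimal C equivalent (function name hypothetical, divisor assumed nonzero):

#include <stdint.h>

uint32_t udivsi (uint32_t a, uint32_t b)
{
  int64_t wa = (int64_t) (uint64_t) a;   /* zero-extend, as in the expand */
  int64_t wb = (int64_t) (uint64_t) b;
  return (uint32_t) (wa / wb);           /* signed div.l yields the
                                            unsigned quotient */
}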
(define_insn "" [(set (match_operand:DI 0 "register_operand" "=d") (and:DI (match_operand:DI 1 "register_operand" "%0") - (match_operand:DI 2 "immediate_operand" "Fn")))] + (match_operand:DI 2 "" "")))] "(GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0) || (GET_CODE (operands[2]) == CONST_DOUBLE && CONST_DOUBLE_HIGH (operands[2]) == -1)" @@ -760,7 +897,7 @@ (define_insn "" [(set (match_operand:DI 0 "register_operand" "=d") (ior:DI (match_operand:DI 1 "register_operand" "%0") - (match_operand:DI 2 "immediate_operand" "Fn")))] + (match_operand:DI 2 "" "")))] "(GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0) || (GET_CODE (operands[2]) == CONST_DOUBLE && CONST_DOUBLE_HIGH (operands[2]) == 0)" @@ -799,7 +936,7 @@ (define_insn "" [(set (match_operand:DI 0 "register_operand" "=d") (xor:DI (match_operand:DI 1 "register_operand" "%0") - (match_operand:DI 2 "immediate_operand" "Fn")))] + (match_operand:DI 2 "" "")))] "(GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0) || (GET_CODE (operands[2]) == CONST_DOUBLE && CONST_DOUBLE_HIGH (operands[2]) == 0)" @@ -837,13 +974,15 @@ [(set (match_operand:DF 0 "register_operand" "=d") (neg:DF (match_operand:DF 1 "register_operand" "d")))] "" - "neg.d %1,%0") + "neg.d %1,%0" + [(set_attr "type" "addd")]) (define_insn "negsf2" [(set (match_operand:SF 0 "register_operand" "=d") (neg:SF (match_operand:SF 1 "register_operand" "d")))] "" - "neg.s %1,%0") + "neg.s %1,%0" + [(set_attr "type" "adds")]) (define_insn "negdi2" [(set (match_operand:DI 0 "register_operand" "=d") @@ -893,42 +1032,164 @@ "" "not %1,%0") -;;- shifts +;;- Shifts ;; -;; Convex shift instructions are logical shifts. -;; To make signed right shifts: -;; for SImode, sign extend to DImode and shift, works for 0..32 -;; for DImode, shift and then extend the sign, works for 0..63 -- but not 64 +;; The extreme profusion of patterns here is due to the different-speed +;; shifts on different machines, and the C1's lack of word shift S-register +;; instructions. 
+ +;; SImode + +;; Logical left 1, 1 cycle on all machines via add + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=r") + (lshift:SI (match_operand:SI 1 "register_operand" "0") + (const_int 1)))] + "" + "add.w %0,%0") + +;; C34 general shift is 1 cycle + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d,a") + (lshift:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "di,ai")))] + "TARGET_C34" + "@ + shf.w %2,%0 + shf %2,%0" + [(set_attr "type" "shfw,shfw")]) + +;; else shift left 0..7 is 1 cycle if we use an A register + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=a,?d") + (lshift:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "immediate_operand" "ai,di")))] + "TARGET_C1 && INTVAL (operands[2]) < (unsigned) 8" + "@ + shf %2,%0 + shf %2,%0" + [(set_attr "type" "alu,shfl")]) + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=a,?d") + (lshift:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "immediate_operand" "ai,di")))] + "INTVAL (operands[2]) < (unsigned) 8" + "@ + shf %2,%0 + shf.w %2,%0" + [(set_attr "type" "alu,shfw")]) + +;; else general left shift + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d,a") + (lshift:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "di,ai")))] + "TARGET_C1" + "@ + shf %2,%0 + shf %2,%0" + [(set_attr "type" "shfl,shfw")]) + +;; (but C2 shift left by a constant can is faster via multiply) + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=r") + (lshift:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "const_int_operand" "i")))] + "TARGET_C2 && INTVAL (operands[2]) < (unsigned) 32" + "mul.w %z2,%0" + [(set_attr "type" "mulw")]) (define_insn "lshlsi3" [(set (match_operand:SI 0 "register_operand" "=d,a") (lshift:SI (match_operand:SI 1 "register_operand" "0,0") (match_operand:SI 2 "nonmemory_operand" "di,ai")))] "" - "* -{ - if (operands[2] == const1_rtx) - return \"add.w %0,%0\"; - else if (TARGET_C2 && S_REG_P (operands[0])) - return \"shf.w %2,%0\"; - else - return \"shf %2,%0\"; -}") + "@ + shf.w %2,%0 + shf %2,%0" + [(set_attr "type" "shfw,shfw")]) + +;; Arithmetic left 1, 1 cycle on all machines via add + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=r") + (ashift:SI (match_operand:SI 1 "register_operand" "0") + (const_int 1)))] + "" + "add.w %0,%0") + +;; C34 general shift is 1 cycle + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d,a") + (ashift:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "di,ai")))] + "TARGET_C34" + "@ + shf.w %2,%0 + shf %2,%0" + [(set_attr "type" "shfw,shfw")]) + +;; else shift left 0..7 is 1 cycle if we use an A register + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=a,?d") + (ashift:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "immediate_operand" "ai,di")))] + "TARGET_C1 && INTVAL (operands[2]) < (unsigned) 8" + "@ + shf %2,%0 + shf %2,%0" + [(set_attr "type" "alu,shfl")]) + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=a,?d") + (ashift:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "immediate_operand" "ai,di")))] + "INTVAL (operands[2]) < (unsigned) 8" + "@ + shf %2,%0 + shf.w %2,%0" + [(set_attr "type" "alu,shfw")]) + +;; else general left shift + +(define_insn "" + [(set (match_operand:SI 0 
"register_operand" "=d,a") + (ashift:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "di,ai")))] + "TARGET_C1" + "@ + shf %2,%0 + shf %2,%0" + [(set_attr "type" "shfl,shfw")]) + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=r") + (ashift:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "const_int_operand" "i")))] + "TARGET_C2 && INTVAL (operands[2]) < (unsigned) 32" + "mul.w %z2,%0" + [(set_attr "type" "mulw")]) (define_insn "ashlsi3" [(set (match_operand:SI 0 "register_operand" "=d,a") (ashift:SI (match_operand:SI 1 "register_operand" "0,0") (match_operand:SI 2 "nonmemory_operand" "di,ai")))] "" - "* -{ - if (operands[2] == const1_rtx) - return \"add.w %0,%0\"; - else if (TARGET_C2 && S_REG_P (operands[0])) - return \"shf.w %2,%0\"; - else - return \"shf %2,%0\"; -}") + "@ + shf.w %2,%0 + shf %2,%0" + [(set_attr "type" "shfw,shfw")]) + +;; Logical right, general +;; The hardware wants the negative of the shift count (define_expand "lshrsi3" [(set (match_operand:SI 0 "register_operand" "") @@ -937,77 +1198,146 @@ "" "operands[2] = negate_rtx (SImode, operands[2]);") +;; C1 lacks word shift S reg + +(define_insn "" + [(set + (match_operand:SI 0 "register_operand" "=a,?d") + (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0") + (neg:SI (match_operand:SI 2 "nonmemory_operand" "ai,di"))))] + "TARGET_C1" + "@ + shf %2,%0 + ld.u #0,%0\;shf %2,%0" + [(set_attr "type" "shfw,shfl")]) + +;; general case + (define_insn "" [(set (match_operand:SI 0 "register_operand" "=d,a") (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0") (neg:SI (match_operand:SI 2 "nonmemory_operand" "di,ai"))))] "" - "* -{ - if (A_REG_P (operands[0])) - return \"shf %2,%0\"; - else if (TARGET_C2) - return \"shf.w %2,%0\"; - else - return \"ld.u #0,%0\;shf %2,%0\"; -}") + "@ + shf.w %2,%0 + shf %2,%0" + [(set_attr "type" "shfw,shfw")]) + +;; Patterns without neg produced by constant folding (define_insn "" [(set - (match_operand:SI 0 "register_operand" "=r") - (lshiftrt:SI (match_operand:SI 1 "register_operand" "0") - (match_operand:SI 2 "immediate_operand" "i")))] + (match_operand:SI 0 "register_operand" "=a,?d") + (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "immediate_operand" "i,i")))] + "TARGET_C1" + "@ + shf #%n2,%0 + ld.u #0,%0\;shf #%n2,%0" + [(set_attr "type" "shfw,shfl")]) + +(define_insn "" + [(set + (match_operand:SI 0 "register_operand" "=d,a") + (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "immediate_operand" "i,i")))] "" - "* -{ - if (A_REG_P (operands[0])) - return \"shf #%n2,%0\"; - else if (TARGET_C2) - return \"shf.w #%n2,%0\"; - else - return \"ld.u #0,%0\;shf #%n2,%0\"; -}") + "@ + shf.w #%n2,%0 + shf #%n2,%0" + [(set_attr "type" "shfw,shfw")]) + +;; Arithmetic right, general +;; Sign-extend to 64 bits, then shift that. Works for 0..32. 
(define_expand "ashrsi3" - [(set (match_operand:SI 0 "register_operand" "=d") - (ashiftrt:SI (match_operand:SI 1 "register_operand" "d") - (neg:SI (match_operand:SI 2 "nonmemory_operand" "di"))))] + [(set (match_operand:SI 0 "register_operand" "") + (ashiftrt:SI (match_operand:SI 1 "register_operand" "") + (neg:SI (match_operand:SI 2 "nonmemory_operand" ""))))] "" "operands[2] = negate_rtx (SImode, operands[2]);") (define_insn "" - [(set (match_operand:SI 0 "register_operand" "=&d") - (ashiftrt:SI (match_operand:SI 1 "register_operand" "d") - (neg:SI (match_operand:SI 2 "nonmemory_operand" "di"))))] + [(set (match_operand:SI 0 "register_operand" "=d,&d") + (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,d") + (neg:SI + (match_operand:SI 2 "nonmemory_operand" "di,di"))))] "" - "cvtw.l %1,%0\;shf %2,%0") + "cvtw.l %1,%0\;shf %2,%0" + [(set_attr "type" "shfl,shfl")]) (define_insn "" - [(set (match_operand:SI 0 "register_operand" "=&d") + [(set (match_operand:SI 0 "register_operand" "=d") (ashiftrt:SI (match_operand:SI 1 "register_operand" "d") (match_operand:SI 2 "immediate_operand" "i")))] "" - "cvtw.l %1,%0\;shf #%n2,%0") + "cvtw.l %1,%0\;shf #%n2,%0" + [(set_attr "type" "shfl")]) + +;; DImode +;; Logical left, 1-cycle + +(define_insn "" + [(set (match_operand:DI 0 "register_operand" "=d") + (lshift:DI (match_operand:DI 1 "register_operand" "0") + (const_int 1)))] + "" + "add.l %0,%0") + +;; Logical left, general (define_insn "lshldi3" [(set (match_operand:DI 0 "register_operand" "=d") (lshift:DI (match_operand:DI 1 "register_operand" "0") (match_operand:SI 2 "nonmemory_operand" "di")))] "" - "shf %2,%0") + "shf %2,%0" + [(set_attr "type" "shfl")]) + +;; Arithmetic left, 1-cycle + +(define_insn "" + [(set (match_operand:DI 0 "register_operand" "=d") + (ashift:DI (match_operand:DI 1 "register_operand" "0") + (const_int 1)))] + "" + "add.l %0,%0") + +;; Arithmetic left, general (define_insn "ashldi3" [(set (match_operand:DI 0 "register_operand" "=d") (ashift:DI (match_operand:DI 1 "register_operand" "0") (match_operand:SI 2 "nonmemory_operand" "di")))] "" - "shf %2,%0") + "shf %2,%0" + [(set_attr "type" "shfl")]) -(define_expand "lshrdi3" +;; Can omit zero- or sign-extend if shift is 32 or more. 
+ +(define_insn "" [(set (match_operand:DI 0 "register_operand" "=d") - (lshiftrt:DI (match_operand:DI 1 "register_operand" "0") - (neg:SI (match_operand:SI 2 "nonmemory_operand" "di"))))] + (ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "0")) + (match_operand:SI 2 "const_int_operand" "i")))] + "INTVAL (operands[2]) >= 32" + "shf %2,%0" + [(set_attr "type" "shfl")]) + +(define_insn "" + [(set (match_operand:DI 0 "register_operand" "=d") + (ashift:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "0")) + (match_operand:SI 2 "const_int_operand" "i")))] + "INTVAL (operands[2]) >= 32" + "shf %2,%0" + [(set_attr "type" "shfl")]) + +;; Logical right, general + +(define_expand "lshrdi3" + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (neg:SI (match_operand:SI 2 "nonmemory_operand" ""))))] "" "operands[2] = negate_rtx (SImode, operands[2]);") @@ -1016,88 +1346,126 @@ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0") (neg:SI (match_operand:SI 2 "nonmemory_operand" "di"))))] "" - "shf %2,%0") + "shf %2,%0" + [(set_attr "type" "shfl")]) (define_insn "" [(set (match_operand:DI 0 "register_operand" "=d") (lshiftrt:DI (match_operand:DI 1 "register_operand" "0") (match_operand:SI 2 "immediate_operand" "i")))] "" - "shf #%n2,%0") + "shf #%n2,%0" + [(set_attr "type" "shfl")]) -;; signed a >> b is +;; Arithmetic right, general +;; Use ;; ((a >> b) ^ signbit) - signbit ;; where signbit is (1 << 63) >> b +;; Works for 0..63. Does not work for 64; unfortunate but legal. (define_expand "ashrdi3" - [(match_operand:DI 0 "register_operand" "") - (match_operand:DI 1 "register_operand" "") - (match_operand:SI 2 "nonmemory_operand" "") - (match_dup 3)] + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (neg:SI (match_operand:SI 2 "nonmemory_operand" "")))) + (set (match_dup 3) (lshiftrt:DI (match_dup 3) (neg:SI (match_dup 2)))) + (set (match_dup 0) (xor:DI (match_dup 0) (match_dup 3))) + (set (match_dup 0) (minus:DI (match_dup 0) (match_dup 3)))] "" " { if (GET_CODE (operands[2]) == CONST_INT) - { - int rshift = INTVAL (operands[2]); - if (rshift < 0) - operands[3] = force_reg (DImode, immed_double_const (0, 0, DImode)); - else if (rshift < 32) - operands[3] = - force_reg (DImode, - immed_double_const (0, 1 << (31 - rshift), DImode)); - else if (rshift < 64) - operands[3] = - force_reg (DImode, - immed_double_const (1 << (63 - rshift), 0, DImode)); - else - operands[3] = force_reg (DImode, immed_double_const (0, 0, DImode)); - } - else - { - operands[3] = - force_reg (DImode, immed_double_const (0, 1 << 31, DImode)); - emit_insn (gen_lshrdi3 (operands[3], operands[3], operands[2])); - } - - emit_insn (gen_lshrdi3 (operands[0], operands[1], operands[2])); - emit_insn (gen_rtx (SET, VOIDmode, operands[0], - gen_rtx (XOR, DImode, operands[0], operands[3]))); - emit_insn (gen_rtx (SET, VOIDmode, operands[0], - gen_rtx (MINUS, DImode, operands[0], operands[3]))); - DONE; + switch (INTVAL (operands[2])) + { + case 32: + emit_insn (gen_ashrdi3_32 (operands[0], operands[1])); + DONE; + } + + operands[2] = negate_rtx (SImode, operands[2]); + operands[3] = force_reg (DImode, immed_double_const (0, 1 << 31, DImode)); }") + +;; Arithmetic right 32, a common case that can save a couple of insns. 
+ +(define_expand "ashrdi3_32" + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (const_int 32))) + (set (match_dup 0) + (sign_extend:DI (subreg:SI (match_dup 0) 0)))] + "" + "") ;; __builtin instructions (define_insn "sqrtdf2" [(set (match_operand:DF 0 "register_operand" "=d") (sqrt:DF (match_operand:DF 1 "register_operand" "0")))] - "TARGET_C2" - "sqrt.d %0") + "! TARGET_C1 && flag_fast_math" + "sqrt.d %0" + [(set_attr "type" "divd")]) (define_insn "sqrtsf2" [(set (match_operand:SF 0 "register_operand" "=d") (sqrt:SF (match_operand:SF 1 "register_operand" "0")))] - "TARGET_C2" - "sqrt.s %0") - -;(define_insn "" -; [(set (match_operand:SI 0 "register_operand" "=d") -; (minus:SI (ffs:SI (match_operand:SI 1 "register_operand" "d")) -; (const_int 1)))] -; "" -; "tzc %1,%0\;le.w #32,%0\;jbrs.f .+6\;ld.w #-1,%0") -; -;(define_expand "ffssi2" -; [(set (match_operand:SI 0 "register_operand" "=d") -; (minus:SI (ffs:SI (match_operand:SI 1 "register_operand" "d")) -; (const_int 1))) -; (set (match_dup 0) -; (plus:SI (match_dup 0) -; (const_int 1)))] -; "" -; "") + "! TARGET_C1 && flag_fast_math" + "sqrt.s %0" + [(set_attr "type" "divs")]) + +(define_insn "sindf2" + [(set (match_operand:DF 0 "register_operand" "=d") + (unspec:DF [(match_operand:DF 1 "register_operand" "0")] 1))] + "! TARGET_C1 && flag_fast_math" + "sin.d %0") + +(define_insn "sinsf2" + [(set (match_operand:SF 0 "register_operand" "=d") + (unspec:SF [(match_operand:SF 1 "register_operand" "0")] 1))] + "! TARGET_C1 && flag_fast_math" + "sin.s %0") + +(define_insn "cosdf2" + [(set (match_operand:DF 0 "register_operand" "=d") + (unspec:DF [(match_operand:DF 1 "register_operand" "0")] 2))] + "! TARGET_C1 && flag_fast_math" + "cos.d %0") + +(define_insn "cossf2" + [(set (match_operand:SF 0 "register_operand" "=d") + (unspec:SF [(match_operand:SF 1 "register_operand" "0")] 2))] + "! TARGET_C1 && flag_fast_math" + "cos.s %0") + +(define_insn "ftruncdf2" + [(set (match_operand:DF 0 "register_operand" "=d") + (fix:DF (match_operand:DF 1 "register_operand" "d")))] + "! TARGET_C1" + "frint.d %1,%0" + [(set_attr "type" "cvtd")]) + +(define_insn "ftruncsf2" + [(set (match_operand:SF 0 "register_operand" "=d") + (fix:SF (match_operand:SF 1 "register_operand" "d")))] + "! 
TARGET_C1" + "frint.s %1,%0" + [(set_attr "type" "cvts")]) + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=d") + (minus:SI (ffs:SI (match_operand:SI 1 "register_operand" "d")) + (const_int 1)))] + "" + "tzc %1,%0\;le.w #32,%0\;jbrs.f L0%=\;ld.w #-1,%0\\nL0%=:") + +(define_expand "ffssi2" + [(set (match_operand:SI 0 "register_operand" "=d") + (minus:SI (ffs:SI (match_operand:SI 1 "register_operand" "d")) + (const_int 1))) + (set (match_dup 0) + (plus:SI (match_dup 0) + (const_int 1)))] + "" + "") (define_insn "abssf2" [(set (match_operand:SF 0 "register_operand" "=d") @@ -1111,9 +1479,208 @@ (match_dup 2)))] "" "operands[2] = force_reg (DImode, - immed_double_const (-1, 0x7fffffff, DImode));") + immed_double_const (-1, 0x7fffffff, DImode));") -;; Jumps +;;- Compares + +(define_insn "cmpdi" + [(set (cc0) + (compare (match_operand:DI 0 "register_operand" "d") + (match_operand:DI 1 "register_operand" "d")))] + "" + "* return output_cmp (operands[0], operands[1], 'l');") + +(define_insn "" + [(set (cc0) (match_operand:DI 0 "register_operand" "d")) + (clobber (match_scratch:DI 1 "=d"))] + "next_insn_tests_no_inequality (insn)" + "* return output_cmp (operands[0], operands[1], 'L');") + +(define_insn "cmpsi" + [(set (cc0) + (compare (match_operand:SI 0 "register_operand" "d,a") + (match_operand:SI 1 "nonmemory_operand" "di,ai")))] + "" + "* return output_cmp (operands[0], operands[1], 'w');") + +(define_insn "cmphi" + [(set (cc0) + (compare (match_operand:HI 0 "register_operand" "d,a") + (match_operand:HI 1 "nonmemory_operand" "di,ai")))] + "" + "* return output_cmp (operands[0], operands[1], 'h');") + +; cmpqi is intentionally omitted. +; +; gcc will sign-extend or zero-extend the operands to the next +; wider mode, HImode. +; +; For reg .cmp. constant, we just go with the halfword immediate +; instruction. Perhaps the widening insn can be cse'd or combined away. +; If not, we're still as good as loading a byte constant into a register +; to do a reg-reg byte compare. +; +; The following patterns pick up cases that can use reg .cmp. reg after all. 
+ +(define_insn "" + [(set (cc0) + (compare + (sign_extend:HI (match_operand:QI 0 "register_operand" "d")) + (sign_extend:HI (match_operand:QI 1 "register_operand" "d"))))] + "" + "* return output_cmp (operands[0], operands[1], 'b');") + +(define_insn "" + [(set (cc0) + (compare + (ashift:HI (subreg:HI (match_operand:QI 0 "register_operand" "d") 0) + (const_int 8)) + (ashift:HI (subreg:HI (match_operand:QI 1 "register_operand" "d") 0) + (const_int 8))))] + "" + "* return output_cmp (operands[0], operands[1], 'b');") + +(define_insn "" + [(set (cc0) + (compare (match_operand:QI 0 "register_operand" "d") + (match_operand:QI 1 "register_operand" "d")))] + "" + "* return output_cmp (operands[0], operands[1], 'b');") + +(define_insn "" + [(set (cc0) (match_operand:QI 0 "register_operand" "d,a")) + (clobber (match_scratch:QI 1 "=d,a"))] + "next_insn_tests_no_inequality (insn)" + "* return output_cmp (operands[0], operands[1], 'B');") + +(define_insn "" + [(set (cc0) (subreg (match_operand:QI 0 "register_operand" "d,a") 0)) + (clobber (match_scratch:QI 1 "=d,a"))] + "next_insn_tests_no_inequality (insn)" + "* return output_cmp (operands[0], operands[1], 'B');") + +(define_insn "" + [(set (cc0) + (zero_extend (subreg (match_operand:QI 0 "register_operand" "d,a") 0))) + (clobber (match_scratch:QI 1 "=d,a"))] + "next_insn_tests_no_inequality (insn)" + "* return output_cmp (operands[0], operands[1], 'B');") + +(define_insn "cmpdf" + [(set (cc0) + (compare (match_operand:DF 0 "register_operand" "d") + (match_operand:DF 1 "register_operand" "d")))] + "" + "* return output_cmp (operands[0], operands[1], 'd');") + +(define_insn "cmpsf" + [(set (cc0) + (compare (match_operand:SF 0 "register_operand" "d") + (match_operand:SF 1 "nonmemory_cmpsf_operand" "dF")))] + "" + "* return output_cmp (operands[0], operands[1], 's');") + +;; decrement-and-set-cc0 insns. +;; +;; The most important case where we can use the carry bit from an +;; arithmetic insn to eliminate a redundant compare is the decrement in +;; constructs like while (n--) and while (--n >= 0). +;; +;; We do it with combine patterns instead of NOTICE_UPDATE_CC because +;; the decrement needs to be kept at the end of the block during scheduling. +;; +;; These patterns must have memory alternatives because reload refuses +;; to do output reloads for an insn that sets cc0 (since it does not +;; want to clobber cc0 with its moves). Convex moves do not clobber +;; cc0, but there is no evident way to get reload to know that. 
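;; For illustration only (function names hypothetical): the loop shapes
;; the patterns below are aimed at.  In both, the decrement itself sets
;; the condition, so the separate compare can be combined away.

void copy_bytes (char *dst, const char *src, int n)
{
  while (n--)                   /* test, then add.w #-1          */
    *dst++ = *src++;
}

void clear_words (int *p, int n)
{
  while (--n >= 0)              /* add.w #-1, then test for >= 0 */
    p[n] = 0;
}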
+ +(define_insn "" + [(set (cc0) + (match_operand:SI 0 "register_operand" "+r,*m")) + (set (match_dup 0) + (plus:SI (match_dup 0) + (const_int -1)))] + "next_insn_tests_no_inequality (insn)" + "* +{ + if (which_alternative == 0) + { + output_cmp (operands[0], constm1_rtx, 'W'); + return \"add.w #-1,%0\"; + } + else + { + output_cmp (gen_rtx (REG, SImode, 7), constm1_rtx, 'W'); + return \"psh.w s7\;ld.w %0,s7\;add.w #-1,s7\;st.w s7,%0\;pop.w s7\"; + } +}") + +(define_insn "" + [(set (cc0) + (plus:SI (match_operand:SI 0 "register_operand" "+r,*m") + (const_int -1))) + (set (match_dup 0) + (plus:SI (match_dup 0) + (const_int -1)))] + "find_reg_note (next_cc0_user (insn), REG_NONNEG, 0)" + "* +{ + if (which_alternative == 0) + { + output_cmp (operands[0], const0_rtx, 'W'); + return \"add.w #-1,%0\"; + } + else + { + output_cmp (gen_rtx (REG, SImode, 7), const0_rtx, 'W'); + return \"psh.w s7\;ld.w %0,s7\;add.w #-1,s7\;st.w s7,%0\;pop.w s7\"; + } +}") + +(define_insn "" + [(set (cc0) + (match_operand:HI 0 "register_operand" "+r,*m")) + (set (match_dup 0) + (plus:HI (match_dup 0) + (const_int -1)))] + "next_insn_tests_no_inequality (insn)" + "* +{ + if (which_alternative == 0) + { + output_cmp (operands[0], constm1_rtx, 'W'); + return \"add.w #-1,%0\"; + } + else + { + output_cmp (gen_rtx (REG, HImode, 7), constm1_rtx, 'W'); + return \"psh.w s7\;ld.h %0,s7\;add.h #-1,s7\;st.h s7,%0\;pop.w s7\"; + } +}") + +(define_insn "" + [(set (cc0) + (plus:HI (match_operand:HI 0 "register_operand" "+r,*m") + (const_int -1))) + (set (match_dup 0) + (plus:HI (match_dup 0) + (const_int -1)))] + "find_reg_note (next_cc0_user (insn), REG_NONNEG, 0)" + "* +{ + if (which_alternative == 0) + { + output_cmp (operands[0], const0_rtx, 'W'); + return \"add.w #-1,%0\"; + } + else + { + output_cmp (gen_rtx (REG, HImode, 7), const0_rtx, 'W'); + return \"psh.w s7\;ld.h %0,s7\;add.h #-1,s7\;st.h s7,%0\;pop.w s7\"; + } +}") + +;;- Jumps (define_insn "jump" [(set (pc) @@ -1128,7 +1695,7 @@ (label_ref (match_operand 0 "" "")) (pc)))] "" - "* return gen_cmp (operands[0], \"eq\", 't'); ") + "* return output_condjump (operands[0], \"eq\", 't'); ") (define_insn "bne" [(set (pc) @@ -1137,7 +1704,7 @@ (label_ref (match_operand 0 "" "")) (pc)))] "" - "* return gen_cmp (operands[0], \"eq\", 'f'); ") + "* return output_condjump (operands[0], \"eq\", 'f'); ") (define_insn "bgt" [(set (pc) @@ -1146,7 +1713,7 @@ (label_ref (match_operand 0 "" "")) (pc)))] "" - "* return gen_cmp (operands[0], \"le\", 'f'); ") + "* return output_condjump (operands[0], \"le\", 'f'); ") (define_insn "bgtu" [(set (pc) @@ -1155,7 +1722,7 @@ (label_ref (match_operand 0 "" "")) (pc)))] "" - "* return gen_cmp (operands[0], \"leu\", 'f'); ") + "* return output_condjump (operands[0], \"leu\", 'f'); ") (define_insn "blt" [(set (pc) @@ -1164,7 +1731,7 @@ (label_ref (match_operand 0 "" "")) (pc)))] "" - "* return gen_cmp (operands[0], \"lt\", 't'); ") + "* return output_condjump (operands[0], \"lt\", 't'); ") (define_insn "bltu" [(set (pc) @@ -1173,7 +1740,7 @@ (label_ref (match_operand 0 "" "")) (pc)))] "" - "* return gen_cmp (operands[0], \"ltu\", 't'); ") + "* return output_condjump (operands[0], \"ltu\", 't'); ") (define_insn "bge" [(set (pc) @@ -1182,7 +1749,7 @@ (label_ref (match_operand 0 "" "")) (pc)))] "" - "* return gen_cmp (operands[0], \"lt\", 'f'); ") + "* return output_condjump (operands[0], \"lt\", 'f'); ") (define_insn "bgeu" [(set (pc) @@ -1191,7 +1758,7 @@ (label_ref (match_operand 0 "" "")) (pc)))] "" - "* return gen_cmp (operands[0], \"ltu\", 'f'); ") 
+ "* return output_condjump (operands[0], \"ltu\", 'f'); ") (define_insn "ble" [(set (pc) @@ -1200,7 +1767,7 @@ (label_ref (match_operand 0 "" "")) (pc)))] "" - "* return gen_cmp (operands[0], \"le\", 't'); ") + "* return output_condjump (operands[0], \"le\", 't'); ") (define_insn "bleu" [(set (pc) @@ -1209,7 +1776,7 @@ (label_ref (match_operand 0 "" "")) (pc)))] "" - "* return gen_cmp (operands[0], \"leu\", 't'); ") + "* return output_condjump (operands[0], \"leu\", 't'); ") (define_insn "" [(set (pc) @@ -1218,7 +1785,7 @@ (pc) (label_ref (match_operand 0 "" ""))))] "" - "* return gen_cmp (operands[0], \"eq\", 'f'); ") + "* return output_condjump (operands[0], \"eq\", 'f'); ") (define_insn "" [(set (pc) @@ -1227,7 +1794,7 @@ (pc) (label_ref (match_operand 0 "" ""))))] "" - "* return gen_cmp (operands[0], \"eq\", 't'); ") + "* return output_condjump (operands[0], \"eq\", 't'); ") (define_insn "" [(set (pc) @@ -1236,7 +1803,7 @@ (pc) (label_ref (match_operand 0 "" ""))))] "" - "* return gen_cmp (operands[0], \"le\", 't'); ") + "* return output_condjump (operands[0], \"le\", 't'); ") (define_insn "" [(set (pc) @@ -1245,7 +1812,7 @@ (pc) (label_ref (match_operand 0 "" ""))))] "" - "* return gen_cmp (operands[0], \"leu\", 't'); ") + "* return output_condjump (operands[0], \"leu\", 't'); ") (define_insn "" [(set (pc) @@ -1254,7 +1821,7 @@ (pc) (label_ref (match_operand 0 "" ""))))] "" - "* return gen_cmp (operands[0], \"lt\", 'f'); ") + "* return output_condjump (operands[0], \"lt\", 'f'); ") (define_insn "" [(set (pc) @@ -1263,7 +1830,7 @@ (pc) (label_ref (match_operand 0 "" ""))))] "" - "* return gen_cmp (operands[0], \"ltu\", 'f'); ") + "* return output_condjump (operands[0], \"ltu\", 'f'); ") (define_insn "" [(set (pc) @@ -1272,7 +1839,7 @@ (pc) (label_ref (match_operand 0 "" ""))))] "" - "* return gen_cmp (operands[0], \"lt\", 't'); ") + "* return output_condjump (operands[0], \"lt\", 't'); ") (define_insn "" [(set (pc) @@ -1281,7 +1848,7 @@ (pc) (label_ref (match_operand 0 "" ""))))] "" - "* return gen_cmp (operands[0], \"ltu\", 't'); ") + "* return output_condjump (operands[0], \"ltu\", 't'); ") (define_insn "" [(set (pc) @@ -1290,7 +1857,7 @@ (pc) (label_ref (match_operand 0 "" ""))))] "" - "* return gen_cmp (operands[0], \"le\", 'f'); ") + "* return output_condjump (operands[0], \"le\", 'f'); ") (define_insn "" [(set (pc) @@ -1299,35 +1866,66 @@ (pc) (label_ref (match_operand 0 "" ""))))] "" - "* return gen_cmp (operands[0], \"leu\", 'f'); ") + "* return output_condjump (operands[0], \"leu\", 'f'); ") -;; - Calls -;; -;; arg count word may be omitted to save a push and let gcc try to -;; combine the arg list pop. RETURN_POPS_ARGS from tm.h decides this. 
+;;- Calls -(define_insn "call" +(define_expand "call_pop" + [(parallel [(call (match_operand:QI 0 "memory_operand" "m") + (match_operand:SI 1 "const_int_operand" "i")) + (match_operand:SI 2 "const_int_operand" "i") + (match_operand:SI 3 "const_int_operand" "i") + (reg:SI 8)])] + "" + "") + +(define_insn "" [(call (match_operand:QI 0 "memory_operand" "m") - (match_operand 1 "" "g"))] + (match_operand:SI 1 "const_int_operand" "i")) + (match_operand:SI 2 "const_int_operand" "i") + (match_operand:SI 3 "const_int_operand" "i") + (match_operand:SI 4 "" "")] "" - "* return output_call (insn, operands[0], operands[1]);") + "* return output_call (insn, &operands[0]);") -(define_insn "call_value" +(define_expand "call_value_pop" + [(parallel [(set (match_operand 0 "" "=g") + (call (match_operand:QI 1 "memory_operand" "m") + (match_operand:SI 2 "const_int_operand" "i"))) + (match_operand:SI 3 "const_int_operand" "i") + (match_operand:SI 4 "const_int_operand" "i") + (reg:SI 8)])] + "" + "") + +(define_insn "" [(set (match_operand 0 "" "=g") (call (match_operand:QI 1 "memory_operand" "m") - (match_operand 2 "" "g")))] + (match_operand:SI 2 "const_int_operand" "i"))) + (match_operand:SI 3 "const_int_operand" "i") + (match_operand:SI 4 "const_int_operand" "i") + (match_operand:SI 5 "" "")] + "" + "* return output_call (insn, &operands[1]); ") + +(define_expand "return" + [(return)] "" - "* return output_call (insn, operands[1], operands[2]);") + " replace_arg_pushes (); ") -(define_insn "return" +(define_insn "" [(return)] "" "rtn") -(define_insn "nop" +(define_expand "prologue" [(const_int 0)] "" - "nop") + " +{ + emit_ap_optimizations (); + DONE; +}") (define_insn "tablejump" [(set (pc) (match_operand:SI 0 "address_operand" "p")) @@ -1339,3 +1937,13 @@ [(set (pc) (match_operand:SI 0 "address_operand" "p"))] "" "jmp %a0") + +;;- Local variables: +;;- mode:emacs-lisp +;;- comment-start: ";;- " +;;- eval: (set-syntax-table (copy-sequence (syntax-table))) +;;- eval: (modify-syntax-entry ?[ "(]") +;;- eval: (modify-syntax-entry ?] ")[") +;;- eval: (modify-syntax-entry ?{ "(}") +;;- eval: (modify-syntax-entry ?} "){") +;;- End: diff --git a/gcc/config/convex/x-convex b/gcc/config/convex/x-convex index bc8b8b46500..e1fbe742d03 100644 --- a/gcc/config/convex/x-convex +++ b/gcc/config/convex/x-convex @@ -1,3 +1,5 @@ -# Use -pcc to avoid surprises. +# ld can make exe's c2-only if this lib is searched even though not loaded +CCLIBFLAGS = -tm c1 +# Use -pcc to avoid surprises. CC = cc -pcc diff --git a/gcc/config/convex/xm-convex.h b/gcc/config/convex/xm-convex.h index 3fd8e2e2fd7..38e717f3909 100644 --- a/gcc/config/convex/xm-convex.h +++ b/gcc/config/convex/xm-convex.h @@ -1,5 +1,5 @@ /* Configuration for GNU C-compiler for Convex. - Copyright (C) 1989, 1991 Free Software Foundation, Inc. + Copyright (C) 1989, 1993 Free Software Foundation, Inc. This file is part of GNU CC. @@ -41,26 +41,17 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #define alloca __builtin_alloca #endif -#ifdef _POSIX_SOURCE - /* Convex ships /tmp as a separate file system - thus it usually has more free space than /usr/tmp */ #define P_tmpdir "/tmp/" -/* Un-hide names hidden in Posix include files. */ - -#define S_IFMT _S_IFMT -#define S_IFREG _S_IFREG +/* Use memcpy and memset -- either would work but these get inlined. */ -#else +#define bcopy(a,b,c) memcpy (b,a,c) +#define bzero(a,b) memset (a,0,b) -/* This definition is to prevent 8.0 include files from declaring prototypes. 
- Those include files ANSIfied, but the prototypes sometimes do not match. - There is no effect on pre-8.0 OS versions. */ - -#ifndef _PROTO -#define _PROTO(X) () -#endif +/* Convex uses Vax or IEEE floats. + Both formats have Vax semantics. */ -#endif /* _POSIX_SOURCE */ +#define HOST_FLOAT_FORMAT VAX_FLOAT_FORMAT -- 2.30.2
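A footnote on the xm-convex.h hunk above, as an illustrative C sketch
(function names hypothetical): the bcopy/bzero macros map onto memcpy and
memset, and bcopy's source/destination arguments must be swapped to match
memcpy's argument order.

#include <string.h>

/* bcopy (src, dst, n) -> memcpy (dst, src, n);  bzero (p, n) -> memset (p, 0, n) */
static void bcopy_like (const void *src, void *dst, size_t n)
{
  memcpy (dst, src, n);         /* memcpy takes the destination first */
}

static void bzero_like (void *p, size_t n)
{
  memset (p, 0, n);             /* fill with zero bytes */
}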