From: Charles Hannum Date: Sat, 14 Mar 1992 05:17:02 +0000 (+0000) Subject: entered into RCS X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=cce8749ec52efb47ba2b1c6b266d53fc239a2f0f;p=gcc.git entered into RCS From-SVN: r479 --- diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c new file mode 100644 index 00000000000..8ef04c4cdf5 --- /dev/null +++ b/gcc/config/arm/arm.c @@ -0,0 +1,1336 @@ +/* Output routines for GCC for ARM/RISCiX. + Copyright (C) 1991 Free Software Foundation, Inc. + Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) + and Martin Simmons (@harleqn.co.uk). + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ + +#include +#include +#include "config.h" +#include "rtl.h" +#include "regs.h" +#include "hard-reg-set.h" +#include "real.h" +#include "insn-config.h" +#include "conditions.h" +#include "insn-flags.h" +#include "output.h" +#include "insn-attr.h" +#include "flags.h" + +/* The maximum number of insns skipped which will be conditionalised if + possible. */ +#define MAX_INSNS_SKIPPED 5 + +/* Some function declarations. */ +extern void *xmalloc (); +extern FILE *asm_out_file; +extern char *output_multi_immediate (); +extern char *arm_output_asm_insn (); +extern void arm_increase_location (); + +/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we + must report the mode of the memory reference from PRINT_OPERAND to + PRINT_OPERAND_ADDRESS. */ +int output_memory_reference_mode; + +/* Nonzero if the prologue must setup `fp'. */ +int current_function_anonymous_args; + +/* Location counter of .text segment. */ +int arm_text_location = 0; + +/* A hash table is used to store text segment labels and their associated + offset from the start of the text segment. */ +struct label_offset +{ + char *name; + int offset; + struct label_offset *cdr; +}; + +#define LABEL_HASH_SIZE 257 + +static struct label_offset *offset_table[LABEL_HASH_SIZE]; + +/* For an explanation of these variables, see final_prescan_insn below. */ +int arm_ccfsm_state; +int arm_current_cc; +rtx arm_target_insn; +int arm_target_label; +char *arm_condition_codes[]; + +/* Return the number of mov instructions needed to get the constant VALUE into + a register. */ + +int +arm_const_nmoves (value) + register int value; +{ + register int i; + + if (value == 0) + return (1); + for (i = 0; value; i++, value &= ~0xff) + while ((value & 3) == 0) + value = (value >> 2) | ((value & 3) << 30); + return (i); +} /* arm_const_nmoves */ + + +/* Return TRUE if int I is a valid immediate ARM constant. */ + +int +const_ok_for_arm (i) + int i; +{ + unsigned int mask = ~0xFF; + + do + { + if ((i & mask) == 0) + return(TRUE); + mask = (mask << 2) | (mask >> (32 - 2)); + } while (mask != ~0xFF); + + return (FALSE); +} /* const_ok_for_arm */ + +/* Return TRUE if rtx X is a valid immediate FPU constant. 
*/ + +int +const_double_rtx_ok_for_fpu (x) + rtx x; +{ + double d; + union real_extract u; + u.i[0] = CONST_DOUBLE_LOW(x); + u.i[1] = CONST_DOUBLE_HIGH(x); + d = u.d; + + return (d == 0.0 || d == 1.0 || d == 2.0 || d == 3.0 + || d == 4.0 || d == 5.0 || d == 0.5 || d == 10.0); +} /* const_double_rtx_ok_for_fpu */ + +/* Predicates for `match_operand' and `match_operator'. */ + +/* Return TRUE for valid operands for the rhs of an ARM instruction. */ + +int +arm_rhs_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (register_operand (op, mode) + || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))); +} /* arm_rhs_operand */ + +/* Return TRUE for valid operands for the rhs of an FPU instruction. */ + +int +fpu_rhs_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + if (register_operand (op, mode)) + return(TRUE); + else if (GET_CODE (op) == CONST_DOUBLE) + return (const_double_rtx_ok_for_fpu (op)); + else return (FALSE); +} /* fpu_rhs_operand */ + +/* Return nonzero if OP is a constant power of two. */ + +int +power_of_two_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + if (GET_CODE (op) == CONST_INT) + { + int value = INTVAL(op); + return (value != 0 && (value & (value-1)) == 0); + } + return (FALSE); +} /* power_of_two_operand */ + +/* Return TRUE for a valid operand of a DImode operation. + Either: REG, CONST_DOUBLE or MEM(offsettable). + Note that this disallows MEM(REG+REG). */ + +int +di_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + if (register_operand (op, mode)) + return (TRUE); + + switch (GET_CODE (op)) + { + case CONST_DOUBLE: + case CONST_INT: + return (TRUE); + case MEM: + return (memory_address_p (DImode, XEXP (op, 0)) + && offsettable_address_p (FALSE, DImode, XEXP (op, 0))); + default: + return (FALSE); + } +} /* di_operand */ + +/* Return TRUE for valid index operands. */ + +int +index_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (register_operand(op, mode) + || (immediate_operand (op, mode) && abs (INTVAL (op)) < 4096)); +} /* index_operand */ + +/* Return TRUE for arithmetic operators which can be combined with a multiply + (shift). */ + +int +shiftable_operator (x, mode) + rtx x; + enum machine_mode mode; +{ + if (GET_MODE (x) != mode) + return FALSE; + else + { + enum rtx_code code = GET_CODE (x); + + return (code == PLUS || code == MINUS + || code == IOR || code == XOR || code == AND); + } +} /* shiftable_operator */ + +/* Return TRUE for shift operators. */ + +int +shift_operator (x, mode) + rtx x; + enum machine_mode mode; +{ + if (GET_MODE (x) != mode) + return FALSE; + else + { + enum rtx_code code = GET_CODE (x); + + return (code == ASHIFT || code == LSHIFT + || code == ASHIFTRT || code == LSHIFTRT); + } +} /* shift_operator */ + +/* Routines to output assembly language. */ + +/* Output the operands of a LDM/STM instruction to STREAM. + MASK is the ARM register set mask of which only bits 0-15 are important. + INSTR is the possibly suffixed base register. HAT unequals zero if a hat + must follow the register list. */ + +void +print_multi_reg (stream, instr, mask, hat) + FILE *stream; + char *instr; + int mask, hat; +{ + int i; + int not_first = FALSE; + + fprintf (stream, "\t%s, {", instr); + for (i = 0; i < 16; i++) + if (mask & (1 << i)) + { + if (not_first) + fprintf (stream, ", "); + fprintf (stream, "%s", reg_names[i]); + not_first = TRUE; + } + fprintf (stream, "}%s\n", hat ? "^" : ""); +} /* print_multi_reg */ + +/* Output a 'call' insn. 
*/ + +char * +output_call (operands) + rtx operands[]; +{ + operands[0] = XEXP (operands[0], 0); + + /* Handle calls to lr using ip (which may be clobbered in subr anyway). */ + + if (REGNO (operands[0]) == 14) + { + operands[0] = gen_rtx (REG, SImode, 12); + arm_output_asm_insn ("mov\t%0, lr", operands); + } + arm_output_asm_insn ("mov\tlr, pc", operands); + arm_output_asm_insn ("mov\tpc, %0", operands); + return (""); +} /* output_call */ + +/* Output a move from arm registers to an fpu registers. + OPERANDS[0] is an fpu register. + OPERANDS[1] is the first registers of an arm register pair. */ + +char * +output_mov_double_fpu_from_arm (operands) + rtx operands[]; +{ + int arm_reg0 = REGNO (operands[1]); + rtx ops[2]; + + if (arm_reg0 == 12) + abort(); + ops[0] = gen_rtx (REG, SImode, arm_reg0); + ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0); + arm_output_asm_insn ("stmfd\tsp!, {%0, %1}", ops); + arm_output_asm_insn ("ldfd\t%0, [sp], #8", operands); + return (""); +} /* output_mov_double_fpu_from_arm */ + +/* Output a move from an fpu register to arm registers. + OPERANDS[0] is the first registers of an arm register pair. + OPERANDS[1] is an fpu register. */ + +char * +output_mov_double_arm_from_fpu (operands) + rtx operands[]; +{ + int arm_reg0 = REGNO (operands[0]); + rtx ops[2]; + + if (arm_reg0 == 12) + abort(); + ops[0] = gen_rtx (REG, SImode, arm_reg0); + ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0); + arm_output_asm_insn ("stfd\t%1, [sp, #-8]!", operands); + arm_output_asm_insn ("ldmfd\tsp!, {%0, %1}", ops); + return(""); +} /* output_mov_double_arm_from_fpu */ + +/* Output a move between double words. + It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM + or MEM<-REG and all MEMs must be offsettable addresses. */ + +char * +output_move_double (operands) + rtx operands[]; +{ + enum rtx_code code0 = GET_CODE (operands[0]); + enum rtx_code code1 = GET_CODE (operands[1]); + rtx otherops[2]; + + if (code0 == REG) + { + int reg0 = REGNO (operands[0]); + + otherops[0] = gen_rtx (REG, SImode, 1 + reg0); + if (code1 == REG) + { + int reg1 = REGNO (operands[1]); + if (reg1 == 12) + abort(); + otherops[1] = gen_rtx (REG, SImode, 1 + reg1); + + /* Ensure the second source is not overwritten */ + if (reg0 == 1 + reg1) + { + arm_output_asm_insn("mov\t%0, %1", otherops); + arm_output_asm_insn("mov\t%0, %1", operands); + } + else + { + arm_output_asm_insn("mov\t%0, %1", operands); + arm_output_asm_insn("mov\t%0, %1", otherops); + } + } + else if (code1 == CONST_DOUBLE) + { + otherops[1] = gen_rtx (CONST_INT, VOIDmode, + CONST_DOUBLE_HIGH (operands[1])); + operands[1] = gen_rtx (CONST_INT, VOIDmode, + CONST_DOUBLE_LOW (operands[1])); + arm_output_asm_insn ("mov\t%0, %1", operands); + arm_output_asm_insn ("mov\t%0, %1", otherops); + } + else if (code1 == CONST_INT) + { + otherops[1] = const0_rtx; + arm_output_asm_insn ("mov\t%0, %1", operands); + arm_output_asm_insn ("mov\t%0, %1", otherops); + } + else if (code1 == MEM) + { + if (GET_CODE (XEXP (operands[1], 0)) == REG) + { + /* Handle the simple case where address is [r, #0] more + efficient. */ + operands[1] = XEXP (operands[1], 0); + arm_output_asm_insn ("ldmia\t%1, %M0", operands); + } + else + { + otherops[1] = adj_offsettable_operand (operands[1], 4); + /* Take care of overlapping base/data reg. 
*/ + if (reg_mentioned_p (operands[0], operands[1])) + { + arm_output_asm_insn ("ldr\t%0, %1", otherops); + arm_output_asm_insn ("ldr\t%0, %1", operands); + } + else + { + arm_output_asm_insn ("ldr\t%0, %1", operands); + arm_output_asm_insn ("ldr\t%0, %1", otherops); + } + } + } + else abort(); /* Constraints should prevent this */ + } + else if (code0 == MEM && code1 == REG) + { + if (REGNO (operands[1]) == 12) + abort(); + + if (GET_CODE (XEXP (operands[0], 0)) == REG) + { + operands[0] = XEXP (operands[0], 0); + arm_output_asm_insn ("stmia\t%0, %M1", operands); + } + else + { + otherops[0] = adj_offsettable_operand (operands[0], 4); + otherops[1] = gen_rtx (REG, SImode, 1 + REGNO (operands[1])); + arm_output_asm_insn ("str\t%1, %0", operands); + arm_output_asm_insn ("str\t%1, %0", otherops); + } + } + else abort(); /* Constraints should prevent this */ + + return(""); +} /* output_move_double */ + + +/* Output an arbitrary MOV reg, #n. + OPERANDS[0] is a register. OPERANDS[1] is a const_int. */ + +char * +output_mov_immediate (operands) + rtx operands[2]; +{ + int n = INTVAL (operands[1]); + int n_ones = 0; + int i; + + /* Try to use one MOV */ + + if (const_ok_for_arm (n)) + return (arm_output_asm_insn ("mov\t%0, %1", operands)); + + /* Try to use one MVN */ + + if (const_ok_for_arm(~n)) + { + operands[1] = gen_rtx (CONST_INT, VOIDmode, ~n); + return (arm_output_asm_insn ("mvn\t%0, %1", operands)); + } + + /* If all else fails, make it out of ORRs or BICs as appropriate. */ + + for (i=0; i < 32; i++) + if (n & 1 << i) + n_ones++; + + if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */ + output_multi_immediate(operands, "mvn\t%0, %1", "bic\t%0, %0, %1", 1, ~n); + else + output_multi_immediate(operands, "mov\t%0, %1", "orr\t%0, %0, %1", 1, n); + return(""); +} /* output_mov_immediate */ + + +/* Output an ADD r, s, #n where n may be too big for one instruction. If + adding zero to one register, output nothing. */ + +char * +output_add_immediate (operands) + rtx operands[3]; +{ + int n = INTVAL (operands[2]); + + if (n != 0 || REGNO (operands[0]) != REGNO (operands[1])) + { + if (n < 0) + output_multi_immediate (operands, + "sub\t%0, %1, %2", "sub\t%0, %0, %2", 2, -n); + else + output_multi_immediate (operands, + "add\t%0, %1, %2", "add\t%0, %0, %2", 2, n); + } + return(""); +} /* output_add_immediate */ + + +/* Output a multiple immediate operation. + OPERANDS is the vector of operands referred to in the output patterns. + INSTR1 is the output pattern to use for the first constant. + INSTR2 is the output pattern to use for subsequent constants. + IMMED_OP is the index of the constant slot in OPERANDS. + N is the constant value. */ + +char * +output_multi_immediate (operands, instr1, instr2, immed_op, n) + rtx operands[]; + char *instr1, *instr2; + int immed_op, n; +{ + if (n == 0) + { + operands[immed_op] = const0_rtx; + arm_output_asm_insn (instr1, operands); /* Quick and easy output */ + } + else + { + int i; + char *instr = instr1; + + /* Note that n is never zero here (which would give no output) */ + + for (i = 0; i < 32; i += 2) + { + if (n & (3 << i)) + { + operands[immed_op] = gen_rtx (CONST_INT, VOIDmode, + n & (255 << i)); + arm_output_asm_insn (instr, operands); + instr = instr2; + i += 6; + } + } + } + return (""); +} /* output_multi_immediate */ + + +/* Return the appropriate ARM instruction for the operation code. + The returned result should not be overwritten. OP is the rtx of the + operation. 
SHIFT_FIRST_ARG is TRUE if the first argument of the operator + was shifted. */ + +char * +arithmetic_instr (op, shift_first_arg) + rtx op; +{ + switch (GET_CODE(op)) + { + case PLUS: + return ("add"); + case MINUS: + if (shift_first_arg) + return ("rsb"); + else + return ("sub"); + case IOR: + return ("orr"); + case XOR: + return ("eor"); + case AND: + return ("and"); + default: + abort(); + } + return (""); /* stupid cc */ +} /* arithmetic_instr */ + + +/* Ensure valid constant shifts and return the appropriate shift mnemonic + for the operation code. The returned result should not be overwritten. + OP is the rtx code of the shift. + SHIFT_PTR points to the shift size operand. */ + +char * +shift_instr (op, shift_ptr) + enum rtx_code op; + rtx *shift_ptr; +{ + int min_shift = 0; + int max_shift = 31; + char *mnem; + + switch (op) + { + case ASHIFT: + mnem = "asl"; + break; + case LSHIFT: + mnem = "lsl"; + break; + case ASHIFTRT: + mnem = "asr"; + max_shift = 32; + break; + case LSHIFTRT: + mnem = "lsr"; + max_shift = 32; + break; + default: + abort(); + } + + if (GET_CODE (*shift_ptr) == CONST_INT) + { + int shift = INTVAL (*shift_ptr); + + if (shift < min_shift) + *shift_ptr = gen_rtx (CONST_INT, VOIDmode, 0); + else if (shift > max_shift) + *shift_ptr = gen_rtx (CONST_INT, VOIDmode, max_shift); + } + return (mnem); +} /* shift_instr */ + + +/* Obtain the shift from the POWER of two. */ + +int +int_log2 (power) + unsigned int power; +{ + int shift = 0; + + while (((1 << shift) & power) == 0) + { + if (shift > 31) + abort(); + shift++; + } + return (shift); +} /* int_log2 */ + + +/* Output an arithmetic instruction which may set the condition code. + OPERANDS[0] is the destination register. + OPERANDS[1] is the arithmetic operator expression. + OPERANDS[2] is the left hand argument. + OPERANDS[3] is the right hand argument. + CONST_FIRST_ARG is TRUE if the first argument of the operator was constant. + SET_COND is TRUE when the condition code should be set. */ + +char * +output_arithmetic (operands, const_first_arg, set_cond) + rtx operands[4]; + int const_first_arg; + int set_cond; +{ + char mnemonic[80]; + char *instr = arithmetic_instr (operands[1], const_first_arg); + + sprintf (mnemonic, "%s%s\t%%0, %%2, %%3", instr, set_cond ? "s" : ""); + return (arm_output_asm_insn (mnemonic, operands)); +} /* output_arithmetic */ + + +/* Output an arithmetic instruction with a shift. + OPERANDS[0] is the destination register. + OPERANDS[1] is the arithmetic operator expression. + OPERANDS[2] is the unshifted register. + OPERANDS[3] is the shift operator expression. + OPERANDS[4] is the shifted register. + OPERANDS[5] is the shift constant or register. + SHIFT_FIRST_ARG is TRUE if the first argument of the operator was shifted. + SET_COND is TRUE when the condition code should be set. */ + +char * +output_arithmetic_with_shift (operands, shift_first_arg, set_cond) + rtx operands[6]; + int shift_first_arg; + int set_cond; +{ + char mnemonic[80]; + char *instr = arithmetic_instr (operands[1], shift_first_arg); + char *condbit = set_cond ? "s" : ""; + char *shift = shift_instr (GET_CODE (operands[3]), &operands[5]); + + sprintf (mnemonic, "%s%s\t%%0, %%2, %%4, %s %%5", instr, condbit, shift); + return (arm_output_asm_insn (mnemonic, operands)); +} /* output_arithmetic_with_shift */ + + +/* Output an arithmetic instruction with a power of two multiplication. + OPERANDS[0] is the destination register. + OPERANDS[1] is the arithmetic operator expression. + OPERANDS[2] is the unmultiplied register. 
+ OPERANDS[3] is the multiplied register. + OPERANDS[4] is the constant multiple (power of two). + SHIFT_FIRST_ARG is TRUE if the first arg of the operator was multiplied. */ + +char * +output_arithmetic_with_immediate_multiply (operands, shift_first_arg) + rtx operands[5]; + int shift_first_arg; +{ + char mnemonic[80]; + char *instr = arithmetic_instr (operands[1], shift_first_arg); + int shift = int_log2 (INTVAL (operands[4])); + + sprintf (mnemonic, "%s\t%%0, %%2, %%3, asl#%d", instr, shift); + return (arm_output_asm_insn (mnemonic, operands)); +} /* output_arithmetic_with_immediate_multiply */ + + +/* Output a move with a shift. + OP is the shift rtx code. + OPERANDS[0] = destination register. + OPERANDS[1] = source register. + OPERANDS[2] = shift constant or register. */ + +char * +output_shifted_move (op, operands) + enum rtx_code op; + rtx operands[2]; +{ + char mnemonic[80]; + + if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0) + sprintf (mnemonic, "mov\t%%0, %%1"); + else + sprintf (mnemonic, "mov\t%%0, %%1, %s %%2", + shift_instr (op, &operands[2])); + return (arm_output_asm_insn (mnemonic, operands)); +} /* output_shifted_move */ + + +/* Output a .ascii pseudo-op, keeping track of lengths. This is because + /bin/as is horribly restrictive. */ + +void +output_ascii_pseudo_op (stream, p, len) + FILE *stream; + char *p; + int len; +{ + int i; + int len_so_far = 1000; + int chars_so_far = 0; + + for (i = 0; i < len; i++) + { + register int c = p[i]; + + if (len_so_far > 50) + { + if (chars_so_far) + fputs ("\"\n", stream); + fputs ("\t.ascii\t\"", stream); + len_so_far = 0; + arm_increase_location (chars_so_far); + chars_so_far = 0; + } + + if (c == '\"' || c == '\\') + { + putc('\\', stream); + len_so_far++; + } + if (c >= ' ' && c < 0177) + { + putc (c, stream); + len_so_far++; + } + else + { + fprintf (stream, "\\%03o", c); + len_so_far +=4; + } + chars_so_far++; + } + fputs ("\"\n", stream); + arm_increase_location (chars_so_far); +} /* output_ascii_pseudo_op */ + +void +output_prologue (f, frame_size) + FILE *f; + int frame_size; +{ + + int reg, live_regs_mask = 0, code_size = 0; + rtx operands[3]; + + /* Nonzero if the `fp' (argument pointer) register is needed. */ + int fp_needed = 0; + + /* Nonzero if we must stuff some register arguments onto the stack as if + they were passed there. */ + int store_arg_regs = 0; + + fprintf (f, "\t@ args = %d, pretend = %d, frame = %d\n", + current_function_args_size, current_function_pretend_args_size, frame_size); + fprintf (f, "\t@ frame_pointer_needed = %d, current_function_anonymous_args = %d\n", + frame_pointer_needed, current_function_anonymous_args); + + if (current_function_pretend_args_size || current_function_args_size + || frame_pointer_needed || current_function_anonymous_args || TARGET_APCS) + fp_needed = 1; + + if (current_function_anonymous_args && current_function_pretend_args_size) + store_arg_regs = 1; + + for (reg = 4; reg < 10; reg++) + if (regs_ever_live[reg]) + live_regs_mask |= (1 << reg); + + if (fp_needed) + { + live_regs_mask |= 0xD800; + /* The following statement is probably redundant now + because the frame pointer is recorded in regs_ever_live. */ + if (frame_pointer_needed) + live_regs_mask |= (1 << FRAME_POINTER_REGNUM); + fputs ("\tmov\tip, sp\n", f); + code_size += 4; + } + else if (regs_ever_live[14]) + live_regs_mask |= 0x4000; + + /* If CURRENT_FUNCTION_PRETEND_ARGS_SIZE, adjust the stack pointer to make + room. 
If also STORE_ARG_REGS store the argument registers involved in + the created slot (this is for stdarg and varargs). */ + if (current_function_pretend_args_size) + { + if (store_arg_regs) + { + int arg_size, mask = 0; + + assert (current_function_pretend_args_size <= 16); + for (reg = 3, arg_size = current_function_pretend_args_size; + arg_size > 0; reg--, arg_size -= 4) + mask |= (1 << reg); + print_multi_reg (f, "stmfd\tsp!", mask, FALSE); + } + else + { + operands[0] = operands[1] = stack_pointer_rtx; + operands[2] = gen_rtx (CONST_INT, VOIDmode, + -current_function_pretend_args_size); + output_add_immediate (operands); + } + } + + if (live_regs_mask) + { + print_multi_reg (f, "stmfd\tsp!", live_regs_mask, FALSE); + code_size += 4; + } + + for (reg = 23; reg > 19; reg--) + if (regs_ever_live[reg]) + { + fprintf (f, "\tstfe\t%s, [sp, #-12]!\n", reg_names[reg]); + code_size += 4; + } + + if (fp_needed) + { + /* Make `fp' point to saved value of `pc'. */ + + operands[0] = arg_pointer_rtx; + operands[1] = gen_rtx (REG, SImode, 12); + operands[2] = gen_rtx (CONST_INT, VOIDmode, + - (4 + current_function_pretend_args_size)); + output_add_immediate (operands); + } + + if (frame_pointer_needed) + { + fprintf (f, "\tmov\trfp, sp\n"); + code_size += 4; + } + + if (frame_size) + { + operands[0] = operands[1] = stack_pointer_rtx; + operands[2] = gen_rtx (CONST_INT, VOIDmode, -frame_size); + output_add_immediate (operands); + } + + arm_increase_location (code_size); +} /* output_prologue */ + + +void +output_epilogue (f, frame_size) + FILE *f; + int frame_size; +{ + int reg, live_regs_mask = 0, code_size = 0, fp_needed = 0; + rtx operands[3]; + + if (current_function_pretend_args_size || current_function_args_size + || frame_pointer_needed || current_function_anonymous_args || TARGET_APCS) + fp_needed = 1; + + for (reg = 4; reg < 10; reg++) + if (regs_ever_live[reg]) + live_regs_mask |= (1 << reg); + + if (fp_needed) + { + live_regs_mask |= 0xA800; + if (frame_pointer_needed) + live_regs_mask |= (1 << FRAME_POINTER_REGNUM); + } + else if (regs_ever_live[14]) + live_regs_mask |= 0x4000; + + for (reg = 20; reg < 24; reg++) + if (regs_ever_live[reg]) + { + fprintf (f, "\tldfe\t%s, [%s], #12\n", reg_names[reg], + frame_pointer_needed ? "rfp" : "sp"); + code_size += 4; + } + + if (fp_needed) + { + print_multi_reg (f, "ldmea\tfp", live_regs_mask, TRUE); + code_size += 4; + } + else + { + if (current_function_pretend_args_size == 0 && regs_ever_live[14]) + { + print_multi_reg (f, "ldmfd\tsp!", + (live_regs_mask & ~0x4000) | 0x8000, TRUE); + code_size += 4; + } + else + { + if (live_regs_mask) + { + print_multi_reg (f, "ldmfd\tsp!", live_regs_mask, FALSE); + code_size += 4; + } + if (current_function_pretend_args_size) + { + operands[0] = operands[1] = stack_pointer_rtx; + operands[2] = gen_rtx (CONST_INT, VOIDmode, + current_function_pretend_args_size); + output_add_immediate (operands); + } + fputs ("\tmovs\tpc, lr\n", f); + code_size += 4; + } + } + arm_increase_location (code_size); + current_function_anonymous_args = 0; +} /* output_epilogue */ + +/* Increase the `arm_text_location' by AMOUNT if we're in the text + segment. */ + +void +arm_increase_location (amount) + int amount; +{ + if (in_text_section ()) + arm_text_location += amount; +} /* arm_increase_location */ + + +/* Like output_asm_insn (), but also increases the arm_text_location (if in + the .text segment, of course, even though this will always be true). + Returns the empty string. 
*/ + +char * +arm_output_asm_insn (template, operands) + char *template; + rtx *operands; +{ + extern FILE *asm_out_file; + + output_asm_insn (template, operands); + if (in_text_section ()) + arm_text_location += 4; + fflush (asm_out_file); + return (""); +} /* arm_output_asm_insn */ + + +/* Output a label definition. If this label is within the .text segment, it + is stored in OFFSET_TABLE, to be used when building `llc' instructions. + Maybe GCC remembers names not starting with a `*' for a long time, but this + is a minority anyway, so we just make a copy. Do not store the leading `*' + if the name starts with one. */ + +void +arm_asm_output_label (stream, name) + FILE *stream; + char *name; +{ + char *real_name, *s; + struct label_offset *cur; + int hash = 0; + + assemble_name (stream, name); + fputs (":\n", stream); + if (! in_text_section ()) + return; + + if (name[0] == '*') + { + real_name = xmalloc (1 + strlen (&name[1])); + strcpy (real_name, &name[1]); + } + else + { + real_name = xmalloc (2 + strlen (name)); + strcpy (real_name, "_"); + strcat (real_name, name); + } + for (s = real_name; *s; s++) + hash += *s; + hash = hash % LABEL_HASH_SIZE; + cur = xmalloc (sizeof (struct label_offset)); + cur->name = real_name; + cur->offset = arm_text_location; + cur->cdr = offset_table[hash]; + offset_table[hash] = cur; +} /* arm_asm_output_label */ + + +/* Output the instructions needed to perform what Martin's /bin/as called + llc: load an SImode thing from the function's constant pool. + + XXX This could be enhanced in that we do not really need a pointer in the + constant pool pointing to the real thing. If we can address this pointer, + we can also address what it is pointing at, in fact, anything in the text + segment which has been defined already within this .s file. */ + +char * +arm_output_llc (operands) + rtx *operands; +{ + char *s, *name = XSTR (XEXP (operands[1], 0), 0); + struct label_offset *he; + int hash = 0, conditional = (arm_ccfsm_state == 3 || arm_ccfsm_state == 4); + + if (*name != '*') + abort (); + + for (s = &name[1]; *s; s++) + hash += *s; + hash = hash % LABEL_HASH_SIZE; + he = offset_table[hash]; + while (he && strcmp (he->name, &name[1])) + he = he->cdr; + + if (!he) + abort (); + + if (arm_text_location + 8 - he->offset < 4095) + { + fprintf (asm_out_file, "\tldr%s\t%s, [pc, #%s - . - 8]\n", + conditional ? arm_condition_codes[arm_current_cc] : "", + reg_names[REGNO (operands[0])], &name[1]); + arm_increase_location (4); + return (""); + } + else + { + int offset = - (arm_text_location + 8 - he->offset); + char *reg_name = reg_names[REGNO (operands[0])]; + + /* ??? This is a hack, assuming the constant pool never is more than + (1 + 255) * 4096 == 1Meg away from the PC. */ + + if (offset > 1000000) + abort (); + + fprintf (asm_out_file, "\tsub%s\t%s, pc, #(8 + . - %s) & ~4095\n", + conditional ? arm_condition_codes[arm_current_cc] : "", + reg_name, &name[1]); + fprintf (asm_out_file, "\tldr%s\t%s, [%s, #- ((4 + . - %s) & 4095)]\n", + conditional ? arm_condition_codes[arm_current_cc] : "", + reg_name, reg_name, &name[1]); + arm_increase_location (8); + } + return (""); +} /* arm_output_llc */ + + +/* Output code resembling an .lcomm directive. /bin/as doesn't have this + directive hence this hack, which works by reserving some `.space' in the + bss segment directly. + + XXX This is a severe hack, which is guaranteed NOT to work since it doesn't + define STATIC COMMON space but merely STATIC BSS space. 
*/ + +void +output_lcomm_directive (stream, name, size, rounded) + FILE *stream; + char *name; + int size, rounded; +{ + fputs ("\n\t.bss\t@ .lcomm\n", stream); + assemble_name (stream, name); + fprintf (stream, ":\t.space\t%d\n", rounded); + if (in_text_section ()) + fputs ("\n\t.text\n", stream); + else + fputs ("\n\t.data\n", stream); +} /* output_lcomm_directive */ + +/* A finite state machine takes care of noticing whether or not instructions + can be conditionally executed, and thus decrease execution time and code + size by deleting branch instructions. The fsm is controlled by + final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */ + +/* The state of the fsm controlling condition codes are: + 0: normal, do nothing special + 1: make ASM_OUTPUT_OPCODE not output this instruction + 2: make ASM_OUTPUT_OPCODE not output this instruction + 3: make instructions conditional + 4: make instructions conditional + + State transitions (state->state by whom under condition): + 0 -> 1 final_prescan_insn if the `target' is a label + 0 -> 2 final_prescan_insn if the `target' is an unconditional branch + 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch + 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch + 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached + (the target label has CODE_LABEL_NUMBER equal to arm_target_label). + 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached + (the target insn is arm_target_insn). + + XXX In case the `target' is an unconditional branch, this conditionalising + of the instructions always reduces code size, but not always execution + time. But then, I want to reduce the code size to somewhere near what + /bin/cc produces. */ + +/* The condition codes of the ARM, and the inverse function. */ +char *arm_condition_codes[] = +{ + "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc", + "hi", "ls", "ge", "lt", "gt", "le", "al", "nv" +}; + +#define ARM_INVERSE_CONDITION_CODE(X) ((X) ^ 1) + +/* Returns the index of the ARM condition code string in + `arm_condition_codes'. COMPARISON should be an rtx like + `(eq (...) (...))'. */ + +int +get_arm_condition_code (comparison) + rtx comparison; +{ + switch (GET_CODE (comparison)) + { + case NE: return (1); + case EQ: return (0); + case GE: return (10); + case GT: return (12); + case LE: return (13); + case LT: return (11); + case GEU: return (2); + case GTU: return (8); + case LEU: return (9); + case LTU: return (3); + default: abort (); + } + /*NOTREACHED*/ + return (42); +} /* get_arm_condition_code */ + + +void +final_prescan_insn (insn, opvec, noperands) + rtx insn; + rtx *opvec; + int noperands; +{ + /* BODY will hold the body of INSN. */ + register rtx body = PATTERN (insn); + + /* This will be 1 if trying to repeat the trick, and things need to be + reversed if it appears to fail. */ + int reverse = 0; + + /* START_INSN will hold the insn from where we start looking. This is the + first insn after the following code_label if REVERSE is true. */ + rtx start_insn = insn; + + /* If in state 4, check if the target branch is reached, in order to + change back to state 0. */ + if (arm_ccfsm_state == 4) + { + if (insn == arm_target_insn) + arm_ccfsm_state = 0; + return; + } + + /* If in state 3, it is possible to repeat the trick, if this insn is an + unconditional branch to a label, and immediately following this branch + is the previous target label which is only used once, and the label this + branch jumps to is not too far off. 
*/ + if (arm_ccfsm_state == 3) + { + if (simplejump_p (insn)) + { + start_insn = next_nonnote_insn (start_insn); + if (GET_CODE (start_insn) == BARRIER) + { + /* XXX Isn't this always a barrier? */ + start_insn = next_nonnote_insn (start_insn); + } + if (GET_CODE (start_insn) == CODE_LABEL + && CODE_LABEL_NUMBER (start_insn) == arm_target_label + && LABEL_NUSES (start_insn) == 1) + reverse = TRUE; + else + return; + } + else + return; + } + + if (arm_ccfsm_state != 0 && !reverse) + abort (); + if (GET_CODE (insn) != JUMP_INSN) + return; + + if (reverse + || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC + && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE)) + { + int insns_skipped = 0, fail = FALSE, succeed = FALSE; + /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */ + int then_not_else = TRUE; + rtx this_insn = start_insn, label; + + /* Register the insn jumped to. */ + if (reverse) + label = XEXP (SET_SRC (body), 0); + else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF) + label = XEXP (XEXP (SET_SRC (body), 1), 0); + else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF) + { + label = XEXP (XEXP (SET_SRC (body), 2), 0); + then_not_else = FALSE; + } + else + abort (); + + /* See how many insns this branch skips, and what kind of insns. If all + insns are okay, and the label or unconditional branch to the same + label is not too far away, succeed. */ + for (insns_skipped = 0; + !fail && !succeed && insns_skipped < MAX_INSNS_SKIPPED; + insns_skipped++) + { + rtx scanbody; + + this_insn = next_nonnote_insn (this_insn); + if (!this_insn) + break; + + scanbody = PATTERN (this_insn); + + switch (GET_CODE (this_insn)) + { + case CODE_LABEL: + /* Succeed if it is the target label, otherwise fail since + control falls in from somewhere else. */ + if (this_insn == label) + { + arm_ccfsm_state = 1; + succeed = TRUE; + } + else + fail = TRUE; + break; + + case BARRIER: /* XXX Is this case necessary? */ + /* Succeed if the following insn is the target label. + Otherwise fail. */ + this_insn = next_nonnote_insn (this_insn); + if (this_insn == label) + { + arm_ccfsm_state = 1; + succeed = TRUE; + } + else + fail = TRUE; + break; + + case JUMP_INSN: + /* If this is an unconditional branch to the same label, succeed. + If it is to another label, do nothing. If it is conditional, + fail. */ + /* XXX Probably, the test for the SET and the PC are unnecessary. */ + + if (GET_CODE (scanbody) == SET && GET_CODE (SET_DEST (scanbody)) == PC) + { + if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF + && XEXP (SET_SRC (scanbody), 0) == label && !reverse) + { + arm_ccfsm_state = 2; + succeed = TRUE; + } + else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE) + fail = TRUE; + } + break; + + case INSN: + /* Instructions affecting the condition codes make it fail. */ + if (sets_cc0_p (scanbody)) + fail = TRUE; + break; + + default: + break; + } + } + if (succeed) + { + if (arm_ccfsm_state == 1 || reverse) + arm_target_label = CODE_LABEL_NUMBER (label); + else if (arm_ccfsm_state == 2) + arm_target_insn = this_insn; + else + abort (); + + /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from what + it was. 
*/ + if (!reverse) + arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body), 0)); + if (reverse || then_not_else) + arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc); + } + } +} /* final_prescan_insn */ + +/* EOF */ diff --git a/gcc/config/tahoe/tahoe.c b/gcc/config/tahoe/tahoe.c new file mode 100644 index 00000000000..810175ab3be --- /dev/null +++ b/gcc/config/tahoe/tahoe.c @@ -0,0 +1,564 @@ +/* Subroutines for insn-output.c for Tahoe. + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ + + +#include "config.h" +#include "rtl.h" +#include "regs.h" +#include "hard-reg-set.h" +#include "real.h" +#include "insn-config.h" +#include "conditions.h" +#include "insn-flags.h" +#include "output.h" +#include "insn-attr.h" + +/* + * File: output-tahoe.c + * + * Original port made at the University of Buffalo by Devon Bowen, + * Dale Wiles and Kevin Zachmann. + * + * Changes for HCX by Piet van Oostrum, + * University of Utrecht, The Netherlands (piet@cs.ruu.nl) + * + * Speed tweaks by Michael Tiemann (tiemann@lurch.stanford.edu). + * + * Mail bugs reports or fixes to: gcc@cs.buffalo.edu + */ + + +/* On tahoe, you have to go to memory to convert a register + from sub-word to word. */ + +rtx tahoe_reg_conversion_loc; + +int +extendable_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + if ((GET_CODE (op) == REG + || (GET_CODE (op) == SUBREG + && GET_CODE (SUBREG_REG (op)) == REG)) + && tahoe_reg_conversion_loc == 0) + tahoe_reg_conversion_loc = assign_stack_local (SImode, GET_MODE_SIZE (SImode)); + return general_operand (op, mode); +} + +/* most of the print_operand_address function was taken from the vax */ +/* since the modes are basically the same. I had to add a special case, */ +/* though, for symbol references with offsets. 
*/ + +#include + +print_operand_address (file, addr) + FILE *file; + register rtx addr; +{ + register rtx reg1, reg2, breg, ireg; + rtx offset; + static char *reg_name[] = REGISTER_NAMES; + + retry: + switch (GET_CODE (addr)) + { + case MEM: + fprintf (file, "*"); + addr = XEXP (addr, 0); + goto retry; + + case REG: + fprintf (file, "(%s)", reg_name [REGNO (addr)]); + break; + + case PRE_DEC: + fprintf (file, "-(%s)", reg_name [REGNO (XEXP (addr, 0))]); + break; + + case POST_INC: + fprintf (file, "(%s)+", reg_name [REGNO (XEXP (addr, 0))]); + break; + + case PLUS: + reg1 = 0; reg2 = 0; + ireg = 0; breg = 0; + offset = 0; + + if (CONSTANT_ADDRESS_P (XEXP (addr, 0)) + && GET_CODE (XEXP (addr, 1)) == CONST_INT) + output_addr_const (file, addr); + + if (CONSTANT_ADDRESS_P (XEXP (addr, 1)) + && GET_CODE (XEXP (addr, 0)) == CONST_INT) + output_addr_const (file, addr); + + if (CONSTANT_ADDRESS_P (XEXP (addr, 0)) + || GET_CODE (XEXP (addr, 0)) == MEM) + { + offset = XEXP (addr, 0); + addr = XEXP (addr, 1); + } + else if (CONSTANT_ADDRESS_P (XEXP (addr, 1)) + || GET_CODE (XEXP (addr, 1)) == MEM) + { + offset = XEXP (addr, 1); + addr = XEXP (addr, 0); + } + if (GET_CODE (addr) != PLUS) + ; + else if (GET_CODE (XEXP (addr, 0)) == MULT) + { + reg1 = XEXP (addr, 0); + addr = XEXP (addr, 1); + } + else if (GET_CODE (XEXP (addr, 1)) == MULT) + { + reg1 = XEXP (addr, 1); + addr = XEXP (addr, 0); + } + else if (GET_CODE (XEXP (addr, 0)) == REG) + { + reg1 = XEXP (addr, 0); + addr = XEXP (addr, 1); + } + else if (GET_CODE (XEXP (addr, 1)) == REG) + { + reg1 = XEXP (addr, 1); + addr = XEXP (addr, 0); + } + if (GET_CODE (addr) == REG || GET_CODE (addr) == MULT) + { + if (reg1 == 0) + reg1 = addr; + else + reg2 = addr; + addr = 0; + } + if (offset != 0) + { + if (addr != 0) abort (); + addr = offset; + } + if (reg1 != 0 && GET_CODE (reg1) == MULT) + { + breg = reg2; + ireg = reg1; + } + else if (reg2 != 0 && GET_CODE (reg2) == MULT) + { + breg = reg1; + ireg = reg2; + } + else if (reg2 != 0 || GET_CODE (addr) == MEM) + { + breg = reg2; + ireg = reg1; + } + else + { + breg = reg1; + ireg = reg2; + } + if (addr != 0) + output_address (offset); + if (breg != 0) + { + if (GET_CODE (breg) != REG) + abort (); + fprintf (file, "(%s)", reg_name[REGNO (breg)]); + } + if (ireg != 0) + { + if (GET_CODE (ireg) == MULT) + ireg = XEXP (ireg, 0); + if (GET_CODE (ireg) != REG) + abort (); + fprintf (file, "[%s]", reg_name[REGNO (ireg)]); + } + break; + + default: + output_addr_const (file, addr); + } +} + +/* Do a quick check and find out what the best way to do the */ +/* mini-move is. Could be a push or a move..... */ + +static char * +singlemove_string (operands) + rtx *operands; +{ + if (operands[1] == const0_rtx) + return "clrl %0"; + if (push_operand (operands[0], SImode)) + return "pushl %1"; + return "movl %1,%0"; +} + +/* given the rtx for an address, return true if the given */ +/* register number is used in the address somewhere. */ + +regisused(addr,regnum) +rtx addr; +int regnum; +{ + if (GET_CODE(addr) == REG) + if (REGNO(addr) == regnum) + return (1); + else + return (0); + + if (GET_CODE(addr) == MEM) + return regisused(XEXP(addr,0),regnum); + + if ((GET_CODE(addr) == MULT) || (GET_CODE(addr) == PLUS)) + return ((regisused(XEXP(addr,0),regnum)) || + (regisused(XEXP(addr,1),regnum))); + + return 0; +} + + +/* Given some rtx, traverse it and return the register used in a */ +/* index. If no index is found, return 0. 
*/ + +rtx +index_reg(addr) +rtx addr; +{ + rtx temp; + + if (GET_CODE(addr) == MEM) + return index_reg(XEXP(addr,0)); + + if (GET_CODE(addr) == MULT) + if (GET_CODE(XEXP(addr,0)) == REG) + return XEXP(addr,0); + else + return XEXP(addr,1); + + if (GET_CODE(addr) == PLUS) + if (temp = index_reg(XEXP(addr,0))) + return temp; + else + return index_reg(XEXP(addr,1)); + + return 0; +} + + +/* simulate the move double by generating two movl's. You have */ +/* to be careful about mixing modes here. */ + +char * +output_move_double (operands) + rtx *operands; +{ + enum { REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, INDOP, CNSTOP, RNDOP } + optype0, optype1; + rtx latehalf[2]; + rtx shftreg0 = 0, shftreg1 = 0; + rtx temp0 = 0, temp1 = 0; + rtx addreg0 = 0, addreg1 = 0; + int dohighfirst = 0; + + /* First classify both operands. */ + + if (REG_P (operands[0])) + optype0 = REGOP; + else if ((GET_CODE(operands[0])==MEM) && (shftreg0=index_reg(operands[0]))) + optype0 = INDOP; + else if (offsettable_memref_p (operands[0])) + optype0 = OFFSOP; + else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC) { + optype0 = PUSHOP; + dohighfirst++; + } else if (GET_CODE (operands[0]) == MEM) + optype0 = MEMOP; + else + optype0 = RNDOP; + + if (REG_P (operands[1])) + optype1 = REGOP; + else if ((GET_CODE(operands[1])==MEM) && (shftreg1=index_reg(operands[1]))) + optype1 = INDOP; + else if (offsettable_memref_p (operands[1])) + optype1 = OFFSOP; + else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC) + optype1 = POPOP; + else if (GET_CODE (operands[1]) == MEM) + optype1 = MEMOP; + else if (CONSTANT_P (operands[1])) + optype1 = CNSTOP; + else + optype1 = RNDOP; + + /* set up for the high byte move for operand zero */ + + switch (optype0) { + + /* if it's a register, just use the next highest in the */ + /* high address move. */ + + case REGOP : latehalf[0] = gen_rtx (REG,SImode,REGNO(operands[0])+1); + break; + + /* for an offsettable address, use the gcc function to */ + /* modify the operand to get an offset of 4 higher for */ + /* the second move. */ + + case OFFSOP : latehalf[0] = adj_offsettable_operand (operands[0], 4); + break; + + /* if the operand is MEMOP type, it must be a pointer */ + /* to a pointer. So just remember to increase the mem */ + /* location and use the same operand. */ + + case MEMOP : latehalf[0] = operands[0]; + addreg0 = XEXP(operands[0],0); + break; + + /* if we're dealing with a push instruction, just leave */ + /* the operand alone since it auto-increments. */ + + case PUSHOP : latehalf[0] = operands[0]; + break; + + /* YUCK! Indexed addressing!! If the address is considered */ + /* offsettable, go use the offset in the high part. Otherwise */ + /* find what exactly is being added to the multiplication. If */ + /* it's a mem reference, increment that with the high part */ + /* being unchanged to cause the shift. If it's a reg, do the */ + /* same. If you can't identify it, abort. Remember that the */ + /* shift register was already set during identification. 
*/ + + case INDOP : if (offsettable_memref_p(operands[0])) { + latehalf[0] = adj_offsettable_operand(operands[0],4); + break; + } + + latehalf[0] = operands[0]; + + temp0 = XEXP(XEXP(operands[0],0),0); + if (GET_CODE(temp0) == MULT) { + temp1 = temp0; + temp0 = XEXP(XEXP(operands[0],0),1); + } else { + temp1 = XEXP(XEXP(operands[0],0),1); + if (GET_CODE(temp1) != MULT) + abort(); + } + + if (GET_CODE(temp0) == MEM) + addreg0 = temp0; + else if (GET_CODE(temp0) == REG) + addreg0 = temp0; + else + abort(); + + break; + + /* if we don't know the operand type, print a friendly */ + /* little error message... 8-) */ + + case RNDOP : + default : abort(); + } + + /* do the same setup for operand one */ + + switch (optype1) { + + case REGOP : latehalf[1] = gen_rtx(REG,SImode,REGNO(operands[1])+1); + break; + + case OFFSOP : latehalf[1] = adj_offsettable_operand (operands[1], 4); + break; + + case MEMOP : latehalf[1] = operands[1]; + addreg1 = XEXP(operands[1],0); + break; + + case POPOP : latehalf[1] = operands[1]; + break; + + case INDOP : if (offsettable_memref_p(operands[1])) { + latehalf[1] = adj_offsettable_operand(operands[1],4); + break; + } + + latehalf[1] = operands[1]; + + temp0 = XEXP(XEXP(operands[1],0),0); + if (GET_CODE(temp0) == MULT) { + temp1 = temp0; + temp0 = XEXP(XEXP(operands[1],0),1); + } else { + temp1 = XEXP(XEXP(operands[1],0),1); + if (GET_CODE(temp1) != MULT) + abort(); + } + + if (GET_CODE(temp0) == MEM) + addreg1 = temp0; + else if (GET_CODE(temp0) == REG) + addreg1 = temp0; + else + abort(); + + break; + + case CNSTOP : + if (GET_CODE (operands[1]) == CONST_DOUBLE) + split_double (operands[1], &operands[1], &latehalf[1]); + else if (CONSTANT_P (operands[1])) + latehalf[1] = const0_rtx; + else abort (); + break; + + case RNDOP : + default : abort(); + } + + + /* double the register used for shifting in both of the operands */ + /* but make sure the same register isn't doubled twice! */ + + if (shftreg0 && shftreg1 && (rtx_equal_p(shftreg0,shftreg1))) + output_asm_insn("addl2 %0,%0", &shftreg0); + else { + if (shftreg0) + output_asm_insn("addl2 %0,%0", &shftreg0); + if (shftreg1) + output_asm_insn("addl2 %0,%0", &shftreg1); + } + + /* if the destination is a register and that register is needed in */ + /* the source addressing mode, swap the order of the moves since we */ + /* don't want this destroyed til last. If both regs are used, not */ + /* much we can do, so abort. If these becomes a problem, maybe we */ + /* can do it on the stack? */ + + if (GET_CODE(operands[0])==REG && regisused(operands[1],REGNO(operands[0]))) + if (regisused(latehalf[1],REGNO(latehalf[0]))) + 8; + else + dohighfirst++; + + /* if we're pushing, do the high address part first. 
*/ + + if (dohighfirst) { + + if (addreg0 && addreg1 && (rtx_equal_p(addreg0,addreg1))) + output_asm_insn("addl2 $4,%0", &addreg0); + else { + if (addreg0) + output_asm_insn("addl2 $4,%0", &addreg0); + if (addreg1) + output_asm_insn("addl2 $4,%0", &addreg1); + } + + output_asm_insn(singlemove_string(latehalf), latehalf); + + if (addreg0 && addreg1 && (rtx_equal_p(addreg0,addreg1))) + output_asm_insn("subl2 $4,%0", &addreg0); + else { + if (addreg0) + output_asm_insn("subl2 $4,%0", &addreg0); + if (addreg1) + output_asm_insn("subl2 $4,%0", &addreg1); + } + + return singlemove_string(operands); + } + + output_asm_insn(singlemove_string(operands), operands); + + if (addreg0 && addreg1 && (rtx_equal_p(addreg0,addreg1))) + output_asm_insn("addl2 $4,%0", &addreg0); + else { + if (addreg0) + output_asm_insn("addl2 $4,%0", &addreg0); + if (addreg1) + output_asm_insn("addl2 $4,%0", &addreg1); + } + + output_asm_insn(singlemove_string(latehalf), latehalf); + + if (addreg0 && addreg1 && (rtx_equal_p(addreg0,addreg1))) + output_asm_insn("subl2 $4,%0", &addreg0); + else { + if (addreg0) + output_asm_insn("subl2 $4,%0", &addreg0); + if (addreg1) + output_asm_insn("subl2 $4,%0", &addreg1); + } + + if (shftreg0 && shftreg1 && (rtx_equal_p(shftreg0,shftreg1))) + output_asm_insn("shar $1,%0,%0", &shftreg0); + else { + if (shftreg0) + output_asm_insn("shar $1,%0,%0", &shftreg0); + if (shftreg1) + output_asm_insn("shar $1,%0,%0", &shftreg1); + } + + return ""; +} + + +/* This checks if a zero_extended cmp[bw] can be replaced by a sign_extended + cmp[bw]. This can be done if the operand is a constant that fits in a + byte/word or a memory operand. Besides that the next instruction must be an + unsigned compare. Some of these tests are done by the machine description */ + +int +tahoe_cmp_check (insn, op, max) +rtx insn, op; int max; +{ + if (GET_CODE (op) == CONST_INT + && ( INTVAL (op) < 0 || INTVAL (op) > max )) + return 0; + { + register rtx next = NEXT_INSN (insn); + + if ((GET_CODE (next) == JUMP_INSN + || GET_CODE (next) == INSN + || GET_CODE (next) == CALL_INSN)) + { + next = PATTERN (next); + if (GET_CODE (next) == SET + && SET_DEST (next) == pc_rtx + && GET_CODE (SET_SRC (next)) == IF_THEN_ELSE) + switch (GET_CODE (XEXP (SET_SRC (next), 0))) + { + case EQ: + case NE: + case LTU: + case GTU: + case LEU: + case GEU: + return 1; + } + } + } + return 0; +} diff --git a/gcc/config/tahoe/xm-tahoe.h b/gcc/config/tahoe/xm-tahoe.h new file mode 100644 index 00000000000..a5f06e94de5 --- /dev/null +++ b/gcc/config/tahoe/xm-tahoe.h @@ -0,0 +1,63 @@ +/* Configuration for GNU C-compiler for Tahoe. + Copyright (C) 1987 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ + +/* + * File: xm-tahoe.h + * + * Original port made at the University of Buffalo by Devon Bowen, + * Dale Wiles and Kevin Zachmann. 
+ * + * Changes for HCX by Piet van Oostrum, + * University of Utrecht, The Netherlands (piet@cs.ruu.nl) + * + * Mail bugs reports or fixes to: gcc@cs.buffalo.edu + */ + + +/* This file has the same stuff the vax version does */ + +/* defines that need visibility everywhere */ + +#define FALSE 0 +#define TRUE 1 + +/* target machine dependencies + tm.h is a symbolic link to the actual target specific file. */ + +#include "tm.h" + +/* This describes the machine the compiler is hosted on. */ + +#define HOST_BITS_PER_CHAR 8 +#define HOST_BITS_PER_SHORT 16 +#define HOST_BITS_PER_INT 32 +#define HOST_BITS_PER_LONG 32 +#define HOST_BITS_PER_LONGLONG 64 + +#define HOST_WORDS_BIG_ENDIAN + +/* Arguments to use with `exit'. */ + +#define SUCCESS_EXIT_CODE 0 +#define FATAL_EXIT_CODE 33 + +/* If compiled with GNU C, use the built-in alloca */ +#ifdef __GNUC__ +#define alloca __builtin_alloca +#endif diff --git a/gcc/stupid.c b/gcc/stupid.c new file mode 100644 index 00000000000..47d7740fb64 --- /dev/null +++ b/gcc/stupid.c @@ -0,0 +1,544 @@ +/* Dummy data flow analysis for GNU compiler in nonoptimizing mode. + Copyright (C) 1987, 1991 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ + + +/* This file performs stupid register allocation, which is used + when cc1 gets the -noreg switch (which is when cc does not get -O). + + Stupid register allocation goes in place of the the flow_analysis, + local_alloc and global_alloc passes. combine_instructions cannot + be done with stupid allocation because the data flow info that it needs + is not computed here. + + In stupid allocation, the only user-defined variables that can + go in registers are those declared "register". They are assumed + to have a life span equal to their scope. Other user variables + are given stack slots in the rtl-generation pass and are not + represented as pseudo regs. A compiler-generated temporary + is assumed to live from its first mention to its last mention. + + Since each pseudo-reg's life span is just an interval, it can be + represented as a pair of numbers, each of which identifies an insn by + its position in the function (number of insns before it). The first + thing done for stupid allocation is to compute such a number for each + insn. It is called the suid. Then the life-interval of each + pseudo reg is computed. Then the pseudo regs are ordered by priority + and assigned hard regs in priority order. */ + +#include +#include "config.h" +#include "rtl.h" +#include "hard-reg-set.h" +#include "regs.h" +#include "flags.h" + +/* Vector mapping INSN_UIDs to suids. + The suids are like uids but increase monotonically always. + We use them to see whether a subroutine call came + between a variable's birth and its death. */ + +static int *uid_suid; + +/* Get the suid of an insn. 
*/ + +#define INSN_SUID(INSN) (uid_suid[INSN_UID (INSN)]) + +/* Record the suid of the last CALL_INSN + so we can tell whether a pseudo reg crosses any calls. */ + +static int last_call_suid; + +/* Record the suid of the last JUMP_INSN + so we can tell whether a pseudo reg crosses any jumps. */ + +static int last_jump_suid; + +/* Record the suid of the last CODE_LABEL + so we can tell whether a pseudo reg crosses any labels. */ + +static int last_label_suid; + +/* Element N is suid of insn where life span of pseudo reg N ends. + Element is 0 if register N has not been seen yet on backward scan. */ + +static int *reg_where_dead; + +/* Element N is suid of insn where life span of pseudo reg N begins. */ + +static int *reg_where_born; + +/* Element N is 1 if pseudo reg N lives across labels or jumps. */ + +static char *reg_crosses_blocks; + +/* Numbers of pseudo-regs to be allocated, highest priority first. */ + +static int *reg_order; + +/* Indexed by reg number (hard or pseudo), nonzero if register is live + at the current point in the instruction stream. */ + +static char *regs_live; + +/* Indexed by insn's suid, the set of hard regs live after that insn. */ + +static HARD_REG_SET *after_insn_hard_regs; + +/* Record that hard reg REGNO is live after insn INSN. */ + +#define MARK_LIVE_AFTER(INSN,REGNO) \ + SET_HARD_REG_BIT (after_insn_hard_regs[INSN_SUID (INSN)], (REGNO)) + +static void stupid_mark_refs (); +static int stupid_reg_compare (); +static int stupid_find_reg (); + +/* Stupid life analysis is for the case where only variables declared + `register' go in registers. For this case, we mark all + pseudo-registers that belong to register variables as + dying in the last instruction of the function, and all other + pseudo registers as dying in the last place they are referenced. + Hard registers are marked as dying in the last reference before + the end or before each store into them. */ + +void +stupid_life_analysis (f, nregs, file) + rtx f; + int nregs; + FILE *file; +{ + register int i; + register rtx last, insn; + int max_uid; + + bzero (regs_ever_live, sizeof regs_ever_live); + + regs_live = (char *) alloca (nregs); + + /* First find the last real insn, and count the number of insns, + and assign insns their suids. */ + + for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) + if (INSN_UID (insn) > i) + i = INSN_UID (insn); + + max_uid = i + 1; + uid_suid = (int *) alloca ((i + 1) * sizeof (int)); + + /* Compute the mapping from uids to suids. + Suids are numbers assigned to insns, like uids, + except that suids increase monotonically through the code. */ + + last = 0; /* In case of empty function body */ + for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) + { + if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN + || GET_CODE (insn) == JUMP_INSN) + last = insn; + INSN_SUID (insn) = ++i; + } + + last_call_suid = i + 1; + last_jump_suid = i + 1; + last_label_suid = i + 1; + + max_regno = nregs; + + /* Allocate tables to record info about regs. 
*/ + + reg_where_dead = (int *) alloca (nregs * sizeof (int)); + bzero (reg_where_dead, nregs * sizeof (int)); + + reg_where_born = (int *) alloca (nregs * sizeof (int)); + bzero (reg_where_born, nregs * sizeof (int)); + + reg_crosses_blocks = (char *) alloca (nregs); + bzero (reg_crosses_blocks, nregs); + + reg_order = (int *) alloca (nregs * sizeof (int)); + bzero (reg_order, nregs * sizeof (int)); + + reg_renumber = (short *) oballoc (nregs * sizeof (short)); + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + reg_renumber[i] = i; + + for (i = FIRST_VIRTUAL_REGISTER; i <= LAST_VIRTUAL_REGISTER; i++) + reg_renumber[i] = -1; + + after_insn_hard_regs = (HARD_REG_SET *) alloca (max_uid * sizeof (HARD_REG_SET)); + bzero (after_insn_hard_regs, max_uid * sizeof (HARD_REG_SET)); + + /* Allocate and zero out many data structures + that will record the data from lifetime analysis. */ + + allocate_for_life_analysis (); + + for (i = 0; i < max_regno; i++) + { + reg_n_deaths[i] = 1; + } + + bzero (regs_live, nregs); + + /* Find where each pseudo register is born and dies, + by scanning all insns from the end to the start + and noting all mentions of the registers. + + Also find where each hard register is live + and record that info in after_insn_hard_regs. + regs_live[I] is 1 if hard reg I is live + at the current point in the scan. */ + + for (insn = last; insn; insn = PREV_INSN (insn)) + { + register HARD_REG_SET *p = after_insn_hard_regs + INSN_SUID (insn); + + /* Copy the info in regs_live + into the element of after_insn_hard_regs + for the current position in the rtl code. */ + + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (regs_live[i]) + SET_HARD_REG_BIT (*p, i); + + /* Mark all call-clobbered regs as live after each call insn + so that a pseudo whose life span includes this insn + will not go in one of them. + Then mark those regs as all dead for the continuing scan + of the insns before the call. */ + + if (GET_CODE (insn) == CALL_INSN) + { + last_call_suid = INSN_SUID (insn); + IOR_HARD_REG_SET (after_insn_hard_regs[last_call_suid], + call_used_reg_set); + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (call_used_regs[i]) + regs_live[i] = 0; + } + + if (GET_CODE (insn) == JUMP_INSN) + last_jump_suid = INSN_SUID (insn); + + if (GET_CODE (insn) == CODE_LABEL) + last_label_suid = INSN_SUID (insn); + + /* Update which hard regs are currently live + and also the birth and death suids of pseudo regs + based on the pattern of this insn. */ + + if (GET_CODE (insn) == INSN + || GET_CODE (insn) == CALL_INSN + || GET_CODE (insn) == JUMP_INSN) + { + stupid_mark_refs (PATTERN (insn), insn); + } + } + + /* Now decide the order in which to allocate the pseudo registers. */ + + for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++) + reg_order[i] = i; + + qsort (®_order[LAST_VIRTUAL_REGISTER + 1], + max_regno - LAST_VIRTUAL_REGISTER - 1, sizeof (int), + stupid_reg_compare); + + /* Now, in that order, try to find hard registers for those pseudo regs. */ + + for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++) + { + register int r = reg_order[i]; + enum reg_class class; + + /* Some regnos disappear from the rtl. Ignore them to avoid crash. 
+      if (regno_reg_rtx[r] == 0)
+        continue;
+
+      /* Now find the best hard-register class for this pseudo register */
+      if (N_REG_CLASSES > 1)
+        {
+          class = reg_preferred_class (r);
+
+          reg_renumber[r] = stupid_find_reg (reg_n_calls_crossed[r], class,
+                                             PSEUDO_REGNO_MODE (r),
+                                             reg_where_born[r],
+                                             reg_where_dead[r],
+                                             reg_crosses_blocks[r]);
+        }
+      else
+        reg_renumber[r] = -1;
+
+      /* If no reg available in that class,
+         try any reg.  */
+      if (reg_renumber[r] == -1)
+        reg_renumber[r] = stupid_find_reg (reg_n_calls_crossed[r],
+                                           GENERAL_REGS,
+                                           PSEUDO_REGNO_MODE (r),
+                                           reg_where_born[r],
+                                           reg_where_dead[r],
+                                           reg_crosses_blocks[r]);
+    }
+
+  if (file)
+    dump_flow_info (file);
+}
+
+/* Comparison function for qsort.
+   Returns -1 (1) if register *R1P is higher (lower) priority than *R2P.  */
+
+static int
+stupid_reg_compare (r1p, r2p)
+     int *r1p, *r2p;
+{
+  register int r1 = *r1p, r2 = *r2p;
+  register int len1 = reg_where_dead[r1] - reg_where_born[r1];
+  register int len2 = reg_where_dead[r2] - reg_where_born[r2];
+  int tem;
+
+  tem = len2 - len1;
+  if (tem != 0) return tem;
+
+  tem = reg_n_refs[r1] - reg_n_refs[r2];
+  if (tem != 0) return tem;
+
+  /* If regs are equally good, sort by regno,
+     so that the results of qsort leave nothing to chance.  */
+  return r1 - r2;
+}
+
+/* Find a block of SIZE words of hard registers in reg_class CLASS
+   that can hold a value of machine-mode MODE
+   (but actually we test only the first of the block for holding MODE)
+   currently free from after insn whose suid is BIRTH
+   through the insn whose suid is DEATH,
+   and return the number of the first of them.
+   Return -1 if such a block cannot be found.
+
+   If CALL_PRESERVED is nonzero, insist on registers preserved
+   over subroutine calls, and return -1 if cannot find such.
+   If CROSSES_BLOCKS is nonzero, reject registers for which
+   PRESERVE_DEATH_INFO_REGNO_P is true.  */
+
+static int
+stupid_find_reg (call_preserved, class, mode,
+                 born_insn, dead_insn, crosses_blocks)
+     int call_preserved;
+     enum reg_class class;
+     enum machine_mode mode;
+     int born_insn, dead_insn;
+     int crosses_blocks;
+{
+  register int i, ins;
+#ifdef HARD_REG_SET
+  register      /* Declare them register if they are scalars.  */
+#endif
+    HARD_REG_SET used, this_reg;
+#ifdef ELIMINABLE_REGS
+  static struct {int from, to; } eliminables[] = ELIMINABLE_REGS;
+#endif
+
+  COPY_HARD_REG_SET (used,
+                     call_preserved ? call_used_reg_set : fixed_reg_set);
+
+#ifdef ELIMINABLE_REGS
+  for (i = 0; i < sizeof eliminables / sizeof eliminables[0]; i++)
+    SET_HARD_REG_BIT (used, eliminables[i].from);
+#else
+  SET_HARD_REG_BIT (used, FRAME_POINTER_REGNUM);
+#endif
+
+  for (ins = born_insn; ins < dead_insn; ins++)
+    IOR_HARD_REG_SET (used, after_insn_hard_regs[ins]);
+
+  IOR_COMPL_HARD_REG_SET (used, reg_class_contents[(int) class]);
+
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    {
+#ifdef REG_ALLOC_ORDER
+      int regno = reg_alloc_order[i];
+#else
+      int regno = i;
+#endif
+
+      /* If we need reasonable death info on this hard reg,
+         don't use it for anything whose life spans a label or a jump.  */
+#ifdef PRESERVE_DEATH_INFO_REGNO_P
+      if (PRESERVE_DEATH_INFO_REGNO_P (regno)
+          && crosses_blocks)
+        continue;
+#endif
+      /* If a register has screwy overlap problems,
+         don't use it at all if not optimizing.
+         Actually this is only for the 387 stack register,
+         and it's because subsequent code won't work.  */
+#ifdef OVERLAPPING_REGNO_P
+      if (OVERLAPPING_REGNO_P (regno))
+        continue;
+#endif
+
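+      /* USED now holds every hard register that is unusable here: those
+         busy anywhere in the interval [born_insn, dead_insn), those
+         outside CLASS, the frame pointer (or other eliminable
+         registers), and either the call-clobbered or just the fixed
+         registers, depending on CALL_PRESERVED.  All that is left is to
+         find a free block of the right size.  */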
+      if (! TEST_HARD_REG_BIT (used, regno)
+          && HARD_REGNO_MODE_OK (regno, mode))
+        {
+          register int j;
+          register int size1 = HARD_REGNO_NREGS (regno, mode);
+          for (j = 1; j < size1 && ! TEST_HARD_REG_BIT (used, regno + j); j++);
+          if (j == size1)
+            {
+              CLEAR_HARD_REG_SET (this_reg);
+              while (--j >= 0)
+                SET_HARD_REG_BIT (this_reg, regno + j);
+              for (ins = born_insn; ins < dead_insn; ins++)
+                {
+                  IOR_HARD_REG_SET (after_insn_hard_regs[ins], this_reg);
+                }
+              return regno;
+            }
+#ifndef REG_ALLOC_ORDER
+          i += j;   /* Skip starting points we know will lose */
+#endif
+        }
+    }
+  return -1;
+}
+
+/* Walk X, noting all assignments and references to registers
+   and recording what they imply about life spans.
+   INSN is the current insn, supplied so we can find its suid.  */
+
+static void
+stupid_mark_refs (x, insn)
+     rtx x, insn;
+{
+  register RTX_CODE code = GET_CODE (x);
+  register char *fmt;
+  register int regno, i;
+
+  if (code == SET || code == CLOBBER)
+    {
+      if (SET_DEST (x) != 0 && GET_CODE (SET_DEST (x)) == REG)
+        {
+          /* Register is being assigned.  */
+          regno = REGNO (SET_DEST (x));
+
+          /* For hard regs, update the where-live info.  */
+          if (regno < FIRST_PSEUDO_REGISTER)
+            {
+              register int j
+                = HARD_REGNO_NREGS (regno, GET_MODE (SET_DEST (x)));
+              while (--j >= 0)
+                {
+                  regs_ever_live[regno+j] = 1;
+                  regs_live[regno+j] = 0;
+                  /* The following line is for unused outputs;
+                     they do get stored even though never used again.  */
+                  MARK_LIVE_AFTER (insn, regno);
+                  /* When a hard reg is clobbered, mark it in use
+                     just before this insn, so it is live all through.  */
+                  if (code == CLOBBER && INSN_SUID (insn) > 0)
+                    SET_HARD_REG_BIT (after_insn_hard_regs[INSN_SUID (insn) - 1],
+                                      regno);
+                }
+            }
+          /* For pseudo regs, record where born, where dead, number of
+             times used, and whether live across a call.  */
+          else
+            {
+              /* Update the life-interval bounds of this pseudo reg.  */
+
+              /* When a pseudo-reg is CLOBBERed, it is born just before
+                 the clobbering insn.  When setting, just after.  */
+              int where_born = INSN_SUID (insn) - (code == CLOBBER);
+
+              reg_where_born[regno] = where_born;
+              /* The reg must live at least one insn even
+                 if it is never again used--because it has to go
+                 in SOME hard reg.  Mark it as dying after the current
+                 insn so that it will conflict with any other outputs of
+                 this insn.  */
+              if (reg_where_dead[regno] < where_born + 2)
+                reg_where_dead[regno] = where_born + 2;
+
+              /* Count the refs of this reg.  */
+              reg_n_refs[regno]++;
+
+              if (last_call_suid < reg_where_dead[regno])
+                reg_n_calls_crossed[regno] += 1;
+              if (last_jump_suid < reg_where_dead[regno]
+                  || last_label_suid < reg_where_dead[regno])
+                reg_crosses_blocks[regno] = 1;
+            }
+        }
+      /* Record references from the value being set,
+         or from addresses in the place being set if that's not a reg.
+         If setting a SUBREG, we treat the entire reg as *used*.  */
+      if (code == SET)
+        {
+          stupid_mark_refs (SET_SRC (x), insn);
+          if (GET_CODE (SET_DEST (x)) != REG)
+            stupid_mark_refs (SET_DEST (x), insn);
+        }
+      return;
+    }
+
+  /* Register value being used, not set.  */
+
+  if (code == REG)
+    {
+      regno = REGNO (x);
+      if (regno < FIRST_PSEUDO_REGISTER)
+        {
+          /* Hard reg: mark it live for continuing scan of previous insns.  */
+          register int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
+          while (--j >= 0)
+            {
+              regs_ever_live[regno+j] = 1;
+              regs_live[regno+j] = 1;
+            }
+        }
+      else
+        {
+          /* Pseudo reg: record first use, last use and number of uses.  */
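+          /* Because this scan runs backward, the first reference seen
+             here is the last use in execution order, which is where
+             reg_where_dead gets set; each earlier reference then moves
+             reg_where_born back toward the true first use.  */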
+
+          reg_where_born[regno] = INSN_SUID (insn);
+          reg_n_refs[regno]++;
+          if (regs_live[regno] == 0)
+            {
+              regs_live[regno] = 1;
+              reg_where_dead[regno] = INSN_SUID (insn);
+            }
+        }
+      return;
+    }
+
+  /* Recursive scan of all other rtx's.  */
+
+  fmt = GET_RTX_FORMAT (code);
+  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+    {
+      if (fmt[i] == 'e')
+        stupid_mark_refs (XEXP (x, i), insn);
+      if (fmt[i] == 'E')
+        {
+          register int j;
+          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+            stupid_mark_refs (XVECEXP (x, i, j), insn);
+        }
+    }
+}
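
A minimal, self-contained sketch of the strategy above -- an illustration only, not code from this commit: it assumes a handful of hand-written pseudo registers with made-up life intervals and reference counts and a fixed pool of four hard registers, and it omits everything the real pass must also handle (register classes, call-clobbered and fixed registers, multi-word modes, and the RTL walk).  Longer-lived pseudos are handled first, and each one takes the first hard register that is free over its whole interval, mirroring stupid_reg_compare and stupid_find_reg.

#include <stdio.h>
#include <stdlib.h>

#define N_HARD_REGS 4
#define MAX_SUID 32

/* A made-up pseudo register: a life interval in suid terms plus a
   reference count, as stupid_mark_refs would have recorded them.  */
struct pseudo
{
  int where_born;
  int where_dead;
  int n_refs;
  int hard_reg;
};

static struct pseudo pseudos[] =
{
  /* where_born, where_dead, n_refs, hard_reg */
  { 1, 9, 4, -1 },
  { 2, 4, 2, -1 },
  { 3, 8, 5, -1 },
  { 5, 7, 1, -1 },
  { 6, 9, 2, -1 },
};

#define N_PSEUDOS (sizeof pseudos / sizeof pseudos[0])

/* Analog of after_insn_hard_regs: nonzero if hard reg H is already
   taken at suid S.  */
static char hard_busy[N_HARD_REGS][MAX_SUID];

static int order[N_PSEUDOS];

/* Analog of stupid_reg_compare: longest life span first, then by
   reference count, then by index so the result is deterministic.  */
static int
compare_pseudos (const void *ap, const void *bp)
{
  int a = *(const int *) ap, b = *(const int *) bp;
  int len_a = pseudos[a].where_dead - pseudos[a].where_born;
  int len_b = pseudos[b].where_dead - pseudos[b].where_born;

  if (len_a != len_b)
    return len_b - len_a;
  if (pseudos[a].n_refs != pseudos[b].n_refs)
    return pseudos[a].n_refs - pseudos[b].n_refs;
  return a - b;
}

/* Analog of stupid_find_reg: return the first hard register free over
   the whole interval [born, dead), marking it busy there, or -1.  */
static int
find_hard_reg (int born, int dead)
{
  int h, s;

  for (h = 0; h < N_HARD_REGS; h++)
    {
      for (s = born; s < dead; s++)
        if (hard_busy[h][s])
          break;
      if (s == dead)
        {
          for (s = born; s < dead; s++)
            hard_busy[h][s] = 1;
          return h;
        }
    }
  return -1;
}

int
main (void)
{
  int i;

  for (i = 0; i < (int) N_PSEUDOS; i++)
    order[i] = i;
  qsort (order, N_PSEUDOS, sizeof (int), compare_pseudos);

  for (i = 0; i < (int) N_PSEUDOS; i++)
    {
      struct pseudo *p = &pseudos[order[i]];
      p->hard_reg = find_hard_reg (p->where_born, p->where_dead);
    }

  for (i = 0; i < (int) N_PSEUDOS; i++)
    printf ("pseudo %d: [%d,%d) refs %d -> hard reg %d\n",
            i, pseudos[i].where_born, pseudos[i].where_dead,
            pseudos[i].n_refs, pseudos[i].hard_reg);
  return 0;
}

With the toy data above every pseudo gets a hard register; in the real pass, a pseudo for which no register can be found simply keeps reg_renumber[r] == -1.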