From 886c62d1e32ac976334fa843f7400eb05eae31bd Mon Sep 17 00:00:00 2001
From: James Van Artsdalen
Date: Fri, 24 Jan 1992 10:34:10 +0000
Subject: [PATCH] Initial revision

From-SVN: r238
---
 gcc/config/i386/i386.md | 3391 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 3391 insertions(+)
 create mode 100644 gcc/config/i386/i386.md

diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
new file mode 100644
index 00000000000..70f2224c8f1
--- /dev/null
+++ b/gcc/config/i386/i386.md
@@ -0,0 +1,3391 @@
+;; GCC machine description for Intel 80386.
+;; Copyright (C) 1988 Free Software Foundation, Inc.
+;; Mostly by William Schelter.
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING.  If not, write to
+;; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+;;- instruction definitions
+
+;;- @@The original PO technology requires these to be ordered by speed,
+;;- @@ so that the assigner will pick the fastest.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;;- When naming insns (operand 0 of define_insn) be careful about using
+;;- names from other targets' machine descriptions.
+
+;;- cpp macro #define NOTICE_UPDATE_CC in file tm.h handles condition code
+;;- updates for most instructions.
+
+;;- Operand classes for the register allocator:
+;;-   'a' for eax
+;;-   'd' for edx
+;;-   'c' for ecx
+;;-   'b' for ebx
+;;-   'f' for anything in FLOAT_REGS
+;;-   'r' any (non-floating-point) register
+;;-   'q' regs that allow byte operations (A, B, C and D)
+;;-   'A' A and D registers
+
+;; The special single-letter asm output directives following a '%' are:
+;; 'z' mov%z1 would be movl, movw, or movb depending on the mode of operands[1]
+;; 's' output a '*'
+;; 'w' if the operand is a REG, its mode size determines how the register
+;;     is printed
+
+
+;; "movl MEM,REG / testl REG,REG" is faster on a 486 than "cmpl $0,MEM".
+;; But restricting MEM here would mean that gcc could not remove a redundant
+;; test in cases like "incl MEM / je TARGET".
+;;
+;; We don't want to allow a constant operand for test insns because
+;; (set (cc0) (const_int foo)) has no mode information.  Such insns will
+;; be folded while optimizing anyway.
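+;;
+;; For instance, given a C fragment like
+;;
+;;	int f (int *p) { return *p == 0; }
+;;
+;; the test can come out either as the single insn
+;;	cmpl $0,(%eax)
+;; or, faster on a 486, as
+;;	movl (%eax),%edx
+;;	testl %edx,%edx
+;; (the particular registers are illustrative, not fixed by the patterns).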
+
+(define_insn "tstsi"
+  [(set (cc0)
+	(match_operand:SI 0 "nonimmediate_operand" "rm"))]
+  ""
+  "*
+{
+  if (REG_P (operands[0]))
+    return AS2 (test%L0,%0,%0);
+
+  operands[1] = const0_rtx;
+  return AS2 (cmp%L0,%1,%0);
+}")
+
+(define_insn "tsthi"
+  [(set (cc0)
+	(match_operand:HI 0 "nonimmediate_operand" "rm"))]
+  ""
+  "*
+{
+  if (REG_P (operands[0]))
+    return AS2 (test%W0,%0,%0);
+
+  operands[1] = const0_rtx;
+  return AS2 (cmp%W0,%1,%0);
+}")
+
+(define_insn "tstqi"
+  [(set (cc0)
+	(match_operand:QI 0 "nonimmediate_operand" "qm"))]
+  ""
+  "*
+{
+  if (REG_P (operands[0]))
+    return AS2 (test%B0,%0,%0);
+
+  operands[1] = const0_rtx;
+  return AS2 (cmp%B0,%1,%0);
+}")
+
+(define_insn "tstsf"
+  [(set (cc0)
+	(match_operand:SF 0 "register_operand" "f"))
+   (clobber (match_scratch:HI 1 "=a"))]
+  "TARGET_80387"
+  "*
+{
+  if (! STACK_TOP_P (operands[0]))
+    abort ();
+
+  output_asm_insn (\"ftst\", operands);
+  cc_status.flags |= CC_IN_80387;
+
+  if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+    output_asm_insn (AS1 (fstp,%y0), operands);
+
+  output_asm_insn (AS1 (fnsts%W1,%1), operands);
+
+  return \"sahf\";
+}")
+
+(define_insn "tstdf"
+  [(set (cc0)
+	(match_operand:DF 0 "register_operand" "f"))
+   (clobber (match_scratch:HI 1 "=a"))]
+  "TARGET_80387"
+  "*
+{
+  if (! STACK_TOP_P (operands[0]))
+    abort ();
+
+  output_asm_insn (\"ftst\", operands);
+  cc_status.flags |= CC_IN_80387;
+
+  if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+    output_asm_insn (AS1 (fstp,%y0), operands);
+
+  output_asm_insn (AS1 (fnsts%W1,%1), operands);
+
+  return \"sahf\";
+}")
+
+;;- compare instructions
+
+(define_insn "cmpsi"
+  [(set (cc0)
+	(compare (match_operand:SI 0 "nonimmediate_operand" "mr,ri")
+		 (match_operand:SI 1 "general_operand" "ri,mr")))]
+  ""
+  "*
+{
+  if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
+    {
+      cc_status.flags |= CC_REVERSED;
+      return AS2 (cmp%L0,%0,%1);
+    }
+  return AS2 (cmp%L0,%1,%0);
+}")
+
+(define_insn "cmphi"
+  [(set (cc0)
+	(compare (match_operand:HI 0 "nonimmediate_operand" "mr,ri")
+		 (match_operand:HI 1 "general_operand" "ri,mr")))]
+  ""
+  "*
+{
+  if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
+    {
+      cc_status.flags |= CC_REVERSED;
+      return AS2 (cmp%W0,%0,%1);
+    }
+  return AS2 (cmp%W0,%1,%0);
+}")
+
+(define_insn "cmpqi"
+  [(set (cc0)
+	(compare (match_operand:QI 0 "nonimmediate_operand" "qn,mq")
+		 (match_operand:QI 1 "general_operand" "qm,nq")))]
+  ""
+  "*
+{
+  if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
+    {
+      cc_status.flags |= CC_REVERSED;
+      return AS2 (cmp%B0,%0,%1);
+    }
+  return AS2 (cmp%B0,%1,%0);
+}")
+
+;; These implement floating point compares.  For each of DFmode and
+;; SFmode, there is the normal insn, and an insn where the second operand
+;; is converted to the desired mode.
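+
+;; Note how the integer cmp patterns above swap their operands when the
+;; first is a constant or the second is in memory: for example
+;; (compare (const_int 5) (reg:SI)) must come out as "cmpl $5,%eax",
+;; since an immediate cannot be the destination, and CC_REVERSED records
+;; that later branches must test the swapped condition.
+
+;; A 387 compare also has to move its result out of the FPU status word
+;; before the integer unit can branch on it, typically with a sequence
+;; like "fnstsw %ax / sahf" (see the tst patterns above for the idiom);
+;; that is why the patterns below clobber an HImode scratch in %ax.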
+ +(define_expand "cmpdf" + [(parallel [(set (cc0) + (compare (match_operand:DF 0 "nonimmediate_operand" "") + (match_operand:DF 1 "nonimmediate_operand" ""))) + (clobber (match_scratch:HI 2 ""))])] + "TARGET_80387" + "") + +(define_expand "cmpsf" + [(parallel [(set (cc0) + (compare (match_operand:SF 0 "nonimmediate_operand" "") + (match_operand:SF 1 "nonimmediate_operand" ""))) + (clobber (match_scratch:HI 2 ""))])] + "TARGET_80387" + "") + +(define_insn "" + [(set (cc0) + (compare (match_operand:DF 0 "general_operand" "f") + (match_operand:DF 1 "general_operand" "fm"))) + (clobber (match_scratch:HI 2 "=a"))] + "TARGET_80387" + "* return (char *) output_float_compare (insn, operands);") + +(define_insn "" + [(set (cc0) + (compare (match_operand:DF 0 "general_operand" "f,f") + (float:DF (match_operand:SI 1 "general_operand" "m,!*r")))) + (clobber (match_scratch:HI 2 "=a,a"))] + "TARGET_80387" + "* return (char *) output_float_compare (insn, operands);") + +(define_insn "" + [(set (cc0) + (compare (match_operand:DF 0 "general_operand" "f,f") + (float_extend:DF + (match_operand:SF 1 "general_operand" "fm,!*r")))) + (clobber (match_scratch:HI 2 "=a,a"))] + "TARGET_80387" + "* return (char *) output_float_compare (insn, operands);") + +(define_insn "" + [(set (cc0) + (compare (match_operand:SF 0 "general_operand" "f") + (match_operand:SF 1 "general_operand" "fm"))) + (clobber (match_scratch:HI 2 "=a"))] + "TARGET_80387" + "* return (char *) output_float_compare (insn, operands);") + +(define_insn "" + [(set (cc0) + (compare (match_operand:SF 0 "general_operand" "f,f") + (float:SF (match_operand:SI 1 "general_operand" "m,!*r")))) + (clobber (match_scratch:HI 2 "=a,a"))] + "TARGET_80387" + "* return (char *) output_float_compare (insn, operands);") + +;; logical compare + +;; ??? What if we are testing one byte of an offsettable memory reference? +(define_insn "" + [(set (cc0) + (and:SI (match_operand:SI 0 "general_operand" "%rm") + (match_operand:SI 1 "general_operand" "ri")))] + "" + "* +{ + /* For small integers, we may actually use testb. */ + if (GET_CODE (operands[1]) == CONST_INT + && (INTVAL (operands[1]) & ~0xffff) == 0 + && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))) + { + /* We may set the sign bit spuriously. */ + cc_status.flags |= CC_NOT_NEGATIVE; + + if (! NON_QI_REG_P (operands[0]) && (INTVAL (operands[1]) & ~0xff) == 0) + return AS2 (test%B0,%1,%b0); + + if (QI_REG_P (operands[0]) && (INTVAL (operands[1]) & ~0xff00) == 0) + { + operands[1] = gen_rtx (CONST_INT, VOIDmode, + INTVAL (operands[1]) >> 8); + return AS2 (test%B0,%1,%h0); + } + } + + if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM) + return AS2 (test%L0,%1,%0); + + return AS2 (test%L1,%0,%1); +}") + +(define_insn "" + [(set (cc0) + (and:HI (match_operand:HI 0 "general_operand" "%rm") + (match_operand:HI 1 "general_operand" "ri")))] + "" + "* +{ + if (GET_CODE (operands[1]) == CONST_INT + && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))) + { + /* Can we ignore the upper byte? */ + if (! NON_QI_REG_P (operands[0]) + && (INTVAL (operands[1]) & 0xff00) == 0) + { + if (INTVAL (operands[1]) & 0xffff0000) + operands[1] = gen_rtx (CONST_INT, VOIDmode, + INTVAL (operands[1]) & 0xff); + + /* We may set the sign bit spuriously. */ + cc_status.flags |= CC_NOT_NEGATIVE; + return AS2 (test%B0,%1,%b0); + } + + /* Can we ignore the lower byte? */ + /* ??? what about offsettable memory references? 
*/ + if (QI_REG_P (operands[0]) && (INTVAL (operands[1]) & 0xff) == 0) + { + operands[1] = gen_rtx (CONST_INT, VOIDmode, + (INTVAL (operands[1]) >> 8) & 0xff); + return AS2 (test%B0,%1,%h0); + } + } + + if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM) + return AS2 (test%W0,%1,%0); + + return AS2 (test%W1,%0,%1); +}") + +(define_insn "" + [(set (cc0) + (and:QI (match_operand:QI 0 "general_operand" "%qm") + (match_operand:QI 1 "general_operand" "qi")))] + "" + "* +{ + if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM) + return AS2 (test%B0,%1,%0); + + return AS2 (test%B1,%0,%1); +}") + +;; move instructions. +;; There is one for each machine mode, +;; and each is preceded by a corresponding push-insn pattern +;; (since pushes are not general_operands on the 386). + +(define_insn "" + [(set (match_operand:SI 0 "push_operand" "=<") + (match_operand:SI 1 "general_operand" "g"))] + "! TARGET_486" + "push%L0 %1") + +;; On a 486, it is faster to move MEM to a REG and then push, rather than +;; push MEM directly. + +(define_insn "" + [(set (match_operand:SI 0 "push_operand" "=<") + (match_operand:SI 1 "general_operand" "ri"))] + "TARGET_486" + "push%L0 %1") + +;; General case of fullword move. + +;; On i486, incl reg is faster than movl $1,reg. + +(define_insn "movsi" + [(set (match_operand:SI 0 "general_operand" "=g,r") + (match_operand:SI 1 "general_operand" "ri,m"))] + "" + "* +{ + rtx link; + if (operands[1] == const0_rtx && REG_P (operands[0])) + return AS2 (xor%L0,%0,%0); + + if (operands[1] == const1_rtx + && (link = find_reg_note (insn, REG_WAS_0, 0)) + /* Make sure the insn that stored the 0 is still present. */ + && ! XEXP (link, 0)->volatil + && GET_CODE (XEXP (link, 0)) != NOTE + /* Make sure cross jumping didn't happen here. */ + && no_labels_between_p (XEXP (link, 0), insn)) + /* Fastest way to change a 0 to a 1. */ + return AS1 (inc%L0,%0); + + return AS2 (mov%L0,%1,%0); +}") + +(define_insn "" + [(set (match_operand:HI 0 "push_operand" "=<") + (match_operand:HI 1 "general_operand" "g"))] + "" + "push%W0 %1") + +;; On i486, an incl and movl are both faster than incw and movw. + +(define_insn "movhi" + [(set (match_operand:HI 0 "general_operand" "=g,r") + (match_operand:HI 1 "general_operand" "ri,m"))] + "" + "* +{ + rtx link; + if (REG_P (operands[0]) && operands[1] == const0_rtx) + return AS2 (xor%L0,%k0,%k0); + + if (REG_P (operands[0]) && operands[1] == const1_rtx + && (link = find_reg_note (insn, REG_WAS_0, 0)) + /* Make sure the insn that stored the 0 is still present. */ + && ! XEXP (link, 0)->volatil + && GET_CODE (XEXP (link, 0)) != NOTE + /* Make sure cross jumping didn't happen here. */ + && no_labels_between_p (XEXP (link, 0), insn)) + /* Fastest way to change a 0 to a 1. */ + return AS1 (inc%L0,%k0); + + if (REG_P (operands[0])) + { + if (REG_P (operands[1])) + return AS2 (mov%L0,%k1,%k0); + else if (CONSTANT_P (operands[1])) + return AS2 (mov%L0,%1,%k0); + } + + return AS2 (mov%W0,%1,%0); +}") + +(define_insn "movstricthi" + [(set (strict_low_part (match_operand:HI 0 "general_operand" "+g,r")) + (match_operand:HI 1 "general_operand" "ri,m"))] + "" + "* +{ + rtx link; + if (operands[1] == const0_rtx && REG_P (operands[0])) + return AS2 (xor%W0,%0,%0); + + if (operands[1] == const1_rtx + && (link = find_reg_note (insn, REG_WAS_0, 0)) + /* Make sure the insn that stored the 0 is still present. */ + && ! XEXP (link, 0)->volatil + && GET_CODE (XEXP (link, 0)) != NOTE + /* Make sure cross jumping didn't happen here. 
*/ + && no_labels_between_p (XEXP (link, 0), insn)) + /* Fastest way to change a 0 to a 1. */ + return AS1 (inc%W0,%0); + + return AS2 (mov%W0,%1,%0); +}") + +;; emit_push_insn when it calls move_by_pieces +;; requires an insn to "push a byte". +;; But actually we use pushw, which has the effect of rounding +;; the amount pushed up to a halfword. +(define_insn "" + [(set (match_operand:QI 0 "push_operand" "=<") + (match_operand:QI 1 "general_operand" "q"))] + "" + "* +{ + operands[1] = gen_rtx (REG, HImode, REGNO (operands[1])); + return AS1 (push%W0,%1); +}") + +;; On i486, incb reg is faster than movb $1,reg. + +;; ??? Do a recognizer for zero_extract that looks just like this, but reads +;; or writes %ah, %bh, %ch, %dh. + +(define_insn "movqi" + [(set (match_operand:QI 0 "general_operand" "=q,*r,qm") + (match_operand:QI 1 "general_operand" "*g,q,qn"))] + "" + "* +{ + rtx link; + if (operands[1] == const0_rtx && REG_P (operands[0])) + return AS2 (xor%B0,%0,%0); + + if (operands[1] == const1_rtx + && (link = find_reg_note (insn, REG_WAS_0, 0)) + /* Make sure the insn that stored the 0 is still present. */ + && ! XEXP (link, 0)->volatil + && GET_CODE (XEXP (link, 0)) != NOTE + /* Make sure cross jumping didn't happen here. */ + && no_labels_between_p (XEXP (link, 0), insn)) + /* Fastest way to change a 0 to a 1. */ + return AS1 (inc%B0,%0); + + /* If mov%B0 isn't allowed for one of these regs, use mov%L0. */ + if (NON_QI_REG_P (operands[0]) || NON_QI_REG_P (operands[1])) + return (AS2 (mov%L0,%k1,%k0)); + + return (AS2 (mov%B0,%1,%0)); +}") + +;; If it becomes necessary to support movstrictqi into %esi or %edi, +;; use the insn sequence: +;; +;; shrdl $8,srcreg,dstreg +;; rorl $24,dstreg +;; +;; If operands[1] is a constant, then an andl/orl sequence would be +;; faster. + +(define_insn "movstrictqi" + [(set (strict_low_part (match_operand:QI 0 "general_operand" "+q,qm")) + (match_operand:QI 1 "general_operand" "*g,qn"))] + "" + "* +{ + rtx link; + if (operands[1] == const0_rtx && REG_P (operands[0])) + return AS2 (xor%B0,%0,%0); + + if (operands[1] == const1_rtx + && (link = find_reg_note (insn, REG_WAS_0, 0)) + /* Make sure the insn that stored the 0 is still present. */ + && ! XEXP (link, 0)->volatil + && GET_CODE (XEXP (link, 0)) != NOTE + /* Make sure cross jumping didn't happen here. */ + && no_labels_between_p (XEXP (link, 0), insn)) + /* Fastest way to change a 0 to a 1. */ + return AS1 (inc%B0,%0); + + /* If mov%B0 isn't allowed for one of these regs, use mov%W0. */ + if (NON_QI_REG_P (operands[0]) || NON_QI_REG_P (operands[1])) + { + abort (); + return (AS2 (mov%L0,%k1,%k0)); + } + + return AS2 (mov%B0,%1,%0); +}") + +(define_insn "" + [(set (match_operand:SF 0 "push_operand" "=<,<") + (match_operand:SF 1 "general_operand" "gF,f"))] + "" + "* +{ + if (STACK_REG_P (operands[1])) + { + rtx xops[3]; + + if (! 
STACK_TOP_P (operands[1])) + abort (); + + xops[0] = AT_SP (SFmode); + xops[1] = gen_rtx (CONST_INT, VOIDmode, 4); + xops[2] = stack_pointer_rtx; + + output_asm_insn (AS2 (sub%L2,%1,%2), xops); + + if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG)) + output_asm_insn (AS1 (fstp%S0,%0), xops); + else + output_asm_insn (AS1 (fst%S0,%0), xops); + RET; + } + return AS1 (push%L1,%1); +}") + +(define_insn "movsf" + [(set (match_operand:SF 0 "general_operand" "=f,fm,!*rf,!*rm") + (match_operand:SF 1 "general_operand" "fmG,f,*rfm,*rfF"))] + "" + "* +{ + int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0; + + /* First handle a `pop' insn or a `fld %st(0)' */ + + if (STACK_TOP_P (operands[0]) && STACK_TOP_P (operands[1])) + { + if (stack_top_dies) + return AS1 (fstp,%y0); + else + return AS1 (fld,%y0); + } + + /* Handle a transfer between the 387 and a 386 register */ + + if (STACK_TOP_P (operands[0]) && NON_STACK_REG_P (operands[1])) + { + output_op_from_reg (operands[1], AS1 (fld%z0,%y1)); + RET; + } + + if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0])) + { + output_to_reg (operands[0], stack_top_dies); + RET; + } + + /* Handle other kinds of writes from the 387 */ + + if (STACK_TOP_P (operands[1])) + { + if (stack_top_dies) + return AS1 (fstp%z0,%y0); + else + return AS1 (fst%z0,%y0); + } + + /* Handle other kinds of reads to the 387 */ + + if (STACK_TOP_P (operands[0]) && GET_CODE (operands[1]) == CONST_DOUBLE) + return (char *) output_move_const_single (operands); + + if (STACK_TOP_P (operands[0])) + return AS1 (fld%z1,%y1); + + /* Handle all SFmode moves not involving the 387 */ + + return (char *) singlemove_string (operands); +}") + +;;should change to handle the memory operands[1] without doing df push.. +(define_insn "" + [(set (match_operand:DF 0 "push_operand" "=<,<") + (match_operand:DF 1 "general_operand" "gF,f"))] + "" + "* +{ + if (STACK_REG_P (operands[1])) + { + rtx xops[3]; + + xops[0] = AT_SP (SFmode); + xops[1] = gen_rtx (CONST_INT, VOIDmode, 8); + xops[2] = stack_pointer_rtx; + + output_asm_insn (AS2 (sub%L2,%1,%2), xops); + + if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG)) + output_asm_insn (AS1 (fstp%Q0,%0), xops); + else + output_asm_insn (AS1 (fst%Q0,%0), xops); + + RET; + } + else + return (char *) output_move_double (operands); +}") + +(define_insn "swapdf" + [(set (match_operand:DF 0 "register_operand" "f") + (match_operand:DF 1 "register_operand" "f")) + (set (match_dup 1) + (match_dup 0))] + "" + "* +{ + if (STACK_TOP_P (operands[0])) + return AS1 (fxch,%1); + else + return AS1 (fxch,%0); +}") + +(define_insn "movdf" + [(set (match_operand:DF 0 "general_operand" "=f,fm,!*rf,!*rm") + (match_operand:DF 1 "general_operand" "fmG,f,*rfm,*rfF"))] + "" + "* +{ + int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0; + + /* First handle a `pop' insn or a `fld %st(0)' */ + + if (STACK_TOP_P (operands[0]) && STACK_TOP_P (operands[1])) + { + if (stack_top_dies) + return AS1 (fstp,%y0); + else + return AS1 (fld,%y0); + } + + /* Handle a transfer between the 387 and a 386 register */ + + if (STACK_TOP_P (operands[0]) && NON_STACK_REG_P (operands[1])) + { + output_op_from_reg (operands[1], AS1 (fld%z0,%y1)); + RET; + } + + if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0])) + { + output_to_reg (operands[0], stack_top_dies); + RET; + } + + /* Handle other kinds of writes from the 387 */ + + if (STACK_TOP_P (operands[1])) + { + if (stack_top_dies) + return AS1 (fstp%z0,%y0); + else + return AS1 
(fst%z0,%y0); + } + + /* Handle other kinds of reads to the 387 */ + + if (STACK_TOP_P (operands[0]) && GET_CODE (operands[1]) == CONST_DOUBLE) + return (char *) output_move_const_single (operands); + + if (STACK_TOP_P (operands[0])) + return AS1 (fld%z1,%y1); + + /* Handle all DFmode moves not involving the 387 */ + + return (char *) output_move_double (operands); +}") + +(define_insn "" + [(set (match_operand:DI 0 "push_operand" "=<") + (match_operand:DI 1 "general_operand" "roiF"))] + "" + "* +{ + return (char *) output_move_double (operands); +}") + +(define_insn "movdi" + [(set (match_operand:DI 0 "general_operand" "=&r,rm") + (match_operand:DI 1 "general_operand" "m,riF"))] + "" + "* +{ + return (char *) output_move_double (operands); +}") + +;;- conversion instructions +;;- NONE + +;;- truncation instructions + +(define_insn "truncsiqi2" + [(set (match_operand:QI 0 "general_operand" "=q,qm") + (truncate:QI + (match_operand:SI 1 "general_operand" "qim,qn")))] + "" + "* +{ + if (CONSTANT_P (operands[1]) && GET_CODE (operands[1]) != CONST_INT) + return AS2 (mov%L0,%1,%k0); + + return AS2 (mov%B0,%b1,%0); +}") + +(define_insn "trunchiqi2" + [(set (match_operand:QI 0 "general_operand" "=q,qm") + (truncate:QI + (match_operand:HI 1 "general_operand" "qim,qn")))] + "" + "* +{ + if (CONSTANT_P (operands[1]) && GET_CODE (operands[1]) != CONST_INT) + return AS2 (mov%L0,%1,%k0); + + return AS2 (mov%B0,%b1,%0); +}") + +(define_insn "truncsihi2" + [(set (match_operand:HI 0 "general_operand" "=r,rm") + (truncate:HI + (match_operand:SI 1 "general_operand" "rim,rn")))] + "" + "* +{ + if (CONSTANT_P (operands[1]) && GET_CODE (operands[1]) != CONST_INT) + return AS2 (mov%L0,%1,%k0); + + return AS2 (mov%W0,%w1,%0); +}") + +;;- zero extension instructions +;; See comments by `andsi' for when andl is faster than movzx. 
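+;; Concretely, the two alternatives for zero_extendhisi2 below are
+;;	movzwl %ax,%eax
+;; and
+;;	andl $0xffff,%eax
+;; (register illustrative); the and form is chosen when source and
+;; destination are the same register and either this is a 486 or that
+;; register is %eax.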
+ +(define_insn "zero_extendhisi2" + [(set (match_operand:SI 0 "general_operand" "=r") + (zero_extend:SI + (match_operand:HI 1 "nonimmediate_operand" "rm")))] + "" + "* +{ + if ((TARGET_486 || REGNO (operands[0]) == 0) + && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1])) + { + rtx xops[2]; + xops[0] = operands[0]; + xops[1] = gen_rtx (CONST_INT, VOIDmode, 0xffff); + output_asm_insn (AS2 (and%L0,%1,%k0), xops); + RET; + } + +#ifdef INTEL_SYNTAX + return AS2 (movzx,%1,%0); +#else + return AS2 (movz%W0%L0,%1,%0); +#endif +}") + +(define_insn "zero_extendqihi2" + [(set (match_operand:HI 0 "general_operand" "=r") + (zero_extend:HI + (match_operand:QI 1 "nonimmediate_operand" "qm")))] + "" + "* +{ + if ((TARGET_486 || REGNO (operands[0]) == 0) + && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1])) + { + rtx xops[2]; + xops[0] = operands[0]; + xops[1] = gen_rtx (CONST_INT, VOIDmode, 0xff); + output_asm_insn (AS2 (and%L0,%1,%k0), xops); + RET; + } + +#ifdef INTEL_SYNTAX + return AS2 (movzx,%1,%0); +#else + return AS2 (movz%B0%W0,%1,%0); +#endif +}") + +(define_insn "zero_extendqisi2" + [(set (match_operand:SI 0 "general_operand" "=r") + (zero_extend:SI + (match_operand:QI 1 "nonimmediate_operand" "qm")))] + "" + "* +{ + if ((TARGET_486 || REGNO (operands[0]) == 0) + && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1])) + { + rtx xops[2]; + xops[0] = operands[0]; + xops[1] = gen_rtx (CONST_INT, VOIDmode, 0xff); + output_asm_insn (AS2 (and%L0,%1,%k0), xops); + RET; + } + +#ifdef INTEL_SYNTAX + return AS2 (movzx,%1,%0); +#else + return AS2 (movz%B0%L0,%1,%0); +#endif +}") + +;;- sign extension instructions + +/* +(define_insn "extendsidi2" + [(set (match_operand:DI 0 "general_operand" "=a") + (sign_extend:DI + (match_operand:SI 1 "nonimmediate_operand" "a")))] + "" + "clq") +*/ + +;; Note that the i386 programmers' manual says that the opcodes +;; are named movsx..., but the assembler on Unix does not accept that. +;; We use what the Unix assembler expects. + +(define_insn "extendhisi2" + [(set (match_operand:SI 0 "general_operand" "=r") + (sign_extend:SI + (match_operand:HI 1 "nonimmediate_operand" "rm")))] + "" + "* +{ + if (REGNO (operands[0]) == 0 + && REG_P (operands[1]) && REGNO (operands[1]) == 0) +#ifdef INTEL_SYNTAX + return \"cwde\"; +#else + return \"cwtl\"; +#endif + +#ifdef INTEL_SYNTAX + return AS2 (movsx,%1,%0); +#else + return AS2 (movs%W0%L0,%1,%0); +#endif +}") + +(define_insn "extendqihi2" + [(set (match_operand:HI 0 "general_operand" "=r") + (sign_extend:HI + (match_operand:QI 1 "nonimmediate_operand" "qm")))] + "" + "* +{ + if (REGNO (operands[0]) == 0 + && REG_P (operands[1]) && REGNO (operands[1]) == 0) + return \"cbtw\"; + +#ifdef INTEL_SYNTAX + return AS2 (movsx,%1,%0); +#else + return AS2 (movs%B0%W0,%1,%0); +#endif +}") + +(define_insn "extendqisi2" + [(set (match_operand:SI 0 "general_operand" "=r") + (sign_extend:SI + (match_operand:QI 1 "nonimmediate_operand" "qm")))] + "" + "* +{ +#ifdef INTEL_SYNTAX + return AS2 (movsx,%1,%0); +#else + return AS2 (movs%B0%L0,%1,%0); +#endif +}") + +;; Conversions between float and double. 
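+;; Widening from SFmode is essentially just a load on the 387, which
+;; holds values in extended precision internally.  For example,
+;;	double f (float *p) { return *p; }
+;; can extend with a single "flds (%eax)" (addressing illustrative),
+;; the conversion happening as the value enters the register stack.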
+
+(define_insn "extendsfdf2"
+  [(set (match_operand:DF 0 "general_operand" "=fm,f,f,!*r")
+	(float_extend:DF
+	 (match_operand:SF 1 "general_operand" "f,fm,!*r,f")))]
+  "TARGET_80387"
+  "*
+{
+  int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+  if (NON_STACK_REG_P (operands[1]))
+    {
+      output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+      RET;
+    }
+
+  if (NON_STACK_REG_P (operands[0]))
+    {
+      output_to_reg (operands[0], stack_top_dies);
+      RET;
+    }
+
+  if (STACK_TOP_P (operands[0]))
+    return AS1 (fld%z1,%y1);
+
+  if (GET_CODE (operands[0]) == MEM)
+    {
+      if (stack_top_dies)
+	return AS1 (fstp%z0,%y0);
+      else
+	return AS1 (fst%z0,%y0);
+    }
+
+  abort ();
+}")
+
+;; This cannot output into an f-reg because there is no way to be sure
+;; of truncating in that case.  Otherwise this is just like a simple move
+;; insn.
+
+(define_insn "truncdfsf2"
+  [(set (match_operand:SF 0 "general_operand" "=m,!*r")
+	(float_truncate:SF
+	 (match_operand:DF 1 "register_operand" "f,f")))]
+  "TARGET_80387"
+  "*
+{
+  int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+  if (NON_STACK_REG_P (operands[0]))
+    {
+      output_to_reg (operands[0], stack_top_dies);
+      RET;
+    }
+  else if (GET_CODE (operands[0]) == MEM)
+    {
+      if (stack_top_dies)
+	return AS1 (fstp%z0,%0);
+      else
+	return AS1 (fst%z0,%0);
+    }
+  else
+    abort ();
+}")
+
+;; The 387 requires that the stack top dies after converting to DImode.
+
+;; Represent an unsigned conversion from MODE_FLOAT to SImode by first
+;; doing a signed conversion to DImode, and then taking just the low
+;; part.
+
+(define_expand "fixuns_truncdfsi2"
+  [(parallel [(set (match_dup 3)
+		   (fix:DI
+		    (fix:DF (match_operand:DF 1 "register_operand" ""))))
+	      (clobber (match_scratch:HI 2 ""))
+	      (clobber (match_dup 1))])
+   (set (match_operand:SI 0 "general_operand" "")
+	(match_dup 4))]
+  "TARGET_80387"
+  "
+{
+  operands[3] = gen_reg_rtx (DImode);
+  operands[4] = gen_lowpart (SImode, operands[3]);
+}")
+
+(define_expand "fixuns_truncsfsi2"
+  [(parallel [(set (match_dup 3)
+		   (fix:DI
+		    (fix:SF (match_operand:SF 1 "register_operand" ""))))
+	      (clobber (match_scratch:HI 2 ""))
+	      (clobber (match_dup 1))])
+   (set (match_operand:SI 0 "general_operand" "")
+	(match_dup 4))]
+  "TARGET_80387"
+  "
+{
+  operands[3] = gen_reg_rtx (DImode);
+  operands[4] = gen_lowpart (SImode, operands[3]);
+}")
+
+;; Signed conversion to DImode.
+
+(define_expand "fix_truncdfdi2"
+  [(parallel [(set (match_operand:DI 0 "general_operand" "")
+		   (fix:DI
+		    (fix:DF (match_operand:DF 1 "register_operand" ""))))
+	      (clobber (match_scratch:HI 2 ""))
+	      (clobber (match_dup 1))])]
+  "TARGET_80387"
+  "
+{
+  operands[1] = copy_to_mode_reg (DFmode, operands[1]);
+}")
+
+(define_expand "fix_truncsfdi2"
+  [(parallel [(set (match_operand:DI 0 "general_operand" "")
+		   (fix:DI
+		    (fix:SF (match_operand:SF 1 "register_operand" ""))))
+	      (clobber (match_scratch:HI 2 ""))
+	      (clobber (match_dup 1))])]
+  "TARGET_80387"
+  "
+{
+  operands[1] = copy_to_mode_reg (SFmode, operands[1]);
+}")
+
+;; These match a signed conversion of either DFmode or SFmode to DImode.
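+;; (The DImode detour made by the fixuns expands above is exact: any
+;; value representable in 32 unsigned bits also fits in 64 signed bits,
+;; so the signed DImode conversion is correct throughout [0, 2**32), and
+;; its low word is precisely the unsigned SImode value wanted.)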
+
+(define_insn ""
+  [(set (match_operand:DI 0 "general_operand" "=m,!*r")
+	(fix:DI (fix:DF (match_operand:DF 1 "register_operand" "f,f"))))
+   (clobber (match_scratch:HI 2 "=&r,&r"))
+   (clobber (match_dup 1))]
+  "TARGET_80387"
+  "* return (char *) output_fix_trunc (insn, operands);")
+
+(define_insn ""
+  [(set (match_operand:DI 0 "general_operand" "=m,!*r")
+	(fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f,f"))))
+   (clobber (match_scratch:HI 2 "=&r,&r"))
+   (clobber (match_dup 1))]
+  "TARGET_80387"
+  "* return (char *) output_fix_trunc (insn, operands);")
+
+;; Signed MODE_FLOAT conversion to SImode.
+
+(define_expand "fix_truncdfsi2"
+  [(parallel [(set (match_operand:SI 0 "general_operand" "")
+		   (fix:SI
+		    (fix:DF (match_operand:DF 1 "register_operand" ""))))
+	      (clobber (match_scratch:HI 2 ""))])]
+  "TARGET_80387"
+  "")
+
+(define_expand "fix_truncsfsi2"
+  [(parallel [(set (match_operand:SI 0 "general_operand" "")
+		   (fix:SI
+		    (fix:SF (match_operand:SF 1 "register_operand" ""))))
+	      (clobber (match_scratch:HI 2 ""))])]
+  "TARGET_80387"
+  "")
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=m,!*r")
+	(fix:SI (fix:DF (match_operand:DF 1 "register_operand" "f,f"))))
+   (clobber (match_scratch:HI 2 "=&r,&r"))]
+  "TARGET_80387"
+  "* return (char *) output_fix_trunc (insn, operands);")
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=m,!*r")
+	(fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f,f"))))
+   (clobber (match_scratch:HI 2 "=&r,&r"))]
+  "TARGET_80387"
+  "* return (char *) output_fix_trunc (insn, operands);")
+
+;; Conversion between fixed point and floating point.
+;; The actual pattern that matches these is at the end of this file.
+
+;; ??? Possibly represent floatunssidf2 here in gcc2.
+
+(define_expand "floatsisf2"
+  [(set (match_operand:SF 0 "register_operand" "")
+	(float:SF (match_operand:SI 1 "general_operand" "")))]
+  "TARGET_80387"
+  "")
+
+(define_expand "floatdisf2"
+  [(set (match_operand:SF 0 "register_operand" "")
+	(float:SF (match_operand:DI 1 "general_operand" "")))]
+  "TARGET_80387"
+  "")
+
+(define_expand "floatsidf2"
+  [(set (match_operand:DF 0 "register_operand" "")
+	(float:DF (match_operand:SI 1 "general_operand" "")))]
+  "TARGET_80387"
+  "")
+
+(define_expand "floatdidf2"
+  [(set (match_operand:DF 0 "register_operand" "")
+	(float:DF (match_operand:DI 1 "general_operand" "")))]
+  "TARGET_80387"
+  "")
+
+;; This will convert from SImode or DImode to MODE_FLOAT.
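+;; For example,
+;;	double f (int *p) { return *p; }
+;; can convert with a single "fildl (%eax)" (addressing illustrative).
+;; When the integer sits in a general register instead, it must first be
+;; spilled, since fild has no register form; that is what the "!*r"
+;; alternatives and output_op_from_reg arrange below.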
+ +(define_insn "" + [(set (match_operand 0 "register_operand" "=f,f") + (match_operator 2 "float_op" + [(match_operand:DI 1 "general_operand" "m,!*r")]))] + "TARGET_80387 && GET_MODE (operands[0]) == GET_MODE (operands[2]) + && GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_FLOAT" + "* +{ + if (NON_STACK_REG_P (operands[1])) + { + output_op_from_reg (operands[1], AS1 (fild%z0,%1)); + RET; + } + else if (GET_CODE (operands[1]) == MEM) + return AS1 (fild%z1,%1); + else + abort (); +}") + +(define_insn "" + [(set (match_operand 0 "register_operand" "=f,f") + (match_operator 2 "float_op" + [(match_operand:SI 1 "general_operand" "m,!*r")]))] + "TARGET_80387 && GET_MODE (operands[0]) == GET_MODE (operands[2]) + && GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_FLOAT" + "* +{ + if (NON_STACK_REG_P (operands[1])) + { + output_op_from_reg (operands[1], AS1 (fild%z0,%1)); + RET; + } + else if (GET_CODE (operands[1]) == MEM) + return AS1 (fild%z1,%1); + else + abort (); +}") + +;;- add instructions + +(define_insn "adddi3" + [(set (match_operand:DI 0 "general_operand" "=&r,ro") + (plus:DI (match_operand:DI 1 "general_operand" "%0,0") + (match_operand:DI 2 "general_operand" "o,riF")))] + "" + "* +{ + rtx low[3], high[3]; + + CC_STATUS_INIT; + + split_di (operands, 3, low, high); + + output_asm_insn (AS2 (add%L0,%2,%0), low); + output_asm_insn (AS2 (adc%L0,%2,%0), high); + RET; +}") + +;; On a 486, it is faster to do movl/addl than to do a single leal if +;; operands[1] and operands[2] are both registers. + +(define_insn "addsi3" + [(set (match_operand:SI 0 "general_operand" "=?r,rm,r") + (plus:SI (match_operand:SI 1 "general_operand" "%r,0,0") + (match_operand:SI 2 "general_operand" "ri,ri,rm")))] + "" + "* +{ + if (REG_P (operands[0]) && REGNO (operands[0]) != REGNO (operands[1])) + { + if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2])) + return AS2 (add%L0,%1,%0); + + if (! TARGET_486 || ! REG_P (operands[2])) + { + CC_STATUS_INIT; + operands[1] = SET_SRC (PATTERN (insn)); + return AS2 (lea%L0,%a1,%0); + } + + output_asm_insn (AS2 (mov%L0,%1,%0), operands); + } + + if (operands[2] == const1_rtx) + return AS1 (inc%L0,%0); + + if (operands[2] == constm1_rtx) + return AS1 (dec%L0,%0); + + return AS2 (add%L0,%2,%0); +}") + +;; ??? `lea' here, for three operand add? If leaw is used, only %bx, +;; %si and %di can appear in SET_SRC, and output_asm_insn might not be +;; able to handle the operand. But leal always works? 
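+;; For SImode, addsi3 above already uses leal for the three-operand
+;; case: with all-distinct registers (chosen here for illustration) it
+;; can emit
+;;	leal (%eax,%ebx),%ecx
+;; which clobbers neither source and leaves the condition codes alone,
+;; hence the CC_STATUS_INIT there.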
+ +(define_insn "addhi3" + [(set (match_operand:HI 0 "general_operand" "=rm,r") + (plus:HI (match_operand:HI 1 "general_operand" "%0,0") + (match_operand:HI 2 "general_operand" "ri,rm")))] + "" + "* +{ + if (operands[2] == const1_rtx) + return AS1 (inc%W0,%0); + + if (operands[2] == constm1_rtx) + return AS1 (dec%W0,%0); + + return AS2 (add%W0,%2,%0); +}") + +(define_insn "addqi3" + [(set (match_operand:QI 0 "general_operand" "=qm,q") + (plus:QI (match_operand:QI 1 "general_operand" "%0,0") + (match_operand:QI 2 "general_operand" "qn,qmn")))] + "" + "* +{ + if (operands[2] == const1_rtx) + return AS1 (inc%B0,%0); + + if (operands[2] == constm1_rtx) + return AS1 (dec%B0,%0); + + return AS2 (add%B0,%2,%0); +}") + +;Lennart Augustsson +;says this pattern just makes slower code: +; pushl %ebp +; addl $-80,(%esp) +;instead of +; leal -80(%ebp),%eax +; pushl %eax +; +;(define_insn "" +; [(set (match_operand:SI 0 "push_operand" "=<") +; (plus:SI (match_operand:SI 1 "general_operand" "%r") +; (match_operand:SI 2 "general_operand" "ri")))] +; "" +; "* +;{ +; rtx xops[4]; +; xops[0] = operands[0]; +; xops[1] = operands[1]; +; xops[2] = operands[2]; +; xops[3] = gen_rtx (MEM, SImode, stack_pointer_rtx); +; output_asm_insn (\"push%z1 %1\", xops); +; output_asm_insn (AS2 (add%z3,%2,%3), xops); +; RET; +;}") + +;; addsi3 is faster, so put this after. + +(define_insn "" + [(set (match_operand:SI 0 "register_operand" "=r") + (match_operand:QI 1 "address_operand" "p"))] + "" + "* +{ + CC_STATUS_INIT; + /* Adding a constant to a register is faster with an add. */ + /* ??? can this ever happen? */ + if (GET_CODE (operands[1]) == PLUS + && GET_CODE (XEXP (operands[1], 1)) == CONST_INT + && rtx_equal_p (operands[0], XEXP (operands[1], 0))) + { + operands[1] = XEXP (operands[1], 1); + + if (operands[1] == const1_rtx) + return AS1 (inc%L0,%0); + + if (operands[1] == constm1_rtx) + return AS1 (dec%L0,%0); + + return AS2 (add%L0,%1,%0); + } + return AS2 (lea%L0,%a1,%0); +}") + +;; The patterns that match these are at the end of this file. 
+ +(define_expand "adddf3" + [(set (match_operand:DF 0 "register_operand" "") + (plus:DF (match_operand:DF 1 "nonimmediate_operand" "") + (match_operand:DF 2 "nonimmediate_operand" "")))] + "TARGET_80387" + "") + +(define_expand "addsf3" + [(set (match_operand:SF 0 "register_operand" "") + (plus:SF (match_operand:SF 1 "nonimmediate_operand" "") + (match_operand:SF 2 "nonimmediate_operand" "")))] + "TARGET_80387" + "") + +;;- subtract instructions + +(define_insn "subdi3" + [(set (match_operand:DI 0 "general_operand" "=&r,ro") + (minus:DI (match_operand:DI 1 "general_operand" "0,0") + (match_operand:DI 2 "general_operand" "o,riF")))] + "" + "* +{ + rtx low[3], high[3]; + + CC_STATUS_INIT; + + split_di (operands, 3, low, high); + + output_asm_insn (AS2 (sub%L0,%2,%0), low); + output_asm_insn (AS2 (sbb%L0,%2,%0), high); + RET; +}") + +(define_insn "subsi3" + [(set (match_operand:SI 0 "general_operand" "=rm,r") + (minus:SI (match_operand:SI 1 "general_operand" "0,0") + (match_operand:SI 2 "general_operand" "ri,rm")))] + "" + "* return AS2 (sub%L0,%2,%0);") + +(define_insn "subhi3" + [(set (match_operand:HI 0 "general_operand" "=rm,r") + (minus:HI (match_operand:HI 1 "general_operand" "0,0") + (match_operand:HI 2 "general_operand" "ri,rm")))] + "" + "* return AS2 (sub%W0,%2,%0);") + +(define_insn "subqi3" + [(set (match_operand:QI 0 "general_operand" "=qm,q") + (minus:QI (match_operand:QI 1 "general_operand" "0,0") + (match_operand:QI 2 "general_operand" "qn,qmn")))] + "" + "* return AS2 (sub%B0,%2,%0);") + +;; The patterns that match these are at the end of this file. + +(define_expand "subdf3" + [(set (match_operand:DF 0 "register_operand" "") + (minus:DF (match_operand:DF 1 "nonimmediate_operand" "") + (match_operand:DF 2 "nonimmediate_operand" "")))] + "TARGET_80387" + "") + +(define_expand "subsf3" + [(set (match_operand:SF 0 "register_operand" "") + (minus:SF (match_operand:SF 1 "nonimmediate_operand" "") + (match_operand:SF 2 "nonimmediate_operand" "")))] + "TARGET_80387" + "") + +;;- multiply instructions + +;(define_insn "mulqi3" +; [(set (match_operand:QI 0 "general_operand" "=a") +; (mult:QI (match_operand:QI 1 "general_operand" "%0") +; (match_operand:QI 2 "general_operand" "qm")))] +; "" +; "imul%B0 %2,%0") + +(define_insn "" + [(set (match_operand:HI 0 "general_operand" "=r") + (mult:SI (match_operand:HI 1 "general_operand" "%0") + (match_operand:HI 2 "general_operand" "r")))] + "GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0x80" + "* return AS2 (imul%W0,%2,%0);") + +(define_insn "mulhi3" + [(set (match_operand:HI 0 "general_operand" "=r,r") + (mult:SI (match_operand:HI 1 "general_operand" "%0,rm") + (match_operand:HI 2 "general_operand" "g,i")))] + "" + "* +{ + if (GET_CODE (operands[1]) == REG + && REGNO (operands[1]) == REGNO (operands[0]) + && (GET_CODE (operands[2]) == MEM || GET_CODE (operands[2]) == REG)) + /* Assembler has weird restrictions. 
*/ + return AS2 (imul%W0,%2,%0); + return AS3 (imul%W0,%2,%1,%0); +}") + +(define_insn "" + [(set (match_operand:SI 0 "general_operand" "=r") + (mult:SI (match_operand:SI 1 "general_operand" "%0") + (match_operand:SI 2 "general_operand" "r")))] + "GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0x80" + "* return AS2 (imul%L0,%2,%0);") + +(define_insn "mulsi3" + [(set (match_operand:SI 0 "general_operand" "=r,r") + (mult:SI (match_operand:SI 1 "general_operand" "%0,rm") + (match_operand:SI 2 "general_operand" "g,i")))] + "" + "* +{ + if (GET_CODE (operands[1]) == REG + && REGNO (operands[1]) == REGNO (operands[0]) + && (GET_CODE (operands[2]) == MEM || GET_CODE (operands[2]) == REG)) + /* Assembler has weird restrictions. */ + return AS2 (imul%L0,%2,%0); + return AS3 (imul%L0,%2,%1,%0); +}") + +(define_insn "mulqihi3_1" + [(set (match_operand:HI 0 "general_operand" "=a") + (mult:SI (zero_extend:HI + (match_operand:QI 1 "nonimmediate_operand" "%0")) + (zero_extend:HI + (match_operand:QI 2 "nonimmediate_operand" "qm"))))] + "" + "mul%B0 %2") + +;; The patterns that match these are at the end of this file. + +(define_expand "muldf3" + [(set (match_operand:DF 0 "register_operand" "") + (mult:DF (match_operand:DF 1 "nonimmediate_operand" "") + (match_operand:DF 2 "nonimmediate_operand" "")))] + "TARGET_80387" + "") + +(define_expand "mulsf3" + [(set (match_operand:SF 0 "register_operand" "") + (mult:SF (match_operand:SF 1 "nonimmediate_operand" "") + (match_operand:SF 2 "nonimmediate_operand" "")))] + "TARGET_80387" + "") + +;;- divide instructions + +(define_insn "divqi3" + [(set (match_operand:QI 0 "general_operand" "=a") + (div:QI (match_operand:HI 1 "general_operand" "0") + (match_operand:QI 2 "general_operand" "qm")))] + "" + "idiv%B0 %2") + +(define_insn "udivqi3" + [(set (match_operand:QI 0 "general_operand" "=a") + (udiv:QI (match_operand:HI 1 "general_operand" "0") + (match_operand:QI 2 "general_operand" "qm")))] + "" + "div%B0 %2") + +;; The patterns that match these are at the end of this file. + +(define_expand "divdf3" + [(set (match_operand:DF 0 "register_operand" "") + (div:DF (match_operand:DF 1 "nonimmediate_operand" "") + (match_operand:DF 2 "nonimmediate_operand" "")))] + "TARGET_80387" + "") + +(define_expand "divsf3" + [(set (match_operand:SF 0 "register_operand" "") + (div:SF (match_operand:SF 1 "nonimmediate_operand" "") + (match_operand:SF 2 "nonimmediate_operand" "")))] + "TARGET_80387" + "") + +;; Remainder instructions. + +(define_insn "divmodsi4" + [(set (match_operand:SI 0 "general_operand" "=a") + (div:SI (match_operand:SI 1 "general_operand" "0") + (match_operand:SI 2 "general_operand" "rm"))) + (set (match_operand:SI 3 "general_operand" "=&d") + (mod:SI (match_dup 1) (match_dup 2)))] + "" + "* +{ +#ifdef INTEL_SYNTAX + output_asm_insn (\"cdq\", operands); +#else + output_asm_insn (\"cltd\", operands); +#endif + return AS1 (idiv%L0,%2); +}") + +(define_insn "divmodhi4" + [(set (match_operand:HI 0 "general_operand" "=a") + (div:HI (match_operand:HI 1 "general_operand" "0") + (match_operand:HI 2 "general_operand" "rm"))) + (set (match_operand:HI 3 "general_operand" "=&d") + (mod:HI (match_dup 1) (match_dup 2)))] + "" + "cwtd\;idiv%W0 %2") + +;; ??? Can we make gcc zero extend operand[0]? 
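+;; In the unsigned patterns below the high half of the dividend is
+;; cleared rather than sign-extended, so the emitted code looks like
+;;	xorl %edx,%edx
+;;	divl %ecx
+;; (divisor register illustrative; %eax and %edx are fixed by the "a"
+;; and "d" constraints), with the quotient left in %eax and the
+;; remainder in %edx.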
+(define_insn "udivmodsi4" + [(set (match_operand:SI 0 "general_operand" "=a") + (udiv:SI (match_operand:SI 1 "general_operand" "0") + (match_operand:SI 2 "general_operand" "rm"))) + (set (match_operand:SI 3 "general_operand" "=&d") + (umod:SI (match_dup 1) (match_dup 2)))] + "" + "* +{ + output_asm_insn (AS2 (xor%L3,%3,%3), operands); + return AS1 (div%L0,%2); +}") + +;; ??? Can we make gcc zero extend operand[0]? +(define_insn "udivmodhi4" + [(set (match_operand:HI 0 "general_operand" "=a") + (udiv:HI (match_operand:HI 1 "general_operand" "0") + (match_operand:HI 2 "general_operand" "rm"))) + (set (match_operand:HI 3 "general_operand" "=&d") + (umod:HI (match_dup 1) (match_dup 2)))] + "" + "* +{ + output_asm_insn (AS2 (xor%W0,%3,%3), operands); + return AS1 (div%W0,%2); +}") + +/* +;;this should be a valid double division which we may want to add + +(define_insn "" + [(set (match_operand:SI 0 "general_operand" "=a") + (udiv:DI (match_operand:DI 1 "general_operand" "a") + (match_operand:SI 2 "general_operand" "rm"))) + (set (match_operand:SI 3 "general_operand" "=d") + (umod:SI (match_dup 1) (match_dup 2)))] + "" + "div%L0 %2,%0") +*/ + +;;- and instructions + +;; On i386, +;; movzbl %bl,%ebx +;; is faster than +;; andl $255,%ebx +;; +;; but if the reg is %eax, then the "andl" is faster. +;; +;; On i486, the "andl" is always faster than the "movzbl". +;; +;; On both i386 and i486, a three operand AND is as fast with movzbl or +;; movzwl as with andl, if operands[0] != operands[1]. + +;; The `r' in `rm' for operand 3 looks redundant, but it causes +;; optional reloads to be generated if op 3 is a pseudo in a stack slot. + +;; ??? What if we only change one byte of an offsettable memory reference? +(define_insn "andsi3" + [(set (match_operand:SI 0 "general_operand" "=r,r,rm,r") + (and:SI (match_operand:SI 1 "general_operand" "%rm,qm,0,0") + (match_operand:SI 2 "general_operand" "L,K,ri,rm")))] + "" + "* +{ + if (GET_CODE (operands[2]) == CONST_INT + && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))) + { + if (INTVAL (operands[2]) == 0xffff && REG_P (operands[0]) + && (! REG_P (operands[1]) + || REGNO (operands[0]) != 0 || REGNO (operands[1]) != 0) + && (! TARGET_486 || ! rtx_equal_p (operands[0], operands[1]))) + { + /* ??? tege: Should forget CC_STATUS only if we clobber a + remembered operand. Fix that later. */ + CC_STATUS_INIT; +#ifdef INTEL_SYNTAX + return AS2 (movzx,%w1,%0); +#else + return AS2 (movz%W0%L0,%w1,%0); +#endif + } + + if (INTVAL (operands[2]) == 0xff && REG_P (operands[0]) + && !(REG_P (operands[1]) && NON_QI_REG_P (operands[1])) + && (! REG_P (operands[1]) + || REGNO (operands[0]) != 0 || REGNO (operands[1]) != 0) + && (! TARGET_486 || ! rtx_equal_p (operands[0], operands[1]))) + { + /* ??? tege: Should forget CC_STATUS only if we clobber a + remembered operand. Fix that later. 
*/ + CC_STATUS_INIT; +#ifdef INTEL_SYNTAX + return AS2 (movzx,%b1,%0); +#else + return AS2 (movz%B0%L0,%b1,%0); +#endif + } + + if (QI_REG_P (operands[0]) && ~(INTVAL (operands[2]) | 0xff) == 0) + { + CC_STATUS_INIT; + + if (INTVAL (operands[2]) == 0xffffff00) + { + operands[2] = const0_rtx; + return AS2 (mov%B0,%2,%b0); + } + + operands[2] = gen_rtx (CONST_INT, VOIDmode, + INTVAL (operands[2]) & 0xff); + return AS2 (and%B0,%2,%b0); + } + + if (QI_REG_P (operands[0]) && ~(INTVAL (operands[2]) | 0xff00) == 0) + { + CC_STATUS_INIT; + + if (INTVAL (operands[2]) == 0xffff00ff) + { + operands[2] = const0_rtx; + return AS2 (mov%B0,%2,%h0); + } + + operands[2] = gen_rtx (CONST_INT, VOIDmode, + INTVAL (operands[2]) >> 8); + return AS2 (and%B0,%2,%h0); + } + + if (GET_CODE (operands[0]) == MEM && INTVAL (operands[2]) == 0xffff0000) + { + operands[2] = const0_rtx; + return AS2 (mov%W0,%2,%w0); + } + } + + return AS2 (and%L0,%2,%0); +}") + +(define_insn "andhi3" + [(set (match_operand:HI 0 "general_operand" "=rm,r") + (and:HI (match_operand:HI 1 "general_operand" "%0,0") + (match_operand:HI 2 "general_operand" "ri,rm")))] + "" + "* +{ + if (GET_CODE (operands[2]) == CONST_INT + && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))) + { + /* Can we ignore the upper byte? */ + if (! NON_QI_REG_P (operands[0]) + && (INTVAL (operands[2]) & 0xff00) == 0xff00) + { + CC_STATUS_INIT; + + if ((INTVAL (operands[2]) & 0xff) == 0) + { + operands[2] = const0_rtx; + return AS2 (mov%B0,%2,%b0); + } + + operands[2] = gen_rtx (CONST_INT, VOIDmode, + INTVAL (operands[2]) & 0xff); + return AS2 (and%B0,%2,%b0); + } + + /* Can we ignore the lower byte? */ + /* ??? what about offsettable memory references? */ + if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & 0xff) == 0xff) + { + CC_STATUS_INIT; + + if ((INTVAL (operands[2]) & 0xff00) == 0) + { + operands[2] = const0_rtx; + return AS2 (mov%B0,%2,%h0); + } + + operands[2] = gen_rtx (CONST_INT, VOIDmode, + (INTVAL (operands[2]) >> 8) & 0xff); + return AS2 (and%B0,%2,%h0); + } + } + + return AS2 (and%W0,%2,%0); +}") + +(define_insn "andqi3" + [(set (match_operand:QI 0 "general_operand" "=qm,q") + (and:QI (match_operand:QI 1 "general_operand" "%0,0") + (match_operand:QI 2 "general_operand" "qn,qmn")))] + "" + "* return AS2 (and%B0,%2,%0);") + +/* I am nervous about these two.. add them later.. +;I presume this means that we have something in say op0= eax which is small +;and we want to and it with memory so we can do this by just an +;andb m,%al and have success. +(define_insn "" + [(set (match_operand:SI 0 "general_operand" "=r") + (and:SI (zero_extend:SI + (match_operand:HI 1 "nonimmediate_operand" "rm")) + (match_operand:SI 2 "general_operand" "0")))] + "GET_CODE (operands[2]) == CONST_INT + && (unsigned int) INTVAL (operands[2]) < (1 << GET_MODE_BITSIZE (HImode))" + "and%W0 %1,%0") + +(define_insn "" + [(set (match_operand:SI 0 "general_operand" "=q") + (and:SI + (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "qm")) + (match_operand:SI 2 "general_operand" "0")))] + "GET_CODE (operands[2]) == CONST_INT + && (unsigned int) INTVAL (operands[2]) < (1 << GET_MODE_BITSIZE (QImode))" + "and%L0 %1,%0") + +*/ + +;;- Bit set (inclusive or) instructions + +;; ??? What if we only change one byte of an offsettable memory reference? 
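+;; As with and above, the ior and xor patterns below shrink an operation
+;; touching only one byte of a q-register into a byte op, e.g.
+;;	orb $0x80,%ah
+;; in place of
+;;	orl $0x8000,%eax
+;; (register illustrative).  The flags then no longer describe the full
+;; SImode result, hence the CC_STATUS_INIT calls.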
+(define_insn "iorsi3" + [(set (match_operand:SI 0 "general_operand" "=rm,r") + (ior:SI (match_operand:SI 1 "general_operand" "%0,0") + (match_operand:SI 2 "general_operand" "ri,rm")))] + "" + "* +{ + if (GET_CODE (operands[2]) == CONST_INT + && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))) + { + if (! NON_QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff) == 0) + { + CC_STATUS_INIT; + + if (INTVAL (operands[2]) == 0xff) + return AS2 (mov%B0,%2,%b0); + + return AS2 (or%B0,%2,%b0); + } + + if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff00) == 0) + { + CC_STATUS_INIT; + operands[2] = gen_rtx (CONST_INT, VOIDmode, + INTVAL (operands[2]) >> 8); + + if (INTVAL (operands[2]) == 0xff) + return AS2 (mov%B0,%2,%h0); + + return AS2 (or%B0,%2,%h0); + } + } + + return AS2 (or%L0,%2,%0); +}") + +(define_insn "iorhi3" + [(set (match_operand:HI 0 "general_operand" "=rm,r") + (ior:HI (match_operand:HI 1 "general_operand" "%0,0") + (match_operand:HI 2 "general_operand" "ri,rm")))] + "" + "* +{ + if (GET_CODE (operands[2]) == CONST_INT + && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))) + { + /* Can we ignore the upper byte? */ + if (! NON_QI_REG_P (operands[0]) + && (INTVAL (operands[2]) & 0xff00) == 0) + { + CC_STATUS_INIT; + if (INTVAL (operands[2]) & 0xffff0000) + operands[2] = gen_rtx (CONST_INT, VOIDmode, + INTVAL (operands[2]) & 0xffff); + + if (INTVAL (operands[2]) == 0xff) + return AS2 (mov%B0,%2,%b0); + + return AS2 (or%B0,%2,%b0); + } + + /* Can we ignore the lower byte? */ + /* ??? what about offsettable memory references? */ + if (QI_REG_P (operands[0]) + && (INTVAL (operands[2]) & 0xff) == 0) + { + CC_STATUS_INIT; + operands[2] = gen_rtx (CONST_INT, VOIDmode, + (INTVAL (operands[2]) >> 8) & 0xff); + + if (INTVAL (operands[2]) == 0xff) + return AS2 (mov%B0,%2,%h0); + + return AS2 (or%B0,%2,%h0); + } + } + + return AS2 (or%W0,%2,%0); +}") + +(define_insn "iorqi3" + [(set (match_operand:QI 0 "general_operand" "=qm,q") + (ior:QI (match_operand:QI 1 "general_operand" "%0,0") + (match_operand:QI 2 "general_operand" "qn,qmn")))] + "" + "* return AS2 (or%B0,%2,%0);") + +;;- xor instructions + +;; ??? What if we only change one byte of an offsettable memory reference? +(define_insn "xorsi3" + [(set (match_operand:SI 0 "general_operand" "=rm,r") + (xor:SI (match_operand:SI 1 "general_operand" "%0,0") + (match_operand:SI 2 "general_operand" "ri,rm")))] + "" + "* +{ + if (GET_CODE (operands[2]) == CONST_INT + && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))) + { + if (! NON_QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff) == 0) + { + CC_STATUS_INIT; + + if (INTVAL (operands[2]) == 0xff) + return AS1 (not%B0,%0); + + return AS2 (xor%B0,%2,%b0); + } + + if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff00) == 0) + { + CC_STATUS_INIT; + operands[2] = gen_rtx (CONST_INT, VOIDmode, + INTVAL (operands[2]) >> 8); + + if (INTVAL (operands[2]) == 0xff) + return AS1 (not%B0,%h0); + + return AS2 (xor%B0,%2,%h0); + } + } + + return AS2 (xor%L0,%2,%0); +}") + +(define_insn "xorhi3" + [(set (match_operand:HI 0 "general_operand" "=rm,r") + (xor:HI (match_operand:HI 1 "general_operand" "%0,0") + (match_operand:HI 2 "general_operand" "ri,rm")))] + "" + "* +{ + if (GET_CODE (operands[2]) == CONST_INT + && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))) + { + /* Can we ignore the upper byte? */ + if (! 
NON_QI_REG_P (operands[0])
+	  && (INTVAL (operands[2]) & 0xff00) == 0)
+	{
+	  CC_STATUS_INIT;
+	  if (INTVAL (operands[2]) & 0xffff0000)
+	    operands[2] = gen_rtx (CONST_INT, VOIDmode,
+				   INTVAL (operands[2]) & 0xffff);
+
+	  if (INTVAL (operands[2]) == 0xff)
+	    return AS1 (not%B0,%0);
+
+	  return AS2 (xor%B0,%2,%b0);
+	}
+
+      /* Can we ignore the lower byte? */
+      /* ??? what about offsettable memory references? */
+      if (QI_REG_P (operands[0])
+	  && (INTVAL (operands[2]) & 0xff) == 0)
+	{
+	  CC_STATUS_INIT;
+	  operands[2] = gen_rtx (CONST_INT, VOIDmode,
+				 (INTVAL (operands[2]) >> 8) & 0xff);
+
+	  if (INTVAL (operands[2]) == 0xff)
+	    return AS1 (not%B0,%h0);
+
+	  return AS2 (xor%B0,%2,%h0);
+	}
+    }
+
+  return AS2 (xor%W0,%2,%0);
+}")
+
+(define_insn "xorqi3"
+  [(set (match_operand:QI 0 "general_operand" "=qm,q")
+	(xor:QI (match_operand:QI 1 "general_operand" "%0,0")
+		(match_operand:QI 2 "general_operand" "qn,qm")))]
+  ""
+  "* return AS2 (xor%B0,%2,%0);")
+
+;;- negation instructions
+
+(define_insn "negdi2"
+  [(set (match_operand:DI 0 "general_operand" "=&ro")
+	(neg:DI (match_operand:DI 1 "general_operand" "0")))]
+  ""
+  "*
+{
+  rtx xops[2], low[1], high[1];
+
+  CC_STATUS_INIT;
+
+  split_di (operands, 1, low, high);
+  xops[0] = const0_rtx;
+  xops[1] = high[0];
+
+  output_asm_insn (AS1 (neg%L0,%0), low);
+  output_asm_insn (AS2 (adc%L1,%0,%1), xops);
+  output_asm_insn (AS1 (neg%L0,%0), high);
+  RET;
+}")
+
+(define_insn "negsi2"
+  [(set (match_operand:SI 0 "general_operand" "=rm")
+	(neg:SI (match_operand:SI 1 "general_operand" "0")))]
+  ""
+  "neg%L0 %0")
+
+(define_insn "neghi2"
+  [(set (match_operand:HI 0 "general_operand" "=rm")
+	(neg:HI (match_operand:HI 1 "general_operand" "0")))]
+  ""
+  "neg%W0 %0")
+
+(define_insn "negqi2"
+  [(set (match_operand:QI 0 "general_operand" "=qm")
+	(neg:QI (match_operand:QI 1 "general_operand" "0")))]
+  ""
+  "neg%B0 %0")
+
+(define_insn "negsf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+	(neg:SF (match_operand:SF 1 "general_operand" "0")))]
+  "TARGET_80387"
+  "fchs")
+
+(define_insn "negdf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(neg:DF (match_operand:DF 1 "general_operand" "0")))]
+  "TARGET_80387"
+  "fchs")
+
+(define_insn ""
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(neg:DF (float_extend:DF (match_operand:SF 1 "general_operand" "0"))))]
+  "TARGET_80387"
+  "fchs")
+
+;; Absolute value instructions
+
+(define_insn "abssf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+	(abs:SF (match_operand:SF 1 "general_operand" "0")))]
+  "TARGET_80387"
+  "fabs")
+
+(define_insn "absdf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(abs:DF (match_operand:DF 1 "general_operand" "0")))]
+  "TARGET_80387"
+  "fabs")
+
+(define_insn ""
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(abs:DF (float_extend:DF (match_operand:SF 1 "general_operand" "0"))))]
+  "TARGET_80387"
+  "fabs")
+
+(define_insn "sqrtsf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+	(sqrt:SF (match_operand:SF 1 "general_operand" "0")))]
+  "TARGET_80387"
+  "fsqrt")
+
+(define_insn "sqrtdf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(sqrt:DF (match_operand:DF 1 "general_operand" "0")))]
+  "TARGET_80387"
+  "fsqrt")
+
+(define_insn ""
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(sqrt:DF (float_extend:DF
+		  (match_operand:SF 1 "general_operand" "0"))))]
+  "TARGET_80387"
+  "fsqrt")
+
+;;- one's complement instructions
+
+(define_insn "one_cmplsi2"
+  [(set (match_operand:SI 0 "general_operand" "=rm")
+	(not:SI (match_operand:SI 1
"general_operand" "0")))] + "" + "not%L0 %0") + +(define_insn "one_cmplhi2" + [(set (match_operand:HI 0 "general_operand" "=rm") + (not:HI (match_operand:HI 1 "general_operand" "0")))] + "" + "not%W0 %0") + +(define_insn "one_cmplqi2" + [(set (match_operand:QI 0 "general_operand" "=qm") + (not:QI (match_operand:QI 1 "general_operand" "0")))] + "" + "not%B0 %0") + +;;- arithmetic shift instructions + +;; DImode shifts are implemented using the i386 "shift double" opcode, +;; which is written as "sh[lr]d[lw] imm,reg,reg/mem". If the shift count +;; is variable, then the count is in %cl and the "imm" operand is dropped +;; from the assembler input. + +;; This instruction shifts the target reg/mem as usual, but instead of +;; shifting in zeros, bits are shifted in from reg operand. If the insn +;; is a left shift double, bits are taken from the high order bits of +;; reg, else if the insn is a shift right double, bits are taken from the +;; low order bits of reg. So if %eax is "1234" and %edx is "5678", +;; "shldl $8,%edx,%eax" leaves %edx unchanged and sets %eax to "2345". + +;; Since sh[lr]d does not change the `reg' operand, that is done +;; separately, making all shifts emit pairs of shift double and normal +;; shift. Since sh[lr]d does not shift more than 31 bits, and we wish to +;; support a 63 bit shift, each shift where the count is in a reg expands +;; to three pairs. If the overall shift is by N bits, then the first two +;; pairs shift by N / 2 and the last pair by N & 1. + +;; If the shift count is a constant, we need never emit more than one +;; shift pair, instead using moves and sign extension for counts greater +;; than 31. + +(define_insn "ashldi3" + [(set (match_operand:DI 0 "general_operand" "=&r") + (ashift:DI (match_operand:DI 1 "general_operand" "0") + (match_operand:QI 2 "general_operand" "cJ"))) + (clobber (match_dup 2))] + "" + "* +{ + rtx xops[4], low[1], high[1]; + + CC_STATUS_INIT; + + split_di (operands, 1, low, high); + xops[0] = operands[2]; + xops[1] = const1_rtx; + xops[2] = low[0]; + xops[3] = high[0]; + + if (REG_P (xops[0])) /* If shift count in %cl */ + { + output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */ + + output_asm_insn (AS2 (shld%L3,%2,%3), xops); + output_asm_insn (AS2 (sal%L2,%0,%2), xops); + output_asm_insn (AS2 (shld%L3,%2,%3), xops); + output_asm_insn (AS2 (sal%L2,%0,%2), xops); + + xops[1] = gen_rtx (CONST_INT, VOIDmode, 7); /* shift count & 1 */ + + output_asm_insn (AS2 (shr%B0,%1,%0), xops); + + output_asm_insn (AS2 (shld%L3,%2,%3), xops); + output_asm_insn (AS2 (sal%L2,%0,%2), xops); + } + else if (GET_CODE (xops[0]) == CONST_INT) + { + if (INTVAL (xops[0]) > 31) + { + output_asm_insn (AS2 (mov%L3,%2,%3), xops); /* Fast shift by 32 */ + output_asm_insn (AS2 (xor%L2,%2,%2), xops); + + if (INTVAL (xops[0]) > 32) + { + xops[0] = gen_rtx (CONST_INT, VOIDmode, INTVAL (xops[0]) - 32); + + output_asm_insn (AS2 (sal%3,%0,%3), xops); /* Remaining shift */ + } + } + else + { + output_asm_insn (AS3 (shld%L3,%0,%2,%3), xops); + output_asm_insn (AS2 (sal%L2,%0,%2), xops); + } + } + RET; +}") + +;; On i386 and i486, "addl reg,reg" is faster than "sall $1,reg" +;; On i486, movl/sall appears slightly faster than leal, but the leal +;; is smaller - use leal for now unless the shift count is 1. 
+ +(define_insn "ashlsi3" + [(set (match_operand:SI 0 "general_operand" "=r,rm") + (ashift:SI (match_operand:SI 1 "general_operand" "r,0") + (match_operand:SI 2 "general_operand" "M,cI")))] + "" + "* +{ + if (REG_P (operands[0]) && REGNO (operands[0]) != REGNO (operands[1])) + { + if (TARGET_486 && INTVAL (operands[2]) == 1) + { + output_asm_insn (AS2 (mov%L0,%1,%0), operands); + return AS2 (add%L0,%1,%0); + } + else + { + CC_STATUS_INIT; + operands[1] = gen_rtx (MULT, SImode, operands[1], + gen_rtx (CONST_INT, VOIDmode, + 1 << INTVAL (operands[2]))); + return AS2 (lea%L0,%a1,%0); + } + } + + if (REG_P (operands[2])) + return AS2 (sal%L0,%b2,%0); + + if (REG_P (operands[0]) && operands[2] == const1_rtx) + return AS2 (add%L0,%0,%0); + + return AS2 (sal%L0,%2,%0); +}") + +(define_insn "ashlhi3" + [(set (match_operand:HI 0 "general_operand" "=rm") + (ashift:HI (match_operand:HI 1 "general_operand" "0") + (match_operand:HI 2 "general_operand" "cI")))] + "" + "* +{ + if (REG_P (operands[2])) + return AS2 (sal%W0,%b2,%0); + + if (REG_P (operands[0]) && operands[2] == const1_rtx) + return AS2 (add%W0,%0,%0); + + return AS2 (sal%W0,%2,%0); +}") + +(define_insn "ashlqi3" + [(set (match_operand:QI 0 "general_operand" "=qm") + (ashift:QI (match_operand:QI 1 "general_operand" "0") + (match_operand:QI 2 "general_operand" "cI")))] + "" + "* +{ + if (REG_P (operands[2])) + return AS2 (sal%B0,%b2,%0); + + if (REG_P (operands[0]) && operands[2] == const1_rtx) + return AS2 (add%B0,%0,%0); + + return AS2 (sal%B0,%2,%0); +}") + +;; See comment above `ashldi3' about how this works. + +(define_insn "ashrdi3" + [(set (match_operand:DI 0 "general_operand" "=&r") + (ashiftrt:DI (match_operand:DI 1 "general_operand" "0") + (match_operand:QI 2 "general_operand" "cJ"))) + (clobber (match_dup 2))] + "" + "* +{ + rtx xops[5], low[1], high[1]; + + CC_STATUS_INIT; + + split_di (operands, 1, low, high); + xops[0] = operands[2]; + xops[1] = const1_rtx; + xops[2] = low[0]; + xops[3] = high[0]; + + if (REG_P (xops[0])) /* If shift count in %cl */ + { + output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */ + + output_asm_insn (AS2 (shrd%L2,%3,%2), xops); + output_asm_insn (AS2 (sar%L3,%0,%3), xops); + output_asm_insn (AS2 (shrd%L2,%3,%2), xops); + output_asm_insn (AS2 (sar%L3,%0,%3), xops); + + xops[1] = gen_rtx (CONST_INT, VOIDmode, 7); /* shift count & 1 */ + + output_asm_insn (AS2 (shr%B0,%1,%0), xops); + + output_asm_insn (AS2 (shrd%L2,%3,%2), xops); + output_asm_insn (AS2 (sar%L3,%0,%3), xops); + } + else if (GET_CODE (xops[0]) == CONST_INT) + { + if (INTVAL (xops[0]) > 31) + { + xops[1] = gen_rtx (CONST_INT, VOIDmode, 31); + output_asm_insn (AS2 (mov%L2,%3,%2), xops); + output_asm_insn (AS2 (sar%L3,%1,%3), xops); /* shift by 32 */ + + if (INTVAL (xops[0]) > 32) + { + xops[0] = gen_rtx (CONST_INT, VOIDmode, INTVAL (xops[0]) - 32); + + output_asm_insn (AS2 (sar%2,%0,%2), xops); /* Remaining shift */ + } + } + else + { + output_asm_insn (AS3 (shrd%L2,%0,%3,%2), xops); + output_asm_insn (AS2 (sar%L3,%0,%3), xops); + } + } + RET; +}") + +(define_insn "ashrsi3" + [(set (match_operand:SI 0 "general_operand" "=rm") + (ashiftrt:SI (match_operand:SI 1 "general_operand" "0") + (match_operand:SI 2 "general_operand" "cI")))] + "" + "* +{ + if (REG_P (operands[2])) + return AS2 (sar%L0,%b2,%0); + else + return AS2 (sar%L0,%2,%0); +}") + +(define_insn "ashrhi3" + [(set (match_operand:HI 0 "general_operand" "=rm") + (ashiftrt:HI (match_operand:HI 1 "general_operand" "0") + (match_operand:HI 2 "general_operand" "cI")))] + "" 
+  "*
+{
+  if (REG_P (operands[2]))
+    return AS2 (sar%W0,%b2,%0);
+  else
+    return AS2 (sar%W0,%2,%0);
+}")
+
+(define_insn "ashrqi3"
+  [(set (match_operand:QI 0 "general_operand" "=qm")
+	(ashiftrt:QI (match_operand:QI 1 "general_operand" "0")
+		     (match_operand:QI 2 "general_operand" "cI")))]
+  ""
+  "*
+{
+  if (REG_P (operands[2]))
+    return AS2 (sar%B0,%b2,%0);
+  else
+    return AS2 (sar%B0,%2,%0);
+}")
+
+;;- logical shift instructions
+
+;; See comment above `ashldi3' about how this works.
+
+(define_insn "lshrdi3"
+  [(set (match_operand:DI 0 "general_operand" "=&r")
+	(lshiftrt:DI (match_operand:DI 1 "general_operand" "0")
+		     (match_operand:QI 2 "general_operand" "cJ")))
+   (clobber (match_dup 2))]
+  ""
+  "*
+{
+  rtx xops[5], low[1], high[1];
+
+  CC_STATUS_INIT;
+
+  split_di (operands, 1, low, high);
+  xops[0] = operands[2];
+  xops[1] = const1_rtx;
+  xops[2] = low[0];
+  xops[3] = high[0];
+
+  if (REG_P (xops[0]))		/* If shift count in %cl */
+    {
+      output_asm_insn (AS2 (ror%B0,%1,%0), xops);	/* shift count / 2 */
+
+      output_asm_insn (AS2 (shrd%L2,%3,%2), xops);
+      output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+      output_asm_insn (AS2 (shrd%L2,%3,%2), xops);
+      output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+
+      xops[1] = gen_rtx (CONST_INT, VOIDmode, 7);	/* shift count & 1 */
+
+      output_asm_insn (AS2 (shr%B0,%1,%0), xops);
+
+      output_asm_insn (AS2 (shrd%L2,%3,%2), xops);
+      output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+    }
+  else if (GET_CODE (xops[0]) == CONST_INT)
+    {
+      if (INTVAL (xops[0]) > 31)
+	{
+	  output_asm_insn (AS2 (mov%L2,%3,%2), xops);	/* Fast shift by 32 */
+	  output_asm_insn (AS2 (xor%L3,%3,%3), xops);
+
+	  if (INTVAL (xops[0]) > 32)
+	    {
+	      xops[0] = gen_rtx (CONST_INT, VOIDmode, INTVAL (xops[0]) - 32);
+
+	      output_asm_insn (AS2 (shr%L2,%0,%2), xops);	/* Remaining shift */
+	    }
+	}
+      else
+	{
+	  output_asm_insn (AS3 (shrd%L2,%0,%3,%2), xops);
+	  output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+	}
+    }
+  RET;
+}")
+
+(define_insn "lshrsi3"
+  [(set (match_operand:SI 0 "general_operand" "=rm")
+	(lshiftrt:SI (match_operand:SI 1 "general_operand" "0")
+		     (match_operand:SI 2 "general_operand" "cI")))]
+  ""
+  "*
+{
+  if (REG_P (operands[2]))
+    return AS2 (shr%L0,%b2,%0);
+  else
+    return AS2 (shr%L0,%2,%0);
+}")
+
+(define_insn "lshrhi3"
+  [(set (match_operand:HI 0 "general_operand" "=rm")
+	(lshiftrt:HI (match_operand:HI 1 "general_operand" "0")
+		     (match_operand:HI 2 "general_operand" "cI")))]
+  ""
+  "*
+{
+  if (REG_P (operands[2]))
+    return AS2 (shr%W0,%b2,%0);
+  else
+    return AS2 (shr%W0,%2,%0);
+}")
+
+(define_insn "lshrqi3"
+  [(set (match_operand:QI 0 "general_operand" "=qm")
+	(lshiftrt:QI (match_operand:QI 1 "general_operand" "0")
+		     (match_operand:QI 2 "general_operand" "cI")))]
+  ""
+  "*
+{
+  if (REG_P (operands[2]))
+    return AS2 (shr%B0,%b2,%0);
+  else
+    return AS2 (shr%B0,%2,%0);
+}")
+
+;;- rotate instructions
+
+(define_insn "rotlsi3"
+  [(set (match_operand:SI 0 "general_operand" "=rm")
+	(rotate:SI (match_operand:SI 1 "general_operand" "0")
+		   (match_operand:SI 2 "general_operand" "cI")))]
+  ""
+  "*
+{
+  if (REG_P (operands[2]))
+    return AS2 (rol%L0,%b2,%0);
+  else
+    return AS2 (rol%L0,%2,%0);
+}")
+
+(define_insn "rotlhi3"
+  [(set (match_operand:HI 0 "general_operand" "=rm")
+	(rotate:HI (match_operand:HI 1 "general_operand" "0")
+		   (match_operand:HI 2 "general_operand" "cI")))]
+  ""
+  "*
+{
+  if (REG_P (operands[2]))
+    return AS2 (rol%W0,%b2,%0);
+  else
+    return AS2 (rol%W0,%2,%0);
+}")
+
+(define_insn "rotlqi3"
+  [(set (match_operand:QI 0 "general_operand" "=qm")
+	(rotate:QI (match_operand:QI 1
"general_operand" "0") + (match_operand:QI 2 "general_operand" "cI")))] + "" + "* +{ + if (REG_P (operands[2])) + return AS2 (rol%B0,%b2,%0); + else + return AS2 (rol%B0,%2,%0); +}") + +(define_insn "rotrsi3" + [(set (match_operand:SI 0 "general_operand" "=rm") + (rotatert:SI (match_operand:SI 1 "general_operand" "0") + (match_operand:SI 2 "general_operand" "cI")))] + "" + "* +{ + if (REG_P (operands[2])) + return AS2 (ror%L0,%b2,%0); + else + return AS2 (ror%L0,%2,%0); +}") + +(define_insn "rotrhi3" + [(set (match_operand:HI 0 "general_operand" "=rm") + (rotatert:HI (match_operand:HI 1 "general_operand" "0") + (match_operand:HI 2 "general_operand" "cI")))] + "" + "* +{ + if (REG_P (operands[2])) + return AS2 (ror%W0,%b2,%0); + else + return AS2 (ror%W0,%2,%0); +}") + +(define_insn "rotrqi3" + [(set (match_operand:QI 0 "general_operand" "=qm") + (rotatert:QI (match_operand:QI 1 "general_operand" "0") + (match_operand:QI 2 "general_operand" "cI")))] + "" + "* +{ + if (REG_P (operands[2])) + return AS2 (ror%B0,%b2,%0); + else + return AS2 (ror%B0,%2,%0); +}") + +/* +;; This usually looses. But try a define_expand to recognize a few case +;; we can do efficiently, such as accessing the "high" QImode registers, +;; %ah, %bh, %ch, %dh. +(define_insn "insv" + [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+&r") + (match_operand:SI 1 "general_operand" "i") + (match_operand:SI 2 "general_operand" "i")) + (match_operand:SI 3 "general_operand" "ri"))] + "" + "* +{ + if (INTVAL (operands[1]) + INTVAL (operands[2]) > GET_MODE_BITSIZE (SImode)) + abort (); + if (GET_CODE (operands[3]) == CONST_INT) + { + unsigned int mask = (1 << INTVAL (operands[1])) - 1; + operands[1] = gen_rtx (CONST_INT, VOIDmode, + ~(mask << INTVAL (operands[2]))); + output_asm_insn (AS2 (and%L0,%1,%0), operands); + operands[3] = gen_rtx (CONST_INT, VOIDmode, + INTVAL (operands[3]) << INTVAL (operands[2])); + output_asm_insn (AS2 (or%L0,%3,%0), operands); + } + else + { + operands[0] = gen_rtx (REG, SImode, REGNO (operands[0])); + if (INTVAL (operands[2])) + output_asm_insn (AS2 (ror%L0,%2,%0), operands); + output_asm_insn (AS3 (shrd%L0,%1,%3,%0), operands); + operands[2] = gen_rtx (CONST_INT, VOIDmode, + BITS_PER_WORD + - INTVAL (operands[1]) - INTVAL (operands[2])); + if (INTVAL (operands[2])) + output_asm_insn (AS2 (ror%L0,%2,%0), operands); + } + RET; +}") +*/ +/* +;; ??? There are problems with the mode of operand[3]. The point of this +;; is to represent an HImode move to a "high byte" register. + +(define_expand "insv" + [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "") + (match_operand:SI 1 "immediate_operand" "") + (match_operand:SI 2 "immediate_operand" "")) + (match_operand:QI 3 "general_operand" "ri"))] + "" + " +{ + if (GET_CODE (operands[1]) != CONST_INT + || GET_CODE (operands[2]) != CONST_INT) + FAIL; + + if (! (INTVAL (operands[1]) == 8 + && (INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 0)) + && ! INTVAL (operands[1]) == 1) + FAIL; +}") + +;; ??? Are these constraints right? +(define_insn "" + [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "+&qo") + (const_int 8) + (const_int 8)) + (match_operand:QI 1 "general_operand" "qn"))] + "" + "* +{ + if (REG_P (operands[0])) + return AS2 (mov%B0,%1,%h0); + + operands[0] = adj_offsettable_operand (operands[0], 1); + return AS2 (mov%B0,%1,%0); +}") +*/ + +;; On i386, the register count for a bit operation is *not* truncated, +;; so SHIFT_COUNT_TRUNCATED must not be defined. 
+ +;; On i486, the shift & or/and code is faster than bts or btr. If +;; operands[0] is a MEM, the bt[sr] is half as fast as the normal code. + +;; On i386, bts is a little faster if operands[0] is a reg, and a +;; little slower if operands[0] is a MEM, than the shift & or/and code. +;; Use bts & btr, since they reload better. + +;; General bit set and clear. +(define_insn "" + [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "+rm") + (const_int 1) + (match_operand:SI 2 "nonimmediate_operand" "r")) + (match_operand:SI 3 "immediate_operand" "i"))] + "! TARGET_486" + "* +{ + CC_STATUS_INIT; + + if (INTVAL (operands[3]) == 1) + return AS2 (bts%L0,%2,%0); + else + return AS2 (btr%L0,%2,%0); +}") + +;; Bit complement. See comments on previous pattern. +;; ??? Is this really worthwhile? +(define_insn "" + [(set (match_operand:SI 0 "general_operand" "+rm") + (xor:SI (ashift:SI (const_int 1) + (match_operand:SI 1 "general_operand" "r")) + (match_dup 0)))] + "! TARGET_486" + "* +{ + CC_STATUS_INIT; + + return AS2 (btc%L0,%1,%0); +}") + +/* ??? This works, but that SUBREG looks dangerous. +(define_insn "" + [(set (match_operand:HI 0 "general_operand" "+rm") + (xor:HI (subreg:HI + (ashift:SI (const_int 1) + (sign_extend:SI + (match_operand:HI 1 "nonimmediate_operand" "r"))) 0) + (match_dup 0)))] + "! TARGET_486" + "* +{ + CC_STATUS_INIT; + + return AS2 (btc%W0,%1,%0); +}") +*/ + +;; Recognizers for bit-test instructions. + +;; The bt opcode allows a MEM in operands[0]. But on both i386 and +;; i486, it is faster to copy a MEM to REG and then use bt, than to use +;; bt on the MEM directly. + +(define_insn "" + [(set (cc0) (zero_extract (match_operand:QI 0 "nonimmediate_operand" "q") + (const_int 1) + (match_operand:SI 1 "general_operand" "ri")))] + "" + "* +{ + if (GET_CODE (operands[1]) == CONST_INT) + { + operands[1] = gen_rtx (CONST_INT, VOIDmode, 1 << INTVAL (operands[1])); + output_asm_insn (AS2 (test%B0,%1,%0), operands); + } + else + { + operands[0] = gen_rtx (REG, SImode, REGNO (operands[0])); + cc_status.flags |= CC_Z_IN_NOT_C; + output_asm_insn (AS2 (bt%L0,%1,%0), operands); + } + RET; +}") + +(define_insn "" + [(set (cc0) (zero_extract (match_operand:HI 0 "nonimmediate_operand" "r") + (const_int 1) + (match_operand:SI 1 "general_operand" "ri")))] + "" + "* +{ + if (GET_CODE (operands[1]) == CONST_INT) + { + operands[1] = gen_rtx (CONST_INT, VOIDmode, 1 << INTVAL (operands[1])); + output_asm_insn (AS2 (test%W0,%1,%0), operands); + } + else + { + cc_status.flags |= CC_Z_IN_NOT_C; + output_asm_insn (AS2 (bt%W0,%1,%0), operands); + } + RET; +}") + +(define_insn "" + [(set (cc0) (zero_extract (match_operand:SI 0 "nonimmediate_operand" "r") + (const_int 1) + (match_operand:SI 1 "general_operand" "ri")))] + "" + "* +{ + if (GET_CODE (operands[1]) == CONST_INT) + { + operands[1] = gen_rtx (CONST_INT, VOIDmode, 1 << INTVAL (operands[1])); + output_asm_insn (AS2 (test%L0,%1,%0), operands); + } + else + { + cc_status.flags |= CC_Z_IN_NOT_C; + output_asm_insn (AS2 (bt%L0,%1,%0), operands); + } + RET; +}") + +;; Store-flag instructions. 
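+;; Each of these emits a single set<cc> opcode, which stores 0 or 1 in a
+;; byte register or memory operand according to the flags; for instance,
+;; after `cmpl %edx,%eax', `sete %al' leaves 1 in %al exactly when
+;; %eax == %edx.  The CC_Z_IN_NOT_C tests below select a carry-based
+;; variant when the zero result was left in the complemented carry flag
+;; by one of the `bt' patterns above.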
+ +(define_insn "seq" + [(set (match_operand:QI 0 "general_operand" "=qm") + (eq:QI (cc0) (const_int 0)))] + "" + "* +{ + if (cc_status.flags & CC_Z_IN_NOT_C) + return AS1 (setnb,%0); + else + return AS1 (sete,%0); +} +") + +(define_insn "sne" + [(set (match_operand:QI 0 "general_operand" "=qm") + (ne:QI (cc0) (const_int 0)))] + "" + "* +{ + if (cc_status.flags & CC_Z_IN_NOT_C) + return AS1 (setb,%0); + else + return AS1 (setne,%0); +} +") + +(define_insn "sgt" + [(set (match_operand:QI 0 "general_operand" "=qm") + (gt:QI (cc0) (const_int 0)))] + "" + "* OUTPUT_JUMP (\"setg %0\", \"seta %0\", 0); ") + +(define_insn "sgtu" + [(set (match_operand:QI 0 "general_operand" "=qm") + (gtu:QI (cc0) (const_int 0)))] + "" + "* return \"seta %0\"; ") + +(define_insn "slt" + [(set (match_operand:QI 0 "general_operand" "=qm") + (lt:QI (cc0) (const_int 0)))] + "" + "* OUTPUT_JUMP (\"setl %0\", \"setb %0\", \"sets %0\"); ") + +(define_insn "sltu" + [(set (match_operand:QI 0 "general_operand" "=qm") + (ltu:QI (cc0) (const_int 0)))] + "" + "* return \"setb %0\"; ") + +(define_insn "sge" + [(set (match_operand:QI 0 "general_operand" "=qm") + (ge:QI (cc0) (const_int 0)))] + "" + "* OUTPUT_JUMP (\"setge %0\", \"setae %0\", \"setns %0\"); ") + +(define_insn "sgeu" + [(set (match_operand:QI 0 "general_operand" "=qm") + (geu:QI (cc0) (const_int 0)))] + "" + "* return \"setae %0\"; ") + +(define_insn "sle" + [(set (match_operand:QI 0 "general_operand" "=qm") + (le:QI (cc0) (const_int 0)))] + "" + "* OUTPUT_JUMP (\"setle %0\", \"setbe %0\", 0); ") + +(define_insn "sleu" + [(set (match_operand:QI 0 "general_operand" "=qm") + (leu:QI (cc0) (const_int 0)))] + "" + "* return \"setbe %0\"; ") + +;; Basic conditional jump instructions. +;; We ignore the overflow flag for signed branch instructions. + +(define_insn "beq" + [(set (pc) + (if_then_else (eq (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "* +{ + if (cc_status.flags & CC_Z_IN_NOT_C) + return \"jnc %l0\"; + else + return \"je %l0\"; +}") + +(define_insn "bne" + [(set (pc) + (if_then_else (ne (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "* +{ + if (cc_status.flags & CC_Z_IN_NOT_C) + return \"jc %l0\"; + else + return \"jne %l0\"; +}") + +(define_insn "bgt" + [(set (pc) + (if_then_else (gt (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "*OUTPUT_JUMP (\"jg %l0\", \"ja %l0\", 0)") + +(define_insn "bgtu" + [(set (pc) + (if_then_else (gtu (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "ja %l0") + +;; There is no jump insn to check for `<' on IEEE floats. +;; Page 17-80 in the 80387 manual says jb, but that's wrong; +;; jb checks for `not >='. So swap the operands and do `>'. +(define_expand "blt" + [(set (pc) + (if_then_else (lt (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + rtx prev = get_last_insn_anywhere (); + rtx body = PATTERN (prev); + rtx comp; + if (GET_CODE (body) == SET) + comp = SET_SRC (body); + else + comp = SET_SRC (XVECEXP (body, 0, 0)); + + if (GET_CODE (comp) == COMPARE + ? 
GET_MODE_CLASS (GET_MODE (XEXP (comp, 0))) == MODE_FLOAT + : GET_MODE_CLASS (GET_MODE (comp)) == MODE_FLOAT) + { + reverse_comparison (prev); + emit_insn (gen_bgt (operands[0])); + DONE; + } +}") + +(define_insn "" + [(set (pc) + (if_then_else (lt (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "*OUTPUT_JUMP (\"jl %l0\", \"jb %l0\", \"js %l0\")") + +(define_insn "bltu" + [(set (pc) + (if_then_else (ltu (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "jb %l0") + +(define_insn "bge" + [(set (pc) + (if_then_else (ge (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "*OUTPUT_JUMP (\"jge %l0\", \"jae %l0\", \"jns %l0\")") + +(define_insn "bgeu" + [(set (pc) + (if_then_else (geu (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "jae %l0") + +;; See comment on `blt', above. +(define_expand "ble" + [(set (pc) + (if_then_else (le (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + rtx prev = get_last_insn_anywhere (); + rtx body = PATTERN (prev); + rtx comp; + if (GET_CODE (body) == SET) + comp = SET_SRC (body); + else + comp = SET_SRC (XVECEXP (body, 0, 0)); + + if (GET_CODE (comp) == COMPARE + ? GET_MODE_CLASS (GET_MODE (XEXP (comp, 0))) == MODE_FLOAT + : GET_MODE_CLASS (GET_MODE (comp)) == MODE_FLOAT) + { + reverse_comparison (prev); + emit_insn (gen_bge (operands[0])); + DONE; + } +}") + +(define_insn "" + [(set (pc) + (if_then_else (le (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "*OUTPUT_JUMP (\"jle %l0\", \"jbe %l0\", 0) ") + +(define_insn "bleu" + [(set (pc) + (if_then_else (leu (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "jbe %l0") + +;; Negated conditional jump instructions. 
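+;; These match the same conditions as the patterns above, but with the
+;; label in the else arm of the if_then_else, so each one emits the
+;; opposite branch opcode (the negated `eq' pattern emits `jne', and so
+;; on).  Jump optimization produces this form when it inverts a branch.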
+ +(define_insn "" + [(set (pc) + (if_then_else (eq (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "* +{ + if (cc_status.flags & CC_Z_IN_NOT_C) + return \"jc %l0\"; + else + return \"jne %l0\"; +}") + +(define_insn "" + [(set (pc) + (if_then_else (ne (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "* +{ + if (cc_status.flags & CC_Z_IN_NOT_C) + return \"jnc %l0\"; + else + return \"je %l0\"; +}") + +(define_insn "" + [(set (pc) + (if_then_else (gt (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "*OUTPUT_JUMP (\"jle %l0\", \"jbe %l0\", 0) ") + +(define_insn "" + [(set (pc) + (if_then_else (gtu (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "jbe %l0") + +(define_insn "" + [(set (pc) + (if_then_else (lt (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "*OUTPUT_JUMP (\"jge %l0\", \"jae %l0\", \"jns %l0\") +") + +(define_insn "" + [(set (pc) + (if_then_else (ltu (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "jae %l0") + +(define_insn "" + [(set (pc) + (if_then_else (ge (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "*OUTPUT_JUMP (\"jl %l0\", \"jb %l0\", \"js %l0\")") + +(define_insn "" + [(set (pc) + (if_then_else (geu (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "jb %l0") + +(define_insn "" + [(set (pc) + (if_then_else (le (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "*OUTPUT_JUMP (\"jg %l0\", \"ja %l0\", 0)") + +(define_insn "" + [(set (pc) + (if_then_else (leu (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "ja %l0") + +;; Unconditional and other jump instructions + +(define_insn "jump" + [(set (pc) + (label_ref (match_operand 0 "" "")))] + "" + "jmp %l0") + +(define_insn "indirect_jump" + [(set (pc) (match_operand:SI 0 "register_operand" "a"))] + "" + "* +{ + CC_STATUS_INIT; + + return AS1 (jmp,%*%0); +}") + +(define_insn "tablejump" + [(set (pc) (match_operand:SI 0 "general_operand" "rm")) + (use (label_ref (match_operand 1 "" "")))] + "" + "* +{ + CC_STATUS_INIT; + + return AS1 (jmp,%*%0); +}") + +;; Call subroutine returning no value. + +(define_insn "call_pop" + [(call (match_operand:QI 0 "indirect_operand" "m") + (match_operand:SI 1 "general_operand" "g")) + (set (reg:SI 7) (plus:SI (reg:SI 7) + (match_operand:SI 3 "immediate_operand" "i")))] + "" + "* +{ + if (GET_CODE (operands[0]) == MEM + && ! CONSTANT_ADDRESS_P (XEXP (operands[0], 0))) + { + operands[0] = XEXP (operands[0], 0); + return AS1 (call,%*%0); + } + else + return AS1 (call,%P0); +}") + +(define_insn "call" + [(call (match_operand:QI 0 "indirect_operand" "m") + (match_operand:SI 1 "general_operand" "g"))] + ;; Operand 1 not used on the i386. + "" + "* +{ + if (GET_CODE (operands[0]) == MEM + && ! CONSTANT_ADDRESS_P (XEXP (operands[0], 0))) + { + operands[0] = XEXP (operands[0], 0); + return AS1 (call,%*%0); + } + else + return AS1 (call,%P0); +}") + +;; Call subroutine, returning value in operand 0 +;; (which must be a hard register). + +(define_insn "call_value_pop" + [(set (match_operand 0 "" "=rf") + (call (match_operand:QI 1 "indirect_operand" "m") + (match_operand:SI 2 "general_operand" "g"))) + (set (reg:SI 7) (plus:SI (reg:SI 7) + (match_operand:SI 4 "immediate_operand" "i")))] + "" + "* +{ + if (GET_CODE (operands[1]) == MEM + && ! 
CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+    {
+      operands[1] = XEXP (operands[1], 0);
+      output_asm_insn (AS1 (call,%*%1), operands);
+    }
+  else
+    output_asm_insn (AS1 (call,%P1), operands);
+
+  RET;
+}")
+
+(define_insn "call_value"
+  [(set (match_operand 0 "" "=rf")
+	(call (match_operand:QI 1 "indirect_operand" "m")
+	      (match_operand:SI 2 "general_operand" "g")))]
+  ;; Operand 2 not used on the i386.
+  ""
+  "*
+{
+  if (GET_CODE (operands[1]) == MEM
+      && ! CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+    {
+      operands[1] = XEXP (operands[1], 0);
+      output_asm_insn (AS1 (call,%*%1), operands);
+    }
+  else
+    output_asm_insn (AS1 (call,%P1), operands);
+
+  RET;
+}")
+
+;; Insn emitted into the body of a function to return from a function.
+;; This is only done if the function's epilogue is known to be simple.
+;; See comments for simple_386_epilogue in i386.c.
+
+(define_insn "return"
+  [(return)]
+  "simple_386_epilogue ()"
+  "*
+{
+  function_epilogue (asm_out_file, get_frame_size ());
+  RET;
+}")
+
+(define_insn "nop"
+  [(const_int 0)]
+  ""
+  "nop")
+
+(define_expand "movstrsi"
+  [(parallel [(set (mem:BLK (match_operand:BLK 0 "general_operand" ""))
+		   (mem:BLK (match_operand:BLK 1 "general_operand" "")))
+	      (use (match_operand:SI 2 "immediate_operand" ""))
+	      (use (match_operand:SI 3 "immediate_operand" ""))
+	      (set (match_operand:SI 4 "register_operand" "")
+		   (const_int 0))
+	      (set (match_dup 0)
+		   (plus:SI (match_dup 0)
+			    (match_dup 2)))
+	      (set (match_dup 1)
+		   (plus:SI (match_dup 1)
+			    (match_dup 2)))])]
+  ""
+  "
+{
+  if (GET_CODE (operands[2]) != CONST_INT)
+    FAIL;
+  operands[0] = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+  operands[1] = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+  operands[4] = gen_reg_rtx (SImode);
+}")
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "register_operand" "D"))
+	(mem:BLK (match_operand:SI 1 "register_operand" "S")))
+   (use (match_operand:SI 2 "immediate_operand" "n"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (set (match_operand:SI 4 "register_operand" "c")
+	(const_int 0))
+   (set (match_operand:SI 5 "register_operand" "=0")
+	(plus:SI (match_dup 0)
+		 (match_dup 2)))
+   (set (match_operand:SI 6 "register_operand" "=1")
+	(plus:SI (match_dup 1)
+		 (match_dup 2)))]
+  ""
+  "*
+{
+  rtx xops[2];
+
+  if (GET_CODE (operands[2]) == CONST_INT)
+    {
+      if (INTVAL (operands[2]) & ~0x03)
+	{
+	  xops[0] = gen_rtx (CONST_INT, VOIDmode, INTVAL (operands[2]) >> 2);
+	  xops[1] = gen_rtx (REG, SImode, 2);
+
+	  output_asm_insn (AS2 (mov%L1,%0,%1), xops);
+#ifdef INTEL_SYNTAX
+	  output_asm_insn (\"rep movsd\", xops);
+#else
+	  output_asm_insn (\"rep\;movs%L1\", xops);
+#endif
+	}
+      if (INTVAL (operands[2]) & 0x02)
+	output_asm_insn (\"movsw\", operands);
+      if (INTVAL (operands[2]) & 0x01)
+	output_asm_insn (\"movsb\", operands);
+    }
+  else
+    abort ();
+  RET;
+}")
+
+(define_expand "cmpstrsi"
+  [(parallel [(set (match_operand:QI 0 "general_operand" "")
+		   (compare
+		    (mem:BLK (match_operand:BLK 1 "general_operand" ""))
+		    (mem:BLK (match_operand:BLK 2 "general_operand" ""))))
+	      (use (match_operand:SI 3 "general_operand" ""))
+	      (use (match_operand:SI 4 "immediate_operand" ""))
+	      (clobber (match_dup 1))
+	      (clobber (match_dup 2))
+	      (clobber (match_dup 3))])]
+  ""
+  "
+{
+  operands[1] = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+  operands[2] = copy_to_mode_reg (SImode, XEXP (operands[2], 0));
+  operands[3] = copy_to_mode_reg (SImode, operands[3]);
+}")
+
+(define_insn ""
+  [(set (match_operand:QI 0 "general_operand" "=q")
+	(compare (mem:BLK (match_operand:SI
1 "general_operand" "D")) + (mem:BLK (match_operand:SI 2 "general_operand" "S")))) + (use (match_operand:SI 3 "general_operand" "c")) + (use (match_operand:SI 4 "immediate_operand" "i")) + (clobber (match_dup 1)) + (clobber (match_dup 2)) + (clobber (match_dup 3))] + "" + "* +{ + rtx xops[3]; + + output_asm_insn (\"repz\;cmps%B2\", operands); + + xops[0] = operands[0]; + xops[1] = gen_rtx (MEM, QImode, + gen_rtx (PLUS, SImode, operands[1], constm1_rtx)); + xops[2] = gen_rtx (MEM, QImode, + gen_rtx (PLUS, SImode, operands[2], constm1_rtx)); + + output_asm_insn (AS2 (mov%B0,%1,%b0), xops); + output_asm_insn (AS2 (sub%B0,%2,%b0), xops); + RET; +}") + +(define_insn "" + [(set (cc0) + (compare (mem:BLK (match_operand:SI 0 "general_operand" "D")) + (mem:BLK (match_operand:SI 1 "general_operand" "S")))) + (use (match_operand:SI 2 "general_operand" "c")) + (use (match_operand:SI 3 "immediate_operand" "i")) + (clobber (match_dup 0)) + (clobber (match_dup 1)) + (clobber (match_dup 2))] + "" + "repz\;cmps%B2") + +(define_expand "ffssi2" + [(set (match_dup 2) + (plus:SI (ffs:SI (match_operand:SI 1 "general_operand" "")) + (const_int -1))) + (set (match_operand:SI 0 "general_operand" "") + (plus:SI (match_dup 2) (const_int 1)))] + "" + "operands[2] = gen_reg_rtx (SImode);") + +(define_insn "" + [(set (match_operand:SI 0 "general_operand" "=&r") + (plus:SI (ffs:SI (match_operand:SI 1 "general_operand" "rm")) + (const_int -1)))] + "" + "* +{ + rtx xops[2]; + + xops[0] = operands[0]; + xops[1] = constm1_rtx; + output_asm_insn (AS2 (mov%L0,%1,%0), xops); + return AS2 (bsf%L0,%1,%0); +}") + +(define_expand "ffshi2" + [(set (match_dup 2) + (plus:HI (ffs:HI (match_operand:HI 1 "general_operand" "")) + (const_int -1))) + (set (match_operand:HI 0 "general_operand" "") + (plus:HI (match_dup 2) (const_int 1)))] + "" + "operands[2] = gen_reg_rtx (HImode);") + +(define_insn "" + [(set (match_operand:HI 0 "general_operand" "=&r") + (plus:HI (ffs:HI (match_operand:SI 1 "general_operand" "rm")) + (const_int -1)))] + "" + "* +{ + rtx xops[2]; + + xops[0] = operands[0]; + xops[1] = constm1_rtx; + output_asm_insn (AS2 (mov%W0,%1,%0), xops); + return AS2 (bsf%W0,%1,%0); +}") + +;; These patterns match the binary 387 instructions for addM3, subM3, +;; mulM3 and divM3. There are three patterns for each of DFmode and +;; SFmode. The first is the normal insn, the second the same insn but +;; with one operand a conversion, and the third the same insn but with +;; the other operand a conversion. The conversion may be SFmode or +;; SImode if the target mode DFmode, but only SImode if the target mode +;; is SFmode. 
+ +(define_insn "" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (match_operator:DF 3 "binary_387_op" + [(match_operand:DF 1 "general_operand" "0,fm") + (match_operand:DF 2 "general_operand" "fm,0")]))] + "TARGET_80387" + "* return (char *) output_387_binary_op (insn, operands);") + +(define_insn "" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (match_operator:DF 3 "binary_387_op" + [(float:DF (match_operand:SI 1 "general_operand" "m,!*r")) + (match_operand:DF 2 "general_operand" "0,0")]))] + "TARGET_80387" + "* return (char *) output_387_binary_op (insn, operands);") + +(define_insn "" + [(set (match_operand:DF 0 "register_operand" "=f,f,f") + (match_operator:DF 3 "binary_387_op" + [(float_extend:DF (match_operand:SF 1 "general_operand" "fm,!*r,0")) + (match_operand:DF 2 "general_operand" "0,0,f")]))] + "TARGET_80387" + "* return (char *) output_387_binary_op (insn, operands);") + +(define_insn "" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (match_operator:DF 3 "binary_387_op" + [(match_operand:DF 1 "general_operand" "0,0") + (float:DF (match_operand:SI 2 "general_operand" "m,!*r"))]))] + "TARGET_80387" + "* return (char *) output_387_binary_op (insn, operands);") + +(define_insn "" + [(set (match_operand:DF 0 "register_operand" "=f,f,f") + (match_operator:DF 3 "binary_387_op" + [(match_operand:DF 1 "general_operand" "0,0,f") + (float_extend:DF + (match_operand:SF 2 "general_operand" "fm,!*r,0"))]))] + "TARGET_80387" + "* return (char *) output_387_binary_op (insn, operands);") + +(define_insn "" + [(set (match_operand:SF 0 "register_operand" "=f,f") + (match_operator:SF 3 "binary_387_op" + [(match_operand:SF 1 "general_operand" "0,fm") + (match_operand:SF 2 "general_operand" "fm,0")]))] + "TARGET_80387" + "* return (char *) output_387_binary_op (insn, operands);") + +(define_insn "" + [(set (match_operand:SF 0 "register_operand" "=f,f") + (match_operator:SF 3 "binary_387_op" + [(float:SF (match_operand:SI 1 "general_operand" "m,!*r")) + (match_operand:SF 2 "general_operand" "0,0")]))] + "TARGET_80387" + "* return (char *) output_387_binary_op (insn, operands);") + +(define_insn "" + [(set (match_operand:SF 0 "register_operand" "=f,f") + (match_operator:SF 3 "binary_387_op" + [(match_operand:SF 1 "general_operand" "0,0") + (float:SF (match_operand:SI 2 "general_operand" "m,!*r"))]))] + "TARGET_80387" + "* return (char *) output_387_binary_op (insn, operands);") + +;;- Local variables: +;;- mode:emacs-lisp +;;- comment-start: ";;- " +;;- eval: (set-syntax-table (copy-sequence (syntax-table))) +;;- eval: (modify-syntax-entry ?[ "(]") +;;- eval: (modify-syntax-entry ?] ")[") +;;- eval: (modify-syntax-entry ?{ "(}") +;;- eval: (modify-syntax-entry ?} "){") +;;- End: -- 2.30.2