1 /* Subroutines for insn-output.c for Intel 860
2 Copyright (C) 1989, 1991, 1997, 1998, 1999, 2000
3 Free Software Foundation, Inc.
6 Written by Richard Stallman (rms@ai.mit.edu).
8 Hacked substantially by Ron Guilmette (rfg@netcom.com) to cater
9 to the whims of the System V Release 4 assembler.
11 This file is part of GNU CC.
13 GNU CC is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
any later version.
18 GNU CC is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GNU CC; see the file COPYING. If not, write to
25 the Free Software Foundation, 59 Temple Place - Suite 330,
26 Boston, MA 02111-1307, USA. */
35 #include "hard-reg-set.h"
37 #include "insn-config.h"
38 #include "conditions.h"
39 #include "insn-flags.h"
42 #include "insn-attr.h"
47 static rtx find_addr_reg
PARAMS ((rtx
));
48 static int reg_clobbered_p
PARAMS ((rtx
, rtx
));
49 static const char *singlemove_string
PARAMS ((rtx
*));
50 static const char *load_opcode
PARAMS ((enum machine_mode
, const char *, rtx
));
51 static const char *store_opcode
PARAMS ((enum machine_mode
, const char *, rtx
));
52 static void output_size_for_block_move
PARAMS ((rtx
, rtx
, rtx
));
54 #ifndef I860_REG_PREFIX
55 #define I860_REG_PREFIX ""
58 const char *i860_reg_prefix
= I860_REG_PREFIX
;
60 /* Save information from a "cmpxx" operation until the branch is emitted. */
62 rtx i860_compare_op0
, i860_compare_op1
;
64 /* Return non-zero if this pattern, can be evaluated safely, even if it
67 safe_insn_src_p (op
, mode
)
69 enum machine_mode mode
;
71 /* Just experimenting. */
73 /* No floating point src is safe if it contains an arithmetic
74 operation, since that operation may trap. */
75 switch (GET_CODE (op
))
87 return CONSTANT_ADDRESS_P (XEXP (op
, 0));
89 /* We never need to negate or complement constants. */
91 return (mode
!= SFmode
&& mode
!= DFmode
);
108 return (mode
!= SFmode
&& mode
!= DFmode
);
115 if ((GET_CODE (XEXP (op
, 0)) == CONST_INT
&& ! SMALL_INT (XEXP (op
, 0)))
116 || (GET_CODE (XEXP (op
, 1)) == CONST_INT
&& ! SMALL_INT (XEXP (op
, 1))))
125 /* Return 1 if REG is clobbered in IN.
126 Return 2 if REG is used in IN.
127 Return 3 if REG is both used and clobbered in IN.
128 Return 0 if neither. */
131 reg_clobbered_p (reg
, in
)
135 register enum rtx_code code
;
140 code
= GET_CODE (in
);
142 if (code
== SET
|| code
== CLOBBER
)
144 rtx dest
= SET_DEST (in
);
148 while (GET_CODE (dest
) == STRICT_LOW_PART
149 || GET_CODE (dest
) == SUBREG
150 || GET_CODE (dest
) == SIGN_EXTRACT
151 || GET_CODE (dest
) == ZERO_EXTRACT
)
152 dest
= XEXP (dest
, 0);
156 else if (GET_CODE (dest
) == REG
157 && refers_to_regno_p (REGNO (reg
),
158 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
162 /* Anything that sets just part of the register
163 is considered using as well as setting it.
164 But note that a straight SUBREG of a single-word value
165 clobbers the entire value. */
166 if (dest
!= SET_DEST (in
)
167 && ! (GET_CODE (SET_DEST (in
)) == SUBREG
168 || UNITS_PER_WORD
>= GET_MODE_SIZE (GET_MODE (dest
))))
175 used
= refers_to_regno_p (REGNO (reg
),
176 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
179 used
= refers_to_regno_p (REGNO (reg
),
180 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
184 return set
+ used
* 2;
187 if (refers_to_regno_p (REGNO (reg
),
188 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
194 /* Return non-zero if OP can be written to without screwing up
195 GCC's model of what's going on. It is assumed that this operand
196 appears in the dest position of a SET insn in a conditional
197 branch's delay slot. AFTER is the label to start looking from. */
199 operand_clobbered_before_used_after (op
, after
)
203 /* Just experimenting. */
204 if (GET_CODE (op
) == CC0
)
206 if (GET_CODE (op
) == REG
)
210 if (op
== stack_pointer_rtx
)
213 /* Scan forward from the label, to see if the value of OP
214 is clobbered before the first use. */
216 for (insn
= NEXT_INSN (after
); insn
; insn
= NEXT_INSN (insn
))
218 if (GET_CODE (insn
) == NOTE
)
220 if (GET_CODE (insn
) == INSN
221 || GET_CODE (insn
) == JUMP_INSN
222 || GET_CODE (insn
) == CALL_INSN
)
224 switch (reg_clobbered_p (op
, PATTERN (insn
)))
234 /* If we reach another label without clobbering OP,
235 then we cannot safely write it here. */
236 else if (GET_CODE (insn
) == CODE_LABEL
)
238 if (GET_CODE (insn
) == JUMP_INSN
)
240 if (condjump_p (insn
))
242 /* This is a jump insn which has already
243 been mangled. We can't tell what it does. */
244 if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
246 if (! JUMP_LABEL (insn
))
248 /* Keep following jumps. */
249 insn
= JUMP_LABEL (insn
);
255 /* In both of these cases, the first insn executed
256 for this op will be a orh whatever%h,%?r0,%?r31,
257 which is tolerable. */
258 if (GET_CODE (op
) == MEM
)
259 return (CONSTANT_ADDRESS_P (XEXP (op
, 0)));
264 /* Return non-zero if this pattern, as a source to a "SET",
265 is known to yield an instruction of unit size. */
267 single_insn_src_p (op
, mode
)
269 enum machine_mode mode
;
271 switch (GET_CODE (op
))
274 /* This is not always a single insn src, technically,
275 but output_delayed_branch knows how to deal with it. */
280 /* This is not a single insn src, technically,
281 but output_delayed_branch knows how to deal with it. */
290 /* We never need to negate or complement constants. */
292 return (mode
!= DFmode
);
299 /* Detect cases that require multiple instructions. */
300 if (CONSTANT_P (XEXP (op
, 1))
301 && !(GET_CODE (XEXP (op
, 1)) == CONST_INT
302 && SMALL_INT (XEXP (op
, 1))))
314 /* Not doing floating point, since they probably
315 take longer than the branch slot they might fill. */
316 return (mode
!= SFmode
&& mode
!= DFmode
);
319 if (GET_CODE (XEXP (op
, 1)) == NOT
)
321 rtx arg
= XEXP (XEXP (op
, 1), 0);
323 && !(GET_CODE (arg
) == CONST_INT
325 || (INTVAL (arg
) & 0xffff) == 0)))
330 /* Both small and round numbers take one instruction;
332 if (CONSTANT_P (XEXP (op
, 1))
333 && !(GET_CODE (XEXP (op
, 1)) == CONST_INT
334 && (SMALL_INT (XEXP (op
, 1))
335 || (INTVAL (XEXP (op
, 1)) & 0xffff) == 0)))
344 if (SUBREG_WORD (op
) != 0)
346 return single_insn_src_p (SUBREG_REG (op
), mode
);
348 /* Not doing floating point, since they probably
349 take longer than the branch slot they might fill. */
/* NOTE(review): this chunk is a garbled extraction -- each logical source
   line is split across physical lines and interior lines (per the embedded
   old line numbers) are missing, including this function's header/braces.
   Code kept byte-identical below.  Intent (as far as visible): predicate
   returning nonzero when OP is the const0 rtx, a register of MODE, or
   CONST0_RTX of MODE -- TODO confirm against an intact copy of i860.c.  */
363 /* Return non-zero only if OP is a register of mode MODE,
366 reg_or_0_operand (op
, mode
)
368 enum machine_mode mode
;
370 return (op
== const0_rtx
|| register_operand (op
, mode
)
371 || op
== CONST0_RTX (mode
));
374 /* Return truth value of whether OP can be used as an operands in a three
375 address add/subtract insn (such as add %o1,7,%l2) of mode MODE. */
/* NOTE(review): fragmentary extraction -- the function header line, braces
   and `rtx op;' declaration are among the elided lines; code below kept
   byte-identical.  Visible logic: accept a register of MODE, or a
   CONST_INT satisfying SMALL_INT (the add/subtract immediate range).  */
378 arith_operand (op
, mode
)
380 enum machine_mode mode
;
382 return (register_operand (op
, mode
)
383 || (GET_CODE (op
) == CONST_INT
&& SMALL_INT (op
)));
386 /* Return 1 if OP is a valid first operand for a logical insn of mode MODE. */
/* NOTE(review): fragmentary extraction -- header/braces elided; code kept
   byte-identical.  Visible logic: accept a register of MODE, or a
   CONST_INT satisfying LOGIC_INT (the logical-insn immediate range).  */
389 logic_operand (op
, mode
)
391 enum machine_mode mode
;
393 return (register_operand (op
, mode
)
394 || (GET_CODE (op
) == CONST_INT
&& LOGIC_INT (op
)));
397 /* Return 1 if OP is a valid first operand for a shift insn of mode MODE. */
/* NOTE(review): fragmentary extraction -- header/braces elided; code kept
   byte-identical.  Visible logic: accept a register of MODE or any
   CONST_INT (shift counts take the full immediate, no range macro).  */
400 shift_operand (op
, mode
)
402 enum machine_mode mode
;
404 return (register_operand (op
, mode
)
405 || (GET_CODE (op
) == CONST_INT
));
408 /* Return 1 if OP is a valid first operand for either a logical insn
409 or an add insn of mode MODE. */
/* NOTE(review): fragmentary extraction -- header/braces elided; code kept
   byte-identical.  Visible logic: accept a register of MODE, or a
   CONST_INT that satisfies BOTH SMALL_INT and LOGIC_INT (usable by
   either insn form).  */
412 compare_operand (op
, mode
)
414 enum machine_mode mode
;
416 return (register_operand (op
, mode
)
417 || (GET_CODE (op
) == CONST_INT
&& SMALL_INT (op
) && LOGIC_INT (op
)));
420 /* Return truth value of whether OP can be used as the 5-bit immediate
421 operand of a bte or btne insn. */
/* NOTE(review): fragmentary extraction -- header/braces elided; code kept
   byte-identical.  Visible logic: accept a register of MODE, or a
   CONST_INT whose value fits in 5 bits (unsigned < 0x20).  */
424 bte_operand (op
, mode
)
426 enum machine_mode mode
;
428 return (register_operand (op
, mode
)
429 || (GET_CODE (op
) == CONST_INT
430 && (unsigned) INTVAL (op
) < 0x20));
433 /* Return 1 if OP is an indexed memory reference of mode MODE. */
/* NOTE(review): fragmentary extraction -- header/braces elided; code kept
   byte-identical.  Visible logic: OP must be a MEM of MODE whose address
   is an SImode PLUS of two SImode registers (reg+reg indexed form).  */
436 indexed_operand (op
, mode
)
438 enum machine_mode mode
;
440 return (GET_CODE (op
) == MEM
&& GET_MODE (op
) == mode
441 && GET_CODE (XEXP (op
, 0)) == PLUS
442 && GET_MODE (XEXP (op
, 0)) == SImode
443 && register_operand (XEXP (XEXP (op
, 0), 0), SImode
)
444 && register_operand (XEXP (XEXP (op
, 0), 1), SImode
));
/* NOTE(review): fragmentary extraction -- the comment opened on the next
   line is left unterminated by elided lines (it appears to run on until
   the following function's comment), and the header/braces are missing.
   Code kept byte-identical.  Visible logic: accept either an ordinary
   memory operand or a reg+reg indexed memory reference of MODE.  */
447 /* Return 1 if OP is a suitable source operand for a load insn
451 load_operand (op
, mode
)
453 enum machine_mode mode
;
455 return (memory_operand (op
, mode
) || indexed_operand (op
, mode
));
458 /* Return truth value of whether OP is a integer which fits the
459 range constraining immediate operands in add/subtract insns. */
464 enum machine_mode mode ATTRIBUTE_UNUSED
;
466 return (GET_CODE (op
) == CONST_INT
&& SMALL_INT (op
));
469 /* Return truth value of whether OP is a integer which fits the
470 range constraining immediate operands in logic insns. */
475 enum machine_mode mode ATTRIBUTE_UNUSED
;
477 return (GET_CODE (op
) == CONST_INT
&& LOGIC_INT (op
));
480 /* Test for a valid operand for a call instruction.
481 Don't allow the arg pointer register or virtual regs
482 since they may change into reg + const, which the patterns
486 call_insn_operand (op
, mode
)
488 enum machine_mode mode ATTRIBUTE_UNUSED
;
490 if (GET_CODE (op
) == MEM
491 && (CONSTANT_ADDRESS_P (XEXP (op
, 0))
492 || (GET_CODE (XEXP (op
, 0)) == REG
493 && XEXP (op
, 0) != arg_pointer_rtx
494 && !(REGNO (XEXP (op
, 0)) >= FIRST_PSEUDO_REGISTER
495 && REGNO (XEXP (op
, 0)) <= LAST_VIRTUAL_REGISTER
))))
500 /* Return the best assembler insn template
501 for moving operands[1] into operands[0] as a fullword. */
504 singlemove_string (operands
)
507 if (GET_CODE (operands
[0]) == MEM
)
509 if (GET_CODE (operands
[1]) != MEM
)
510 if (CONSTANT_ADDRESS_P (XEXP (operands
[0], 0)))
512 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
513 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
514 && cc_prev_status
.mdep
== XEXP (operands
[0], 0)))
517 output_asm_insn ("orh %h0,%?r0,%?r31", operands
);
519 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
520 cc_status
.mdep
= XEXP (operands
[0], 0);
521 return "st.l %r1,%L0(%?r31)";
524 return "st.l %r1,%0";
531 cc_status
.flags
&= ~CC_F0_IS_0
;
532 xoperands
[0] = gen_rtx_REG (SFmode
, 32);
533 xoperands
[1] = operands
[1];
534 output_asm_insn (singlemove_string (xoperands
), xoperands
);
535 xoperands
[1] = xoperands
[0];
536 xoperands
[0] = operands
[0];
537 output_asm_insn (singlemove_string (xoperands
), xoperands
);
542 if (GET_CODE (operands
[1]) == MEM
)
544 if (CONSTANT_ADDRESS_P (XEXP (operands
[1], 0)))
546 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
547 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
548 && cc_prev_status
.mdep
== XEXP (operands
[1], 0)))
551 output_asm_insn ("orh %h1,%?r0,%?r31", operands
);
553 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
554 cc_status
.mdep
= XEXP (operands
[1], 0);
555 return "ld.l %L1(%?r31),%0";
557 return "ld.l %m1,%0";
559 if (GET_CODE (operands
[1]) == CONST_INT
)
561 if (operands
[1] == const0_rtx
)
562 return "mov %?r0,%0";
563 if((INTVAL (operands
[1]) & 0xffff0000) == 0)
564 return "or %L1,%?r0,%0";
565 if((INTVAL (operands
[1]) & 0xffff8000) == 0xffff8000)
566 return "adds %1,%?r0,%0";
567 if((INTVAL (operands
[1]) & 0x0000ffff) == 0)
568 return "orh %H1,%?r0,%0";
570 return "orh %H1,%?r0,%0\n\tor %L1,%0,%0";
575 /* Output assembler code to perform a doubleword move insn
576 with operands OPERANDS. */
579 output_move_double (operands
)
582 enum { REGOP
, OFFSOP
, MEMOP
, PUSHOP
, POPOP
, CNSTOP
, RNDOP
} optype0
, optype1
;
584 rtx addreg0
= 0, addreg1
= 0;
585 int highest_first
= 0;
586 int no_addreg1_decrement
= 0;
588 /* First classify both operands. */
590 if (REG_P (operands
[0]))
592 else if (offsettable_memref_p (operands
[0]))
594 else if (GET_CODE (operands
[0]) == MEM
)
599 if (REG_P (operands
[1]))
601 else if (CONSTANT_P (operands
[1]))
603 else if (offsettable_memref_p (operands
[1]))
605 else if (GET_CODE (operands
[1]) == MEM
)
610 /* Check for the cases that the operand constraints are not
611 supposed to allow to happen. Abort if we get one,
612 because generating code for these cases is painful. */
614 if (optype0
== RNDOP
|| optype1
== RNDOP
)
617 /* If an operand is an unoffsettable memory ref, find a register
618 we can increment temporarily to make it refer to the second word. */
620 if (optype0
== MEMOP
)
621 addreg0
= find_addr_reg (XEXP (operands
[0], 0));
623 if (optype1
== MEMOP
)
624 addreg1
= find_addr_reg (XEXP (operands
[1], 0));
626 /* ??? Perhaps in some cases move double words
627 if there is a spare pair of floating regs. */
629 /* Ok, we can do one word at a time.
630 Normally we do the low-numbered word first,
631 but if either operand is autodecrementing then we
632 do the high-numbered word first.
634 In either case, set up in LATEHALF the operands to use
635 for the high-numbered word and in some cases alter the
636 operands in OPERANDS to be suitable for the low-numbered word. */
638 if (optype0
== REGOP
)
639 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
640 else if (optype0
== OFFSOP
)
641 latehalf
[0] = adj_offsettable_operand (operands
[0], 4);
643 latehalf
[0] = operands
[0];
645 if (optype1
== REGOP
)
646 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
647 else if (optype1
== OFFSOP
)
648 latehalf
[1] = adj_offsettable_operand (operands
[1], 4);
649 else if (optype1
== CNSTOP
)
651 if (GET_CODE (operands
[1]) == CONST_DOUBLE
)
652 split_double (operands
[1], &operands
[1], &latehalf
[1]);
653 else if (CONSTANT_P (operands
[1]))
654 latehalf
[1] = const0_rtx
;
657 latehalf
[1] = operands
[1];
659 /* If the first move would clobber the source of the second one,
660 do them in the other order.
662 RMS says "This happens only for registers;
663 such overlap can't happen in memory unless the user explicitly
664 sets it up, and that is an undefined circumstance."
666 but it happens on the sparc when loading parameter registers,
667 so I am going to define that circumstance, and make it work
670 if (optype0
== REGOP
&& optype1
== REGOP
671 && REGNO (operands
[0]) == REGNO (latehalf
[1]))
673 CC_STATUS_PARTIAL_INIT
;
674 /* Make any unoffsettable addresses point at high-numbered word. */
676 output_asm_insn ("adds 0x4,%0,%0", &addreg0
);
678 output_asm_insn ("adds 0x4,%0,%0", &addreg1
);
681 output_asm_insn (singlemove_string (latehalf
), latehalf
);
683 /* Undo the adds we just did. */
685 output_asm_insn ("adds -0x4,%0,%0", &addreg0
);
687 output_asm_insn ("adds -0x4,%0,%0", &addreg1
);
689 /* Do low-numbered word. */
690 return singlemove_string (operands
);
692 else if (optype0
== REGOP
&& optype1
!= REGOP
693 && reg_overlap_mentioned_p (operands
[0], operands
[1]))
695 /* If both halves of dest are used in the src memory address,
696 add the two regs and put them in the low reg (operands[0]).
697 Then it works to load latehalf first. */
698 if (reg_mentioned_p (operands
[0], XEXP (operands
[1], 0))
699 && reg_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
702 xops
[0] = latehalf
[0];
703 xops
[1] = operands
[0];
704 output_asm_insn ("adds %1,%0,%1", xops
);
705 operands
[1] = gen_rtx_MEM (DImode
, operands
[0]);
706 latehalf
[1] = adj_offsettable_operand (operands
[1], 4);
710 /* Only one register in the dest is used in the src memory address,
711 and this is the first register of the dest, so we want to do
712 the late half first here also. */
713 else if (! reg_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
715 /* Only one register in the dest is used in the src memory address,
716 and this is the second register of the dest, so we want to do
717 the late half last. If addreg1 is set, and addreg1 is the same
718 register as latehalf, then we must suppress the trailing decrement,
719 because it would clobber the value just loaded. */
720 else if (addreg1
&& reg_mentioned_p (addreg1
, latehalf
[0]))
721 no_addreg1_decrement
= 1;
724 /* Normal case: do the two words, low-numbered first.
725 Overlap case (highest_first set): do high-numbered word first. */
728 output_asm_insn (singlemove_string (operands
), operands
);
730 CC_STATUS_PARTIAL_INIT
;
731 /* Make any unoffsettable addresses point at high-numbered word. */
733 output_asm_insn ("adds 0x4,%0,%0", &addreg0
);
735 output_asm_insn ("adds 0x4,%0,%0", &addreg1
);
738 output_asm_insn (singlemove_string (latehalf
), latehalf
);
740 /* Undo the adds we just did. */
742 output_asm_insn ("adds -0x4,%0,%0", &addreg0
);
743 if (addreg1
&& !no_addreg1_decrement
)
744 output_asm_insn ("adds -0x4,%0,%0", &addreg1
);
747 output_asm_insn (singlemove_string (operands
), operands
);
753 output_fp_move_double (operands
)
756 /* If the source operand is any sort of zero, use f0 instead. */
758 if (operands
[1] == CONST0_RTX (GET_MODE (operands
[1])))
759 operands
[1] = gen_rtx_REG (DFmode
, F0_REGNUM
);
761 if (FP_REG_P (operands
[0]))
763 if (FP_REG_P (operands
[1]))
764 return "fmov.dd %1,%0";
765 if (GET_CODE (operands
[1]) == REG
)
767 output_asm_insn ("ixfr %1,%0", operands
);
768 operands
[0] = gen_rtx_REG (VOIDmode
, REGNO (operands
[0]) + 1);
769 operands
[1] = gen_rtx_REG (VOIDmode
, REGNO (operands
[1]) + 1);
772 if (operands
[1] == CONST0_RTX (DFmode
))
773 return "fmov.dd f0,%0";
774 if (CONSTANT_ADDRESS_P (XEXP (operands
[1], 0)))
776 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
777 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
778 && cc_prev_status
.mdep
== XEXP (operands
[1], 0)))
781 output_asm_insn ("orh %h1,%?r0,%?r31", operands
);
783 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
784 cc_status
.mdep
= XEXP (operands
[1], 0);
785 return "fld.d %L1(%?r31),%0";
787 return "fld.d %1,%0";
789 else if (FP_REG_P (operands
[1]))
791 if (GET_CODE (operands
[0]) == REG
)
793 output_asm_insn ("fxfr %1,%0", operands
);
794 operands
[0] = gen_rtx_REG (VOIDmode
, REGNO (operands
[0]) + 1);
795 operands
[1] = gen_rtx_REG (VOIDmode
, REGNO (operands
[1]) + 1);
798 if (CONSTANT_ADDRESS_P (XEXP (operands
[0], 0)))
800 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
801 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
802 && cc_prev_status
.mdep
== XEXP (operands
[0], 0)))
805 output_asm_insn ("orh %h0,%?r0,%?r31", operands
);
807 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
808 cc_status
.mdep
= XEXP (operands
[0], 0);
809 return "fst.d %1,%L0(%?r31)";
811 return "fst.d %1,%0";
819 /* Return a REG that occurs in ADDR with coefficient 1.
820 ADDR can be effectively incremented by incrementing REG. */
826 while (GET_CODE (addr
) == PLUS
)
828 if (GET_CODE (XEXP (addr
, 0)) == REG
)
829 addr
= XEXP (addr
, 0);
830 else if (GET_CODE (XEXP (addr
, 1)) == REG
)
831 addr
= XEXP (addr
, 1);
832 else if (CONSTANT_P (XEXP (addr
, 0)))
833 addr
= XEXP (addr
, 1);
834 else if (CONSTANT_P (XEXP (addr
, 1)))
835 addr
= XEXP (addr
, 0);
839 if (GET_CODE (addr
) == REG
)
846 /* Return a template for a load instruction with mode MODE and
847 arguments from the string ARGS.
849 This string is in static storage. */
852 load_opcode (mode
, args
, reg
)
853 enum machine_mode mode
;
889 sprintf (buf
, "%s %s", opcode
, args
);
893 /* Return a template for a store instruction with mode MODE and
894 arguments from the string ARGS.
896 This string is in static storage. */
899 store_opcode (mode
, args
, reg
)
900 enum machine_mode mode
;
936 sprintf (buf
, "%s %s", opcode
, args
);
940 /* Output a store-in-memory whose operands are OPERANDS[0,1].
941 OPERANDS[0] is a MEM, and OPERANDS[1] is a reg or zero.
943 This function returns a template for an insn.
944 This is in static storage.
946 It may also output some insns directly.
947 It may alter the values of operands[0] and operands[1]. */
950 output_store (operands
)
953 enum machine_mode mode
= GET_MODE (operands
[0]);
954 rtx address
= XEXP (operands
[0], 0);
956 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
957 cc_status
.mdep
= address
;
959 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
960 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
961 && address
== cc_prev_status
.mdep
))
964 output_asm_insn ("orh %h0,%?r0,%?r31", operands
);
965 cc_prev_status
.mdep
= address
;
968 /* Store zero in two parts when appropriate. */
969 if (mode
== DFmode
&& operands
[1] == CONST0_RTX (DFmode
))
970 return store_opcode (DFmode
, "%r1,%L0(%?r31)", operands
[1]);
972 /* Code below isn't smart enough to move a doubleword in two parts,
973 so use output_move_double to do that in the cases that require it. */
974 if ((mode
== DImode
|| mode
== DFmode
)
975 && ! FP_REG_P (operands
[1]))
976 return output_move_double (operands
);
978 return store_opcode (mode
, "%r1,%L0(%?r31)", operands
[1]);
981 /* Output a load-from-memory whose operands are OPERANDS[0,1].
982 OPERANDS[0] is a reg, and OPERANDS[1] is a mem.
984 This function returns a template for an insn.
985 This is in static storage.
987 It may also output some insns directly.
988 It may alter the values of operands[0] and operands[1]. */
991 output_load (operands
)
994 enum machine_mode mode
= GET_MODE (operands
[0]);
995 rtx address
= XEXP (operands
[1], 0);
997 /* We don't bother trying to see if we know %hi(address).
998 This is because we are doing a load, and if we know the
999 %hi value, we probably also know that value in memory. */
1000 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
1001 cc_status
.mdep
= address
;
1003 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
1004 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
1005 && address
== cc_prev_status
.mdep
1006 && cc_prev_status
.mdep
== cc_status
.mdep
))
1009 output_asm_insn ("orh %h1,%?r0,%?r31", operands
);
1010 cc_prev_status
.mdep
= address
;
1013 /* Code below isn't smart enough to move a doubleword in two parts,
1014 so use output_move_double to do that in the cases that require it. */
1015 if ((mode
== DImode
|| mode
== DFmode
)
1016 && ! FP_REG_P (operands
[0]))
1017 return output_move_double (operands
);
1019 return load_opcode (mode
, "%L1(%?r31),%0", operands
[0]);
1023 /* Load the address specified by OPERANDS[3] into the register
1024 specified by OPERANDS[0].
1026 OPERANDS[3] may be the result of a sum, hence it could either be:
1031 (3) REG + REG + CONST_INT
1032 (4) REG + REG (special case of 3).
1034 Note that (3) is not a legitimate address.
1035 All cases are handled here. */
1038 output_load_address (operands
)
1043 if (CONSTANT_P (operands
[3]))
1045 output_asm_insn ("mov %3,%0", operands
);
1049 if (REG_P (operands
[3]))
1051 if (REGNO (operands
[0]) != REGNO (operands
[3]))
1052 output_asm_insn ("shl %?r0,%3,%0", operands
);
1056 if (GET_CODE (operands
[3]) != PLUS
)
1059 base
= XEXP (operands
[3], 0);
1060 offset
= XEXP (operands
[3], 1);
1062 if (GET_CODE (base
) == CONST_INT
)
1069 if (GET_CODE (offset
) != CONST_INT
)
1071 /* Operand is (PLUS (REG) (REG)). */
1073 offset
= const0_rtx
;
1079 operands
[7] = offset
;
1080 CC_STATUS_PARTIAL_INIT
;
1081 if (SMALL_INT (offset
))
1082 output_asm_insn ("adds %7,%6,%0", operands
);
1084 output_asm_insn ("mov %7,%0\n\tadds %0,%6,%0", operands
);
1086 else if (GET_CODE (base
) == PLUS
)
1088 operands
[6] = XEXP (base
, 0);
1089 operands
[7] = XEXP (base
, 1);
1090 operands
[8] = offset
;
1092 CC_STATUS_PARTIAL_INIT
;
1093 if (SMALL_INT (offset
))
1094 output_asm_insn ("adds %6,%7,%0\n\tadds %8,%0,%0", operands
);
1096 output_asm_insn ("mov %8,%0\n\tadds %0,%6,%0\n\tadds %0,%7,%0", operands
);
1103 /* Output code to place a size count SIZE in register REG.
1104 Because block moves are pipelined, we don't include the
1105 first element in the transfer of SIZE to REG.
1106 For this, we subtract ALIGN. (Actually, I think it is not
1107 right to subtract on this machine, so right now we don't.) */
1110 output_size_for_block_move (size
, reg
, align
)
1111 rtx size
, reg
, align
;
1116 xoperands
[1] = size
;
1117 xoperands
[2] = align
;
1120 cc_status
.flags
&= ~ CC_KNOW_HI_R31
;
1121 output_asm_insn (singlemove_string (xoperands
), xoperands
);
1123 if (GET_CODE (size
) == REG
)
1124 output_asm_insn ("sub %2,%1,%0", xoperands
);
1127 xoperands
[1] = GEN_INT (INTVAL (size
) - INTVAL (align
));
1128 cc_status
.flags
&= ~ CC_KNOW_HI_R31
;
1129 output_asm_insn ("mov %1,%0", xoperands
);
1134 /* Emit code to perform a block move.
1136 OPERANDS[0] is the destination.
1137 OPERANDS[1] is the source.
1138 OPERANDS[2] is the size.
1139 OPERANDS[3] is the known safe alignment.
1140 OPERANDS[4..6] are pseudos we can safely clobber as temps. */
1143 output_block_move (operands
)
1146 /* A vector for our computed operands. Note that load_output_address
1147 makes use of (and can clobber) up to the 8th element of this vector. */
1152 static int movstrsi_label
= 0;
1154 rtx temp1
= operands
[4];
1155 rtx alignrtx
= operands
[3];
1156 int align
= INTVAL (alignrtx
);
1159 xoperands
[0] = operands
[0];
1160 xoperands
[1] = operands
[1];
1161 xoperands
[2] = temp1
;
1163 /* We can't move more than four bytes at a time
1164 because we have only one register to move them through. */
1168 alignrtx
= GEN_INT (4);
1171 /* Recognize special cases of block moves. These occur
1172 when GNU C++ is forced to treat something as BLKmode
1173 to keep it in memory, when its mode could be represented
1174 with something smaller.
1176 We cannot do this for global variables, since we don't know
1177 what pages they don't cross. Sigh. */
1178 if (GET_CODE (operands
[2]) == CONST_INT
1179 && ! CONSTANT_ADDRESS_P (operands
[0])
1180 && ! CONSTANT_ADDRESS_P (operands
[1]))
1182 int size
= INTVAL (operands
[2]);
1183 rtx op0
= xoperands
[0];
1184 rtx op1
= xoperands
[1];
1186 if ((align
& 3) == 0 && (size
& 3) == 0 && (size
>> 2) <= 16)
1188 if (memory_address_p (SImode
, plus_constant (op0
, size
))
1189 && memory_address_p (SImode
, plus_constant (op1
, size
)))
1191 cc_status
.flags
&= ~CC_KNOW_HI_R31
;
1192 for (i
= (size
>>2)-1; i
>= 0; i
--)
1194 xoperands
[0] = plus_constant (op0
, i
* 4);
1195 xoperands
[1] = plus_constant (op1
, i
* 4);
1196 output_asm_insn ("ld.l %a1,%?r31\n\tst.l %?r31,%a0",
1202 else if ((align
& 1) == 0 && (size
& 1) == 0 && (size
>> 1) <= 16)
1204 if (memory_address_p (HImode
, plus_constant (op0
, size
))
1205 && memory_address_p (HImode
, plus_constant (op1
, size
)))
1207 cc_status
.flags
&= ~CC_KNOW_HI_R31
;
1208 for (i
= (size
>>1)-1; i
>= 0; i
--)
1210 xoperands
[0] = plus_constant (op0
, i
* 2);
1211 xoperands
[1] = plus_constant (op1
, i
* 2);
1212 output_asm_insn ("ld.s %a1,%?r31\n\tst.s %?r31,%a0",
1218 else if (size
<= 16)
1220 if (memory_address_p (QImode
, plus_constant (op0
, size
))
1221 && memory_address_p (QImode
, plus_constant (op1
, size
)))
1223 cc_status
.flags
&= ~CC_KNOW_HI_R31
;
1224 for (i
= size
-1; i
>= 0; i
--)
1226 xoperands
[0] = plus_constant (op0
, i
);
1227 xoperands
[1] = plus_constant (op1
, i
);
1228 output_asm_insn ("ld.b %a1,%?r31\n\tst.b %?r31,%a0",
1236 /* Since we clobber untold things, nix the condition codes. */
1239 /* This is the size of the transfer.
1240 Either use the register which already contains the size,
1241 or use a free register (used by no operands). */
1242 output_size_for_block_move (operands
[2], operands
[4], alignrtx
);
1245 /* Also emit code to decrement the size value by ALIGN. */
1246 zoperands
[0] = operands
[0];
1247 zoperands
[3] = plus_constant (operands
[0], align
);
1248 output_load_address (zoperands
);
1251 /* Generate number for unique label. */
1253 xoperands
[3] = GEN_INT (movstrsi_label
++);
1255 /* Calculate the size of the chunks we will be trying to move first. */
1258 if ((align
& 3) == 0)
1260 else if ((align
& 1) == 0)
1266 /* Copy the increment (negative) to a register for bla insn. */
1268 xoperands
[4] = GEN_INT (- chunk_size
);
1269 xoperands
[5] = operands
[5];
1270 output_asm_insn ("adds %4,%?r0,%5", xoperands
);
1272 /* Predecrement the loop counter. This happens again also in the `bla'
1273 instruction which precedes the loop, but we need to have it done
1274 two times before we enter the loop because of the bizarre semantics
1275 of the bla instruction. */
1277 output_asm_insn ("adds %5,%2,%2", xoperands
);
1279 /* Check for the case where the original count was less than or equal to
1280 zero. Avoid going through the loop at all if the original count was
1281 indeed less than or equal to zero. Note that we treat the count as
1282 if it were a signed 32-bit quantity here, rather than an unsigned one,
1283 even though we really shouldn't. We have to do this because of the
1284 semantics of the `ble' instruction, which assume that the count is
1285 a signed 32-bit value. Anyway, in practice it won't matter because
1286 nobody is going to try to do a memcpy() of more than half of the
1287 entire address space (i.e. 2 gigabytes) anyway. */
1289 output_asm_insn ("bc .Le%3", xoperands
);
1291 /* Make available a register which is a temporary. */
1293 xoperands
[6] = operands
[6];
1295 /* Now the actual loop.
1296 In xoperands, elements 1 and 0 are the input and output vectors.
1297 Element 2 is the loop index. Element 5 is the increment. */
1299 output_asm_insn ("subs %1,%5,%1", xoperands
);
1300 output_asm_insn ("bla %5,%2,.Lm%3", xoperands
);
1301 output_asm_insn ("adds %0,%2,%6", xoperands
);
1302 output_asm_insn ("\n.Lm%3:", xoperands
); /* Label for bla above. */
1303 output_asm_insn ("\n.Ls%3:", xoperands
); /* Loop start label. */
1304 output_asm_insn ("adds %5,%6,%6", xoperands
);
1306 /* NOTE: The code here which is supposed to handle the cases where the
1307 sources and destinations are known to start on a 4 or 2 byte boundary
1308 are currently broken. They fail to do anything about the overflow
1309 bytes which might still need to be copied even after we have copied
1310 some number of words or halfwords. Thus, for now we use the lowest
1311 common denominator, i.e. the code which just copies some number of
1312 totally unaligned individual bytes. (See the calculation of
1313 chunk_size above. */
1315 if (chunk_size
== 4)
1317 output_asm_insn ("ld.l %2(%1),%?r31", xoperands
);
1318 output_asm_insn ("bla %5,%2,.Ls%3", xoperands
);
1319 output_asm_insn ("st.l %?r31,8(%6)", xoperands
);
1321 else if (chunk_size
== 2)
1323 output_asm_insn ("ld.s %2(%1),%?r31", xoperands
);
1324 output_asm_insn ("bla %5,%2,.Ls%3", xoperands
);
1325 output_asm_insn ("st.s %?r31,4(%6)", xoperands
);
1327 else /* chunk_size == 1 */
1329 output_asm_insn ("ld.b %2(%1),%?r31", xoperands
);
1330 output_asm_insn ("bla %5,%2,.Ls%3", xoperands
);
1331 output_asm_insn ("st.b %?r31,2(%6)", xoperands
);
1333 output_asm_insn ("\n.Le%3:", xoperands
); /* Here if count <= 0. */
1339 /* Output a delayed branch insn with the delay insn in its
1340 branch slot. The delayed branch insn template is in TEMPLATE,
1341 with operands OPERANDS. The insn in its delay slot is INSN.
1343 As a special case, since we know that all memory transfers are via
1344 ld/st insns, if we see a (MEM (SYMBOL_REF ...)) we divide the memory
1345 reference around the branch as
1349 ld/st l%x(%?r31),...
1351 As another special case, we handle loading (SYMBOL_REF ...) and
1352 other large constants around branches as well:
1359 /* ??? Disabled because this re-recognition is incomplete and causes
1360 constrain_operands to segfault. Anyone who cares should fix up
1361 the code to use the DBR pass. */
1364 output_delayed_branch (template, operands
, insn
)
1365 const char *template;
1369 rtx src
= XVECEXP (PATTERN (insn
), 0, 1);
1370 rtx dest
= XVECEXP (PATTERN (insn
), 0, 0);
1372 /* See if we are doing some branch together with setting some register
1373 to some 32-bit value which does (or may) have some of the high-order
1374 16 bits set. If so, we need to set the register in two stages. One
1375 stage must be done before the branch, and the other one can be done
1376 in the delay slot. */
1378 if ( (GET_CODE (src
) == CONST_INT
1379 && ((unsigned) INTVAL (src
) & (unsigned) 0xffff0000) != (unsigned) 0)
1380 || (GET_CODE (src
) == SYMBOL_REF
)
1381 || (GET_CODE (src
) == LABEL_REF
)
1382 || (GET_CODE (src
) == CONST
))
1385 xoperands
[0] = dest
;
1388 CC_STATUS_PARTIAL_INIT
;
1389 /* Output the `orh' insn. */
1390 output_asm_insn ("orh %H1,%?r0,%0", xoperands
);
1392 /* Output the branch instruction next. */
1393 output_asm_insn (template, operands
);
1395 /* Now output the `or' insn. */
1396 output_asm_insn ("or %L1,%0,%0", xoperands
);
1398 else if ((GET_CODE (src
) == MEM
1399 && CONSTANT_ADDRESS_P (XEXP (src
, 0)))
1400 || (GET_CODE (dest
) == MEM
1401 && CONSTANT_ADDRESS_P (XEXP (dest
, 0))))
1404 const char *split_template
;
1405 xoperands
[0] = dest
;
1408 /* Output the `orh' insn. */
1409 if (GET_CODE (src
) == MEM
)
1411 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
1412 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
1413 && cc_prev_status
.mdep
== XEXP (operands
[1], 0)))
1416 output_asm_insn ("orh %h1,%?r0,%?r31", xoperands
);
1418 split_template
= load_opcode (GET_MODE (dest
),
1419 "%L1(%?r31),%0", dest
);
1423 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
1424 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
1425 && cc_prev_status
.mdep
== XEXP (operands
[0], 0)))
1428 output_asm_insn ("orh %h0,%?r0,%?r31", xoperands
);
1430 split_template
= store_opcode (GET_MODE (dest
),
1431 "%r1,%L0(%?r31)", src
);
1434 /* Output the branch instruction next. */
1435 output_asm_insn (template, operands
);
1437 /* Now output the load or store.
1438 No need to do a CC_STATUS_INIT, because we are branching anyway. */
1439 output_asm_insn (split_template
, xoperands
);
1443 int insn_code_number
;
1444 rtx pat
= gen_rtx_SET (VOIDmode
, dest
, src
);
1445 rtx delay_insn
= gen_rtx_INSN (VOIDmode
, 0, 0, 0, pat
, -1, 0, 0);
1448 /* Output the branch instruction first. */
1449 output_asm_insn (template, operands
);
1451 /* Now recognize the insn which we put in its delay slot.
1452 We must do this after outputting the branch insn,
1453 since operands may just be a pointer to `recog_data.operand'. */
1454 INSN_CODE (delay_insn
) = insn_code_number
1455 = recog (pat
, delay_insn
, NULL_PTR
);
1456 if (insn_code_number
== -1)
1459 for (i
= 0; i
< insn_data
[insn_code_number
].n_operands
; i
++)
1461 if (GET_CODE (recog_data
.operand
[i
]) == SUBREG
)
1462 recog_data
.operand
[i
] = alter_subreg (recog_data
.operand
[i
]);
1465 insn_extract (delay_insn
);
1466 if (! constrain_operands (1))
1467 fatal_insn_not_found (delay_insn
);
1469 template = get_insn_template (insn_code_number
, delay_insn
);
1470 output_asm_insn (template, recog_data
.operand
);
1476 /* Output a newly constructed insn DELAY_INSN. */
1478 output_delay_insn (delay_insn
)
1481 const char *template;
1482 int insn_code_number
;
1485 /* Now recognize the insn which we put in its delay slot.
1486 We must do this after outputting the branch insn,
1487 since operands may just be a pointer to `recog_data.operand'. */
1488 insn_code_number
= recog_memoized (delay_insn
);
1489 if (insn_code_number
== -1)
1492 /* Extract the operands of this delay insn. */
1493 INSN_CODE (delay_insn
) = insn_code_number
;
1494 insn_extract (delay_insn
);
1496 /* It is possible that this insn has not been properly scanned by final
1497 yet. If this insn's operands don't appear in the peephole's
1498 actual operands, then they won't be fixed up by final, so we
1499 make sure they get fixed up here. -- This is a kludge. */
1500 for (i
= 0; i
< insn_data
[insn_code_number
].n_operands
; i
++)
1502 if (GET_CODE (recog_data
.operand
[i
]) == SUBREG
)
1503 recog_data
.operand
[i
] = alter_subreg (recog_data
.operand
[i
]);
1506 if (! constrain_operands (1))
1509 cc_prev_status
= cc_status
;
1511 /* Update `cc_status' for this instruction.
1512 The instruction's output routine may change it further.
1513 If the output routine for a jump insn needs to depend
1514 on the cc status, it should look at cc_prev_status. */
1516 NOTICE_UPDATE_CC (PATTERN (delay_insn
), delay_insn
);
1518 /* Now get the template for what this insn would
1519 have been, without the branch. */
1521 template = get_insn_template (insn_code_number
, delay_insn
);
1522 output_asm_insn (template, recog_data
.operand
);
1527 /* Special routine to convert an SFmode value represented as a
1528 CONST_DOUBLE into its equivalent unsigned long bit pattern.
1529 We convert the value from a double precision floating-point
1530 value to single precision first, and thence to a bit-wise
1531 equivalent unsigned long value. This routine is used when
1532 generating an immediate move of an SFmode value directly
1533 into a general register because the svr4 assembler doesn't
1534 grok floating literals in instruction operand contexts. */
1537 sfmode_constant_to_ulong (x
)
1541 union { float f
; unsigned long i
; } u2
;
1543 if (GET_CODE (x
) != CONST_DOUBLE
|| GET_MODE (x
) != SFmode
)
1546 #if TARGET_FLOAT_FORMAT != HOST_FLOAT_FORMAT
1547 error IEEE emulation needed
1549 REAL_VALUE_FROM_CONST_DOUBLE (d
, x
);
1554 /* This function generates the assembly code for function entry.
1555 The macro FUNCTION_PROLOGUE in i860.h is defined to call this function.
1557 ASM_FILE is a stdio stream to output the code to.
1558 SIZE is an int: how many units of temporary storage to allocate.
1560 Refer to the array `regs_ever_live' to determine which registers
1561 to save; `regs_ever_live[I]' is nonzero if register number I
1562 is ever used in the function. This macro is responsible for
1563 knowing which registers should not be saved even if used.
1565 NOTE: `frame_lower_bytes' is the count of bytes which will lie
1566 between the new `fp' value and the new `sp' value after the
1567 prologue is done. `frame_upper_bytes' is the count of bytes
1568 that will lie between the new `fp' and the *old* `sp' value
1569 after the new `fp' is setup (in the prologue). The upper
1570 part of each frame always includes at least 2 words (8 bytes)
1571 to hold the saved frame pointer and the saved return address.
1573 The svr4 ABI for the i860 now requires that the values of the
1574 stack pointer and frame pointer registers be kept aligned to
1575 16-byte boundaries at all times. We obey that restriction here.
1577 The svr4 ABI for the i860 is entirely vague when it comes to specifying
1578 exactly where the "preserved" registers should be saved. The native
1579 svr4 C compiler I now have doesn't help to clarify the requirements
1580 very much because it is plainly out-of-date and non-ABI-compliant
1581 (in at least one important way, i.e. how it generates function
1584 The native svr4 C compiler saves the "preserved" registers (i.e.
1585 r4-r15 and f2-f7) in the lower part of a frame (i.e. at negative
1586 offsets from the frame pointer).
1588 Previous versions of GCC also saved the "preserved" registers in the
1589 "negative" part of the frame, but they saved them using positive
1590 offsets from the (adjusted) stack pointer (after it had been adjusted
1591 to allocate space for the new frame). That's just plain wrong
1592 because if the current function calls alloca(), the stack pointer
1593 will get moved, and it will be impossible to restore the registers
1594 properly again after that.
1596 Both compilers handled parameter registers (i.e. r16-r27 and f8-f15)
1597 by copying their values either into various "preserved" registers or
1598 into stack slots in the lower part of the current frame (as seemed
1599 appropriate, depending upon subsequent usage of these values).
1601 Here we want to save the preserved registers at some offset from the
1602 frame pointer register so as to avoid any possible problems arising
1603 from calls to alloca(). We can either save them at small positive
1604 offsets from the frame pointer, or at small negative offsets from
1605 the frame pointer. If we save them at small negative offsets from
1606 the frame pointer (i.e. in the lower part of the frame) then we
1607 must tell the rest of GCC (via STARTING_FRAME_OFFSET) exactly how
1608 many bytes of space we plan to use in the lower part of the frame
1609 for this purpose. Since other parts of the compiler reference the
1610 value of STARTING_FRAME_OFFSET long before final() calls this function,
1611 we would have to go ahead and assume the worst-case storage requirements
1612 for saving all of the "preserved" registers (and use that number, i.e.
1613 `80', to define STARTING_FRAME_OFFSET) if we wanted to save them in
1614 the lower part of the frame. That could potentially be very wasteful,
1615 and that wastefulness could really hamper people compiling for embedded
1616 i860 targets with very tight limits on stack space. Thus, we choose
1617 here to save the preserved registers in the upper part of the
1618 frame, so that we can decide at the very last minute how much (or how
1619 little) space we must allocate for this purpose.
1621 To satisfy the needs of the svr4 ABI "tdesc" scheme, preserved
1622 registers must always be saved so that the saved values of registers
1623 with higher numbers are at higher addresses. We obey that restriction
1626 There are two somewhat different ways that you can generate prologues
1627 here... i.e. pedantically ABI-compliant, and the "other" way. The
1628 "other" way is more consistent with what is currently generated by the
1629 "native" svr4 C compiler for the i860. That's important if you want
1630 to use the current (as of 8/91) incarnation of svr4 SDB for the i860.
1631 The SVR4 SDB for the i860 insists on having function prologues be
1634 To get fully ABI-compliant prologues, define I860_STRICT_ABI_PROLOGUES
1635 in the i860svr4.h file. (By default this is *not* defined).
1637 The differences between the ABI-compliant and non-ABI-compliant prologues
1638 are that (a) the ABI version seems to require the use of *signed*
1639 (rather than unsigned) adds and subtracts, and (b) the ordering of
1640 the various steps (e.g. saving preserved registers, saving the
1641 return address, setting up the new frame pointer value) is different.
1643 For strict ABI compliance, it seems to be the case that the very last
1644 thing that is supposed to happen in the prologue is getting the frame
1645 pointer set to its new value (but only after everything else has
1646 already been properly setup). We do that here, but only if the symbol
1647 I860_STRICT_ABI_PROLOGUES is defined.
1650 #ifndef STACK_ALIGNMENT
1651 #define STACK_ALIGNMENT 16
1654 extern char call_used_regs
[];
1656 char *current_function_original_name
;
1658 static int must_preserve_r1
;
1659 static unsigned must_preserve_bytes
;
1662 function_prologue (asm_file
, local_bytes
)
1663 register FILE *asm_file
;
1664 register unsigned local_bytes
;
1666 register unsigned frame_lower_bytes
;
1667 register unsigned frame_upper_bytes
;
1668 register unsigned total_fsize
;
1669 register unsigned preserved_reg_bytes
= 0;
1670 register unsigned i
;
1671 register unsigned preserved_so_far
= 0;
1673 must_preserve_r1
= (optimize
< 2 || ! leaf_function_p ());
1674 must_preserve_bytes
= 4 + (must_preserve_r1
? 4 : 0);
1676 /* Count registers that need preserving. Ignore r0. It never needs
1679 for (i
= 1; i
< FIRST_PSEUDO_REGISTER
; i
++)
1681 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1682 preserved_reg_bytes
+= 4;
1685 /* Round-up the frame_lower_bytes so that it's a multiple of 16. */
1687 frame_lower_bytes
= (local_bytes
+ STACK_ALIGNMENT
- 1) & -STACK_ALIGNMENT
;
1689 /* The upper part of each frame will contain the saved fp,
1690 the saved r1, and stack slots for all of the other "preserved"
1691 registers that we find we will need to save & restore. */
1693 frame_upper_bytes
= must_preserve_bytes
+ preserved_reg_bytes
;
1695 /* Round-up the frame_upper_bytes so that it's a multiple of 16. */
1698 = (frame_upper_bytes
+ STACK_ALIGNMENT
- 1) & -STACK_ALIGNMENT
;
1700 total_fsize
= frame_upper_bytes
+ frame_lower_bytes
;
1702 #ifndef I860_STRICT_ABI_PROLOGUES
1704 /* There are two kinds of function prologues.
1705 You use the "small" version if the total frame size is
1706 small enough so that it can fit into an immediate 16-bit
1707 value in one instruction. Otherwise, you use the "large"
1708 version of the function prologue. */
1710 if (total_fsize
> 0x7fff)
1712 /* Adjust the stack pointer. The ABI sez to do this using `adds',
1713 but the native C compiler on svr4 uses `addu'. */
1715 fprintf (asm_file
, "\taddu -%d,%ssp,%ssp\n",
1716 frame_upper_bytes
, i860_reg_prefix
, i860_reg_prefix
);
1718 /* Save the old frame pointer. */
1720 fprintf (asm_file
, "\tst.l %sfp,0(%ssp)\n",
1721 i860_reg_prefix
, i860_reg_prefix
);
1723 /* Setup the new frame pointer. The ABI sez to do this after
1724 preserving registers (using adds), but that's not what the
1725 native C compiler on svr4 does. */
1727 fprintf (asm_file
, "\taddu 0,%ssp,%sfp\n",
1728 i860_reg_prefix
, i860_reg_prefix
);
1730 /* Get the value of frame_lower_bytes into r31. */
1732 fprintf (asm_file
, "\torh %d,%sr0,%sr31\n",
1733 frame_lower_bytes
>> 16, i860_reg_prefix
, i860_reg_prefix
);
1734 fprintf (asm_file
, "\tor %d,%sr31,%sr31\n",
1735 frame_lower_bytes
& 0xffff, i860_reg_prefix
, i860_reg_prefix
);
1737 /* Now re-adjust the stack pointer using the value in r31.
1738 The ABI sez to do this with `subs' but SDB may prefer `subu'. */
1740 fprintf (asm_file
, "\tsubu %ssp,%sr31,%ssp\n",
1741 i860_reg_prefix
, i860_reg_prefix
, i860_reg_prefix
);
1743 /* Preserve registers. The ABI sez to do this before setting
1744 up the new frame pointer, but that's not what the native
1745 C compiler on svr4 does. */
1747 for (i
= 1; i
< 32; i
++)
1748 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1749 fprintf (asm_file
, "\tst.l %s%s,%d(%sfp)\n",
1750 i860_reg_prefix
, reg_names
[i
],
1751 must_preserve_bytes
+ (4 * preserved_so_far
++),
1754 for (i
= 32; i
< 64; i
++)
1755 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1756 fprintf (asm_file
, "\tfst.l %s%s,%d(%sfp)\n",
1757 i860_reg_prefix
, reg_names
[i
],
1758 must_preserve_bytes
+ (4 * preserved_so_far
++),
1761 /* Save the return address. */
1763 if (must_preserve_r1
)
1764 fprintf (asm_file
, "\tst.l %sr1,4(%sfp)\n",
1765 i860_reg_prefix
, i860_reg_prefix
);
1769 /* Adjust the stack pointer. The ABI sez to do this using `adds',
1770 but the native C compiler on svr4 uses `addu'. */
1772 fprintf (asm_file
, "\taddu -%d,%ssp,%ssp\n",
1773 total_fsize
, i860_reg_prefix
, i860_reg_prefix
);
1775 /* Save the old frame pointer. */
1777 fprintf (asm_file
, "\tst.l %sfp,%d(%ssp)\n",
1778 i860_reg_prefix
, frame_lower_bytes
, i860_reg_prefix
);
1780 /* Setup the new frame pointer. The ABI sez to do this after
1781 preserving registers and after saving the return address,
1782 (and its saz to do this using adds), but that's not what the
1783 native C compiler on svr4 does. */
1785 fprintf (asm_file
, "\taddu %d,%ssp,%sfp\n",
1786 frame_lower_bytes
, i860_reg_prefix
, i860_reg_prefix
);
1788 /* Preserve registers. The ABI sez to do this before setting
1789 up the new frame pointer, but that's not what the native
1790 compiler on svr4 does. */
1792 for (i
= 1; i
< 32; i
++)
1793 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1794 fprintf (asm_file
, "\tst.l %s%s,%d(%sfp)\n",
1795 i860_reg_prefix
, reg_names
[i
],
1796 must_preserve_bytes
+ (4 * preserved_so_far
++),
1799 for (i
= 32; i
< 64; i
++)
1800 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1801 fprintf (asm_file
, "\tfst.l %s%s,%d(%sfp)\n",
1802 i860_reg_prefix
, reg_names
[i
],
1803 must_preserve_bytes
+ (4 * preserved_so_far
++),
1806 /* Save the return address. The ABI sez to do this earlier,
1807 and also via an offset from %sp, but the native C compiler
1808 on svr4 does it later (i.e. now) and uses an offset from
1811 if (must_preserve_r1
)
1812 fprintf (asm_file
, "\tst.l %sr1,4(%sfp)\n",
1813 i860_reg_prefix
, i860_reg_prefix
);
1816 #else /* defined(I860_STRICT_ABI_PROLOGUES) */
1818 /* There are two kinds of function prologues.
1819 You use the "small" version if the total frame size is
1820 small enough so that it can fit into an immediate 16-bit
1821 value in one instruction. Otherwise, you use the "large"
1822 version of the function prologue. */
1824 if (total_fsize
> 0x7fff)
1826 /* Adjust the stack pointer (thereby allocating a new frame). */
1828 fprintf (asm_file
, "\tadds -%d,%ssp,%ssp\n",
1829 frame_upper_bytes
, i860_reg_prefix
, i860_reg_prefix
);
1831 /* Save the caller's frame pointer. */
1833 fprintf (asm_file
, "\tst.l %sfp,0(%ssp)\n",
1834 i860_reg_prefix
, i860_reg_prefix
);
1836 /* Save return address. */
1838 if (must_preserve_r1
)
1839 fprintf (asm_file
, "\tst.l %sr1,4(%ssp)\n",
1840 i860_reg_prefix
, i860_reg_prefix
);
1842 /* Get the value of frame_lower_bytes into r31 for later use. */
1844 fprintf (asm_file
, "\torh %d,%sr0,%sr31\n",
1845 frame_lower_bytes
>> 16, i860_reg_prefix
, i860_reg_prefix
);
1846 fprintf (asm_file
, "\tor %d,%sr31,%sr31\n",
1847 frame_lower_bytes
& 0xffff, i860_reg_prefix
, i860_reg_prefix
);
1849 /* Now re-adjust the stack pointer using the value in r31. */
1851 fprintf (asm_file
, "\tsubs %ssp,%sr31,%ssp\n",
1852 i860_reg_prefix
, i860_reg_prefix
, i860_reg_prefix
);
1854 /* Pre-compute value to be used as the new frame pointer. */
1856 fprintf (asm_file
, "\tadds %ssp,%sr31,%sr31\n",
1857 i860_reg_prefix
, i860_reg_prefix
, i860_reg_prefix
);
1859 /* Preserve registers. */
1861 for (i
= 1; i
< 32; i
++)
1862 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1863 fprintf (asm_file
, "\tst.l %s%s,%d(%sr31)\n",
1864 i860_reg_prefix
, reg_names
[i
],
1865 must_preserve_bytes
+ (4 * preserved_so_far
++),
1868 for (i
= 32; i
< 64; i
++)
1869 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1870 fprintf (asm_file
, "\tfst.l %s%s,%d(%sr31)\n",
1871 i860_reg_prefix
, reg_names
[i
],
1872 must_preserve_bytes
+ (4 * preserved_so_far
++),
1875 /* Actually set the new value of the frame pointer. */
1877 fprintf (asm_file
, "\tmov %sr31,%sfp\n",
1878 i860_reg_prefix
, i860_reg_prefix
);
1882 /* Adjust the stack pointer. */
1884 fprintf (asm_file
, "\tadds -%d,%ssp,%ssp\n",
1885 total_fsize
, i860_reg_prefix
, i860_reg_prefix
);
1887 /* Save the caller's frame pointer. */
1889 fprintf (asm_file
, "\tst.l %sfp,%d(%ssp)\n",
1890 i860_reg_prefix
, frame_lower_bytes
, i860_reg_prefix
);
1892 /* Save the return address. */
1894 if (must_preserve_r1
)
1895 fprintf (asm_file
, "\tst.l %sr1,%d(%ssp)\n",
1896 i860_reg_prefix
, frame_lower_bytes
+ 4, i860_reg_prefix
);
1898 /* Preserve registers. */
1900 for (i
= 1; i
< 32; i
++)
1901 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1902 fprintf (asm_file
, "\tst.l %s%s,%d(%ssp)\n",
1903 i860_reg_prefix
, reg_names
[i
],
1904 frame_lower_bytes
+ must_preserve_bytes
+ (4 * preserved_so_far
++),
1907 for (i
= 32; i
< 64; i
++)
1908 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1909 fprintf (asm_file
, "\tfst.l %s%s,%d(%ssp)\n",
1910 i860_reg_prefix
, reg_names
[i
],
1911 frame_lower_bytes
+ must_preserve_bytes
+ (4 * preserved_so_far
++),
1914 /* Setup the new frame pointer. */
1916 fprintf (asm_file
, "\tadds %d,%ssp,%sfp\n",
1917 frame_lower_bytes
, i860_reg_prefix
, i860_reg_prefix
);
1919 #endif /* defined(I860_STRICT_ABI_PROLOGUES) */
1921 #ifdef ASM_OUTPUT_PROLOGUE_SUFFIX
1922 ASM_OUTPUT_PROLOGUE_SUFFIX (asm_file
);
1923 #endif /* defined(ASM_OUTPUT_PROLOGUE_SUFFIX) */
1926 /* This function generates the assembly code for function exit.
1927 The macro FUNCTION_EPILOGUE in i860.h is defined to call this function.
1929 ASM_FILE is a stdio stream to output the code to.
1930 SIZE is an int: how many units of temporary storage to allocate.
1932 The function epilogue should not depend on the current stack pointer!
1933 It should use the frame pointer only. This is mandatory because
1934 of alloca; we also take advantage of it to omit stack adjustments
1937 Note that when we go to restore the preserved register values we must
1938 not try to address their slots by using offsets from the stack pointer.
1939 That's because the stack pointer may have been moved during the function
1940 execution due to a call to alloca(). Rather, we must restore all
1941 preserved registers via offsets from the frame pointer value.
1943 Note also that when the current frame is being "popped" (by adjusting
1944 the value of the stack pointer) on function exit, we must (for the
1945 sake of alloca) set the new value of the stack pointer based upon
1946 the current value of the frame pointer. We can't just add what we
1947 believe to be the (static) frame size to the stack pointer because
1948 if we did that, and alloca() had been called during this function,
1949 we would end up returning *without* having fully deallocated all of
1950 the space grabbed by alloca. If that happened, and a function
1951 containing one or more alloca() calls was called over and over again,
1952 then the stack would grow without limit!
1954 Finally note that the epilogues generated here are completely ABI
1955 compliant. They go out of their way to insure that the value in
1956 the frame pointer register is never less than the value in the stack
1957 pointer register. It's not clear why this relationship needs to be
1958 maintained at all times, but maintaining it only costs one extra
1959 instruction, so what the hell.
1962 /* This corresponds to a version 4 TDESC structure. Lower numbered
1963 versions successively omit the last word of the structure. We
1964 don't try to handle version 5 here. */
1966 typedef struct TDESC_flags
{
1969 int callable_block
:1;
1971 int fregs
:6; /* fp regs 2-7 */
1972 int iregs
:16; /* regs 0-15 */
1975 typedef struct TDESC
{
1977 int integer_reg_offset
; /* same as must_preserve_bytes */
1978 int floating_point_reg_offset
;
1979 unsigned int positive_frame_size
; /* same as frame_upper_bytes */
1980 unsigned int negative_frame_size
; /* same as frame_lower_bytes */
1984 function_epilogue (asm_file
, local_bytes
)
1985 register FILE *asm_file
;
1986 register unsigned local_bytes
;
1988 register unsigned frame_upper_bytes
;
1989 register unsigned frame_lower_bytes
;
1990 register unsigned preserved_reg_bytes
= 0;
1991 register unsigned i
;
1992 register unsigned restored_so_far
= 0;
1993 register unsigned int_restored
;
1994 register unsigned mask
;
1995 unsigned intflags
=0;
1996 register TDESC_flags
*flags
= (TDESC_flags
*) &intflags
;
1999 flags
->reg_packing
= 1;
2000 flags
->iregs
= 8; /* old fp always gets saved */
2002 /* Round-up the frame_lower_bytes so that it's a multiple of 16. */
2004 frame_lower_bytes
= (local_bytes
+ STACK_ALIGNMENT
- 1) & -STACK_ALIGNMENT
;
2006 /* Count the number of registers that were preserved in the prologue.
2007 Ignore r0. It is never preserved. */
2009 for (i
= 1; i
< FIRST_PSEUDO_REGISTER
; i
++)
2011 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
2012 preserved_reg_bytes
+= 4;
2015 /* The upper part of each frame will contain only saved fp,
2016 the saved r1, and stack slots for all of the other "preserved"
2017 registers that we find we will need to save & restore. */
2019 frame_upper_bytes
= must_preserve_bytes
+ preserved_reg_bytes
;
2021 /* Round-up frame_upper_bytes so that t is a multiple of 16. */
2024 = (frame_upper_bytes
+ STACK_ALIGNMENT
- 1) & -STACK_ALIGNMENT
;
2026 /* Restore all of the "preserved" registers that need restoring. */
2030 for (i
= 1; i
< 32; i
++, mask
<<=1)
2031 if (regs_ever_live
[i
] && ! call_used_regs
[i
]) {
2032 fprintf (asm_file
, "\tld.l %d(%sfp),%s%s\n",
2033 must_preserve_bytes
+ (4 * restored_so_far
++),
2034 i860_reg_prefix
, i860_reg_prefix
, reg_names
[i
]);
2035 if (i
> 3 && i
< 16)
2036 flags
->iregs
|= mask
;
2039 int_restored
= restored_so_far
;
2042 for (i
= 32; i
< 64; i
++) {
2043 if (regs_ever_live
[i
] && ! call_used_regs
[i
]) {
2044 fprintf (asm_file
, "\tfld.l %d(%sfp),%s%s\n",
2045 must_preserve_bytes
+ (4 * restored_so_far
++),
2046 i860_reg_prefix
, i860_reg_prefix
, reg_names
[i
]);
2047 if (i
> 33 && i
< 40)
2048 flags
->fregs
|= mask
;
2050 if (i
> 33 && i
< 40)
2054 /* Get the value we plan to use to restore the stack pointer into r31. */
2056 fprintf (asm_file
, "\tadds %d,%sfp,%sr31\n",
2057 frame_upper_bytes
, i860_reg_prefix
, i860_reg_prefix
);
2059 /* Restore the return address and the old frame pointer. */
2061 if (must_preserve_r1
) {
2062 fprintf (asm_file
, "\tld.l 4(%sfp),%sr1\n",
2063 i860_reg_prefix
, i860_reg_prefix
);
2067 fprintf (asm_file
, "\tld.l 0(%sfp),%sfp\n",
2068 i860_reg_prefix
, i860_reg_prefix
);
2070 /* Return and restore the old stack pointer value. */
2072 fprintf (asm_file
, "\tbri %sr1\n\tmov %sr31,%ssp\n",
2073 i860_reg_prefix
, i860_reg_prefix
, i860_reg_prefix
);
2075 #ifdef OUTPUT_TDESC /* Output an ABI-compliant TDESC entry */
2076 if (! frame_lower_bytes
) {
2078 if (! frame_upper_bytes
) {
2080 if (restored_so_far
== int_restored
) /* No FP saves */
2084 assemble_name(asm_file
,current_function_original_name
);
2085 fputs(".TDESC:\n", asm_file
);
2086 fprintf(asm_file
, "%s 0x%0x\n", ASM_LONG
, intflags
);
2087 fprintf(asm_file
, "%s %d\n", ASM_LONG
,
2088 int_restored
? must_preserve_bytes
: 0);
2089 if (flags
->version
> 1) {
2090 fprintf(asm_file
, "%s %d\n", ASM_LONG
,
2091 (restored_so_far
== int_restored
) ? 0 : must_preserve_bytes
+
2092 (4 * int_restored
));
2093 if (flags
->version
> 2) {
2094 fprintf(asm_file
, "%s %d\n", ASM_LONG
, frame_upper_bytes
);
2095 if (flags
->version
> 3)
2096 fprintf(asm_file
, "%s %d\n", ASM_LONG
, frame_lower_bytes
);
2100 fprintf(asm_file
, "%s ", ASM_LONG
);
2101 assemble_name(asm_file
, current_function_original_name
);
2102 fprintf(asm_file
, "\n%s ", ASM_LONG
);
2103 assemble_name(asm_file
, current_function_original_name
);
2104 fputs(".TDESC\n", asm_file
);
2110 /* Expand a library call to __builtin_saveregs. */
2114 rtx fn
= gen_rtx_SYMBOL_REF (Pmode
, "__builtin_saveregs");
2115 rtx save
= gen_reg_rtx (Pmode
);
2116 rtx valreg
= LIBCALL_VALUE (Pmode
);
2119 /* The return value register overlaps the first argument register.
2120 Save and restore it around the call. */
2121 emit_move_insn (save
, valreg
);
2122 ret
= emit_library_call_value (fn
, NULL_RTX
, 1, Pmode
, 0);
2123 if (GET_CODE (ret
) != REG
|| REGNO (ret
) < FIRST_PSEUDO_REGISTER
)
2124 ret
= copy_to_reg (ret
);
2125 emit_move_insn (valreg
, save
);
2131 i860_build_va_list ()
2133 tree field_ireg_used
, field_freg_used
, field_reg_base
, field_mem_ptr
;
2136 record
= make_node (RECORD_TYPE
);
2138 field_ireg_used
= build_decl (FIELD_DECL
, get_identifier ("__ireg_used"),
2139 unsigned_type_node
);
2140 field_freg_used
= build_decl (FIELD_DECL
, get_identifier ("__freg_used"),
2141 unsigned_type_node
);
2142 field_reg_base
= build_decl (FIELD_DECL
, get_identifier ("__reg_base"),
2144 field_mem_ptr
= build_decl (FIELD_DECL
, get_identifier ("__mem_ptr"),
2147 DECL_FIELD_CONTEXT (field_ireg_used
) = record
;
2148 DECL_FIELD_CONTEXT (field_freg_used
) = record
;
2149 DECL_FIELD_CONTEXT (field_reg_base
) = record
;
2150 DECL_FIELD_CONTEXT (field_mem_ptr
) = record
;
2152 #ifdef I860_SVR4_VA_LIST
2153 TYPE_FIELDS (record
) = field_ireg_used
;
2154 TREE_CHAIN (field_ireg_used
) = field_freg_used
;
2155 TREE_CHAIN (field_freg_used
) = field_reg_base
;
2156 TREE_CHAIN (field_reg_base
) = field_mem_ptr
;
2158 TYPE_FIELDS (record
) = field_reg_base
;
2159 TREE_CHAIN (field_reg_base
) = field_mem_ptr
;
2160 TREE_CHAIN (field_mem_ptr
) = field_ireg_used
;
2161 TREE_CHAIN (field_ireg_used
) = field_freg_used
;
2164 layout_type (record
);
2169 i860_va_start (stdarg_p
, valist
, nextarg
)
2176 saveregs
= make_tree (build_pointer_type (va_list_type_node
),
2177 expand_builtin_saveregs ());
2178 saveregs
= build1 (INDIRECT_REF
, va_list_type_node
, saveregs
);
2182 tree field_ireg_used
, field_freg_used
, field_reg_base
, field_mem_ptr
;
2183 tree ireg_used
, freg_used
, reg_base
, mem_ptr
;
2185 #ifdef I860_SVR4_VA_LIST
2186 field_ireg_used
= TYPE_FIELDS (va_list_type_node
);
2187 field_freg_used
= TREE_CHAIN (field_ireg_used
);
2188 field_reg_base
= TREE_CHAIN (field_freg_used
);
2189 field_mem_ptr
= TREE_CHAIN (field_reg_base
);
2191 field_reg_base
= TYPE_FIELDS (va_list_type_node
);
2192 field_mem_ptr
= TREE_CHAIN (field_reg_base
);
2193 field_ireg_used
= TREE_CHAIN (field_mem_ptr
);
2194 field_freg_used
= TREE_CHAIN (field_ireg_used
);
2197 ireg_used
= build (COMPONENT_REF
, TREE_TYPE (field_ireg_used
),
2198 valist
, field_ireg_used
);
2199 freg_used
= build (COMPONENT_REF
, TREE_TYPE (field_freg_used
),
2200 valist
, field_freg_used
);
2201 reg_base
= build (COMPONENT_REF
, TREE_TYPE (field_reg_base
),
2202 valist
, field_reg_base
);
2203 mem_ptr
= build (COMPONENT_REF
, TREE_TYPE (field_mem_ptr
),
2204 valist
, field_mem_ptr
);
2206 t
= build_int_2 (current_function_args_info
.ints
, 0);
2207 t
= build (MODIFY_EXPR
, TREE_TYPE (ireg_used
), ireg_used
, t
);
2208 TREE_SIDE_EFFECTS (t
) = 1;
2209 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2211 t
= build_int_2 (ROUNDUP (current_function_args_info
.floats
, 8), 0);
2212 t
= build (MODIFY_EXPR
, TREE_TYPE (freg_used
), freg_used
, t
);
2213 TREE_SIDE_EFFECTS (t
) = 1;
2214 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2216 t
= build (COMPONENT_REF
, TREE_TYPE (field_reg_base
),
2217 saveregs
, field_reg_base
);
2218 t
= build (MODIFY_EXPR
, TREE_TYPE (reg_base
), reg_base
, t
);
2219 TREE_SIDE_EFFECTS (t
) = 1;
2220 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2222 t
= make_tree (ptr_type_node
, nextarg
);
2223 t
= build (MODIFY_EXPR
, TREE_TYPE (mem_ptr
), mem_ptr
, t
);
2224 TREE_SIDE_EFFECTS (t
) = 1;
2225 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2229 t
= build (MODIFY_EXPR
, va_list_type_node
, valist
, saveregs
);
2230 TREE_SIDE_EFFECTS (t
) = 1;
2231 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2235 #define NUM_PARM_FREGS 8
2236 #define NUM_PARM_IREGS 12
2237 #ifdef I860_SVR4_VARARGS
2238 #define FREG_OFFSET 0
2239 #define IREG_OFFSET (NUM_PARM_FREGS * UNITS_PER_WORD)
2241 #define FREG_OFFSET (NUM_PARM_IREGS * UNITS_PER_WORD)
2242 #define IREG_OFFSET 0
2246 i860_va_arg (valist
, type
)
2249 tree field_ireg_used
, field_freg_used
, field_reg_base
, field_mem_ptr
;
2250 tree type_ptr_node
, t
;
2251 rtx lab_over
= NULL_RTX
;
2253 HOST_WIDE_INT align
;
2255 #ifdef I860_SVR4_VA_LIST
2256 field_ireg_used
= TYPE_FIELDS (va_list_type_node
);
2257 field_freg_used
= TREE_CHAIN (field_ireg_used
);
2258 field_reg_base
= TREE_CHAIN (field_freg_used
);
2259 field_mem_ptr
= TREE_CHAIN (field_reg_base
);
2261 field_reg_base
= TYPE_FIELDS (va_list_type_node
);
2262 field_mem_ptr
= TREE_CHAIN (field_reg_base
);
2263 field_ireg_used
= TREE_CHAIN (field_mem_ptr
);
2264 field_freg_used
= TREE_CHAIN (field_ireg_used
);
2267 field_ireg_used
= build (COMPONENT_REF
, TREE_TYPE (field_ireg_used
),
2268 valist
, field_ireg_used
);
2269 field_freg_used
= build (COMPONENT_REF
, TREE_TYPE (field_freg_used
),
2270 valist
, field_freg_used
);
2271 field_reg_base
= build (COMPONENT_REF
, TREE_TYPE (field_reg_base
),
2272 valist
, field_reg_base
);
2273 field_mem_ptr
= build (COMPONENT_REF
, TREE_TYPE (field_mem_ptr
),
2274 valist
, field_mem_ptr
);
2276 ret
= gen_reg_rtx (Pmode
);
2277 type_ptr_node
= build_pointer_type (type
);
2279 if (! AGGREGATE_TYPE_P (type
))
2281 int nparm
, incr
, ofs
;
2285 if (FLOAT_TYPE_P (type
))
2287 field
= field_freg_used
;
2288 nparm
= NUM_PARM_FREGS
;
2294 field
= field_ireg_used
;
2295 nparm
= NUM_PARM_IREGS
;
2296 incr
= int_size_in_bytes (type
) / UNITS_PER_WORD
;
2300 lab_false
= gen_label_rtx ();
2301 lab_over
= gen_label_rtx ();
2303 emit_cmp_and_jump_insns (expand_expr (field
, NULL_RTX
, 0, 0),
2304 GEN_INT (nparm
- incr
), GT
, const0_rtx
,
2305 TYPE_MODE (TREE_TYPE (field
)),
2306 TREE_UNSIGNED (field
), 0, lab_false
);
2308 t
= fold (build (POSTINCREMENT_EXPR
, TREE_TYPE (field
), field
,
2309 build_int_2 (incr
, 0)));
2310 TREE_SIDE_EFFECTS (t
) = 1;
2312 t
= fold (build (MULT_EXPR
, TREE_TYPE (field
), field
,
2313 build_int_2 (UNITS_PER_WORD
, 0)));
2314 TREE_SIDE_EFFECTS (t
) = 1;
2316 t
= fold (build (PLUS_EXPR
, ptr_type_node
, field_reg_base
,
2317 fold (build (PLUS_EXPR
, TREE_TYPE (field
), t
,
2318 build_int_2 (ofs
, 0)))));
2319 TREE_SIDE_EFFECTS (t
) = 1;
2321 val
= expand_expr (t
, ret
, VOIDmode
, EXPAND_NORMAL
);
2323 emit_move_insn (ret
, val
);
2325 emit_jump_insn (gen_jump (lab_over
));
2327 emit_label (lab_false
);
2330 align
= TYPE_ALIGN (type
);
2331 if (align
< BITS_PER_WORD
)
2332 align
= BITS_PER_WORD
;
2333 align
/= BITS_PER_UNIT
;
2335 t
= build (PLUS_EXPR
, ptr_type_node
, field_mem_ptr
,
2336 build_int_2 (align
- 1, 0));
2337 t
= build (BIT_AND_EXPR
, ptr_type_node
, t
, build_int_2 (-align
, -1));
2339 val
= expand_expr (t
, ret
, VOIDmode
, EXPAND_NORMAL
);
2341 emit_move_insn (ret
, val
);
2343 t
= fold (build (PLUS_EXPR
, ptr_type_node
,
2344 make_tree (ptr_type_node
, ret
),
2345 build_int_2 (int_size_in_bytes (type
), 0)));
2346 t
= build (MODIFY_EXPR
, ptr_type_node
, field_mem_ptr
, t
);
2347 TREE_SIDE_EFFECTS (t
) = 1;
2348 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2351 emit_label (lab_over
);