recog.h (INSN_OUTPUT_FORMAT_*): New.
[gcc.git] / gcc / config / i860 / i860.c
1 /* Subroutines for insn-output.c for Intel 860
2 Copyright (C) 1989, 91, 97, 98, 1999 Free Software Foundation, Inc.
3 Derived from sparc.c.
4
5 Written by Richard Stallman (rms@ai.mit.edu).
6
7 Hacked substantially by Ron Guilmette (rfg@netcom.com) to cater
8 to the whims of the System V Release 4 assembler.
9
10 This file is part of GNU CC.
11
12 GNU CC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2, or (at your option)
15 any later version.
16
17 GNU CC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with GNU CC; see the file COPYING. If not, write to
24 the Free Software Foundation, 59 Temple Place - Suite 330,
25 Boston, MA 02111-1307, USA. */
26
27
28 #include "config.h"
29 #include "system.h"
30 #include "flags.h"
31 #include "rtl.h"
32 #include "tree.h"
33 #include "regs.h"
34 #include "hard-reg-set.h"
35 #include "real.h"
36 #include "insn-config.h"
37 #include "conditions.h"
38 #include "insn-flags.h"
39 #include "output.h"
40 #include "recog.h"
41 #include "insn-attr.h"
42 #include "function.h"
43 #include "expr.h"
44
45 static rtx find_addr_reg ();
46
47 #ifndef I860_REG_PREFIX
48 #define I860_REG_PREFIX ""
49 #endif
50
51 char *i860_reg_prefix = I860_REG_PREFIX;
52
53 /* Save information from a "cmpxx" operation until the branch is emitted. */
54
55 rtx i860_compare_op0, i860_compare_op1;
56 \f
/* Return non-zero if this pattern can be evaluated safely, even if it
   was not asked for -- i.e. whether a SET with this source may be
   executed speculatively (presumably when filling a branch delay slot;
   see operand_clobbered_before_used_after).  MODE is the mode of the
   value being computed.  */
int
safe_insn_src_p (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* Just experimenting.  */

  /* No floating point src is safe if it contains an arithmetic
     operation, since that operation may trap.  */
  switch (GET_CODE (op))
    {
    case CONST_INT:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
      return 1;

    case REG:
      return 1;

    case MEM:
      /* A load is safe only from a link-time-constant address;
	 an arbitrary address might fault.  */
      return CONSTANT_ADDRESS_P (XEXP (op, 0));

      /* We never need to negate or complement constants.  */
    case NEG:
      return (mode != SFmode && mode != DFmode);
    case NOT:
    case ZERO_EXTEND:
      return 1;

    case EQ:
    case NE:
    case LT:
    case GT:
    case LE:
    case GE:
    case LTU:
    case GTU:
    case LEU:
    case GEU:
    case MINUS:
    case PLUS:
      /* Integer compare/add/subtract cannot trap; FP versions may.  */
      return (mode != SFmode && mode != DFmode);
    case AND:
    case IOR:
    case XOR:
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* Unsafe if an operand is a constant too wide for one insn,
	 since materializing it would cost extra instructions.  */
      if ((GET_CODE (XEXP (op, 0)) == CONST_INT && ! SMALL_INT (XEXP (op, 0)))
	  || (GET_CODE (XEXP (op, 1)) == CONST_INT && ! SMALL_INT (XEXP (op, 1))))
	return 0;
      return 1;

    default:
      return 0;
    }
}
117
/* Return 1 if REG is clobbered in IN.
   Return 2 if REG is used in IN.
   Return 3 if REG is both used and clobbered in IN.
   Return 0 if neither.

   REG is a hard register rtx; IN is a single SET, CLOBBER, or other
   rtx (or 0, which yields 0).

   NOTE(review): HARD_REGNO_NREGS is conventionally invoked as
   (REGNO, MODE); here it is passed the REG rtx itself -- presumably
   the i860 definition of the macro tolerates that.  Confirm against
   i860.h.  */

static int
reg_clobbered_p (reg, in)
     rtx reg;
     rtx in;
{
  register enum rtx_code code;

  if (in == 0)
    return 0;

  code = GET_CODE (in);

  if (code == SET || code == CLOBBER)
    {
      /* For a CLOBBER, SET_DEST is simply operand 0.  */
      rtx dest = SET_DEST (in);
      int set = 0;
      int used = 0;

      /* Strip wrappers to reach the register actually being written.  */
      while (GET_CODE (dest) == STRICT_LOW_PART
	     || GET_CODE (dest) == SUBREG
	     || GET_CODE (dest) == SIGN_EXTRACT
	     || GET_CODE (dest) == ZERO_EXTRACT)
	dest = XEXP (dest, 0);

      if (dest == reg)
	set = 1;
      else if (GET_CODE (dest) == REG
	       && refers_to_regno_p (REGNO (reg),
				     REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
				     SET_DEST (in), 0))
	{
	  set = 1;
	  /* Anything that sets just part of the register
	     is considered using as well as setting it.
	     But note that a straight SUBREG of a single-word value
	     clobbers the entire value.  */
	  if (dest != SET_DEST (in)
	      && ! (GET_CODE (SET_DEST (in)) == SUBREG
		    || UNITS_PER_WORD >= GET_MODE_SIZE (GET_MODE (dest))))
	    used = 1;
	}

      if (code == SET)
	{
	  /* If REG is the destination, only the source can also use it;
	     otherwise scan the whole SET for a use.  */
	  if (set)
	    used = refers_to_regno_p (REGNO (reg),
				      REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
				      SET_SRC (in), 0);
	  else
	    used = refers_to_regno_p (REGNO (reg),
				      REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
				      in, 0);
	}

      /* Encode: bit 0 = clobbered, bit 1 = used.  */
      return set + used * 2;
    }

  /* Not a SET or CLOBBER: any reference at all counts as a use.  */
  if (refers_to_regno_p (REGNO (reg),
			 REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
			 in, 0))
    return 2;
  return 0;
}
186
/* Return non-zero if OP can be written to without screwing up
   GCC's model of what's going on.  It is assumed that this operand
   appears in the dest position of a SET insn in a conditional
   branch's delay slot.  AFTER is the label to start looking from.  */
int
operand_clobbered_before_used_after (op, after)
     rtx op;
     rtx after;
{
  /* Just experimenting.  */
  if (GET_CODE (op) == CC0)
    return 1;
  if (GET_CODE (op) == REG)
    {
      rtx insn;

      /* The stack pointer is always live -- never safe to clobber.  */
      if (op == stack_pointer_rtx)
	return 0;

      /* Scan forward from the label, to see if the value of OP
	 is clobbered before the first use.  */

      for (insn = NEXT_INSN (after); insn; insn = NEXT_INSN (insn))
	{
	  if (GET_CODE (insn) == NOTE)
	    continue;
	  if (GET_CODE (insn) == INSN
	      || GET_CODE (insn) == JUMP_INSN
	      || GET_CODE (insn) == CALL_INSN)
	    {
	      switch (reg_clobbered_p (op, PATTERN (insn)))
		{
		default:
		  /* Used (2) or used-and-clobbered (3): unsafe.  */
		  return 0;
		case 1:
		  /* Clobbered before any use: safe to scribble on.  */
		  return 1;
		case 0:
		  /* Neither used nor set: keep scanning.  */
		  break;
		}
	    }
	  /* If we reach another label without clobbering OP,
	     then we cannot safely write it here.  */
	  else if (GET_CODE (insn) == CODE_LABEL)
	    return 0;
	  if (GET_CODE (insn) == JUMP_INSN)
	    {
	      if (condjump_p (insn))
		return 0;
	      /* This is a jump insn which has already
		 been mangled.  We can't tell what it does.  */
	      if (GET_CODE (PATTERN (insn)) == PARALLEL)
		return 0;
	      if (! JUMP_LABEL (insn))
		return 0;
	      /* Keep following jumps.  */
	      insn = JUMP_LABEL (insn);
	    }
	}
      /* Fell off the end of the insn chain without seeing a use.  */
      return 1;
    }

  /* In both of these cases, the first insn executed
     for this op will be a orh whatever%h,%?r0,%?r31,
     which is tolerable.  */
  if (GET_CODE (op) == MEM)
    return (CONSTANT_ADDRESS_P (XEXP (op, 0)));

  return 0;
}
256
/* Return non-zero if this pattern, as a source to a "SET",
   is known to yield an instruction of unit size.  MODE is the
   mode of the value being computed.  */
int
single_insn_src_p (op, mode)
     rtx op;
     enum machine_mode mode;
{
  switch (GET_CODE (op))
    {
    case CONST_INT:
      /* This is not always a single insn src, technically,
	 but output_delayed_branch knows how to deal with it.  */
      return 1;

    case SYMBOL_REF:
    case CONST:
      /* This is not a single insn src, technically,
	 but output_delayed_branch knows how to deal with it.  */
      return 1;

    case REG:
      return 1;

    case MEM:
      return 1;

      /* We never need to negate or complement constants.  */
    case NEG:
      return (mode != DFmode);
    case NOT:
    case ZERO_EXTEND:
      return 1;

    case PLUS:
    case MINUS:
      /* Detect cases that require multiple instructions.  */
      if (CONSTANT_P (XEXP (op, 1))
	  && !(GET_CODE (XEXP (op, 1)) == CONST_INT
	       && SMALL_INT (XEXP (op, 1))))
	return 0;
      /* Falls through to the FP-mode check shared with the compares.  */
    case EQ:
    case NE:
    case LT:
    case GT:
    case LE:
    case GE:
    case LTU:
    case GTU:
    case LEU:
    case GEU:
      /* Not doing floating point, since they probably
	 take longer than the branch slot they might fill.  */
      return (mode != SFmode && mode != DFmode);

    case AND:
      if (GET_CODE (XEXP (op, 1)) == NOT)
	{
	  /* andnot form: only a small or "round" (low 16 bits zero)
	     constant fits in a single instruction.  */
	  rtx arg = XEXP (XEXP (op, 1), 0);
	  if (CONSTANT_P (arg)
	      && !(GET_CODE (arg) == CONST_INT
		   && (SMALL_INT (arg)
		       || (INTVAL (arg) & 0xffff) == 0)))
	    return 0;
	}
      /* Falls through to the shared constant-size check below.  */
    case IOR:
    case XOR:
      /* Both small and round numbers take one instruction;
	 others take two.  */
      if (CONSTANT_P (XEXP (op, 1))
	  && !(GET_CODE (XEXP (op, 1)) == CONST_INT
	       && (SMALL_INT (XEXP (op, 1))
		   || (INTVAL (XEXP (op, 1)) & 0xffff) == 0)))
	return 0;
      /* Falls through -- shifts are always a single instruction.  */

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      return 1;

    case SUBREG:
      /* Only a low-part SUBREG of a single-insn source is itself
	 a single-insn source.  */
      if (SUBREG_WORD (op) != 0)
	return 0;
      return single_insn_src_p (SUBREG_REG (op), mode);

      /* Not doing floating point, since they probably
	 take longer than the branch slot they might fill.  */
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
    case FLOAT:
    case FIX:
    case UNSIGNED_FLOAT:
    case UNSIGNED_FIX:
      return 0;

    default:
      return 0;
    }
}
355 \f
356 /* Return non-zero only if OP is a register of mode MODE,
357 or const0_rtx. */
358 int
359 reg_or_0_operand (op, mode)
360 rtx op;
361 enum machine_mode mode;
362 {
363 return (op == const0_rtx || register_operand (op, mode)
364 || op == CONST0_RTX (mode));
365 }
366
367 /* Return truth value of whether OP can be used as an operands in a three
368 address add/subtract insn (such as add %o1,7,%l2) of mode MODE. */
369
370 int
371 arith_operand (op, mode)
372 rtx op;
373 enum machine_mode mode;
374 {
375 return (register_operand (op, mode)
376 || (GET_CODE (op) == CONST_INT && SMALL_INT (op)));
377 }
378
379 /* Return 1 if OP is a valid first operand for a logical insn of mode MODE. */
380
381 int
382 logic_operand (op, mode)
383 rtx op;
384 enum machine_mode mode;
385 {
386 return (register_operand (op, mode)
387 || (GET_CODE (op) == CONST_INT && LOGIC_INT (op)));
388 }
389
390 /* Return 1 if OP is a valid first operand for a shift insn of mode MODE. */
391
392 int
393 shift_operand (op, mode)
394 rtx op;
395 enum machine_mode mode;
396 {
397 return (register_operand (op, mode)
398 || (GET_CODE (op) == CONST_INT));
399 }
400
401 /* Return 1 if OP is a valid first operand for either a logical insn
402 or an add insn of mode MODE. */
403
404 int
405 compare_operand (op, mode)
406 rtx op;
407 enum machine_mode mode;
408 {
409 return (register_operand (op, mode)
410 || (GET_CODE (op) == CONST_INT && SMALL_INT (op) && LOGIC_INT (op)));
411 }
412
413 /* Return truth value of whether OP can be used as the 5-bit immediate
414 operand of a bte or btne insn. */
415
416 int
417 bte_operand (op, mode)
418 rtx op;
419 enum machine_mode mode;
420 {
421 return (register_operand (op, mode)
422 || (GET_CODE (op) == CONST_INT
423 && (unsigned) INTVAL (op) < 0x20));
424 }
425
426 /* Return 1 if OP is an indexed memory reference of mode MODE. */
427
428 int
429 indexed_operand (op, mode)
430 rtx op;
431 enum machine_mode mode;
432 {
433 return (GET_CODE (op) == MEM && GET_MODE (op) == mode
434 && GET_CODE (XEXP (op, 0)) == PLUS
435 && GET_MODE (XEXP (op, 0)) == SImode
436 && register_operand (XEXP (XEXP (op, 0), 0), SImode)
437 && register_operand (XEXP (XEXP (op, 0), 1), SImode));
438 }
439
440 /* Return 1 if OP is a suitable source operand for a load insn
441 with mode MODE. */
442
443 int
444 load_operand (op, mode)
445 rtx op;
446 enum machine_mode mode;
447 {
448 return (memory_operand (op, mode) || indexed_operand (op, mode));
449 }
450
451 /* Return truth value of whether OP is a integer which fits the
452 range constraining immediate operands in add/subtract insns. */
453
454 int
455 small_int (op, mode)
456 rtx op;
457 enum machine_mode mode;
458 {
459 return (GET_CODE (op) == CONST_INT && SMALL_INT (op));
460 }
461
462 /* Return truth value of whether OP is a integer which fits the
463 range constraining immediate operands in logic insns. */
464
465 int
466 logic_int (op, mode)
467 rtx op;
468 enum machine_mode mode;
469 {
470 return (GET_CODE (op) == CONST_INT && LOGIC_INT (op));
471 }
472
473 /* Test for a valid operand for a call instruction.
474 Don't allow the arg pointer register or virtual regs
475 since they may change into reg + const, which the patterns
476 can't handle yet. */
477
478 int
479 call_insn_operand (op, mode)
480 rtx op;
481 enum machine_mode mode;
482 {
483 if (GET_CODE (op) == MEM
484 && (CONSTANT_ADDRESS_P (XEXP (op, 0))
485 || (GET_CODE (XEXP (op, 0)) == REG
486 && XEXP (op, 0) != arg_pointer_rtx
487 && !(REGNO (XEXP (op, 0)) >= FIRST_PSEUDO_REGISTER
488 && REGNO (XEXP (op, 0)) <= LAST_VIRTUAL_REGISTER))))
489 return 1;
490 return 0;
491 }
492 \f
/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.

   May emit an "orh" insn directly (to load the high half of a
   constant address into r31) and records in cc_status that r31
   holds that high half, so consecutive references to the same
   address can share the orh.  */

static char *
singlemove_string (operands)
     rtx *operands;
{
  if (GET_CODE (operands[0]) == MEM)
    {
      /* Storing to memory; a memory-to-memory move is not handled.  */
      if (GET_CODE (operands[1]) != MEM)
	if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
	  {
	    /* Skip the orh if r31 already holds the high half of
	       this exact address (tracked via cc_status.mdep).  */
	    if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
		   && (cc_prev_status.flags & CC_HI_R31_ADJ)
		   && cc_prev_status.mdep == XEXP (operands[0], 0)))
	      {
		CC_STATUS_INIT;
		output_asm_insn ("orh %h0,%?r0,%?r31", operands);
	      }
	    cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
	    cc_status.mdep = XEXP (operands[0], 0);
	    return "st.l %r1,%L0(%?r31)";
	  }
	else
	  return "st.l %r1,%0";
      else
	abort ();
      /* Dead code below: a former mem-to-mem path bouncing the value
	 through f0 (SFmode reg 32).  Kept for reference, compiled out.  */
#if 0
      {
	rtx xoperands[2];

	cc_status.flags &= ~CC_F0_IS_0;
	xoperands[0] = gen_rtx_REG (SFmode, 32);
	xoperands[1] = operands[1];
	output_asm_insn (singlemove_string (xoperands), xoperands);
	xoperands[1] = xoperands[0];
	xoperands[0] = operands[0];
	output_asm_insn (singlemove_string (xoperands), xoperands);
	return "";
      }
#endif
    }
  if (GET_CODE (operands[1]) == MEM)
    {
      /* Loading from memory; same r31 high-half caching as above.  */
      if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
	{
	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
		 && cc_prev_status.mdep == XEXP (operands[1], 0)))
	    {
	      CC_STATUS_INIT;
	      output_asm_insn ("orh %h1,%?r0,%?r31", operands);
	    }
	  cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
	  cc_status.mdep = XEXP (operands[1], 0);
	  return "ld.l %L1(%?r31),%0";
	}
      return "ld.l %m1,%0";
    }
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      /* Pick the cheapest single insn for the constant's bit pattern:
	 zero, low-16-bit, small negative, or high-16-bit-only.  */
      if (operands[1] == const0_rtx)
	return "mov %?r0,%0";
      if((INTVAL (operands[1]) & 0xffff0000) == 0)
	return "or %L1,%?r0,%0";
      if((INTVAL (operands[1]) & 0xffff8000) == 0xffff8000)
	return "adds %1,%?r0,%0";
      if((INTVAL (operands[1]) & 0x0000ffff) == 0)
	return "orh %H1,%?r0,%0";
    }
  /* Fallback: register-to-register (or multi-insn constant) move.  */
  return "mov %1,%0";
}
565 \f
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  Returns the final insn template (possibly
   "") after emitting any earlier insns directly.  */

char *
output_move_double (operands)
     rtx *operands;
{
  /* Classification of each operand: register, offsettable memory,
     other memory, push/pop (unused here), constant, or invalid.  */
  enum { REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP } optype0, optype1;
  rtx latehalf[2];
  rtx addreg0 = 0, addreg1 = 0;
  int highest_first = 0;
  int no_addreg1_decrement = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not
     supposed to allow to happen.  Abort if we get one,
     because generating code for these cases is painful.  */

  if (optype0 == RNDOP || optype1 == RNDOP)
    abort ();

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* ??? Perhaps in some cases move double words
     if there is a spare pair of floating regs.  */

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adj_offsettable_operand (operands[0], 4);
  else
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adj_offsettable_operand (operands[1], 4);
  else if (optype1 == CNSTOP)
    {
      /* Split a double constant into its two word halves;
	 any other constant has a zero high word.  */
      if (GET_CODE (operands[1]) == CONST_DOUBLE)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else if (CONSTANT_P (operands[1]))
	latehalf[1] = const0_rtx;
    }
  else
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     RMS says "This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance."

     but it happens on the sparc when loading parameter registers,
     so I am going to define that circumstance, and make it work
     as expected.  */

  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (latehalf[1]))
    {
      CC_STATUS_PARTIAL_INIT;
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	output_asm_insn ("adds 0x4,%0,%0", &addreg0);
      if (addreg1)
	output_asm_insn ("adds 0x4,%0,%0", &addreg1);

      /* Do that word.  */
      output_asm_insn (singlemove_string (latehalf), latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	output_asm_insn ("adds -0x4,%0,%0", &addreg0);
      if (addreg1)
	output_asm_insn ("adds -0x4,%0,%0", &addreg1);

      /* Do low-numbered word.  */
      return singlemove_string (operands);
    }
  else if (optype0 == REGOP && optype1 != REGOP
	   && reg_overlap_mentioned_p (operands[0], operands[1]))
    {
      /* If both halves of dest are used in the src memory address,
	 add the two regs and put them in the low reg (operands[0]).
	 Then it works to load latehalf first.  */
      if (reg_mentioned_p (operands[0], XEXP (operands[1], 0))
	  && reg_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  rtx xops[2];
	  xops[0] = latehalf[0];
	  xops[1] = operands[0];
	  output_asm_insn ("adds %1,%0,%1", xops);
	  operands[1] = gen_rtx_MEM (DImode, operands[0]);
	  latehalf[1] = adj_offsettable_operand (operands[1], 4);
	  addreg1 = 0;
	  highest_first = 1;
	}
      /* Only one register in the dest is used in the src memory address,
	 and this is the first register of the dest, so we want to do
	 the late half first here also.  */
      else if (! reg_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	highest_first = 1;
      /* Only one register in the dest is used in the src memory address,
	 and this is the second register of the dest, so we want to do
	 the late half last.  If addreg1 is set, and addreg1 is the same
	 register as latehalf, then we must suppress the trailing decrement,
	 because it would clobber the value just loaded.  */
      else if (addreg1 && reg_mentioned_p (addreg1, latehalf[0]))
	no_addreg1_decrement = 1;
    }

  /* Normal case: do the two words, low-numbered first.
     Overlap case (highest_first set): do high-numbered word first.  */

  if (! highest_first)
    output_asm_insn (singlemove_string (operands), operands);

  CC_STATUS_PARTIAL_INIT;
  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("adds 0x4,%0,%0", &addreg0);
  if (addreg1)
    output_asm_insn ("adds 0x4,%0,%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    output_asm_insn ("adds -0x4,%0,%0", &addreg0);
  if (addreg1 && !no_addreg1_decrement)
    output_asm_insn ("adds -0x4,%0,%0", &addreg1);

  if (highest_first)
    output_asm_insn (singlemove_string (operands), operands);

  return "";
}
742 \f
/* Output assembler code for a doubleword floating-point move with
   operands OPERANDS.  At least one operand must be a floating-point
   register; the other may be an FP register, a general register pair,
   a memory reference, or (as source) a zero constant.  Returns the
   final insn template after possibly emitting earlier insns.  */
char *
output_fp_move_double (operands)
     rtx *operands;
{
  /* If the source operand is any sort of zero, use f0 instead.  */

  if (operands[1] == CONST0_RTX (GET_MODE (operands[1])))
    operands[1] = gen_rtx_REG (DFmode, F0_REGNUM);

  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1]))
	return "fmov.dd %1,%0";
      if (GET_CODE (operands[1]) == REG)
	{
	  /* General regs to FP regs: transfer one word at a time,
	     emitting the first ixfr here and returning the second.  */
	  output_asm_insn ("ixfr %1,%0", operands);
	  operands[0] = gen_rtx_REG (VOIDmode, REGNO (operands[0]) + 1);
	  operands[1] = gen_rtx_REG (VOIDmode, REGNO (operands[1]) + 1);
	  return "ixfr %1,%0";
	}
      if (operands[1] == CONST0_RTX (DFmode))
	return "fmov.dd f0,%0";
      if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
	{
	  /* Load from a constant address: reuse r31's cached high half
	     if cc_status says it is still valid (see singlemove_string).  */
	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
		 && cc_prev_status.mdep == XEXP (operands[1], 0)))
	    {
	      CC_STATUS_INIT;
	      output_asm_insn ("orh %h1,%?r0,%?r31", operands);
	    }
	  cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
	  cc_status.mdep = XEXP (operands[1], 0);
	  return "fld.d %L1(%?r31),%0";
	}
      return "fld.d %1,%0";
    }
  else if (FP_REG_P (operands[1]))
    {
      if (GET_CODE (operands[0]) == REG)
	{
	  /* FP regs to general regs: one word at a time, as above.  */
	  output_asm_insn ("fxfr %1,%0", operands);
	  operands[0] = gen_rtx_REG (VOIDmode, REGNO (operands[0]) + 1);
	  operands[1] = gen_rtx_REG (VOIDmode, REGNO (operands[1]) + 1);
	  return "fxfr %1,%0";
	}
      if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
	{
	  /* Store to a constant address, with the same r31 caching.  */
	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
		 && cc_prev_status.mdep == XEXP (operands[0], 0)))
	    {
	      CC_STATUS_INIT;
	      output_asm_insn ("orh %h0,%?r0,%?r31", operands);
	    }
	  cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
	  cc_status.mdep = XEXP (operands[0], 0);
	  return "fst.d %1,%L0(%?r31)";
	}
      return "fst.d %1,%0";
    }
  else
    abort ();
  /* NOTREACHED */
  return NULL;
}
809 \f
810 /* Return a REG that occurs in ADDR with coefficient 1.
811 ADDR can be effectively incremented by incrementing REG. */
812
813 static rtx
814 find_addr_reg (addr)
815 rtx addr;
816 {
817 while (GET_CODE (addr) == PLUS)
818 {
819 if (GET_CODE (XEXP (addr, 0)) == REG)
820 addr = XEXP (addr, 0);
821 else if (GET_CODE (XEXP (addr, 1)) == REG)
822 addr = XEXP (addr, 1);
823 else if (CONSTANT_P (XEXP (addr, 0)))
824 addr = XEXP (addr, 1);
825 else if (CONSTANT_P (XEXP (addr, 1)))
826 addr = XEXP (addr, 0);
827 else
828 abort ();
829 }
830 if (GET_CODE (addr) == REG)
831 return addr;
832 abort ();
833 /* NOTREACHED */
834 return NULL;
835 }
836
/* Return a template for a load instruction with mode MODE and
   arguments from the string ARGS.  REG is the destination register,
   used only to choose between integer and FP load opcodes.

   This string is in static storage, so each call overwrites the
   previous result.  */

static char *
load_opcode (mode, args, reg)
     enum machine_mode mode;
     char *args;
     rtx reg;
{
  static char buf[30];
  char *opcode;

  switch (mode)
    {
    case QImode:
      opcode = "ld.b";
      break;

    case HImode:
      opcode = "ld.s";
      break;

    case SImode:
    case SFmode:
      if (FP_REG_P (reg))
	opcode = "fld.l";
      else
	opcode = "ld.l";
      break;

    case DImode:
      /* DImode loads are only supported into FP registers...  */
      if (!FP_REG_P (reg))
	abort ();
      /* ... and then deliberately fall through to use fld.d.  */
    case DFmode:
      opcode = "fld.d";
      break;

    default:
      abort ();
    }

  sprintf (buf, "%s %s", opcode, args);
  return buf;
}
883
/* Return a template for a store instruction with mode MODE and
   arguments from the string ARGS.  REG is the source register,
   used only to choose between integer and FP store opcodes.

   This string is in static storage, so each call overwrites the
   previous result.  */

static char *
store_opcode (mode, args, reg)
     enum machine_mode mode;
     char *args;
     rtx reg;
{
  static char buf[30];
  char *opcode;

  switch (mode)
    {
    case QImode:
      opcode = "st.b";
      break;

    case HImode:
      opcode = "st.s";
      break;

    case SImode:
    case SFmode:
      if (FP_REG_P (reg))
	opcode = "fst.l";
      else
	opcode = "st.l";
      break;

    case DImode:
      /* DImode stores are only supported from FP registers...  */
      if (!FP_REG_P (reg))
	abort ();
      /* ... and then deliberately fall through to use fst.d.  */
    case DFmode:
      opcode = "fst.d";
      break;

    default:
      abort ();
    }

  sprintf (buf, "%s %s", opcode, args);
  return buf;
}
930 \f
/* Output a store-in-memory whose operands are OPERANDS[0,1].
   OPERANDS[0] is a MEM, and OPERANDS[1] is a reg or zero.

   This function returns a template for an insn.
   This is in static storage.

   It may also output some insns directly.
   It may alter the values of operands[0] and operands[1].  */

char *
output_store (operands)
     rtx *operands;
{
  enum machine_mode mode = GET_MODE (operands[0]);
  rtx address = XEXP (operands[0], 0);
  char *string;

  /* Record that, after this insn, r31 holds the high half of ADDRESS.
     The test against cc_prev_status below still sees the pre-insn
     state, so setting cc_status first is harmless.  */
  cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
  cc_status.mdep = address;

  /* Emit the orh only if r31 does not already hold this high half.  */
  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
	 && (cc_prev_status.flags & CC_HI_R31_ADJ)
	 && address == cc_prev_status.mdep))
    {
      CC_STATUS_INIT;
      output_asm_insn ("orh %h0,%?r0,%?r31", operands);
      cc_prev_status.mdep = address;
    }

  /* Store zero in two parts when appropriate.  */
  if (mode == DFmode && operands[1] == CONST0_RTX (DFmode))
    return store_opcode (DFmode, "%r1,%L0(%?r31)", operands[1]);

  /* Code below isn't smart enough to move a doubleword in two parts,
     so use output_move_double to do that in the cases that require it.  */
  if ((mode == DImode || mode == DFmode)
      && ! FP_REG_P (operands[1]))
    return output_move_double (operands);

  return store_opcode (mode, "%r1,%L0(%?r31)", operands[1]);
}
972
/* Output a load-from-memory whose operands are OPERANDS[0,1].
   OPERANDS[0] is a reg, and OPERANDS[1] is a mem.

   This function returns a template for an insn.
   This is in static storage.

   It may also output some insns directly.
   It may alter the values of operands[0] and operands[1].  */

char *
output_load (operands)
     rtx *operands;
{
  enum machine_mode mode = GET_MODE (operands[0]);
  rtx address = XEXP (operands[1], 0);

  /* We don't bother trying to see if we know %hi(address).
     This is because we are doing a load, and if we know the
     %hi value, we probably also know that value in memory.  */
  cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
  cc_status.mdep = address;

  /* NOTE(review): the last conjunct compares cc_prev_status.mdep with
     cc_status.mdep, which was just assigned ADDRESS above -- it appears
     redundant with the preceding "address == cc_prev_status.mdep" test.
     Kept as-is; confirm before simplifying.  */
  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
	 && (cc_prev_status.flags & CC_HI_R31_ADJ)
	 && address == cc_prev_status.mdep
	 && cc_prev_status.mdep == cc_status.mdep))
    {
      CC_STATUS_INIT;
      output_asm_insn ("orh %h1,%?r0,%?r31", operands);
      cc_prev_status.mdep = address;
    }

  /* Code below isn't smart enough to move a doubleword in two parts,
     so use output_move_double to do that in the cases that require it.  */
  if ((mode == DImode || mode == DFmode)
      && ! FP_REG_P (operands[0]))
    return output_move_double (operands);

  return load_opcode (mode, "%L1(%?r31),%0", operands[0]);
}
1013 \f
1014 #if 0
/* Load the address specified by OPERANDS[3] into the register
   specified by OPERANDS[0].

   OPERANDS[3] may be the result of a sum, hence it could either be:

   (1) CONST
   (2) REG
   (2) REG + CONST_INT
   (3) REG + REG + CONST_INT
   (4) REG + REG (special case of 3).

   Note that (3) is not a legitimate address.
   All cases are handled here.

   (This whole function is inside "#if 0" and is compiled out;
   output_block_move references it only from its own disabled code.)  */

void
output_load_address (operands)
     rtx *operands;
{
  rtx base, offset;

  if (CONSTANT_P (operands[3]))
    {
      output_asm_insn ("mov %3,%0", operands);
      return;
    }

  if (REG_P (operands[3]))
    {
      /* "shl r0,src,dst" shifts by zero -- an i860 register-copy idiom.  */
      if (REGNO (operands[0]) != REGNO (operands[3]))
	output_asm_insn ("shl %?r0,%3,%0", operands);
      return;
    }

  if (GET_CODE (operands[3]) != PLUS)
    abort ();

  base = XEXP (operands[3], 0);
  offset = XEXP (operands[3], 1);

  /* Canonicalize so that any constant term is in OFFSET.  */
  if (GET_CODE (base) == CONST_INT)
    {
      rtx tmp = base;
      base = offset;
      offset = tmp;
    }

  if (GET_CODE (offset) != CONST_INT)
    {
      /* Operand is (PLUS (REG) (REG)).  */
      base = operands[3];
      offset = const0_rtx;
    }

  if (REG_P (base))
    {
      /* REG + CONST_INT.  */
      operands[6] = base;
      operands[7] = offset;
      CC_STATUS_PARTIAL_INIT;
      if (SMALL_INT (offset))
	output_asm_insn ("adds %7,%6,%0", operands);
      else
	output_asm_insn ("mov %7,%0\n\tadds %0,%6,%0", operands);
    }
  else if (GET_CODE (base) == PLUS)
    {
      /* REG + REG + CONST_INT.  */
      operands[6] = XEXP (base, 0);
      operands[7] = XEXP (base, 1);
      operands[8] = offset;

      CC_STATUS_PARTIAL_INIT;
      if (SMALL_INT (offset))
	output_asm_insn ("adds %6,%7,%0\n\tadds %8,%0,%0", operands);
      else
	output_asm_insn ("mov %8,%0\n\tadds %0,%6,%0\n\tadds %0,%7,%0", operands);
    }
  else
    abort ();
}
1093 #endif
1094
/* Output code to place a size count SIZE in register REG.
   Because block moves are pipelined, we don't include the
   first element in the transfer of SIZE to REG.
   For this, we subtract ALIGN.  (Actually, I think it is not
   right to subtract on this machine, so right now we don't.)

   Note: in the live (#if 1) path below, ALIGN is placed in
   xoperands[2] but never actually used -- the subtraction only
   happens in the disabled #else branch.  */

static void
output_size_for_block_move (size, reg, align)
     rtx size, reg, align;
{
  rtx xoperands[3];

  xoperands[0] = reg;
  xoperands[1] = size;
  xoperands[2] = align;

#if 1
  /* Plain copy of SIZE into REG; no ALIGN adjustment (see above).
     The singlemove may leave r31 in an unknown state, so drop the
     cached high-half knowledge first.  */
  cc_status.flags &= ~ CC_KNOW_HI_R31;
  output_asm_insn (singlemove_string (xoperands), xoperands);
#else
  if (GET_CODE (size) == REG)
    output_asm_insn ("sub %2,%1,%0", xoperands);
  else
    {
      xoperands[1] = GEN_INT (INTVAL (size) - INTVAL (align));
      cc_status.flags &= ~ CC_KNOW_HI_R31;
      output_asm_insn ("mov %1,%0", xoperands);
    }
#endif
}
1125
1126 /* Emit code to perform a block move.
1127
1128 OPERANDS[0] is the destination.
1129 OPERANDS[1] is the source.
1130 OPERANDS[2] is the size.
1131 OPERANDS[3] is the known safe alignment.
1132 OPERANDS[4..6] are pseudos we can safely clobber as temps. */
1133
1134 char *
1135 output_block_move (operands)
1136 rtx *operands;
1137 {
1138 /* A vector for our computed operands. Note that load_output_address
1139 makes use of (and can clobber) up to the 8th element of this vector. */
1140 rtx xoperands[10];
1141 rtx zoperands[10];
1142 static int movstrsi_label = 0;
1143 int i, j;
1144 rtx temp1 = operands[4];
1145 rtx alignrtx = operands[3];
1146 int align = INTVAL (alignrtx);
1147 int chunk_size;
1148
1149 xoperands[0] = operands[0];
1150 xoperands[1] = operands[1];
1151 xoperands[2] = temp1;
1152
1153 /* We can't move more than four bytes at a time
1154 because we have only one register to move them through. */
1155 if (align > 4)
1156 {
1157 align = 4;
1158 alignrtx = GEN_INT (4);
1159 }
1160
1161 /* Recognize special cases of block moves. These occur
1162 when GNU C++ is forced to treat something as BLKmode
1163 to keep it in memory, when its mode could be represented
1164 with something smaller.
1165
1166 We cannot do this for global variables, since we don't know
1167 what pages they don't cross. Sigh. */
1168 if (GET_CODE (operands[2]) == CONST_INT
1169 && ! CONSTANT_ADDRESS_P (operands[0])
1170 && ! CONSTANT_ADDRESS_P (operands[1]))
1171 {
1172 int size = INTVAL (operands[2]);
1173 rtx op0 = xoperands[0];
1174 rtx op1 = xoperands[1];
1175
1176 if ((align & 3) == 0 && (size & 3) == 0 && (size >> 2) <= 16)
1177 {
1178 if (memory_address_p (SImode, plus_constant (op0, size))
1179 && memory_address_p (SImode, plus_constant (op1, size)))
1180 {
1181 cc_status.flags &= ~CC_KNOW_HI_R31;
1182 for (i = (size>>2)-1; i >= 0; i--)
1183 {
1184 xoperands[0] = plus_constant (op0, i * 4);
1185 xoperands[1] = plus_constant (op1, i * 4);
1186 output_asm_insn ("ld.l %a1,%?r31\n\tst.l %?r31,%a0",
1187 xoperands);
1188 }
1189 return "";
1190 }
1191 }
1192 else if ((align & 1) == 0 && (size & 1) == 0 && (size >> 1) <= 16)
1193 {
1194 if (memory_address_p (HImode, plus_constant (op0, size))
1195 && memory_address_p (HImode, plus_constant (op1, size)))
1196 {
1197 cc_status.flags &= ~CC_KNOW_HI_R31;
1198 for (i = (size>>1)-1; i >= 0; i--)
1199 {
1200 xoperands[0] = plus_constant (op0, i * 2);
1201 xoperands[1] = plus_constant (op1, i * 2);
1202 output_asm_insn ("ld.s %a1,%?r31\n\tst.s %?r31,%a0",
1203 xoperands);
1204 }
1205 return "";
1206 }
1207 }
1208 else if (size <= 16)
1209 {
1210 if (memory_address_p (QImode, plus_constant (op0, size))
1211 && memory_address_p (QImode, plus_constant (op1, size)))
1212 {
1213 cc_status.flags &= ~CC_KNOW_HI_R31;
1214 for (i = size-1; i >= 0; i--)
1215 {
1216 xoperands[0] = plus_constant (op0, i);
1217 xoperands[1] = plus_constant (op1, i);
1218 output_asm_insn ("ld.b %a1,%?r31\n\tst.b %?r31,%a0",
1219 xoperands);
1220 }
1221 return "";
1222 }
1223 }
1224 }
1225
1226 /* Since we clobber untold things, nix the condition codes. */
1227 CC_STATUS_INIT;
1228
1229 /* This is the size of the transfer.
1230 Either use the register which already contains the size,
1231 or use a free register (used by no operands). */
1232 output_size_for_block_move (operands[2], operands[4], alignrtx);
1233
1234 #if 0
1235 /* Also emit code to decrement the size value by ALIGN. */
1236 zoperands[0] = operands[0];
1237 zoperands[3] = plus_constant (operands[0], align);
1238 output_load_address (zoperands);
1239 #endif
1240
1241 /* Generate number for unique label. */
1242
1243 xoperands[3] = GEN_INT (movstrsi_label++);
1244
1245 /* Calculate the size of the chunks we will be trying to move first. */
1246
1247 #if 0
1248 if ((align & 3) == 0)
1249 chunk_size = 4;
1250 else if ((align & 1) == 0)
1251 chunk_size = 2;
1252 else
1253 #endif
1254 chunk_size = 1;
1255
1256 /* Copy the increment (negative) to a register for bla insn. */
1257
1258 xoperands[4] = GEN_INT (- chunk_size);
1259 xoperands[5] = operands[5];
1260 output_asm_insn ("adds %4,%?r0,%5", xoperands);
1261
1262 /* Predecrement the loop counter. This happens again also in the `bla'
1263 instruction which precedes the loop, but we need to have it done
1264 two times before we enter the loop because of the bizarre semantics
1265 of the bla instruction. */
1266
1267 output_asm_insn ("adds %5,%2,%2", xoperands);
1268
1269 /* Check for the case where the original count was less than or equal to
1270 zero. Avoid going through the loop at all if the original count was
1271 indeed less than or equal to zero. Note that we treat the count as
1272 if it were a signed 32-bit quantity here, rather than an unsigned one,
1273 even though we really shouldn't. We have to do this because of the
1274 semantics of the `ble' instruction, which assume that the count is
1275 a signed 32-bit value. Anyway, in practice it won't matter because
1276 nobody is going to try to do a memcpy() of more than half of the
1277 entire address space (i.e. 2 gigabytes) anyway. */
1278
1279 output_asm_insn ("bc .Le%3", xoperands);
1280
1281 /* Make available a register which is a temporary. */
1282
1283 xoperands[6] = operands[6];
1284
1285 /* Now the actual loop.
1286 In xoperands, elements 1 and 0 are the input and output vectors.
1287 Element 2 is the loop index. Element 5 is the increment. */
1288
1289 output_asm_insn ("subs %1,%5,%1", xoperands);
1290 output_asm_insn ("bla %5,%2,.Lm%3", xoperands);
1291 output_asm_insn ("adds %0,%2,%6", xoperands);
1292 output_asm_insn ("\n.Lm%3:", xoperands); /* Label for bla above. */
1293 output_asm_insn ("\n.Ls%3:", xoperands); /* Loop start label. */
1294 output_asm_insn ("adds %5,%6,%6", xoperands);
1295
1296 /* NOTE: The code here which is supposed to handle the cases where the
1297 sources and destinations are known to start on a 4 or 2 byte boundary
1298 are currently broken. They fail to do anything about the overflow
1299 bytes which might still need to be copied even after we have copied
1300 some number of words or halfwords. Thus, for now we use the lowest
1301 common denominator, i.e. the code which just copies some number of
1302 totally unaligned individual bytes. (See the calculation of
1303 chunk_size above. */
1304
1305 if (chunk_size == 4)
1306 {
1307 output_asm_insn ("ld.l %2(%1),%?r31", xoperands);
1308 output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
1309 output_asm_insn ("st.l %?r31,8(%6)", xoperands);
1310 }
1311 else if (chunk_size == 2)
1312 {
1313 output_asm_insn ("ld.s %2(%1),%?r31", xoperands);
1314 output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
1315 output_asm_insn ("st.s %?r31,4(%6)", xoperands);
1316 }
1317 else /* chunk_size == 1 */
1318 {
1319 output_asm_insn ("ld.b %2(%1),%?r31", xoperands);
1320 output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
1321 output_asm_insn ("st.b %?r31,2(%6)", xoperands);
1322 }
1323 output_asm_insn ("\n.Le%3:", xoperands); /* Here if count <= 0. */
1324
1325 return "";
1326 }
1327 \f
1328 #if 0
1329 /* Output a delayed branch insn with the delay insn in its
1330 branch slot. The delayed branch insn template is in TEMPLATE,
1331 with operands OPERANDS. The insn in its delay slot is INSN.
1332
1333 As a special case, since we know that all memory transfers are via
1334 ld/st insns, if we see a (MEM (SYMBOL_REF ...)) we divide the memory
1335 reference around the branch as
1336
1337 orh ha%x,%?r0,%?r31
1338 b ...
1339 ld/st l%x(%?r31),...
1340
1341 As another special case, we handle loading (SYMBOL_REF ...) and
1342 other large constants around branches as well:
1343
1344 orh h%x,%?r0,%0
1345 b ...
1346 or l%x,%0,%1
1347
1348 */
1349 /* ??? Disabled because this re-recognition is incomplete and causes
1350 constrain_operands to segfault. Anyone who cares should fix up
1351 the code to use the DBR pass. */
1352
/* Output the branch insn TEMPLATE (with operands OPERANDS) together
   with the move insn INSN placed in its delay slot.  INSN's pattern is
   a PARALLEL-like vector: element 0 is the destination of the move and
   element 1 is the source.  Always returns "".

   NOTE: this whole function is compiled out (`#if 0' above); the
   re-recognition in the final `else' arm is known to be incomplete and
   can make constrain_operands segfault.  */
char *
output_delayed_branch (template, operands, insn)
     char *template;
     rtx *operands;
     rtx insn;
{
  rtx src = XVECEXP (PATTERN (insn), 0, 1);
  rtx dest = XVECEXP (PATTERN (insn), 0, 0);

  /* See if we are doing some branch together with setting some register
     to some 32-bit value which does (or may) have some of the high-order
     16 bits set.  If so, we need to set the register in two stages.  One
     stage must be done before the branch, and the other one can be done
     in the delay slot.  */

  if ( (GET_CODE (src) == CONST_INT
	&& ((unsigned) INTVAL (src) & (unsigned) 0xffff0000) != (unsigned) 0)
      || (GET_CODE (src) == SYMBOL_REF)
      || (GET_CODE (src) == LABEL_REF)
      || (GET_CODE (src) == CONST))
    {
      rtx xoperands[2];
      xoperands[0] = dest;
      xoperands[1] = src;

      CC_STATUS_PARTIAL_INIT;
      /* Output the `orh' insn (high 16 bits) before the branch.  */
      output_asm_insn ("orh %H1,%?r0,%0", xoperands);

      /* Output the branch instruction next.  */
      output_asm_insn (template, operands);

      /* Now output the `or' insn (low 16 bits) in the delay slot.  */
      output_asm_insn ("or %L1,%0,%0", xoperands);
    }
  else if ((GET_CODE (src) == MEM
	    && CONSTANT_ADDRESS_P (XEXP (src, 0)))
	   || (GET_CODE (dest) == MEM
	       && CONSTANT_ADDRESS_P (XEXP (dest, 0))))
    {
      /* A load from (or store to) a constant address: split the address
	 around the branch -- `orh' of the high half before the branch,
	 the ld/st through r31 in the delay slot.  */
      rtx xoperands[2];
      char *split_template;
      xoperands[0] = dest;
      xoperands[1] = src;

      /* Output the `orh' insn, unless r31 is already known to hold the
	 right high half (per the cc_prev_status tracking).  */
      if (GET_CODE (src) == MEM)
	{
	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
		 && cc_prev_status.mdep == XEXP (operands[1], 0)))
	    {
	      CC_STATUS_INIT;
	      output_asm_insn ("orh %h1,%?r0,%?r31", xoperands);
	    }
	  split_template = load_opcode (GET_MODE (dest),
					"%L1(%?r31),%0", dest);
	}
      else
	{
	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
		 && cc_prev_status.mdep == XEXP (operands[0], 0)))
	    {
	      CC_STATUS_INIT;
	      output_asm_insn ("orh %h0,%?r0,%?r31", xoperands);
	    }
	  split_template = store_opcode (GET_MODE (dest),
					 "%r1,%L0(%?r31)", src);
	}

      /* Output the branch instruction next.  */
      output_asm_insn (template, operands);

      /* Now output the load or store.
	 No need to do a CC_STATUS_INIT, because we are branching anyway.  */
      output_asm_insn (split_template, xoperands);
    }
  else
    {
      /* General case: build a fresh SET insn for the delay slot and
	 re-recognize it so we can fetch its normal output template.  */
      int insn_code_number;
      rtx pat = gen_rtx_SET (VOIDmode, dest, src);
      rtx delay_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, pat, -1, 0, 0);
      int i;

      /* Output the branch instruction first.  */
      output_asm_insn (template, operands);

      /* Now recognize the insn which we put in its delay slot.
	 We must do this after outputting the branch insn,
	 since operands may just be a pointer to `recog_data.operand'.  */
      INSN_CODE (delay_insn) = insn_code_number
	= recog (pat, delay_insn, NULL_PTR);
      if (insn_code_number == -1)
	abort ();

      for (i = 0; i < insn_data[insn_code_number].n_operands; i++)
	{
	  if (GET_CODE (recog_data.operand[i]) == SUBREG)
	    recog_data.operand[i] = alter_subreg (recog_data.operand[i]);
	}

      insn_extract (delay_insn);
      if (! constrain_operands (1))
	fatal_insn_not_found (delay_insn);

      template = get_insn_template (insn_code_number, delay_insn);
      output_asm_insn (template, recog_data.operand);
    }
  CC_STATUS_INIT;
  return "";
}
1465
/* Output a newly constructed insn DELAY_INSN -- one built with gen_rtx
   rather than seen by the normal final() scan -- by recognizing it and
   emitting its standard output template.  Updates cc_status via
   NOTICE_UPDATE_CC.  Always returns "".  (Compiled out: see the `#if 0'
   enclosing this section.)  */
char *
output_delay_insn (delay_insn)
     rtx delay_insn;
{
  char *template;
  int insn_code_number;
  int i;

  /* Now recognize the insn which we put in its delay slot.
     We must do this after outputting the branch insn,
     since operands may just be a pointer to `recog_data.operand'.  */
  insn_code_number = recog_memoized (delay_insn);
  if (insn_code_number == -1)
    abort ();

  /* Extract the operands of this delay insn.  */
  INSN_CODE (delay_insn) = insn_code_number;
  insn_extract (delay_insn);

  /* It is possible that this insn has not been properly scanned by final
     yet.  If this insn's operands don't appear in the peephole's
     actual operands, then they won't be fixed up by final, so we
     make sure they get fixed up here.  -- This is a kludge.  */
  for (i = 0; i < insn_data[insn_code_number].n_operands; i++)
    {
      if (GET_CODE (recog_data.operand[i]) == SUBREG)
	recog_data.operand[i] = alter_subreg (recog_data.operand[i]);
    }

#ifdef REGISTER_CONSTRAINTS
  if (! constrain_operands (1))
    abort ();
#endif

  cc_prev_status = cc_status;

  /* Update `cc_status' for this instruction.
     The instruction's output routine may change it further.
     If the output routine for a jump insn needs to depend
     on the cc status, it should look at cc_prev_status.  */

  NOTICE_UPDATE_CC (PATTERN (delay_insn), delay_insn);

  /* Now get the template for what this insn would
     have been, without the branch.  */

  template = get_insn_template (insn_code_number, delay_insn);
  output_asm_insn (template, recog_data.operand);
  return "";
}
1517 #endif
1518 \f
1519 /* Special routine to convert an SFmode value represented as a
1520 CONST_DOUBLE into its equivalent unsigned long bit pattern.
1521 We convert the value from a double precision floating-point
1522 value to single precision first, and thence to a bit-wise
1523 equivalent unsigned long value. This routine is used when
1524 generating an immediate move of an SFmode value directly
1525 into a general register because the svr4 assembler doesn't
1526 grok floating literals in instruction operand contexts. */
1527
1528 unsigned long
1529 sfmode_constant_to_ulong (x)
1530 rtx x;
1531 {
1532 REAL_VALUE_TYPE d;
1533 union { float f; unsigned long i; } u2;
1534
1535 if (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != SFmode)
1536 abort ();
1537
1538 #if TARGET_FLOAT_FORMAT != HOST_FLOAT_FORMAT
1539 error IEEE emulation needed
1540 #endif
1541 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
1542 u2.f = d;
1543 return u2.i;
1544 }
1545 \f
1546 /* This function generates the assembly code for function entry.
1547 The macro FUNCTION_PROLOGUE in i860.h is defined to call this function.
1548
1549 ASM_FILE is a stdio stream to output the code to.
1550 SIZE is an int: how many units of temporary storage to allocate.
1551
1552 Refer to the array `regs_ever_live' to determine which registers
1553 to save; `regs_ever_live[I]' is nonzero if register number I
1554 is ever used in the function. This macro is responsible for
1555 knowing which registers should not be saved even if used.
1556
1557 NOTE: `frame_lower_bytes' is the count of bytes which will lie
1558 between the new `fp' value and the new `sp' value after the
1559 prologue is done. `frame_upper_bytes' is the count of bytes
1560 that will lie between the new `fp' and the *old* `sp' value
1561 after the new `fp' is setup (in the prologue). The upper
1562 part of each frame always includes at least 2 words (8 bytes)
1563 to hold the saved frame pointer and the saved return address.
1564
1565 The svr4 ABI for the i860 now requires that the values of the
1566 stack pointer and frame pointer registers be kept aligned to
1567 16-byte boundaries at all times. We obey that restriction here.
1568
1569 The svr4 ABI for the i860 is entirely vague when it comes to specifying
1570 exactly where the "preserved" registers should be saved. The native
1571 svr4 C compiler I now have doesn't help to clarify the requirements
1572 very much because it is plainly out-of-date and non-ABI-compliant
1573 (in at least one important way, i.e. how it generates function
1574 epilogues).
1575
1576 The native svr4 C compiler saves the "preserved" registers (i.e.
1577 r4-r15 and f2-f7) in the lower part of a frame (i.e. at negative
1578 offsets from the frame pointer).
1579
1580 Previous versions of GCC also saved the "preserved" registers in the
1581 "negative" part of the frame, but they saved them using positive
1582 offsets from the (adjusted) stack pointer (after it had been adjusted
1583 to allocate space for the new frame). That's just plain wrong
1584 because if the current function calls alloca(), the stack pointer
1585 will get moved, and it will be impossible to restore the registers
1586 properly again after that.
1587
1588 Both compilers handled parameter registers (i.e. r16-r27 and f8-f15)
1589 by copying their values either into various "preserved" registers or
1590 into stack slots in the lower part of the current frame (as seemed
1591 appropriate, depending upon subsequent usage of these values).
1592
1593 Here we want to save the preserved registers at some offset from the
1594 frame pointer register so as to avoid any possible problems arising
1595 from calls to alloca(). We can either save them at small positive
1596 offsets from the frame pointer, or at small negative offsets from
1597 the frame pointer. If we save them at small negative offsets from
1598 the frame pointer (i.e. in the lower part of the frame) then we
1599 must tell the rest of GCC (via STARTING_FRAME_OFFSET) exactly how
1600 many bytes of space we plan to use in the lower part of the frame
1601 for this purpose. Since other parts of the compiler reference the
1602 value of STARTING_FRAME_OFFSET long before final() calls this function,
1603 we would have to go ahead and assume the worst-case storage requirements
1604 for saving all of the "preserved" registers (and use that number, i.e.
1605 `80', to define STARTING_FRAME_OFFSET) if we wanted to save them in
1606 the lower part of the frame. That could potentially be very wasteful,
1607 and that wastefulness could really hamper people compiling for embedded
1608 i860 targets with very tight limits on stack space. Thus, we choose
1609 here to save the preserved registers in the upper part of the
1610 frame, so that we can decide at the very last minute how much (or how
1611 little) space we must allocate for this purpose.
1612
1613 To satisfy the needs of the svr4 ABI "tdesc" scheme, preserved
1614 registers must always be saved so that the saved values of registers
1615 with higher numbers are at higher addresses. We obey that restriction
1616 here.
1617
1618 There are two somewhat different ways that you can generate prologues
1619 here... i.e. pedantically ABI-compliant, and the "other" way. The
1620 "other" way is more consistent with what is currently generated by the
1621 "native" svr4 C compiler for the i860. That's important if you want
1622 to use the current (as of 8/91) incarnation of svr4 SDB for the i860.
1623 The SVR4 SDB for the i860 insists on having function prologues be
1624 non-ABI-compliant!
1625
1626 To get fully ABI-compliant prologues, define I860_STRICT_ABI_PROLOGUES
1627 in the i860svr4.h file. (By default this is *not* defined).
1628
1629 The differences between the ABI-compliant and non-ABI-compliant prologues
1630 are that (a) the ABI version seems to require the use of *signed*
1631 (rather than unsigned) adds and subtracts, and (b) the ordering of
1632 the various steps (e.g. saving preserved registers, saving the
1633 return address, setting up the new frame pointer value) is different.
1634
1635 For strict ABI compliance, it seems to be the case that the very last
1636 thing that is supposed to happen in the prologue is getting the frame
1637 pointer set to its new value (but only after everything else has
1638 already been properly setup). We do that here, but only if the symbol
1639 I860_STRICT_ABI_PROLOGUES is defined.
1640 */
1641
1642 #ifndef STACK_ALIGNMENT
1643 #define STACK_ALIGNMENT 16
1644 #endif
1645
1646 extern char call_used_regs[];
1647 extern int leaf_function_p ();
1648
1649 char *current_function_original_name;
1650
1651 static int must_preserve_r1;
1652 static unsigned must_preserve_bytes;
1653
1654 void
1655 function_prologue (asm_file, local_bytes)
1656 register FILE *asm_file;
1657 register unsigned local_bytes;
1658 {
1659 register unsigned frame_lower_bytes;
1660 register unsigned frame_upper_bytes;
1661 register unsigned total_fsize;
1662 register unsigned preserved_reg_bytes = 0;
1663 register unsigned i;
1664 register unsigned preserved_so_far = 0;
1665
1666 must_preserve_r1 = (optimize < 2 || ! leaf_function_p ());
1667 must_preserve_bytes = 4 + (must_preserve_r1 ? 4 : 0);
1668
1669 /* Count registers that need preserving. Ignore r0. It never needs
1670 preserving. */
1671
1672 for (i = 1; i < FIRST_PSEUDO_REGISTER; i++)
1673 {
1674 if (regs_ever_live[i] && ! call_used_regs[i])
1675 preserved_reg_bytes += 4;
1676 }
1677
1678 /* Round-up the frame_lower_bytes so that it's a multiple of 16. */
1679
1680 frame_lower_bytes = (local_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
1681
1682 /* The upper part of each frame will contain the saved fp,
1683 the saved r1, and stack slots for all of the other "preserved"
1684 registers that we find we will need to save & restore. */
1685
1686 frame_upper_bytes = must_preserve_bytes + preserved_reg_bytes;
1687
1688 /* Round-up the frame_upper_bytes so that it's a multiple of 16. */
1689
1690 frame_upper_bytes
1691 = (frame_upper_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
1692
1693 total_fsize = frame_upper_bytes + frame_lower_bytes;
1694
1695 #ifndef I860_STRICT_ABI_PROLOGUES
1696
1697 /* There are two kinds of function prologues.
1698 You use the "small" version if the total frame size is
1699 small enough so that it can fit into an immediate 16-bit
1700 value in one instruction. Otherwise, you use the "large"
1701 version of the function prologue. */
1702
1703 if (total_fsize > 0x7fff)
1704 {
1705 /* Adjust the stack pointer. The ABI sez to do this using `adds',
1706 but the native C compiler on svr4 uses `addu'. */
1707
1708 fprintf (asm_file, "\taddu -%d,%ssp,%ssp\n",
1709 frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);
1710
1711 /* Save the old frame pointer. */
1712
1713 fprintf (asm_file, "\tst.l %sfp,0(%ssp)\n",
1714 i860_reg_prefix, i860_reg_prefix);
1715
1716 /* Setup the new frame pointer. The ABI sez to do this after
1717 preserving registers (using adds), but that's not what the
1718 native C compiler on svr4 does. */
1719
1720 fprintf (asm_file, "\taddu 0,%ssp,%sfp\n",
1721 i860_reg_prefix, i860_reg_prefix);
1722
1723 /* Get the value of frame_lower_bytes into r31. */
1724
1725 fprintf (asm_file, "\torh %d,%sr0,%sr31\n",
1726 frame_lower_bytes >> 16, i860_reg_prefix, i860_reg_prefix);
1727 fprintf (asm_file, "\tor %d,%sr31,%sr31\n",
1728 frame_lower_bytes & 0xffff, i860_reg_prefix, i860_reg_prefix);
1729
1730 /* Now re-adjust the stack pointer using the value in r31.
1731 The ABI sez to do this with `subs' but SDB may prefer `subu'. */
1732
1733 fprintf (asm_file, "\tsubu %ssp,%sr31,%ssp\n",
1734 i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
1735
1736 /* Preserve registers. The ABI sez to do this before setting
1737 up the new frame pointer, but that's not what the native
1738 C compiler on svr4 does. */
1739
1740 for (i = 1; i < 32; i++)
1741 if (regs_ever_live[i] && ! call_used_regs[i])
1742 fprintf (asm_file, "\tst.l %s%s,%d(%sfp)\n",
1743 i860_reg_prefix, reg_names[i],
1744 must_preserve_bytes + (4 * preserved_so_far++),
1745 i860_reg_prefix);
1746
1747 for (i = 32; i < 64; i++)
1748 if (regs_ever_live[i] && ! call_used_regs[i])
1749 fprintf (asm_file, "\tfst.l %s%s,%d(%sfp)\n",
1750 i860_reg_prefix, reg_names[i],
1751 must_preserve_bytes + (4 * preserved_so_far++),
1752 i860_reg_prefix);
1753
1754 /* Save the return address. */
1755
1756 if (must_preserve_r1)
1757 fprintf (asm_file, "\tst.l %sr1,4(%sfp)\n",
1758 i860_reg_prefix, i860_reg_prefix);
1759 }
1760 else
1761 {
1762 /* Adjust the stack pointer. The ABI sez to do this using `adds',
1763 but the native C compiler on svr4 uses `addu'. */
1764
1765 fprintf (asm_file, "\taddu -%d,%ssp,%ssp\n",
1766 total_fsize, i860_reg_prefix, i860_reg_prefix);
1767
1768 /* Save the old frame pointer. */
1769
1770 fprintf (asm_file, "\tst.l %sfp,%d(%ssp)\n",
1771 i860_reg_prefix, frame_lower_bytes, i860_reg_prefix);
1772
1773 /* Setup the new frame pointer. The ABI sez to do this after
1774 preserving registers and after saving the return address,
1775 (and its saz to do this using adds), but that's not what the
1776 native C compiler on svr4 does. */
1777
1778 fprintf (asm_file, "\taddu %d,%ssp,%sfp\n",
1779 frame_lower_bytes, i860_reg_prefix, i860_reg_prefix);
1780
1781 /* Preserve registers. The ABI sez to do this before setting
1782 up the new frame pointer, but that's not what the native
1783 compiler on svr4 does. */
1784
1785 for (i = 1; i < 32; i++)
1786 if (regs_ever_live[i] && ! call_used_regs[i])
1787 fprintf (asm_file, "\tst.l %s%s,%d(%sfp)\n",
1788 i860_reg_prefix, reg_names[i],
1789 must_preserve_bytes + (4 * preserved_so_far++),
1790 i860_reg_prefix);
1791
1792 for (i = 32; i < 64; i++)
1793 if (regs_ever_live[i] && ! call_used_regs[i])
1794 fprintf (asm_file, "\tfst.l %s%s,%d(%sfp)\n",
1795 i860_reg_prefix, reg_names[i],
1796 must_preserve_bytes + (4 * preserved_so_far++),
1797 i860_reg_prefix);
1798
1799 /* Save the return address. The ABI sez to do this earlier,
1800 and also via an offset from %sp, but the native C compiler
1801 on svr4 does it later (i.e. now) and uses an offset from
1802 %fp. */
1803
1804 if (must_preserve_r1)
1805 fprintf (asm_file, "\tst.l %sr1,4(%sfp)\n",
1806 i860_reg_prefix, i860_reg_prefix);
1807 }
1808
1809 #else /* defined(I860_STRICT_ABI_PROLOGUES) */
1810
1811 /* There are two kinds of function prologues.
1812 You use the "small" version if the total frame size is
1813 small enough so that it can fit into an immediate 16-bit
1814 value in one instruction. Otherwise, you use the "large"
1815 version of the function prologue. */
1816
1817 if (total_fsize > 0x7fff)
1818 {
1819 /* Adjust the stack pointer (thereby allocating a new frame). */
1820
1821 fprintf (asm_file, "\tadds -%d,%ssp,%ssp\n",
1822 frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);
1823
1824 /* Save the caller's frame pointer. */
1825
1826 fprintf (asm_file, "\tst.l %sfp,0(%ssp)\n",
1827 i860_reg_prefix, i860_reg_prefix);
1828
1829 /* Save return address. */
1830
1831 if (must_preserve_r1)
1832 fprintf (asm_file, "\tst.l %sr1,4(%ssp)\n",
1833 i860_reg_prefix, i860_reg_prefix);
1834
1835 /* Get the value of frame_lower_bytes into r31 for later use. */
1836
1837 fprintf (asm_file, "\torh %d,%sr0,%sr31\n",
1838 frame_lower_bytes >> 16, i860_reg_prefix, i860_reg_prefix);
1839 fprintf (asm_file, "\tor %d,%sr31,%sr31\n",
1840 frame_lower_bytes & 0xffff, i860_reg_prefix, i860_reg_prefix);
1841
1842 /* Now re-adjust the stack pointer using the value in r31. */
1843
1844 fprintf (asm_file, "\tsubs %ssp,%sr31,%ssp\n",
1845 i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
1846
1847 /* Pre-compute value to be used as the new frame pointer. */
1848
1849 fprintf (asm_file, "\tadds %ssp,%sr31,%sr31\n",
1850 i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
1851
1852 /* Preserve registers. */
1853
1854 for (i = 1; i < 32; i++)
1855 if (regs_ever_live[i] && ! call_used_regs[i])
1856 fprintf (asm_file, "\tst.l %s%s,%d(%sr31)\n",
1857 i860_reg_prefix, reg_names[i],
1858 must_preserve_bytes + (4 * preserved_so_far++),
1859 i860_reg_prefix);
1860
1861 for (i = 32; i < 64; i++)
1862 if (regs_ever_live[i] && ! call_used_regs[i])
1863 fprintf (asm_file, "\tfst.l %s%s,%d(%sr31)\n",
1864 i860_reg_prefix, reg_names[i],
1865 must_preserve_bytes + (4 * preserved_so_far++),
1866 i860_reg_prefix);
1867
1868 /* Actually set the new value of the frame pointer. */
1869
1870 fprintf (asm_file, "\tmov %sr31,%sfp\n",
1871 i860_reg_prefix, i860_reg_prefix);
1872 }
1873 else
1874 {
1875 /* Adjust the stack pointer. */
1876
1877 fprintf (asm_file, "\tadds -%d,%ssp,%ssp\n",
1878 total_fsize, i860_reg_prefix, i860_reg_prefix);
1879
1880 /* Save the caller's frame pointer. */
1881
1882 fprintf (asm_file, "\tst.l %sfp,%d(%ssp)\n",
1883 i860_reg_prefix, frame_lower_bytes, i860_reg_prefix);
1884
1885 /* Save the return address. */
1886
1887 if (must_preserve_r1)
1888 fprintf (asm_file, "\tst.l %sr1,%d(%ssp)\n",
1889 i860_reg_prefix, frame_lower_bytes + 4, i860_reg_prefix);
1890
1891 /* Preserve registers. */
1892
1893 for (i = 1; i < 32; i++)
1894 if (regs_ever_live[i] && ! call_used_regs[i])
1895 fprintf (asm_file, "\tst.l %s%s,%d(%ssp)\n",
1896 i860_reg_prefix, reg_names[i],
1897 frame_lower_bytes + must_preserve_bytes + (4 * preserved_so_far++),
1898 i860_reg_prefix);
1899
1900 for (i = 32; i < 64; i++)
1901 if (regs_ever_live[i] && ! call_used_regs[i])
1902 fprintf (asm_file, "\tfst.l %s%s,%d(%ssp)\n",
1903 i860_reg_prefix, reg_names[i],
1904 frame_lower_bytes + must_preserve_bytes + (4 * preserved_so_far++),
1905 i860_reg_prefix);
1906
1907 /* Setup the new frame pointer. */
1908
1909 fprintf (asm_file, "\tadds %d,%ssp,%sfp\n",
1910 frame_lower_bytes, i860_reg_prefix, i860_reg_prefix);
1911 }
1912 #endif /* defined(I860_STRICT_ABI_PROLOGUES) */
1913
1914 #ifdef ASM_OUTPUT_PROLOGUE_SUFFIX
1915 ASM_OUTPUT_PROLOGUE_SUFFIX (asm_file);
1916 #endif /* defined(ASM_OUTPUT_PROLOGUE_SUFFIX) */
1917 }
1918 \f
1919 /* This function generates the assembly code for function exit.
1920 The macro FUNCTION_EPILOGUE in i860.h is defined to call this function.
1921
1922 ASM_FILE is a stdio stream to output the code to.
1923 SIZE is an int: how many units of temporary storage to allocate.
1924
1925 The function epilogue should not depend on the current stack pointer!
1926 It should use the frame pointer only. This is mandatory because
1927 of alloca; we also take advantage of it to omit stack adjustments
1928 before returning.
1929
1930 Note that when we go to restore the preserved register values we must
1931 not try to address their slots by using offsets from the stack pointer.
1932 That's because the stack pointer may have been moved during the function
1933 execution due to a call to alloca(). Rather, we must restore all
1934 preserved registers via offsets from the frame pointer value.
1935
1936 Note also that when the current frame is being "popped" (by adjusting
1937 the value of the stack pointer) on function exit, we must (for the
1938 sake of alloca) set the new value of the stack pointer based upon
1939 the current value of the frame pointer. We can't just add what we
1940 believe to be the (static) frame size to the stack pointer because
1941 if we did that, and alloca() had been called during this function,
1942 we would end up returning *without* having fully deallocated all of
1943 the space grabbed by alloca. If that happened, and a function
1944 containing one or more alloca() calls was called over and over again,
1945 then the stack would grow without limit!
1946
   Finally note that the epilogues generated here are completely ABI
   compliant.  They go out of their way to ensure that the value in
   the frame pointer register is never less than the value in the stack
   pointer register.  It's not clear why this relationship needs to be
   maintained at all times, but maintaining it only costs one extra
   instruction, so what the hell.
*/
1954
1955 /* This corresponds to a version 4 TDESC structure. Lower numbered
1956 versions successively omit the last word of the structure. We
1957 don't try to handle version 5 here. */
1958
/* Flag word at the head of a TDESC entry.  The bit-field layout is
   presumably dictated by the svr4 ABI "tdesc" format -- verify against
   the ABI supplement before changing.  */
typedef struct TDESC_flags {
	int version:4;		/* TDESC format version (decremented in
				   function_epilogue when trailing words
				   are omitted) */
	int reg_packing:1;
	int callable_block:1;
	int reserved:4;
	int fregs:6;	/* fp regs 2-7 */
	int iregs:16;	/* regs 0-15 */
} TDESC_flags;

/* Version 4 TDESC structure; lower versions successively omit the
   last word (see the comment above).  */
typedef struct TDESC {
	TDESC_flags flags;
	int integer_reg_offset;		/* same as must_preserve_bytes */
	int floating_point_reg_offset;
	unsigned int positive_frame_size;	/* same as frame_upper_bytes */
	unsigned int negative_frame_size;	/* same as frame_lower_bytes */
} TDESC;
1975
1976 void
1977 function_epilogue (asm_file, local_bytes)
1978 register FILE *asm_file;
1979 register unsigned local_bytes;
1980 {
1981 register unsigned frame_upper_bytes;
1982 register unsigned frame_lower_bytes;
1983 register unsigned preserved_reg_bytes = 0;
1984 register unsigned i;
1985 register unsigned restored_so_far = 0;
1986 register unsigned int_restored;
1987 register unsigned mask;
1988 unsigned intflags=0;
1989 register TDESC_flags *flags = (TDESC_flags *) &intflags;
1990
1991 flags->version = 4;
1992 flags->reg_packing = 1;
1993 flags->iregs = 8; /* old fp always gets saved */
1994
1995 /* Round-up the frame_lower_bytes so that it's a multiple of 16. */
1996
1997 frame_lower_bytes = (local_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
1998
1999 /* Count the number of registers that were preserved in the prologue.
2000 Ignore r0. It is never preserved. */
2001
2002 for (i = 1; i < FIRST_PSEUDO_REGISTER; i++)
2003 {
2004 if (regs_ever_live[i] && ! call_used_regs[i])
2005 preserved_reg_bytes += 4;
2006 }
2007
2008 /* The upper part of each frame will contain only saved fp,
2009 the saved r1, and stack slots for all of the other "preserved"
2010 registers that we find we will need to save & restore. */
2011
2012 frame_upper_bytes = must_preserve_bytes + preserved_reg_bytes;
2013
2014 /* Round-up frame_upper_bytes so that t is a multiple of 16. */
2015
2016 frame_upper_bytes
2017 = (frame_upper_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
2018
2019 /* Restore all of the "preserved" registers that need restoring. */
2020
2021 mask = 2;
2022
2023 for (i = 1; i < 32; i++, mask<<=1)
2024 if (regs_ever_live[i] && ! call_used_regs[i]) {
2025 fprintf (asm_file, "\tld.l %d(%sfp),%s%s\n",
2026 must_preserve_bytes + (4 * restored_so_far++),
2027 i860_reg_prefix, i860_reg_prefix, reg_names[i]);
2028 if (i > 3 && i < 16)
2029 flags->iregs |= mask;
2030 }
2031
2032 int_restored = restored_so_far;
2033 mask = 1;
2034
2035 for (i = 32; i < 64; i++) {
2036 if (regs_ever_live[i] && ! call_used_regs[i]) {
2037 fprintf (asm_file, "\tfld.l %d(%sfp),%s%s\n",
2038 must_preserve_bytes + (4 * restored_so_far++),
2039 i860_reg_prefix, i860_reg_prefix, reg_names[i]);
2040 if (i > 33 & i < 40)
2041 flags->fregs |= mask;
2042 }
2043 if (i > 33 && i < 40)
2044 mask<<=1;
2045 }
2046
2047 /* Get the value we plan to use to restore the stack pointer into r31. */
2048
2049 fprintf (asm_file, "\tadds %d,%sfp,%sr31\n",
2050 frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);
2051
2052 /* Restore the return address and the old frame pointer. */
2053
2054 if (must_preserve_r1) {
2055 fprintf (asm_file, "\tld.l 4(%sfp),%sr1\n",
2056 i860_reg_prefix, i860_reg_prefix);
2057 flags->iregs |= 2;
2058 }
2059
2060 fprintf (asm_file, "\tld.l 0(%sfp),%sfp\n",
2061 i860_reg_prefix, i860_reg_prefix);
2062
2063 /* Return and restore the old stack pointer value. */
2064
2065 fprintf (asm_file, "\tbri %sr1\n\tmov %sr31,%ssp\n",
2066 i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
2067
2068 #ifdef OUTPUT_TDESC /* Output an ABI-compliant TDESC entry */
2069 if (! frame_lower_bytes) {
2070 flags->version--;
2071 if (! frame_upper_bytes) {
2072 flags->version--;
2073 if (restored_so_far == int_restored) /* No FP saves */
2074 flags->version--;
2075 }
2076 }
2077 assemble_name(asm_file,current_function_original_name);
2078 fputs(".TDESC:\n", asm_file);
2079 fprintf(asm_file, "%s 0x%0x\n", ASM_LONG, intflags);
2080 fprintf(asm_file, "%s %d\n", ASM_LONG,
2081 int_restored ? must_preserve_bytes : 0);
2082 if (flags->version > 1) {
2083 fprintf(asm_file, "%s %d\n", ASM_LONG,
2084 (restored_so_far == int_restored) ? 0 : must_preserve_bytes +
2085 (4 * int_restored));
2086 if (flags->version > 2) {
2087 fprintf(asm_file, "%s %d\n", ASM_LONG, frame_upper_bytes);
2088 if (flags->version > 3)
2089 fprintf(asm_file, "%s %d\n", ASM_LONG, frame_lower_bytes);
2090 }
2091 }
2092 tdesc_section();
2093 fprintf(asm_file, "%s ", ASM_LONG);
2094 assemble_name(asm_file, current_function_original_name);
2095 fprintf(asm_file, "\n%s ", ASM_LONG);
2096 assemble_name(asm_file, current_function_original_name);
2097 fputs(".TDESC\n", asm_file);
2098 text_section();
2099 #endif
2100 }
2101 \f
2102
2103 /* Expand a library call to __builtin_saveregs. */
2104 rtx
2105 i860_saveregs ()
2106 {
2107 rtx fn = gen_rtx_SYMBOL_REF (Pmode, "__builtin_saveregs");
2108 rtx save = gen_reg_rtx (Pmode);
2109 rtx valreg = LIBCALL_VALUE (Pmode);
2110 rtx ret;
2111
2112 /* The return value register overlaps the first argument register.
2113 Save and restore it around the call. */
2114 emit_move_insn (save, valreg);
2115 ret = emit_library_call_value (fn, NULL_RTX, 1, Pmode, 0);
2116 if (GET_CODE (ret) != REG || REGNO (ret) < FIRST_PSEUDO_REGISTER)
2117 ret = copy_to_reg (ret);
2118 emit_move_insn (valreg, save);
2119
2120 return ret;
2121 }
2122
2123 tree
2124 i860_build_va_list ()
2125 {
2126 tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
2127 tree record;
2128
2129 record = make_node (RECORD_TYPE);
2130
2131 field_ireg_used = build_decl (FIELD_DECL, get_identifier ("__ireg_used"),
2132 unsigned_type_node);
2133 field_freg_used = build_decl (FIELD_DECL, get_identifier ("__freg_used"),
2134 unsigned_type_node);
2135 field_reg_base = build_decl (FIELD_DECL, get_identifier ("__reg_base"),
2136 ptr_type_node);
2137 field_mem_ptr = build_decl (FIELD_DECL, get_identifier ("__mem_ptr"),
2138 ptr_type_node);
2139
2140 DECL_FIELD_CONTEXT (field_ireg_used) = record;
2141 DECL_FIELD_CONTEXT (field_freg_used) = record;
2142 DECL_FIELD_CONTEXT (field_reg_base) = record;
2143 DECL_FIELD_CONTEXT (field_mem_ptr) = record;
2144
2145 #ifdef I860_SVR4_VA_LIST
2146 TYPE_FIELDS (record) = field_ireg_used;
2147 TREE_CHAIN (field_ireg_used) = field_freg_used;
2148 TREE_CHAIN (field_freg_used) = field_reg_base;
2149 TREE_CHAIN (field_reg_base) = field_mem_ptr;
2150 #else
2151 TYPE_FIELDS (record) = field_reg_base;
2152 TREE_CHAIN (field_reg_base) = field_mem_ptr;
2153 TREE_CHAIN (field_mem_ptr) = field_ireg_used;
2154 TREE_CHAIN (field_ireg_used) = field_freg_used;
2155 #endif
2156
2157 layout_type (record);
2158 return record;
2159 }
2160
2161 void
2162 i860_va_start (stdarg_p, valist, nextarg)
2163 int stdarg_p;
2164 tree valist;
2165 rtx nextarg;
2166 {
2167 tree saveregs, t;
2168
2169 saveregs = make_tree (build_pointer_type (va_list_type_node),
2170 expand_builtin_saveregs ());
2171 saveregs = build1 (INDIRECT_REF, va_list_type_node, saveregs);
2172
2173 if (stdarg_p)
2174 {
2175 tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
2176 tree ireg_used, freg_used, reg_base, mem_ptr;
2177
2178 #ifdef I860_SVR4_VA_LIST
2179 field_ireg_used = TYPE_FIELDS (va_list_type_node);
2180 field_freg_used = TREE_CHAIN (field_ireg_used);
2181 field_reg_base = TREE_CHAIN (field_freg_used);
2182 field_mem_ptr = TREE_CHAIN (field_reg_base);
2183 #else
2184 field_reg_base = TYPE_FIELDS (va_list_type_node);
2185 field_mem_ptr = TREE_CHAIN (field_reg_base);
2186 field_ireg_used = TREE_CHAIN (field_mem_ptr);
2187 field_freg_used = TREE_CHAIN (field_ireg_used);
2188 #endif
2189
2190 ireg_used = build (COMPONENT_REF, TREE_TYPE (field_ireg_used),
2191 valist, field_ireg_used);
2192 freg_used = build (COMPONENT_REF, TREE_TYPE (field_freg_used),
2193 valist, field_freg_used);
2194 reg_base = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
2195 valist, field_reg_base);
2196 mem_ptr = build (COMPONENT_REF, TREE_TYPE (field_mem_ptr),
2197 valist, field_mem_ptr);
2198
2199 t = build_int_2 (current_function_args_info.ints, 0);
2200 t = build (MODIFY_EXPR, TREE_TYPE (ireg_used), ireg_used, t);
2201 TREE_SIDE_EFFECTS (t) = 1;
2202 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2203
2204 t = build_int_2 (ROUNDUP (current_function_args_info.floats, 8), 0);
2205 t = build (MODIFY_EXPR, TREE_TYPE (freg_used), freg_used, t);
2206 TREE_SIDE_EFFECTS (t) = 1;
2207 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2208
2209 t = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
2210 saveregs, field_reg_base);
2211 t = build (MODIFY_EXPR, TREE_TYPE (reg_base), reg_base, t);
2212 TREE_SIDE_EFFECTS (t) = 1;
2213 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2214
2215 t = make_tree (ptr_type_node, nextarg);
2216 t = build (MODIFY_EXPR, TREE_TYPE (mem_ptr), mem_ptr, t);
2217 TREE_SIDE_EFFECTS (t) = 1;
2218 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2219 }
2220 else
2221 {
2222 t = build (MODIFY_EXPR, va_list_type_node, valist, saveregs);
2223 TREE_SIDE_EFFECTS (t) = 1;
2224 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2225 }
2226 }
2227
2228 #define NUM_PARM_FREGS 8
2229 #define NUM_PARM_IREGS 12
2230 #ifdef I860_SVR4_VARARGS
2231 #define FREG_OFFSET 0
2232 #define IREG_OFFSET (NUM_PARM_FREGS * UNITS_PER_WORD)
2233 #else
2234 #define FREG_OFFSET (NUM_PARM_IREGS * UNITS_PER_WORD)
2235 #define IREG_OFFSET 0
2236 #endif
2237
2238 rtx
2239 i860_va_arg (valist, type)
2240 tree valist, type;
2241 {
2242 tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
2243 tree type_ptr_node, t;
2244 rtx lab_over = NULL_RTX;
2245 rtx ret, val;
2246 HOST_WIDE_INT align;
2247
2248 #ifdef I860_SVR4_VA_LIST
2249 field_ireg_used = TYPE_FIELDS (va_list_type_node);
2250 field_freg_used = TREE_CHAIN (field_ireg_used);
2251 field_reg_base = TREE_CHAIN (field_freg_used);
2252 field_mem_ptr = TREE_CHAIN (field_reg_base);
2253 #else
2254 field_reg_base = TYPE_FIELDS (va_list_type_node);
2255 field_mem_ptr = TREE_CHAIN (field_reg_base);
2256 field_ireg_used = TREE_CHAIN (field_mem_ptr);
2257 field_freg_used = TREE_CHAIN (field_ireg_used);
2258 #endif
2259
2260 field_ireg_used = build (COMPONENT_REF, TREE_TYPE (field_ireg_used),
2261 valist, field_ireg_used);
2262 field_freg_used = build (COMPONENT_REF, TREE_TYPE (field_freg_used),
2263 valist, field_freg_used);
2264 field_reg_base = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
2265 valist, field_reg_base);
2266 field_mem_ptr = build (COMPONENT_REF, TREE_TYPE (field_mem_ptr),
2267 valist, field_mem_ptr);
2268
2269 ret = gen_reg_rtx (Pmode);
2270 type_ptr_node = build_pointer_type (type);
2271
2272 if (! AGGREGATE_TYPE_P (type))
2273 {
2274 int nparm, incr, ofs;
2275 tree field;
2276 rtx lab_false;
2277
2278 if (FLOAT_TYPE_P (type))
2279 {
2280 field = field_freg_used;
2281 nparm = NUM_PARM_FREGS;
2282 incr = 2;
2283 ofs = FREG_OFFSET;
2284 }
2285 else
2286 {
2287 field = field_ireg_used;
2288 nparm = NUM_PARM_IREGS;
2289 incr = int_size_in_bytes (type) / UNITS_PER_WORD;
2290 ofs = IREG_OFFSET;
2291 }
2292
2293 lab_false = gen_label_rtx ();
2294 lab_over = gen_label_rtx ();
2295
2296 emit_cmp_and_jump_insns (expand_expr (field, NULL_RTX, 0, 0),
2297 GEN_INT (nparm - incr), GT, const0_rtx,
2298 TYPE_MODE (TREE_TYPE (field)),
2299 TREE_UNSIGNED (field), 0, lab_false);
2300
2301 t = fold (build (POSTINCREMENT_EXPR, TREE_TYPE (field), field,
2302 build_int_2 (incr, 0)));
2303 TREE_SIDE_EFFECTS (t) = 1;
2304
2305 t = fold (build (MULT_EXPR, TREE_TYPE (field), field,
2306 build_int_2 (UNITS_PER_WORD, 0)));
2307 TREE_SIDE_EFFECTS (t) = 1;
2308
2309 t = fold (build (PLUS_EXPR, ptr_type_node, field_reg_base,
2310 fold (build (PLUS_EXPR, TREE_TYPE (field), t,
2311 build_int_2 (ofs, 0)))));
2312 TREE_SIDE_EFFECTS (t) = 1;
2313
2314 val = expand_expr (t, ret, VOIDmode, EXPAND_NORMAL);
2315 if (val != ret)
2316 emit_move_insn (ret, val);
2317
2318 emit_jump_insn (gen_jump (lab_over));
2319 emit_barrier ();
2320 emit_label (lab_false);
2321 }
2322
2323 align = TYPE_ALIGN (type);
2324 if (align < BITS_PER_WORD)
2325 align = BITS_PER_WORD;
2326 align /= BITS_PER_UNIT;
2327
2328 t = build (PLUS_EXPR, ptr_type_node, field_mem_ptr,
2329 build_int_2 (align - 1, 0));
2330 t = build (BIT_AND_EXPR, ptr_type_node, t, build_int_2 (-align, -1));
2331
2332 val = expand_expr (t, ret, VOIDmode, EXPAND_NORMAL);
2333 if (val != ret)
2334 emit_move_insn (ret, val);
2335
2336 t = fold (build (PLUS_EXPR, ptr_type_node,
2337 make_tree (ptr_type_node, ret),
2338 build_int_2 (int_size_in_bytes (type), 0)));
2339 t = build (MODIFY_EXPR, ptr_type_node, field_mem_ptr, t);
2340 TREE_SIDE_EFFECTS (t) = 1;
2341 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2342
2343 if (lab_over)
2344 emit_label (lab_over);
2345
2346 return ret;
2347 }